diff --git a/.github/workflows/helm-dependency.yaml b/.github/workflows/helm-dependency.yaml new file mode 100644 index 0000000..92de5c5 --- /dev/null +++ b/.github/workflows/helm-dependency.yaml @@ -0,0 +1,42 @@ +name: Helm Dependency Build + +on: + push: + branches: + - dev + - main + paths: + - 'kubernetes/**' + +jobs: + build-dependencies: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + persist-credentials: true + + - name: Set up Helm + uses: azure/setup-helm@v3 + with: + version: v3.12.0 + + - name: Build Helm dependencies + working-directory: ./kubernetes + run: | + rm -f Chart.lock + helm dependency build + + - name: Commit and push if dependencies updated + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add kubernetes/Chart.lock kubernetes/charts/ + if ! git diff --cached --quiet; then + git commit -m "chore: update Helm dependencies (charts/ and Chart.lock)" + git push + else + echo "No changes to Helm dependencies." 
+ fi diff --git a/kubernetes/charts/Authentication-Layer/Chart.yaml b/kubernetes/charts/Authentication-Layer/Chart.yaml new file mode 100644 index 0000000..649d153 --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Authentication-Layer +description: Authentication Layer Service for RAG +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Authentication-Layer/templates/deployment-byk-authentication-layer.yaml b/kubernetes/charts/Authentication-Layer/templates/deployment-byk-authentication-layer.yaml new file mode 100644 index 0000000..e3c1c6f --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/templates/deployment-byk-authentication-layer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.authentication.image.repository }}:{{ .Values.authentication.image.tag }}" + imagePullPolicy: {{ .Values.authentication.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + env: + - name: PORT + value: {{ .Values.authentication.environment.serverPort | quote }} + - name: TIM_SERVICE_URL + value: {{ .Values.authentication.environment.timServiceUrl | quote }} + - name: CORS_ORIGINS + value: {{ .Values.authentication.environment.corsOrigins | quote }} + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Authentication-Layer/templates/ingress-byk-authentication-layer.yaml b/kubernetes/charts/Authentication-Layer/templates/ingress-byk-authentication-layer.yaml new file mode 100644 index 0000000..bf443fd --- 
/dev/null +++ b/kubernetes/charts/Authentication-Layer/templates/ingress-byk-authentication-layer.yaml @@ -0,0 +1,29 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: "{{ .Values.release_name }}-ingress" + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: {{ .Values.ingress.certIssuerName | quote }} + labels: + name: "{{ .Values.release_name }}-ingress" +spec: + rules: + - host: auth.{{ .Values.domain }} + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: "{{ .Values.release_name }}" + port: + # Use the templated service port so an override of service.port + # cannot desynchronize the Ingress from the Service (was hard-coded 3004).
 + number: {{ .Values.service.port }} + tls: + - hosts: + - auth.{{ .Values.domain }} + secretName: {{ .Values.secretname }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Authentication-Layer/templates/service-byk-authentication-layer.yaml b/kubernetes/charts/Authentication-Layer/templates/service-byk-authentication-layer.yaml new file mode 100644 index 0000000..a17b39d --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/templates/service-byk-authentication-layer.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Authentication-Layer/values.yaml b/kubernetes/charts/Authentication-Layer/values.yaml new file mode 100644 index 0000000..544da2b --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/values.yaml @@ -0,0 +1,35 @@ +replicas: 1 +enabled: true + + +release_name: "authentication-layer" +domain: "rag.local" # need to set this +secretname:
"authentication-layer-tls" + +ingress: + enabled: true + certIssuerName: "letsencrypt-prod" + +# Authentication Layer Configuration +authentication: + image: + repository: "ghcr.io/buerokratt/authentication-layer" # Update with actual auth-layer image repository + tag: "latest" + pullPolicy: Always + + environment: + serverPort: "3004" + timServiceUrl: "http://tim:8085" + corsOrigins: "http://localhost:3001,http://localhost:3003,http://localhost:8086" + +service: + type: ClusterIP + port: 3004 + +resources: + requests: + memory: "10Mi" + cpu: "1m" + limits: + memory: "50Mi" + cpu: "5m" diff --git a/kubernetes/charts/ClickHouse/Chart.yaml b/kubernetes/charts/ClickHouse/Chart.yaml new file mode 100644 index 0000000..60e9ced --- /dev/null +++ b/kubernetes/charts/ClickHouse/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: ClickHouse +description: ClickHouse analytics database for Langfuse +type: application +version: 0.1.0 +appVersion: "latest" \ No newline at end of file diff --git a/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml b/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml new file mode 100644 index 0000000..78f9969 --- /dev/null +++ b/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml @@ -0,0 +1,88 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: clickhouse +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: clickhouse + spec: + {{- if .Values.securityContext }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + containers: + - name: "{{ .Values.release_name }}" + image: "{{ 
.Values.images.clickhouse.registry }}/{{ .Values.images.clickhouse.repository }}:{{ .Values.images.clickhouse.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.httpPort }} + protocol: TCP + - name: native + containerPort: {{ .Values.service.nativePort }} + protocol: TCP + # Non-sensitive env's from values.yaml + env: + - name: CLICKHOUSE_DB + value: "{{ .Values.env.CLICKHOUSE_DB }}" + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.httpPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.httpPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeMounts: + - name: langfuse-clickhouse-data + mountPath: /var/lib/clickhouse + - name: langfuse-clickhouse-logs + mountPath: /var/log/clickhouse-server + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + {{- if .Values.persistence.enabled }} + volumes: + - name: langfuse-clickhouse-data + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-data" + - name: langfuse-clickhouse-logs + 
persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-logs" + {{- end }} + restartPolicy: Always +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/ClickHouse/templates/pvc-clickhouse.yaml b/kubernetes/charts/ClickHouse/templates/pvc-clickhouse.yaml new file mode 100644 index 0000000..910b761 --- /dev/null +++ b/kubernetes/charts/ClickHouse/templates/pvc-clickhouse.yaml @@ -0,0 +1,37 @@ +{{- if and .Values.enabled .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-data" + labels: + app: "{{ .Values.release_name }}" + component: clickhouse + type: data +spec: + accessModes: + - {{ .Values.persistence.data.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.data.size }} + {{- if .Values.persistence.data.storageClass }} + storageClassName: {{ .Values.persistence.data.storageClass }} + {{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-logs" + labels: + app: "{{ .Values.release_name }}" + component: clickhouse + type: logs +spec: + accessModes: + - {{ .Values.persistence.logs.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.logs.size }} + {{- if .Values.persistence.logs.storageClass }} + storageClassName: {{ .Values.persistence.logs.storageClass }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/ClickHouse/templates/secret.yaml b/kubernetes/charts/ClickHouse/templates/secret.yaml new file mode 100644 index 0000000..984a5dd --- /dev/null +++ b/kubernetes/charts/ClickHouse/templates/secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: clickhouse-secrets + labels: + app: "{{ .Values.release_name }}" + component: clickhouse +type: Opaque +stringData: + CLICKHOUSE_USER: "" + CLICKHOUSE_PASSWORD: "" +{{- end }} diff --git 
a/kubernetes/charts/ClickHouse/templates/service-byk-clickhouse.yaml b/kubernetes/charts/ClickHouse/templates/service-byk-clickhouse.yaml new file mode 100644 index 0000000..1610d18 --- /dev/null +++ b/kubernetes/charts/ClickHouse/templates/service-byk-clickhouse.yaml @@ -0,0 +1,22 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: clickhouse +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.httpPort }} + targetPort: {{ .Values.service.httpPort }} + - name: native + protocol: TCP + port: {{ .Values.service.nativePort }} + targetPort: {{ .Values.service.nativePort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/ClickHouse/values.yaml b/kubernetes/charts/ClickHouse/values.yaml new file mode 100644 index 0000000..287c84a --- /dev/null +++ b/kubernetes/charts/ClickHouse/values.yaml @@ -0,0 +1,63 @@ +replicas: 1 +enabled: true + +images: + clickhouse: + registry: "docker.io" + repository: "clickhouse/clickhouse-server" + tag: "latest" + +release_name: "clickhouse" + +service: + type: ClusterIP + # ClickHouse HTTP interface port + httpPort: 8123 + # ClickHouse native protocol port + nativePort: 9000 + +# Environment variables +env: + CLICKHOUSE_DB: "default" + +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: clickhouse-secrets + +# Security context +securityContext: + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + +persistence: + enabled: true + data: + storageClass: "" + accessMode: ReadWriteOnce + size: 10Gi + logs: + storageClass: "" + accessMode: ReadWriteOnce + size: 5Gi + +resources: + requests: + memory: "512Mi" + cpu: "100m" + limits: + memory: "2Gi" + cpu: "500m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + 
failureThreshold: 3 + successThreshold: 1 + # HTTP endpoint for health check + httpPath: "/ping" \ No newline at end of file diff --git a/kubernetes/charts/CronManager/Chart.yaml b/kubernetes/charts/CronManager/Chart.yaml new file mode 100644 index 0000000..31b14b5 --- /dev/null +++ b/kubernetes/charts/CronManager/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: CronManager +description: CronManager Service for RAG +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/CronManager/templates/configmap-cronmanager-config.yaml b/kubernetes/charts/CronManager/templates/configmap-cronmanager-config.yaml new file mode 100644 index 0000000..a60d8ac --- /dev/null +++ b/kubernetes/charts/CronManager/templates/configmap-cronmanager-config.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Values.release_name }}-config" + labels: + app: "{{ .Values.release_name }}" +data: + constants.ini: | + + RAG_MODULE_RUUTER_PRIVATE={{ .Values.constants.RAG_MODULE_RUUTER_PRIVATE }} + RAG_MODULE_RUUTER_PUBLIC={{ .Values.constants.RAG_MODULE_RUUTER_PUBLIC }} + RAG_MODULE_RESQL={{ .Values.constants.RAG_MODULE_RESQL }} + RAG_MODULE_TIM={{ .Values.constants.RAG_MODULE_TIM }} + RAG_MODULE_DATAMAPPER={{ .Values.constants.RAG_MODULE_DATAMAPPER }} + \ No newline at end of file diff --git a/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml b/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml new file mode 100644 index 0000000..375d5db --- /dev/null +++ b/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml @@ -0,0 +1,107 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + annotations: + {{- with 
.Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: "{{ .Values.release_name }}" + spec: + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + initContainers: + - name: git-clone + image: alpine/git:latest + securityContext: + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: dsl + mountPath: /DSL + - name: scripts + mountPath: /app/scripts + - name: vector-indexer + mountPath: /app/src/vector_indexer + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + mkdir -p /app/src/vector_indexer && + mkdir -p /app/scripts && + mkdir -p /DSL + + cp -r /tmp/rag/DSL/CronManager/DSL/* /DSL/ && + cp -r /tmp/rag/DSL/CronManager/script/* /app/scripts/ && + cp -r /tmp/rag/src/vector_indexer/* /app/src/vector_indexer/ && + + # Set execute permissions on all shell scripts + chmod +x /app/scripts/*.sh && + echo "Scripts copied and permissions set successfully" + + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.cronmanager.image.registry }}/{{ .Values.cronmanager.image.repository }}:{{ .Values.cronmanager.image.tag }}" + imagePullPolicy: {{ .Values.cronmanager.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.cronmanager.environment.containerPort }} + protocol: TCP + env: + - name: PYTHONPATH + value: {{ .Values.cronmanager.environment.pythonPath | quote }} + - name: VAULT_ADDR + value: {{ .Values.cronmanager.environment.VAULT_ADDR | quote }} + - name: RAG_MODULE_RUUTER_PRIVATE + value: {{ .Values.constants.RAG_MODULE_RUUTER_PRIVATE | quote }} + - name: RAG_MODULE_RESQL + value: {{ .Values.constants.RAG_MODULE_RESQL | quote }} + - name: RAG_MODULE_TIM + value: {{ .Values.constants.RAG_MODULE_TIM | quote }} + - name: UV_VERBOSE + value: "1" + + volumeMounts: + - name: dsl + mountPath: /DSL + - name: cronmanager-data + mountPath: /app/data + - name: scripts + mountPath: /app/scripts + - name: vector-indexer 
+ mountPath: /app/src/vector_indexer + - name: datasets + mountPath: /app/datasets + + volumes: + - name: dsl + emptyDir: {} + - name: scripts + emptyDir: {} + - name: vector-indexer + emptyDir: {} + - name: datasets + emptyDir: {} + - name: cronmanager-data + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-data" + - name: config-volume + configMap: + name: "{{ .Values.release_name }}-config" + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/CronManager/templates/pvc-cronmanager.yaml b/kubernetes/charts/CronManager/templates/pvc-cronmanager.yaml new file mode 100644 index 0000000..f278883 --- /dev/null +++ b/kubernetes/charts/CronManager/templates/pvc-cronmanager.yaml @@ -0,0 +1,17 @@ +{{- if .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-data" + labels: + app: "{{ .Values.release_name }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/CronManager/templates/service-byk-cronmanager.yaml b/kubernetes/charts/CronManager/templates/service-byk-cronmanager.yaml new file mode 100644 index 0000000..c6d6722 --- /dev/null +++ b/kubernetes/charts/CronManager/templates/service-byk-cronmanager.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/CronManager/values.yaml 
b/kubernetes/charts/CronManager/values.yaml new file mode 100644 index 0000000..825c897 --- /dev/null +++ b/kubernetes/charts/CronManager/values.yaml @@ -0,0 +1,46 @@ +replicas: 1 +enabled: true +release_name: "cron-manager" + +cronmanager: + image: + registry: ghcr.io + repository: buerokratt/cronmanager + tag: "python-1.2.0" + pullPolicy: IfNotPresent + + environment: + containerPort: "8080" + pythonPath: "/app:/app/src/vector_indexer" + VAULT_ADDR: "http://vault:8200" + +service: + type: ClusterIP + port: 9010 + targetPort: 8080 + +# PVC Configuration +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + size: 10Gi + +# Service URLs +constants: + RAG_MODULE_RUUTER_PRIVATE: "http://ruuter-private:8088" + RAG_MODULE_RUUTER_PUBLIC: "http://ruuter-public:8086" + RAG_MODULE_RESQL: "http://resql:8082" + RAG_MODULE_TIM: "http://tim:8085" + RAG_MODULE_DATAMAPPER: "http://data-mapper:3000" + +resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "4Gi" + cpu: "2000m" + +podAnnotations: + dsl-checksum: "initial" \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/Chart.yaml b/kubernetes/charts/DataMapper/Chart.yaml new file mode 100644 index 0000000..a39f755 --- /dev/null +++ b/kubernetes/charts/DataMapper/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: data-mapper +description: A Helm chart for Data Mapper +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml b/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml new file mode 100644 index 0000000..f52cf13 --- /dev/null +++ b/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml @@ -0,0 +1,67 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + 
matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: "{{ .Values.release_name }}" + spec: + initContainers: + - name: git-clone + image: alpine/git:latest + volumeMounts: + - name: dsl-lib + mountPath: /workspace/app/lib + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + # mkdir -p /workspace/app/views/rag-search && + mkdir -p /workspace/app/lib && + + # cp -r /tmp/rag/DSL/DMapper/rag-search/hbs/* /workspace/app/views/rag-search && + cp -r /tmp/rag/DSL/DMapper/rag-search/lib/* /workspace/app/lib + + + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.scope.registry }}/{{ .Values.images.scope.repository }}:{{ .Values.images.scope.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: http + env: + - name: PORT + value: "{{ .Values.env.PORT }}" + - name: CONTENT_FOLDER + value: "{{ .Values.env.CONTENT_FOLDER }}" + volumeMounts: + - name: dsl-lib + mountPath: /workspace/app/lib + + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + volumes: + - name: dsl-lib + emptyDir: {} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/templates/service-byk-data-mapper.yaml b/kubernetes/charts/DataMapper/templates/service-byk-data-mapper.yaml new file mode 100644 index 0000000..c6d6722 --- /dev/null +++ b/kubernetes/charts/DataMapper/templates/service-byk-data-mapper.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ 
.Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/values.yaml b/kubernetes/charts/DataMapper/values.yaml new file mode 100644 index 0000000..3267f3b --- /dev/null +++ b/kubernetes/charts/DataMapper/values.yaml @@ -0,0 +1,33 @@ +replicas: 1 +enabled: true +release_name: "data-mapper" + +images: + scope: + registry: "ghcr.io" + repository: "buerokratt/datamapper" + tag: "v2.2.9" + +service: + type: ClusterIP + port: 3001 + targetPort: 3000 + +env: + # DataMapper specific configuration + PORT: "3000" + CONTENT_FOLDER: "/data" + +resources: + requests: + memory: "512Mi" + cpu: "250m" + limits: + memory: "1Gi" + cpu: "500m" + + +pullPolicy: IfNotPresent + +podAnnotations: + dsl-checksum: "initial" \ No newline at end of file diff --git a/kubernetes/charts/GUI/Chart.yaml b/kubernetes/charts/GUI/Chart.yaml new file mode 100644 index 0000000..2fb3f33 --- /dev/null +++ b/kubernetes/charts/GUI/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: GUI +description: A Helm chart for GUI in RAG +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml b/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml new file mode 100644 index 0000000..94be872 --- /dev/null +++ b/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml @@ -0,0 +1,87 @@ +{{- if .Values.gui.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.gui.release_name }} + labels: + app: {{ .Values.gui.release_name }} +spec: + replicas: {{ .Values.gui.replicas }} + selector: + matchLabels: + app: {{ .Values.gui.release_name }} + template: + metadata: + labels: + app: {{ .Values.gui.release_name }} + + spec: + containers: + - name: {{ .Values.gui.release_name }} + image: "{{ 
.Values.gui.image.repository }}:{{ .Values.gui.image.tag }}" + imagePullPolicy: {{ .Values.gui.image.pullPolicy }} + ports: + - containerPort: {{ .Values.gui.port }} + protocol: TCP + env: + # Node.js environment configuration + - name: NODE_ENV + value: {{ .Values.gui.nodeEnv | quote }} + - name: PORT + value: {{ .Values.gui.port | quote }} + - name: DEBUG_ENABLED + value: {{ .Values.gui.debugEnabled | quote }} + - name: CHOKIDAR_USEPOLLING + value: "true" + + # React application configuration + - name: REACT_APP_RUUTER_API_URL + value: {{ .Values.gui.services.ruuterPublic | quote }} + - name: REACT_APP_RUUTER_PRIVATE_API_URL + value: {{ .Values.gui.services.ruuterPrivate | quote }} + - name: REACT_APP_EXTERNAL_API_URL + value: {{ .Values.gui.services.datasetGenerator | quote }} + - name: REACT_APP_CUSTOMER_SERVICE_LOGIN + value: {{ printf "%s/et/dev-auth" .Values.gui.services.authenticationLayer | quote }} + - name: REACT_APP_NOTIFICATION_NODE_URL + value: {{ .Values.gui.services.notificationNode | quote }} + - name: REACT_APP_CSP + value: {{ .Values.gui.csp | quote }} + - name: REACT_APP_SERVICE_ID + value: {{ .Values.gui.serviceId | quote }} + - name: REACT_APP_ENABLE_HIDDEN_FEATURES + value: {{ .Values.gui.enableHiddenFeatures | quote | upper }} + + # Vite development server configuration + - name: VITE_HOST + value: {{ .Values.gui.vite.host | quote }} + - name: VITE_ALLOWED_HOSTS + value: {{ .Values.gui.vite.allowedHosts | quote }} + + resources: + limits: + cpu: {{ .Values.gui.resources.limits.cpu }} + memory: {{ .Values.gui.resources.limits.memory }} + requests: + cpu: {{ .Values.gui.resources.requests.cpu }} + memory: {{ .Values.gui.resources.requests.memory }} + + # livenessProbe: + # httpGet: + # path: / + # port: {{ .Values.gui.port }} + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + + # readinessProbe: + # httpGet: + # path: / + # port: {{ .Values.gui.port }} + # initialDelaySeconds: 10 + # periodSeconds: 5 + # timeoutSeconds: 
3 + + restartPolicy: Always + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/GUI/templates/ingress-byk-gui.yaml b/kubernetes/charts/GUI/templates/ingress-byk-gui.yaml new file mode 100644 index 0000000..72b429f --- /dev/null +++ b/kubernetes/charts/GUI/templates/ingress-byk-gui.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: gui-ingress + namespace: rag-module + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: localhost + http: + paths: + - path: /rag-search + pathType: Prefix + backend: + service: + name: gui + port: + # Must match the GUI Service port (values.yaml: gui.service.port = 3001);
 + # the previous hard-coded 3003 pointed at a port the Service does not expose,
 + # so every request through this Ingress returned 503.
 + number: {{ .Values.gui.service.port }} + \ No newline at end of file diff --git a/kubernetes/charts/GUI/templates/service-byk-gui.yaml b/kubernetes/charts/GUI/templates/service-byk-gui.yaml new file mode 100644 index 0000000..1a7a35a --- /dev/null +++ b/kubernetes/charts/GUI/templates/service-byk-gui.yaml @@ -0,0 +1,15 @@ +{{- if .Values.gui.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.gui.release_name }} +spec: + type: {{ .Values.gui.service.type }} + ports: + - port: {{ .Values.gui.service.port }} + targetPort: {{ .Values.gui.service.targetPort }} + protocol: TCP + name: http + selector: + app: {{ .Values.gui.release_name }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/GUI/values.yaml b/kubernetes/charts/GUI/values.yaml new file mode 100644 index 0000000..e48710e --- /dev/null +++ b/kubernetes/charts/GUI/values.yaml @@ -0,0 +1,62 @@ +gui: + enabled: true + release_name: gui + image: + repository: "ghcr.io/buerokratt/rag-gui" # Update with actual GUI image repository + tag: sha-84833e1 + pullPolicy: Always + + # React application configuration + nodeEnv: production + port: 3001 + debugEnabled: true + enableHiddenFeatures: false + + #service URLs + services: + ruuterPublic: "http://ruuter-public:8086" + ruuterPrivate: "http://localhost:8088" + authenticationLayer: "http://authentication-layer:3004" + notificationNode:
"http://notifications-node:4040" + datasetGenerator: "http://dataset-gen-service:8000" + + # Content Security Policy - Updated for browser access + csp: "default-src 'self'; connect-src 'self' http://ruuter-public:8086 https://ruuter-public:8086 http://ruuter-private:8088 https://ruuter-private:8088 http://authentication-layer:3004 https://authentication-layer:3004 http://notifications-node:4040 https://notifications-node:4040 http://dataset-gen-service:8000 https://dataset-gen-service:8000 http://localhost:* https://localhost:* http://global-classifier.local https://global-classifier.local ws://global-classifier.local wss://global-classifier.local; script-src 'self' 'unsafe-eval' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:; font-src 'self' data:;" + + # Service configuration + serviceId: "conversations,settings,monitoring" + + # Vite development server (for development mode) + vite: + host: "0.0.0.0" + allowedHosts: "localhost,127.0.0.1" + + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 100m + memory: 256Mi + + replicas: 1 + + service: + type: ClusterIP + port: 3001 + targetPort: 3001 + + # ingress: + # enabled: true + # className: nginx + # annotations: + # nginx.ingress.kubernetes.io/rewrite-target: / + # nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" + # nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" + # nginx.ingress.kubernetes.io/proxy-body-size: "50m" + # hosts: + # - host: rag.local + # paths: + # - path: / + # pathType: Prefix + # tls: [] \ No newline at end of file diff --git a/kubernetes/charts/Grafana/Chart.yaml b/kubernetes/charts/Grafana/Chart.yaml new file mode 100644 index 0000000..0bdeaa7 --- /dev/null +++ b/kubernetes/charts/Grafana/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Grafana +description: A Helm chart for Grafana dashboard and monitoring +type: application +version: 0.1.0 +appVersion: "10.2.0" \ No newline at end of file diff --git 
a/kubernetes/charts/Grafana/dashboards/grafana-dashboard-deployment.json b/kubernetes/charts/Grafana/dashboards/grafana-dashboard-deployment.json new file mode 100644 index 0000000..a1e469f --- /dev/null +++ b/kubernetes/charts/Grafana/dashboards/grafana-dashboard-deployment.json @@ -0,0 +1,167 @@ +{ + "id": null, + "title": "RAG Module Orchestrator", + "tags": ["deployment", "models", "triton"], + "timezone": "browser", + "refresh": "30s", + "time": { + "from": "now-1h", + "to": "now" + }, + "templating": { + "list": [ + { + "name": "service_name", + "type": "query", + "label": "Service Name", + "refresh": 1, + "query": "label_values(service)", + "datasource": { + "type": "loki", + "uid": "loki-datasource" + }, + "multi": true, + "includeAll": true, + "allValue": ".*", + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "options": [], + "regex": "", + "sort": 0, + "skipUrlSync": false, + "hide": 0 + }, + { + "name": "log_level", + "type": "custom", + "label": "Log Level", + "multi": true, + "includeAll": true, + "allValue": "ERROR|INFO|WARNING|DEBUG", + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "options": [ + { + "text": "All", + "value": "$__all", + "selected": true + }, + { + "text": "ERROR", + "value": "ERROR", + "selected": false + }, + { + "text": "WARNING", + "value": "WARNING", + "selected": false + }, + { + "text": "INFO", + "value": "INFO", + "selected": false + }, + { + "text": "DEBUG", + "value": "DEBUG", + "selected": false + } + ], + "query": "ERROR,INFO,WARNING,DEBUG", + "queryType": "", + "refresh": 0, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "hide": 0 + } + ] + }, + "panels": [ + { + "id": 1, + "title": "Log Messages Over Time by Level", + "type": "graph", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 0 + }, + "targets": [ + { + "expr": "sum by (service, level) (count_over_time({service=~\"$service_name\", level=~\"$log_level\"}[5m]))", + "refId": "A", + 
"legendFormat": "{{service}} - {{level}}", + "datasource": { + "type": "loki", + "uid": "loki-datasource" + } + } + ], + "yAxes": [ + { + "label": "Log Count", + "min": 0 + } + ], + "xAxis": { + "show": true + }, + "legend": { + "show": true, + "values": true, + "current": true, + "total": true + }, + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "fill": 1, + "linewidth": 2, + "pointradius": 2, + "bars": false, + "lines": true, + "points": false, + "stack": false, + "percentage": false, + "nullPointMode": "null as zero" + }, + { + "id": 2, + "title": "Deployment Logs", + "type": "logs", + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 8 + }, + "targets": [ + { + "expr": "{service=~\"$service_name\", level=~\"$log_level\"}", + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki-datasource" + } + } + ], + "options": { + "showTime": true, + "showLabels": true, + "showCommonLabels": false, + "wrapLogMessage": true, + "sortOrder": "Descending" + } + } + ] +} diff --git a/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml b/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml new file mode 100644 index 0000000..3228eca --- /dev/null +++ b/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboards + labels: + app: grafana +data: +{{- range $path, $content := .Files.Glob "dashboards/*.json" }} + {{ base $path }}: | +{{ $.Files.Get $path | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/configmap-grafana.yaml b/kubernetes/charts/Grafana/templates/configmap-grafana.yaml new file mode 100644 index 0000000..c701d66 --- /dev/null +++ b/kubernetes/charts/Grafana/templates/configmap-grafana.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-datasources + labels: + app: grafana +data: + datasources.yaml: | + apiVersion: 1 + datasources: + 
{{- range .Values.datasources }} + - name: {{ .name }} + type: {{ .type }} + url: {{ .url }} + access: {{ .access }} + isDefault: {{ .isDefault }} + {{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-providers + labels: + app: grafana +data: + dashboards.yaml: | + apiVersion: 1 + providers: + {{- range .Values.dashboardProviders }} + - name: {{ .name }} + orgId: {{ .orgId }} + folder: '{{ .folder }}' + type: {{ .type }} + disableDeletion: {{ .disableDeletion }} + updateIntervalSeconds: {{ .updateIntervalSeconds }} + allowUiUpdates: {{ .allowUiUpdates }} + options: + path: {{ .options.path }} + {{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/deployment-grafana.yaml b/kubernetes/charts/Grafana/templates/deployment-grafana.yaml new file mode 100644 index 0000000..d9191db --- /dev/null +++ b/kubernetes/charts/Grafana/templates/deployment-grafana.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ .Values.release_name }} + template: + metadata: + labels: + app: {{ .Values.release_name }} + spec: + containers: + - name: {{ .Values.release_name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag}}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.port }} + protocol: TCP + # Non-sensitive env's from values.yaml + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} + volumeMounts: + - name: datasources + mountPath: /etc/grafana/provisioning/datasources + readOnly: true + - name: dashboard-providers + mountPath: /etc/grafana/provisioning/dashboards + readOnly: true 
+ - name: dashboards + mountPath: /etc/grafana/dashboards + readOnly: true + {{- if .Values.persistence.enabled }} + - name: storage + mountPath: /var/lib/grafana + {{- end }} + livenessProbe: + httpGet: + path: /api/health + port: http + initialDelaySeconds: 60 + periodSeconds: 30 + readinessProbe: + httpGet: + path: /api/health + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: datasources + configMap: + name: grafana-datasources + - name: dashboard-providers + configMap: + name: grafana-dashboard-providers + - name: dashboards + configMap: + name: grafana-dashboards + {{- if .Values.persistence.enabled }} + - name: storage + persistentVolumeClaim: + claimName: grafana-storage + {{- end }} + \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/pvc-grafana.yaml b/kubernetes/charts/Grafana/templates/pvc-grafana.yaml new file mode 100644 index 0000000..23b6f2e --- /dev/null +++ b/kubernetes/charts/Grafana/templates/pvc-grafana.yaml @@ -0,0 +1,17 @@ +{{- if .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: grafana-storage + labels: + app: grafana +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/secret.yaml b/kubernetes/charts/Grafana/templates/secret.yaml new file mode 100644 index 0000000..f1748ad --- /dev/null +++ b/kubernetes/charts/Grafana/templates/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: grafana-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + GF_SECURITY_ADMIN_USER: "" + GF_SECURITY_ADMIN_PASSWORD: "" diff --git 
a/kubernetes/charts/Grafana/templates/service-grafana.yaml b/kubernetes/charts/Grafana/templates/service-grafana.yaml new file mode 100644 index 0000000..84ff6d0 --- /dev/null +++ b/kubernetes/charts/Grafana/templates/service-grafana.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: {{ .Values.release_name }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/values.yaml b/kubernetes/charts/Grafana/values.yaml new file mode 100644 index 0000000..bd0d08c --- /dev/null +++ b/kubernetes/charts/Grafana/values.yaml @@ -0,0 +1,65 @@ +replicas: 1 + +release_name: "grafana" + +image: + repository: grafana/grafana + pullPolicy: IfNotPresent + tag: "10.0.0" + +nameOverride: "" +fullnameOverride: "" + +port: 3000 + +service: + type: ClusterIP + port: 4005 + targetPort: 3000 + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + size: 5Gi + +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +# Admin configuration +admin: + enabled: true + +# Datasources configuration +datasources: + - name: Loki + type: loki + url: http://loki:3100 + access: proxy + isDefault: true + +# Dashboard providers +dashboardProviders: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + updateIntervalSeconds: 10 + allowUiUpdates: true + options: + path: /etc/grafana/dashboards + +# Environment variables (non-sensitive) +env: + GF_USERS_ALLOW_SIGN_UP: "false" + +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: grafana-secrets diff --git a/kubernetes/charts/LLM-Orchestration-Service/Chart.yaml b/kubernetes/charts/LLM-Orchestration-Service/Chart.yaml new file mode 100644 index 0000000..1be8ea8 ---
/dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: LLM-Orchestration-Service +description: LLM Orchestration Service for RAG Module +version: 0.1.0 +appVersion: "1.0.0" +type: application \ No newline at end of file diff --git a/kubernetes/charts/LLM-Orchestration-Service/templates/deployment-byk-llm-orchestration.yaml b/kubernetes/charts/LLM-Orchestration-Service/templates/deployment-byk-llm-orchestration.yaml new file mode 100644 index 0000000..4a6013f --- /dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/templates/deployment-byk-llm-orchestration.yaml @@ -0,0 +1,166 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration + spec: + {{- if .Values.initContainer.enabled }} + initContainers: + - name: volume-init + image: "{{ .Values.initContainer.image.repository }}:{{ .Values.initContainer.image.tag }}" + command: + - sh + - -c + - | + echo "Initializing runtime volumes..." + + # Initialize config volume if empty + if [ ! -d "{{ .Values.volumes.config.mountPath }}" ] || [ -z "$(ls -A {{ .Values.volumes.config.mountPath }})" ]; then + echo "Creating config directory structure..." + mkdir -p {{ .Values.volumes.config.mountPath }} + # Generate initial config files here + # This is where your app would create its runtime config + echo "Config volume initialized" + fi + + # Initialize optimization volume if empty + if [ ! -d "{{ .Values.volumes.optimization.mountPath }}" ] || [ -z "$(ls -A {{ .Values.volumes.optimization.mountPath }})" ]; then + echo "Creating optimization modules directory structure..." 
+ mkdir -p {{ .Values.volumes.optimization.mountPath }} + # This is where your app would create its optimized modules + echo "Optimization volume initialized" + fi + + # Set proper permissions + chmod -R 755 {{ .Values.volumes.config.mountPath }} || true + chmod -R 755 {{ .Values.volumes.optimization.mountPath }} || true + + echo "Volume initialization complete" + volumeMounts: + {{- if .Values.volumes.config.enabled }} + - name: config-volume + mountPath: {{ .Values.volumes.config.mountPath }} + {{- end }} + {{- if .Values.volumes.optimization.enabled }} + - name: optimization-volume + mountPath: {{ .Values.volumes.optimization.mountPath }} + {{- end }} + {{- end }} + + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.llmOrchestration.repository }}:{{ .Values.images.llmOrchestration.tag }}" + imagePullPolicy: {{ .Values.images.llmOrchestration.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + env: + - name: ENVIRONMENT + value: "{{ .Values.app.environment }}" + - name: PORT + value: "{{ .Values.service.targetPort }}" + + # Vault configuration + {{- if .Values.vault.enabled }} + - name: VAULT_ADDR + value: "{{ .Values.vault.addr }}" + - name: VAULT_TOKEN + value: "{{ .Values.vault.tokenPath }}" + {{- end }} + + # Additional environment variables from values + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: "{{ .Values.healthcheck.readinessPath | default .Values.healthcheck.httpPath }}" + port: {{ 
.Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + + volumeMounts: + # Runtime-generated config volume + {{- if .Values.volumes.config.enabled }} + - name: config-volume + mountPath: {{ .Values.volumes.config.mountPath }} + {{- end }} + # Runtime-generated optimization modules + {{- if .Values.volumes.optimization.enabled }} + - name: optimization-volume + mountPath: {{ .Values.volumes.optimization.mountPath }} + {{- end }} + # Persistent logs + {{- if .Values.volumes.logs.enabled }} + - name: logs-volume + mountPath: {{ .Values.volumes.logs.mountPath }} + {{- end }} + # Vault token (from agent) + {{- if and .Values.vault.enabled .Values.volumes.vaultToken.enabled }} + - name: vault-token + mountPath: {{ .Values.volumes.vaultToken.mountPath }} + readOnly: true + {{- end }} + + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + + volumes: + # Runtime-generated config volume (PVC) + {{- if .Values.volumes.config.enabled }} + - name: config-volume + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-config" + {{- end }} + # Runtime-generated optimization volume (PVC) + {{- if .Values.volumes.optimization.enabled }} + - name: optimization-volume + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-optimization" + {{- end }} + # Persistent logs (PVC) + {{- if .Values.volumes.logs.enabled }} + - name: logs-volume + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-logs" + {{- end }} + # Vault token (shared PVC with vault-agent-llm) + {{- if and .Values.vault.enabled .Values.volumes.vaultToken.enabled }} + - name: 
vault-token + persistentVolumeClaim: + claimName: vault-agent-token + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/LLM-Orchestration-Service/templates/pvc-volumes.yaml b/kubernetes/charts/LLM-Orchestration-Service/templates/pvc-volumes.yaml new file mode 100644 index 0000000..f2be2c3 --- /dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/templates/pvc-volumes.yaml @@ -0,0 +1,61 @@ +{{- if and .Values.enabled .Values.volumes.logs.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-logs" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration + type: logs +spec: + accessModes: + - {{ .Values.volumes.logs.accessMode }} + resources: + requests: + storage: {{ .Values.volumes.logs.size }} + {{- if .Values.volumes.logs.storageClass }} + storageClassName: {{ .Values.volumes.logs.storageClass }} + {{- end }} +{{- end }} + +--- +{{- if and .Values.enabled .Values.volumes.config.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-config" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration + type: config +spec: + accessModes: + - {{ .Values.volumes.config.accessMode }} + resources: + requests: + storage: {{ .Values.volumes.config.size }} + {{- if .Values.volumes.config.storageClass }} + storageClassName: {{ .Values.volumes.config.storageClass }} + {{- end }} +{{- end }} + +--- +{{- if and .Values.enabled .Values.volumes.optimization.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-optimization" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration + type: optimization +spec: + accessModes: + - {{ .Values.volumes.optimization.accessMode }} + resources: + requests: + storage: {{ .Values.volumes.optimization.size }} + {{- if .Values.volumes.optimization.storageClass }} + storageClassName: {{ 
.Values.volumes.optimization.storageClass }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/LLM-Orchestration-Service/templates/service-byk-llm-orchestration.yaml b/kubernetes/charts/LLM-Orchestration-Service/templates/service-byk-llm-orchestration.yaml new file mode 100644 index 0000000..63b9bb6 --- /dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/templates/service-byk-llm-orchestration.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/LLM-Orchestration-Service/values.yaml b/kubernetes/charts/LLM-Orchestration-Service/values.yaml new file mode 100644 index 0000000..b64723e --- /dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/values.yaml @@ -0,0 +1,86 @@ +replicas: 1 +enabled: true + +images: + llmOrchestration: + repository: "ghcr.io/buerokratt/llm-orchestration-service" # Update with actual llm-orchestration image repository + tag: "latest" + pullPolicy: "IfNotPresent" + +release_name: "llm-orchestration-service" + +service: + type: ClusterIP + port: 8100 + targetPort: 8100 + +app: + environment: "production" + +# Volume configurations +volumes: + # Runtime-generated config volume (managed by InitContainer + PVC) + config: + enabled: true + mountPath: "/app/src/llm_config_module/config" + size: "1Gi" + accessMode: "ReadWriteOnce" + storageClass: "" + + # Runtime-generated optimization modules (managed by InitContainer + PVC) + optimization: + enabled: true + mountPath: "/app/src/optimization/optimized_modules" + size: "5Gi" + accessMode: "ReadWriteOnce" + storageClass: "" + + 
# Logs volume (persistent) + logs: + enabled: true + mountPath: "/app/logs" + size: "5Gi" + accessMode: "ReadWriteOnce" + storageClass: "" + + # Vault agent token volume (emptyDir - managed by sidecar) + vaultToken: + enabled: true + mountPath: "/agent/out" + +# InitContainer configuration for runtime volume preparation +initContainer: + enabled: true + image: + repository: "ghcr.io/buerokratt/llm-orchestration-service" # Update with actual llm-orchestration image repository + tag: "latest" + # InitContainer will prepare the runtime volumes + prepareVolumes: true + +env: + ENVIRONMENT: "production" + +vault: + enabled: true + addr: "http://vault:8200" + tokenPath: "/agent/out/token" + +resources: + requests: + memory: "512Mi" + cpu: "200m" + limits: + memory: "2Gi" + cpu: "1000m" + +healthcheck: + enabled: false + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + # LLM orchestration health endpoint + httpPath: "/health" + # Additional readiness checks + readinessPath: "/ready" diff --git a/kubernetes/charts/Langfuse-Web/Chart.yaml b/kubernetes/charts/Langfuse-Web/Chart.yaml new file mode 100644 index 0000000..041da91 --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Langfuse-Web +description: Langfuse web interface and API for LLM observability +type: application +version: 0.1.0 +appVersion: "3" \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml b/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml new file mode 100644 index 0000000..403e253 --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml @@ -0,0 +1,65 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: langfuse-web +spec: + replicas: {{ .Values.replicas 
}} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: langfuse-web + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.langfuse_web.registry }}/{{ .Values.images.langfuse_web.repository }}:{{ .Values.images.langfuse_web.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + # Non-sensitive env's from values.yaml + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: /api/public/health + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: /api/public/health + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + restartPolicy: Always +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Web/templates/secret.yaml b/kubernetes/charts/Langfuse-Web/templates/secret.yaml new file mode 100644 index 0000000..1c3ae5c --- /dev/null +++
b/kubernetes/charts/Langfuse-Web/templates/secret.yaml @@ -0,0 +1,25 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: langfuse-web-secrets + labels: + app: "{{ .Values.release_name }}" + component: langfuse-web +type: Opaque +stringData: + DATABASE_URL: "" + NEXTAUTH_SECRET: "" + ENCRYPTION_KEY: "" + SALT: "" + CLICKHOUSE_MIGRATION_URL: "" + CLICKHOUSE_USER: "" + CLICKHOUSE_PASSWORD: "" + LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: "" + LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: "" + LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: "" + LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: "" + LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: "" + LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: "" + REDIS_AUTH: "" +{{- end }} diff --git a/kubernetes/charts/Langfuse-Web/templates/service-byk-langfuse-web.yaml b/kubernetes/charts/Langfuse-Web/templates/service-byk-langfuse-web.yaml new file mode 100644 index 0000000..9594b42 --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/templates/service-byk-langfuse-web.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: langfuse-web +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Web/values.yaml b/kubernetes/charts/Langfuse-Web/values.yaml new file mode 100644 index 0000000..6dfaf1c --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/values.yaml @@ -0,0 +1,97 @@ +replicas: 1 +enabled: true + +images: + langfuse_web: + registry: "docker.io" + repository: "langfuse/langfuse" + tag: "3" + +release_name: "langfuse-web" + +service: + type: ClusterIP + port: 3005 + targetPort: 3000 + +# Environment variables +env: + # Non-sensitive configuration + NEXTAUTH_URL: 
"http://localhost:3000" + TELEMETRY_ENABLED: "true" + LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: "true" + + # ClickHouse configuration (non-sensitive) + CLICKHOUSE_URL: "http://clickhouse:8123" + CLICKHOUSE_CLUSTER_ENABLED: "false" + + # S3/MinIO configuration (non-sensitive) + LANGFUSE_USE_AZURE_BLOB: "false" + LANGFUSE_S3_EVENT_UPLOAD_BUCKET: "rag-search" + LANGFUSE_S3_EVENT_UPLOAD_REGION: "auto" + LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: "true" + LANGFUSE_S3_EVENT_UPLOAD_PREFIX: "langfuse/events/" + + LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: "rag-search" + LANGFUSE_S3_MEDIA_UPLOAD_REGION: "auto" + LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: "true" + LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: "langfuse/media/" + + LANGFUSE_S3_BATCH_EXPORT_ENABLED: "false" + LANGFUSE_S3_BATCH_EXPORT_BUCKET: "rag-search" + LANGFUSE_S3_BATCH_EXPORT_PREFIX: "langfuse/exports/" + LANGFUSE_S3_BATCH_EXPORT_REGION: "auto" + LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: "true" + LANGFUSE_INGESTION_QUEUE_DELAY_MS: "" + LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: "" + + # Redis configuration (non-sensitive) + REDIS_HOST: "redis" + REDIS_PORT: "6379" + REDIS_TLS_ENABLED: "false" + REDIS_TLS_CA: "" + REDIS_TLS_CERT: "" + REDIS_TLS_KEY: "" + + # Email configuration + EMAIL_FROM_ADDRESS: "" + SMTP_CONNECTION_URL: "" + + # Langfuse initialization (Web-specific) + LANGFUSE_INIT_ORG_ID: "" + LANGFUSE_INIT_ORG_NAME: "" + LANGFUSE_INIT_PROJECT_ID: "" + LANGFUSE_INIT_PROJECT_NAME: "" + LANGFUSE_INIT_PROJECT_PUBLIC_KEY: "" + LANGFUSE_INIT_PROJECT_SECRET_KEY: "" + LANGFUSE_INIT_USER_EMAIL: "" + LANGFUSE_INIT_USER_NAME: "" + LANGFUSE_INIT_USER_PASSWORD: "" + +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: langfuse-web-secrets + + + +resources: + requests: + memory: 
"512Mi" + cpu: "100m" + limits: + memory: "1Gi" + cpu: "500m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Worker/Chart.yaml b/kubernetes/charts/Langfuse-Worker/Chart.yaml new file mode 100644 index 0000000..4117b9c --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Langfuse-Worker +description: Langfuse background worker for LLM observability +type: application +version: 0.1.0 +appVersion: "3" \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml b/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml new file mode 100644 index 0000000..1ab3c55 --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml @@ -0,0 +1,65 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: langfuse-worker +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: langfuse-worker + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.langfuse_worker.registry }}/{{ .Values.images.langfuse_worker.repository }}:{{ .Values.images.langfuse_worker.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: worker + containerPort: {{ .Values.service.port }} + protocol: TCP + # Non-sensitive env's from values.yaml + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} 
+ {{- end }} + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: /api/health + port: {{ .Values.service.port }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: /api/health + port: {{ .Values.service.port }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + restartPolicy: Always +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Worker/templates/secret.yaml b/kubernetes/charts/Langfuse-Worker/templates/secret.yaml new file mode 100644 index 0000000..d7ec52b --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/templates/secret.yaml @@ -0,0 +1,24 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: langfuse-worker-secrets + labels: + app: "{{ .Values.release_name }}" + component: langfuse-worker +type: Opaque +stringData: + DATABASE_URL: "" + ENCRYPTION_KEY: "" + SALT: "" + CLICKHOUSE_MIGRATION_URL: "" + CLICKHOUSE_USER: "" + CLICKHOUSE_PASSWORD: "" + LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: "" + LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: "" + LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: "" + LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: "" + LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: "" + LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: "" + REDIS_AUTH: "" +{{- end }} diff --git
a/kubernetes/charts/Langfuse-Worker/templates/service-byk-langfuse-worker.yaml b/kubernetes/charts/Langfuse-Worker/templates/service-byk-langfuse-worker.yaml new file mode 100644 index 0000000..da32c5c --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/templates/service-byk-langfuse-worker.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: langfuse-worker +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: worker + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Worker/values.yaml b/kubernetes/charts/Langfuse-Worker/values.yaml new file mode 100644 index 0000000..0a7343e --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/values.yaml @@ -0,0 +1,84 @@ +replicas: 1 +enabled: true + +images: + langfuse_worker: + registry: "docker.io" + repository: "langfuse/langfuse-worker" + tag: "3" + +release_name: "langfuse-worker" + +service: + type: ClusterIP + port: 3030 + +# Environment variables +env: + # Non-sensitive configuration + NEXTAUTH_URL: "http://localhost:3000" + TELEMETRY_ENABLED: "true" + LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: "true" + + # ClickHouse configuration (non-sensitive) + CLICKHOUSE_URL: "http://clickhouse:8123" + CLICKHOUSE_CLUSTER_ENABLED: "false" + + # S3/MinIO configuration (non-sensitive) + LANGFUSE_USE_AZURE_BLOB: "false" + LANGFUSE_S3_EVENT_UPLOAD_BUCKET: "rag-search" + LANGFUSE_S3_EVENT_UPLOAD_REGION: "auto" + LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: "true" + LANGFUSE_S3_EVENT_UPLOAD_PREFIX: "langfuse/events/" + + LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: "rag-search" + LANGFUSE_S3_MEDIA_UPLOAD_REGION: "auto" + LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: "http://minio:9000" + 
LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: "true" + LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: "langfuse/media/" + + LANGFUSE_S3_BATCH_EXPORT_ENABLED: "false" + LANGFUSE_S3_BATCH_EXPORT_BUCKET: "rag-search" + LANGFUSE_S3_BATCH_EXPORT_PREFIX: "langfuse/exports/" + LANGFUSE_S3_BATCH_EXPORT_REGION: "auto" + LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: "true" + LANGFUSE_INGESTION_QUEUE_DELAY_MS: "" + LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: "" + + # Redis configuration (non-sensitive) + REDIS_HOST: "redis" + REDIS_PORT: "6379" + REDIS_TLS_ENABLED: "false" + REDIS_TLS_CA: "" + REDIS_TLS_CERT: "" + REDIS_TLS_KEY: "" + + # Email configuration + EMAIL_FROM_ADDRESS: "" + SMTP_CONNECTION_URL: "" + +# Reference to Kubernetes Secret +# Sensitive credentials should be set in templates/secret.yaml before deployment +envFrom: + - secretRef: + name: langfuse-worker-secrets + +resources: + requests: + memory: "512Mi" + cpu: "100m" + limits: + memory: "2Gi" + cpu: "500m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 \ No newline at end of file diff --git a/kubernetes/charts/Liquibase/Chart.yaml b/kubernetes/charts/Liquibase/Chart.yaml new file mode 100644 index 0000000..78f3d45 --- /dev/null +++ b/kubernetes/charts/Liquibase/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Liquibase +description: A Helm chart for Liquibase for database migrations +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Liquibase/templates/liquibase-job.yaml b/kubernetes/charts/Liquibase/templates/liquibase-job.yaml new file mode 100644 index 0000000..3ec6bea --- /dev/null +++ b/kubernetes/charts/Liquibase/templates/liquibase-job.yaml @@ -0,0 +1,74 @@ +{{- if .Values.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + 
+  name: "{{ .Values.release_name }}"
+  labels:
+    app: "{{ .Values.release_name }}"
+spec:
+  backoffLimit: {{ .Values.backoffLimit }}
+  template:
+    metadata:
+      annotations:
+        {{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+      labels:
+        app: "{{ .Values.release_name }}"
+    spec:
+      restartPolicy: OnFailure
+      volumes:
+        - name: liquibase-repo
+          emptyDir: {}
+      initContainers:
+        - name: git-clone
+          image: alpine/git:latest
+          volumeMounts:
+            - name: liquibase-repo
+              mountPath: /liquibase-files
+          command:
+            - sh
+            - -c
+            - |
+              git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag &&
+
+              cp -r /tmp/rag/DSL/Liquibase/* /liquibase-files
+      containers:
+        - name: "{{ .Values.release_name }}"
+          image: "{{ .Values.images.scope.repository }}:{{ .Values.images.scope.tag }}"
+          imagePullPolicy: {{ .Values.pullPolicy }}
+          env:
+            {{- range .Values.env }}
+            - name: {{ .name }}
+              value: "{{ .value }}"
+            {{- end }}
+            # Sensitive env's from Kubernetes Secret
+            - name: POSTGRES_USER
+              valueFrom:
+                secretKeyRef:
+                  name: liquibase-secrets
+                  key: POSTGRES_USER
+            - name: POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: liquibase-secrets
+                  key: POSTGRES_PASSWORD
+
+          volumeMounts:
+            - name: liquibase-repo
+              mountPath: /liquibase-files
+          command: ["/bin/sh", "-c"]
+          args:
+            - |
+              echo "--- Listing files in /liquibase-files ---"
+              ls -R /liquibase-files
+              cd /liquibase-files
+              echo "--- Now running Liquibase ---"
+              # Credentials come from the liquibase-secrets Secret injected as env
+              # vars above — never hardcode them here (they would leak via git and
+              # `kubectl describe job`, and the Secret would be dead configuration).
+              # The JDBC URL is single-sourced from the LIQUIBASE_URL env entry in
+              # values.yaml instead of being duplicated inline.
+              liquibase \
+                --changeLogFile=/master.yml \
+                --url="${LIQUIBASE_URL}" \
+                --username="${POSTGRES_USER}" \
+                --password="${POSTGRES_PASSWORD}" \
+                update
+
+{{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/Liquibase/templates/secret.yaml b/kubernetes/charts/Liquibase/templates/secret.yaml
new file mode 100644
index 0000000..90b0eb7
--- /dev/null
+++ b/kubernetes/charts/Liquibase/templates/secret.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.enabled }}
+apiVersion: v1
+kind: Secret
+metadata:
name: liquibase-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + POSTGRES_USER: "" + POSTGRES_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Liquibase/values.yaml b/kubernetes/charts/Liquibase/values.yaml new file mode 100644 index 0000000..3d68b49 --- /dev/null +++ b/kubernetes/charts/Liquibase/values.yaml @@ -0,0 +1,21 @@ +enabled: true +release_name: "component-byk-liquibase" +backoffLimit: 3 + +images: + scope: + repository: "liquibase/liquibase" + tag: "4.33.0" + +env: + - name: LIQUIBASE_URL + value: "jdbc:postgresql://rag-search-db:5432/rag-search" + - name: LIQUIBASE_CHANGELOG_FILE + value: /master.yml + + + +pullPolicy: IfNotPresent + +podAnnotations: + dsl-checksum: "211bdc77c12b" \ No newline at end of file diff --git a/kubernetes/charts/Loki/Chart.yaml b/kubernetes/charts/Loki/Chart.yaml new file mode 100644 index 0000000..570e167 --- /dev/null +++ b/kubernetes/charts/Loki/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Loki +description: A Helm chart for Loki +type: application +version: 0.1.0 +appVersion: "2.9.0" \ No newline at end of file diff --git a/kubernetes/charts/Loki/templates/configmap-loki.yaml b/kubernetes/charts/Loki/templates/configmap-loki.yaml new file mode 100644 index 0000000..ebee18b --- /dev/null +++ b/kubernetes/charts/Loki/templates/configmap-loki.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: loki-config + labels: + app: loki +data: + loki.yaml: | +{{ .Values.config | toYaml | indent 4 }} \ No newline at end of file diff --git a/kubernetes/charts/Loki/templates/deployment-loki.yaml b/kubernetes/charts/Loki/templates/deployment-loki.yaml new file mode 100644 index 0000000..7967b8a --- /dev/null +++ b/kubernetes/charts/Loki/templates/deployment-loki.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + replicas: {{ .Values.replicas }} + selector: + 
+      matchLabels:
+        app: {{ .Values.release_name }}
+  template:
+    metadata:
+      labels:
+        app: {{ .Values.release_name }}
+    spec:
+      containers:
+        - name: {{ .Values.release_name }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.port }}
+              protocol: TCP
+          volumeMounts:
+            - name: config
+              # Project the single `loki.yaml` ConfigMap key as the file Loki reads
+              # by default. Without subPath, Kubernetes mounts the ConfigMap as a
+              # *directory* at this path and Loki cannot load its configuration.
+              mountPath: /etc/loki/local-config.yaml
+              subPath: loki.yaml
+            {{- if .Values.persistence.enabled }}
+            - name: storage
+              mountPath: /loki
+            {{- end }}
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+      volumes:
+        - name: config
+          configMap:
+            name: loki-config
+        {{- if .Values.persistence.enabled }}
+        - name: storage
+          persistentVolumeClaim:
+            claimName: loki-storage
+        {{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/Loki/templates/pvc-loki.yaml b/kubernetes/charts/Loki/templates/pvc-loki.yaml
new file mode 100644
index 0000000..5d505a5
--- /dev/null
+++ b/kubernetes/charts/Loki/templates/pvc-loki.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.persistence.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: loki-storage
+  labels:
+    app: loki
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode }}
+  {{- if .Values.persistence.storageClass }}
+  storageClassName: {{ .Values.persistence.storageClass }}
+  {{- end }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size }}
+{{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/Loki/templates/service-loki.yaml b/kubernetes/charts/Loki/templates/service-loki.yaml
new file mode 100644
index 0000000..8415837
--- /dev/null
+++ b/kubernetes/charts/Loki/templates/service-loki.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .Values.release_name }}
+  labels:
+    app: {{ .Values.release_name }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: {{ .Values.service.targetPort }}
+      protocol: TCP
+      name: http
selector: + app: {{ .Values.release_name }} + + \ No newline at end of file diff --git a/kubernetes/charts/Loki/values.yaml b/kubernetes/charts/Loki/values.yaml new file mode 100644 index 0000000..1b059e4 --- /dev/null +++ b/kubernetes/charts/Loki/values.yaml @@ -0,0 +1,85 @@ +replicas: 1 + +release_name: "loki" + +image: + repository: grafana/loki + pullPolicy: IfNotPresent + tag: "2.9.0" + +nameOverride: "" +fullnameOverride: "" + +port: 3100 + +service: + type: ClusterIP + port: 3100 + targetPort: 3100 + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + size: 10Gi + +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +# Loki configuration - will be mounted as ConfigMap +config: + auth_enabled: false + + server: + http_listen_port: 3100 + grpc_listen_port: 9096 + + common: + path_prefix: /loki + storage: + filesystem: + chunks_directory: /loki/chunks + rules_directory: /loki/rules + replication_factor: 1 + ring: + instance_addr: 127.0.0.1 + kvstore: + store: inmemory + + query_range: + results_cache: + cache: + embedded_cache: + enabled: true + max_size_mb: 100 + + schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h + + ruler: + alertmanager_url: http://localhost:9093 + +# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration +# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/ +# +# Statistics help us better understand how Loki is used, and they show us performance +# levels for most users. This helps us prioritize features and documentation. +# For more information on what's sent, look at +# https://github.com/grafana/loki/blob/main/pkg/usagestats/stats.go +# Refer to the buildReport method to see what goes into a report. 
+# +# If you would like to disable reporting, uncomment the following lines: + analytics: + reporting_enabled: false diff --git a/kubernetes/charts/Qdrant/Chart.yaml b/kubernetes/charts/Qdrant/Chart.yaml new file mode 100644 index 0000000..ec80635 --- /dev/null +++ b/kubernetes/charts/Qdrant/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Qdrant +description: Qdrant vector database for RAG +type: application +version: 0.1.0 +appVersion: "v1.15.1" \ No newline at end of file diff --git a/kubernetes/charts/Qdrant/templates/service-byk-qdrant.yaml b/kubernetes/charts/Qdrant/templates/service-byk-qdrant.yaml new file mode 100644 index 0000000..e0c0e4c --- /dev/null +++ b/kubernetes/charts/Qdrant/templates/service-byk-qdrant.yaml @@ -0,0 +1,31 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: qdrant +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "ClusterIP" }} + {{- if .Values.service.headless }} + clusterIP: None + {{- end }} + {{- end }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.httpPort }} + targetPort: {{ .Values.service.httpPort }} + - name: grpc + protocol: TCP + port: {{ .Values.service.grpcPort }} + targetPort: {{ .Values.service.grpcPort }} + - name: metrics + protocol: TCP + port: {{ .Values.service.metricsPort }} + targetPort: {{ .Values.service.metricsPort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Qdrant/templates/statefulset-byk-qdrant.yaml b/kubernetes/charts/Qdrant/templates/statefulset-byk-qdrant.yaml new file mode 100644 index 0000000..13d81cb --- /dev/null +++ b/kubernetes/charts/Qdrant/templates/statefulset-byk-qdrant.yaml @@ -0,0 +1,82 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + 
component: qdrant +spec: + serviceName: "{{ .Values.release_name }}" + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: qdrant + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.qdrant.registry }}/{{ .Values.images.qdrant.repository }}:{{ .Values.images.qdrant.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.httpPort }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.service.grpcPort }} + protocol: TCP + - name: metrics + containerPort: {{ .Values.service.metricsPort }} + protocol: TCP + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.httpPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.httpPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeMounts: + - name: qdrant-storage + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: qdrant-storage + labels: + app: "{{ 
.Values.release_name }}" + component: qdrant + spec: + accessModes: + - {{ .Values.persistence.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Qdrant/values.yaml b/kubernetes/charts/Qdrant/values.yaml new file mode 100644 index 0000000..4a83496 --- /dev/null +++ b/kubernetes/charts/Qdrant/values.yaml @@ -0,0 +1,50 @@ +replicas: 1 +enabled: true + +images: + qdrant: + registry: "docker.io" + repository: "qdrant/qdrant" + tag: "v1.15.1" + +release_name: "qdrant" + +service: + type: ClusterIP + # Set to true for headless service (direct pod access) + headless: false + # Qdrant HTTP API port + httpPort: 6333 + # Qdrant gRPC API port + grpcPort: 6334 + # Internal metrics port + metricsPort: 6335 + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + size: 20Gi + mountPath: "/qdrant/storage" + +resources: + requests: + memory: "512Mi" + cpu: "100m" + limits: + memory: "2Gi" + cpu: "1000m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + # HTTP endpoint for health check + httpPath: "/collections" + + diff --git a/kubernetes/charts/Redis/Chart.yaml b/kubernetes/charts/Redis/Chart.yaml new file mode 100644 index 0000000..cc5354e --- /dev/null +++ b/kubernetes/charts/Redis/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Redis +description: Redis cache and session store for RAG +type: application +version: 0.1.0 +appVersion: "7" \ No newline at end of file diff --git a/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml b/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml new file mode 100644 index 0000000..f60b6d6 --- /dev/null +++ 
b/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml
@@ -0,0 +1,68 @@
+{{- if .Values.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: "{{ .Values.release_name }}"
+  labels:
+    app: "{{ .Values.release_name }}"
+    component: redis
+spec:
+  replicas: {{ .Values.replicas }}
+  selector:
+    matchLabels:
+      app: "{{ .Values.release_name }}"
+  template:
+    metadata:
+      labels:
+        app: "{{ .Values.release_name }}"
+        component: redis
+    spec:
+      containers:
+        - name: "{{ .Values.release_name }}"
+          image: "{{ .Values.images.redis.registry }}/{{ .Values.images.redis.repository }}:{{ .Values.images.redis.tag }}"
+          imagePullPolicy: {{ .Values.pullPolicy }}
+          ports:
+            - name: redis
+              containerPort: {{ .Values.service.port }}
+              protocol: TCP
+          {{- if .Values.auth.enabled }}
+          # NOTE(review): $(REDIS_PASSWORD) relies on Kubernetes expanding env vars
+          # (sourced via envFrom below) into command args — confirm the key exists
+          # in redis-secrets, otherwise the literal string "$(REDIS_PASSWORD)"
+          # becomes the requirepass value.
+          command:
+            - redis-server
+            - --requirepass
+            - $(REDIS_PASSWORD)
+          {{- end }}
+          # Sensitive env's from Kubernetes Secret
+          {{- if .Values.envFrom }}
+          envFrom:
+            {{- toYaml .Values.envFrom | nindent 12 }}
+          {{- end }}
+          {{- if .Values.healthcheck.enabled }}
+          # Probes must authenticate when REDIS_PASSWORD is set: a bare
+          # `redis-cli ping` gets a NOAUTH error under requirepass yet still
+          # exits 0, so the probe would always "pass" without actually checking
+          # the server. Asserting PONG makes the probe meaningful either way.
+          livenessProbe:
+            exec:
+              command:
+                - sh
+                - -c
+                - redis-cli ${REDIS_PASSWORD:+-a "$REDIS_PASSWORD"} ping | grep -q PONG
+            initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }}
+            periodSeconds: {{ .Values.healthcheck.periodSeconds }}
+            timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }}
+            failureThreshold: {{ .Values.healthcheck.failureThreshold }}
+          readinessProbe:
+            exec:
+              command:
+                - sh
+                - -c
+                - redis-cli ${REDIS_PASSWORD:+-a "$REDIS_PASSWORD"} ping | grep -q PONG
+            initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }}
+            periodSeconds: {{ .Values.healthcheck.periodSeconds }}
+            timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }}
+            failureThreshold: {{ .Values.healthcheck.failureThreshold }}
+          {{- end }}
+          resources:
+            requests:
+              memory: "{{ .Values.resources.requests.memory }}"
+              cpu: "{{ .Values.resources.requests.cpu }}"
+            limits:
+              memory: "{{ .Values.resources.limits.memory }}"
+              cpu: "{{ .Values.resources.limits.cpu }}"
+
+      restartPolicy: Always
+{{- end }}
\ No newline at end of file
diff --git
a/kubernetes/charts/Redis/templates/secret.yaml b/kubernetes/charts/Redis/templates/secret.yaml new file mode 100644 index 0000000..27ad056 --- /dev/null +++ b/kubernetes/charts/Redis/templates/secret.yaml @@ -0,0 +1,12 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: redis-secrets + labels: + app: "{{ .Values.release_name }}" + component: redis +type: Opaque +stringData: + REDIS_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Redis/templates/service-byk-redis.yaml b/kubernetes/charts/Redis/templates/service-byk-redis.yaml new file mode 100644 index 0000000..a030f5a --- /dev/null +++ b/kubernetes/charts/Redis/templates/service-byk-redis.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: redis +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: redis + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Redis/values.yaml b/kubernetes/charts/Redis/values.yaml new file mode 100644 index 0000000..784f8c1 --- /dev/null +++ b/kubernetes/charts/Redis/values.yaml @@ -0,0 +1,41 @@ +replicas: 1 +enabled: true + +images: + redis: + registry: "docker.io" + repository: "redis" + tag: "7" + +release_name: "redis" + +service: + type: ClusterIP + port: 6379 + +auth: + enabled: true + +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: redis-secrets + +# Resource configuration +resources: + requests: + memory: "128Mi" + cpu: "50m" + limits: + memory: "512Mi" + cpu: "200m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 \ No newline at end of file diff --git a/kubernetes/charts/Resql/Chart.yaml 
b/kubernetes/charts/Resql/Chart.yaml new file mode 100644 index 0000000..2de36f8 --- /dev/null +++ b/kubernetes/charts/Resql/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: resql +description: Database abstraction layer for RAG +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml b/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml new file mode 100644 index 0000000..c44dc30 --- /dev/null +++ b/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml @@ -0,0 +1,76 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: "{{ .Values.release_name }}" + spec: + volumes: + - name: dsl + emptyDir: {} + initContainers: + - name: git-clone-dsl + image: alpine/git:latest + volumeMounts: + - name: dsl + mountPath: /DSL + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip \ + https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + cp -r /tmp/rag/DSL/Resql/* /DSL/ + + + + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.resql.registry }}/{{ .Values.images.resql.repository }}:{{ .Values.images.resql.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + env: + - name: logging.level.root + value: "{{ .Values.env.LOGGING_LEVEL_ROOT }}" + - name: SQLMS_DATASOURCES_0_NAME + value: "{{ .Values.env.SQLMS_DATASOURCES_0_NAME }}" + - name: SQLMS_DATASOURCES_0_JDBCURL + value: "{{ .Values.env.SQLMS_DATASOURCES_0_JDBCURL }}" + - name: SQLMS_DATASOURCES_0_USERNAME + value: "{{ .Values.env.SQLMS_DATASOURCES_0_USERNAME }}" + # Sensitive env from Kubernetes Secret + - 
name: SQLMS_DATASOURCES_0_PASSWORD + valueFrom: + secretKeyRef: + name: resql-secrets + key: SQLMS_DATASOURCES_0_PASSWORD + - name: LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT + value: "{{ .Values.env.LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT }}" + - name: SQLMS_SAVED_QUERIES_DIR + value: "/DSL" + volumeMounts: + - name: dsl + mountPath: /DSL + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + +{{- end }} diff --git a/kubernetes/charts/Resql/templates/secret.yaml b/kubernetes/charts/Resql/templates/secret.yaml new file mode 100644 index 0000000..335257b --- /dev/null +++ b/kubernetes/charts/Resql/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: resql-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + SQLMS_DATASOURCES_0_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Resql/templates/service-byk-resql.yaml b/kubernetes/charts/Resql/templates/service-byk-resql.yaml new file mode 100644 index 0000000..3312d10 --- /dev/null +++ b/kubernetes/charts/Resql/templates/service-byk-resql.yaml @@ -0,0 +1,14 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Resql/values.yaml b/kubernetes/charts/Resql/values.yaml new file mode 100644 index 0000000..c49d1ba --- /dev/null +++ b/kubernetes/charts/Resql/values.yaml @@ -0,0 +1,34 @@ +replicas: 1 +enabled: true +images: + resql: + registry: "ghcr.io" + repository: "buerokratt/resql" + tag: "v1.3.4" + +release_name: "resql" + +service: + type: 
ClusterIP + port: 8082 + +env: + LOGGING_LEVEL_ROOT: "INFO" + SQLMS_DATASOURCES_0_NAME: "byk" + SQLMS_DATASOURCES_0_JDBCURL: "jdbc:postgresql://rag-search-db:5432/rag-search" + SQLMS_DATASOURCES_0_USERNAME: "postgres" + LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT: "INFO" + JAVA_OPTS: "-Xms1g -Xmx3g" + +resources: + requests: + memory: "1000Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "50m" + +pullPolicy: IfNotPresent + +podAnnotations: + dsl-checksum: "initial" diff --git a/kubernetes/charts/Ruuter-Private/Chart.yaml b/kubernetes/charts/Ruuter-Private/Chart.yaml new file mode 100644 index 0000000..845f24e --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: ruuter-private +description: A Helm chart for Ruuter Private API Gateway +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml new file mode 100644 index 0000000..6f84c28 --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Values.release_name }}-constants" + labels: + app: "{{ .Values.release_name }}" +data: + constants.ini: | + [DSL] + RAG_SEARCH_RUUTER_PUBLIC=http://ruuter-public:8086/rag-search + RAG_SEARCH_RUUTER_PRIVATE=http://ruuter-private:8088/rag-search + RAG_SEARCH_DMAPPER=http://data-mapper:3000 + RAG_SEARCH_RESQL=http://resql:8082/rag-search + RAG_SEARCH_PROJECT_LAYER=rag-search + RAG_SEARCH_TIM=http://tim:8085 + RAG_SEARCH_CRON_MANAGER=http://cron-manager:9010 + RAG_SEARCH_LLM_ORCHESTRATOR=http://llm-orchestration-service:8100/orchestrate + DOMAIN=localhost \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml 
b/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml new file mode 100644 index 0000000..866d3a7 --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml @@ -0,0 +1,98 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: "{{ .Values.release_name }}" + spec: + initContainers: + - name: git-clone + image: alpine/git:latest + volumeMounts: + - name: dsl + mountPath: /DSL + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + cp -r /tmp/rag/DSL/Ruuter.private/* /DSL/ + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.scope.registry }}/{{ .Values.images.scope.repository }}:{{ .Values.images.scope.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: http + env: + + - name: application.cors.allowedOrigins + value: "{{ .Values.env.APPLICATION_CORS_ALLOWEDORIGINS }}" + - name: application.httpCodesAllowList + value: "{{ .Values.env.APPLICATION_HTTPCODESALLOWLIST }}" + - name: application.internalRequests.allowedIPs + value: "{{ .Values.env.APPLICATION_INTERNALREQUESTS_ALLOWEDIPS }}" + - name: application.logging.displayRequestContent + value: "{{ .Values.env.APPLICATION_LOGGING_DISPLAYREQUESTCONTENT }}" + - name: application.logging.displayResponseContent + value: "{{ .Values.env.APPLICATION_LOGGING_DISPLAYRESPONSECONTENT }}" + - name: application.logging.printStackTrace + value: "{{ .Values.env.APPLICATION_LOGGING_PRINTSTACKTRACE }}" + - name: application.internalRequests.disabled + 
value: "{{ .Values.env.APPLICATION_INTERNALREQUESTS_DISABLED }}" + - name: server.port + value: "{{ .Values.env.SERVER_PORT }}" + # Sensitive env from Kubernetes Secret + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: ruuter-private-secrets + key: DB_PASSWORD + + + - name: logging.level.root + value: "{{ .Values.env.LOGGING_LEVEL_ROOT }}" + - name: LOG_LEVEL_TIMING + value: "{{ .Values.env.LOG_LEVEL_TIMING }}" + - name: application.DSL.allowedFiletypes + value: "{{ .Values.env.APPLICATION_DSL_ALLOWEDFILETYPES }}" + - name: application.httpResponseSizeLimit + value: "{{ .Values.env.APPLICATION_HTTPRESPONSESIZELIMIT }}" + - name: application.openSearchConfiguration.index + value: "{{ .Values.env.APPLICATION_OPENSEARCHCONFIGURATION_INDEX }}" + volumeMounts: + - name: dsl + mountPath: /DSL + - name: urls-env + mountPath: /app/constants.ini + subPath: constants.ini + + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + volumes: + - name: dsl + emptyDir: {} + - name: urls-env + configMap: + name: "{{ .Values.release_name }}-constants" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/ingress-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/ingress-ruuter-private.yaml new file mode 100644 index 0000000..94655a6 --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/ingress-ruuter-private.yaml @@ -0,0 +1,46 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: "{{ .Values.release_name }}-ingress" + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, OPTIONS" + nginx.ingress.kubernetes.io/cors-allow-headers: 
"DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,X-Forwarded-For" + nginx.ingress.kubernetes.io/cors-allow-origin: "{{ .Values.ingress.corsAllowOrigin }}" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-credentials: "true" + nginx.ingress.kubernetes.io/additional-response-headers: "Access-Control-Allow-Headers: Content-Type" + nginx.ingress.kubernetes.io/cors-expose-headers: "Content-Length, Content-Range" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + # Private Ruuter may need IP whitelisting for security + nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.1/32" + {{- if .Values.ingress.ssl.enabled }} + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: {{ .Values.ingress.ssl.certIssuerName | quote }} + {{- end }} + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + name: "{{ .Values.release_name }}-ingress" + app: "{{ .Values.release_name }}" +spec: + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - pathType: Prefix + path: / + backend: + service: + name: "{{ .Values.release_name }}" + port: + number: {{ .Values.service.port }} + {{- if .Values.ingress.ssl.enabled }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.ssl.secretName }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/secret.yaml b/kubernetes/charts/Ruuter-Private/templates/secret.yaml new file mode 100644 index 0000000..1db3b29 --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: ruuter-private-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + DB_PASSWORD: "" +{{- end }} diff --git 
a/kubernetes/charts/Ruuter-Private/templates/service-byk-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/service-byk-ruuter-private.yaml new file mode 100644 index 0000000..c6d6722 --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/service-byk-ruuter-private.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/values.yaml b/kubernetes/charts/Ruuter-Private/values.yaml new file mode 100644 index 0000000..3a48ac1 --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/values.yaml @@ -0,0 +1,58 @@ +replicas: 1 +enabled: true +release_name: "ruuter-private" + +images: + scope: + registry: "ghcr.io" + repository: "buerokratt/ruuter" + tag: "v2.2.1" + +service: + type: ClusterIP + port: 8088 + targetPort: 8088 + +env: + + APPLICATION_CORS_ALLOWEDORIGINS: "http://gui:3001,http://ruuter-private:8088,http://ruuter-public:8086,http://authentication-layer:3004,http://notifications-node:4040,http://dataset-gen-service:8000,http://localhost:3001" + APPLICATION_HTTPCODESALLOWLIST: "200,201,202,400,401,403,500" + APPLICATION_INTERNALREQUESTS_ALLOWEDIPS: "127.0.0.1" + APPLICATION_LOGGING_DISPLAYREQUESTCONTENT: "true" + APPLICATION_LOGGING_DISPLAYRESPONSECONTENT: "true" + APPLICATION_LOGGING_PRINTSTACKTRACE: "true" + APPLICATION_INTERNALREQUESTS_DISABLED: "true" + + + + LOGGING_LEVEL_ROOT: "INFO" + LOG_LEVEL_TIMING: "INFO" + APPLICATION_DSL_ALLOWEDFILETYPES: ".yml,.yaml,.md,.tmp" + APPLICATION_HTTPRESPONSESIZELIMIT: "2000" + APPLICATION_OPENSEARCHCONFIGURATION_INDEX: "ruuterlog" + SERVER_PORT: "8088" + +resources: + requests: + memory: "1000Mi" 
+ cpu: "50m" + limits: + memory: "2000Mi" + cpu: "50m" + + +ingress: + enabled: false + host: "rag.local" #change this to domain + corsAllowOrigin: "http://localhost:3001,http://localhost:3003,http://localhost:8088,http://localhost:3002,http://localhost:3004,http://localhost:8000" + ssl: + enabled: false + certIssuerName: "letsencrypt-prod" + secretName: "rag-ruuter-private-tls" + annotations: {} + +pullPolicy: IfNotPresent + +podAnnotations: + dsl-checksum: "initial" + diff --git a/kubernetes/charts/Ruuter-Public/Chart.yaml b/kubernetes/charts/Ruuter-Public/Chart.yaml new file mode 100644 index 0000000..662e775 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: ruuter-public +description: A Helm chart for Ruuter Public API Gateway +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml new file mode 100644 index 0000000..a6a56c0 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Values.release_name }}-constants" + labels: + app: "{{ .Values.release_name }}" +data: + constants.ini: | + [DSL] + RAG_SEARCH_RUUTER_PUBLIC=http://ruuter-public:8086/rag-search + RAG_SEARCH_RUUTER_PRIVATE=http://ruuter-private:8088/rag-search + RAG_SEARCH_DMAPPER=http://data-mapper:3000 + RAG_SEARCH_RESQL=http://resql:8082/rag-search + RAG_SEARCH_PROJECT_LAYER=rag-search + RAG_SEARCH_TIM=http://tim:8085 + RAG_SEARCH_CRON_MANAGER=http://cron-manager:9010 + RAG_SEARCH_LLM_ORCHESTRATOR=http://llm-orchestration-service:8100/orchestrate + DOMAIN=localhost \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml 
b/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml new file mode 100644 index 0000000..e081430 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml @@ -0,0 +1,97 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app: "{{ .Values.release_name }}" + spec: + initContainers: + - name: git-clone + image: alpine/git:latest + volumeMounts: + - name: dsl + mountPath: /DSL + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + cp -r /tmp/rag/DSL/Ruuter.public/* /DSL/ + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.scope.registry }}/{{ .Values.images.scope.repository }}:{{ .Values.images.scope.tag }}" + ports: + - containerPort: {{ .Values.service.port }} + name: http + env: + - name: application.cors.allowedOrigins + value: "{{ .Values.env.APPLICATION_CORS_ALLOWEDORIGINS }}" + - name: application.httpCodesAllowList + value: "{{ .Values.env.APPLICATION_HTTPCODESALLOWLIST }}" + - name: application.internalRequests.allowedIPs + value: "{{ .Values.env.APPLICATION_INTERNALREQUESTS_ALLOWEDIPS }}" + - name: application.logging.displayRequestContent + value: "{{ .Values.env.APPLICATION_LOGGING_DISPLAYREQUESTCONTENT }}" + - name: application.logging.displayResponseContent + value: "{{ .Values.env.APPLICATION_LOGGING_DISPLAYRESPONSECONTENT }}" + - name: application.logging.printStackTrace + value: "{{ .Values.env.APPLICATION_LOGGING_PRINTSTACKTRACE }}" + - name: application.internalRequests.disabled + value: "{{ 
.Values.env.APPLICATION_INTERNALREQUESTS_DISABLED }}" + - name: server.port + value: "{{ .Values.env.SERVER_PORT }}" + - name: application.constants.file + value: "/app/constants.ini" + # Sensitive env from Kubernetes Secret + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: ruuter-public-secrets + key: DB_PASSWORD + + - name: logging.level.root + value: "{{ .Values.env.LOGGING_LEVEL_ROOT }}" + - name: LOG_LEVEL_TIMING + value: "{{ .Values.env.LOG_LEVEL_TIMING }}" + - name: application.DSL.allowedFiletypes + value: "{{ .Values.env.APPLICATION_DSL_ALLOWEDFILETYPES }}" + - name: application.httpResponseSizeLimit + value: "{{ .Values.env.APPLICATION_HTTPRESPONSESIZELIMIT }}" + - name: application.openSearchConfiguration.index + value: "{{ .Values.env.APPLICATION_OPENSEARCHCONFIGURATION_INDEX }}" + volumeMounts: + - name: dsl + mountPath: /DSL + - name: urls-env + mountPath: /app/constants.ini + subPath: constants.ini + + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + volumes: + - name: dsl + emptyDir: {} + - name: urls-env + configMap: + name: "{{ .Values.release_name }}-constants" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/templates/ingress-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/ingress-ruuter-public.yaml new file mode 100644 index 0000000..3a1e4c5 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/ingress-ruuter-public.yaml @@ -0,0 +1,45 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: "{{ .Values.release_name }}-ingress" + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, OPTIONS" + nginx.ingress.kubernetes.io/cors-allow-headers: 
"DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,X-Forwarded-For" + nginx.ingress.kubernetes.io/cors-allow-origin: "{{ .Values.ingress.corsAllowOrigin }}" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-credentials: "true" + nginx.ingress.kubernetes.io/additional-response-headers: "Access-Control-Allow-Headers: Content-Type" + nginx.ingress.kubernetes.io/cors-expose-headers: "Content-Length, Content-Range" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + {{- if .Values.ingress.ssl.enabled }} + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: {{ .Values.ingress.ssl.certIssuerName | quote }} + {{- end }} + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + name: "{{ .Values.release_name }}-ingress" + app: "{{ .Values.release_name }}" +spec: + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - pathType: Prefix + path: / + backend: + service: + name: "{{ .Values.release_name }}" + port: + number: {{ .Values.service.port }} + + {{- if .Values.ingress.ssl.enabled }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.ssl.secretName }} + {{- end }} +{{- end }} diff --git a/kubernetes/charts/Ruuter-Public/templates/secret.yaml b/kubernetes/charts/Ruuter-Public/templates/secret.yaml new file mode 100644 index 0000000..e9f76ce --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: ruuter-public-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + DB_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Ruuter-Public/templates/service-byk-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/service-byk-ruuter-public.yaml new file mode 100644 index 0000000..6e10cd8 --- /dev/null +++ 
b/kubernetes/charts/Ruuter-Public/templates/service-byk-ruuter-public.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/values.yaml b/kubernetes/charts/Ruuter-Public/values.yaml new file mode 100644 index 0000000..320d43f --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/values.yaml @@ -0,0 +1,54 @@ +replicas: 1 +enabled: true +release_name: "ruuter-public" + +images: + scope: + registry: "ghcr.io" + repository: "buerokratt/ruuter" + tag: v2.2.1 + +service: + type: ClusterIP + port: 8086 + targetPort: 8086 + +env: + APPLICATION_CORS_ALLOWEDORIGINS: "http://localhost:8086,http://localhost:3001,http://localhost:3003,http://localhost:3004,http://localhost:8080,http://localhost:8000,http://localhost:8090" + APPLICATION_HTTPCODESALLOWLIST: "200,201,202,204,400,401,403,500" + APPLICATION_INTERNALREQUESTS_ALLOWEDIPS: "127.0.0.1" + APPLICATION_LOGGING_DISPLAYREQUESTCONTENT: "true" + APPLICATION_LOGGING_DISPLAYRESPONSECONTENT: "true" + APPLICATION_LOGGING_PRINTSTACKTRACE: "true" + APPLICATION_INTERNALREQUESTS_DISABLED: "true" + SERVER_PORT: "8086" + + LOGGING_LEVEL_ROOT: "INFO" + LOG_LEVEL_TIMING: "INFO" + APPLICATION_DSL_ALLOWEDFILETYPES: ".yml,.yaml,.md,.tmp" + APPLICATION_HTTPRESPONSESIZELIMIT: "2000" + APPLICATION_OPENSEARCHCONFIGURATION_INDEX: "ruuterlog" + +resources: + requests: + memory: "1000Mi" + cpu: "50m" + limits: + memory: "2000Mi" + cpu: "50m" + + +ingress: + enabled: true + host: "rag.local" # Change this to domain + corsAllowOrigin: 
"http://localhost:8086,http://localhost:3001,http://localhost:3003,http://localhost:3004,http://localhost:8080,http://localhost:8000,http://localhost:8090" + ssl: + enabled: false # Set to true for production with proper certificates + certIssuerName: "letsencrypt-prod" + secretName: "rag-ruuter-tls" + + +pullPolicy: IfNotPresent + +podAnnotations: + dsl-checksum: "94b84bb5ff4d" diff --git a/kubernetes/charts/S3-Ferry/Chart.yaml b/kubernetes/charts/S3-Ferry/Chart.yaml new file mode 100644 index 0000000..882054c --- /dev/null +++ b/kubernetes/charts/S3-Ferry/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: S3-Ferry +description: A Helm chart for S3-Ferry +type: application +version: 0.1.0 +appVersion: "latest" \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml b/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml new file mode 100644 index 0000000..af396e8 --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ .Values.release_name }} + template: + metadata: + labels: + app: {{ .Values.release_name }} + spec: + containers: + - name: {{ .Values.release_name }} + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.port }} + protocol: TCP + # Non-sensitive env's from ConfigMap + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} + + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: shared + mountPath: /app/shared + - name: 
cron-data + mountPath: /app/data + {{- end }} + - name: datasets + mountPath: /app/datasets + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + {{- if .Values.persistence.enabled }} + - name: shared + persistentVolumeClaim: + claimName: s3-ferry-shared + - name: cron-data + persistentVolumeClaim: + claimName: s3-ferry-cron-data + {{- end }} + - name: datasets + emptyDir: {} + \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/templates/pvc-s3.yaml b/kubernetes/charts/S3-Ferry/templates/pvc-s3.yaml new file mode 100644 index 0000000..f973360 --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/pvc-s3.yaml @@ -0,0 +1,36 @@ +{{- if .Values.persistence.enabled }} +# Shared volume PVC +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: s3-ferry-shared + labels: + app: s3-ferry +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.shared.size }} + +--- +# Cron data PVC +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: s3-ferry-cron-data + labels: + app: s3-ferry +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.cronData.size }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/templates/secret.yaml b/kubernetes/charts/S3-Ferry/templates/secret.yaml new file mode 100644 index 0000000..ac341bd --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/secret.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Secret +metadata: + name: s3-ferry-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + S3_SECRET_ACCESS_KEY: "" + S3_ACCESS_KEY_ID: "" + GF_SECURITY_ADMIN_USER: "" + 
GF_SECURITY_ADMIN_PASSWORD: "" diff --git a/kubernetes/charts/S3-Ferry/templates/service-s3.yaml b/kubernetes/charts/S3-Ferry/templates/service-s3.yaml new file mode 100644 index 0000000..84ff6d0 --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/service-s3.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: {{ .Values.release_name }} \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/values.yaml b/kubernetes/charts/S3-Ferry/values.yaml new file mode 100644 index 0000000..6274d41 --- /dev/null +++ b/kubernetes/charts/S3-Ferry/values.yaml @@ -0,0 +1,62 @@ +replicas: 1 + +release_name: "s3-ferry" + +image: + registry: "ghcr.io" + repository: "buerokratt/s3-ferry" + pullPolicy: IfNotPresent + tag: "PRE-ALPHA-1.1.1" + +nameOverride: "" +fullnameOverride: "" + +port: 3000 + +service: + type: ClusterIP + port: 3006 + targetPort: 3000 + + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + outputDatasets: + size: 5Gi + shared: + size: 2Gi + cronData: + size: 3Gi + +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +# Environment variables (non-sensitive) +env: + API_CORS_ORIGIN: "*" + API_DOCUMENTATION_ENABLED: "true" + S3_REGION: "eu-west-1" + S3_ENDPOINT_URL: "http://minio:9000" + S3_ENDPOINT_NAME: "minio:9000" + S3_DATA_BUCKET_PATH: "resources" + S3_DATA_BUCKET_NAME: "rag-search" + FS_DATA_DIRECTORY_PATH: "/app" + S3_HEALTH_ENDPOINT: "http://minio:9000/minio/health/live" + MINIO_BROWSER_REDIRECT_URL: "http://localhost:9091" + GF_USERS_ALLOW_SIGN_UP: "false" + PORT: "3000" + +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: s3-ferry-secrets + + + diff --git a/kubernetes/charts/TIM/Chart.yaml 
b/kubernetes/charts/TIM/Chart.yaml new file mode 100644 index 0000000..7ac0a74 --- /dev/null +++ b/kubernetes/charts/TIM/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: TIM +description: TIM Authentication Service for RAG +type: application +version: 0.1.0 +appVersion: "1.0" diff --git a/kubernetes/charts/TIM/templates/configmap-byk-tim.yaml b/kubernetes/charts/TIM/templates/configmap-byk-tim.yaml new file mode 100644 index 0000000..58f6986 --- /dev/null +++ b/kubernetes/charts/TIM/templates/configmap-byk-tim.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: tim-config +data: + application.properties: | + security.oauth2.client.client-id={{ .Values.tim.config.oauth2_client_id }} + security.oauth2.client.client-secret=${OAUTH2_CLIENT_SECRET} + security.oauth2.client.scope={{ .Values.tim.config.oauth2_client_scope }} + security.oauth2.client.registered-redirect-uri=https://tim.{{ .Values.global.domain }}/authenticate + security.oauth2.client.user-authorization-uri={{ .Values.tim.config.oauth2_user_auth_uri }} + security.oauth2.client.access-token-uri={{ .Values.tim.config.oauth2_access_token_uri }} + security.oauth2.resource.jwk.key-set-uri={{ .Values.tim.config.oauth2_jwk_uri }} + security.allowlist.jwt={{ .Values.tim.config.security_allowlist_jwt }} + security.cookie.same-site=Lax + frontpage.redirect.url=http://localhost:3004 + + logging.level.root={{ .Values.tim.config.logging_level_root }} + + spring.datasource.url=jdbc:postgresql://tim-postgresql:5432/tim + spring.datasource.username={{ .Values.global.tim_postgresql.auth.username }} + spring.datasource.password=${POSTGRES_PASSWORD} + spring.datasource.driver-class-name=org.postgresql.Driver + spring.liquibase.change-log=classpath:master.xml + + spring.profiles.active={{ .Values.tim.config.spring_profiles_active }} + + # Legacy integration properties + legacy-portal-integration.sessionCookieName={{ .Values.tim.config.legacy_cookie_name }} + legacy-portal-integration.sessionCookieDomain={{
.Values.tim.config.legacy_cookie_domain }} + legacy-portal-integration.taraAuthDeployedOnLegacyDomain=true + legacy-portal-integration.sessionTimeoutMinutes=30 + legacy-portal-integration.requestIpHeader=X-FORWARDED-FOR + legacy-portal-integration.requestIpAttribute=request_ip + legacy-portal-integration.redirectUrlHeader=Referer + legacy-portal-integration.redirectUrlAttribute=url_redirect + legacy-portal-integration.legacyPortalRefererMarker={{ .Values.tim.config.legacy_referer_marker }} + legacy-portal-integration.legacyUrl={{ .Values.tim.config.legacy_url }} + + # JWT configuration + jwt-integration.signature.key-store=classpath:jwtkeystore.jks + jwt-integration.signature.key-store-password=${KEY_STORE_PASSWORD} + jwt-integration.signature.keyStoreType=JKS + jwt-integration.signature.keyAlias=jwtsign + jwt-integration.signature.issuer={{ .Values.tim.config.jwt_issuer }} + jwt-integration.signature.cookieName=JWTTOKEN + + userIPHeaderName=x-forwarded-for + userIPLoggingPrefix=from IP + userIPLoggingMDCkey=userIP + + headers.contentSecurityPolicy=upgrade-insecure-requests;default-src 'self' 'unsafe-inline' 'unsafe-eval' https://tim.{{ .Values.global.domain }} https://admin.{{ .Values.global.domain }} https://ruuter.{{ .Values.global.domain }}/v2/public/ https://ruuter.{{ .Values.global.domain }}/v2/private/ tim ruuter ruuter-private backoffice-login;object-src 'self';script-src 'self' 'unsafe-inline' 'unsafe-eval' https://{{ .Values.global.domain }} https://admin.{{ .Values.global.domain }} https://tim.{{ .Values.global.domain }};connect-src 'self' https://{{ .Values.global.domain }} https://tim.{{ .Values.global.domain }} https://admin.{{ .Values.global.domain }} https://ruuter.{{ .Values.global.domain }}/v2/public/ https://ruuter.{{ .Values.global.domain }}/v2/private/;frame-src 'self';media-src 'none' + cors.allowedOrigins=http://localhost:8086,http://localhost:3004,http://localhost:8085,http://component-byk-ruuter-public:8086,http://global-classifier.local + 
auth.success.redirect.whitelist=http://localhost:3004/auth/callback,http://localhost:8086,http://global-classifier.local/auth/callback + server.port={{ .Values.tim.service.port }} + jwt.whitelist.period=30000 \ No newline at end of file diff --git a/kubernetes/charts/TIM/templates/deployment-byk-tim.yaml b/kubernetes/charts/TIM/templates/deployment-byk-tim.yaml new file mode 100644 index 0000000..1095687 --- /dev/null +++ b/kubernetes/charts/TIM/templates/deployment-byk-tim.yaml @@ -0,0 +1,46 @@ +{{- if .Values.tim.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.tim.nameOverride | default "tim" }} + labels: + app: {{ .Values.tim.nameOverride | default "tim" }} +spec: + replicas: {{ .Values.tim.replicaCount | default 1 }} + selector: + matchLabels: + app: {{ .Values.tim.nameOverride | default "tim" }} + template: + metadata: + labels: + app: {{ .Values.tim.nameOverride | default "tim" }} + spec: + containers: + - name: {{ .Values.tim.nameOverride | default "tim" }} + image: "{{ .Values.tim.image.repository }}:{{ .Values.tim.image.tag }}" + imagePullPolicy: {{ .Values.tim.image.pullPolicy }} + env: + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: tim-env-secret + key: POSTGRES_PASSWORD + - name: "OAUTH2_CLIENT_SECRET" + valueFrom: + secretKeyRef: + name: "tim-env-secret" + key: "oauth2_client_secret" + - name: "KEY_STORE_PASSWORD" + valueFrom: + secretKeyRef: + name: "tim-env-secret" + key: "jwt_integration_key_store_password" + volumeMounts: + - name: application-properties + mountPath: /workspace/app/src/main/resources/application.properties + subPath: application.properties + volumes: + - name: application-properties + configMap: + name: tim-config +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM/templates/ingress.yaml b/kubernetes/charts/TIM/templates/ingress.yaml new file mode 100644 index 0000000..129ff01 --- /dev/null +++ b/kubernetes/charts/TIM/templates/ingress.yaml @@ -0,0 +1,30 @@ 
+{{- if and .Values.tim.enabled .Values.tim.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.tim.nameOverride | default "tim" }}-ingress + namespace: {{ .Release.Namespace }} + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: letsencrypt-prod + labels: + name: {{ .Values.tim.nameOverride | default "tim" }}-ingress +spec: + rules: + - host: {{ .Values.tim.ingress.host }} + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: {{ .Values.tim.nameOverride | default "tim" }} + port: + number: {{ .Values.tim.service.port }} + tls: + - hosts: + - {{ .Values.tim.ingress.host }} + secretName: {{ .Values.tim.ingress.secretName }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM/templates/secret-byk-tim.yaml b/kubernetes/charts/TIM/templates/secret-byk-tim.yaml new file mode 100644 index 0000000..81fc11a --- /dev/null +++ b/kubernetes/charts/TIM/templates/secret-byk-tim.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: tim-env-secret +type: Opaque +stringData: + oauth2_client_secret: "" + jwt_integration_key_store_password: "" + POSTGRES_PASSWORD: "" + diff --git a/kubernetes/charts/TIM/templates/service-byk-tim.yaml b/kubernetes/charts/TIM/templates/service-byk-tim.yaml new file mode 100644 index 0000000..1a1722d --- /dev/null +++ b/kubernetes/charts/TIM/templates/service-byk-tim.yaml @@ -0,0 +1,15 @@ +{{- if .Values.tim.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.tim.nameOverride | default "tim" }} + labels: + app: {{ .Values.tim.nameOverride | default "tim" }} +spec: + type: {{ .Values.tim.service.type | default "ClusterIP" }} + ports: + - port: {{ .Values.tim.service.port }} + targetPort: {{ .Values.tim.service.port }} + selector: + app: {{ .Values.tim.nameOverride | default "tim" }}
+{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM/values.yaml b/kubernetes/charts/TIM/values.yaml new file mode 100644 index 0000000..daba08c --- /dev/null +++ b/kubernetes/charts/TIM/values.yaml @@ -0,0 +1,44 @@ +global: + domain: localhost + tim_postgresql: + auth: + username: tim +tim: + enabled: true + nameOverride: tim + ingress: + enabled: false + host: tim.example.com + secretName: tim-tls + image: + repository: ghcr.io/buerokratt/tim + tag: pre-apha-2.7.1 + pullPolicy: IfNotPresent + service: + type: ClusterIP + port: 8085 + env: + - POSTGRES_PASSWORD: "tim" + + config: + security_allowlist_jwt: "ruuter-public,ruuter-private,ruuter,ruuter-internal,data-mapper,resql,tim,tim-postgresql,chat-widget,authentication-layer,127.0.0.1,::1" + jwt_issuer: "tim-issuer" + spring_profiles_active: "dev" + logging_level_root: "DEBUG" + legacy_cookie_name: "PHPSESSID" + legacy_cookie_domain: "example.com" + legacy_referer_marker: "NA" + legacy_url: "NA" + oauth2_client_id: "your-client-id" + oauth2_client_scope: "read,write" + oauth2_user_auth_uri: "https://tara-test.ria.ee/oidc/authorize" + oauth2_access_token_uri: "https://tara-test.ria.ee/oidc/token" + oauth2_jwk_uri: "https://tara-test.ria.ee/oidc/jwks" + + resources: + limits: + cpu: "500m" + memory: "512Mi" + requests: + cpu: "250m" + memory: "256Mi" \ No newline at end of file diff --git a/kubernetes/charts/Vault-Agent-LLM/Chart.yaml b/kubernetes/charts/Vault-Agent-LLM/Chart.yaml new file mode 100644 index 0000000..07e7677 --- /dev/null +++ b/kubernetes/charts/Vault-Agent-LLM/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: Vault-Agent-LLM +description: Vault Agent for LLM Orchestration Service secret injection +type: application +version: 0.1.0 +appVersion: "1.20.3" +dependencies: [] \ No newline at end of file diff --git a/kubernetes/charts/Vault-Agent-LLM/templates/configmap.yaml b/kubernetes/charts/Vault-Agent-LLM/templates/configmap.yaml new file mode 100644 index 0000000..17d90c0 --- 
/dev/null +++ b/kubernetes/charts/Vault-Agent-LLM/templates/configmap.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.release_name }}-config + labels: + app: {{ .Values.release_name }} + component: vault-agent-llm +data: + agent.hcl: | + # Vault agent configuration for LLM Orchestration Service + + vault { + address = "http://vault:8200" + } + + pid_file = "/agent/out/pidfile" + + auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "/agent/credentials/role_id" + secret_id_file_path = "/agent/credentials/secret_id" + remove_secret_id_file_after_reading = false + } + } + + sink "file" { + config = { + path = "/agent/out/token" + } + } + } + + cache { + default_lease_duration = "1h" + } + + listener "tcp" { + address = "127.0.0.1:8201" + tls_disable = true + } + + api_proxy { + use_auto_auth_token = true + enforce_consistency = "always" + when_inconsistent = "forward" + } diff --git a/kubernetes/charts/Vault-Agent-LLM/templates/deployment.yaml b/kubernetes/charts/Vault-Agent-LLM/templates/deployment.yaml new file mode 100644 index 0000000..ebd785c --- /dev/null +++ b/kubernetes/charts/Vault-Agent-LLM/templates/deployment.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} + component: vault-agent-llm +spec: + replicas: {{ .Values.deployment.replicas }} + selector: + matchLabels: + app: {{ .Values.release_name }} + component: vault-agent-llm + template: + metadata: + labels: + app: {{ .Values.release_name }} + component: vault-agent-llm + spec: + {{- if .Values.affinity.enabled }} + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - {{ .Values.vault.serviceName }} + topologyKey: kubernetes.io/hostname + {{- end }} + volumes: + {{- if .Values.volumes.agentCredentials.enabled }} + - name: 
vault-agent-creds + persistentVolumeClaim: + claimName: vault-agent-creds + {{- end }} + {{- if .Values.volumes.agentToken.enabled }} + - name: vault-agent-token + persistentVolumeClaim: + claimName: vault-agent-token + {{- end }} + {{- if .Values.volumes.agentConfig.enabled }} + - name: vault-agent-config + configMap: + name: {{ .Values.release_name }}-config + defaultMode: 0644 + {{- end }} + containers: + - name: vault-agent + image: "{{ .Values.images.vault.registry }}/{{ .Values.images.vault.repository }}:{{ .Values.images.vault.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - vault + - agent + - -config=/agent/config/agent.hcl + - -log-level=info + env: + - name: VAULT_ADDR + value: {{ .Values.vault.addr | quote }} + - name: VAULT_SKIP_VERIFY + value: "true" + volumeMounts: + {{- if .Values.volumes.agentCredentials.enabled }} + - name: vault-agent-creds + mountPath: {{ .Values.volumes.agentCredentials.mountPath }} + readOnly: true + {{- end }} + {{- if .Values.volumes.agentToken.enabled }} + - name: vault-agent-token + mountPath: {{ .Values.volumes.agentToken.mountPath }} + {{- end }} + {{- if .Values.volumes.agentConfig.enabled }} + - name: vault-agent-config + mountPath: {{ .Values.volumes.agentConfig.mountPath }} + readOnly: true + {{- end }} + {{- if .Values.probes.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.probes.livenessProbe.httpGet.path }} + port: {{ .Values.probes.livenessProbe.httpGet.port }} + initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.probes.livenessProbe.periodSeconds }} + {{- end }} + {{- if .Values.probes.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.probes.readinessProbe.httpGet.path }} + port: {{ .Values.probes.readinessProbe.httpGet.port }} + initialDelaySeconds: {{ .Values.probes.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.probes.readinessProbe.periodSeconds }} + {{- end }} + {{- if 
.Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- end }} + securityContext: + capabilities: + add: + - IPC_LOCK \ No newline at end of file diff --git a/kubernetes/charts/Vault-Agent-LLM/values.yaml b/kubernetes/charts/Vault-Agent-LLM/values.yaml new file mode 100644 index 0000000..29b630f --- /dev/null +++ b/kubernetes/charts/Vault-Agent-LLM/values.yaml @@ -0,0 +1,101 @@ +enabled: true + +images: + vault: + registry: "docker.io" + repository: "hashicorp/vault" + tag: "1.20.3" + +release_name: "vault-agent-llm" + +# Vault service dependency +vault: + serviceName: "vault" + addr: "http://vault:8200" + +# Pod affinity to ensure co-location with Vault +affinity: + enabled: true + # Ensure this pod is scheduled on same node as Vault pod + colocateWithVault: true + +# Deployment configuration +deployment: + replicas: 1 + # Use Deployment for consistent agent behavior + type: "Deployment" + +# Shared volumes for vault ecosystem +volumes: + agentCredentials: + enabled: true + mountPath: "/agent/credentials" + # Uses same PVC as vault-init + + agentToken: + enabled: true + mountPath: "/agent/out" + # Uses same PVC as vault-init + + agentConfig: + enabled: true + mountPath: "/agent/config" + # ConfigMap for vault agent configuration + +# Vault agent configuration +agent: + enabled: true + config: + # Auto-auth configuration + autoAuth: + method: "kubernetes" + mountPath: "auth/kubernetes" + + # Cache configuration + cache: + enabled: true + + # Template configuration for secret injection + templates: + enabled: true + secrets: + - name: "llm-secrets" + path: "/agent/out/secrets.env" + template: | + {{- with secret "secret/llm-orchestration" -}} + OPENAI_API_KEY={{ .Data.data.openai_api_key }} + ANTHROPIC_API_KEY={{ .Data.data.anthropic_api_key }} + AZURE_OPENAI_API_KEY={{ .Data.data.azure_openai_api_key }} + AZURE_OPENAI_ENDPOINT={{ .Data.data.azure_openai_endpoint }} + OLLAMA_HOST={{ .Data.data.ollama_host }} + VECTOR_DB_HOST={{ 
.Data.data.vector_db_host }}
+      VECTOR_DB_PORT={{ .Data.data.vector_db_port }}
+      VECTOR_DB_COLLECTION={{ .Data.data.vector_db_collection }}
+      {{- end -}}
+
+pullPolicy: IfNotPresent
+
+resources:
+  requests:
+    memory: "128Mi"
+    cpu: "100m"
+  limits:
+    memory: "256Mi"
+    cpu: "200m"
+
+probes:
+  livenessProbe:
+    enabled: false
+    httpGet:
+      path: "/v1/sys/health"
+      port: 8200
+    initialDelaySeconds: 30
+    periodSeconds: 30
+
+  readinessProbe:
+    enabled: false
+    httpGet:
+      path: "/v1/sys/health"
+      port: 8200
+    initialDelaySeconds: 10
+    periodSeconds: 10
\ No newline at end of file
diff --git a/kubernetes/charts/Vault-Init/Chart.yaml b/kubernetes/charts/Vault-Init/Chart.yaml
new file mode 100644
index 0000000..83178bb
--- /dev/null
+++ b/kubernetes/charts/Vault-Init/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: Vault-Init
+description: Vault initialization job for RAG Module
+version: 0.1.0
+appVersion: "1.20.3"
+type: application
\ No newline at end of file
diff --git a/kubernetes/charts/Vault-Init/templates/configmap.yaml b/kubernetes/charts/Vault-Init/templates/configmap.yaml
new file mode 100644
index 0000000..c0d55b4
--- /dev/null
+++ b/kubernetes/charts/Vault-Init/templates/configmap.yaml
@@ -0,0 +1,186 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .Values.release_name }}-script
+  labels:
+    app: {{ .Values.release_name }}
+    component: vault-init
+data:
+  {{ .Values.initScript.filename }}: |
+    #!/bin/sh
+    set -e
+
+    VAULT_ADDR="${VAULT_ADDR:-http://vault:8200}"
+    UNSEAL_KEYS_FILE="/vault/data/unseal-keys.json"
+    INIT_FLAG="/vault/data/.initialized"
+
+    echo "=== Vault Initialization Script ==="
+
+    # Wait for Vault to be ready
+    echo "Waiting for Vault..."
+    for i in $(seq 1 30); do
+      if wget -q -O- "$VAULT_ADDR/v1/sys/health" >/dev/null 2>&1; then
+        echo "Vault is ready"
+        break
+      fi
+      echo "Waiting... ($i/30)"
+      sleep 2
+    done
+
+    # Check if this is first time
+    if [ ! -f "$INIT_FLAG" ]; then
+      echo "=== FIRST TIME DEPLOYMENT ==="
+
+      # Initialize Vault
+      echo "Initializing Vault..."
+      wget -q -O- --post-data='{"secret_shares":5,"secret_threshold":3}' \
+        --header='Content-Type: application/json' \
+        "$VAULT_ADDR/v1/sys/init" > "$UNSEAL_KEYS_FILE"
+
+      ROOT_TOKEN=$(grep -o '"root_token":"[^"]*"' "$UNSEAL_KEYS_FILE" | cut -d':' -f2 | tr -d '"')
+      export VAULT_TOKEN="$ROOT_TOKEN"
+
+      # Extract unseal keys
+      KEY1=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '2p' | tr -d '"')
+      KEY2=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '3p' | tr -d '"')
+      KEY3=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '4p' | tr -d '"')
+
+      # Unseal Vault
+      echo "Unsealing Vault..."
+      wget -q -O- --post-data="{\"key\":\"$KEY1\"}" \
+        --header='Content-Type: application/json' \
+        "$VAULT_ADDR/v1/sys/unseal" >/dev/null
+
+      wget -q -O- --post-data="{\"key\":\"$KEY2\"}" \
+        --header='Content-Type: application/json' \
+        "$VAULT_ADDR/v1/sys/unseal" >/dev/null
+
+      wget -q -O- --post-data="{\"key\":\"$KEY3\"}" \
+        --header='Content-Type: application/json' \
+        "$VAULT_ADDR/v1/sys/unseal" >/dev/null
+
+      sleep 2
+      echo "Vault unsealed"
+
+      # Enable KV v2
+      echo "Enabling KV v2 secrets engine..."
+      wget -q -O- --post-data='{"type":"kv","options":{"version":"2"}}' \
+        --header="X-Vault-Token: $ROOT_TOKEN" \
+        --header='Content-Type: application/json' \
+        "$VAULT_ADDR/v1/sys/mounts/secret" >/dev/null 2>&1 || echo "KV already enabled"
+
+      # Enable AppRole
+      echo "Enabling AppRole..."
+      wget -q -O- --post-data='{"type":"approle"}' \
+        --header="X-Vault-Token: $ROOT_TOKEN" \
+        --header='Content-Type: application/json' \
+        "$VAULT_ADDR/v1/sys/auth/approle" >/dev/null 2>&1 || echo "AppRole already enabled"
+
+      # Create policy
+      echo "Creating llm-orchestration policy..."
+      POLICY='path "secret/metadata/llm/*" { capabilities = ["list", "delete"] }
+      path "secret/data/llm/*" { capabilities = ["create", "read", "update", "delete"] }
+      path "auth/token/lookup-self" { capabilities = ["read"] }
+      path "secret/metadata/embeddings/*" { capabilities = ["list", "delete"] }
+      path "secret/data/embeddings/*" { capabilities = ["create", "read", "update", "delete"] }'
+
+      POLICY_JSON=$(echo "$POLICY" | jq -Rs '{"policy":.}')
+      wget -q -O- --post-data="$POLICY_JSON" \
+        --header="X-Vault-Token: $ROOT_TOKEN" \
+        --header='Content-Type: application/json' \
+        "$VAULT_ADDR/v1/sys/policies/acl/llm-orchestration" >/dev/null
+
+      # Create AppRole
+      echo "Creating llm-orchestration-service AppRole..."
+      wget -q -O- --post-data='{"token_policies":["llm-orchestration"],"token_no_default_policy":true,"token_ttl":"1h","token_max_ttl":"24h","secret_id_ttl":"24h","secret_id_num_uses":0,"bind_secret_id":true}' \
+        --header="X-Vault-Token: $ROOT_TOKEN" \
+        --header='Content-Type: application/json' \
+        "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service" >/dev/null
+
+      # Ensure credentials directory exists
+      mkdir -p /agent/credentials
+
+      # Get role_id
+      echo "Getting role_id..."
+      ROLE_ID=$(wget -q -O- \
+        --header="X-Vault-Token: $ROOT_TOKEN" \
+        "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service/role-id" | \
+        grep -o '"role_id":"[^"]*"' | cut -d':' -f2 | tr -d '"')
+      echo "$ROLE_ID" > /agent/credentials/role_id
+
+      # Generate secret_id
+      echo "Generating secret_id..."
+      SECRET_ID=$(wget -q -O- --post-data='' \
+        --header="X-Vault-Token: $ROOT_TOKEN" \
+        "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service/secret-id" | \
+        grep -o '"secret_id":"[^"]*"' | cut -d':' -f2 | tr -d '"')
+      echo "$SECRET_ID" > /agent/credentials/secret_id
+
+      chmod 644 /agent/credentials/role_id /agent/credentials/secret_id # NOTE(review): secret_id is world-readable; prefer 600 if the consumer's uid allows
+
+      # Mark as initialized
+      touch "$INIT_FLAG"
+      echo "=== First time setup complete ==="
+
+    else
+      echo "=== SUBSEQUENT DEPLOYMENT ==="
+
+      # Check if Vault is sealed
+      SEALED=$(wget -q -O- "$VAULT_ADDR/v1/sys/seal-status" | grep -o '"sealed":[^,}]*' | cut -d':' -f2)
+
+      if [ "$SEALED" = "true" ]; then
+        echo "Vault is sealed. Unsealing..."
+
+        # Load unseal keys
+        KEY1=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '2p' | tr -d '"')
+        KEY2=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '3p' | tr -d '"')
+        KEY3=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '4p' | tr -d '"')
+
+        wget -q -O- --post-data="{\"key\":\"$KEY1\"}" \
+          --header='Content-Type: application/json' \
+          "$VAULT_ADDR/v1/sys/unseal" >/dev/null
+
+        wget -q -O- --post-data="{\"key\":\"$KEY2\"}" \
+          --header='Content-Type: application/json' \
+          "$VAULT_ADDR/v1/sys/unseal" >/dev/null
+
+        wget -q -O- --post-data="{\"key\":\"$KEY3\"}" \
+          --header='Content-Type: application/json' \
+          "$VAULT_ADDR/v1/sys/unseal" >/dev/null
+
+        sleep 2
+        echo "Vault unsealed"
+
+        # Get root token
+        ROOT_TOKEN=$(grep -o '"root_token":"[^"]*"' "$UNSEAL_KEYS_FILE" | cut -d':' -f2 | tr -d '"')
+        export VAULT_TOKEN="$ROOT_TOKEN"
+
+        # Ensure credentials directory exists
+        mkdir -p /agent/credentials
+
+        # Regenerate secret_id after unseal
+        echo "Regenerating secret_id..."
+        SECRET_ID=$(wget -q -O- --post-data='' \
+          --header="X-Vault-Token: $ROOT_TOKEN" \
+          "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service/secret-id" | \
+          grep -o '"secret_id":"[^"]*"' | cut -d':' -f2 | tr -d '"')
+        echo "$SECRET_ID" > /agent/credentials/secret_id
+        chmod 644 /agent/credentials/secret_id
+
+        # Ensure role_id exists
+        if [ ! -f /agent/credentials/role_id ]; then
+          echo "Copying role_id..."
+          mkdir -p /agent/credentials
+          ROLE_ID=$(wget -q -O- \
+            --header="X-Vault-Token: $ROOT_TOKEN" \
+            "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service/role-id" | \
+            grep -o '"role_id":"[^"]*"' | cut -d':' -f2 | tr -d '"')
+          echo "$ROLE_ID" > /agent/credentials/role_id
+          chmod 644 /agent/credentials/role_id
+        fi
+      else
+        echo "Vault is unsealed. No action needed."
+      fi
+    fi
+
+    echo "=== Vault init complete ==="
\ No newline at end of file
diff --git a/kubernetes/charts/Vault-Init/templates/job.yaml b/kubernetes/charts/Vault-Init/templates/job.yaml
new file mode 100644
index 0000000..96f2ada
--- /dev/null
+++ b/kubernetes/charts/Vault-Init/templates/job.yaml
@@ -0,0 +1,93 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ .Values.release_name }}
+  labels:
+    app: {{ .Values.release_name }}
+    component: vault-init
+spec:
+  backoffLimit: {{ .Values.job.backoffLimit }}
+  template:
+    metadata:
+      labels:
+        app: {{ .Values.release_name }}
+        component: vault-init
+    spec:
+      restartPolicy: {{ .Values.job.restartPolicy }}
+      {{- if .Values.affinity.enabled }}
+      affinity:
+        podAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchExpressions:
+                  - key: app
+                    operator: In
+                    values:
+                      - {{ .Values.vault.serviceName }}
+              topologyKey: kubernetes.io/hostname
+      {{- end }}
+      volumes:
+        {{- if .Values.volumes.vaultData.enabled }}
+        - name: vault-data
+          persistentVolumeClaim:
+            claimName: vault-storage-{{ .Values.vault.serviceName }}-0
+        {{- end }}
+        {{- if .Values.volumes.agentCredentials.enabled }}
+        - name: vault-agent-creds
+          persistentVolumeClaim:
+            claimName: vault-agent-creds
+        {{- end }}
+        {{- if .Values.volumes.agentToken.enabled }}
+        - name: vault-agent-token
+          persistentVolumeClaim:
+            claimName: vault-agent-token
+        {{- end }}
+        {{- if .Values.initScript.enabled }}
+        - name: init-script
+          configMap:
+            name: {{ .Values.release_name }}-script
+            defaultMode: 0755
+        {{- end }}
+      containers:
+        - name: vault-init
+          image: "{{ .Values.images.vault.registry }}/{{ .Values.images.vault.repository }}:{{ .Values.images.vault.tag }}"
+          imagePullPolicy: {{ .Values.pullPolicy }}
+          command: ["/bin/sh", "-c"]
+          args:
+            - |
+              # Install dependencies and setup permissions
+              apk add --no-cache curl jq
+              mkdir -p /agent/credentials /agent/out
+              chmod -R 755 /agent/credentials
+              chmod -R 770 /agent/out
+              echo "Permissions set successfully"
+
+              # Run the init script
+              /bin/sh /scripts/{{ .Values.initScript.filename }}
+          env:
+            - name: VAULT_ADDR
+              value: {{ .Values.vault.addr | quote }}
+            - name: VAULT_SKIP_VERIFY
+              value: "true"
+          volumeMounts:
+            {{- if .Values.volumes.vaultData.enabled }}
+            - name: vault-data
+              mountPath: {{ .Values.volumes.vaultData.mountPath }}
+            {{- end }}
+            {{- if .Values.volumes.agentCredentials.enabled }}
+            - name: vault-agent-creds
+              mountPath: {{ .Values.volumes.agentCredentials.mountPath }}
+            {{- end }}
+            {{- if .Values.volumes.agentToken.enabled }}
+            - name: vault-agent-token
+              mountPath: {{ .Values.volumes.agentToken.mountPath }}
+            {{- end }}
+            {{- if .Values.initScript.enabled }}
+            - name: init-script
+              mountPath: "/scripts"
+              readOnly: true
+            {{- end }}
+          {{- if .Values.resources }}
+          resources:
+{{ toYaml .Values.resources | indent 10 }}
+          {{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/Vault-Init/templates/pvc.yaml b/kubernetes/charts/Vault-Init/templates/pvc.yaml
new file mode 100644
index 0000000..7084dc3
--- /dev/null
+++ b/kubernetes/charts/Vault-Init/templates/pvc.yaml
@@ -0,0 +1,37 @@
+{{- if .Values.volumes.agentCredentials.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: vault-agent-creds
+  labels:
+    app: {{ .Values.release_name }}
+    component: vault-init
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: {{ .Values.volumes.agentCredentials.size }}
+  {{- if .Values.volumes.agentCredentials.storageClass }}
+  storageClassName: {{ .Values.volumes.agentCredentials.storageClass }}
+  {{- end }}
+---
+{{- end }}
+{{- if .Values.volumes.agentToken.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: vault-agent-token
+  labels:
+    app: {{ .Values.release_name }}
+    component: vault-init
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: {{ .Values.volumes.agentToken.size }}
+  {{- if .Values.volumes.agentToken.storageClass }}
+  storageClassName: {{ .Values.volumes.agentToken.storageClass }}
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/Vault-Init/values.yaml b/kubernetes/charts/Vault-Init/values.yaml
new file mode 100644
index 0000000..207da59
--- /dev/null
+++ b/kubernetes/charts/Vault-Init/values.yaml
@@ -0,0 +1,60 @@
+enabled: true
+
+images:
+  vault:
+    registry: "docker.io"
+    repository: "hashicorp/vault"
+    tag: "1.20.3"
+
+release_name: "vault-init"
+
+# Vault service dependency
+vault:
+  serviceName: "vault"
+  addr: "http://vault:8200"
+
+# Pod affinity to ensure co-location with Vault
+affinity:
+  enabled: true
+  # Ensure this pod is scheduled on same node as Vault pod
+  colocateWithVault: true
+
+
+job:
+  backoffLimit: 3
+  restartPolicy: "Never"
+
+
+volumes:
+  vaultData:
+    enabled: true
+    mountPath: "/vault/data"
+
+  agentCredentials:
+    enabled: true
+    mountPath: "/agent/credentials"
+    size: "100Mi"
+    accessMode: "ReadWriteMany"
+    storageClass: ""
+
+  agentToken:
+    enabled: true
+    mountPath: "/agent/out"
+    size: "100Mi"
+    accessMode: "ReadWriteMany"
+    storageClass: ""
+
+# Init script configuration
+initScript:
+  enabled: true
+  filename: "vault-init.sh"
+
+pullPolicy: IfNotPresent
+
+resources:
+  requests:
+    memory: "128Mi"
+    cpu: "100m"
+  limits:
+    memory: "256Mi"
+    cpu: "200m"
\ No newline at end of file
diff --git a/kubernetes/charts/Vault/Chart.yaml b/kubernetes/charts/Vault/Chart.yaml
new file mode 100644
index 0000000..4b6ffec
--- /dev/null
+++ b/kubernetes/charts/Vault/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: Vault
+description: HashiCorp Vault secrets management for RAG Module
+version: 0.1.0
+appVersion: "1.20.3"
+type: application
\ No newline at end of file
diff --git a/kubernetes/charts/Vault/templates/configmap.yaml b/kubernetes/charts/Vault/templates/configmap.yaml
new file mode 100644
index 0000000..1e32fd9
--- /dev/null
+++ b/kubernetes/charts/Vault/templates/configmap.yaml
@@ -0,0 +1,66 @@
+{{- if .Values.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "{{ .Values.release_name }}-config"
+  labels:
+    app: "{{ .Values.release_name }}"
+    component: vault
+data:
+  vault.hcl: |
+    # HashiCorp Vault Server Configuration
+    # Production-ready configuration for LLM Orchestration Service
+
+    # Storage backend - Raft for high availability
+    storage "raft" {
+      path = "/vault/file"
+      node_id = "vault-node-1"
+
+      # Retry join configuration for clustering (single node for now)
+      retry_join {
+        leader_api_addr = "http://vault:8200"
+      }
+    }
+
+    # HTTP listener configuration
+    listener "tcp" {
+      address = "0.0.0.0:8200"
+      tls_disable = true
+
+      # Enable CORS for web UI access
+      cors_enabled = true
+      cors_allowed_origins = [
+        "http://localhost:8200",
+        "http://vault:8200"
+      ]
+    }
+
+    # Cluster listener for HA (required even for single node)
+    listener "tcp" {
+      address = "0.0.0.0:8201"
+      cluster_addr = "http://0.0.0.0:8201"
+      tls_disable = true
+    }
+
+    # API and cluster addresses
+    api_addr = "http://vault:8200"
+    cluster_addr = "http://vault:8201"
+
+    # Security and performance settings
+    disable_mlock = false
+    disable_cache = false
+    ui = false
+
+    # Default lease and maximum lease durations
+    default_lease_ttl = "168h"  # 7 days
+    max_lease_ttl = "720h"      # 30 days
+
+    # Logging configuration
+    log_level = "INFO"
+    log_format = "json"
+
+    # Development settings (remove in production)
+    # Note: In production, you should not use dev mode
+    # and should properly initialize and unseal the vault
+
+{{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/Vault/templates/service-byk-vault.yaml b/kubernetes/charts/Vault/templates/service-byk-vault.yaml
new file mode 100644
index 0000000..b7501f9
--- /dev/null
+++ b/kubernetes/charts/Vault/templates/service-byk-vault.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: "{{ .Values.release_name }}"
+  labels:
+    app: "{{ .Values.release_name }}"
+    component: vault
+spec:
+  type: {{ .Values.service.type }}
+  {{- if eq .Values.service.type "ClusterIP" }}
+  {{- if .Values.service.headless }}
+  clusterIP: None
+  {{- end }}
+  {{- end }}
+  selector:
+    app: "{{ .Values.release_name }}"
+  ports:
+    - name: http
+      protocol: TCP
+      port: {{ .Values.service.port }}
+      targetPort: {{ .Values.service.targetPort }}
+{{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/Vault/templates/statefulset-byk-vault.yaml b/kubernetes/charts/Vault/templates/statefulset-byk-vault.yaml
new file mode 100644
index 0000000..b68fbbc
--- /dev/null
+++ b/kubernetes/charts/Vault/templates/statefulset-byk-vault.yaml
@@ -0,0 +1,122 @@
+{{- if .Values.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: "{{ .Values.release_name }}"
+  labels:
+    app: "{{ .Values.release_name }}"
+    component: vault
+spec:
+  serviceName: "{{ .Values.release_name }}"
+  replicas: {{ .Values.replicas }}
+  selector:
+    matchLabels:
+      app: "{{ .Values.release_name }}"
+  template:
+    metadata:
+      labels:
+        app: "{{ .Values.release_name }}"
+        component: vault
+    spec:
+      {{- if .Values.securityContext.enabled }}
+      securityContext:
+        runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
+        runAsUser: {{ .Values.securityContext.runAsUser }}
+        runAsGroup: {{ .Values.securityContext.runAsGroup }}
+        fsGroup: {{ .Values.securityContext.fsGroup }}
+      {{- end }}
+      {{- if .Values.initContainer.enabled }}
+      initContainers:
+        - name: vault-init
+          image: "{{ .Values.initContainer.image.registry }}/{{ .Values.initContainer.image.repository }}:{{ .Values.initContainer.image.tag }}"
+          command:
+            - sh
+            - -c
+            - |
+              chown -R 100:1000 /vault/file
+              chmod -R 755 /vault/file
+          volumeMounts:
+            - name: vault-storage
+              mountPath: {{ .Values.persistence.mountPath }}
+          securityContext:
+            runAsUser: 0
+      {{- end }}
+      containers:
+        - name: "{{ .Values.release_name }}"
+          image: "{{ .Values.images.vault.registry }}/{{ .Values.images.vault.repository }}:{{ .Values.images.vault.tag }}"
+          imagePullPolicy: {{ .Values.pullPolicy }}
+          command:
+            - vault
+            - server
+            - -config=/vault/config/vault.hcl
+          ports:
+            - name: http
+              containerPort: {{ .Values.service.targetPort }}
+              protocol: TCP
+            - name: cluster
+              containerPort: 8201
+              protocol: TCP
+          env:
+            - name: VAULT_ADDR
+              value: "http://0.0.0.0:{{ .Values.service.targetPort }}"
+            - name: VAULT_SKIP_VERIFY_CONFIG_PERMISSIONS
+              value: "true"
+          {{- if .Values.healthcheck.enabled }}
+          livenessProbe:
+            httpGet:
+              path: "{{ .Values.healthcheck.httpPath }}"
+              port: {{ .Values.service.targetPort }}
+            initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }}
+            periodSeconds: {{ .Values.healthcheck.periodSeconds }}
+            timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }}
+            failureThreshold: {{ .Values.healthcheck.failureThreshold }}
+          readinessProbe:
+            httpGet:
+              path: "{{ .Values.healthcheck.httpPath }}"
+              port: {{ .Values.service.targetPort }}
+            initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }}
+            periodSeconds: {{ .Values.healthcheck.periodSeconds }}
+            timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }}
+            failureThreshold: {{ .Values.healthcheck.failureThreshold }}
+          {{- end }}
+          volumeMounts:
+            - name: vault-config
+              mountPath: /vault/config
+              readOnly: true
+            {{- if .Values.persistence.enabled }}
+            - name: vault-storage
+              mountPath: {{ .Values.persistence.mountPath }}
+            {{- end }}
+          resources:
+            requests:
+              memory: "{{ .Values.resources.requests.memory }}"
+              cpu: "{{ .Values.resources.requests.cpu }}"
+            limits:
+              memory: "{{ .Values.resources.limits.memory }}"
+              cpu: "{{ .Values.resources.limits.cpu }}"
+          securityContext:
+            capabilities:
+              add:
+                - IPC_LOCK
+      volumes:
+        - name: vault-config
+          configMap:
+            name: "{{ .Values.release_name }}-config"
+  {{- if .Values.persistence.enabled }}
+  volumeClaimTemplates:
+    - metadata:
+        name: vault-storage
+        labels:
+          app: "{{ .Values.release_name }}"
+          component: vault
+      spec:
+        accessModes:
+          - {{ .Values.persistence.accessMode }}
+        resources:
+          requests:
+            storage: {{ .Values.persistence.size }}
+        {{- if .Values.persistence.storageClass }}
+        storageClassName: {{ .Values.persistence.storageClass }}
+        {{- end }}
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/Vault/values.yaml b/kubernetes/charts/Vault/values.yaml
new file mode 100644
index 0000000..253aa93
--- /dev/null
+++ b/kubernetes/charts/Vault/values.yaml
@@ -0,0 +1,71 @@
+replicas: 1
+enabled: true
+
+images:
+  vault:
+    registry: "docker.io"
+    repository: "hashicorp/vault"
+    tag: "1.20.3"
+
+release_name: "vault"
+
+service:
+  type: ClusterIP
+  # Set to true for headless service (direct pod access)
+  headless: false
+  port: 8200
+  targetPort: 8200
+
+persistence:
+  enabled: true
+  storageClass: ""
+  accessMode: ReadWriteMany
+  size: 10Gi
+  mountPath: "/vault/file"
+
+# Vault configuration
+vault:
+  config:
+    # File storage backend
+    storage_file_path: "/vault/file"
+    # API settings
+    disable_mlock: true
+    ui: true
+    # Network settings
+    listener_address: "0.0.0.0:8200"
+    cluster_address: "0.0.0.0:8201"
+
+resources:
+  requests:
+    memory: "256Mi"
+    cpu: "100m"
+  limits:
+    memory: "1Gi"
+    cpu: "500m"
+
+pullPolicy: IfNotPresent
+
+healthcheck:
+  enabled: false
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  timeoutSeconds: 3
+  failureThreshold: 20
+  successThreshold: 1
+  # Vault health endpoint
+  httpPath: "/v1/sys/health"
+
+securityContext:
+  enabled: true
+  runAsNonRoot: false # NOTE(review): with runAsUser 0 the Vault pod runs as root — contradicts hardening intent; confirm this is deliberate
+  runAsUser: 0
+  runAsGroup: 0
+  fsGroup: 0
+
+# Init container configuration
+initContainer:
+  enabled: true
+  image:
+    registry: "docker.io"
+    repository: "busybox"
+    tag: "1.35"
\ No newline at end of file
diff --git a/kubernetes/charts/database/Chart.yaml b/kubernetes/charts/database/Chart.yaml
new file mode 100644
index 0000000..9612978
--- /dev/null
+++ b/kubernetes/charts/database/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: database
+description: PostgreSQL databases for RAG Module using pure PostgreSQL
+type: application
+version: 0.2.0
+
\ No newline at end of file
diff --git a/kubernetes/charts/database/templates/secret.yaml b/kubernetes/charts/database/templates/secret.yaml
new file mode 100644
index 0000000..244a450
--- /dev/null
+++ b/kubernetes/charts/database/templates/secret.yaml
@@ -0,0 +1,12 @@
+{{- range .Values.databases }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .name }}-secret
+  labels:
+    app: {{ .name }}
+type: Opaque
+data:
+  password: {{ .password | b64enc | quote }}
+---
+{{- end }}
diff --git a/kubernetes/charts/database/templates/service.yaml b/kubernetes/charts/database/templates/service.yaml
new file mode 100644
index 0000000..2a1a539
--- /dev/null
+++ b/kubernetes/charts/database/templates/service.yaml
@@ -0,0 +1,34 @@
+{{- range .Values.databases }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .name }}
+  labels:
+    app: {{ .name }}
+spec:
+  type: ClusterIP
+  selector:
+    app: {{ .name }}
+  ports:
+    - name: postgres
+      port: {{ $.Values.service.port }}
+      targetPort: {{ $.Values.service.port }}
+---
+# Headless service for StatefulSet
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .name }}-headless
+  labels:
+    app: {{ .name }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: {{ .name }}
+  ports:
+    - name: postgres
+      port: {{ $.Values.service.port }}
+      targetPort: {{ $.Values.service.port }}
+---
+{{- end }}
diff --git a/kubernetes/charts/database/templates/statefulset.yaml b/kubernetes/charts/database/templates/statefulset.yaml
new file mode 100644
index 0000000..4ff6581
--- /dev/null
+++ b/kubernetes/charts/database/templates/statefulset.yaml
@@ -0,0 +1,66 @@
+{{- range .Values.databases }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ .name }}
+  labels:
+    app: {{ .name }}
+spec:
+  serviceName: {{ .name }}-headless
+  replicas: 1
+  selector:
+    matchLabels:
+      app: {{ .name }}
+  template:
+    metadata:
+      labels:
+        app: {{ .name }}
+    spec:
+      securityContext:
+        fsGroup: 999
+      terminationGracePeriodSeconds: 30
+      containers:
+        - name: postgresql
+          image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}"
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+          env:
+            - name: POSTGRES_USER
+              value: "{{ .username }}"
+            - name: POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .name }}-secret
+                  key: password
+            - name: POSTGRES_DB
+              value: "{{ .db }}"
+            - name: PGDATA
+              value: /var/lib/postgresql/data/pgdata
+          ports:
+            - name: postgres
+              containerPort: {{ $.Values.service.port }}
+          livenessProbe:
+            tcpSocket:
+              port: {{ $.Values.service.port }}
+            initialDelaySeconds: 30
+            periodSeconds: 10
+          readinessProbe:
+            tcpSocket:
+              port: {{ $.Values.service.port }}
+            initialDelaySeconds: 15
+            periodSeconds: 10
+          volumeMounts:
+            - name: data
+              mountPath: /var/lib/postgresql/data
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes: {{ toYaml $.Values.persistence.accessModes | nindent 8 }}
+        resources:
+          requests:
+            storage: {{ .storage }}
+        {{- if $.Values.persistence.storageClass }}
+        storageClassName: {{ $.Values.persistence.storageClass }}
+        {{- end }}
+---
+{{- end }}
diff --git a/kubernetes/charts/database/values.yaml b/kubernetes/charts/database/values.yaml
new file mode 100644
index 0000000..8c43f0f
--- /dev/null
+++ b/kubernetes/charts/database/values.yaml
@@ -0,0 +1,25 @@
+# Centralized database configuration using pure PostgreSQL
+databases:
+  - name: rag-search-db
+    username: postgres
+    password: "{{ ragSearchDB.password }}" # NOTE(review): Helm does NOT template values.yaml — this literal placeholder is what secret.yaml b64-encodes; override at install time (e.g. --set)
+    db: rag-search
+    storage: 8Gi
+  - name: tim-postgresql
+    username: tim
+    password: "{{ TIMDB.password }}" # NOTE(review): same — not templated; must be overridden
+    db: tim
+    storage: 1Gi
+
+image:
+  repository: postgres
+  tag: "14.1"
+  pullPolicy: IfNotPresent
+
+service:
+  port: 5432
+
+persistence:
+  storageClass: "" # specify your own
+  accessModes: ["ReadWriteOnce"]
+
diff --git a/kubernetes/charts/minio/Chart.yaml b/kubernetes/charts/minio/Chart.yaml
new file mode 100644
index 0000000..e2bd6d5
--- /dev/null
+++ b/kubernetes/charts/minio/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: minio
+description: minio object storage server
+type: application
+version: 0.1.0
+appVersion: 2.0.0
\ No newline at end of file
diff --git a/kubernetes/charts/minio/templates/deployment-minio.yaml b/kubernetes/charts/minio/templates/deployment-minio.yaml
new file mode 100644
index 0000000..1ba2cc7
--- /dev/null
+++ b/kubernetes/charts/minio/templates/deployment-minio.yaml
@@ -0,0 +1,71 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: "{{ .Values.release_name }}"
+spec:
+  replicas: {{ .Values.replicas }}
+  selector:
+    matchLabels:
+      app: "{{ .Values.release_name }}"
+  template:
+    metadata:
+      labels:
+        app: "{{ .Values.release_name }}"
+    spec:
+      initContainers:
+        - name: create-buckets
+          image: busybox:latest
+          command:
+            - sh
+            - -c
+            - |
+              mkdir -p /data/rag-search/resources/langfuse
+              mkdir -p /data/rag-search/resources/models
+              mkdir -p /data/rag-search/resources/datasets
+              mkdir -p /data/rag-search/resources/qdrant
+              mkdir -p /data/rag-search/resources/system
+              echo "Bucket directories created successfully"
+          volumeMounts:
+            - name: minio-data
+              mountPath: /data
+      containers:
+        - name: "{{ .Values.release_name }}"
+          image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+          command:
+            - minio
+            - server
+            - /data
+            - --console-address
+            - ":9001"
+          ports:
+            - containerPort: {{ .Values.ports.api }}
+              name: api
+              protocol: TCP
+            - containerPort: {{ .Values.ports.console }}
+              name: console
+              protocol: TCP
+          # Non-sensitive env's from values.yaml
+          env:
+            {{- range $key, $value := .Values.env }}
+            - name: {{ $key }}
+              value: "{{ $value }}"
+            {{- end }}
+          # Sensitive env's from Kubernetes Secret
+          {{- if .Values.envFrom }}
+          envFrom:
+            {{- toYaml .Values.envFrom | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: minio-data
+              mountPath: /data
+          resources:
+            requests:
+              memory: "{{ .Values.resources.requests.memory }}"
+              cpu: "{{ .Values.resources.requests.cpu }}"
+            limits:
+              memory: "{{ .Values.resources.limits.memory }}"
+              cpu: "{{ .Values.resources.limits.cpu }}"
+      volumes:
+        - name: minio-data
+          persistentVolumeClaim:
+            claimName: pvc-minio-data
\ No newline at end of file
diff --git a/kubernetes/charts/minio/templates/ingress-minio.yaml b/kubernetes/charts/minio/templates/ingress-minio.yaml
new file mode 100644
index 0000000..390c93a
--- /dev/null
+++ b/kubernetes/charts/minio/templates/ingress-minio.yaml
@@ -0,0 +1,37 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: "{{ .Values.release_name }}-ingress"
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.ingress.kubernetes.io/enable-cors: "true"
+    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "10s"
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "600s"
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "600s"
+    nginx.ingress.kubernetes.io/cors-allow-origin: "*"
+    nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, PUT, DELETE, OPTIONS"
+    nginx.ingress.kubernetes.io/cors-allow-headers: "Origin, X-Requested-With, Content-Type, Cache-Control, Connection, Accept"
+    cert-manager.io/cluster-issuer: "letsencrypt-prod-issuer"
+  labels:
+    name: "{{ .Values.release_name }}-ingress"
+spec:
+  rules:
+    - host: "{{ .Values.ingress.host }}"
+      http:
+        paths:
+          - pathType: Prefix
+            path: "{{ .Values.ingress.path }}"
+            backend:
+              service:
+                name: "{{ .Values.release_name }}"
+                port:
+                  number: {{ .Values.ports.api }}
+  {{- if .Values.ingress.tls.enabled }}
+  tls:
+    - hosts:
+        - "{{ .Values.ingress.host }}"
+      secretName: "{{ .Values.secretname }}"
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/kubernetes/charts/minio/templates/pvc-minio-data.yaml b/kubernetes/charts/minio/templates/pvc-minio-data.yaml
new file mode 100644
index 0000000..2794e30
--- /dev/null
+++ b/kubernetes/charts/minio/templates/pvc-minio-data.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-minio-data
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: {{ .Values.volumes.minio_data.size }}
\ No newline at end of file
diff --git a/kubernetes/charts/minio/templates/secret.yaml b/kubernetes/charts/minio/templates/secret.yaml
new file mode 100644
index 0000000..a3d9c65
--- /dev/null
+++ b/kubernetes/charts/minio/templates/secret.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: minio-secrets
+  labels:
+    app: "{{ .Values.release_name }}"
+type: Opaque
+stringData:
+  MINIO_ROOT_USER: "" # NOTE(review): empty credential committed — must be supplied at deploy time; never commit real secrets
+  MINIO_ROOT_PASSWORD: "" # NOTE(review): same — confirm MinIO's behavior when root credentials are blank
diff --git a/kubernetes/charts/minio/templates/service-minio.yaml b/kubernetes/charts/minio/templates/service-minio.yaml
new file mode 100644
index 0000000..9f52de7
--- /dev/null
+++ b/kubernetes/charts/minio/templates/service-minio.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: "{{ .Values.release_name }}"
+spec:
+  selector:
+    app: "{{ .Values.release_name }}"
+  ports:
+    - port: {{ .Values.ports.api }}
+      targetPort: api
+      protocol: TCP
+      name: api
+    - port: {{ .Values.ports.console }}
+      targetPort: console
+      protocol: TCP
+      name: console
\ No newline at end of file
diff --git a/kubernetes/charts/minio/values.yaml b/kubernetes/charts/minio/values.yaml
new file mode 100644
index 0000000..f2f7c7d
--- /dev/null
+++ b/kubernetes/charts/minio/values.yaml
@@ -0,0 +1,44 @@
+release_name: "minio"
+
+image:
+  registry: "docker.io"
+  repository: "minio/minio"
+  tag: "latest" # NOTE(review): pin a specific MinIO release for reproducible deploys
+
+replicas: 1
+
+resources:
+  requests:
+    memory: "500Mi"
+    cpu: "250m"
+  limits:
+    memory: "1Gi"
+    cpu: "500m"
+
+env:
+  MINIO_BROWSER_REDIRECT_URL: "http://localhost:9001"
+
+# Reference to Kubernetes Secret
+envFrom:
+  - secretRef:
+      name: minio-secrets
+
+volumes:
+  minio_data:
+    type: pvc
+    size: "5Gi"
+
+ports:
+  api: 9000
+  console: 9001
+
+ingress:
+  enabled: true
+  host: "domain" # NOTE(review): placeholder — set the real hostname per environment
+  path: "/"
+  tls:
+    enabled: true
+secretname: "minio-tls"
+
+istio:
+  enabled: false
\ No newline at end of file