From ca1d913d06ba93fcd31ea1a47d6c487dc9421d84 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Mon, 19 Jan 2026 11:02:42 +0530 Subject: [PATCH 1/3] helm charts for byk and rag components --- kubernetes/CONTAINER_REGISTRY_SETUP.md | 35 ++++ kubernetes/Chart.lock | 75 +++++++ kubernetes/Chart.yaml | 80 ++++++++ .../charts/Authentication-Layer/Chart.yaml | 6 + .../deployment-byk-authentication-layer.yaml | 34 ++++ .../ingress-byk-authentication-layer.yaml | 29 +++ .../service-byk-authentication-layer.yaml | 17 ++ .../charts/Authentication-Layer/values.yaml | 35 ++++ kubernetes/charts/ClickHouse/Chart.yaml | 6 + .../templates/deployment-byk-clickhouse.yaml | 86 ++++++++ .../ClickHouse/templates/pvc-clickhouse.yaml | 37 ++++ .../templates/service-byk-clickhouse.yaml | 22 +++ kubernetes/charts/ClickHouse/values.yaml | 60 ++++++ kubernetes/charts/CronManager/Chart.yaml | 6 + .../configmap-cronmanager-config.yaml | 15 ++ .../templates/deployment-byk-cronmanager.yaml | 103 ++++++++++ .../templates/pvc-cronmanager.yaml | 17 ++ .../templates/service-byk-cronmanager.yaml | 17 ++ kubernetes/charts/CronManager/values.yaml | 43 ++++ kubernetes/charts/DataMapper/Chart.yaml | 6 + .../templates/deployment-byk-data-mapper.yaml | 63 ++++++ .../templates/service-byk-data-mapper.yaml | 17 ++ kubernetes/charts/DataMapper/values.yaml | 31 +++ kubernetes/charts/GUI/Chart.yaml | 6 + .../GUI/templates/deployment-byk-gui.yaml | 87 ++++++++ .../charts/GUI/templates/ingress-byk-gui.yaml | 20 ++ .../charts/GUI/templates/service-byk-gui.yaml | 15 ++ kubernetes/charts/GUI/values.yaml | 62 ++++++ kubernetes/charts/Grafana/Chart.yaml | 6 + .../grafana-dashboard-deployment.json | 167 ++++++++++++++++ .../templates/configmap-dashboards.yaml | 11 ++ .../Grafana/templates/configmap-grafana.yaml | 39 ++++ .../Grafana/templates/deployment-grafana.yaml | 73 +++++++ .../charts/Grafana/templates/pvc-grafana.yaml | 17 ++ .../Grafana/templates/service-grafana.yaml | 15 ++ kubernetes/charts/Grafana/values.yaml | 62 ++++++ .../LLM-Orchestration-Service/Chart.yaml | 6 + .../deployment-byk-llm-orchestration.yaml | 166 ++++++++++++++++ .../templates/pvc-volumes.yaml | 61 ++++++ .../service-byk-llm-orchestration.yaml | 18 ++ .../LLM-Orchestration-Service/values.yaml | 86 ++++++++ kubernetes/charts/Langfuse-Web/Chart.yaml | 6 + .../deployment-byk-langfuse-web.yaml | 59 ++++++ .../templates/service-byk-langfuse-web.yaml | 18 ++ kubernetes/charts/Langfuse-Web/values.yaml | 104 ++++++++++ kubernetes/charts/Langfuse-Worker/Chart.yaml | 6 + .../deployment-byk-langfuse-worker.yaml | 59 ++++++ .../service-byk-langfuse-worker.yaml | 18 ++ kubernetes/charts/Langfuse-Worker/values.yaml | 91 +++++++++ kubernetes/charts/Liquibase/Chart.yaml | 6 + .../Liquibase/templates/liquibase-job.yaml | 60 ++++++ kubernetes/charts/Liquibase/values.yaml | 21 ++ kubernetes/charts/Loki/Chart.yaml | 6 + .../charts/Loki/templates/configmap-loki.yaml | 9 + .../Loki/templates/deployment-loki.yaml | 43 ++++ .../charts/Loki/templates/pvc-loki.yaml | 17 ++ .../charts/Loki/templates/service-loki.yaml | 17 ++ kubernetes/charts/Loki/values.yaml | 85 ++++++++ kubernetes/charts/Qdrant/Chart.yaml | 6 + .../Qdrant/templates/service-byk-qdrant.yaml | 31 +++ .../templates/statefulset-byk-qdrant.yaml | 82 ++++++++ kubernetes/charts/Qdrant/values.yaml | 50 +++++ kubernetes/charts/Redis/Chart.yaml | 6 + .../Redis/templates/deployment-byk-redis.yaml | 68 +++++++ .../Redis/templates/service-byk-redis.yaml | 18 ++ kubernetes/charts/Redis/values.yaml | 40 ++++ 
kubernetes/charts/Resql/Chart.yaml | 6 + .../Resql/templates/deployment-byk-resql.yaml | 68 +++++++ .../Resql/templates/service-byk-resql.yaml | 14 ++ kubernetes/charts/Resql/values.yaml | 32 +++ kubernetes/charts/Ruuter-Private/Chart.yaml | 6 + .../configmap-byk-ruuter-private.yaml | 19 ++ .../deployment-byk-ruuter-private.yaml | 88 +++++++++ .../templates/ingress-ruuter-private.yaml | 46 +++++ .../templates/service-byk-ruuter-private.yaml | 17 ++ kubernetes/charts/Ruuter-Private/values.yaml | 56 ++++++ kubernetes/charts/Ruuter-Public/Chart.yaml | 6 + .../configmap-byk-ruuter-public.yaml | 19 ++ .../deployment-byk-ruuter-public.yaml | 87 ++++++++ .../templates/ingress-ruuter-public.yaml | 45 +++++ .../templates/service-byk-ruuter-public.yaml | 18 ++ kubernetes/charts/Ruuter-Public/values.yaml | 51 +++++ kubernetes/charts/S3-Ferry/Chart.yaml | 6 + .../S3-Ferry/templates/configmap-s3.yaml | 10 + .../S3-Ferry/templates/deployment-s3.yaml | 50 +++++ .../charts/S3-Ferry/templates/pvc-s3.yaml | 36 ++++ .../charts/S3-Ferry/templates/service-s3.yaml | 15 ++ kubernetes/charts/S3-Ferry/values.yaml | 61 ++++++ kubernetes/charts/TIM-database/Chart.yaml | 6 + .../templates/deployment-byk-timdb.yaml | 42 ++++ .../TIM-database/templates/pvc-byk-timdb.yaml | 19 ++ .../templates/secret-byk-timdb.yaml | 9 + .../templates/service-byk-timdb.yaml | 15 ++ kubernetes/charts/TIM-database/values.yaml | 29 +++ kubernetes/charts/TIM/Chart.yaml | 6 + .../TIM/templates/configmap-byk-tim.yaml | 56 ++++++ .../TIM/templates/deployment-byk-tim.yaml | 46 +++++ kubernetes/charts/TIM/templates/ingress.yaml | 30 +++ .../charts/TIM/templates/secret-byk-tim.yaml | 9 + .../charts/TIM/templates/service-byk-tim.yaml | 15 ++ kubernetes/charts/TIM/values.yaml | 45 +++++ kubernetes/charts/Vault-Agent-LLM/Chart.yaml | 7 + .../Vault-Agent-LLM/templates/configmap.yaml | 48 +++++ .../Vault-Agent-LLM/templates/deployment.yaml | 101 ++++++++++ kubernetes/charts/Vault-Agent-LLM/values.yaml | 101 ++++++++++ kubernetes/charts/Vault-Init/Chart.yaml | 6 + .../Vault-Init/templates/configmap.yaml | 186 ++++++++++++++++++ .../charts/Vault-Init/templates/job.yaml | 93 +++++++++ .../charts/Vault-Init/templates/pvc.yaml | 37 ++++ kubernetes/charts/Vault-Init/values.yaml | 60 ++++++ kubernetes/charts/Vault/Chart.yaml | 6 + .../charts/Vault/templates/configmap.yaml | 66 +++++++ .../Vault/templates/service-byk-vault.yaml | 23 +++ .../templates/statefulset-byk-vault.yaml | 122 ++++++++++++ kubernetes/charts/Vault/values.yaml | 71 +++++++ kubernetes/charts/database/Chart.lock | 6 + kubernetes/charts/database/Chart.yaml | 12 ++ kubernetes/charts/database/values.yaml | 14 ++ kubernetes/charts/minio/Chart.yaml | 6 + .../minio/templates/deployment-minio.yaml | 65 ++++++ .../charts/minio/templates/ingress-minio.yaml | 37 ++++ .../minio/templates/pvc-minio-data.yaml | 10 + .../charts/minio/templates/service-minio.yaml | 16 ++ kubernetes/charts/minio/values.yaml | 41 ++++ kubernetes/dashboard-admin.yaml | 18 ++ kubernetes/values.yaml | 78 ++++++++ 126 files changed, 4996 insertions(+) create mode 100644 kubernetes/CONTAINER_REGISTRY_SETUP.md create mode 100644 kubernetes/Chart.lock create mode 100644 kubernetes/Chart.yaml create mode 100644 kubernetes/charts/Authentication-Layer/Chart.yaml create mode 100644 kubernetes/charts/Authentication-Layer/templates/deployment-byk-authentication-layer.yaml create mode 100644 kubernetes/charts/Authentication-Layer/templates/ingress-byk-authentication-layer.yaml create mode 100644 
kubernetes/charts/Authentication-Layer/templates/service-byk-authentication-layer.yaml create mode 100644 kubernetes/charts/Authentication-Layer/values.yaml create mode 100644 kubernetes/charts/ClickHouse/Chart.yaml create mode 100644 kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml create mode 100644 kubernetes/charts/ClickHouse/templates/pvc-clickhouse.yaml create mode 100644 kubernetes/charts/ClickHouse/templates/service-byk-clickhouse.yaml create mode 100644 kubernetes/charts/ClickHouse/values.yaml create mode 100644 kubernetes/charts/CronManager/Chart.yaml create mode 100644 kubernetes/charts/CronManager/templates/configmap-cronmanager-config.yaml create mode 100644 kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml create mode 100644 kubernetes/charts/CronManager/templates/pvc-cronmanager.yaml create mode 100644 kubernetes/charts/CronManager/templates/service-byk-cronmanager.yaml create mode 100644 kubernetes/charts/CronManager/values.yaml create mode 100644 kubernetes/charts/DataMapper/Chart.yaml create mode 100644 kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml create mode 100644 kubernetes/charts/DataMapper/templates/service-byk-data-mapper.yaml create mode 100644 kubernetes/charts/DataMapper/values.yaml create mode 100644 kubernetes/charts/GUI/Chart.yaml create mode 100644 kubernetes/charts/GUI/templates/deployment-byk-gui.yaml create mode 100644 kubernetes/charts/GUI/templates/ingress-byk-gui.yaml create mode 100644 kubernetes/charts/GUI/templates/service-byk-gui.yaml create mode 100644 kubernetes/charts/GUI/values.yaml create mode 100644 kubernetes/charts/Grafana/Chart.yaml create mode 100644 kubernetes/charts/Grafana/dashboards/grafana-dashboard-deployment.json create mode 100644 kubernetes/charts/Grafana/templates/configmap-dashboards.yaml create mode 100644 kubernetes/charts/Grafana/templates/configmap-grafana.yaml create mode 100644 kubernetes/charts/Grafana/templates/deployment-grafana.yaml create mode 100644 kubernetes/charts/Grafana/templates/pvc-grafana.yaml create mode 100644 kubernetes/charts/Grafana/templates/service-grafana.yaml create mode 100644 kubernetes/charts/Grafana/values.yaml create mode 100644 kubernetes/charts/LLM-Orchestration-Service/Chart.yaml create mode 100644 kubernetes/charts/LLM-Orchestration-Service/templates/deployment-byk-llm-orchestration.yaml create mode 100644 kubernetes/charts/LLM-Orchestration-Service/templates/pvc-volumes.yaml create mode 100644 kubernetes/charts/LLM-Orchestration-Service/templates/service-byk-llm-orchestration.yaml create mode 100644 kubernetes/charts/LLM-Orchestration-Service/values.yaml create mode 100644 kubernetes/charts/Langfuse-Web/Chart.yaml create mode 100644 kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml create mode 100644 kubernetes/charts/Langfuse-Web/templates/service-byk-langfuse-web.yaml create mode 100644 kubernetes/charts/Langfuse-Web/values.yaml create mode 100644 kubernetes/charts/Langfuse-Worker/Chart.yaml create mode 100644 kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml create mode 100644 kubernetes/charts/Langfuse-Worker/templates/service-byk-langfuse-worker.yaml create mode 100644 kubernetes/charts/Langfuse-Worker/values.yaml create mode 100644 kubernetes/charts/Liquibase/Chart.yaml create mode 100644 kubernetes/charts/Liquibase/templates/liquibase-job.yaml create mode 100644 kubernetes/charts/Liquibase/values.yaml create mode 100644 kubernetes/charts/Loki/Chart.yaml create 
mode 100644 kubernetes/charts/Loki/templates/configmap-loki.yaml create mode 100644 kubernetes/charts/Loki/templates/deployment-loki.yaml create mode 100644 kubernetes/charts/Loki/templates/pvc-loki.yaml create mode 100644 kubernetes/charts/Loki/templates/service-loki.yaml create mode 100644 kubernetes/charts/Loki/values.yaml create mode 100644 kubernetes/charts/Qdrant/Chart.yaml create mode 100644 kubernetes/charts/Qdrant/templates/service-byk-qdrant.yaml create mode 100644 kubernetes/charts/Qdrant/templates/statefulset-byk-qdrant.yaml create mode 100644 kubernetes/charts/Qdrant/values.yaml create mode 100644 kubernetes/charts/Redis/Chart.yaml create mode 100644 kubernetes/charts/Redis/templates/deployment-byk-redis.yaml create mode 100644 kubernetes/charts/Redis/templates/service-byk-redis.yaml create mode 100644 kubernetes/charts/Redis/values.yaml create mode 100644 kubernetes/charts/Resql/Chart.yaml create mode 100644 kubernetes/charts/Resql/templates/deployment-byk-resql.yaml create mode 100644 kubernetes/charts/Resql/templates/service-byk-resql.yaml create mode 100644 kubernetes/charts/Resql/values.yaml create mode 100644 kubernetes/charts/Ruuter-Private/Chart.yaml create mode 100644 kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml create mode 100644 kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml create mode 100644 kubernetes/charts/Ruuter-Private/templates/ingress-ruuter-private.yaml create mode 100644 kubernetes/charts/Ruuter-Private/templates/service-byk-ruuter-private.yaml create mode 100644 kubernetes/charts/Ruuter-Private/values.yaml create mode 100644 kubernetes/charts/Ruuter-Public/Chart.yaml create mode 100644 kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml create mode 100644 kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml create mode 100644 kubernetes/charts/Ruuter-Public/templates/ingress-ruuter-public.yaml create mode 100644 kubernetes/charts/Ruuter-Public/templates/service-byk-ruuter-public.yaml create mode 100644 kubernetes/charts/Ruuter-Public/values.yaml create mode 100644 kubernetes/charts/S3-Ferry/Chart.yaml create mode 100644 kubernetes/charts/S3-Ferry/templates/configmap-s3.yaml create mode 100644 kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml create mode 100644 kubernetes/charts/S3-Ferry/templates/pvc-s3.yaml create mode 100644 kubernetes/charts/S3-Ferry/templates/service-s3.yaml create mode 100644 kubernetes/charts/S3-Ferry/values.yaml create mode 100644 kubernetes/charts/TIM-database/Chart.yaml create mode 100644 kubernetes/charts/TIM-database/templates/deployment-byk-timdb.yaml create mode 100644 kubernetes/charts/TIM-database/templates/pvc-byk-timdb.yaml create mode 100644 kubernetes/charts/TIM-database/templates/secret-byk-timdb.yaml create mode 100644 kubernetes/charts/TIM-database/templates/service-byk-timdb.yaml create mode 100644 kubernetes/charts/TIM-database/values.yaml create mode 100644 kubernetes/charts/TIM/Chart.yaml create mode 100644 kubernetes/charts/TIM/templates/configmap-byk-tim.yaml create mode 100644 kubernetes/charts/TIM/templates/deployment-byk-tim.yaml create mode 100644 kubernetes/charts/TIM/templates/ingress.yaml create mode 100644 kubernetes/charts/TIM/templates/secret-byk-tim.yaml create mode 100644 kubernetes/charts/TIM/templates/service-byk-tim.yaml create mode 100644 kubernetes/charts/TIM/values.yaml create mode 100644 kubernetes/charts/Vault-Agent-LLM/Chart.yaml create mode 100644 
kubernetes/charts/Vault-Agent-LLM/templates/configmap.yaml create mode 100644 kubernetes/charts/Vault-Agent-LLM/templates/deployment.yaml create mode 100644 kubernetes/charts/Vault-Agent-LLM/values.yaml create mode 100644 kubernetes/charts/Vault-Init/Chart.yaml create mode 100644 kubernetes/charts/Vault-Init/templates/configmap.yaml create mode 100644 kubernetes/charts/Vault-Init/templates/job.yaml create mode 100644 kubernetes/charts/Vault-Init/templates/pvc.yaml create mode 100644 kubernetes/charts/Vault-Init/values.yaml create mode 100644 kubernetes/charts/Vault/Chart.yaml create mode 100644 kubernetes/charts/Vault/templates/configmap.yaml create mode 100644 kubernetes/charts/Vault/templates/service-byk-vault.yaml create mode 100644 kubernetes/charts/Vault/templates/statefulset-byk-vault.yaml create mode 100644 kubernetes/charts/Vault/values.yaml create mode 100644 kubernetes/charts/database/Chart.lock create mode 100644 kubernetes/charts/database/Chart.yaml create mode 100644 kubernetes/charts/database/values.yaml create mode 100644 kubernetes/charts/minio/Chart.yaml create mode 100644 kubernetes/charts/minio/templates/deployment-minio.yaml create mode 100644 kubernetes/charts/minio/templates/ingress-minio.yaml create mode 100644 kubernetes/charts/minio/templates/pvc-minio-data.yaml create mode 100644 kubernetes/charts/minio/templates/service-minio.yaml create mode 100644 kubernetes/charts/minio/values.yaml create mode 100644 kubernetes/dashboard-admin.yaml create mode 100644 kubernetes/values.yaml
diff --git a/kubernetes/CONTAINER_REGISTRY_SETUP.md b/kubernetes/CONTAINER_REGISTRY_SETUP.md
new file mode 100644
index 00000000..d88f16b6
--- /dev/null
+++ b/kubernetes/CONTAINER_REGISTRY_SETUP.md
@@ -0,0 +1,35 @@
+# Container Registry Setup Guide
+
+This guide explains which container images need to be pushed to your container registry.
+
+## Overview
+
+The RAG Module consists of multiple container images that need to be pushed to your container registry. Currently, we use ECR for testing, but you should push the images to your own registry before deployment.
+
+
+
+## Step 1: Build Container Images
+
+Build all required images from the repository root:
+
+### **1.1 GUI (Frontend)**
+
+```bash
+cd GUI
+docker build -t rag-module/gui:latest -f Dockerfile.dev .
+cd ..
+```
+
+Update the `image.repository` value in the GUI chart's Helm values with the actual image reference.
+
+### **1.2 LLM Orchestration Service**
+
+```bash
+docker build -t rag-module/llm-orchestration-service:latest -f Dockerfile.llm_orchestration_service .
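+
+# Example only: tag and push the image to your own registry. The registry URL
+# below is a placeholder (not defined in this repo); substitute your actual
+# registry URI (e.g. your ECR or GCR repository) before running these commands.
+REGISTRY=registry.example.com/rag-module
+docker tag rag-module/llm-orchestration-service:latest "${REGISTRY}/llm-orchestration-service:latest"
+docker push "${REGISTRY}/llm-orchestration-service:latest"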
+``` +update the LLM Orchestration Service helms values image: repository section with actual image (there are two places to update in this file) + +### **1.3 Authentication Layer** + + + diff --git a/kubernetes/Chart.lock b/kubernetes/Chart.lock new file mode 100644 index 00000000..aa9d953c --- /dev/null +++ b/kubernetes/Chart.lock @@ -0,0 +1,75 @@ +dependencies: +- name: database + repository: file://./charts/database + version: 0.1.0 +- name: TIM-database + repository: file://./charts/TIM-database + version: 0.1.0 +- name: resql + repository: file://./charts/Resql + version: 0.1.0 +- name: ruuter-public + repository: file://./charts/Ruuter-Public + version: 0.1.0 +- name: ruuter-private + repository: file://./charts/Ruuter-Private + version: 0.1.0 +- name: data-mapper + repository: file://./charts/DataMapper + version: 0.1.0 +- name: TIM + repository: file://./charts/TIM + version: 0.1.0 +- name: Authentication-Layer + repository: file://./charts/Authentication-Layer + version: 0.1.0 +- name: CronManager + repository: file://./charts/CronManager + version: 0.1.0 +- name: GUI + repository: file://./charts/GUI + version: 0.1.0 +- name: Loki + repository: file://./charts/Loki + version: 0.1.0 +- name: Grafana + repository: file://./charts/Grafana + version: 0.1.0 +- name: S3-Ferry + repository: file://./charts/S3-Ferry + version: 0.1.0 +- name: minio + repository: file://./charts/minio + version: 0.1.0 +- name: Redis + repository: file://./charts/Redis + version: 0.1.0 +- name: Qdrant + repository: file://./charts/Qdrant + version: 0.1.0 +- name: ClickHouse + repository: file://./charts/ClickHouse + version: 0.1.0 +- name: Langfuse-Web + repository: file://./charts/Langfuse-Web + version: 0.1.0 +- name: Langfuse-Worker + repository: file://./charts/Langfuse-Worker + version: 0.1.0 +- name: Vault + repository: file://./charts/Vault + version: 0.1.0 +- name: Vault-Init + repository: file://./charts/Vault-Init + version: 0.1.0 +- name: Vault-Agent-LLM + repository: file://./charts/Vault-Agent-LLM + version: 0.1.0 +- name: LLM-Orchestration-Service + repository: file://./charts/LLM-Orchestration-Service + version: 0.1.0 +- name: Liquibase + repository: file://./charts/Liquibase + version: 0.1.0 +digest: sha256:ebf9bd6c7a999f2ab58598fdfff371579d6c7ca17d35e87fc8200668c2ae493e +generated: "2025-12-02T13:11:13.8392479+05:30" diff --git a/kubernetes/Chart.yaml b/kubernetes/Chart.yaml new file mode 100644 index 00000000..698d4b9e --- /dev/null +++ b/kubernetes/Chart.yaml @@ -0,0 +1,80 @@ +apiVersion: v2 +name: rag-module +description: Umbrella chart for RAG Module +version: 0.1.0 +type: application + +dependencies: + - name: database + version: 0.1.0 + repository: "file://./charts/database" + - name: TIM-database + version: 0.1.0 + repository: "file://./charts/TIM-database" + - name: resql + version: 0.1.0 + repository: "file://./charts/Resql" + - name: ruuter-public + version: 0.1.0 + repository: "file://./charts/Ruuter-Public" + - name: ruuter-private + version: 0.1.0 + repository: "file://./charts/Ruuter-Private" + - name: data-mapper + version: 0.1.0 + repository: "file://./charts/DataMapper" + - name: TIM + version: 0.1.0 + repository: "file://./charts/TIM" + - name: Authentication-Layer + version: 0.1.0 + repository: "file://./charts/Authentication-Layer" + - name: CronManager + version: 0.1.0 + repository: "file://./charts/CronManager" + - name: GUI + version: 0.1.0 + repository: "file://./charts/GUI" + - name: Loki + version: 0.1.0 + repository: "file://./charts/Loki" + - name: Grafana + 
version: 0.1.0 + repository: "file://./charts/Grafana" + - name: S3-Ferry + version: 0.1.0 + repository: "file://./charts/S3-Ferry" + - name: minio + version: 0.1.0 + repository: "file://./charts/minio" + - name: Redis + version: 0.1.0 + repository: "file://./charts/Redis" + - name: Qdrant + version: 0.1.0 + repository: "file://./charts/Qdrant" + - name: ClickHouse + version: 0.1.0 + repository: "file://./charts/ClickHouse" + - name: Langfuse-Web + version: 0.1.0 + repository: "file://./charts/Langfuse-Web" + - name: Langfuse-Worker + version: 0.1.0 + repository: "file://./charts/Langfuse-Worker" + - name: Vault + version: 0.1.0 + repository: "file://./charts/Vault" + - name: Vault-Init + version: 0.1.0 + repository: "file://./charts/Vault-Init" + - name: Vault-Agent-LLM + version: 0.1.0 + repository: "file://./charts/Vault-Agent-LLM" + - name: LLM-Orchestration-Service + version: 0.1.0 + repository: "file://./charts/LLM-Orchestration-Service" + - name: Liquibase + version: 0.1.0 + repository: "file://./charts/Liquibase" + diff --git a/kubernetes/charts/Authentication-Layer/Chart.yaml b/kubernetes/charts/Authentication-Layer/Chart.yaml new file mode 100644 index 00000000..649d153c --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Authentication-Layer +description: Authentication Layer Service for RAG +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Authentication-Layer/templates/deployment-byk-authentication-layer.yaml b/kubernetes/charts/Authentication-Layer/templates/deployment-byk-authentication-layer.yaml new file mode 100644 index 00000000..e3c1c6f9 --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/templates/deployment-byk-authentication-layer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.authentication.image.repository }}:{{ .Values.authentication.image.tag }}" + imagePullPolicy: {{ .Values.authentication.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + env: + - name: PORT + value: {{ .Values.authentication.environment.serverPort | quote }} + - name: TIM_SERVICE_URL + value: {{ .Values.authentication.environment.timServiceUrl | quote }} + - name: CORS_ORIGINS + value: {{ .Values.authentication.environment.corsOrigins | quote }} + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Authentication-Layer/templates/ingress-byk-authentication-layer.yaml b/kubernetes/charts/Authentication-Layer/templates/ingress-byk-authentication-layer.yaml new file mode 100644 index 00000000..bf443fd2 --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/templates/ingress-byk-authentication-layer.yaml @@ -0,0 +1,29 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: "{{ .Values.release_name }}-ingress" + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: {{ .Values.ingress.certIssuerName | quote }} + labels: + name: 
"{{ .Values.release_name }}-ingress" +spec: + rules: + - host: auth.{{ .Values.domain }} + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: "{{ .Values.release_name }}" + port: + number: 3004 + tls: + - hosts: + - auth.{{ .Values.domain }} + secretName: {{ .Values.secretname }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Authentication-Layer/templates/service-byk-authentication-layer.yaml b/kubernetes/charts/Authentication-Layer/templates/service-byk-authentication-layer.yaml new file mode 100644 index 00000000..a17b39d2 --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/templates/service-byk-authentication-layer.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Authentication-Layer/values.yaml b/kubernetes/charts/Authentication-Layer/values.yaml new file mode 100644 index 00000000..544da2b9 --- /dev/null +++ b/kubernetes/charts/Authentication-Layer/values.yaml @@ -0,0 +1,35 @@ +replicas: 1 +enabled: true + + +release_name: "authentication-layer" +domain: "rag.local" # need to set this +secretname: "authentication-layer-tls" + +ingress: + enabled: true + certIssuerName: "letsencrypt-prod" + +# Authentication Layer Configuration +authentication: + image: + repository: "ghcr.io/buerokratt/authentication-layer" # Update with actual auth-layer image repository + tag: "latest" + pullPolicy: Always + + environment: + serverPort: "3004" + timServiceUrl: "http://tim:8085" + corsOrigins: "http://localhost:3001,http://localhost:3003,http://localhost:8086" + +service: + type: ClusterIP + port: 3004 + +resources: + requests: + memory: "10Mi" + cpu: "1m" + limits: + memory: "50Mi" + cpu: "5m" diff --git a/kubernetes/charts/ClickHouse/Chart.yaml b/kubernetes/charts/ClickHouse/Chart.yaml new file mode 100644 index 00000000..60e9ced1 --- /dev/null +++ b/kubernetes/charts/ClickHouse/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: ClickHouse +description: ClickHouse analytics database for Langfuse +type: application +version: 0.1.0 +appVersion: "latest" \ No newline at end of file diff --git a/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml b/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml new file mode 100644 index 00000000..1deb38f2 --- /dev/null +++ b/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml @@ -0,0 +1,86 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: clickhouse +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: clickhouse + spec: + {{- if .Values.securityContext }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.clickhouse.registry }}/{{ .Values.images.clickhouse.repository }}:{{ 
.Values.images.clickhouse.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.httpPort }} + protocol: TCP + - name: native + containerPort: {{ .Values.service.nativePort }} + protocol: TCP + env: + - name: CLICKHOUSE_DB + value: "{{ .Values.env.CLICKHOUSE_DB }}" + - name: CLICKHOUSE_USER + value: "{{ .Values.env.CLICKHOUSE_USER }}" + - name: CLICKHOUSE_PASSWORD + value: "{{ .Values.env.CLICKHOUSE_PASSWORD }}" + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.httpPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.httpPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeMounts: + - name: langfuse_clickhouse_data + mountPath: /var/lib/clickhouse + - name: langfuse_clickhouse_logs + mountPath: /var/log/clickhouse-server + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + {{- if .Values.persistence.enabled }} + volumes: + - name: langfuse_clickhouse_data + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-data" + - name: langfuse_clickhouse_logs + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-logs" + {{- end }} + restartPolicy: Always +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/ClickHouse/templates/pvc-clickhouse.yaml b/kubernetes/charts/ClickHouse/templates/pvc-clickhouse.yaml new file mode 100644 index 00000000..910b761e --- /dev/null +++ b/kubernetes/charts/ClickHouse/templates/pvc-clickhouse.yaml @@ -0,0 +1,37 @@ +{{- if and .Values.enabled .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-data" + labels: + app: "{{ .Values.release_name }}" + component: clickhouse + type: data +spec: + accessModes: + - {{ .Values.persistence.data.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.data.size }} + {{- if .Values.persistence.data.storageClass }} + storageClassName: {{ .Values.persistence.data.storageClass }} + {{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-logs" + labels: + app: "{{ .Values.release_name }}" + component: clickhouse + type: logs +spec: + accessModes: + - {{ .Values.persistence.logs.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.logs.size }} + {{- if .Values.persistence.logs.storageClass }} + storageClassName: {{ .Values.persistence.logs.storageClass }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/ClickHouse/templates/service-byk-clickhouse.yaml b/kubernetes/charts/ClickHouse/templates/service-byk-clickhouse.yaml new file mode 100644 index 00000000..1610d18b --- /dev/null +++ 
b/kubernetes/charts/ClickHouse/templates/service-byk-clickhouse.yaml @@ -0,0 +1,22 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: clickhouse +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.httpPort }} + targetPort: {{ .Values.service.httpPort }} + - name: native + protocol: TCP + port: {{ .Values.service.nativePort }} + targetPort: {{ .Values.service.nativePort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/ClickHouse/values.yaml b/kubernetes/charts/ClickHouse/values.yaml new file mode 100644 index 00000000..7d181c4a --- /dev/null +++ b/kubernetes/charts/ClickHouse/values.yaml @@ -0,0 +1,60 @@ +replicas: 1 +enabled: true + +images: + clickhouse: + registry: "docker.io" + repository: "clickhouse/clickhouse-server" + tag: "latest" + +release_name: "clickhouse" + +service: + type: ClusterIP + # ClickHouse HTTP interface port + httpPort: 8123 + # ClickHouse native protocol port + nativePort: 9000 + +# Environment variables +env: + CLICKHOUSE_DB: "default" + CLICKHOUSE_USER: "clickhouse" + CLICKHOUSE_PASSWORD: "changeme" + +# Security context +securityContext: + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + +persistence: + enabled: true + data: + storageClass: "" + accessMode: ReadWriteOnce + size: 10Gi + logs: + storageClass: "" + accessMode: ReadWriteOnce + size: 5Gi + +resources: + requests: + memory: "512Mi" + cpu: "100m" + limits: + memory: "2Gi" + cpu: "500m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + # HTTP endpoint for health check + httpPath: "/ping" \ No newline at end of file diff --git a/kubernetes/charts/CronManager/Chart.yaml b/kubernetes/charts/CronManager/Chart.yaml new file mode 100644 index 00000000..31b14b5f --- /dev/null +++ b/kubernetes/charts/CronManager/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: CronManager +description: CronManager Service for RAG +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/CronManager/templates/configmap-cronmanager-config.yaml b/kubernetes/charts/CronManager/templates/configmap-cronmanager-config.yaml new file mode 100644 index 00000000..a60d8aca --- /dev/null +++ b/kubernetes/charts/CronManager/templates/configmap-cronmanager-config.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Values.release_name }}-config" + labels: + app: "{{ .Values.release_name }}" +data: + constants.ini: | + + RAG_MODULE_RUUTER_PRIVATE={{ .Values.constants.RAG_MODULE_RUUTER_PRIVATE }} + RAG_MODULE_RUUTER_PUBLIC={{ .Values.constants.RAG_MODULE_RUUTER_PUBLIC }} + RAG_MODULE_RESQL={{ .Values.constants.RAG_MODULE_RESQL }} + RAG_MODULE_TIM={{ .Values.constants.RAG_MODULE_TIM }} + RAG_MODULE_DATAMAPPER={{ .Values.constants.RAG_MODULE_DATAMAPPER }} + \ No newline at end of file diff --git a/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml b/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml new file mode 100644 index 00000000..0fbfc953 --- /dev/null +++ b/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml @@ -0,0 +1,103 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name 
}}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + initContainers: + - name: git-clone + image: alpine/git:latest + securityContext: + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: dsl + mountPath: /DSL + - name: scripts + mountPath: /app/scripts + - name: vector-indexer + mountPath: /app/src/vector_indexer + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + mkdir -p /app/src/vector_indexer && + mkdir -p /app/scripts && + mkdir -p /DSL + + cp -r /tmp/rag/DSL/CronManager/DSL/* /DSL/ && + cp -r /tmp/rag/DSL/CronManager/script/* /app/scripts/ && + cp -r /tmp/rag/src/vector_indexer/* /app/src/vector_indexer/ && + + # Set execute permissions on all shell scripts + chmod +x /app/scripts/*.sh && + echo "Scripts copied and permissions set successfully" + + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.cronmanager.image.registry }}/{{ .Values.cronmanager.image.repository }}:{{ .Values.cronmanager.image.tag }}" + imagePullPolicy: {{ .Values.cronmanager.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.cronmanager.environment.containerPort }} + protocol: TCP + env: + - name: PYTHONPATH + value: {{ .Values.cronmanager.environment.pythonPath | quote }} + - name: VAULT_ADDR + value: {{ .Values.cronmanager.environment.VAULT_ADDR | quote }} + - name: RAG_MODULE_RUUTER_PRIVATE + value: {{ .Values.constants.RAG_MODULE_RUUTER_PRIVATE | quote }} + - name: RAG_MODULE_RESQL + value: {{ .Values.constants.RAG_MODULE_RESQL | quote }} + - name: RAG_MODULE_TIM + value: {{ .Values.constants.RAG_MODULE_TIM | quote }} + - name: UV_VERBOSE + value: "1" + + volumeMounts: + - name: dsl + mountPath: /DSL + - name: cronmanager-data + mountPath: /app/data + - name: scripts + mountPath: /app/scripts + - name: vector-indexer + mountPath: /app/src/vector_indexer + - name: datasets + mountPath: /app/datasets + + volumes: + - name: dsl + emptyDir: {} + - name: scripts + emptyDir: {} + - name: vector-indexer + emptyDir: {} + - name: datasets + emptyDir: {} + - name: cronmanager-data + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-data" + - name: config-volume + configMap: + name: "{{ .Values.release_name }}-config" + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/CronManager/templates/pvc-cronmanager.yaml b/kubernetes/charts/CronManager/templates/pvc-cronmanager.yaml new file mode 100644 index 00000000..f278883c --- /dev/null +++ b/kubernetes/charts/CronManager/templates/pvc-cronmanager.yaml @@ -0,0 +1,17 @@ +{{- if .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-data" + labels: + app: "{{ .Values.release_name }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/CronManager/templates/service-byk-cronmanager.yaml b/kubernetes/charts/CronManager/templates/service-byk-cronmanager.yaml new file mode 100644 index 00000000..c6d67227 --- /dev/null +++ 
b/kubernetes/charts/CronManager/templates/service-byk-cronmanager.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/CronManager/values.yaml b/kubernetes/charts/CronManager/values.yaml new file mode 100644 index 00000000..21c24d4d --- /dev/null +++ b/kubernetes/charts/CronManager/values.yaml @@ -0,0 +1,43 @@ +replicas: 1 +enabled: true +release_name: "cron-manager" + +cronmanager: + image: + registry: ghcr.io + repository: buerokratt/cronmanager + tag: "python-1.2.0" + pullPolicy: IfNotPresent + + environment: + containerPort: "8080" + pythonPath: "/app:/app/src/vector_indexer" + VAULT_ADDR: "http://vault:8200" + +service: + type: ClusterIP + port: 9010 + targetPort: 8080 + +# PVC Configuration +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + size: 10Gi + +# Service URLs +constants: + RAG_MODULE_RUUTER_PRIVATE: "http://ruuter-private:8088" + RAG_MODULE_RUUTER_PUBLIC: "http://ruuter-public:8086" + RAG_MODULE_RESQL: "http://resql:8082" + RAG_MODULE_TIM: "http://tim:8085" + RAG_MODULE_DATAMAPPER: "http://data-mapper:3000" + +resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "4Gi" + cpu: "2000m" \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/Chart.yaml b/kubernetes/charts/DataMapper/Chart.yaml new file mode 100644 index 00000000..a39f7550 --- /dev/null +++ b/kubernetes/charts/DataMapper/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: data-mapper +description: A Helm chart for Data Mapper +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml b/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml new file mode 100644 index 00000000..09f23652 --- /dev/null +++ b/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml @@ -0,0 +1,63 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + initContainers: + - name: git-clone + image: alpine/git:latest + volumeMounts: + - name: dsl-lib + mountPath: /workspace/app/lib + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + # mkdir -p /workspace/app/views/rag-search && + mkdir -p /workspace/app/lib && + + # cp -r /tmp/rag/DSL/DMapper/rag-search/hbs/* /workspace/app/views/rag-search && + cp -r /tmp/rag/DSL/DMapper/rag-search/lib/* /workspace/app/lib + + + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.scope.registry }}/{{ .Values.images.scope.repository }}:{{ .Values.images.scope.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: http + env: + - name: PORT + value: "{{ .Values.env.PORT }}" + - name: CONTENT_FOLDER + value: "{{ .Values.env.CONTENT_FOLDER }}" + volumeMounts: 
+ - name: dsl-lib + mountPath: /workspace/app/lib + + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + volumes: + - name: dsl-lib + emptyDir: {} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/templates/service-byk-data-mapper.yaml b/kubernetes/charts/DataMapper/templates/service-byk-data-mapper.yaml new file mode 100644 index 00000000..c6d67227 --- /dev/null +++ b/kubernetes/charts/DataMapper/templates/service-byk-data-mapper.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/values.yaml b/kubernetes/charts/DataMapper/values.yaml new file mode 100644 index 00000000..220526ca --- /dev/null +++ b/kubernetes/charts/DataMapper/values.yaml @@ -0,0 +1,31 @@ +replicas: 1 +enabled: true +release_name: "data-mapper" + +images: + scope: + registry: "ghcr.io" + repository: "buerokratt/datamapper" + tag: "v2.2.9" + +service: + type: ClusterIP + port: 3001 + targetPort: 3000 + +env: + # DataMapper specific configuration + PORT: "3000" + CONTENT_FOLDER: "/data" + +resources: + requests: + memory: "512Mi" + cpu: "250m" + limits: + memory: "1Gi" + cpu: "500m" + + +pullPolicy: IfNotPresent + diff --git a/kubernetes/charts/GUI/Chart.yaml b/kubernetes/charts/GUI/Chart.yaml new file mode 100644 index 00000000..2fb3f331 --- /dev/null +++ b/kubernetes/charts/GUI/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: GUI +description: A Helm chart for GUI in RAG +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml b/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml new file mode 100644 index 00000000..081b1842 --- /dev/null +++ b/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml @@ -0,0 +1,87 @@ +{{- if .Values.gui.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.gui.release_name }} + labels: + app: {{ .Values.gui.release_name }} +spec: + replicas: {{ .Values.gui.replicas }} + selector: + matchLabels: + app: {{ .Values.gui.release_name }} + template: + metadata: + labels: + app: {{ .Values.gui.release_name }} + + spec: + containers: + - name: {{ .Values.gui.release_name }} + image: "{{ .Values.gui.image.repository }}:{{ .Values.gui.image.tag }}" + imagePullPolicy: {{ .Values.gui.image.pullPolicy }} + ports: + - containerPort: {{ .Values.gui.port }} + protocol: TCP + env: + # Node.js environment configuration + - name: NODE_ENV + value: {{ .Values.gui.nodeEnv | quote }} + - name: PORT + value: {{ .Values.gui.port | quote }} + - name: DEBUG_ENABLED + value: {{ .Values.gui.debugEnabled | quote }} + - name: CHOKIDAR_USEPOLLING + value: "true" + + # React application configuration + - name: REACT_APP_RUUTER_API_URL + value: {{ .Values.gui.services.ruuterPublic | quote }} + - name: REACT_APP_RUUTER_PRIVATE_API_URL + value: {{ .Values.gui.services.ruuterPrivate | quote }} + - name: REACT_APP_EXTERNAL_API_URL + value: {{ .Values.gui.services.datasetGenerator | quote }} 
+ - name: REACT_APP_CUSTOMER_SERVICE_LOGIN + value: {{ printf "%s/et/dev-auth" .Values.gui.services.authenticationLayer | quote }} + - name: REACT_APP_NOTIFICATION_NODE_URL + value: {{ .Values.gui.services.notificationNode | quote }} + - name: REACT_APP_CSP + value: {{ .Values.gui.csp | quote }} + - name: REACT_APP_SERVICE_ID + value: {{ .Values.gui.serviceId | quote }} + - name: REACT_APP_ENABLE_HIDDEN_FEATURES + value: {{ .Values.gui.enableHiddenFeatures | quote | upper }} + + # Vite development server configuration + - name: VITE_HOST + value: {{ .Values.gui.vite.host | quote }} + - name: VITE_ALLOWED_HOSTS + value: {{ .Values.gui.vite.allowedHosts | quote }} + + resources: + limits: + cpu: {{ .Values.gui.resources.limits.cpu }} + memory: {{ .Values.gui.resources.limits.memory }} + requests: + cpu: {{ .Values.gui.resources.requests.cpu }} + memory: {{ .Values.gui.resources.requests.memory }} + + livenessProbe: + httpGet: + path: / + port: {{ .Values.gui.port }} + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + + readinessProbe: + httpGet: + path: / + port: {{ .Values.gui.port }} + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + + restartPolicy: Always + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/GUI/templates/ingress-byk-gui.yaml b/kubernetes/charts/GUI/templates/ingress-byk-gui.yaml new file mode 100644 index 00000000..72b429f9 --- /dev/null +++ b/kubernetes/charts/GUI/templates/ingress-byk-gui.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: gui-ingress + namespace: rag-module + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: localhost + http: + paths: + - path: /rag-search + pathType: Prefix + backend: + service: + name: gui + port: + number: 3003 + \ No newline at end of file diff --git a/kubernetes/charts/GUI/templates/service-byk-gui.yaml b/kubernetes/charts/GUI/templates/service-byk-gui.yaml new file mode 100644 index 00000000..1a7a35a3 --- /dev/null +++ b/kubernetes/charts/GUI/templates/service-byk-gui.yaml @@ -0,0 +1,15 @@ +{{- if .Values.gui.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.gui.release_name }} +spec: + type: {{ .Values.gui.service.type }} + ports: + - port: {{ .Values.gui.service.port }} + targetPort: {{ .Values.gui.service.targetPort }} + protocol: TCP + name: http + selector: + app: {{ .Values.gui.release_name }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/GUI/values.yaml b/kubernetes/charts/GUI/values.yaml new file mode 100644 index 00000000..192a9e52 --- /dev/null +++ b/kubernetes/charts/GUI/values.yaml @@ -0,0 +1,62 @@ +gui: + enabled: true + release_name: gui + image: + repository: "ghcr.io/buerokratt/rag-gui" # Update with actual GUI image repository + tag: latest + pullPolicy: IfNotPresent + + # React application configuration + nodeEnv: production + port: 3001 + debugEnabled: true + enableHiddenFeatures: false + + #service URLs + services: + ruuterPublic: "http://ruuter-public:8086" + ruuterPrivate: "http://localhost:8088" + authenticationLayer: "http://authentication-layer:3004" + notificationNode: "http://notifications-node:4040" + datasetGenerator: "http://dataset-gen-service:8000" + + # Content Security Policy - Updated for browser access + csp: "default-src 'self'; connect-src 'self' http://ruuter-public:8086 https://ruuter-public:8086 http://ruuter-private:8088 https://ruuter-private:8088 http://authentication-layer:3004 https://authentication-layer:3004 
http://notifications-node:4040 https://notifications-node:4040 http://dataset-gen-service:8000 https://dataset-gen-service:8000 http://localhost:* https://localhost:* http://global-classifier.local https://global-classifier.local ws://global-classifier.local wss://global-classifier.local; script-src 'self' 'unsafe-eval' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:; font-src 'self' data:;" + + # Service configuration + serviceId: "conversations,settings,monitoring" + + # Vite development server (for development mode) + vite: + host: "0.0.0.0" + allowedHosts: "localhost,127.0.0.1" + + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 100m + memory: 256Mi + + replicas: 1 + + service: + type: ClusterIP + port: 3001 + targetPort: 3001 + + # ingress: + # enabled: true + # className: nginx + # annotations: + # nginx.ingress.kubernetes.io/rewrite-target: / + # nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" + # nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" + # nginx.ingress.kubernetes.io/proxy-body-size: "50m" + # hosts: + # - host: rag.local + # paths: + # - path: / + # pathType: Prefix + # tls: [] \ No newline at end of file diff --git a/kubernetes/charts/Grafana/Chart.yaml b/kubernetes/charts/Grafana/Chart.yaml new file mode 100644 index 00000000..0bdeaa7d --- /dev/null +++ b/kubernetes/charts/Grafana/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Grafana +description: A Helm chart for Grafana dashboard and monitoring +type: application +version: 0.1.0 +appVersion: "10.2.0" \ No newline at end of file diff --git a/kubernetes/charts/Grafana/dashboards/grafana-dashboard-deployment.json b/kubernetes/charts/Grafana/dashboards/grafana-dashboard-deployment.json new file mode 100644 index 00000000..a1e469f2 --- /dev/null +++ b/kubernetes/charts/Grafana/dashboards/grafana-dashboard-deployment.json @@ -0,0 +1,167 @@ +{ + "id": null, + "title": "RAG Module Orchestrator", + "tags": ["deployment", "models", "triton"], + "timezone": "browser", + "refresh": "30s", + "time": { + "from": "now-1h", + "to": "now" + }, + "templating": { + "list": [ + { + "name": "service_name", + "type": "query", + "label": "Service Name", + "refresh": 1, + "query": "label_values(service)", + "datasource": { + "type": "loki", + "uid": "loki-datasource" + }, + "multi": true, + "includeAll": true, + "allValue": ".*", + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "options": [], + "regex": "", + "sort": 0, + "skipUrlSync": false, + "hide": 0 + }, + { + "name": "log_level", + "type": "custom", + "label": "Log Level", + "multi": true, + "includeAll": true, + "allValue": "ERROR|INFO|WARNING|DEBUG", + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "options": [ + { + "text": "All", + "value": "$__all", + "selected": true + }, + { + "text": "ERROR", + "value": "ERROR", + "selected": false + }, + { + "text": "WARNING", + "value": "WARNING", + "selected": false + }, + { + "text": "INFO", + "value": "INFO", + "selected": false + }, + { + "text": "DEBUG", + "value": "DEBUG", + "selected": false + } + ], + "query": "ERROR,INFO,WARNING,DEBUG", + "queryType": "", + "refresh": 0, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "hide": 0 + } + ] + }, + "panels": [ + { + "id": 1, + "title": "Log Messages Over Time by Level", + "type": "graph", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 0 + }, + "targets": [ + { + "expr": "sum by (service, level) (count_over_time({service=~\"$service_name\", 
level=~\"$log_level\"}[5m]))", + "refId": "A", + "legendFormat": "{{service}} - {{level}}", + "datasource": { + "type": "loki", + "uid": "loki-datasource" + } + } + ], + "yAxes": [ + { + "label": "Log Count", + "min": 0 + } + ], + "xAxis": { + "show": true + }, + "legend": { + "show": true, + "values": true, + "current": true, + "total": true + }, + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "fill": 1, + "linewidth": 2, + "pointradius": 2, + "bars": false, + "lines": true, + "points": false, + "stack": false, + "percentage": false, + "nullPointMode": "null as zero" + }, + { + "id": 2, + "title": "Deployment Logs", + "type": "logs", + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 8 + }, + "targets": [ + { + "expr": "{service=~\"$service_name\", level=~\"$log_level\"}", + "refId": "A", + "datasource": { + "type": "loki", + "uid": "loki-datasource" + } + } + ], + "options": { + "showTime": true, + "showLabels": true, + "showCommonLabels": false, + "wrapLogMessage": true, + "sortOrder": "Descending" + } + } + ] +} diff --git a/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml b/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml new file mode 100644 index 00000000..844aa70a --- /dev/null +++ b/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboards + labels: + app: grafana +data: +{{- range $path, $content := .Files.Glob "dashboards/*.json" }} + {{ base $path }}: | +{{ $content | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/configmap-grafana.yaml b/kubernetes/charts/Grafana/templates/configmap-grafana.yaml new file mode 100644 index 00000000..c701d665 --- /dev/null +++ b/kubernetes/charts/Grafana/templates/configmap-grafana.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-datasources + labels: + app: grafana +data: + datasources.yaml: | + apiVersion: 1 + datasources: + {{- range .Values.datasources }} + - name: {{ .name }} + type: {{ .type }} + url: {{ .url }} + access: {{ .access }} + isDefault: {{ .isDefault }} + {{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-providers + labels: + app: grafana +data: + dashboards.yaml: | + apiVersion: 1 + providers: + {{- range .Values.dashboardProviders }} + - name: {{ .name }} + orgId: {{ .orgId }} + folder: '{{ .folder }}' + type: {{ .type }} + disableDeletion: {{ .disableDeletion }} + updateIntervalSeconds: {{ .updateIntervalSeconds }} + allowUiUpdates: {{ .allowUiUpdates }} + options: + path: {{ .options.path }} + {{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/deployment-grafana.yaml b/kubernetes/charts/Grafana/templates/deployment-grafana.yaml new file mode 100644 index 00000000..4b6fef87 --- /dev/null +++ b/kubernetes/charts/Grafana/templates/deployment-grafana.yaml @@ -0,0 +1,73 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ .Values.release_name }} + template: + metadata: + labels: + app: {{ .Values.release_name }} + spec: + containers: + - name: {{ .Values.release_name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag}}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.port }} + protocol: TCP + env: 
+ {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + volumeMounts: + - name: datasources + mountPath: /etc/grafana/provisioning/datasources + readOnly: true + - name: dashboard-providers + mountPath: /etc/grafana/provisioning/dashboards + readOnly: true + - name: dashboards + mountPath: /etc/grafana/dashboards + readOnly: true + {{- if .Values.persistence.enabled }} + - name: storage + mountPath: /var/lib/grafana + {{- end }} + livenessProbe: + httpGet: + path: /api/health + port: http + initialDelaySeconds: 60 + periodSeconds: 30 + readinessProbe: + httpGet: + path: /api/health + port: http + initialDelaySeconds: 30 + periodSeconds: 10 + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: datasources + configMap: + name: grafana-datasources + - name: dashboard-providers + configMap: + name: grafana-dashboard-providers + - name: dashboards + configMap: + name: grafana-dashboards + {{- if .Values.persistence.enabled }} + - name: storage + persistentVolumeClaim: + claimName: grafana-storage + {{- end }} + \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/pvc-grafana.yaml b/kubernetes/charts/Grafana/templates/pvc-grafana.yaml new file mode 100644 index 00000000..23b6f2e5 --- /dev/null +++ b/kubernetes/charts/Grafana/templates/pvc-grafana.yaml @@ -0,0 +1,17 @@ +{{- if .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: grafana-storage + labels: + app: grafana +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/service-grafana.yaml b/kubernetes/charts/Grafana/templates/service-grafana.yaml new file mode 100644 index 00000000..84ff6d0a --- /dev/null +++ b/kubernetes/charts/Grafana/templates/service-grafana.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: {{ .Values.release_name }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/values.yaml b/kubernetes/charts/Grafana/values.yaml new file mode 100644 index 00000000..805b8404 --- /dev/null +++ b/kubernetes/charts/Grafana/values.yaml @@ -0,0 +1,62 @@ +replicas: 1 + +release_name: "grafana" + +image: + repository: grafana/grafana + pullPolicy: IfNotPresent + tag: "10.0.0" + +nameOverride: "" +fullnameOverride: "" + +port: 3000 + +service: + type: ClusterIP + port: 4005 + targetPort: 3000 + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + size: 5Gi + +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +admin: + user: admin + password: admin + +# Datasources configuration +datasources: + - name: Loki + type: loki + url: http://loki:3100 + access: proxy + isDefault: true + +# Dashboard providers +dashboardProviders: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + updateIntervalSeconds: 10 + allowUiUpdates: true + options: + path: /var/lib/grafana/dashboards + +# Environment variables +env: + GF_SECURITY_ADMIN_USER: admin + 
GF_SECURITY_ADMIN_PASSWORD: admin123 + GF_USERS_ALLOW_SIGN_UP: "false" diff --git a/kubernetes/charts/LLM-Orchestration-Service/Chart.yaml b/kubernetes/charts/LLM-Orchestration-Service/Chart.yaml new file mode 100644 index 00000000..1be8ea8c --- /dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: LLM-Orchestration-Service +description: LLM Orchestration Service for RAG Module +version: 0.1.0 +appVersion: "1.0.0" +type: application \ No newline at end of file diff --git a/kubernetes/charts/LLM-Orchestration-Service/templates/deployment-byk-llm-orchestration.yaml b/kubernetes/charts/LLM-Orchestration-Service/templates/deployment-byk-llm-orchestration.yaml new file mode 100644 index 00000000..4a6013f3 --- /dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/templates/deployment-byk-llm-orchestration.yaml @@ -0,0 +1,166 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration + spec: + {{- if .Values.initContainer.enabled }} + initContainers: + - name: volume-init + image: "{{ .Values.initContainer.image.repository }}:{{ .Values.initContainer.image.tag }}" + command: + - sh + - -c + - | + echo "Initializing runtime volumes..." + + # Initialize config volume if empty + if [ ! -d "{{ .Values.volumes.config.mountPath }}" ] || [ -z "$(ls -A {{ .Values.volumes.config.mountPath }})" ]; then + echo "Creating config directory structure..." + mkdir -p {{ .Values.volumes.config.mountPath }} + # Generate initial config files here + # This is where your app would create its runtime config + echo "Config volume initialized" + fi + + # Initialize optimization volume if empty + if [ ! -d "{{ .Values.volumes.optimization.mountPath }}" ] || [ -z "$(ls -A {{ .Values.volumes.optimization.mountPath }})" ]; then + echo "Creating optimization modules directory structure..." 
+ mkdir -p {{ .Values.volumes.optimization.mountPath }} + # This is where your app would create its optimized modules + echo "Optimization volume initialized" + fi + + # Set proper permissions + chmod -R 755 {{ .Values.volumes.config.mountPath }} || true + chmod -R 755 {{ .Values.volumes.optimization.mountPath }} || true + + echo "Volume initialization complete" + volumeMounts: + {{- if .Values.volumes.config.enabled }} + - name: config-volume + mountPath: {{ .Values.volumes.config.mountPath }} + {{- end }} + {{- if .Values.volumes.optimization.enabled }} + - name: optimization-volume + mountPath: {{ .Values.volumes.optimization.mountPath }} + {{- end }} + {{- end }} + + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.llmOrchestration.repository }}:{{ .Values.images.llmOrchestration.tag }}" + imagePullPolicy: {{ .Values.images.llmOrchestration.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + env: + - name: ENVIRONMENT + value: "{{ .Values.app.environment }}" + - name: PORT + value: "{{ .Values.service.targetPort }}" + + # Vault configuration + {{- if .Values.vault.enabled }} + - name: VAULT_ADDR + value: "{{ .Values.vault.addr }}" + - name: VAULT_TOKEN + value: "{{ .Values.vault.tokenPath }}" + {{- end }} + + # Additional environment variables from values + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: "{{ .Values.healthcheck.readinessPath | default .Values.healthcheck.httpPath }}" + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + + volumeMounts: + # Runtime-generated config volume + {{- if .Values.volumes.config.enabled }} + - name: config-volume + mountPath: {{ .Values.volumes.config.mountPath }} + {{- end }} + # Runtime-generated optimization modules + {{- if .Values.volumes.optimization.enabled }} + - name: optimization-volume + mountPath: {{ .Values.volumes.optimization.mountPath }} + {{- end }} + # Persistent logs + {{- if .Values.volumes.logs.enabled }} + - name: logs-volume + mountPath: {{ .Values.volumes.logs.mountPath }} + {{- end }} + # Vault token (from agent) + {{- if and .Values.vault.enabled .Values.volumes.vaultToken.enabled }} + - name: vault-token + mountPath: {{ .Values.volumes.vaultToken.mountPath }} + readOnly: true + {{- end }} + + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + + volumes: + # Runtime-generated config volume (PVC) + {{- if .Values.volumes.config.enabled }} + - name: config-volume + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-config" + {{- end }} + # Runtime-generated optimization volume (PVC) + {{- if 
.Values.volumes.optimization.enabled }} + - name: optimization-volume + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-optimization" + {{- end }} + # Persistent logs (PVC) + {{- if .Values.volumes.logs.enabled }} + - name: logs-volume + persistentVolumeClaim: + claimName: "{{ .Values.release_name }}-logs" + {{- end }} + # Vault token (shared PVC with vault-agent-llm) + {{- if and .Values.vault.enabled .Values.volumes.vaultToken.enabled }} + - name: vault-token + persistentVolumeClaim: + claimName: vault-agent-token + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/LLM-Orchestration-Service/templates/pvc-volumes.yaml b/kubernetes/charts/LLM-Orchestration-Service/templates/pvc-volumes.yaml new file mode 100644 index 00000000..f2be2c30 --- /dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/templates/pvc-volumes.yaml @@ -0,0 +1,61 @@ +{{- if and .Values.enabled .Values.volumes.logs.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-logs" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration + type: logs +spec: + accessModes: + - {{ .Values.volumes.logs.accessMode }} + resources: + requests: + storage: {{ .Values.volumes.logs.size }} + {{- if .Values.volumes.logs.storageClass }} + storageClassName: {{ .Values.volumes.logs.storageClass }} + {{- end }} +{{- end }} + +--- +{{- if and .Values.enabled .Values.volumes.config.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-config" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration + type: config +spec: + accessModes: + - {{ .Values.volumes.config.accessMode }} + resources: + requests: + storage: {{ .Values.volumes.config.size }} + {{- if .Values.volumes.config.storageClass }} + storageClassName: {{ .Values.volumes.config.storageClass }} + {{- end }} +{{- end }} + +--- +{{- if and .Values.enabled .Values.volumes.optimization.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.release_name }}-optimization" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration + type: optimization +spec: + accessModes: + - {{ .Values.volumes.optimization.accessMode }} + resources: + requests: + storage: {{ .Values.volumes.optimization.size }} + {{- if .Values.volumes.optimization.storageClass }} + storageClassName: {{ .Values.volumes.optimization.storageClass }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/LLM-Orchestration-Service/templates/service-byk-llm-orchestration.yaml b/kubernetes/charts/LLM-Orchestration-Service/templates/service-byk-llm-orchestration.yaml new file mode 100644 index 00000000..63b9bb62 --- /dev/null +++ b/kubernetes/charts/LLM-Orchestration-Service/templates/service-byk-llm-orchestration.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: llm-orchestration +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/LLM-Orchestration-Service/values.yaml b/kubernetes/charts/LLM-Orchestration-Service/values.yaml new file mode 100644 index 00000000..b64723ef --- /dev/null +++ 
b/kubernetes/charts/LLM-Orchestration-Service/values.yaml @@ -0,0 +1,86 @@ +replicas: 1 +enabled: true + +images: + llmOrchestration: + repository: "ghcr.io/buerokratt/llm-orchestration-service" # Update with actual llm-orchestration image repository + tag: "latest" + pullPolicy: "IfNotPresent" + +release_name: "llm-orchestration-service" + +service: + type: ClusterIP + port: 8100 + targetPort: 8100 + +app: + environment: "production" + +# Volume configurations +volumes: + # Runtime-generated config volume (managed by InitContainer + PVC) + config: + enabled: true + mountPath: "/app/src/llm_config_module/config" + size: "1Gi" + accessMode: "ReadWriteOnce" + storageClass: "" + + # Runtime-generated optimization modules (managed by InitContainer + PVC) + optimization: + enabled: true + mountPath: "/app/src/optimization/optimized_modules" + size: "5Gi" + accessMode: "ReadWriteOnce" + storageClass: "" + + # Logs volume (persistent) + logs: + enabled: true + mountPath: "/app/logs" + size: "5Gi" + accessMode: "ReadWriteOnce" + storageClass: "" + + # Vault agent token volume (emptyDir - managed by sidecar) + vaultToken: + enabled: true + mountPath: "/agent/out" + +# InitContainer configuration for runtime volume preparation +initContainer: + enabled: true + image: + repository: "ghcr.io/buerokratt/llm-orchestration-service" # Update with actual llm-orchestration image repository + tag: "latest" + # InitContainer will prepare the runtime volumes + prepareVolumes: true + +env: + ENVIRONMENT: "production" + +vault: + enabled: true + addr: "http://vault:8200" + tokenPath: "/agent/out/token" + +resources: + requests: + memory: "512Mi" + cpu: "200m" + limits: + memory: "2Gi" + cpu: "1000m" + +healthcheck: + enabled: false + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + # LLM orchestration health endpoint + httpPath: "/health" + # Additional readiness checks + readinessPath: "/ready" diff --git a/kubernetes/charts/Langfuse-Web/Chart.yaml b/kubernetes/charts/Langfuse-Web/Chart.yaml new file mode 100644 index 00000000..041da91b --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Langfuse-Web +description: Langfuse web interface and API for LLM observability +type: application +version: 0.1.0 +appVersion: "3" \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml b/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml new file mode 100644 index 00000000..59cff5c1 --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml @@ -0,0 +1,59 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: langfuse-web +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: langfuse-web + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.langfuse_web.registry }}/{{ .Values.images.langfuse_web.repository }}:{{ .Values.images.langfuse_web.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if 
.Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: /api/public/health + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: /api/public/health + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + restartPolicy: Always +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Web/templates/service-byk-langfuse-web.yaml b/kubernetes/charts/Langfuse-Web/templates/service-byk-langfuse-web.yaml new file mode 100644 index 00000000..9594b424 --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/templates/service-byk-langfuse-web.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: langfuse-web +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Web/values.yaml b/kubernetes/charts/Langfuse-Web/values.yaml new file mode 100644 index 00000000..a85b0eae --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/values.yaml @@ -0,0 +1,104 @@ +replicas: 1 +enabled: true + +images: + langfuse_web: + registry: "docker.io" + repository: "langfuse/langfuse" + tag: "3" + +release_name: "langfuse-web" + +service: + type: ClusterIP + port: 3005 + targetPort: 3000 + +# Environment variables +env: + # Database configuration + NEXTAUTH_URL: "http://localhost:3000" + DATABASE_URL: "postgresql://postgres:dbadmin@rag-search-db:5432/rag-search" + SALT: "changeme" + ENCRYPTION_KEY: "changeme" + NEXTAUTH_SECRET: "changeme" + TELEMETRY_ENABLED: "true" + LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: "true" + + # ClickHouse configuration + CLICKHOUSE_MIGRATION_URL: "clickhouse://clickhouse:9000" + CLICKHOUSE_URL: "http://clickhouse:8123" + CLICKHOUSE_USER: "default" + CLICKHOUSE_PASSWORD: "clickhouse" + CLICKHOUSE_CLUSTER_ENABLED: "false" + + # S3/MinIO configuration + LANGFUSE_USE_AZURE_BLOB: "false" + LANGFUSE_S3_EVENT_UPLOAD_BUCKET: "rag-search" + LANGFUSE_S3_EVENT_UPLOAD_REGION: "auto" + LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: "changeme" + LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: "changeme" + LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: "true" + LANGFUSE_S3_EVENT_UPLOAD_PREFIX: "langfuse/events/" + + LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: "rag-search" + LANGFUSE_S3_MEDIA_UPLOAD_REGION: "auto" + LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: "changeme" + LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: "changeme" + LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: "true" + LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: 
"langfuse/media/" + + LANGFUSE_S3_BATCH_EXPORT_ENABLED: "false" + LANGFUSE_S3_BATCH_EXPORT_BUCKET: "rag-search" + LANGFUSE_S3_BATCH_EXPORT_PREFIX: "langfuse/exports/" + LANGFUSE_S3_BATCH_EXPORT_REGION: "auto" + LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: "changeme" + LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: "changeme" + LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: "true" + LANGFUSE_INGESTION_QUEUE_DELAY_MS: "" + LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: "" + + # Redis configuration + REDIS_HOST: "redis" + REDIS_PORT: "6379" + REDIS_AUTH: "myredissecret" + REDIS_TLS_ENABLED: "false" + REDIS_TLS_CA: "" + REDIS_TLS_CERT: "" + REDIS_TLS_KEY: "" + + # Email configuration + EMAIL_FROM_ADDRESS: "" + SMTP_CONNECTION_URL: "" + + # Langfuse initialization (Web-specific) + LANGFUSE_INIT_ORG_ID: "" + LANGFUSE_INIT_ORG_NAME: "" + LANGFUSE_INIT_PROJECT_ID: "" + LANGFUSE_INIT_PROJECT_NAME: "" + LANGFUSE_INIT_PROJECT_PUBLIC_KEY: "" + LANGFUSE_INIT_PROJECT_SECRET_KEY: "" + LANGFUSE_INIT_USER_EMAIL: "" + LANGFUSE_INIT_USER_NAME: "" + LANGFUSE_INIT_USER_PASSWORD: "" + +resources: + requests: + memory: "512Mi" + cpu: "100m" + limits: + memory: "1Gi" + cpu: "500m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Worker/Chart.yaml b/kubernetes/charts/Langfuse-Worker/Chart.yaml new file mode 100644 index 00000000..4117b9c0 --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Langfuse-Worker +description: Langfuse background worker for LLM observability +type: application +version: 0.1.0 +appVersion: "3" \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml b/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml new file mode 100644 index 00000000..3a82d36c --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml @@ -0,0 +1,59 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: langfuse-worker +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: langfuse-worker + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.langfuse_worker.registry }}/{{ .Values.images.langfuse_worker.repository }}:{{ .Values.images.langfuse_worker.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: worker + containerPort: {{ .Values.service.port }} + protocol: TCP + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: /api/public/health + port: {{ .Values.service.port }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: /api/public/health + port: {{ .Values.service.port }} + initialDelaySeconds: 
{{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + restartPolicy: Always +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Worker/templates/service-byk-langfuse-worker.yaml b/kubernetes/charts/Langfuse-Worker/templates/service-byk-langfuse-worker.yaml new file mode 100644 index 00000000..da32c5c2 --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/templates/service-byk-langfuse-worker.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: langfuse-worker +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: worker + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Langfuse-Worker/values.yaml b/kubernetes/charts/Langfuse-Worker/values.yaml new file mode 100644 index 00000000..61e5cb0c --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/values.yaml @@ -0,0 +1,91 @@ +replicas: 1 +enabled: true + +images: + langfuse_worker: + registry: "docker.io" + repository: "langfuse/langfuse-worker" + tag: "3" + +release_name: "langfuse-worker" + +service: + type: ClusterIP + port: 3030 + +# Environment variables +env: + # Database configuration + NEXTAUTH_URL: "http://localhost:3000" + DATABASE_URL: "postgresql://postgres:dbadmin@rag-search-db:5432/rag-search" + SALT: "changeme" + ENCRYPTION_KEY: "changeme" + TELEMETRY_ENABLED: "true" + LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: "true" + + # ClickHouse configuration + CLICKHOUSE_MIGRATION_URL: "clickhouse://clickhouse:9000" + CLICKHOUSE_URL: "http://clickhouse:8123" + CLICKHOUSE_USER: "default" + CLICKHOUSE_PASSWORD: "clickhouse" + CLICKHOUSE_CLUSTER_ENABLED: "false" + + # S3/MinIO configuration + LANGFUSE_USE_AZURE_BLOB: "false" + LANGFUSE_S3_EVENT_UPLOAD_BUCKET: "rag-search" + LANGFUSE_S3_EVENT_UPLOAD_REGION: "auto" + LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: "changeme" + LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: "changeme" + LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: "true" + LANGFUSE_S3_EVENT_UPLOAD_PREFIX: "langfuse/events/" + + LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: "rag-search" + LANGFUSE_S3_MEDIA_UPLOAD_REGION: "auto" + LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: "changeme" + LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: "changeme" + LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: "true" + LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: "langfuse/media/" + + LANGFUSE_S3_BATCH_EXPORT_ENABLED: "false" + LANGFUSE_S3_BATCH_EXPORT_BUCKET: "rag-search" + LANGFUSE_S3_BATCH_EXPORT_PREFIX: "langfuse/exports/" + LANGFUSE_S3_BATCH_EXPORT_REGION: "auto" + LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: "http://minio:9000" + LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: "changeme" + LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: "changeme" + LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: "true" + 
LANGFUSE_INGESTION_QUEUE_DELAY_MS: "" + LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: "" + + # Redis configuration + REDIS_HOST: "redis" + REDIS_PORT: "6379" + REDIS_AUTH: "myredissecret" + REDIS_TLS_ENABLED: "false" + REDIS_TLS_CA: "" + REDIS_TLS_CERT: "" + REDIS_TLS_KEY: "" + + # Email configuration + EMAIL_FROM_ADDRESS: "" + SMTP_CONNECTION_URL: "" + +resources: + requests: + memory: "512Mi" + cpu: "100m" + limits: + memory: "2Gi" + cpu: "500m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 \ No newline at end of file diff --git a/kubernetes/charts/Liquibase/Chart.yaml b/kubernetes/charts/Liquibase/Chart.yaml new file mode 100644 index 00000000..78f3d45f --- /dev/null +++ b/kubernetes/charts/Liquibase/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Liquibase +description: A Helm chart for Liquibase for database migrations +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Liquibase/templates/liquibase-job.yaml b/kubernetes/charts/Liquibase/templates/liquibase-job.yaml new file mode 100644 index 00000000..d9e54cee --- /dev/null +++ b/kubernetes/charts/Liquibase/templates/liquibase-job.yaml @@ -0,0 +1,60 @@ +{{- if .Values.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + backoffLimit: {{ .Values.backoffLimit }} + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + restartPolicy: OnFailure + volumes: + - name: liquibase-repo + emptyDir: {} + initContainers: + - name: git-clone + image: alpine/git:latest + volumeMounts: + - name: liquibase-repo + mountPath: /liquibase-files + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + cp -r /tmp/rag/DSL/Liquibase/* /liquibase-files + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.scope.repository }}:{{ .Values.images.scope.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + env: + {{- range .Values.env }} + - name: {{ .name }} + value: "{{ .value }}" + {{- end }} + + + volumeMounts: + - name: liquibase-repo + mountPath: /liquibase-files + command: ["/bin/sh", "-c"] + args: + - | + echo "--- Listing files in /liquibase-files ---" + ls -R /liquibase-files + cd /liquibase-files + echo "--- Now running Liquibase ---" + liquibase \ + --changeLogFile=/master.yml \ + --url=jdbc:postgresql://rag-search-db:5432/rag-search \ + --username=postgres \ + --password=dbadmin \ + update + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Liquibase/values.yaml b/kubernetes/charts/Liquibase/values.yaml new file mode 100644 index 00000000..0d538422 --- /dev/null +++ b/kubernetes/charts/Liquibase/values.yaml @@ -0,0 +1,21 @@ +enabled: true +release_name: "component-byk-liquibase" +backoffLimit: 3 + +images: + scope: + repository: "liquibase/liquibase" + tag: "4.33.0" + +env: + - name: POSTGRES_USER + value: "postgres" + - name: POSTGRES_PASSWORD + value: "dbadmin" + - name: LIQUIBASE_URL + value: "jdbc:postgresql://rag-search-db:5432/rag-search" + - name: LIQUIBASE_CHANGELOG_FILE + value: /master.yml + + +pullPolicy: IfNotPresent \ No newline at end of file diff --git a/kubernetes/charts/Loki/Chart.yaml b/kubernetes/charts/Loki/Chart.yaml new file mode 100644 index 00000000..570e167c --- /dev/null +++ b/kubernetes/charts/Loki/Chart.yaml @@ 
-0,0 +1,6 @@ +apiVersion: v2 +name: Loki +description: A Helm chart for Loki +type: application +version: 0.1.0 +appVersion: "2.9.0" \ No newline at end of file diff --git a/kubernetes/charts/Loki/templates/configmap-loki.yaml b/kubernetes/charts/Loki/templates/configmap-loki.yaml new file mode 100644 index 00000000..ebee18b3 --- /dev/null +++ b/kubernetes/charts/Loki/templates/configmap-loki.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: loki-config + labels: + app: loki +data: + loki.yaml: | +{{ .Values.config | toYaml | indent 4 }} \ No newline at end of file diff --git a/kubernetes/charts/Loki/templates/deployment-loki.yaml b/kubernetes/charts/Loki/templates/deployment-loki.yaml new file mode 100644 index 00000000..7967b8a3 --- /dev/null +++ b/kubernetes/charts/Loki/templates/deployment-loki.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ .Values.release_name }} + template: + metadata: + labels: + app: {{ .Values.release_name }} + spec: + containers: + - name: {{ .Values.release_name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag}}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.port }} + protocol: TCP + volumeMounts: + - name: config + mountPath: /etc/loki/local-config.yaml + {{- if .Values.persistence.enabled }} + - name: storage + mountPath: /loki + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: config + configMap: + name: loki-config + {{- if .Values.persistence.enabled }} + - name: storage + persistentVolumeClaim: + claimName: loki-storage + {{- end }} + \ No newline at end of file diff --git a/kubernetes/charts/Loki/templates/pvc-loki.yaml b/kubernetes/charts/Loki/templates/pvc-loki.yaml new file mode 100644 index 00000000..5d505a52 --- /dev/null +++ b/kubernetes/charts/Loki/templates/pvc-loki.yaml @@ -0,0 +1,17 @@ +{{- if .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: loki-storage + labels: + app: loki +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Loki/templates/service-loki.yaml b/kubernetes/charts/Loki/templates/service-loki.yaml new file mode 100644 index 00000000..84158378 --- /dev/null +++ b/kubernetes/charts/Loki/templates/service-loki.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: {{ .Values.release_name }} + + \ No newline at end of file diff --git a/kubernetes/charts/Loki/values.yaml b/kubernetes/charts/Loki/values.yaml new file mode 100644 index 00000000..1b059e44 --- /dev/null +++ b/kubernetes/charts/Loki/values.yaml @@ -0,0 +1,85 @@ +replicas: 1 + +release_name: "loki" + +image: + repository: grafana/loki + pullPolicy: IfNotPresent + tag: "2.9.0" + +nameOverride: "" +fullnameOverride: "" + +port: 3100 + +service: + type: ClusterIP + port: 3100 + targetPort: 
3100 + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + size: 10Gi + +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +# Loki configuration - will be mounted as ConfigMap +config: + auth_enabled: false + + server: + http_listen_port: 3100 + grpc_listen_port: 9096 + + common: + path_prefix: /loki + storage: + filesystem: + chunks_directory: /loki/chunks + rules_directory: /loki/rules + replication_factor: 1 + ring: + instance_addr: 127.0.0.1 + kvstore: + store: inmemory + + query_range: + results_cache: + cache: + embedded_cache: + enabled: true + max_size_mb: 100 + + schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h + + ruler: + alertmanager_url: http://localhost:9093 + +# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration +# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/ +# +# Statistics help us better understand how Loki is used, and they show us performance +# levels for most users. This helps us prioritize features and documentation. +# For more information on what's sent, look at +# https://github.com/grafana/loki/blob/main/pkg/usagestats/stats.go +# Refer to the buildReport method to see what goes into a report. +# +# If you would like to disable reporting, uncomment the following lines: + analytics: + reporting_enabled: false diff --git a/kubernetes/charts/Qdrant/Chart.yaml b/kubernetes/charts/Qdrant/Chart.yaml new file mode 100644 index 00000000..ec806350 --- /dev/null +++ b/kubernetes/charts/Qdrant/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Qdrant +description: Qdrant vector database for RAG +type: application +version: 0.1.0 +appVersion: "v1.15.1" \ No newline at end of file diff --git a/kubernetes/charts/Qdrant/templates/service-byk-qdrant.yaml b/kubernetes/charts/Qdrant/templates/service-byk-qdrant.yaml new file mode 100644 index 00000000..e0c0e4c6 --- /dev/null +++ b/kubernetes/charts/Qdrant/templates/service-byk-qdrant.yaml @@ -0,0 +1,31 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: qdrant +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "ClusterIP" }} + {{- if .Values.service.headless }} + clusterIP: None + {{- end }} + {{- end }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.httpPort }} + targetPort: {{ .Values.service.httpPort }} + - name: grpc + protocol: TCP + port: {{ .Values.service.grpcPort }} + targetPort: {{ .Values.service.grpcPort }} + - name: metrics + protocol: TCP + port: {{ .Values.service.metricsPort }} + targetPort: {{ .Values.service.metricsPort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Qdrant/templates/statefulset-byk-qdrant.yaml b/kubernetes/charts/Qdrant/templates/statefulset-byk-qdrant.yaml new file mode 100644 index 00000000..13d81cb4 --- /dev/null +++ b/kubernetes/charts/Qdrant/templates/statefulset-byk-qdrant.yaml @@ -0,0 +1,82 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: qdrant +spec: + serviceName: "{{ .Values.release_name }}" + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + 
template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: qdrant + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.qdrant.registry }}/{{ .Values.images.qdrant.repository }}:{{ .Values.images.qdrant.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.httpPort }} + protocol: TCP + - name: grpc + containerPort: {{ .Values.service.grpcPort }} + protocol: TCP + - name: metrics + containerPort: {{ .Values.service.metricsPort }} + protocol: TCP + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.httpPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.httpPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeMounts: + - name: qdrant-storage + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: qdrant-storage + labels: + app: "{{ .Values.release_name }}" + component: qdrant + spec: + accessModes: + - {{ .Values.persistence.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Qdrant/values.yaml b/kubernetes/charts/Qdrant/values.yaml new file mode 100644 index 00000000..4a83496a --- /dev/null +++ b/kubernetes/charts/Qdrant/values.yaml @@ -0,0 +1,50 @@ +replicas: 1 +enabled: true + +images: + qdrant: + registry: "docker.io" + repository: "qdrant/qdrant" + tag: "v1.15.1" + +release_name: "qdrant" + +service: + type: ClusterIP + # Set to true for headless service (direct pod access) + headless: false + # Qdrant HTTP API port + httpPort: 6333 + # Qdrant gRPC API port + grpcPort: 6334 + # Internal metrics port + metricsPort: 6335 + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + size: 20Gi + mountPath: "/qdrant/storage" + +resources: + requests: + memory: "512Mi" + cpu: "100m" + limits: + memory: "2Gi" + cpu: "1000m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + # HTTP endpoint for health check + httpPath: "/collections" + + diff --git a/kubernetes/charts/Redis/Chart.yaml b/kubernetes/charts/Redis/Chart.yaml new file mode 100644 index 00000000..cc5354ea --- /dev/null +++ b/kubernetes/charts/Redis/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Redis +description: Redis cache and session store for RAG +type: application +version: 0.1.0 +appVersion: 
"7" \ No newline at end of file diff --git a/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml b/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml new file mode 100644 index 00000000..b67ab1d2 --- /dev/null +++ b/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml @@ -0,0 +1,68 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: redis +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: redis + spec: + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.redis.registry }}/{{ .Values.images.redis.repository }}:{{ .Values.images.redis.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - name: redis + containerPort: {{ .Values.service.port }} + protocol: TCP + {{- if .Values.auth.enabled }} + command: + - redis-server + - --requirepass + - $(REDIS_PASSWORD) + {{- end }} + env: + {{- if .Values.auth.enabled }} + - name: REDIS_PASSWORD + value: "{{ .Values.auth.password }}" + {{- end }} + {{- if .Values.healthcheck.enabled }} + livenessProbe: + exec: + command: + - redis-cli + - ping + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + exec: + command: + - redis-cli + - ping + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + + restartPolicy: Always +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Redis/templates/service-byk-redis.yaml b/kubernetes/charts/Redis/templates/service-byk-redis.yaml new file mode 100644 index 00000000..a030f5aa --- /dev/null +++ b/kubernetes/charts/Redis/templates/service-byk-redis.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: redis +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: redis + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Redis/values.yaml b/kubernetes/charts/Redis/values.yaml new file mode 100644 index 00000000..01018687 --- /dev/null +++ b/kubernetes/charts/Redis/values.yaml @@ -0,0 +1,40 @@ +replicas: 1 +enabled: true + +images: + redis: + registry: "docker.io" + repository: "redis" + tag: "7" + +release_name: "redis" + +service: + type: ClusterIP + port: 6379 + +auth: + enabled: true + password: "myredissecret" + +env: + REDIS_PASSWORD: "myredissecret" + +# Resource configuration +resources: + requests: + memory: "128Mi" + cpu: "50m" + limits: + memory: "512Mi" + cpu: "200m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: true + initialDelaySeconds: 10 + 
periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 \ No newline at end of file diff --git a/kubernetes/charts/Resql/Chart.yaml b/kubernetes/charts/Resql/Chart.yaml new file mode 100644 index 00000000..2de36f8f --- /dev/null +++ b/kubernetes/charts/Resql/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: resql +description: Database abstraction layer for RAG +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml b/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml new file mode 100644 index 00000000..edc52930 --- /dev/null +++ b/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml @@ -0,0 +1,68 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + volumes: + - name: dsl + emptyDir: {} + initContainers: + - name: git-clone-dsl + image: alpine/git:latest + volumeMounts: + - name: dsl + mountPath: /DSL + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip \ + https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + cp -r /tmp/rag/DSL/Resql/* /DSL/ + + + + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.resql.registry }}/{{ .Values.images.resql.repository }}:{{ .Values.images.resql.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + env: + - name: logging.level.root + value: "{{ .Values.env.LOGGING_LEVEL_ROOT }}" + - name: SQLMS_DATASOURCES_0_NAME + value: "{{ .Values.env.SQLMS_DATASOURCES_0_NAME }}" + - name: SQLMS_DATASOURCES_0_JDBCURL + value: "{{ .Values.env.SQLMS_DATASOURCES_0_JDBCURL }}" + - name: SQLMS_DATASOURCES_0_USERNAME + value: "{{ .Values.env.SQLMS_DATASOURCES_0_USERNAME }}" + - name: SQLMS_DATASOURCES_0_PASSWORD + value: "{{ .Values.env.SQLMS_DATASOURCES_0_PASSWORD }}" + - name: LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT + value: "{{ .Values.env.LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT }}" + - name: SQLMS_SAVED_QUERIES_DIR + value: "/DSL" + volumeMounts: + - name: dsl + mountPath: /DSL + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + +{{- end }} diff --git a/kubernetes/charts/Resql/templates/service-byk-resql.yaml b/kubernetes/charts/Resql/templates/service-byk-resql.yaml new file mode 100644 index 00000000..3312d10d --- /dev/null +++ b/kubernetes/charts/Resql/templates/service-byk-resql.yaml @@ -0,0 +1,14 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + selector: + app: "{{ .Values.release_name }}" + ports: + - protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Resql/values.yaml b/kubernetes/charts/Resql/values.yaml new file mode 100644 index 00000000..470b7a0d --- /dev/null +++ b/kubernetes/charts/Resql/values.yaml @@ -0,0 +1,32 @@ +replicas: 1 +enabled: true +images: + resql: + registry: "ghcr.io" + repository: "buerokratt/resql" + tag: "v1.3.4" + +release_name: "resql" + +service: + type: 
ClusterIP + port: 8082 + +env: + LOGGING_LEVEL_ROOT: "INFO" + SQLMS_DATASOURCES_0_NAME: "byk" + SQLMS_DATASOURCES_0_JDBCURL: "jdbc:postgresql://rag-search-db:5432/rag-search" + SQLMS_DATASOURCES_0_USERNAME: "postgres" + SQLMS_DATASOURCES_0_PASSWORD: "dbadmin" + LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT: "INFO" + JAVA_OPTS: "-Xms1g -Xmx3g" + +resources: + requests: + memory: "1000Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "50m" + +pullPolicy: IfNotPresent diff --git a/kubernetes/charts/Ruuter-Private/Chart.yaml b/kubernetes/charts/Ruuter-Private/Chart.yaml new file mode 100644 index 00000000..845f24ec --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: ruuter-private +description: A Helm chart for Ruuter Private API Gateway +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml new file mode 100644 index 00000000..9a20ec2d --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Values.release_name }}-constants" + labels: + app: "{{ .Values.release_name }}" +data: + constants.ini: | + [DSL] + RAG_SEARCH_RUUTER_PUBLIC=http://ruuter-public:8086/rag-search + RAG_SEARCH_RUUTER_PRIVATE=http://ruuter-private:8088/rag-search + RAG_SEARCH_DMAPPER=http://data-mapper:3000 + RAG_SEARCH_RESQL=http://resql:8082/rag-search + RAG_SEARCH_PROJECT_LAYER=rag-search + RAG_SEARCH_TIM=http://tim:8085 + RAG_SEARCH_CRON_MANAGER=http://cron-manager:9010 + RAG_SEARCH_LLM_ORCHESTRATOR=http://llm-orchestration-service:8100/orchestrate + DOMAIN=localhost + DB_PASSWORD=dbadmin \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml new file mode 100644 index 00000000..c2082f6d --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml @@ -0,0 +1,88 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + initContainers: + - name: git-clone + image: alpine/git:latest + volumeMounts: + - name: dsl + mountPath: /DSL + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + cp -r /tmp/rag/DSL/Ruuter.private/* /DSL/ + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.scope.registry }}/{{ .Values.images.scope.repository }}:{{ .Values.images.scope.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.port }} + name: http + env: + + - name: application.cors.allowedOrigins + value: "{{ .Values.env.APPLICATION_CORS_ALLOWEDORIGINS }}" + - name: application.httpCodesAllowList + value: "{{ .Values.env.APPLICATION_HTTPCODESALLOWLIST }}" + - name: application.internalRequests.allowedIPs + value: "{{ .Values.env.APPLICATION_INTERNALREQUESTS_ALLOWEDIPS }}" + - name: application.logging.displayRequestContent + value: "{{ 
.Values.env.APPLICATION_LOGGING_DISPLAYREQUESTCONTENT }}" + - name: application.logging.displayResponseContent + value: "{{ .Values.env.APPLICATION_LOGGING_DISPLAYRESPONSECONTENT }}" + - name: application.logging.printStackTrace + value: "{{ .Values.env.APPLICATION_LOGGING_PRINTSTACKTRACE }}" + - name: application.internalRequests.disabled + value: "{{ .Values.env.APPLICATION_INTERNALREQUESTS_DISABLED }}" + - name: server.port + value: "{{ .Values.env.SERVER_PORT }}" + + + - name: logging.level.root + value: "{{ .Values.env.LOGGING_LEVEL_ROOT }}" + - name: LOG_LEVEL_TIMING + value: "{{ .Values.env.LOG_LEVEL_TIMING }}" + - name: application.DSL.allowedFiletypes + value: "{{ .Values.env.APPLICATION_DSL_ALLOWEDFILETYPES }}" + - name: application.httpResponseSizeLimit + value: "{{ .Values.env.APPLICATION_HTTPRESPONSESIZELIMIT }}" + - name: application.openSearchConfiguration.index + value: "{{ .Values.env.APPLICATION_OPENSEARCHCONFIGURATION_INDEX }}" + volumeMounts: + - name: dsl + mountPath: /DSL + - name: urls-env + mountPath: /app/constants.ini + subPath: constants.ini + + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + volumes: + - name: dsl + emptyDir: {} + - name: urls-env + configMap: + name: "{{ .Values.release_name }}-constants" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/ingress-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/ingress-ruuter-private.yaml new file mode 100644 index 00000000..94655a6d --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/ingress-ruuter-private.yaml @@ -0,0 +1,46 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: "{{ .Values.release_name }}-ingress" + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, OPTIONS" + nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,X-Forwarded-For" + nginx.ingress.kubernetes.io/cors-allow-origin: "{{ .Values.ingress.corsAllowOrigin }}" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-credentials: "true" + nginx.ingress.kubernetes.io/additional-response-headers: "Access-Control-Allow-Headers: Content-Type" + nginx.ingress.kubernetes.io/cors-expose-headers: "Content-Length, Content-Range" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + # Private Ruuter may need IP whitelisting for security + nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.1/32" + {{- if .Values.ingress.ssl.enabled }} + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: {{ .Values.ingress.ssl.certIssuerName | quote }} + {{- end }} + {{- with .Values.ingress.annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + labels: + name: "{{ .Values.release_name }}-ingress" + app: "{{ .Values.release_name }}" +spec: + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - pathType: Prefix + path: / + backend: + service: + name: "{{ .Values.release_name }}" + port: + number: {{ .Values.service.port }} + {{- if .Values.ingress.ssl.enabled }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.ssl.secretName }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/service-byk-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/service-byk-ruuter-private.yaml new file mode 100644 index 00000000..c6d67227 --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/service-byk-ruuter-private.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/values.yaml b/kubernetes/charts/Ruuter-Private/values.yaml new file mode 100644 index 00000000..a6a6d64d --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/values.yaml @@ -0,0 +1,56 @@ +replicas: 1 +enabled: true +release_name: "ruuter-private" + +images: + scope: + registry: "ghcr.io" + repository: "buerokratt/ruuter" + tag: "v2.2.1" + +service: + type: ClusterIP + port: 8088 + targetPort: 8088 + +env: + + APPLICATION_CORS_ALLOWEDORIGINS: "http://gui:3001,http://ruuter-private:8088,http://ruuter-public:8086,http://authentication-layer:3004,http://notifications-node:4040,http://dataset-gen-service:8000,http://localhost:3001" + APPLICATION_HTTPCODESALLOWLIST: "200,201,202,400,401,403,500" + APPLICATION_INTERNALREQUESTS_ALLOWEDIPS: "127.0.0.1" + APPLICATION_LOGGING_DISPLAYREQUESTCONTENT: "true" + APPLICATION_LOGGING_DISPLAYRESPONSECONTENT: "true" + APPLICATION_LOGGING_PRINTSTACKTRACE: "true" + APPLICATION_INTERNALREQUESTS_DISABLED: "true" + + + + LOGGING_LEVEL_ROOT: "INFO" + LOG_LEVEL_TIMING: "INFO" + APPLICATION_DSL_ALLOWEDFILETYPES: ".yml,.yaml,.md,.tmp" + APPLICATION_HTTPRESPONSESIZELIMIT: "2000" + APPLICATION_OPENSEARCHCONFIGURATION_INDEX: "ruuterlog" + SERVER_PORT: "8088" + +resources: + requests: + memory: "1000Mi" + cpu: "50m" + limits: + memory: "2000Mi" + cpu: "50m" + + +ingress: + enabled: false + host: "rag.local" #change this to domain + corsAllowOrigin: "http://localhost:3001,http://localhost:3003,http://localhost:8088,http://localhost:3002,http://localhost:3004,http://localhost:8000" + ssl: + enabled: false + certIssuerName: "letsencrypt-prod" + secretName: "rag-ruuter-private-tls" + annotations: {} + +pullPolicy: IfNotPresent + + diff --git a/kubernetes/charts/Ruuter-Public/Chart.yaml b/kubernetes/charts/Ruuter-Public/Chart.yaml new file mode 100644 index 00000000..662e775e --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: ruuter-public +description: A Helm chart for Ruuter Public API Gateway +type: application +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml new file mode 100644 
index 00000000..354b6f2a --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Values.release_name }}-constants" + labels: + app: "{{ .Values.release_name }}" +data: + constants.ini: | + [DSL] + RAG_SEARCH_RUUTER_PUBLIC=http://ruuter-public:8086/rag-search + RAG_SEARCH_RUUTER_PRIVATE=http://ruuter-private:8088/rag-search + RAG_SEARCH_DMAPPER=http://data-mapper:3000 + RAG_SEARCH_RESQL=http://resql:8082/rag-search + RAG_SEARCH_PROJECT_LAYER=rag-search + RAG_SEARCH_TIM=http://tim:8085 + RAG_SEARCH_CRON_MANAGER=http://cron-manager:9010 + RAG_SEARCH_LLM_ORCHESTRATOR=http://llm-orchestration-service:8100/orchestrate + DOMAIN=localhost + DB_PASSWORD=dbadmin \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml new file mode 100644 index 00000000..ebf2c2f8 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml @@ -0,0 +1,87 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + initContainers: + - name: git-clone + image: alpine/git:latest + volumeMounts: + - name: dsl + mountPath: /DSL + command: + - sh + - -c + - | + git clone --single-branch --depth 1 --branch wip https://github.com/rootcodelabs/RAG-Module /tmp/rag && + + cp -r /tmp/rag/DSL/Ruuter.public/* /DSL/ + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.scope.registry }}/{{ .Values.images.scope.repository }}:{{ .Values.images.scope.tag }}" + ports: + - containerPort: {{ .Values.service.port }} + name: http + env: + - name: application.cors.allowedOrigins + value: "{{ .Values.env.APPLICATION_CORS_ALLOWEDORIGINS }}" + - name: application.httpCodesAllowList + value: "{{ .Values.env.APPLICATION_HTTPCODESALLOWLIST }}" + - name: application.internalRequests.allowedIPs + value: "{{ .Values.env.APPLICATION_INTERNALREQUESTS_ALLOWEDIPS }}" + - name: application.logging.displayRequestContent + value: "{{ .Values.env.APPLICATION_LOGGING_DISPLAYREQUESTCONTENT }}" + - name: application.logging.displayResponseContent + value: "{{ .Values.env.APPLICATION_LOGGING_DISPLAYRESPONSECONTENT }}" + - name: application.logging.printStackTrace + value: "{{ .Values.env.APPLICATION_LOGGING_PRINTSTACKTRACE }}" + - name: application.internalRequests.disabled + value: "{{ .Values.env.APPLICATION_INTERNALREQUESTS_DISABLED }}" + - name: server.port + value: "{{ .Values.env.SERVER_PORT }}" + - name: application.constants.file + value: "/app/constants.ini" + + - name: logging.level.root + value: "{{ .Values.env.LOGGING_LEVEL_ROOT }}" + - name: LOG_LEVEL_TIMING + value: "{{ .Values.env.LOG_LEVEL_TIMING }}" + - name: application.DSL.allowedFiletypes + value: "{{ .Values.env.APPLICATION_DSL_ALLOWEDFILETYPES }}" + - name: application.httpResponseSizeLimit + value: "{{ .Values.env.APPLICATION_HTTPRESPONSESIZELIMIT }}" + - name: application.openSearchConfiguration.index + value: "{{ .Values.env.APPLICATION_OPENSEARCHCONFIGURATION_INDEX }}" + volumeMounts: + - name: dsl + mountPath: /DSL + - name: urls-env + mountPath: /app/constants.ini + subPath: constants.ini + + 
resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + volumes: + - name: dsl + emptyDir: {} + - name: urls-env + configMap: + name: "{{ .Values.release_name }}-constants" +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/templates/ingress-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/ingress-ruuter-public.yaml new file mode 100644 index 00000000..3a1e4c55 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/ingress-ruuter-public.yaml @@ -0,0 +1,45 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: "{{ .Values.release_name }}-ingress" + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, OPTIONS" + nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,X-Forwarded-For" + nginx.ingress.kubernetes.io/cors-allow-origin: "{{ .Values.ingress.corsAllowOrigin }}" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-credentials: "true" + nginx.ingress.kubernetes.io/additional-response-headers: "Access-Control-Allow-Headers: Content-Type" + nginx.ingress.kubernetes.io/cors-expose-headers: "Content-Length, Content-Range" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + {{- if .Values.ingress.ssl.enabled }} + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: {{ .Values.ingress.ssl.certIssuerName | quote }} + {{- end }} + {{- with .Values.ingress.annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + labels: + name: "{{ .Values.release_name }}-ingress" + app: "{{ .Values.release_name }}" +spec: + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - pathType: Prefix + path: / + backend: + service: + name: "{{ .Values.release_name }}" + port: + number: {{ .Values.service.port }} + + {{- if .Values.ingress.ssl.enabled }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.ssl.secretName }} + {{- end }} +{{- end }} diff --git a/kubernetes/charts/Ruuter-Public/templates/service-byk-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/service-byk-ruuter-public.yaml new file mode 100644 index 00000000..6e10cd82 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/service-byk-ruuter-public.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: "{{ .Values.release_name }}" + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/values.yaml b/kubernetes/charts/Ruuter-Public/values.yaml new file mode 100644 index 00000000..635d51c9 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/values.yaml @@ -0,0 +1,51 @@ +replicas: 1 +enabled: true +release_name: "ruuter-public" + +images: + scope: + registry: "ghcr.io" + repository: "buerokratt/ruuter" + tag: v2.2.1 + +service: + type: ClusterIP + port: 8086 + targetPort: 8086 + +env: + APPLICATION_CORS_ALLOWEDORIGINS: "http://localhost:8086,http://localhost:3001,http://localhost:3003,http://localhost:3004,http://localhost:8080,http://localhost:8000,http://localhost:8090" + APPLICATION_HTTPCODESALLOWLIST: "200,201,202,204,400,401,403,500" + APPLICATION_INTERNALREQUESTS_ALLOWEDIPS: "127.0.0.1" + APPLICATION_LOGGING_DISPLAYREQUESTCONTENT: "true" + APPLICATION_LOGGING_DISPLAYRESPONSECONTENT: "true" + APPLICATION_LOGGING_PRINTSTACKTRACE: "true" + APPLICATION_INTERNALREQUESTS_DISABLED: "true" + SERVER_PORT: "8086" + + LOGGING_LEVEL_ROOT: "INFO" + LOG_LEVEL_TIMING: "INFO" + APPLICATION_DSL_ALLOWEDFILETYPES: ".yml,.yaml,.md,.tmp" + APPLICATION_HTTPRESPONSESIZELIMIT: "2000" + APPLICATION_OPENSEARCHCONFIGURATION_INDEX: "ruuterlog" + +resources: + requests: + memory: "1000Mi" + cpu: "50m" + limits: + memory: "2000Mi" + cpu: "50m" + + +ingress: + enabled: true + host: "rag.local" # Change this to domain + corsAllowOrigin: "http://localhost:8086,http://localhost:3001,http://localhost:3003,http://localhost:3004,http://localhost:8080,http://localhost:8000,http://localhost:8090" + ssl: + enabled: false # Set to true for production with proper certificates + certIssuerName: "letsencrypt-prod" + secretName: "rag-ruuter-tls" + + +pullPolicy: IfNotPresent diff --git a/kubernetes/charts/S3-Ferry/Chart.yaml b/kubernetes/charts/S3-Ferry/Chart.yaml new file mode 100644 index 00000000..882054c1 --- /dev/null +++ b/kubernetes/charts/S3-Ferry/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: S3-Ferry +description: A Helm chart for S3-Ferry +type: application +version: 0.1.0 +appVersion: "latest" \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/templates/configmap-s3.yaml b/kubernetes/charts/S3-Ferry/templates/configmap-s3.yaml new file mode 100644 index 00000000..5a80ebe0 --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/configmap-s3.yaml 
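Since enable-cors is set on the Ruuter-Public ingress above, NGINX answers preflight requests itself, so the allowed-origin list can be verified before any backend is wired up. A rough check, where INGRESS_IP, the Host header and the Origin are placeholders for the real ingress endpoint and domain (the origin used here is one of the chart's defaults):

    curl -s -o /dev/null -w '%{http_code}\n' -X OPTIONS \
      -H 'Host: rag.local' \
      -H 'Origin: http://localhost:3001' \
      -H 'Access-Control-Request-Method: POST' \
      http://INGRESS_IP/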
@@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.release_name }}-config + labels: + app: {{ .Values.release_name }} +data: + {{- range $key, $value := .Values.env }} + {{ $key }}: {{ $value | quote }} + {{- end }} \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml b/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml new file mode 100644 index 00000000..16678196 --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ .Values.release_name }} + template: + metadata: + labels: + app: {{ .Values.release_name }} + spec: + containers: + - name: {{ .Values.release_name }} + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.port }} + protocol: TCP + envFrom: + - configMapRef: + name: {{ .Values.release_name }}-config + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: shared + mountPath: /app/shared + - name: cron-data + mountPath: /app/data + {{- end }} + - name: datasets + mountPath: /app/datasets + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + {{- if .Values.persistence.enabled }} + - name: shared + persistentVolumeClaim: + claimName: s3-ferry-shared + - name: cron-data + persistentVolumeClaim: + claimName: s3-ferry-cron-data + {{- end }} + - name: datasets + emptyDir: {} + \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/templates/pvc-s3.yaml b/kubernetes/charts/S3-Ferry/templates/pvc-s3.yaml new file mode 100644 index 00000000..f973360c --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/pvc-s3.yaml @@ -0,0 +1,36 @@ +{{- if .Values.persistence.enabled }} +# Shared volume PVC +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: s3-ferry-shared + labels: + app: s3-ferry +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.shared.size }} + +--- +# Cron data PVC +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: s3-ferry-cron-data + labels: + app: s3-ferry +spec: + accessModes: + - {{ .Values.persistence.accessMode }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.cronData.size }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/templates/service-s3.yaml b/kubernetes/charts/S3-Ferry/templates/service-s3.yaml new file mode 100644 index 00000000..84ff6d0a --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/service-s3.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + app: {{ .Values.release_name }} \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/values.yaml b/kubernetes/charts/S3-Ferry/values.yaml new file mode 100644 
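Because the S3-Ferry deployment loads its ConfigMap with envFrom, every key under the chart's env block, S3 credentials included, becomes a container environment variable. One way to inspect exactly what will be rendered before installing, assuming the subchart sits under charts/S3-Ferry of the umbrella chart as laid out here (the release name is a placeholder):

    helm template rag ./kubernetes \
      --show-only charts/S3-Ferry/templates/configmap-s3.yaml \
      --show-only charts/S3-Ferry/templates/deployment-s3.yaml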
index 00000000..69c03c5f --- /dev/null +++ b/kubernetes/charts/S3-Ferry/values.yaml @@ -0,0 +1,61 @@ +replicas: 1 + +release_name: "s3-ferry" + +image: + registry: "ghcr.io" + repository: "buerokratt/s3-ferry" + pullPolicy: IfNotPresent + tag: "PRE-ALPHA-1.1.1" + +nameOverride: "" +fullnameOverride: "" + +port: 3000 + +service: + type: ClusterIP + port: 3006 + targetPort: 3000 + + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteOnce + outputDatasets: + size: 5Gi + shared: + size: 2Gi + cronData: + size: 3Gi + +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + +# Environment variables +env: + API_CORS_ORIGIN: "*" + API_DOCUMENTATION_ENABLED: "true" + S3_REGION: "eu-west-1" + S3_ENDPOINT_URL: "http://minio:9000" + S3_ENDPOINT_NAME: "minio:9000" + S3_DATA_BUCKET_PATH: "resources" + S3_DATA_BUCKET_NAME: "rag-search" + FS_DATA_DIRECTORY_PATH: "/app" + S3_SECRET_ACCESS_KEY: "changeme" + S3_ACCESS_KEY_ID: "changeme" + S3_HEALTH_ENDPOINT: "http://minio:9000/minio/health/live" + MINIO_BROWSER_REDIRECT_URL: "http://localhost:9091" + GF_SECURITY_ADMIN_USER: "admin" + GF_SECURITY_ADMIN_PASSWORD: "admin123" + GF_USERS_ALLOW_SIGN_UP: "false" + PORT: "3000" + + + diff --git a/kubernetes/charts/TIM-database/Chart.yaml b/kubernetes/charts/TIM-database/Chart.yaml new file mode 100644 index 00000000..2bcdf24d --- /dev/null +++ b/kubernetes/charts/TIM-database/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: TIM-database +description: TIM postgresql database +type: application +version: 0.1.0 +appVersion: "1.0" diff --git a/kubernetes/charts/TIM-database/templates/deployment-byk-timdb.yaml b/kubernetes/charts/TIM-database/templates/deployment-byk-timdb.yaml new file mode 100644 index 00000000..7e8fa6ba --- /dev/null +++ b/kubernetes/charts/TIM-database/templates/deployment-byk-timdb.yaml @@ -0,0 +1,42 @@ +{{- if .Values.timPostgresql.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} + labels: + app: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} +spec: + replicas: {{ .Values.timPostgresql.replicaCount | default 1 }} + selector: + matchLabels: + app: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} + template: + metadata: + labels: + app: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} + spec: + containers: + - name: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} + image: "{{ .Values.timPostgresql.image.repository }}:{{ .Values.timPostgresql.image.tag }}" + imagePullPolicy: {{ .Values.timPostgresql.image.pullPolicy }} + env: + {{- range .Values.timPostgresql.env }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.timPostgresql.secret.name }} + key: POSTGRES_PASSWORD + ports: + - containerPort: {{ .Values.timPostgresql.service.port }} + resources: + {{- toYaml .Values.timPostgresql.resources | nindent 12 }} + volumes: + {{- if .Values.timPostgresql.persistence.enabled }} + - name: postgres-storage + persistentVolumeClaim: + claimName: {{ .Values.timPostgresql.persistence.existingClaim | default (printf "%s-pvc" (.Values.timPostgresql.nameOverride | default "tim-postgresql")) }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM-database/templates/pvc-byk-timdb.yaml b/kubernetes/charts/TIM-database/templates/pvc-byk-timdb.yaml new file mode 100644 index 
00000000..6dfdc707 --- /dev/null +++ b/kubernetes/charts/TIM-database/templates/pvc-byk-timdb.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.timPostgresql.enabled .Values.timPostgresql.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.timPostgresql.persistence.existingClaim | default (printf "%s-pvc" (.Values.timPostgresql.nameOverride | default "tim-postgresql")) }} + labels: + app: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} +spec: + accessModes: + {{- range .Values.timPostgresql.persistence.accessModes }} + - {{ . }} + {{- end }} + resources: + requests: + storage: {{ .Values.timPostgresql.persistence.size }} + {{- if .Values.timPostgresql.persistence.storageClass }} + storageClassName: {{ .Values.timPostgresql.persistence.storageClass }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM-database/templates/secret-byk-timdb.yaml b/kubernetes/charts/TIM-database/templates/secret-byk-timdb.yaml new file mode 100644 index 00000000..30b97aea --- /dev/null +++ b/kubernetes/charts/TIM-database/templates/secret-byk-timdb.yaml @@ -0,0 +1,9 @@ +{{- if .Values.timPostgresql.secret.create }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.timPostgresql.secret.name }} +type: Opaque +data: + POSTGRES_PASSWORD: {{ .Values.timPostgresql.secret.keys.POSTGRES_PASSWORD | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM-database/templates/service-byk-timdb.yaml b/kubernetes/charts/TIM-database/templates/service-byk-timdb.yaml new file mode 100644 index 00000000..686d20ce --- /dev/null +++ b/kubernetes/charts/TIM-database/templates/service-byk-timdb.yaml @@ -0,0 +1,15 @@ +{{- if .Values.timPostgresql.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.timPostgresql.nameOverride }} +spec: + type: {{ .Values.timPostgresql.service.type | default "ClusterIP" }} + selector: + app: {{ .Values.timPostgresql.nameOverride }} + ports: + - name: postgres + port: {{ .Values.timPostgresql.service.port }} + targetPort: {{ .Values.timPostgresql.service.port }} + nodePort: {{- if eq .Values.timPostgresql.service.type "NodePort" }} {{ .Values.timPostgresql.service.externalPort }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM-database/values.yaml b/kubernetes/charts/TIM-database/values.yaml new file mode 100644 index 00000000..2e9ce2b7 --- /dev/null +++ b/kubernetes/charts/TIM-database/values.yaml @@ -0,0 +1,29 @@ +timPostgresql: + enabled: true + nameOverride: tim-postgresql + image: + repository: postgres + tag: "14.1" + pullPolicy: IfNotPresent + service: + type: ClusterIP + port: 5432 + externalPort: 9876 + env: + - name: POSTGRES_USER + value: "tim" + - name: POSTGRES_DB + value: "tim" + - name: POSTGRES_HOST_AUTH_METHOD + value: "trust" + secret: + create: true + name: tim-postgres-secret + keys: + POSTGRES_PASSWORD: "Ab123" + persistence: + enabled: true + size: 1Gi + storageClass: "" + accessModes: ["ReadWriteOnce"] + existingClaim: "" diff --git a/kubernetes/charts/TIM/Chart.yaml b/kubernetes/charts/TIM/Chart.yaml new file mode 100644 index 00000000..7ac0a745 --- /dev/null +++ b/kubernetes/charts/TIM/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: TIM +description: TIM Authentication Service for RAG +type: application +version: 0.1.0 +appVersion: "1.0" diff --git a/kubernetes/charts/TIM/templates/configmap-byk-tim.yaml b/kubernetes/charts/TIM/templates/configmap-byk-tim.yaml new file mode 100644 
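The TIM-database secret above is rendered directly from values (b64enc of timPostgresql.secret.keys.POSTGRES_PASSWORD), so the default password is better replaced per environment at install time than committed. A sketch, assuming the subchart is wired into the umbrella chart under the TIM-database key used in kubernetes/values.yaml:

    helm upgrade --install rag ./kubernetes --namespace rag-module \
      --set-string TIM-database.timPostgresql.secret.keys.POSTGRES_PASSWORD="$(openssl rand -base64 24)"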
index 00000000..58f6986b --- /dev/null +++ b/kubernetes/charts/TIM/templates/configmap-byk-tim.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: tim-config +data: + application.properties: | + security.oauth2.client.client-id={{ .Values.tim.config.oauth2_client_id }} + security.oauth2.client.client-secret=${OAUTH2_CLIENT_SECRET} + security.oauth2.client.scope={{ .Values.tim.config.oauth2_client_scope }} + security.oauth2.client.registered-redirect-uri=https://tim.{{ .Values.global.domain }}/authenticate + security.oauth2.client.user-authorization-uri={{ .Values.tim.config.oauth2_user_auth_uri }} + security.oauth2.client.access-token-uri={{ .Values.tim.config.oauth2_access_token_uri }} + security.oauth2.resource.jwk.key-set-uri={{ .Values.tim.config.oauth2_jwk_uri }} + security.allowlist.jwt=0.0.0.0/0 + security.cookie.same-site=Lax + frontpage.redirect.url=http://localhost:3004 + + logging.level.root={{ .Values.tim.config.logging_level_root }} + + spring.datasource.url=jdbc:postgresql://tim-postgresql:5432/tim + spring.datasource.username={{ .Values.global.tim_postgresql.auth.username }} + spring.datasource.password=${POSTGRES_PASSWORD} + spring.datasource.driver-class-name=org.postgresql.Driver + spring.liquibase.change-log=classpath:master.xml + + spring.profiles.active={{ .Values.tim.config.spring_profiles_active }} + + # Legacy integration properties + legacy-portal-integration.sessionCookieName={{ .Values.tim.config.legacy_cookie_name }} + legacy-portal-integration.sessionCookieDomain={{ .Values.tim.config.legacy_cookie_domain }} + legacy-portal-integration.taraAuthDeployedOnLegacyDomain=true + legacy-portal-integration.sessionTimeoutMinutes=30 + legacy-portal-integration.requestIpHeader=X-FORWARDED-FOR + legacy-portal-integration.requestIpAttribute=request_ip + legacy-portal-integration.redirectUrlHeader=Referer + legacy-portal-integration.redirectUrlAttribute=url_redirect + legacy-portal-integration.legacyPortalRefererMarker={{ .Values.tim.config.legacy_referer_marker }} + legacy-portal-integration.legacyUrl={{ .Values.tim.config.legacy_url }} + + # JWT configuration + jwt-integration.signature.key-store=classpath:jwtkeystore.jks + jwt-integration.signature.key-store-password=${KEY_STORE_PASSWORD} + jwt-integration.signature.keyStoreType=JKS + jwt-integration.signature.keyAlias=jwtsign + jwt-integration.signature.issuer={{ .Values.tim.config.jwt_issuer }} + jwt-integration.signature.cookieName=JWTTOKEN + + userIPHeaderName=x-forwarded-for + userIPLoggingPrefix=from IP + userIPLoggingMDCkey=userIP + + headers.contentSecurityPolicy=upgrade-insecure-requests;default-src 'self' 'unsafe-inline' 'unsafe-eval' https://tim.{{ .Values.global.domain }} https://admin.{{ .Values.global.domain }} https://ruuter.{{ .Values.global.domain }}/v2/public/ https://ruuter.{{ .Values.global.domain }}/v2/private/ tim ruuter ruuter-private backoffice-login;object-src 'self';script-src 'self' 'unsafe-inline' 'unsafe-eval' https://{{ .Values.global.domain }} https://admin.{{ .Values.global.domain }} https://tim.{{ .Values.global.domain }};connect-src 'self' https://{{ .Values.global.domain }} https://tim.{{ .Values.global.domain }} https://admin.{{ .Values.global.domain }} https://ruuter.{{ .Values.global.domain }}/v2/public/ https://ruuter.{{ .Values.global.domain }}/v2/private/;frame-src 'self';media-src 'none' + cors.allowedOrigins=http://localhost:8086,http://localhost:3004,http://localhost:8085,http://component-byk-ruuter-public:8086,http://global-classifier.local + 
auth.success.redirect.whitelist=http://localhost:3004/auth/callback,http://localhost:8086,http://global-classifier.local/auth/callback + server.port={{ .Values.tim.service.port }} + jwt.whitelist.period=30000 \ No newline at end of file diff --git a/kubernetes/charts/TIM/templates/deployment-byk-tim.yaml b/kubernetes/charts/TIM/templates/deployment-byk-tim.yaml new file mode 100644 index 00000000..10956879 --- /dev/null +++ b/kubernetes/charts/TIM/templates/deployment-byk-tim.yaml @@ -0,0 +1,46 @@ +{{- if .Values.tim.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.tim.nameOverride | default "tim" }} + labels: + app: {{ .Values.tim.nameOverride | default "tim" }} +spec: + replicas: {{ .Values.tim.replicaCount | default 1 }} + selector: + matchLabels: + app: {{ .Values.tim.nameOverride | default "tim" }} + template: + metadata: + labels: + app: {{ .Values.tim.nameOverride | default "tim" }} + spec: + containers: + - name: {{ .Values.tim.nameOverride | default "tim" }} + image: "{{ .Values.tim.image.repository }}:{{ .Values.tim.image.tag }}" + imagePullPolicy: {{ .Values.tim.image.pullPolicy }} + env: + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: tim-env-secret + key: POSTGRES_PASSWORD + - name: "OAUTH2_CLIENT_SECRET" + valueFrom: + secretKeyRef: + name: "tim-env-secret" + key: "oauth2_client_secret" + - name: "KEY_STORE_PASSWORD" + valueFrom: + secretKeyRef: + name: "tim-env-secret" + key: "jwt_integration_key_store_password" + volumeMounts: + - name: application-properties + mountPath: /workspace/app/src/main/resources/application.properties + subPath: application.properties + volumes: + - name: application-properties + configMap: + name: tim-config +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM/templates/ingress.yaml b/kubernetes/charts/TIM/templates/ingress.yaml new file mode 100644 index 00000000..129ff01c --- /dev/null +++ b/kubernetes/charts/TIM/templates/ingress.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.tim.enabled .Values.tim.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.tim.nameOverride | default "tim" }}-ingress + namespace: {{ .Values.namespace }} + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + cert-manager.io/cluster-issuer: letsencrypt-prod + labels: + name: {{ .Values.tim.nameOverride | default "tim" }}-ingress +spec: + rules: + - host: {{ .Values.tim.ingress.host }} + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: {{ .Values.tim.nameOverride | default "tim" }} + port: + number: {{ .Values.tim.service.port }} + tls: + - hosts: + - {{ .Values.tim.ingress.host }} + secretName: {{ .Values.tim.ingress.secretName }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM/templates/secret-byk-tim.yaml b/kubernetes/charts/TIM/templates/secret-byk-tim.yaml new file mode 100644 index 00000000..0692c868 --- /dev/null +++ b/kubernetes/charts/TIM/templates/secret-byk-tim.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: tim-env-secret +type: Opaque +data: + oauth2_client_secret: "{{ .Values.tim.config.oauth2_client_secret | b64enc }}" + jwt_integration_key_store_password: "{{ .Values.tim.config.jwt_keystore_password | b64enc }}" + POSTGRES_PASSWORD: "{{ "dbadmin" | b64enc }}" diff --git a/kubernetes/charts/TIM/templates/service-byk-tim.yaml 
b/kubernetes/charts/TIM/templates/service-byk-tim.yaml new file mode 100644 index 00000000..1a1722d3 --- /dev/null +++ b/kubernetes/charts/TIM/templates/service-byk-tim.yaml @@ -0,0 +1,15 @@ +{{- if .Values.tim.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.tim.nameOverride | default "tim" }} + labels: + app: {{ .Values.tim.nameOverride | default "tim" }} +spec: + type: {{ .Values.tim.service.type | default "ClusterIP" }} + ports: + - port: {{ .Values.tim.service.port }} + targetPort: {{ .Values.tim.service.port }} + selector: + app: {{ .Values.tim.nameOverride | default "tim" }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM/values.yaml b/kubernetes/charts/TIM/values.yaml new file mode 100644 index 00000000..a057edcd --- /dev/null +++ b/kubernetes/charts/TIM/values.yaml @@ -0,0 +1,45 @@ +global: + domain: localhost + tim_postgresql: + auth: + username: tim +tim: + enabled: true + nameOverride: tim + ingress: + enabled: false + host: tim.example.com + secretName: tim-tls + image: + repository: ghcr.io/buerokratt/tim + tag: pre-apha-2.7.1 + pullPolicy: IfNotPresent + service: + type: ClusterIP + port: 8085 + env: + - POSTGRES_PASSWORD: "tim" + + config: + security_allowlist_jwt: "ruuter-public,ruuter-private,ruuter,ruuter-internal,data-mapper,resql,tim,tim-postgresql,chat-widget,authentication-layer,127.0.0.1,::1" + jwt_keystore_password: "defaultpassword" + jwt_issuer: "tim-issuer" + spring_profiles_active: "dev" + logging_level_root: "DEBUG" + legacy_cookie_name: "PHPSESSID" + legacy_cookie_domain: "example.com" + legacy_referer_marker: "NA" + legacy_url: "NA" + oauth2_client_id: "your-client-id" + oauth2_client_secret: "my-secret-value" + oauth2_client_scope: "read,write" + oauth2_user_auth_uri: "https://tara-test.ria.ee/oidc/authorize" + oauth2_access_token_uri: "https://tara-test.ria.ee/oidc/token" + oauth2_jwk_uri: "https://tara-test.ria.ee/oidc/jwks" + resources: + limits: + cpu: "500m" + memory: "512Mi" + requests: + cpu: "250m" + memory: "256Mi" \ No newline at end of file diff --git a/kubernetes/charts/Vault-Agent-LLM/Chart.yaml b/kubernetes/charts/Vault-Agent-LLM/Chart.yaml new file mode 100644 index 00000000..07e7677e --- /dev/null +++ b/kubernetes/charts/Vault-Agent-LLM/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: Vault-Agent-LLM +description: Vault Agent for LLM Orchestration Service secret injection +type: application +version: 0.1.0 +appVersion: "1.20.3" +dependencies: [] \ No newline at end of file diff --git a/kubernetes/charts/Vault-Agent-LLM/templates/configmap.yaml b/kubernetes/charts/Vault-Agent-LLM/templates/configmap.yaml new file mode 100644 index 00000000..17d90c05 --- /dev/null +++ b/kubernetes/charts/Vault-Agent-LLM/templates/configmap.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.release_name }}-config + labels: + app: {{ .Values.release_name }} + component: vault-agent-llm +data: + agent.hcl: | + # Vault agent configuration for LLM Orchestration Service + + vault { + address = "http://vault:8200" + } + + pid_file = "/agent/out/pidfile" + + auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "/agent/credentials/role_id" + secret_id_file_path = "/agent/credentials/secret_id" + remove_secret_id_file_after_reading = false + } + } + + sink "file" { + config = { + path = "/agent/out/token" + } + } + } + + cache { + default_lease_duration = "1h" + } + + listener "tcp" { + address = "127.0.0.1:8201" + tls_disable = true + } + + 
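+    # The api_proxy stanza below makes the listener on 127.0.0.1:8201 attach the
+    # auto-auth token to proxied Vault requests; workloads outside this pod can
+    # instead read the token from the sink file (/agent/out/token) if they mount
+    # the same vault-agent-token volume.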
api_proxy { + use_auto_auth_token = true + enforce_consistency = "always" + when_inconsistent = "forward" + } diff --git a/kubernetes/charts/Vault-Agent-LLM/templates/deployment.yaml b/kubernetes/charts/Vault-Agent-LLM/templates/deployment.yaml new file mode 100644 index 00000000..ebd785c6 --- /dev/null +++ b/kubernetes/charts/Vault-Agent-LLM/templates/deployment.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} + component: vault-agent-llm +spec: + replicas: {{ .Values.deployment.replicas }} + selector: + matchLabels: + app: {{ .Values.release_name }} + component: vault-agent-llm + template: + metadata: + labels: + app: {{ .Values.release_name }} + component: vault-agent-llm + spec: + {{- if .Values.affinity.enabled }} + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - {{ .Values.vault.serviceName }} + topologyKey: kubernetes.io/hostname + {{- end }} + volumes: + {{- if .Values.volumes.agentCredentials.enabled }} + - name: vault-agent-creds + persistentVolumeClaim: + claimName: vault-agent-creds + {{- end }} + {{- if .Values.volumes.agentToken.enabled }} + - name: vault-agent-token + persistentVolumeClaim: + claimName: vault-agent-token + {{- end }} + {{- if .Values.volumes.agentConfig.enabled }} + - name: vault-agent-config + configMap: + name: {{ .Values.release_name }}-config + defaultMode: 0644 + {{- end }} + containers: + - name: vault-agent + image: "{{ .Values.images.vault.registry }}/{{ .Values.images.vault.repository }}:{{ .Values.images.vault.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - vault + - agent + - -config=/agent/config/agent.hcl + - -log-level=info + env: + - name: VAULT_ADDR + value: {{ .Values.vault.addr | quote }} + - name: VAULT_SKIP_VERIFY + value: "true" + volumeMounts: + {{- if .Values.volumes.agentCredentials.enabled }} + - name: vault-agent-creds + mountPath: {{ .Values.volumes.agentCredentials.mountPath }} + readOnly: true + {{- end }} + {{- if .Values.volumes.agentToken.enabled }} + - name: vault-agent-token + mountPath: {{ .Values.volumes.agentToken.mountPath }} + {{- end }} + {{- if .Values.volumes.agentConfig.enabled }} + - name: vault-agent-config + mountPath: {{ .Values.volumes.agentConfig.mountPath }} + readOnly: true + {{- end }} + {{- if .Values.probes.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.probes.livenessProbe.httpGet.path }} + port: {{ .Values.probes.livenessProbe.httpGet.port }} + initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.probes.livenessProbe.periodSeconds }} + {{- end }} + {{- if .Values.probes.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.probes.readinessProbe.httpGet.path }} + port: {{ .Values.probes.readinessProbe.httpGet.port }} + initialDelaySeconds: {{ .Values.probes.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.probes.readinessProbe.periodSeconds }} + {{- end }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- end }} + securityContext: + capabilities: + add: + - IPC_LOCK \ No newline at end of file diff --git a/kubernetes/charts/Vault-Agent-LLM/values.yaml b/kubernetes/charts/Vault-Agent-LLM/values.yaml new file mode 100644 index 00000000..29b630f9 --- /dev/null +++ b/kubernetes/charts/Vault-Agent-LLM/values.yaml @@ -0,0 +1,101 @@ 
+enabled: true + +images: + vault: + registry: "docker.io" + repository: "hashicorp/vault" + tag: "1.20.3" + +release_name: "vault-agent-llm" + +# Vault service dependency +vault: + serviceName: "vault" + addr: "http://vault:8200" + +# Pod affinity to ensure co-location with Vault +affinity: + enabled: true + # Ensure this pod is scheduled on same node as Vault pod + colocateWithVault: true + +# Deployment configuration +deployment: + replicas: 1 + # Use Deployment for consistent agent behavior + type: "Deployment" + +# Shared volumes for vault ecosystem +volumes: + agentCredentials: + enabled: true + mountPath: "/agent/credentials" + # Uses same PVC as vault-init + + agentToken: + enabled: true + mountPath: "/agent/out" + # Uses same PVC as vault-init + + agentConfig: + enabled: true + mountPath: "/agent/config" + # ConfigMap for vault agent configuration + +# Vault agent configuration +agent: + enabled: true + config: + # Auto-auth configuration + autoAuth: + method: "kubernetes" + mountPath: "auth/kubernetes" + + # Cache configuration + cache: + enabled: true + + # Template configuration for secret injection + templates: + enabled: true + secrets: + - name: "llm-secrets" + path: "/agent/out/secrets.env" + template: | + {{- with secret "secret/llm-orchestration" -}} + OPENAI_API_KEY={{ .Data.data.openai_api_key }} + ANTHROPIC_API_KEY={{ .Data.data.anthropic_api_key }} + AZURE_OPENAI_API_KEY={{ .Data.data.azure_openai_api_key }} + AZURE_OPENAI_ENDPOINT={{ .Data.data.azure_openai_endpoint }} + OLLAMA_HOST={{ .Data.data.ollama_host }} + VECTOR_DB_HOST={{ .Data.data.vector_db_host }} + VECTOR_DB_PORT={{ .Data.data.vector_db_port }} + VECTOR_DB_COLLECTION={{ .Data.data.vector_db_collection }} + {{- end -}} + +pullPolicy: IfNotPresent + +resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" + +probes: + livenessProbe: + enabled: false + httpGet: + path: "/v1/sys/health" + port: 8200 + initialDelaySeconds: 30 + periodSeconds: 30 + + readinessProbe: + enabled: false + httpGet: + path: "/v1/sys/health" + port: 8200 + initialDelaySeconds: 10 + periodSeconds: 10 \ No newline at end of file diff --git a/kubernetes/charts/Vault-Init/Chart.yaml b/kubernetes/charts/Vault-Init/Chart.yaml new file mode 100644 index 00000000..83178bb8 --- /dev/null +++ b/kubernetes/charts/Vault-Init/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Vault-Init +description: Vault initialization job for RAG Module +version: 0.1.0 +appVersion: "1.20.3" +type: application \ No newline at end of file diff --git a/kubernetes/charts/Vault-Init/templates/configmap.yaml b/kubernetes/charts/Vault-Init/templates/configmap.yaml new file mode 100644 index 00000000..c0d55b4b --- /dev/null +++ b/kubernetes/charts/Vault-Init/templates/configmap.yaml @@ -0,0 +1,186 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.release_name }}-script + labels: + app: {{ .Values.release_name }} + component: vault-init +data: + {{ .Values.initScript.filename }}: | + #!/bin/sh + set -e + + VAULT_ADDR="${VAULT_ADDR:-http://vault:8200}" + UNSEAL_KEYS_FILE="/vault/data/unseal-keys.json" + INIT_FLAG="/vault/data/.initialized" + + echo "=== Vault Initialization Script ===" + + # Wait for Vault to be ready + echo "Waiting for Vault..." + for i in $(seq 1 30); do + if wget -q -O- "$VAULT_ADDR/v1/sys/health" >/dev/null 2>&1; then + echo "Vault is ready" + break + fi + echo "Waiting... ($i/30)" + sleep 2 + done + + # Check if this is first time + if [ ! 
-f "$INIT_FLAG" ]; then + echo "=== FIRST TIME DEPLOYMENT ===" + + # Initialize Vault + echo "Initializing Vault..." + wget -q -O- --post-data='{"secret_shares":5,"secret_threshold":3}' \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/init" > "$UNSEAL_KEYS_FILE" + + ROOT_TOKEN=$(grep -o '"root_token":"[^"]*"' "$UNSEAL_KEYS_FILE" | cut -d':' -f2 | tr -d '"') + export VAULT_TOKEN="$ROOT_TOKEN" + + # Extract unseal keys + KEY1=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '2p' | tr -d '"') + KEY2=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '3p' | tr -d '"') + KEY3=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '4p' | tr -d '"') + + # Unseal Vault + echo "Unsealing Vault..." + wget -q -O- --post-data="{\"key\":\"$KEY1\"}" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/unseal" >/dev/null + + wget -q -O- --post-data="{\"key\":\"$KEY2\"}" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/unseal" >/dev/null + + wget -q -O- --post-data="{\"key\":\"$KEY3\"}" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/unseal" >/dev/null + + sleep 2 + echo "Vault unsealed" + + # Enable KV v2 + echo "Enabling KV v2 secrets engine..." + wget -q -O- --post-data='{"type":"kv","options":{"version":"2"}}' \ + --header="X-Vault-Token: $ROOT_TOKEN" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/mounts/secret" >/dev/null 2>&1 || echo "KV already enabled" + + # Enable AppRole + echo "Enabling AppRole..." + wget -q -O- --post-data='{"type":"approle"}' \ + --header="X-Vault-Token: $ROOT_TOKEN" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/auth/approle" >/dev/null 2>&1 || echo "AppRole already enabled" + + # Create policy + echo "Creating llm-orchestration policy..." + POLICY='path "secret/metadata/llm/*" { capabilities = ["list", "delete"] } + path "secret/data/llm/*" { capabilities = ["create", "read", "update", "delete"] } + path "auth/token/lookup-self" { capabilities = ["read"] } + path "secret/metadata/embeddings/*" { capabilities = ["list", "delete"] } + path "secret/data/embeddings/*" { capabilities = ["create", "read", "update", "delete"] }' + + POLICY_JSON=$(echo "$POLICY" | jq -Rs '{"policy":.}') + wget -q -O- --post-data="$POLICY_JSON" \ + --header="X-Vault-Token: $ROOT_TOKEN" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/policies/acl/llm-orchestration" >/dev/null + + # Create AppRole + echo "Creating llm-orchestration-service AppRole..." + wget -q -O- --post-data='{"token_policies":["llm-orchestration"],"token_no_default_policy":true,"token_ttl":"1h","token_max_ttl":"24h","secret_id_ttl":"24h","secret_id_num_uses":0,"bind_secret_id":true}' \ + --header="X-Vault-Token: $ROOT_TOKEN" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service" >/dev/null + + # Ensure credentials directory exists + mkdir -p /agent/credentials + + # Get role_id + echo "Getting role_id..." + ROLE_ID=$(wget -q -O- \ + --header="X-Vault-Token: $ROOT_TOKEN" \ + "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service/role-id" | \ + grep -o '"role_id":"[^"]*"' | cut -d':' -f2 | tr -d '"') + echo "$ROLE_ID" > /agent/credentials/role_id + + # Generate secret_id + echo "Generating secret_id..." 
+ SECRET_ID=$(wget -q -O- --post-data='' \ + --header="X-Vault-Token: $ROOT_TOKEN" \ + "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service/secret-id" | \ + grep -o '"secret_id":"[^"]*"' | cut -d':' -f2 | tr -d '"') + echo "$SECRET_ID" > /agent/credentials/secret_id + + chmod 644 /agent/credentials/role_id /agent/credentials/secret_id + + # Mark as initialized + touch "$INIT_FLAG" + echo "=== First time setup complete ===" + + else + echo "=== SUBSEQUENT DEPLOYMENT ===" + + # Check if Vault is sealed + SEALED=$(wget -q -O- "$VAULT_ADDR/v1/sys/seal-status" | grep -o '"sealed":[^,}]*' | cut -d':' -f2) + + if [ "$SEALED" = "true" ]; then + echo "Vault is sealed. Unsealing..." + + # Load unseal keys + KEY1=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '2p' | tr -d '"') + KEY2=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '3p' | tr -d '"') + KEY3=$(grep -o '"keys":\[[^]]*\]' "$UNSEAL_KEYS_FILE" | grep -o '"[^"]*"' | sed -n '4p' | tr -d '"') + + wget -q -O- --post-data="{\"key\":\"$KEY1\"}" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/unseal" >/dev/null + + wget -q -O- --post-data="{\"key\":\"$KEY2\"}" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/unseal" >/dev/null + + wget -q -O- --post-data="{\"key\":\"$KEY3\"}" \ + --header='Content-Type: application/json' \ + "$VAULT_ADDR/v1/sys/unseal" >/dev/null + + sleep 2 + echo "Vault unsealed" + + # Get root token + ROOT_TOKEN=$(grep -o '"root_token":"[^"]*"' "$UNSEAL_KEYS_FILE" | cut -d':' -f2 | tr -d '"') + export VAULT_TOKEN="$ROOT_TOKEN" + + # Ensure credentials directory exists + mkdir -p /agent/credentials + + # Regenerate secret_id after unseal + echo "Regenerating secret_id..." + SECRET_ID=$(wget -q -O- --post-data='' \ + --header="X-Vault-Token: $ROOT_TOKEN" \ + "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service/secret-id" | \ + grep -o '"secret_id":"[^"]*"' | cut -d':' -f2 | tr -d '"') + echo "$SECRET_ID" > /agent/credentials/secret_id + chmod 644 /agent/credentials/secret_id + + # Ensure role_id exists + if [ ! -f /agent/credentials/role_id ]; then + echo "Copying role_id..." + mkdir -p /agent/credentials + ROLE_ID=$(wget -q -O- \ + --header="X-Vault-Token: $ROOT_TOKEN" \ + "$VAULT_ADDR/v1/auth/approle/role/llm-orchestration-service/role-id" | \ + grep -o '"role_id":"[^"]*"' | cut -d':' -f2 | tr -d '"') + echo "$ROLE_ID" > /agent/credentials/role_id + chmod 644 /agent/credentials/role_id + fi + else + echo "Vault is unsealed. No action needed." 
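+      # Vault is already unsealed; this path leaves the previously written data
+      # and AppRole credentials on the persistent volumes untouched.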
+ fi + fi + + echo "=== Vault init complete ===" \ No newline at end of file diff --git a/kubernetes/charts/Vault-Init/templates/job.yaml b/kubernetes/charts/Vault-Init/templates/job.yaml new file mode 100644 index 00000000..96f2ada5 --- /dev/null +++ b/kubernetes/charts/Vault-Init/templates/job.yaml @@ -0,0 +1,93 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Values.release_name }} + labels: + app: {{ .Values.release_name }} + component: vault-init +spec: + backoffLimit: {{ .Values.job.backoffLimit }} + template: + metadata: + labels: + app: {{ .Values.release_name }} + component: vault-init + spec: + restartPolicy: {{ .Values.job.restartPolicy }} + {{- if .Values.affinity.enabled }} + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - {{ .Values.vault.serviceName }} + topologyKey: kubernetes.io/hostname + {{- end }} + volumes: + {{- if .Values.volumes.vaultData.enabled }} + - name: vault-data + persistentVolumeClaim: + claimName: vault-storage-{{ .Values.vault.serviceName }}-0 + {{- end }} + {{- if .Values.volumes.agentCredentials.enabled }} + - name: vault-agent-creds + persistentVolumeClaim: + claimName: vault-agent-creds + {{- end }} + {{- if .Values.volumes.agentToken.enabled }} + - name: vault-agent-token + persistentVolumeClaim: + claimName: vault-agent-token + {{- end }} + {{- if .Values.initScript.enabled }} + - name: init-script + configMap: + name: {{ .Values.release_name }}-script + defaultMode: 0755 + {{- end }} + containers: + - name: vault-init + image: "{{ .Values.images.vault.registry }}/{{ .Values.images.vault.repository }}:{{ .Values.images.vault.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + command: ["/bin/sh", "-c"] + args: + - | + # Install dependencies and setup permissions + apk add --no-cache curl jq + mkdir -p /agent/credentials /agent/out + chmod -R 755 /agent/credentials + chmod -R 770 /agent/out + echo "Permissions set successfully" + + # Run the init script + /bin/sh /scripts/{{ .Values.initScript.filename }} + env: + - name: VAULT_ADDR + value: {{ .Values.vault.addr | quote }} + - name: VAULT_SKIP_VERIFY + value: "true" + volumeMounts: + {{- if .Values.volumes.vaultData.enabled }} + - name: vault-data + mountPath: {{ .Values.volumes.vaultData.mountPath }} + {{- end }} + {{- if .Values.volumes.agentCredentials.enabled }} + - name: vault-agent-creds + mountPath: {{ .Values.volumes.agentCredentials.mountPath }} + {{- end }} + {{- if .Values.volumes.agentToken.enabled }} + - name: vault-agent-token + mountPath: {{ .Values.volumes.agentToken.mountPath }} + {{- end }} + {{- if .Values.initScript.enabled }} + - name: init-script + mountPath: "/scripts" + readOnly: true + {{- end }} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Vault-Init/templates/pvc.yaml b/kubernetes/charts/Vault-Init/templates/pvc.yaml new file mode 100644 index 00000000..7084dc3c --- /dev/null +++ b/kubernetes/charts/Vault-Init/templates/pvc.yaml @@ -0,0 +1,37 @@ +{{- if .Values.volumes.agentCredentials.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vault-agent-creds + labels: + app: {{ .Values.release_name }} + component: vault-init +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.volumes.agentCredentials.size }} + {{- if .Values.volumes.agentCredentials.storageClass }} + storageClassName: {{ 
.Values.volumes.agentCredentials.storageClass }} + {{- end }} +--- +{{- end }} +{{- if .Values.volumes.agentToken.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vault-agent-token + labels: + app: {{ .Values.release_name }} + component: vault-init +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.volumes.agentToken.size }} + {{- if .Values.volumes.agentToken.storageClass }} + storageClassName: {{ .Values.volumes.agentToken.storageClass }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Vault-Init/values.yaml b/kubernetes/charts/Vault-Init/values.yaml new file mode 100644 index 00000000..207da592 --- /dev/null +++ b/kubernetes/charts/Vault-Init/values.yaml @@ -0,0 +1,60 @@ +enabled: true + +images: + vault: + registry: "docker.io" + repository: "hashicorp/vault" + tag: "1.20.3" + +release_name: "vault-init" + +# Vault service dependency +vault: + serviceName: "vault" + addr: "http://vault:8200" + +# Pod affinity to ensure co-location with Vault +affinity: + enabled: true + # Ensure this pod is scheduled on same node as Vault pod + colocateWithVault: true + + +job: + backoffLimit: 3 + restartPolicy: "Never" + + +volumes: + vaultData: + enabled: true + mountPath: "/vault/data" + + agentCredentials: + enabled: true + mountPath: "/agent/credentials" + size: "100Mi" + accessMode: "ReadWriteMany" + storageClass: "" + + agentToken: + enabled: true + mountPath: "/agent/out" + size: "100Mi" + accessMode: "ReadWriteMany" + storageClass: "" + +# Init script configuration +initScript: + enabled: true + filename: "vault-init.sh" + +pullPolicy: IfNotPresent + +resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" \ No newline at end of file diff --git a/kubernetes/charts/Vault/Chart.yaml b/kubernetes/charts/Vault/Chart.yaml new file mode 100644 index 00000000..4b6ffec3 --- /dev/null +++ b/kubernetes/charts/Vault/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: Vault +description: HashiCorp Vault secrets management for RAG Module +version: 0.1.0 +appVersion: "1.20.3" +type: application \ No newline at end of file diff --git a/kubernetes/charts/Vault/templates/configmap.yaml b/kubernetes/charts/Vault/templates/configmap.yaml new file mode 100644 index 00000000..1e32fd90 --- /dev/null +++ b/kubernetes/charts/Vault/templates/configmap.yaml @@ -0,0 +1,66 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Values.release_name }}-config" + labels: + app: "{{ .Values.release_name }}" + component: vault +data: + vault.hcl: | + # HashiCorp Vault Server Configuration + # Production-ready configuration for LLM Orchestration Service + + # Storage backend - Raft for high availability + storage "raft" { + path = "/vault/file" + node_id = "vault-node-1" + + # Retry join configuration for clustering (single node for now) + retry_join { + leader_api_addr = "http://vault:8200" + } + } + + # HTTP listener configuration + listener "tcp" { + address = "0.0.0.0:8200" + tls_disable = true + + # Enable CORS for web UI access + cors_enabled = true + cors_allowed_origins = [ + "http://localhost:8200", + "http://vault:8200" + ] + } + + # Cluster listener for HA (required even for single node) + listener "tcp" { + address = "0.0.0.0:8201" + cluster_addr = "http://0.0.0.0:8201" + tls_disable = true + } + + # API and cluster addresses + api_addr = "http://vault:8200" + cluster_addr = "http://vault:8201" + + # Security and performance settings + disable_mlock 
= false + disable_cache = false + ui = false + + # Default lease and maximum lease durations + default_lease_ttl = "168h" # 7 days + max_lease_ttl = "720h" # 30 days + + # Logging configuration + log_level = "INFO" + log_format = "json" + + # Development settings (remove in production) + # Note: In production, you should not use dev mode + # and should properly initialize and unseal the vault + +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Vault/templates/service-byk-vault.yaml b/kubernetes/charts/Vault/templates/service-byk-vault.yaml new file mode 100644 index 00000000..b7501f94 --- /dev/null +++ b/kubernetes/charts/Vault/templates/service-byk-vault.yaml @@ -0,0 +1,23 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: vault +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "ClusterIP" }} + {{- if .Values.service.headless }} + clusterIP: None + {{- end }} + {{- end }} + selector: + app: "{{ .Values.release_name }}" + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Vault/templates/statefulset-byk-vault.yaml b/kubernetes/charts/Vault/templates/statefulset-byk-vault.yaml new file mode 100644 index 00000000..b68fbbcb --- /dev/null +++ b/kubernetes/charts/Vault/templates/statefulset-byk-vault.yaml @@ -0,0 +1,122 @@ +{{- if .Values.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: "{{ .Values.release_name }}" + labels: + app: "{{ .Values.release_name }}" + component: vault +spec: + serviceName: "{{ .Values.release_name }}" + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + component: vault + spec: + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.initContainer.enabled }} + initContainers: + - name: vault-init + image: "{{ .Values.initContainer.image.registry }}/{{ .Values.initContainer.image.repository }}:{{ .Values.initContainer.image.tag }}" + command: + - sh + - -c + - | + chown -R 100:1000 /vault/file + chmod -R 755 /vault/file + volumeMounts: + - name: vault-storage + mountPath: {{ .Values.persistence.mountPath }} + securityContext: + runAsUser: 0 + {{- end }} + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.images.vault.registry }}/{{ .Values.images.vault.repository }}:{{ .Values.images.vault.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - vault + - server + - -config=/vault/config/vault.hcl + ports: + - name: http + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: cluster + containerPort: 8201 + protocol: TCP + env: + - name: VAULT_ADDR + value: "http://0.0.0.0:{{ .Values.service.targetPort }}" + - name: VAULT_SKIP_VERIFY_CONFIG_PERMISSIONS + value: "true" + {{- if .Values.healthcheck.enabled }} + livenessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ 
.Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + readinessProbe: + httpGet: + path: "{{ .Values.healthcheck.httpPath }}" + port: {{ .Values.service.targetPort }} + initialDelaySeconds: {{ .Values.healthcheck.initialDelaySeconds }} + periodSeconds: {{ .Values.healthcheck.periodSeconds }} + timeoutSeconds: {{ .Values.healthcheck.timeoutSeconds }} + failureThreshold: {{ .Values.healthcheck.failureThreshold }} + {{- end }} + volumeMounts: + - name: vault-config + mountPath: /vault/config + readOnly: true + {{- if .Values.persistence.enabled }} + - name: vault-storage + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" + securityContext: + capabilities: + add: + - IPC_LOCK + volumes: + - name: vault-config + configMap: + name: "{{ .Values.release_name }}-config" + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: vault-storage + labels: + app: "{{ .Values.release_name }}" + component: vault + spec: + accessModes: + - {{ .Values.persistence.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Vault/values.yaml b/kubernetes/charts/Vault/values.yaml new file mode 100644 index 00000000..253aa938 --- /dev/null +++ b/kubernetes/charts/Vault/values.yaml @@ -0,0 +1,71 @@ +replicas: 1 +enabled: true + +images: + vault: + registry: "docker.io" + repository: "hashicorp/vault" + tag: "1.20.3" + +release_name: "vault" + +service: + type: ClusterIP + # Set to true for headless service (direct pod access) + headless: false + port: 8200 + targetPort: 8200 + +persistence: + enabled: true + storageClass: "" + accessMode: ReadWriteMany + size: 10Gi + mountPath: "/vault/file" + +# Vault configuration +vault: + config: + # File storage backend + storage_file_path: "/vault/file" + # API settings + disable_mlock: true + ui: true + # Network settings + listener_address: "0.0.0.0:8200" + cluster_address: "0.0.0.0:8201" + +resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1Gi" + cpu: "500m" + +pullPolicy: IfNotPresent + +healthcheck: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 20 + successThreshold: 1 + # Vault health endpoint + httpPath: "/v1/sys/health" + +securityContext: + enabled: true + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + +# Init container configuration +initContainer: + enabled: true + image: + registry: "docker.io" + repository: "busybox" + tag: "1.35" \ No newline at end of file diff --git a/kubernetes/charts/database/Chart.lock b/kubernetes/charts/database/Chart.lock new file mode 100644 index 00000000..641f6d08 --- /dev/null +++ b/kubernetes/charts/database/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 12.2.6 +digest: sha256:6f50554d914d878d490c46307f120b87d39854e42f81411b13ffdd23aad21cb6 +generated: "2025-12-02T13:43:50.4497212+05:30" diff --git a/kubernetes/charts/database/Chart.yaml 
b/kubernetes/charts/database/Chart.yaml new file mode 100644 index 00000000..2facc943 --- /dev/null +++ b/kubernetes/charts/database/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v2 +name: database +description: PostgreSQL databases for RAG +type: application +version: 0.1.0 + +dependencies: + - name: postgresql + version: 12.2.6 + repository: https://charts.bitnami.com/bitnami + alias: rag-search-db + \ No newline at end of file diff --git a/kubernetes/charts/database/values.yaml b/kubernetes/charts/database/values.yaml new file mode 100644 index 00000000..d7841e56 --- /dev/null +++ b/kubernetes/charts/database/values.yaml @@ -0,0 +1,14 @@ +rag-search-db: + fullnameOverride: rag-search-db + image: + tag: latest + auth: + postgresPassword: dbadmin + username: postgres + password: dbadmin + database: rag-search + primary: + persistence: + enabled: true + size: 8Gi + diff --git a/kubernetes/charts/minio/Chart.yaml b/kubernetes/charts/minio/Chart.yaml new file mode 100644 index 00000000..e2bd6d5b --- /dev/null +++ b/kubernetes/charts/minio/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: minio +description: minio object storage server +type: application +version: 0.1.0 +appVersion: 2.0.0 \ No newline at end of file diff --git a/kubernetes/charts/minio/templates/deployment-minio.yaml b/kubernetes/charts/minio/templates/deployment-minio.yaml new file mode 100644 index 00000000..2012d5bf --- /dev/null +++ b/kubernetes/charts/minio/templates/deployment-minio.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.release_name }}" +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: "{{ .Values.release_name }}" + template: + metadata: + labels: + app: "{{ .Values.release_name }}" + spec: + initContainers: + - name: create-buckets + image: busybox:latest + command: + - sh + - -c + - | + mkdir -p /data/rag-search/resources/langfuse + mkdir -p /data/rag-search/resources/models + mkdir -p /data/rag-search/resources/datasets + mkdir -p /data/rag-search/resources/qdrant + mkdir -p /data/rag-search/resources/system + echo "Bucket directories created successfully" + volumeMounts: + - name: minio-data + mountPath: /data + containers: + - name: "{{ .Values.release_name }}" + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" + command: + - minio + - server + - /data + - --console-address + - :9001 + ports: + - containerPort: {{ .Values.ports.api }} + name: api + protocol: TCP + - containerPort: {{ .Values.ports.console }} + name: console + protocol: TCP + env: +{{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: "{{ $value }}" +{{- end }} + volumeMounts: + - name: minio-data + mountPath: /data + volumes: + - name: minio-data + persistentVolumeClaim: + claimName: pvc-minio-data + resources: + requests: + memory: "{{ .Values.resources.requests.memory }}" + cpu: "{{ .Values.resources.requests.cpu }}" + limits: + memory: "{{ .Values.resources.limits.memory }}" + cpu: "{{ .Values.resources.limits.cpu }}" \ No newline at end of file diff --git a/kubernetes/charts/minio/templates/ingress-minio.yaml b/kubernetes/charts/minio/templates/ingress-minio.yaml new file mode 100644 index 00000000..390c93aa --- /dev/null +++ b/kubernetes/charts/minio/templates/ingress-minio.yaml @@ -0,0 +1,37 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: "{{ .Values.release_name }}-ingress" + annotations: + kubernetes.io/ingress.class: "nginx" + 
nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + nginx.ingress.kubernetes.io/proxy-connect-timeout: "10s" + nginx.ingress.kubernetes.io/proxy-send-timeout: "600s" + nginx.ingress.kubernetes.io/proxy-read-timeout: "600s" + nginx.ingress.kubernetes.io/cors-allow-origin: "*" + nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, PUT, DELETE, OPTIONS" + nginx.ingress.kubernetes.io/cors-allow-headers: "Origin, X-Requested-With, Content-Type, Cache-Control, Connection, Accept" + cert-manager.io/cluster-issuer: "letsencrypt-prod-issuer" + labels: + name: "{{ .Values.release_name }}-ingress" +spec: + rules: + - host: "{{ .Values.ingress.host }}" + http: + paths: + - pathType: Prefix + path: "{{ .Values.ingress.path }}" + backend: + service: + name: "{{ .Values.release_name }}" + port: + number: {{ .Values.ports.api }} + {{- if .Values.ingress.tls.enabled }} + tls: + - hosts: + - "{{ .Values.ingress.host }}" + secretName: "{{ .Values.secretname }}" + {{- end }} +{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/minio/templates/pvc-minio-data.yaml b/kubernetes/charts/minio/templates/pvc-minio-data.yaml new file mode 100644 index 00000000..2794e301 --- /dev/null +++ b/kubernetes/charts/minio/templates/pvc-minio-data.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-minio-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.volumes.minio_data.size }} \ No newline at end of file diff --git a/kubernetes/charts/minio/templates/service-minio.yaml b/kubernetes/charts/minio/templates/service-minio.yaml new file mode 100644 index 00000000..9f52de72 --- /dev/null +++ b/kubernetes/charts/minio/templates/service-minio.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Values.release_name }}" +spec: + selector: + app: "{{ .Values.release_name }}" + ports: + - port: {{ .Values.ports.api }} + targetPort: api + protocol: TCP + name: api + - port: {{ .Values.ports.console }} + targetPort: console + protocol: TCP + name: console \ No newline at end of file diff --git a/kubernetes/charts/minio/values.yaml b/kubernetes/charts/minio/values.yaml new file mode 100644 index 00000000..5919e678 --- /dev/null +++ b/kubernetes/charts/minio/values.yaml @@ -0,0 +1,41 @@ +release_name: "minio" + +image: + registry: "docker.io" + repository: "minio/minio" + tag: "latest" + +replicas: 1 + +resources: + requests: + memory: "500Mi" + cpu: "250m" + limits: + memory: "1Gi" + cpu: "500m" + +env: + MINIO_ROOT_USER: "minioadmin" + MINIO_ROOT_PASSWORD: "minioadmin" + MINIO_BROWSER_REDIRECT_URL: "http://localhost:9001" + +volumes: + minio_data: + type: pvc + size: "5Gi" + +ports: + api: 9000 + console: 9001 + +ingress: + enabled: true + host: "domain" + path: "/" + tls: + enabled: true +secretname: "minio-tls" + +istio: + enabled: false \ No newline at end of file diff --git a/kubernetes/dashboard-admin.yaml b/kubernetes/dashboard-admin.yaml new file mode 100644 index 00000000..04855539 --- /dev/null +++ b/kubernetes/dashboard-admin.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kubernetes-dashboard +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: admin-user + namespace: kubernetes-dashboard \ No newline at end of 
file diff --git a/kubernetes/values.yaml b/kubernetes/values.yaml new file mode 100644 index 00000000..92c78531 --- /dev/null +++ b/kubernetes/values.yaml @@ -0,0 +1,78 @@ +# Global configuration for RAG Module +global: + domain: "rag-module.local" + namespace: "rag-module" + storageClass: "standard" + +# Individual service configurations +database: + enabled: true + +TIM-database: + enabled: true + +resql: + enabled: true + +ruuter-public: + enabled: true + +ruuter-private: + enabled: true + +data-mapper: + enabled: true + +TIM: + enabled: true + +Authentication-Layer: + enabled: true + +CronManager: + enabled: true + +GUI: + enabled: true + +Loki: + enabled: true + +Grafana: + enabled: true + +S3-Ferry: + enabled: true + +minio: + enabled: true + +Redis: + enabled: true + +Qdrant: + enabled: true + +ClickHouse: + enabled: true + +Langfuse-Web: + enabled: true + +Langfuse-Worker: + enabled: true + +Vault: + enabled: true + +Vault-Init: + enabled: true + +Vault-Agent-LLM: + enabled: true + +LLM-Orchestration-Service: + enabled: true + +Liquibase: + enabled: true From 3becd2ad8d4afed22f65905bae9c83d3e63ce010 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Mon, 19 Jan 2026 11:05:43 +0530 Subject: [PATCH 2/3] Remove outdated Vite configuration files and associated plugins --- ....timestamp-1767946562610-7e8d2a8c1f401.mjs | 70 ----------------- ....timestamp-1767946574215-f7ac6ce2fedaa.mjs | 70 ----------------- ....timestamp-1768278822370-7924bd5f138d9.mjs | 77 ------------------- ....timestamp-1768278833602-e10c19bbae925.mjs | 77 ------------------- 4 files changed, 294 deletions(-) delete mode 100644 GUI/vite.config.ts.timestamp-1767946562610-7e8d2a8c1f401.mjs delete mode 100644 GUI/vite.config.ts.timestamp-1767946574215-f7ac6ce2fedaa.mjs delete mode 100644 GUI/vite.config.ts.timestamp-1768278822370-7924bd5f138d9.mjs delete mode 100644 GUI/vite.config.ts.timestamp-1768278833602-e10c19bbae925.mjs diff --git a/GUI/vite.config.ts.timestamp-1767946562610-7e8d2a8c1f401.mjs b/GUI/vite.config.ts.timestamp-1767946562610-7e8d2a8c1f401.mjs deleted file mode 100644 index b770c4c5..00000000 --- a/GUI/vite.config.ts.timestamp-1767946562610-7e8d2a8c1f401.mjs +++ /dev/null @@ -1,70 +0,0 @@ -// vite.config.ts -import { defineConfig } from "file:///app/node_modules/vite/dist/node/index.js"; -import react from "file:///app/node_modules/@vitejs/plugin-react/dist/index.mjs"; -import tsconfigPaths from "file:///app/node_modules/vite-tsconfig-paths/dist/index.mjs"; -import svgr from "file:///app/node_modules/vite-plugin-svgr/dist/index.mjs"; -import path from "path"; - -// vitePlugin.js -function removeHiddenMenuItems(str) { - var _a, _b; - const badJson = str.replace("export default [", "[").replace("];", "]"); - const correctJson = badJson.replace(/(['"])?([a-z0-9A-Z_]+)(['"])?:/g, '"$2": '); - const isHiddenFeaturesEnabled = ((_a = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _a.toLowerCase().trim()) === "true" || ((_b = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _b.toLowerCase().trim()) === "1"; - const json = removeHidden(JSON.parse(correctJson), isHiddenFeaturesEnabled); - const updatedJson = JSON.stringify(json); - return "export default " + updatedJson + ";"; -} -function removeHidden(menuItems, isHiddenFeaturesEnabled) { - var _a; - if (!menuItems) - return menuItems; - const arr = (_a = menuItems == null ? void 0 : menuItems.filter((x) => !x.hidden)) == null ? 
void 0 : _a.filter((x) => isHiddenFeaturesEnabled || x.hiddenMode !== "production"); - for (const a of arr) { - a.children = removeHidden(a.children, isHiddenFeaturesEnabled); - } - return arr; -} - -// vite.config.ts -var __vite_injected_original_dirname = "/app"; -var vite_config_default = defineConfig({ - envPrefix: "REACT_APP_", - plugins: [ - react(), - tsconfigPaths(), - svgr(), - { - name: "removeHiddenMenuItemsPlugin", - transform: (str, id) => { - if (!id.endsWith("/menu-structure.json")) - return str; - return removeHiddenMenuItems(str); - } - } - ], - base: "/rag-search", - build: { - outDir: "./build", - target: "es2015", - emptyOutDir: true - }, - server: { - headers: { - ...process.env.REACT_APP_CSP && { - "Content-Security-Policy": process.env.REACT_APP_CSP - } - }, - allowedHosts: ["est-rag-rtc.rootcode.software", "localhost", "127.0.0.1"] - }, - resolve: { - alias: { - "~@fontsource": path.resolve(__vite_injected_original_dirname, "node_modules/@fontsource"), - "@": `${path.resolve(__vite_injected_original_dirname, "./src")}` - } - } -}); -export { - vite_config_default as default -}; -//# sourceMappingURL=data:application/json;base64,ewogICJ2ZXJzaW9uIjogMywKICAic291cmNlcyI6IFsidml0ZS5jb25maWcudHMiLCAidml0ZVBsdWdpbi5qcyJdLAogICJzb3VyY2VzQ29udGVudCI6IFsiY29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2Rpcm5hbWUgPSBcIi9hcHBcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfZmlsZW5hbWUgPSBcIi9hcHAvdml0ZS5jb25maWcudHNcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfaW1wb3J0X21ldGFfdXJsID0gXCJmaWxlOi8vL2FwcC92aXRlLmNvbmZpZy50c1wiO2ltcG9ydCB7IGRlZmluZUNvbmZpZyB9IGZyb20gJ3ZpdGUnO1xuaW1wb3J0IHJlYWN0IGZyb20gJ0B2aXRlanMvcGx1Z2luLXJlYWN0JztcbmltcG9ydCB0c2NvbmZpZ1BhdGhzIGZyb20gJ3ZpdGUtdHNjb25maWctcGF0aHMnO1xuaW1wb3J0IHN2Z3IgZnJvbSAndml0ZS1wbHVnaW4tc3Zncic7XG5pbXBvcnQgcGF0aCBmcm9tICdwYXRoJztcbmltcG9ydCB7IHJlbW92ZUhpZGRlbk1lbnVJdGVtcyB9IGZyb20gJy4vdml0ZVBsdWdpbic7XG5cbi8vIGh0dHBzOi8vdml0ZWpzLmRldi9jb25maWcvXG5leHBvcnQgZGVmYXVsdCBkZWZpbmVDb25maWcoe1xuICBlbnZQcmVmaXg6ICdSRUFDVF9BUFBfJyxcbiAgcGx1Z2luczogW1xuICAgIHJlYWN0KCksXG4gICAgdHNjb25maWdQYXRocygpLFxuICAgIHN2Z3IoKSxcbiAgICB7XG4gICAgICBuYW1lOiAncmVtb3ZlSGlkZGVuTWVudUl0ZW1zUGx1Z2luJyxcbiAgICAgIHRyYW5zZm9ybTogKHN0ciwgaWQpID0+IHtcbiAgICAgICAgaWYoIWlkLmVuZHNXaXRoKCcvbWVudS1zdHJ1Y3R1cmUuanNvbicpKVxuICAgICAgICAgIHJldHVybiBzdHI7XG4gICAgICAgIHJldHVybiByZW1vdmVIaWRkZW5NZW51SXRlbXMoc3RyKTtcbiAgICAgIH0sXG4gICAgfSxcbiAgXSxcbiAgYmFzZTogJy9yYWctc2VhcmNoJyxcbiAgYnVpbGQ6IHtcbiAgICBvdXREaXI6ICcuL2J1aWxkJyxcbiAgICB0YXJnZXQ6ICdlczIwMTUnLFxuICAgIGVtcHR5T3V0RGlyOiB0cnVlLFxuICB9LFxuICBzZXJ2ZXI6IHtcbiAgICBoZWFkZXJzOiB7XG4gICAgICAuLi4ocHJvY2Vzcy5lbnYuUkVBQ1RfQVBQX0NTUCAmJiB7XG4gICAgICAgICdDb250ZW50LVNlY3VyaXR5LVBvbGljeSc6IHByb2Nlc3MuZW52LlJFQUNUX0FQUF9DU1AsXG4gICAgICB9KSxcbiAgICB9LFxuICAgIGFsbG93ZWRIb3N0czogWydlc3QtcmFnLXJ0Yy5yb290Y29kZS5zb2Z0d2FyZScsICdsb2NhbGhvc3QnLCAnMTI3LjAuMC4xJ10sXG5cbiAgfSxcbiAgcmVzb2x2ZToge1xuICAgIGFsaWFzOiB7XG4gICAgICAnfkBmb250c291cmNlJzogcGF0aC5yZXNvbHZlKF9fZGlybmFtZSwgJ25vZGVfbW9kdWxlcy9AZm9udHNvdXJjZScpLFxuICAgICAgJ0AnOiBgJHtwYXRoLnJlc29sdmUoX19kaXJuYW1lLCAnLi9zcmMnKX1gLFxuICAgIH0sXG4gIH0sXG59KTtcbiIsICJjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfZGlybmFtZSA9IFwiL2FwcFwiO2NvbnN0IF9fdml0ZV9pbmplY3RlZF9vcmlnaW5hbF9maWxlbmFtZSA9IFwiL2FwcC92aXRlUGx1Z2luLmpzXCI7Y29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2ltcG9ydF9tZXRhX3VybCA9IFwiZmlsZTovLy9hcHAvdml0ZVBsdWdpbi5qc1wiO2V4cG9ydCBmdW5jdGlvbiByZW1vdmVIaWRkZW5NZW51SXRlbXMoc3RyKSB7XG4gIGNvbnN0IGJhZEpzb24gPSBzdHIucmVwbGFjZSgnZXhwb3J0IGRlZmF1bHQgWycsICdbJykucmVwbGFjZSgnXTsnLCAnXScpO1xuICBjb25zdCBjb3JyZWN0SnNvbiA9IGJhZ
Epzb24ucmVwbGFjZSgvKFsnXCJdKT8oW2EtejAtOUEtWl9dKykoWydcIl0pPzovZywgJ1wiJDJcIjogJyk7XG5cbiBjb25zdCBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCA9IFxuICAgIHByb2Nlc3MuZW52LlJFQUNUX0FQUF9FTkFCTEVfSElEREVOX0ZFQVRVUkVTPy50b0xvd2VyQ2FzZSgpLnRyaW0oKSA9PT0gJ3RydWUnIHx8XG4gICAgcHJvY2Vzcy5lbnYuUkVBQ1RfQVBQX0VOQUJMRV9ISURERU5fRkVBVFVSRVM/LnRvTG93ZXJDYXNlKCkudHJpbSgpID09PSAnMSc7XG5cbiAgY29uc3QganNvbiA9IHJlbW92ZUhpZGRlbihKU09OLnBhcnNlKGNvcnJlY3RKc29uKSwgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQpO1xuICBcbiAgY29uc3QgdXBkYXRlZEpzb24gPSBKU09OLnN0cmluZ2lmeShqc29uKTtcblxuICByZXR1cm4gJ2V4cG9ydCBkZWZhdWx0ICcgKyB1cGRhdGVkSnNvbiArICc7J1xufVxuXG5mdW5jdGlvbiByZW1vdmVIaWRkZW4obWVudUl0ZW1zLCBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCkge1xuICBpZighbWVudUl0ZW1zKSByZXR1cm4gbWVudUl0ZW1zO1xuICBjb25zdCBhcnIgPSBtZW51SXRlbXNcbiAgICA/LmZpbHRlcih4ID0+ICF4LmhpZGRlbilcbiAgICA/LmZpbHRlcih4ID0+IGlzSGlkZGVuRmVhdHVyZXNFbmFibGVkIHx8IHguaGlkZGVuTW9kZSAhPT0gXCJwcm9kdWN0aW9uXCIpO1xuICBmb3IgKGNvbnN0IGEgb2YgYXJyKSB7XG4gICAgYS5jaGlsZHJlbiA9IHJlbW92ZUhpZGRlbihhLmNoaWxkcmVuLCBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCk7XG4gIH1cbiAgcmV0dXJuIGFycjtcbn1cbiJdLAogICJtYXBwaW5ncyI6ICI7QUFBOEwsU0FBUyxvQkFBb0I7QUFDM04sT0FBTyxXQUFXO0FBQ2xCLE9BQU8sbUJBQW1CO0FBQzFCLE9BQU8sVUFBVTtBQUNqQixPQUFPLFVBQVU7OztBQ0prTCxTQUFTLHNCQUFzQixLQUFLO0FBQXZPO0FBQ0UsUUFBTSxVQUFVLElBQUksUUFBUSxvQkFBb0IsR0FBRyxFQUFFLFFBQVEsTUFBTSxHQUFHO0FBQ3RFLFFBQU0sY0FBYyxRQUFRLFFBQVEsbUNBQW1DLFFBQVE7QUFFaEYsUUFBTSw0QkFDSCxhQUFRLElBQUkscUNBQVosbUJBQThDLGNBQWMsWUFBVyxZQUN2RSxhQUFRLElBQUkscUNBQVosbUJBQThDLGNBQWMsWUFBVztBQUV6RSxRQUFNLE9BQU8sYUFBYSxLQUFLLE1BQU0sV0FBVyxHQUFHLHVCQUF1QjtBQUUxRSxRQUFNLGNBQWMsS0FBSyxVQUFVLElBQUk7QUFFdkMsU0FBTyxvQkFBb0IsY0FBYztBQUMzQztBQUVBLFNBQVMsYUFBYSxXQUFXLHlCQUF5QjtBQWYxRDtBQWdCRSxNQUFHLENBQUM7QUFBVyxXQUFPO0FBQ3RCLFFBQU0sT0FBTSw0Q0FDUixPQUFPLE9BQUssQ0FBQyxFQUFFLFlBRFAsbUJBRVIsT0FBTyxPQUFLLDJCQUEyQixFQUFFLGVBQWU7QUFDNUQsYUFBVyxLQUFLLEtBQUs7QUFDbkIsTUFBRSxXQUFXLGFBQWEsRUFBRSxVQUFVLHVCQUF1QjtBQUFBLEVBQy9EO0FBQ0EsU0FBTztBQUNUOzs7QUR4QkEsSUFBTSxtQ0FBbUM7QUFRekMsSUFBTyxzQkFBUSxhQUFhO0FBQUEsRUFDMUIsV0FBVztBQUFBLEVBQ1gsU0FBUztBQUFBLElBQ1AsTUFBTTtBQUFBLElBQ04sY0FBYztBQUFBLElBQ2QsS0FBSztBQUFBLElBQ0w7QUFBQSxNQUNFLE1BQU07QUFBQSxNQUNOLFdBQVcsQ0FBQyxLQUFLLE9BQU87QUFDdEIsWUFBRyxDQUFDLEdBQUcsU0FBUyxzQkFBc0I7QUFDcEMsaUJBQU87QUFDVCxlQUFPLHNCQUFzQixHQUFHO0FBQUEsTUFDbEM7QUFBQSxJQUNGO0FBQUEsRUFDRjtBQUFBLEVBQ0EsTUFBTTtBQUFBLEVBQ04sT0FBTztBQUFBLElBQ0wsUUFBUTtBQUFBLElBQ1IsUUFBUTtBQUFBLElBQ1IsYUFBYTtBQUFBLEVBQ2Y7QUFBQSxFQUNBLFFBQVE7QUFBQSxJQUNOLFNBQVM7QUFBQSxNQUNQLEdBQUksUUFBUSxJQUFJLGlCQUFpQjtBQUFBLFFBQy9CLDJCQUEyQixRQUFRLElBQUk7QUFBQSxNQUN6QztBQUFBLElBQ0Y7QUFBQSxJQUNBLGNBQWMsQ0FBQyxpQ0FBaUMsYUFBYSxXQUFXO0FBQUEsRUFFMUU7QUFBQSxFQUNBLFNBQVM7QUFBQSxJQUNQLE9BQU87QUFBQSxNQUNMLGdCQUFnQixLQUFLLFFBQVEsa0NBQVcsMEJBQTBCO0FBQUEsTUFDbEUsS0FBSyxHQUFHLEtBQUssUUFBUSxrQ0FBVyxPQUFPLENBQUM7QUFBQSxJQUMxQztBQUFBLEVBQ0Y7QUFDRixDQUFDOyIsCiAgIm5hbWVzIjogW10KfQo= diff --git a/GUI/vite.config.ts.timestamp-1767946574215-f7ac6ce2fedaa.mjs b/GUI/vite.config.ts.timestamp-1767946574215-f7ac6ce2fedaa.mjs deleted file mode 100644 index b770c4c5..00000000 --- a/GUI/vite.config.ts.timestamp-1767946574215-f7ac6ce2fedaa.mjs +++ /dev/null @@ -1,70 +0,0 @@ -// vite.config.ts -import { defineConfig } from "file:///app/node_modules/vite/dist/node/index.js"; -import react from "file:///app/node_modules/@vitejs/plugin-react/dist/index.mjs"; -import tsconfigPaths from "file:///app/node_modules/vite-tsconfig-paths/dist/index.mjs"; -import svgr from "file:///app/node_modules/vite-plugin-svgr/dist/index.mjs"; -import path from "path"; - -// vitePlugin.js -function 
removeHiddenMenuItems(str) { - var _a, _b; - const badJson = str.replace("export default [", "[").replace("];", "]"); - const correctJson = badJson.replace(/(['"])?([a-z0-9A-Z_]+)(['"])?:/g, '"$2": '); - const isHiddenFeaturesEnabled = ((_a = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _a.toLowerCase().trim()) === "true" || ((_b = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _b.toLowerCase().trim()) === "1"; - const json = removeHidden(JSON.parse(correctJson), isHiddenFeaturesEnabled); - const updatedJson = JSON.stringify(json); - return "export default " + updatedJson + ";"; -} -function removeHidden(menuItems, isHiddenFeaturesEnabled) { - var _a; - if (!menuItems) - return menuItems; - const arr = (_a = menuItems == null ? void 0 : menuItems.filter((x) => !x.hidden)) == null ? void 0 : _a.filter((x) => isHiddenFeaturesEnabled || x.hiddenMode !== "production"); - for (const a of arr) { - a.children = removeHidden(a.children, isHiddenFeaturesEnabled); - } - return arr; -} - -// vite.config.ts -var __vite_injected_original_dirname = "/app"; -var vite_config_default = defineConfig({ - envPrefix: "REACT_APP_", - plugins: [ - react(), - tsconfigPaths(), - svgr(), - { - name: "removeHiddenMenuItemsPlugin", - transform: (str, id) => { - if (!id.endsWith("/menu-structure.json")) - return str; - return removeHiddenMenuItems(str); - } - } - ], - base: "/rag-search", - build: { - outDir: "./build", - target: "es2015", - emptyOutDir: true - }, - server: { - headers: { - ...process.env.REACT_APP_CSP && { - "Content-Security-Policy": process.env.REACT_APP_CSP - } - }, - allowedHosts: ["est-rag-rtc.rootcode.software", "localhost", "127.0.0.1"] - }, - resolve: { - alias: { - "~@fontsource": path.resolve(__vite_injected_original_dirname, "node_modules/@fontsource"), - "@": `${path.resolve(__vite_injected_original_dirname, "./src")}` - } - } -}); -export { - vite_config_default as default -}; -//# 
sourceMappingURL=data:application/json;base64,ewogICJ2ZXJzaW9uIjogMywKICAic291cmNlcyI6IFsidml0ZS5jb25maWcudHMiLCAidml0ZVBsdWdpbi5qcyJdLAogICJzb3VyY2VzQ29udGVudCI6IFsiY29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2Rpcm5hbWUgPSBcIi9hcHBcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfZmlsZW5hbWUgPSBcIi9hcHAvdml0ZS5jb25maWcudHNcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfaW1wb3J0X21ldGFfdXJsID0gXCJmaWxlOi8vL2FwcC92aXRlLmNvbmZpZy50c1wiO2ltcG9ydCB7IGRlZmluZUNvbmZpZyB9IGZyb20gJ3ZpdGUnO1xuaW1wb3J0IHJlYWN0IGZyb20gJ0B2aXRlanMvcGx1Z2luLXJlYWN0JztcbmltcG9ydCB0c2NvbmZpZ1BhdGhzIGZyb20gJ3ZpdGUtdHNjb25maWctcGF0aHMnO1xuaW1wb3J0IHN2Z3IgZnJvbSAndml0ZS1wbHVnaW4tc3Zncic7XG5pbXBvcnQgcGF0aCBmcm9tICdwYXRoJztcbmltcG9ydCB7IHJlbW92ZUhpZGRlbk1lbnVJdGVtcyB9IGZyb20gJy4vdml0ZVBsdWdpbic7XG5cbi8vIGh0dHBzOi8vdml0ZWpzLmRldi9jb25maWcvXG5leHBvcnQgZGVmYXVsdCBkZWZpbmVDb25maWcoe1xuICBlbnZQcmVmaXg6ICdSRUFDVF9BUFBfJyxcbiAgcGx1Z2luczogW1xuICAgIHJlYWN0KCksXG4gICAgdHNjb25maWdQYXRocygpLFxuICAgIHN2Z3IoKSxcbiAgICB7XG4gICAgICBuYW1lOiAncmVtb3ZlSGlkZGVuTWVudUl0ZW1zUGx1Z2luJyxcbiAgICAgIHRyYW5zZm9ybTogKHN0ciwgaWQpID0+IHtcbiAgICAgICAgaWYoIWlkLmVuZHNXaXRoKCcvbWVudS1zdHJ1Y3R1cmUuanNvbicpKVxuICAgICAgICAgIHJldHVybiBzdHI7XG4gICAgICAgIHJldHVybiByZW1vdmVIaWRkZW5NZW51SXRlbXMoc3RyKTtcbiAgICAgIH0sXG4gICAgfSxcbiAgXSxcbiAgYmFzZTogJy9yYWctc2VhcmNoJyxcbiAgYnVpbGQ6IHtcbiAgICBvdXREaXI6ICcuL2J1aWxkJyxcbiAgICB0YXJnZXQ6ICdlczIwMTUnLFxuICAgIGVtcHR5T3V0RGlyOiB0cnVlLFxuICB9LFxuICBzZXJ2ZXI6IHtcbiAgICBoZWFkZXJzOiB7XG4gICAgICAuLi4ocHJvY2Vzcy5lbnYuUkVBQ1RfQVBQX0NTUCAmJiB7XG4gICAgICAgICdDb250ZW50LVNlY3VyaXR5LVBvbGljeSc6IHByb2Nlc3MuZW52LlJFQUNUX0FQUF9DU1AsXG4gICAgICB9KSxcbiAgICB9LFxuICAgIGFsbG93ZWRIb3N0czogWydlc3QtcmFnLXJ0Yy5yb290Y29kZS5zb2Z0d2FyZScsICdsb2NhbGhvc3QnLCAnMTI3LjAuMC4xJ10sXG5cbiAgfSxcbiAgcmVzb2x2ZToge1xuICAgIGFsaWFzOiB7XG4gICAgICAnfkBmb250c291cmNlJzogcGF0aC5yZXNvbHZlKF9fZGlybmFtZSwgJ25vZGVfbW9kdWxlcy9AZm9udHNvdXJjZScpLFxuICAgICAgJ0AnOiBgJHtwYXRoLnJlc29sdmUoX19kaXJuYW1lLCAnLi9zcmMnKX1gLFxuICAgIH0sXG4gIH0sXG59KTtcbiIsICJjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfZGlybmFtZSA9IFwiL2FwcFwiO2NvbnN0IF9fdml0ZV9pbmplY3RlZF9vcmlnaW5hbF9maWxlbmFtZSA9IFwiL2FwcC92aXRlUGx1Z2luLmpzXCI7Y29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2ltcG9ydF9tZXRhX3VybCA9IFwiZmlsZTovLy9hcHAvdml0ZVBsdWdpbi5qc1wiO2V4cG9ydCBmdW5jdGlvbiByZW1vdmVIaWRkZW5NZW51SXRlbXMoc3RyKSB7XG4gIGNvbnN0IGJhZEpzb24gPSBzdHIucmVwbGFjZSgnZXhwb3J0IGRlZmF1bHQgWycsICdbJykucmVwbGFjZSgnXTsnLCAnXScpO1xuICBjb25zdCBjb3JyZWN0SnNvbiA9IGJhZEpzb24ucmVwbGFjZSgvKFsnXCJdKT8oW2EtejAtOUEtWl9dKykoWydcIl0pPzovZywgJ1wiJDJcIjogJyk7XG5cbiBjb25zdCBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCA9IFxuICAgIHByb2Nlc3MuZW52LlJFQUNUX0FQUF9FTkFCTEVfSElEREVOX0ZFQVRVUkVTPy50b0xvd2VyQ2FzZSgpLnRyaW0oKSA9PT0gJ3RydWUnIHx8XG4gICAgcHJvY2Vzcy5lbnYuUkVBQ1RfQVBQX0VOQUJMRV9ISURERU5fRkVBVFVSRVM/LnRvTG93ZXJDYXNlKCkudHJpbSgpID09PSAnMSc7XG5cbiAgY29uc3QganNvbiA9IHJlbW92ZUhpZGRlbihKU09OLnBhcnNlKGNvcnJlY3RKc29uKSwgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQpO1xuICBcbiAgY29uc3QgdXBkYXRlZEpzb24gPSBKU09OLnN0cmluZ2lmeShqc29uKTtcblxuICByZXR1cm4gJ2V4cG9ydCBkZWZhdWx0ICcgKyB1cGRhdGVkSnNvbiArICc7J1xufVxuXG5mdW5jdGlvbiByZW1vdmVIaWRkZW4obWVudUl0ZW1zLCBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCkge1xuICBpZighbWVudUl0ZW1zKSByZXR1cm4gbWVudUl0ZW1zO1xuICBjb25zdCBhcnIgPSBtZW51SXRlbXNcbiAgICA/LmZpbHRlcih4ID0+ICF4LmhpZGRlbilcbiAgICA/LmZpbHRlcih4ID0+IGlzSGlkZGVuRmVhdHVyZXNFbmFibGVkIHx8IHguaGlkZGVuTW9kZSAhPT0gXCJwcm9kdWN0aW9uXCIpO1xuICBmb3IgKGNvbnN0IGEgb2YgYXJyKSB7XG4gICAgYS5jaGlsZHJlbiA9IHJlbW92ZUhpZGRlbihhLmNoaWxkcmVuLCBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCk7XG4gIH1cbiAgcmV0dXJuIGFycjtcbn1cbiJdLAogICJtYXBwaW5ncyI6ICI7QUFBOEwsU0FBUyxvQkFBb0I7Q
UFDM04sT0FBTyxXQUFXO0FBQ2xCLE9BQU8sbUJBQW1CO0FBQzFCLE9BQU8sVUFBVTtBQUNqQixPQUFPLFVBQVU7OztBQ0prTCxTQUFTLHNCQUFzQixLQUFLO0FBQXZPO0FBQ0UsUUFBTSxVQUFVLElBQUksUUFBUSxvQkFBb0IsR0FBRyxFQUFFLFFBQVEsTUFBTSxHQUFHO0FBQ3RFLFFBQU0sY0FBYyxRQUFRLFFBQVEsbUNBQW1DLFFBQVE7QUFFaEYsUUFBTSw0QkFDSCxhQUFRLElBQUkscUNBQVosbUJBQThDLGNBQWMsWUFBVyxZQUN2RSxhQUFRLElBQUkscUNBQVosbUJBQThDLGNBQWMsWUFBVztBQUV6RSxRQUFNLE9BQU8sYUFBYSxLQUFLLE1BQU0sV0FBVyxHQUFHLHVCQUF1QjtBQUUxRSxRQUFNLGNBQWMsS0FBSyxVQUFVLElBQUk7QUFFdkMsU0FBTyxvQkFBb0IsY0FBYztBQUMzQztBQUVBLFNBQVMsYUFBYSxXQUFXLHlCQUF5QjtBQWYxRDtBQWdCRSxNQUFHLENBQUM7QUFBVyxXQUFPO0FBQ3RCLFFBQU0sT0FBTSw0Q0FDUixPQUFPLE9BQUssQ0FBQyxFQUFFLFlBRFAsbUJBRVIsT0FBTyxPQUFLLDJCQUEyQixFQUFFLGVBQWU7QUFDNUQsYUFBVyxLQUFLLEtBQUs7QUFDbkIsTUFBRSxXQUFXLGFBQWEsRUFBRSxVQUFVLHVCQUF1QjtBQUFBLEVBQy9EO0FBQ0EsU0FBTztBQUNUOzs7QUR4QkEsSUFBTSxtQ0FBbUM7QUFRekMsSUFBTyxzQkFBUSxhQUFhO0FBQUEsRUFDMUIsV0FBVztBQUFBLEVBQ1gsU0FBUztBQUFBLElBQ1AsTUFBTTtBQUFBLElBQ04sY0FBYztBQUFBLElBQ2QsS0FBSztBQUFBLElBQ0w7QUFBQSxNQUNFLE1BQU07QUFBQSxNQUNOLFdBQVcsQ0FBQyxLQUFLLE9BQU87QUFDdEIsWUFBRyxDQUFDLEdBQUcsU0FBUyxzQkFBc0I7QUFDcEMsaUJBQU87QUFDVCxlQUFPLHNCQUFzQixHQUFHO0FBQUEsTUFDbEM7QUFBQSxJQUNGO0FBQUEsRUFDRjtBQUFBLEVBQ0EsTUFBTTtBQUFBLEVBQ04sT0FBTztBQUFBLElBQ0wsUUFBUTtBQUFBLElBQ1IsUUFBUTtBQUFBLElBQ1IsYUFBYTtBQUFBLEVBQ2Y7QUFBQSxFQUNBLFFBQVE7QUFBQSxJQUNOLFNBQVM7QUFBQSxNQUNQLEdBQUksUUFBUSxJQUFJLGlCQUFpQjtBQUFBLFFBQy9CLDJCQUEyQixRQUFRLElBQUk7QUFBQSxNQUN6QztBQUFBLElBQ0Y7QUFBQSxJQUNBLGNBQWMsQ0FBQyxpQ0FBaUMsYUFBYSxXQUFXO0FBQUEsRUFFMUU7QUFBQSxFQUNBLFNBQVM7QUFBQSxJQUNQLE9BQU87QUFBQSxNQUNMLGdCQUFnQixLQUFLLFFBQVEsa0NBQVcsMEJBQTBCO0FBQUEsTUFDbEUsS0FBSyxHQUFHLEtBQUssUUFBUSxrQ0FBVyxPQUFPLENBQUM7QUFBQSxJQUMxQztBQUFBLEVBQ0Y7QUFDRixDQUFDOyIsCiAgIm5hbWVzIjogW10KfQo= diff --git a/GUI/vite.config.ts.timestamp-1768278822370-7924bd5f138d9.mjs b/GUI/vite.config.ts.timestamp-1768278822370-7924bd5f138d9.mjs deleted file mode 100644 index 3ffe5928..00000000 --- a/GUI/vite.config.ts.timestamp-1768278822370-7924bd5f138d9.mjs +++ /dev/null @@ -1,77 +0,0 @@ -// vite.config.ts -import { defineConfig } from "file:///app/node_modules/vite/dist/node/index.js"; -import react from "file:///app/node_modules/@vitejs/plugin-react/dist/index.mjs"; -import tsconfigPaths from "file:///app/node_modules/vite-tsconfig-paths/dist/index.mjs"; -import svgr from "file:///app/node_modules/vite-plugin-svgr/dist/index.mjs"; -import path from "path"; - -// vitePlugin.js -function removeHiddenMenuItems(str) { - var _a, _b; - const badJson = str.replace("export default [", "[").replace("];", "]"); - const correctJson = badJson.replace(/(['"])?([a-z0-9A-Z_]+)(['"])?:/g, '"$2": '); - const isHiddenFeaturesEnabled = ((_a = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _a.toLowerCase().trim()) === "true" || ((_b = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _b.toLowerCase().trim()) === "1"; - const json = removeHidden(JSON.parse(correctJson), isHiddenFeaturesEnabled); - const updatedJson = JSON.stringify(json); - return "export default " + updatedJson + ";"; -} -function removeHidden(menuItems, isHiddenFeaturesEnabled) { - var _a; - if (!menuItems) - return menuItems; - const arr = (_a = menuItems == null ? void 0 : menuItems.filter((x) => !x.hidden)) == null ? 
void 0 : _a.filter((x) => isHiddenFeaturesEnabled || x.hiddenMode !== "production"); - for (const a of arr) { - a.children = removeHidden(a.children, isHiddenFeaturesEnabled); - } - return arr; -} - -// vite.config.ts -var __vite_injected_original_dirname = "/app"; -var vite_config_default = defineConfig({ - envPrefix: "REACT_APP_", - plugins: [ - react(), - tsconfigPaths(), - svgr(), - { - name: "removeHiddenMenuItemsPlugin", - transform: (str, id) => { - if (!id.endsWith("/menu-structure.json")) - return str; - return removeHiddenMenuItems(str); - } - } - ], - base: "/rag-search", - build: { - outDir: "./build", - target: "es2015", - emptyOutDir: true - }, - server: { - headers: { - ...process.env.REACT_APP_CSP && { - "Content-Security-Policy": process.env.REACT_APP_CSP - } - }, - allowedHosts: ["est-rag-rtc.rootcode.software", "localhost", "127.0.0.1"], - proxy: { - "/vault-agent-gui": { - target: "http://vault-agent-gui:8202", - changeOrigin: true, - rewrite: (path2) => path2.replace(/^\/vault-agent-gui/, "") - } - } - }, - resolve: { - alias: { - "~@fontsource": path.resolve(__vite_injected_original_dirname, "node_modules/@fontsource"), - "@": `${path.resolve(__vite_injected_original_dirname, "./src")}` - } - } -}); -export { - vite_config_default as default -}; -//# sourceMappingURL=data:application/json;base64,ewogICJ2ZXJzaW9uIjogMywKICAic291cmNlcyI6IFsidml0ZS5jb25maWcudHMiLCAidml0ZVBsdWdpbi5qcyJdLAogICJzb3VyY2VzQ29udGVudCI6IFsiY29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2Rpcm5hbWUgPSBcIi9hcHBcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfZmlsZW5hbWUgPSBcIi9hcHAvdml0ZS5jb25maWcudHNcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfaW1wb3J0X21ldGFfdXJsID0gXCJmaWxlOi8vL2FwcC92aXRlLmNvbmZpZy50c1wiO2ltcG9ydCB7IGRlZmluZUNvbmZpZyB9IGZyb20gJ3ZpdGUnO1xuaW1wb3J0IHJlYWN0IGZyb20gJ0B2aXRlanMvcGx1Z2luLXJlYWN0JztcbmltcG9ydCB0c2NvbmZpZ1BhdGhzIGZyb20gJ3ZpdGUtdHNjb25maWctcGF0aHMnO1xuaW1wb3J0IHN2Z3IgZnJvbSAndml0ZS1wbHVnaW4tc3Zncic7XG5pbXBvcnQgcGF0aCBmcm9tICdwYXRoJztcbmltcG9ydCB7IHJlbW92ZUhpZGRlbk1lbnVJdGVtcyB9IGZyb20gJy4vdml0ZVBsdWdpbic7XG5cbi8vIGh0dHBzOi8vdml0ZWpzLmRldi9jb25maWcvXG5leHBvcnQgZGVmYXVsdCBkZWZpbmVDb25maWcoe1xuICBlbnZQcmVmaXg6ICdSRUFDVF9BUFBfJyxcbiAgcGx1Z2luczogW1xuICAgIHJlYWN0KCksXG4gICAgdHNjb25maWdQYXRocygpLFxuICAgIHN2Z3IoKSxcbiAgICB7XG4gICAgICBuYW1lOiAncmVtb3ZlSGlkZGVuTWVudUl0ZW1zUGx1Z2luJyxcbiAgICAgIHRyYW5zZm9ybTogKHN0ciwgaWQpID0+IHtcbiAgICAgICAgaWYoIWlkLmVuZHNXaXRoKCcvbWVudS1zdHJ1Y3R1cmUuanNvbicpKVxuICAgICAgICAgIHJldHVybiBzdHI7XG4gICAgICAgIHJldHVybiByZW1vdmVIaWRkZW5NZW51SXRlbXMoc3RyKTtcbiAgICAgIH0sXG4gICAgfSxcbiAgXSxcbiAgYmFzZTogJy9yYWctc2VhcmNoJyxcbiAgYnVpbGQ6IHtcbiAgICBvdXREaXI6ICcuL2J1aWxkJyxcbiAgICB0YXJnZXQ6ICdlczIwMTUnLFxuICAgIGVtcHR5T3V0RGlyOiB0cnVlLFxuICB9LFxuICBzZXJ2ZXI6IHtcbiAgICBoZWFkZXJzOiB7XG4gICAgICAuLi4ocHJvY2Vzcy5lbnYuUkVBQ1RfQVBQX0NTUCAmJiB7XG4gICAgICAgICdDb250ZW50LVNlY3VyaXR5LVBvbGljeSc6IHByb2Nlc3MuZW52LlJFQUNUX0FQUF9DU1AsXG4gICAgICB9KSxcbiAgICB9LFxuICAgIGFsbG93ZWRIb3N0czogWydlc3QtcmFnLXJ0Yy5yb290Y29kZS5zb2Z0d2FyZScsICdsb2NhbGhvc3QnLCAnMTI3LjAuMC4xJ10sXG4gICAgcHJveHk6IHtcbiAgICAgICcvdmF1bHQtYWdlbnQtZ3VpJzoge1xuICAgICAgICB0YXJnZXQ6ICdodHRwOi8vdmF1bHQtYWdlbnQtZ3VpOjgyMDInLFxuICAgICAgICBjaGFuZ2VPcmlnaW46IHRydWUsXG4gICAgICAgIHJld3JpdGU6IChwYXRoKSA9PiBwYXRoLnJlcGxhY2UoL15cXC92YXVsdC1hZ2VudC1ndWkvLCAnJyksXG4gICAgICB9LFxuICAgIH0sXG4gIH0sXG4gIHJlc29sdmU6IHtcbiAgICBhbGlhczoge1xuICAgICAgJ35AZm9udHNvdXJjZSc6IHBhdGgucmVzb2x2ZShfX2Rpcm5hbWUsICdub2RlX21vZHVsZXMvQGZvbnRzb3VyY2UnKSxcbiAgICAgICdAJzogYCR7cGF0aC5yZXNvbHZlKF9fZGlybmFtZSwgJy4vc3JjJyl9YCxcbiAgICB9LFxuICB9LFxufSk7XG4iLCAiY29uc3QgX192
aXRlX2luamVjdGVkX29yaWdpbmFsX2Rpcm5hbWUgPSBcIi9hcHBcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfZmlsZW5hbWUgPSBcIi9hcHAvdml0ZVBsdWdpbi5qc1wiO2NvbnN0IF9fdml0ZV9pbmplY3RlZF9vcmlnaW5hbF9pbXBvcnRfbWV0YV91cmwgPSBcImZpbGU6Ly8vYXBwL3ZpdGVQbHVnaW4uanNcIjtleHBvcnQgZnVuY3Rpb24gcmVtb3ZlSGlkZGVuTWVudUl0ZW1zKHN0cikge1xuICBjb25zdCBiYWRKc29uID0gc3RyLnJlcGxhY2UoJ2V4cG9ydCBkZWZhdWx0IFsnLCAnWycpLnJlcGxhY2UoJ107JywgJ10nKTtcbiAgY29uc3QgY29ycmVjdEpzb24gPSBiYWRKc29uLnJlcGxhY2UoLyhbJ1wiXSk/KFthLXowLTlBLVpfXSspKFsnXCJdKT86L2csICdcIiQyXCI6ICcpO1xuXG4gY29uc3QgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQgPSBcbiAgICBwcm9jZXNzLmVudi5SRUFDVF9BUFBfRU5BQkxFX0hJRERFTl9GRUFUVVJFUz8udG9Mb3dlckNhc2UoKS50cmltKCkgPT09ICd0cnVlJyB8fFxuICAgIHByb2Nlc3MuZW52LlJFQUNUX0FQUF9FTkFCTEVfSElEREVOX0ZFQVRVUkVTPy50b0xvd2VyQ2FzZSgpLnRyaW0oKSA9PT0gJzEnO1xuXG4gIGNvbnN0IGpzb24gPSByZW1vdmVIaWRkZW4oSlNPTi5wYXJzZShjb3JyZWN0SnNvbiksIGlzSGlkZGVuRmVhdHVyZXNFbmFibGVkKTtcbiAgXG4gIGNvbnN0IHVwZGF0ZWRKc29uID0gSlNPTi5zdHJpbmdpZnkoanNvbik7XG5cbiAgcmV0dXJuICdleHBvcnQgZGVmYXVsdCAnICsgdXBkYXRlZEpzb24gKyAnOydcbn1cblxuZnVuY3Rpb24gcmVtb3ZlSGlkZGVuKG1lbnVJdGVtcywgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQpIHtcbiAgaWYoIW1lbnVJdGVtcykgcmV0dXJuIG1lbnVJdGVtcztcbiAgY29uc3QgYXJyID0gbWVudUl0ZW1zXG4gICAgPy5maWx0ZXIoeCA9PiAheC5oaWRkZW4pXG4gICAgPy5maWx0ZXIoeCA9PiBpc0hpZGRlbkZlYXR1cmVzRW5hYmxlZCB8fCB4LmhpZGRlbk1vZGUgIT09IFwicHJvZHVjdGlvblwiKTtcbiAgZm9yIChjb25zdCBhIG9mIGFycikge1xuICAgIGEuY2hpbGRyZW4gPSByZW1vdmVIaWRkZW4oYS5jaGlsZHJlbiwgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQpO1xuICB9XG4gIHJldHVybiBhcnI7XG59XG4iXSwKICAibWFwcGluZ3MiOiAiO0FBQThMLFNBQVMsb0JBQW9CO0FBQzNOLE9BQU8sV0FBVztBQUNsQixPQUFPLG1CQUFtQjtBQUMxQixPQUFPLFVBQVU7QUFDakIsT0FBTyxVQUFVOzs7QUNKa0wsU0FBUyxzQkFBc0IsS0FBSztBQUF2TztBQUNFLFFBQU0sVUFBVSxJQUFJLFFBQVEsb0JBQW9CLEdBQUcsRUFBRSxRQUFRLE1BQU0sR0FBRztBQUN0RSxRQUFNLGNBQWMsUUFBUSxRQUFRLG1DQUFtQyxRQUFRO0FBRWhGLFFBQU0sNEJBQ0gsYUFBUSxJQUFJLHFDQUFaLG1CQUE4QyxjQUFjLFlBQVcsWUFDdkUsYUFBUSxJQUFJLHFDQUFaLG1CQUE4QyxjQUFjLFlBQVc7QUFFekUsUUFBTSxPQUFPLGFBQWEsS0FBSyxNQUFNLFdBQVcsR0FBRyx1QkFBdUI7QUFFMUUsUUFBTSxjQUFjLEtBQUssVUFBVSxJQUFJO0FBRXZDLFNBQU8sb0JBQW9CLGNBQWM7QUFDM0M7QUFFQSxTQUFTLGFBQWEsV0FBVyx5QkFBeUI7QUFmMUQ7QUFnQkUsTUFBRyxDQUFDO0FBQVcsV0FBTztBQUN0QixRQUFNLE9BQU0sNENBQ1IsT0FBTyxPQUFLLENBQUMsRUFBRSxZQURQLG1CQUVSLE9BQU8sT0FBSywyQkFBMkIsRUFBRSxlQUFlO0FBQzVELGFBQVcsS0FBSyxLQUFLO0FBQ25CLE1BQUUsV0FBVyxhQUFhLEVBQUUsVUFBVSx1QkFBdUI7QUFBQSxFQUMvRDtBQUNBLFNBQU87QUFDVDs7O0FEeEJBLElBQU0sbUNBQW1DO0FBUXpDLElBQU8sc0JBQVEsYUFBYTtBQUFBLEVBQzFCLFdBQVc7QUFBQSxFQUNYLFNBQVM7QUFBQSxJQUNQLE1BQU07QUFBQSxJQUNOLGNBQWM7QUFBQSxJQUNkLEtBQUs7QUFBQSxJQUNMO0FBQUEsTUFDRSxNQUFNO0FBQUEsTUFDTixXQUFXLENBQUMsS0FBSyxPQUFPO0FBQ3RCLFlBQUcsQ0FBQyxHQUFHLFNBQVMsc0JBQXNCO0FBQ3BDLGlCQUFPO0FBQ1QsZUFBTyxzQkFBc0IsR0FBRztBQUFBLE1BQ2xDO0FBQUEsSUFDRjtBQUFBLEVBQ0Y7QUFBQSxFQUNBLE1BQU07QUFBQSxFQUNOLE9BQU87QUFBQSxJQUNMLFFBQVE7QUFBQSxJQUNSLFFBQVE7QUFBQSxJQUNSLGFBQWE7QUFBQSxFQUNmO0FBQUEsRUFDQSxRQUFRO0FBQUEsSUFDTixTQUFTO0FBQUEsTUFDUCxHQUFJLFFBQVEsSUFBSSxpQkFBaUI7QUFBQSxRQUMvQiwyQkFBMkIsUUFBUSxJQUFJO0FBQUEsTUFDekM7QUFBQSxJQUNGO0FBQUEsSUFDQSxjQUFjLENBQUMsaUNBQWlDLGFBQWEsV0FBVztBQUFBLElBQ3hFLE9BQU87QUFBQSxNQUNMLG9CQUFvQjtBQUFBLFFBQ2xCLFFBQVE7QUFBQSxRQUNSLGNBQWM7QUFBQSxRQUNkLFNBQVMsQ0FBQ0EsVUFBU0EsTUFBSyxRQUFRLHNCQUFzQixFQUFFO0FBQUEsTUFDMUQ7QUFBQSxJQUNGO0FBQUEsRUFDRjtBQUFBLEVBQ0EsU0FBUztBQUFBLElBQ1AsT0FBTztBQUFBLE1BQ0wsZ0JBQWdCLEtBQUssUUFBUSxrQ0FBVywwQkFBMEI7QUFBQSxNQUNsRSxLQUFLLEdBQUcsS0FBSyxRQUFRLGtDQUFXLE9BQU8sQ0FBQztBQUFBLElBQzFDO0FBQUEsRUFDRjtBQUNGLENBQUM7IiwKICAibmFtZXMiOiBbInBhdGgiXQp9Cg== diff --git a/GUI/vite.config.ts.timestamp-1768278833602-e10c19bbae925.mjs 
b/GUI/vite.config.ts.timestamp-1768278833602-e10c19bbae925.mjs deleted file mode 100644 index 3ffe5928..00000000 --- a/GUI/vite.config.ts.timestamp-1768278833602-e10c19bbae925.mjs +++ /dev/null @@ -1,77 +0,0 @@ -// vite.config.ts -import { defineConfig } from "file:///app/node_modules/vite/dist/node/index.js"; -import react from "file:///app/node_modules/@vitejs/plugin-react/dist/index.mjs"; -import tsconfigPaths from "file:///app/node_modules/vite-tsconfig-paths/dist/index.mjs"; -import svgr from "file:///app/node_modules/vite-plugin-svgr/dist/index.mjs"; -import path from "path"; - -// vitePlugin.js -function removeHiddenMenuItems(str) { - var _a, _b; - const badJson = str.replace("export default [", "[").replace("];", "]"); - const correctJson = badJson.replace(/(['"])?([a-z0-9A-Z_]+)(['"])?:/g, '"$2": '); - const isHiddenFeaturesEnabled = ((_a = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _a.toLowerCase().trim()) === "true" || ((_b = process.env.REACT_APP_ENABLE_HIDDEN_FEATURES) == null ? void 0 : _b.toLowerCase().trim()) === "1"; - const json = removeHidden(JSON.parse(correctJson), isHiddenFeaturesEnabled); - const updatedJson = JSON.stringify(json); - return "export default " + updatedJson + ";"; -} -function removeHidden(menuItems, isHiddenFeaturesEnabled) { - var _a; - if (!menuItems) - return menuItems; - const arr = (_a = menuItems == null ? void 0 : menuItems.filter((x) => !x.hidden)) == null ? void 0 : _a.filter((x) => isHiddenFeaturesEnabled || x.hiddenMode !== "production"); - for (const a of arr) { - a.children = removeHidden(a.children, isHiddenFeaturesEnabled); - } - return arr; -} - -// vite.config.ts -var __vite_injected_original_dirname = "/app"; -var vite_config_default = defineConfig({ - envPrefix: "REACT_APP_", - plugins: [ - react(), - tsconfigPaths(), - svgr(), - { - name: "removeHiddenMenuItemsPlugin", - transform: (str, id) => { - if (!id.endsWith("/menu-structure.json")) - return str; - return removeHiddenMenuItems(str); - } - } - ], - base: "/rag-search", - build: { - outDir: "./build", - target: "es2015", - emptyOutDir: true - }, - server: { - headers: { - ...process.env.REACT_APP_CSP && { - "Content-Security-Policy": process.env.REACT_APP_CSP - } - }, - allowedHosts: ["est-rag-rtc.rootcode.software", "localhost", "127.0.0.1"], - proxy: { - "/vault-agent-gui": { - target: "http://vault-agent-gui:8202", - changeOrigin: true, - rewrite: (path2) => path2.replace(/^\/vault-agent-gui/, "") - } - } - }, - resolve: { - alias: { - "~@fontsource": path.resolve(__vite_injected_original_dirname, "node_modules/@fontsource"), - "@": `${path.resolve(__vite_injected_original_dirname, "./src")}` - } - } -}); -export { - vite_config_default as default -}; -//# 
sourceMappingURL=data:application/json;base64,ewogICJ2ZXJzaW9uIjogMywKICAic291cmNlcyI6IFsidml0ZS5jb25maWcudHMiLCAidml0ZVBsdWdpbi5qcyJdLAogICJzb3VyY2VzQ29udGVudCI6IFsiY29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2Rpcm5hbWUgPSBcIi9hcHBcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfZmlsZW5hbWUgPSBcIi9hcHAvdml0ZS5jb25maWcudHNcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfaW1wb3J0X21ldGFfdXJsID0gXCJmaWxlOi8vL2FwcC92aXRlLmNvbmZpZy50c1wiO2ltcG9ydCB7IGRlZmluZUNvbmZpZyB9IGZyb20gJ3ZpdGUnO1xuaW1wb3J0IHJlYWN0IGZyb20gJ0B2aXRlanMvcGx1Z2luLXJlYWN0JztcbmltcG9ydCB0c2NvbmZpZ1BhdGhzIGZyb20gJ3ZpdGUtdHNjb25maWctcGF0aHMnO1xuaW1wb3J0IHN2Z3IgZnJvbSAndml0ZS1wbHVnaW4tc3Zncic7XG5pbXBvcnQgcGF0aCBmcm9tICdwYXRoJztcbmltcG9ydCB7IHJlbW92ZUhpZGRlbk1lbnVJdGVtcyB9IGZyb20gJy4vdml0ZVBsdWdpbic7XG5cbi8vIGh0dHBzOi8vdml0ZWpzLmRldi9jb25maWcvXG5leHBvcnQgZGVmYXVsdCBkZWZpbmVDb25maWcoe1xuICBlbnZQcmVmaXg6ICdSRUFDVF9BUFBfJyxcbiAgcGx1Z2luczogW1xuICAgIHJlYWN0KCksXG4gICAgdHNjb25maWdQYXRocygpLFxuICAgIHN2Z3IoKSxcbiAgICB7XG4gICAgICBuYW1lOiAncmVtb3ZlSGlkZGVuTWVudUl0ZW1zUGx1Z2luJyxcbiAgICAgIHRyYW5zZm9ybTogKHN0ciwgaWQpID0+IHtcbiAgICAgICAgaWYoIWlkLmVuZHNXaXRoKCcvbWVudS1zdHJ1Y3R1cmUuanNvbicpKVxuICAgICAgICAgIHJldHVybiBzdHI7XG4gICAgICAgIHJldHVybiByZW1vdmVIaWRkZW5NZW51SXRlbXMoc3RyKTtcbiAgICAgIH0sXG4gICAgfSxcbiAgXSxcbiAgYmFzZTogJy9yYWctc2VhcmNoJyxcbiAgYnVpbGQ6IHtcbiAgICBvdXREaXI6ICcuL2J1aWxkJyxcbiAgICB0YXJnZXQ6ICdlczIwMTUnLFxuICAgIGVtcHR5T3V0RGlyOiB0cnVlLFxuICB9LFxuICBzZXJ2ZXI6IHtcbiAgICBoZWFkZXJzOiB7XG4gICAgICAuLi4ocHJvY2Vzcy5lbnYuUkVBQ1RfQVBQX0NTUCAmJiB7XG4gICAgICAgICdDb250ZW50LVNlY3VyaXR5LVBvbGljeSc6IHByb2Nlc3MuZW52LlJFQUNUX0FQUF9DU1AsXG4gICAgICB9KSxcbiAgICB9LFxuICAgIGFsbG93ZWRIb3N0czogWydlc3QtcmFnLXJ0Yy5yb290Y29kZS5zb2Z0d2FyZScsICdsb2NhbGhvc3QnLCAnMTI3LjAuMC4xJ10sXG4gICAgcHJveHk6IHtcbiAgICAgICcvdmF1bHQtYWdlbnQtZ3VpJzoge1xuICAgICAgICB0YXJnZXQ6ICdodHRwOi8vdmF1bHQtYWdlbnQtZ3VpOjgyMDInLFxuICAgICAgICBjaGFuZ2VPcmlnaW46IHRydWUsXG4gICAgICAgIHJld3JpdGU6IChwYXRoKSA9PiBwYXRoLnJlcGxhY2UoL15cXC92YXVsdC1hZ2VudC1ndWkvLCAnJyksXG4gICAgICB9LFxuICAgIH0sXG4gIH0sXG4gIHJlc29sdmU6IHtcbiAgICBhbGlhczoge1xuICAgICAgJ35AZm9udHNvdXJjZSc6IHBhdGgucmVzb2x2ZShfX2Rpcm5hbWUsICdub2RlX21vZHVsZXMvQGZvbnRzb3VyY2UnKSxcbiAgICAgICdAJzogYCR7cGF0aC5yZXNvbHZlKF9fZGlybmFtZSwgJy4vc3JjJyl9YCxcbiAgICB9LFxuICB9LFxufSk7XG4iLCAiY29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2Rpcm5hbWUgPSBcIi9hcHBcIjtjb25zdCBfX3ZpdGVfaW5qZWN0ZWRfb3JpZ2luYWxfZmlsZW5hbWUgPSBcIi9hcHAvdml0ZVBsdWdpbi5qc1wiO2NvbnN0IF9fdml0ZV9pbmplY3RlZF9vcmlnaW5hbF9pbXBvcnRfbWV0YV91cmwgPSBcImZpbGU6Ly8vYXBwL3ZpdGVQbHVnaW4uanNcIjtleHBvcnQgZnVuY3Rpb24gcmVtb3ZlSGlkZGVuTWVudUl0ZW1zKHN0cikge1xuICBjb25zdCBiYWRKc29uID0gc3RyLnJlcGxhY2UoJ2V4cG9ydCBkZWZhdWx0IFsnLCAnWycpLnJlcGxhY2UoJ107JywgJ10nKTtcbiAgY29uc3QgY29ycmVjdEpzb24gPSBiYWRKc29uLnJlcGxhY2UoLyhbJ1wiXSk/KFthLXowLTlBLVpfXSspKFsnXCJdKT86L2csICdcIiQyXCI6ICcpO1xuXG4gY29uc3QgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQgPSBcbiAgICBwcm9jZXNzLmVudi5SRUFDVF9BUFBfRU5BQkxFX0hJRERFTl9GRUFUVVJFUz8udG9Mb3dlckNhc2UoKS50cmltKCkgPT09ICd0cnVlJyB8fFxuICAgIHByb2Nlc3MuZW52LlJFQUNUX0FQUF9FTkFCTEVfSElEREVOX0ZFQVRVUkVTPy50b0xvd2VyQ2FzZSgpLnRyaW0oKSA9PT0gJzEnO1xuXG4gIGNvbnN0IGpzb24gPSByZW1vdmVIaWRkZW4oSlNPTi5wYXJzZShjb3JyZWN0SnNvbiksIGlzSGlkZGVuRmVhdHVyZXNFbmFibGVkKTtcbiAgXG4gIGNvbnN0IHVwZGF0ZWRKc29uID0gSlNPTi5zdHJpbmdpZnkoanNvbik7XG5cbiAgcmV0dXJuICdleHBvcnQgZGVmYXVsdCAnICsgdXBkYXRlZEpzb24gKyAnOydcbn1cblxuZnVuY3Rpb24gcmVtb3ZlSGlkZGVuKG1lbnVJdGVtcywgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQpIHtcbiAgaWYoIW1lbnVJdGVtcykgcmV0dXJuIG1lbnVJdGVtcztcbiAgY29uc3QgYXJyID0gbWVudUl0ZW1zXG4gICAgPy5maWx0ZXIoeCA9PiAheC5oaWRkZW4pXG4gICAgPy5maWx0ZXIoeCA9PiBpc0hpZGRlbkZlYXR1c
mVzRW5hYmxlZCB8fCB4LmhpZGRlbk1vZGUgIT09IFwicHJvZHVjdGlvblwiKTtcbiAgZm9yIChjb25zdCBhIG9mIGFycikge1xuICAgIGEuY2hpbGRyZW4gPSByZW1vdmVIaWRkZW4oYS5jaGlsZHJlbiwgaXNIaWRkZW5GZWF0dXJlc0VuYWJsZWQpO1xuICB9XG4gIHJldHVybiBhcnI7XG59XG4iXSwKICAibWFwcGluZ3MiOiAiO0FBQThMLFNBQVMsb0JBQW9CO0FBQzNOLE9BQU8sV0FBVztBQUNsQixPQUFPLG1CQUFtQjtBQUMxQixPQUFPLFVBQVU7QUFDakIsT0FBTyxVQUFVOzs7QUNKa0wsU0FBUyxzQkFBc0IsS0FBSztBQUF2TztBQUNFLFFBQU0sVUFBVSxJQUFJLFFBQVEsb0JBQW9CLEdBQUcsRUFBRSxRQUFRLE1BQU0sR0FBRztBQUN0RSxRQUFNLGNBQWMsUUFBUSxRQUFRLG1DQUFtQyxRQUFRO0FBRWhGLFFBQU0sNEJBQ0gsYUFBUSxJQUFJLHFDQUFaLG1CQUE4QyxjQUFjLFlBQVcsWUFDdkUsYUFBUSxJQUFJLHFDQUFaLG1CQUE4QyxjQUFjLFlBQVc7QUFFekUsUUFBTSxPQUFPLGFBQWEsS0FBSyxNQUFNLFdBQVcsR0FBRyx1QkFBdUI7QUFFMUUsUUFBTSxjQUFjLEtBQUssVUFBVSxJQUFJO0FBRXZDLFNBQU8sb0JBQW9CLGNBQWM7QUFDM0M7QUFFQSxTQUFTLGFBQWEsV0FBVyx5QkFBeUI7QUFmMUQ7QUFnQkUsTUFBRyxDQUFDO0FBQVcsV0FBTztBQUN0QixRQUFNLE9BQU0sNENBQ1IsT0FBTyxPQUFLLENBQUMsRUFBRSxZQURQLG1CQUVSLE9BQU8sT0FBSywyQkFBMkIsRUFBRSxlQUFlO0FBQzVELGFBQVcsS0FBSyxLQUFLO0FBQ25CLE1BQUUsV0FBVyxhQUFhLEVBQUUsVUFBVSx1QkFBdUI7QUFBQSxFQUMvRDtBQUNBLFNBQU87QUFDVDs7O0FEeEJBLElBQU0sbUNBQW1DO0FBUXpDLElBQU8sc0JBQVEsYUFBYTtBQUFBLEVBQzFCLFdBQVc7QUFBQSxFQUNYLFNBQVM7QUFBQSxJQUNQLE1BQU07QUFBQSxJQUNOLGNBQWM7QUFBQSxJQUNkLEtBQUs7QUFBQSxJQUNMO0FBQUEsTUFDRSxNQUFNO0FBQUEsTUFDTixXQUFXLENBQUMsS0FBSyxPQUFPO0FBQ3RCLFlBQUcsQ0FBQyxHQUFHLFNBQVMsc0JBQXNCO0FBQ3BDLGlCQUFPO0FBQ1QsZUFBTyxzQkFBc0IsR0FBRztBQUFBLE1BQ2xDO0FBQUEsSUFDRjtBQUFBLEVBQ0Y7QUFBQSxFQUNBLE1BQU07QUFBQSxFQUNOLE9BQU87QUFBQSxJQUNMLFFBQVE7QUFBQSxJQUNSLFFBQVE7QUFBQSxJQUNSLGFBQWE7QUFBQSxFQUNmO0FBQUEsRUFDQSxRQUFRO0FBQUEsSUFDTixTQUFTO0FBQUEsTUFDUCxHQUFJLFFBQVEsSUFBSSxpQkFBaUI7QUFBQSxRQUMvQiwyQkFBMkIsUUFBUSxJQUFJO0FBQUEsTUFDekM7QUFBQSxJQUNGO0FBQUEsSUFDQSxjQUFjLENBQUMsaUNBQWlDLGFBQWEsV0FBVztBQUFBLElBQ3hFLE9BQU87QUFBQSxNQUNMLG9CQUFvQjtBQUFBLFFBQ2xCLFFBQVE7QUFBQSxRQUNSLGNBQWM7QUFBQSxRQUNkLFNBQVMsQ0FBQ0EsVUFBU0EsTUFBSyxRQUFRLHNCQUFzQixFQUFFO0FBQUEsTUFDMUQ7QUFBQSxJQUNGO0FBQUEsRUFDRjtBQUFBLEVBQ0EsU0FBUztBQUFBLElBQ1AsT0FBTztBQUFBLE1BQ0wsZ0JBQWdCLEtBQUssUUFBUSxrQ0FBVywwQkFBMEI7QUFBQSxNQUNsRSxLQUFLLEdBQUcsS0FBSyxRQUFRLGtDQUFXLE9BQU8sQ0FBQztBQUFBLElBQzFDO0FBQUEsRUFDRjtBQUNGLENBQUM7IiwKICAibmFtZXMiOiBbInBhdGgiXQp9Cg== From 327e5b2b0881f99d3d7eba3639c7699b494021c7 Mon Sep 17 00:00:00 2001 From: erangi-ar Date: Mon, 9 Feb 2026 12:54:49 +0530 Subject: [PATCH 3/3] resolve pr comments --- .github/workflows/helm-dependency.yaml | 42 ++++++++++ kubernetes/CONTAINER_REGISTRY_SETUP.md | 35 -------- kubernetes/Chart.lock | 75 ----------------- kubernetes/Chart.yaml | 80 ------------------- .../templates/deployment-byk-clickhouse.yaml | 18 +++-- .../charts/ClickHouse/templates/secret.yaml | 13 +++ kubernetes/charts/ClickHouse/values.yaml | 7 +- .../templates/deployment-byk-cronmanager.yaml | 4 + kubernetes/charts/CronManager/values.yaml | 5 +- .../templates/deployment-byk-data-mapper.yaml | 4 + kubernetes/charts/DataMapper/values.yaml | 2 + .../GUI/templates/deployment-byk-gui.yaml | 28 +++---- kubernetes/charts/GUI/values.yaml | 6 +- .../templates/configmap-dashboards.yaml | 2 +- .../Grafana/templates/deployment-grafana.yaml | 6 ++ .../charts/Grafana/templates/secret.yaml | 10 +++ kubernetes/charts/Grafana/values.yaml | 13 +-- .../deployment-byk-langfuse-web.yaml | 6 ++ .../charts/Langfuse-Web/templates/secret.yaml | 25 ++++++ kubernetes/charts/Langfuse-Web/values.yaml | 29 +++---- .../deployment-byk-langfuse-worker.yaml | 6 ++ .../Langfuse-Worker/templates/secret.yaml | 24 ++++++ kubernetes/charts/Langfuse-Worker/values.yaml | 27 +++---- 
.../Liquibase/templates/liquibase-job.yaml | 18 ++++- .../charts/Liquibase/templates/secret.yaml | 12 +++ kubernetes/charts/Liquibase/values.yaml | 10 +-- .../Redis/templates/deployment-byk-redis.yaml | 10 +-- kubernetes/charts/Redis/templates/secret.yaml | 12 +++ kubernetes/charts/Redis/values.yaml | 9 ++- .../Resql/templates/deployment-byk-resql.yaml | 10 ++- kubernetes/charts/Resql/templates/secret.yaml | 11 +++ kubernetes/charts/Resql/values.yaml | 4 +- .../configmap-byk-ruuter-private.yaml | 3 +- .../deployment-byk-ruuter-private.yaml | 10 +++ .../Ruuter-Private/templates/secret.yaml | 11 +++ kubernetes/charts/Ruuter-Private/values.yaml | 2 + .../configmap-byk-ruuter-public.yaml | 3 +- .../deployment-byk-ruuter-public.yaml | 10 +++ .../Ruuter-Public/templates/secret.yaml | 11 +++ kubernetes/charts/Ruuter-Public/values.yaml | 3 + .../S3-Ferry/templates/configmap-s3.yaml | 10 --- .../S3-Ferry/templates/deployment-s3.yaml | 13 ++- .../charts/S3-Ferry/templates/secret.yaml | 12 +++ kubernetes/charts/S3-Ferry/values.yaml | 11 +-- kubernetes/charts/TIM-database/Chart.yaml | 6 -- .../templates/deployment-byk-timdb.yaml | 42 ---------- .../TIM-database/templates/pvc-byk-timdb.yaml | 19 ----- .../templates/secret-byk-timdb.yaml | 9 --- .../templates/service-byk-timdb.yaml | 15 ---- kubernetes/charts/TIM-database/values.yaml | 29 ------- .../charts/TIM/templates/secret-byk-tim.yaml | 9 ++- kubernetes/charts/TIM/values.yaml | 3 +- kubernetes/charts/database/Chart.lock | 6 -- kubernetes/charts/database/Chart.yaml | 10 +-- .../charts/database/templates/secret.yaml | 12 +++ .../charts/database/templates/service.yaml | 34 ++++++++ .../database/templates/statefulset.yaml | 66 +++++++++++++++ kubernetes/charts/database/values.yaml | 35 +++++--- .../minio/templates/deployment-minio.yaml | 10 ++- kubernetes/charts/minio/templates/secret.yaml | 10 +++ kubernetes/charts/minio/values.yaml | 7 +- kubernetes/dashboard-admin.yaml | 18 ----- kubernetes/values.yaml | 78 ------------------ 63 files changed, 530 insertions(+), 550 deletions(-) create mode 100644 .github/workflows/helm-dependency.yaml delete mode 100644 kubernetes/CONTAINER_REGISTRY_SETUP.md delete mode 100644 kubernetes/Chart.lock delete mode 100644 kubernetes/Chart.yaml create mode 100644 kubernetes/charts/ClickHouse/templates/secret.yaml create mode 100644 kubernetes/charts/Grafana/templates/secret.yaml create mode 100644 kubernetes/charts/Langfuse-Web/templates/secret.yaml create mode 100644 kubernetes/charts/Langfuse-Worker/templates/secret.yaml create mode 100644 kubernetes/charts/Liquibase/templates/secret.yaml create mode 100644 kubernetes/charts/Redis/templates/secret.yaml create mode 100644 kubernetes/charts/Resql/templates/secret.yaml create mode 100644 kubernetes/charts/Ruuter-Private/templates/secret.yaml create mode 100644 kubernetes/charts/Ruuter-Public/templates/secret.yaml delete mode 100644 kubernetes/charts/S3-Ferry/templates/configmap-s3.yaml create mode 100644 kubernetes/charts/S3-Ferry/templates/secret.yaml delete mode 100644 kubernetes/charts/TIM-database/Chart.yaml delete mode 100644 kubernetes/charts/TIM-database/templates/deployment-byk-timdb.yaml delete mode 100644 kubernetes/charts/TIM-database/templates/pvc-byk-timdb.yaml delete mode 100644 kubernetes/charts/TIM-database/templates/secret-byk-timdb.yaml delete mode 100644 kubernetes/charts/TIM-database/templates/service-byk-timdb.yaml delete mode 100644 kubernetes/charts/TIM-database/values.yaml delete mode 100644 kubernetes/charts/database/Chart.lock create mode 100644 
kubernetes/charts/database/templates/secret.yaml create mode 100644 kubernetes/charts/database/templates/service.yaml create mode 100644 kubernetes/charts/database/templates/statefulset.yaml create mode 100644 kubernetes/charts/minio/templates/secret.yaml delete mode 100644 kubernetes/dashboard-admin.yaml delete mode 100644 kubernetes/values.yaml diff --git a/.github/workflows/helm-dependency.yaml b/.github/workflows/helm-dependency.yaml new file mode 100644 index 00000000..92de5c51 --- /dev/null +++ b/.github/workflows/helm-dependency.yaml @@ -0,0 +1,42 @@ +name: Helm Dependency Build + +on: + push: + branches: + - dev + - main + paths: + - 'kubernetes/**' + +jobs: + build-dependencies: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + persist-credentials: true + + - name: Set up Helm + uses: azure/setup-helm@v3 + with: + version: v3.12.0 + + - name: Build Helm dependencies + working-directory: ./kubernetes + run: | + rm -f Chart.lock + helm dependency build + + - name: Commit and push if dependencies updated + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add kubernetes/Chart.lock kubernetes/charts/ + if ! git diff --cached --quiet; then + git commit -m "chore: update Helm dependencies (charts/ and Chart.lock)" + git push + else + echo "No changes to Helm dependencies." + fi diff --git a/kubernetes/CONTAINER_REGISTRY_SETUP.md b/kubernetes/CONTAINER_REGISTRY_SETUP.md deleted file mode 100644 index d88f16b6..00000000 --- a/kubernetes/CONTAINER_REGISTRY_SETUP.md +++ /dev/null @@ -1,35 +0,0 @@ -# Container Registry Setup Guide - -This guide explains what components need to push to gcr - -## Overview - -The RAG Module consists of multiple container images that need to be pushed to your container registry. Currently, we use ECR for testing, but you should push images to your own registry before deployment. - - - -## Step 1: Build Container Images - -Build all required images from the repository root: - -### **1.1 GUI (Frontend)** - -```bash -cd GUI -docker build -t rag-module/gui:latest -f Dockerfile.dev . -cd .. -``` - -update the GUI helms values image: repository section with actual image - -### **1.2 LLM Orchestration Service** - -```bash -docker build -t rag-module/llm-orchestration-service:latest -f Dockerfile.llm_orchestration_service . 
-``` -update the LLM Orchestration Service helms values image: repository section with actual image (there are two places to update in this file) - -### **1.3 Authentication Layer** - - - diff --git a/kubernetes/Chart.lock b/kubernetes/Chart.lock deleted file mode 100644 index aa9d953c..00000000 --- a/kubernetes/Chart.lock +++ /dev/null @@ -1,75 +0,0 @@ -dependencies: -- name: database - repository: file://./charts/database - version: 0.1.0 -- name: TIM-database - repository: file://./charts/TIM-database - version: 0.1.0 -- name: resql - repository: file://./charts/Resql - version: 0.1.0 -- name: ruuter-public - repository: file://./charts/Ruuter-Public - version: 0.1.0 -- name: ruuter-private - repository: file://./charts/Ruuter-Private - version: 0.1.0 -- name: data-mapper - repository: file://./charts/DataMapper - version: 0.1.0 -- name: TIM - repository: file://./charts/TIM - version: 0.1.0 -- name: Authentication-Layer - repository: file://./charts/Authentication-Layer - version: 0.1.0 -- name: CronManager - repository: file://./charts/CronManager - version: 0.1.0 -- name: GUI - repository: file://./charts/GUI - version: 0.1.0 -- name: Loki - repository: file://./charts/Loki - version: 0.1.0 -- name: Grafana - repository: file://./charts/Grafana - version: 0.1.0 -- name: S3-Ferry - repository: file://./charts/S3-Ferry - version: 0.1.0 -- name: minio - repository: file://./charts/minio - version: 0.1.0 -- name: Redis - repository: file://./charts/Redis - version: 0.1.0 -- name: Qdrant - repository: file://./charts/Qdrant - version: 0.1.0 -- name: ClickHouse - repository: file://./charts/ClickHouse - version: 0.1.0 -- name: Langfuse-Web - repository: file://./charts/Langfuse-Web - version: 0.1.0 -- name: Langfuse-Worker - repository: file://./charts/Langfuse-Worker - version: 0.1.0 -- name: Vault - repository: file://./charts/Vault - version: 0.1.0 -- name: Vault-Init - repository: file://./charts/Vault-Init - version: 0.1.0 -- name: Vault-Agent-LLM - repository: file://./charts/Vault-Agent-LLM - version: 0.1.0 -- name: LLM-Orchestration-Service - repository: file://./charts/LLM-Orchestration-Service - version: 0.1.0 -- name: Liquibase - repository: file://./charts/Liquibase - version: 0.1.0 -digest: sha256:ebf9bd6c7a999f2ab58598fdfff371579d6c7ca17d35e87fc8200668c2ae493e -generated: "2025-12-02T13:11:13.8392479+05:30" diff --git a/kubernetes/Chart.yaml b/kubernetes/Chart.yaml deleted file mode 100644 index 698d4b9e..00000000 --- a/kubernetes/Chart.yaml +++ /dev/null @@ -1,80 +0,0 @@ -apiVersion: v2 -name: rag-module -description: Umbrella chart for RAG Module -version: 0.1.0 -type: application - -dependencies: - - name: database - version: 0.1.0 - repository: "file://./charts/database" - - name: TIM-database - version: 0.1.0 - repository: "file://./charts/TIM-database" - - name: resql - version: 0.1.0 - repository: "file://./charts/Resql" - - name: ruuter-public - version: 0.1.0 - repository: "file://./charts/Ruuter-Public" - - name: ruuter-private - version: 0.1.0 - repository: "file://./charts/Ruuter-Private" - - name: data-mapper - version: 0.1.0 - repository: "file://./charts/DataMapper" - - name: TIM - version: 0.1.0 - repository: "file://./charts/TIM" - - name: Authentication-Layer - version: 0.1.0 - repository: "file://./charts/Authentication-Layer" - - name: CronManager - version: 0.1.0 - repository: "file://./charts/CronManager" - - name: GUI - version: 0.1.0 - repository: "file://./charts/GUI" - - name: Loki - version: 0.1.0 - repository: "file://./charts/Loki" - - name: 
Grafana - version: 0.1.0 - repository: "file://./charts/Grafana" - - name: S3-Ferry - version: 0.1.0 - repository: "file://./charts/S3-Ferry" - - name: minio - version: 0.1.0 - repository: "file://./charts/minio" - - name: Redis - version: 0.1.0 - repository: "file://./charts/Redis" - - name: Qdrant - version: 0.1.0 - repository: "file://./charts/Qdrant" - - name: ClickHouse - version: 0.1.0 - repository: "file://./charts/ClickHouse" - - name: Langfuse-Web - version: 0.1.0 - repository: "file://./charts/Langfuse-Web" - - name: Langfuse-Worker - version: 0.1.0 - repository: "file://./charts/Langfuse-Worker" - - name: Vault - version: 0.1.0 - repository: "file://./charts/Vault" - - name: Vault-Init - version: 0.1.0 - repository: "file://./charts/Vault-Init" - - name: Vault-Agent-LLM - version: 0.1.0 - repository: "file://./charts/Vault-Agent-LLM" - - name: LLM-Orchestration-Service - version: 0.1.0 - repository: "file://./charts/LLM-Orchestration-Service" - - name: Liquibase - version: 0.1.0 - repository: "file://./charts/Liquibase" - diff --git a/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml b/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml index 1deb38f2..78f99697 100644 --- a/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml +++ b/kubernetes/charts/ClickHouse/templates/deployment-byk-clickhouse.yaml @@ -34,13 +34,15 @@ spec: - name: native containerPort: {{ .Values.service.nativePort }} protocol: TCP + # Non-sensitive env's from values.yaml env: - name: CLICKHOUSE_DB value: "{{ .Values.env.CLICKHOUSE_DB }}" - - name: CLICKHOUSE_USER - value: "{{ .Values.env.CLICKHOUSE_USER }}" - - name: CLICKHOUSE_PASSWORD - value: "{{ .Values.env.CLICKHOUSE_PASSWORD }}" + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} {{- if .Values.healthcheck.enabled }} livenessProbe: httpGet: @@ -61,9 +63,9 @@ spec: {{- end }} {{- if .Values.persistence.enabled }} volumeMounts: - - name: langfuse_clickhouse_data + - name: langfuse-clickhouse-data mountPath: /var/lib/clickhouse - - name: langfuse_clickhouse_logs + - name: langfuse-clickhouse-logs mountPath: /var/log/clickhouse-server {{- end }} resources: @@ -75,10 +77,10 @@ spec: cpu: "{{ .Values.resources.limits.cpu }}" {{- if .Values.persistence.enabled }} volumes: - - name: langfuse_clickhouse_data + - name: langfuse-clickhouse-data persistentVolumeClaim: claimName: "{{ .Values.release_name }}-data" - - name: langfuse_clickhouse_logs + - name: langfuse-clickhouse-logs persistentVolumeClaim: claimName: "{{ .Values.release_name }}-logs" {{- end }} diff --git a/kubernetes/charts/ClickHouse/templates/secret.yaml b/kubernetes/charts/ClickHouse/templates/secret.yaml new file mode 100644 index 00000000..984a5dd7 --- /dev/null +++ b/kubernetes/charts/ClickHouse/templates/secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: clickhouse-secrets + labels: + app: "{{ .Values.release_name }}" + component: clickhouse +type: Opaque +stringData: + CLICKHOUSE_USER: "" + CLICKHOUSE_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/ClickHouse/values.yaml b/kubernetes/charts/ClickHouse/values.yaml index 7d181c4a..287c84a5 100644 --- a/kubernetes/charts/ClickHouse/values.yaml +++ b/kubernetes/charts/ClickHouse/values.yaml @@ -19,8 +19,11 @@ service: # Environment variables env: CLICKHOUSE_DB: "default" - CLICKHOUSE_USER: "clickhouse" - CLICKHOUSE_PASSWORD: "changeme" + +# 
Reference to Kubernetes Secret +envFrom: + - secretRef: + name: clickhouse-secrets # Security context securityContext: diff --git a/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml b/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml index 0fbfc953..375d5db5 100644 --- a/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml +++ b/kubernetes/charts/CronManager/templates/deployment-byk-cronmanager.yaml @@ -12,6 +12,10 @@ spec: app: "{{ .Values.release_name }}" template: metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} labels: app: "{{ .Values.release_name }}" spec: diff --git a/kubernetes/charts/CronManager/values.yaml b/kubernetes/charts/CronManager/values.yaml index 21c24d4d..825c8971 100644 --- a/kubernetes/charts/CronManager/values.yaml +++ b/kubernetes/charts/CronManager/values.yaml @@ -40,4 +40,7 @@ resources: cpu: "500m" limits: memory: "4Gi" - cpu: "2000m" \ No newline at end of file + cpu: "2000m" + +podAnnotations: + dsl-checksum: "initial" \ No newline at end of file diff --git a/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml b/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml index 09f23652..f52cf138 100644 --- a/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml +++ b/kubernetes/charts/DataMapper/templates/deployment-byk-data-mapper.yaml @@ -12,6 +12,10 @@ spec: app: "{{ .Values.release_name }}" template: metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} labels: app: "{{ .Values.release_name }}" spec: diff --git a/kubernetes/charts/DataMapper/values.yaml b/kubernetes/charts/DataMapper/values.yaml index 220526ca..3267f3b9 100644 --- a/kubernetes/charts/DataMapper/values.yaml +++ b/kubernetes/charts/DataMapper/values.yaml @@ -29,3 +29,5 @@ resources: pullPolicy: IfNotPresent +podAnnotations: + dsl-checksum: "initial" \ No newline at end of file diff --git a/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml b/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml index 081b1842..94be8722 100644 --- a/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml +++ b/kubernetes/charts/GUI/templates/deployment-byk-gui.yaml @@ -66,21 +66,21 @@ spec: cpu: {{ .Values.gui.resources.requests.cpu }} memory: {{ .Values.gui.resources.requests.memory }} - livenessProbe: - httpGet: - path: / - port: {{ .Values.gui.port }} - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 + # livenessProbe: + # httpGet: + # path: / + # port: {{ .Values.gui.port }} + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 - readinessProbe: - httpGet: - path: / - port: {{ .Values.gui.port }} - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 3 + # readinessProbe: + # httpGet: + # path: / + # port: {{ .Values.gui.port }} + # initialDelaySeconds: 10 + # periodSeconds: 5 + # timeoutSeconds: 3 restartPolicy: Always diff --git a/kubernetes/charts/GUI/values.yaml b/kubernetes/charts/GUI/values.yaml index 192a9e52..e48710e3 100644 --- a/kubernetes/charts/GUI/values.yaml +++ b/kubernetes/charts/GUI/values.yaml @@ -2,9 +2,9 @@ gui: enabled: true release_name: gui image: - repository: "ghcr.io/buerokratt/rag-gui" # Update with actual GUI image repository - tag: latest - pullPolicy: IfNotPresent + repository: "ghcr.io/buerokratt/rag-gui" # Update with actual GUI image repository + tag: sha-84833e1 + pullPolicy: Always # React application 
configuration nodeEnv: production diff --git a/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml b/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml index 844aa70a..3228eca8 100644 --- a/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml +++ b/kubernetes/charts/Grafana/templates/configmap-dashboards.yaml @@ -7,5 +7,5 @@ metadata: data: {{- range $path, $content := .Files.Glob "dashboards/*.json" }} {{ base $path }}: | -{{ $content | indent 4 }} +{{ $.Files.Get $path | indent 4 }} {{- end }} \ No newline at end of file diff --git a/kubernetes/charts/Grafana/templates/deployment-grafana.yaml b/kubernetes/charts/Grafana/templates/deployment-grafana.yaml index 4b6fef87..d9191db7 100644 --- a/kubernetes/charts/Grafana/templates/deployment-grafana.yaml +++ b/kubernetes/charts/Grafana/templates/deployment-grafana.yaml @@ -22,11 +22,17 @@ spec: - name: http containerPort: {{ .Values.port }} protocol: TCP + # Non-sensitive env's from values.yaml env: {{- range $key, $value := .Values.env }} - name: {{ $key }} value: {{ $value | quote }} {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} volumeMounts: - name: datasources mountPath: /etc/grafana/provisioning/datasources diff --git a/kubernetes/charts/Grafana/templates/secret.yaml b/kubernetes/charts/Grafana/templates/secret.yaml new file mode 100644 index 00000000..f1748ade --- /dev/null +++ b/kubernetes/charts/Grafana/templates/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: grafana-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + GF_SECURITY_ADMIN_USER: "" + GF_SECURITY_ADMIN_PASSWORD: "" diff --git a/kubernetes/charts/Grafana/values.yaml b/kubernetes/charts/Grafana/values.yaml index 805b8404..bd0d08cf 100644 --- a/kubernetes/charts/Grafana/values.yaml +++ b/kubernetes/charts/Grafana/values.yaml @@ -31,9 +31,9 @@ resources: cpu: 250m memory: 256Mi +# Admin configuration admin: - user: admin - password: admin + enabled: true # Datasources configuration datasources: @@ -55,8 +55,11 @@ dashboardProviders: options: path: /var/lib/grafana/dashboards -# Environment variables +# Environment variables (non-sensitive) env: - GF_SECURITY_ADMIN_USER: admin - GF_SECURITY_ADMIN_PASSWORD: admin123 GF_USERS_ALLOW_SIGN_UP: "false" + +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: grafana-secrets diff --git a/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml b/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml index 59cff5c1..403e253f 100644 --- a/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml +++ b/kubernetes/charts/Langfuse-Web/templates/deployment-byk-langfuse-web.yaml @@ -25,11 +25,17 @@ spec: - name: http containerPort: {{ .Values.service.targetPort }} protocol: TCP + # Non-sensitive env's from values.yaml env: {{- range $key, $value := .Values.env }} - name: {{ $key }} value: {{ $value | quote }} {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} {{- if .Values.healthcheck.enabled }} livenessProbe: httpGet: diff --git a/kubernetes/charts/Langfuse-Web/templates/secret.yaml b/kubernetes/charts/Langfuse-Web/templates/secret.yaml new file mode 100644 index 00000000..1c3ae5c1 --- /dev/null +++ b/kubernetes/charts/Langfuse-Web/templates/secret.yaml @@ -0,0 +1,25 @@ +{{- if 
.Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: langfuse-web-secrets + labels: + app: "{{ .Values.release_name }}" + component: langfuse-web +type: Opaque +stringData: + DATABASE_URL: "" + NEXTAUTH_SECRET: "" + ENCRYPTION_KEY: "" + SALT: "" + CLICKHOUSE_MIGRATION_URL: "" + CLICKHOUSE_USER: "" + CLICKHOUSE_PASSWORD: "" + LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: "" + LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: "" + LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: "" + LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: "" + LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: "" + LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: "" + REDIS_AUTH: "" +{{- end }} diff --git a/kubernetes/charts/Langfuse-Web/values.yaml b/kubernetes/charts/Langfuse-Web/values.yaml index a85b0eae..6dfaf1cf 100644 --- a/kubernetes/charts/Langfuse-Web/values.yaml +++ b/kubernetes/charts/Langfuse-Web/values.yaml @@ -16,36 +16,25 @@ service: # Environment variables env: - # Database configuration + # Non-sensitive configuration NEXTAUTH_URL: "http://localhost:3000" - DATABASE_URL: "postgresql://postgres:dbadmin@rag_search_db:5432/rag-search" - SALT: "changeme" - ENCRYPTION_KEY: "changeme" - NEXTAUTH_SECRET: "changeme" TELEMETRY_ENABLED: "true" LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: "true" - # ClickHouse configuration - CLICKHOUSE_MIGRATION_URL: "clickhouse://clickhouse:9000" + # ClickHouse configuration (non-sensitive) CLICKHOUSE_URL: "http://clickhouse:8123" - CLICKHOUSE_USER: "default" - CLICKHOUSE_PASSWORD: "clickhouse" CLICKHOUSE_CLUSTER_ENABLED: "false" - # S3/MinIO configuration + # S3/MinIO configuration (non-sensitive) LANGFUSE_USE_AZURE_BLOB: "false" LANGFUSE_S3_EVENT_UPLOAD_BUCKET: "rag-search" LANGFUSE_S3_EVENT_UPLOAD_REGION: "auto" - LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: "changeme" - LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: "changeme" LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: "http://minio:9000" LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: "true" LANGFUSE_S3_EVENT_UPLOAD_PREFIX: "langfuse/events/" LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: "rag-search" LANGFUSE_S3_MEDIA_UPLOAD_REGION: "auto" - LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: "changeme" - LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: "changeme" LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: "http://minio:9000" LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: "true" LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: "langfuse/media/" @@ -56,16 +45,13 @@ env: LANGFUSE_S3_BATCH_EXPORT_REGION: "auto" LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: "http://minio:9000" LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: "http://minio:9000" - LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: "changeme" - LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: "changeme" LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: "true" LANGFUSE_INGESTION_QUEUE_DELAY_MS: "" LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: "" - # Redis configuration + # Redis configuration (non-sensitive) REDIS_HOST: "redis" REDIS_PORT: "6379" - REDIS_AUTH: "myredissecret" REDIS_TLS_ENABLED: "false" REDIS_TLS_CA: "" REDIS_TLS_CERT: "" @@ -86,6 +72,13 @@ env: LANGFUSE_INIT_USER_NAME: "" LANGFUSE_INIT_USER_PASSWORD: "" +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: langfuse-web-secrets + + + resources: requests: memory: "512Mi" diff --git a/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml b/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml index 3a82d36c..1ab3c559 100644 --- a/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml +++ 
b/kubernetes/charts/Langfuse-Worker/templates/deployment-byk-langfuse-worker.yaml @@ -25,11 +25,17 @@ spec: - name: worker containerPort: {{ .Values.service.port }} protocol: TCP + # Non-sensitive env's from values.yaml env: {{- range $key, $value := .Values.env }} - name: {{ $key }} value: {{ $value | quote }} {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} {{- if .Values.healthcheck.enabled }} livenessProbe: httpGet: diff --git a/kubernetes/charts/Langfuse-Worker/templates/secret.yaml b/kubernetes/charts/Langfuse-Worker/templates/secret.yaml new file mode 100644 index 00000000..d7ec52bd --- /dev/null +++ b/kubernetes/charts/Langfuse-Worker/templates/secret.yaml @@ -0,0 +1,24 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: langfuse-worker-secrets + labels: + app: "{{ .Values.release_name }}" + component: langfuse-worker +type: Opaque +stringData: + DATABASE_URL: "" + ENCRYPTION_KEY: "" + SALT: "" + CLICKHOUSE_MIGRATION_URL: "" + CLICKHOUSE_USER: "" + CLICKHOUSE_PASSWORD: "" + LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: "" + LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: "" + LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: "" + LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: "" + LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: "" + LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: "" + REDIS_AUTH: "" +{{- end }} diff --git a/kubernetes/charts/Langfuse-Worker/values.yaml b/kubernetes/charts/Langfuse-Worker/values.yaml index 61e5cb0c..0a7343eb 100644 --- a/kubernetes/charts/Langfuse-Worker/values.yaml +++ b/kubernetes/charts/Langfuse-Worker/values.yaml @@ -15,35 +15,25 @@ service: # Environment variables env: - # Database configuration + # Non-sensitive configuration NEXTAUTH_URL: "http://localhost:3000" - DATABASE_URL: "postgresql://postgres:dbadmin@rag_search_db:5432/rag-search" - SALT: "changeme" - ENCRYPTION_KEY: "changeme" TELEMETRY_ENABLED: "true" LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: "true" - # ClickHouse configuration - CLICKHOUSE_MIGRATION_URL: "clickhouse://clickhouse:9000" + # ClickHouse configuration (non-sensitive) CLICKHOUSE_URL: "http://clickhouse:8123" - CLICKHOUSE_USER: "default" - CLICKHOUSE_PASSWORD: "clickhouse" CLICKHOUSE_CLUSTER_ENABLED: "false" - # S3/MinIO configuration + # S3/MinIO configuration (non-sensitive) LANGFUSE_USE_AZURE_BLOB: "false" LANGFUSE_S3_EVENT_UPLOAD_BUCKET: "rag-search" LANGFUSE_S3_EVENT_UPLOAD_REGION: "auto" - LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: "changeme" - LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: "changeme" LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: "http://minio:9000" LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: "true" LANGFUSE_S3_EVENT_UPLOAD_PREFIX: "langfuse/events/" LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: "rag-search" LANGFUSE_S3_MEDIA_UPLOAD_REGION: "auto" - LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: "changeme" - LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: "changeme" LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: "http://minio:9000" LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: "true" LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: "langfuse/media/" @@ -54,16 +44,13 @@ env: LANGFUSE_S3_BATCH_EXPORT_REGION: "auto" LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: "http://minio:9000" LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: "http://minio:9000" - LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: "changeme" - LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: "changeme" LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: "true" LANGFUSE_INGESTION_QUEUE_DELAY_MS: "" LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: "" - # 
Redis configuration + # Redis configuration (non-sensitive) REDIS_HOST: "redis" REDIS_PORT: "6379" - REDIS_AUTH: "myredissecret" REDIS_TLS_ENABLED: "false" REDIS_TLS_CA: "" REDIS_TLS_CERT: "" @@ -73,6 +60,12 @@ env: EMAIL_FROM_ADDRESS: "" SMTP_CONNECTION_URL: "" +# Reference to Kubernetes Secret +# Sensitive credentials should be set in templates/secret.yaml before deployment +envFrom: + - secretRef: + name: langfuse-worker-secrets + resources: requests: memory: "512Mi" diff --git a/kubernetes/charts/Liquibase/templates/liquibase-job.yaml b/kubernetes/charts/Liquibase/templates/liquibase-job.yaml index d9e54cee..3ec6bea8 100644 --- a/kubernetes/charts/Liquibase/templates/liquibase-job.yaml +++ b/kubernetes/charts/Liquibase/templates/liquibase-job.yaml @@ -9,6 +9,10 @@ spec: backoffLimit: {{ .Values.backoffLimit }} template: metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} labels: app: "{{ .Values.release_name }}" spec: @@ -38,8 +42,18 @@ spec: - name: {{ .name }} value: "{{ .value }}" {{- end }} - - + # Sensitive env's from Kubernetes Secret + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: liquibase-secrets + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: liquibase-secrets + key: POSTGRES_PASSWORD + volumeMounts: - name: liquibase-repo mountPath: /liquibase-files diff --git a/kubernetes/charts/Liquibase/templates/secret.yaml b/kubernetes/charts/Liquibase/templates/secret.yaml new file mode 100644 index 00000000..90b0eb75 --- /dev/null +++ b/kubernetes/charts/Liquibase/templates/secret.yaml @@ -0,0 +1,12 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: liquibase-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + POSTGRES_USER: "" + POSTGRES_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Liquibase/values.yaml b/kubernetes/charts/Liquibase/values.yaml index 0d538422..3d68b496 100644 --- a/kubernetes/charts/Liquibase/values.yaml +++ b/kubernetes/charts/Liquibase/values.yaml @@ -8,14 +8,14 @@ images: tag: "4.33.0" env: - - name: POSTGRES_USER - value: "postgres" - - name: POSTGRES_PASSWORD - value: "dbadmin" - name: LIQUIBASE_URL value: "jdbc:postgresql://rag-search-db:5432/rag-search" - name: LIQUIBASE_CHANGELOG_FILE value: /master.yml -pullPolicy: IfNotPresent \ No newline at end of file + +pullPolicy: IfNotPresent + +podAnnotations: + dsl-checksum: "211bdc77c12b" \ No newline at end of file diff --git a/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml b/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml index b67ab1d2..f60b6d69 100644 --- a/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml +++ b/kubernetes/charts/Redis/templates/deployment-byk-redis.yaml @@ -31,11 +31,11 @@ spec: - --requirepass - $(REDIS_PASSWORD) {{- end }} - env: - {{- if .Values.auth.enabled }} - - name: REDIS_PASSWORD - value: "{{ .Values.auth.password }}" - {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} {{- if .Values.healthcheck.enabled }} livenessProbe: exec: diff --git a/kubernetes/charts/Redis/templates/secret.yaml b/kubernetes/charts/Redis/templates/secret.yaml new file mode 100644 index 00000000..27ad0560 --- /dev/null +++ b/kubernetes/charts/Redis/templates/secret.yaml @@ -0,0 +1,12 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: redis-secrets + labels: + app: "{{ 
.Values.release_name }}" + component: redis +type: Opaque +stringData: + REDIS_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Redis/values.yaml b/kubernetes/charts/Redis/values.yaml index 01018687..784f8c1c 100644 --- a/kubernetes/charts/Redis/values.yaml +++ b/kubernetes/charts/Redis/values.yaml @@ -15,10 +15,11 @@ service: auth: enabled: true - password: "myredissecret" - -env: - REDIS_PASSWORD: "myredissecret" + +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: redis-secrets # Resource configuration resources: diff --git a/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml b/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml index edc52930..c44dc303 100644 --- a/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml +++ b/kubernetes/charts/Resql/templates/deployment-byk-resql.yaml @@ -10,6 +10,10 @@ spec: app: "{{ .Values.release_name }}" template: metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} labels: app: "{{ .Values.release_name }}" spec: @@ -48,8 +52,12 @@ spec: value: "{{ .Values.env.SQLMS_DATASOURCES_0_JDBCURL }}" - name: SQLMS_DATASOURCES_0_USERNAME value: "{{ .Values.env.SQLMS_DATASOURCES_0_USERNAME }}" + # Sensitive env from Kubernetes Secret - name: SQLMS_DATASOURCES_0_PASSWORD - value: "{{ .Values.env.SQLMS_DATASOURCES_0_PASSWORD }}" + valueFrom: + secretKeyRef: + name: resql-secrets + key: SQLMS_DATASOURCES_0_PASSWORD - name: LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT value: "{{ .Values.env.LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT }}" - name: SQLMS_SAVED_QUERIES_DIR diff --git a/kubernetes/charts/Resql/templates/secret.yaml b/kubernetes/charts/Resql/templates/secret.yaml new file mode 100644 index 00000000..335257b8 --- /dev/null +++ b/kubernetes/charts/Resql/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: resql-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + SQLMS_DATASOURCES_0_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Resql/values.yaml b/kubernetes/charts/Resql/values.yaml index 470b7a0d..c49d1bab 100644 --- a/kubernetes/charts/Resql/values.yaml +++ b/kubernetes/charts/Resql/values.yaml @@ -17,7 +17,6 @@ env: SQLMS_DATASOURCES_0_NAME: "byk" SQLMS_DATASOURCES_0_JDBCURL: "jdbc:postgresql://rag-search-db:5432/rag-search" SQLMS_DATASOURCES_0_USERNAME: "postgres" - SQLMS_DATASOURCES_0_PASSWORD: "dbadmin" LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_BOOT: "INFO" JAVA_OPTS: "-Xms1g -Xmx3g" @@ -30,3 +29,6 @@ resources: cpu: "50m" pullPolicy: IfNotPresent + +podAnnotations: + dsl-checksum: "initial" diff --git a/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml index 9a20ec2d..6f84c283 100644 --- a/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml +++ b/kubernetes/charts/Ruuter-Private/templates/configmap-byk-ruuter-private.yaml @@ -15,5 +15,4 @@ data: RAG_SEARCH_TIM=http://tim:8085 RAG_SEARCH_CRON_MANAGER=http://cron-manager:9010 RAG_SEARCH_LLM_ORCHESTRATOR=http://llm-orchestration-service:8100/orchestrate - DOMAIN=localhost - DB_PASSWORD=dbadmin \ No newline at end of file + DOMAIN=localhost \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml b/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml index c2082f6d..866d3a7d 100644 --- 
a/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml +++ b/kubernetes/charts/Ruuter-Private/templates/deployment-byk-ruuter-private.yaml @@ -12,6 +12,10 @@ spec: app: "{{ .Values.release_name }}" template: metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} labels: app: "{{ .Values.release_name }}" spec: @@ -53,6 +57,12 @@ spec: value: "{{ .Values.env.APPLICATION_INTERNALREQUESTS_DISABLED }}" - name: server.port value: "{{ .Values.env.SERVER_PORT }}" + # Sensitive env from Kubernetes Secret + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: ruuter-private-secrets + key: DB_PASSWORD - name: logging.level.root diff --git a/kubernetes/charts/Ruuter-Private/templates/secret.yaml b/kubernetes/charts/Ruuter-Private/templates/secret.yaml new file mode 100644 index 00000000..1db3b29a --- /dev/null +++ b/kubernetes/charts/Ruuter-Private/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: ruuter-private-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + DB_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Ruuter-Private/values.yaml b/kubernetes/charts/Ruuter-Private/values.yaml index a6a6d64d..3a48ac17 100644 --- a/kubernetes/charts/Ruuter-Private/values.yaml +++ b/kubernetes/charts/Ruuter-Private/values.yaml @@ -53,4 +53,6 @@ ingress: pullPolicy: IfNotPresent +podAnnotations: + dsl-checksum: "initial" diff --git a/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml index 354b6f2a..a6a56c0c 100644 --- a/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml +++ b/kubernetes/charts/Ruuter-Public/templates/configmap-byk-ruuter-public.yaml @@ -15,5 +15,4 @@ data: RAG_SEARCH_TIM=http://tim:8085 RAG_SEARCH_CRON_MANAGER=http://cron-manager:9010 RAG_SEARCH_LLM_ORCHESTRATOR=http://llm-orchestration-service:8100/orchestrate - DOMAIN=localhost - DB_PASSWORD=dbadmin \ No newline at end of file + DOMAIN=localhost \ No newline at end of file diff --git a/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml b/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml index ebf2c2f8..e0814302 100644 --- a/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml +++ b/kubernetes/charts/Ruuter-Public/templates/deployment-byk-ruuter-public.yaml @@ -12,6 +12,10 @@ spec: app: "{{ .Values.release_name }}" template: metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . 
| nindent 8 }} + {{- end }} labels: app: "{{ .Values.release_name }}" spec: @@ -53,6 +57,12 @@ spec: value: "{{ .Values.env.SERVER_PORT }}" - name: application.constants.file value: "/app/constants.ini" + # Sensitive env from Kubernetes Secret + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: ruuter-public-secrets + key: DB_PASSWORD - name: logging.level.root value: "{{ .Values.env.LOGGING_LEVEL_ROOT }}" diff --git a/kubernetes/charts/Ruuter-Public/templates/secret.yaml b/kubernetes/charts/Ruuter-Public/templates/secret.yaml new file mode 100644 index 00000000..e9f76ce1 --- /dev/null +++ b/kubernetes/charts/Ruuter-Public/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: ruuter-public-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + DB_PASSWORD: "" +{{- end }} diff --git a/kubernetes/charts/Ruuter-Public/values.yaml b/kubernetes/charts/Ruuter-Public/values.yaml index 635d51c9..320d43f6 100644 --- a/kubernetes/charts/Ruuter-Public/values.yaml +++ b/kubernetes/charts/Ruuter-Public/values.yaml @@ -49,3 +49,6 @@ ingress: pullPolicy: IfNotPresent + +podAnnotations: + dsl-checksum: "94b84bb5ff4d" diff --git a/kubernetes/charts/S3-Ferry/templates/configmap-s3.yaml b/kubernetes/charts/S3-Ferry/templates/configmap-s3.yaml deleted file mode 100644 index 5a80ebe0..00000000 --- a/kubernetes/charts/S3-Ferry/templates/configmap-s3.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.release_name }}-config - labels: - app: {{ .Values.release_name }} -data: - {{- range $key, $value := .Values.env }} - {{ $key }}: {{ $value | quote }} - {{- end }} \ No newline at end of file diff --git a/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml b/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml index 16678196..af396e8c 100644 --- a/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml +++ b/kubernetes/charts/S3-Ferry/templates/deployment-s3.yaml @@ -22,9 +22,18 @@ spec: - name: http containerPort: {{ .Values.port }} protocol: TCP + # Non-sensitive env's from ConfigMap + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} envFrom: - - configMapRef: - name: {{ .Values.release_name }}-config + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} + volumeMounts: {{- if .Values.persistence.enabled }} - name: shared diff --git a/kubernetes/charts/S3-Ferry/templates/secret.yaml b/kubernetes/charts/S3-Ferry/templates/secret.yaml new file mode 100644 index 00000000..ac341bd8 --- /dev/null +++ b/kubernetes/charts/S3-Ferry/templates/secret.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Secret +metadata: + name: s3-ferry-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + S3_SECRET_ACCESS_KEY: "" + S3_ACCESS_KEY_ID: "" + GF_SECURITY_ADMIN_USER: "" + GF_SECURITY_ADMIN_PASSWORD: "" diff --git a/kubernetes/charts/S3-Ferry/values.yaml b/kubernetes/charts/S3-Ferry/values.yaml index 69c03c5f..6274d414 100644 --- a/kubernetes/charts/S3-Ferry/values.yaml +++ b/kubernetes/charts/S3-Ferry/values.yaml @@ -38,7 +38,7 @@ resources: cpu: 250m memory: 256Mi -# Environment variables +# Environment variables (non-sensitive) env: API_CORS_ORIGIN: "*" API_DOCUMENTATION_ENABLED: "true" @@ -48,14 +48,15 @@ env: S3_DATA_BUCKET_PATH: "resources" S3_DATA_BUCKET_NAME: "rag-search" FS_DATA_DIRECTORY_PATH: "/app" - 
S3_SECRET_ACCESS_KEY: "changeme" - S3_ACCESS_KEY_ID: "changeme" S3_HEALTH_ENDPOINT: "http://minio:9000/minio/health/live" MINIO_BROWSER_REDIRECT_URL: "http://localhost:9091" - GF_SECURITY_ADMIN_USER: "admin" - GF_SECURITY_ADMIN_PASSWORD: "admin123" GF_USERS_ALLOW_SIGN_UP: "false" PORT: "3000" +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: s3-ferry-secrets + diff --git a/kubernetes/charts/TIM-database/Chart.yaml b/kubernetes/charts/TIM-database/Chart.yaml deleted file mode 100644 index 2bcdf24d..00000000 --- a/kubernetes/charts/TIM-database/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v2 -name: TIM-database -description: TIM postgresql database -type: application -version: 0.1.0 -appVersion: "1.0" diff --git a/kubernetes/charts/TIM-database/templates/deployment-byk-timdb.yaml b/kubernetes/charts/TIM-database/templates/deployment-byk-timdb.yaml deleted file mode 100644 index 7e8fa6ba..00000000 --- a/kubernetes/charts/TIM-database/templates/deployment-byk-timdb.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{{- if .Values.timPostgresql.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} - labels: - app: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} -spec: - replicas: {{ .Values.timPostgresql.replicaCount | default 1 }} - selector: - matchLabels: - app: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} - template: - metadata: - labels: - app: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} - spec: - containers: - - name: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} - image: "{{ .Values.timPostgresql.image.repository }}:{{ .Values.timPostgresql.image.tag }}" - imagePullPolicy: {{ .Values.timPostgresql.image.pullPolicy }} - env: - {{- range .Values.timPostgresql.env }} - - name: {{ .name }} - value: {{ .value | quote }} - {{- end }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.timPostgresql.secret.name }} - key: POSTGRES_PASSWORD - ports: - - containerPort: {{ .Values.timPostgresql.service.port }} - resources: - {{- toYaml .Values.timPostgresql.resources | nindent 12 }} - volumes: - {{- if .Values.timPostgresql.persistence.enabled }} - - name: postgres-storage - persistentVolumeClaim: - claimName: {{ .Values.timPostgresql.persistence.existingClaim | default (printf "%s-pvc" (.Values.timPostgresql.nameOverride | default "tim-postgresql")) }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM-database/templates/pvc-byk-timdb.yaml b/kubernetes/charts/TIM-database/templates/pvc-byk-timdb.yaml deleted file mode 100644 index 6dfdc707..00000000 --- a/kubernetes/charts/TIM-database/templates/pvc-byk-timdb.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.timPostgresql.enabled .Values.timPostgresql.persistence.enabled }} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ .Values.timPostgresql.persistence.existingClaim | default (printf "%s-pvc" (.Values.timPostgresql.nameOverride | default "tim-postgresql")) }} - labels: - app: {{ .Values.timPostgresql.nameOverride | default "tim-postgresql" }} -spec: - accessModes: - {{- range .Values.timPostgresql.persistence.accessModes }} - - {{ . 
}} - {{- end }} - resources: - requests: - storage: {{ .Values.timPostgresql.persistence.size }} - {{- if .Values.timPostgresql.persistence.storageClass }} - storageClassName: {{ .Values.timPostgresql.persistence.storageClass }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM-database/templates/secret-byk-timdb.yaml b/kubernetes/charts/TIM-database/templates/secret-byk-timdb.yaml deleted file mode 100644 index 30b97aea..00000000 --- a/kubernetes/charts/TIM-database/templates/secret-byk-timdb.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{- if .Values.timPostgresql.secret.create }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Values.timPostgresql.secret.name }} -type: Opaque -data: - POSTGRES_PASSWORD: {{ .Values.timPostgresql.secret.keys.POSTGRES_PASSWORD | b64enc | quote }} -{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM-database/templates/service-byk-timdb.yaml b/kubernetes/charts/TIM-database/templates/service-byk-timdb.yaml deleted file mode 100644 index 686d20ce..00000000 --- a/kubernetes/charts/TIM-database/templates/service-byk-timdb.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.timPostgresql.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.timPostgresql.nameOverride }} -spec: - type: {{ .Values.timPostgresql.service.type | default "ClusterIP" }} - selector: - app: {{ .Values.timPostgresql.nameOverride }} - ports: - - name: postgres - port: {{ .Values.timPostgresql.service.port }} - targetPort: {{ .Values.timPostgresql.service.port }} - nodePort: {{- if eq .Values.timPostgresql.service.type "NodePort" }} {{ .Values.timPostgresql.service.externalPort }} {{- end }} -{{- end }} \ No newline at end of file diff --git a/kubernetes/charts/TIM-database/values.yaml b/kubernetes/charts/TIM-database/values.yaml deleted file mode 100644 index 2e9ce2b7..00000000 --- a/kubernetes/charts/TIM-database/values.yaml +++ /dev/null @@ -1,29 +0,0 @@ -timPostgresql: - enabled: true - nameOverride: tim-postgresql - image: - repository: postgres - tag: "14.1" - pullPolicy: IfNotPresent - service: - type: ClusterIP - port: 5432 - externalPort: 9876 - env: - - name: POSTGRES_USER - value: "tim" - - name: POSTGRES_DB - value: "tim" - - name: POSTGRES_HOST_AUTH_METHOD - value: "trust" - secret: - create: true - name: tim-postgres-secret - keys: - POSTGRES_PASSWORD: "Ab123" - persistence: - enabled: true - size: 1Gi - storageClass: "" - accessModes: ["ReadWriteOnce"] - existingClaim: "" diff --git a/kubernetes/charts/TIM/templates/secret-byk-tim.yaml b/kubernetes/charts/TIM/templates/secret-byk-tim.yaml index 0692c868..81fc11a1 100644 --- a/kubernetes/charts/TIM/templates/secret-byk-tim.yaml +++ b/kubernetes/charts/TIM/templates/secret-byk-tim.yaml @@ -3,7 +3,8 @@ kind: Secret metadata: name: tim-env-secret type: Opaque -data: - oauth2_client_secret: "{{ .Values.tim.config.oauth2_client_secret | b64enc }}" - jwt_integration_key_store_password: "{{ .Values.tim.config.jwt_keystore_password | b64enc }}" - POSTGRES_PASSWORD: "{{ "dbadmin" | b64enc }}" +stringData: + oauth2_client_secret: "" + jwt_integration_key_store_password: "" + POSTGRES_PASSWORD: "" + diff --git a/kubernetes/charts/TIM/values.yaml b/kubernetes/charts/TIM/values.yaml index a057edcd..daba08c5 100644 --- a/kubernetes/charts/TIM/values.yaml +++ b/kubernetes/charts/TIM/values.yaml @@ -22,7 +22,6 @@ tim: config: security_allowlist_jwt: 
"ruuter-public,ruuter-private,ruuter,ruuter-internal,data-mapper,resql,tim,tim-postgresql,chat-widget,authentication-layer,127.0.0.1,::1" - jwt_keystore_password: "defaultpassword" jwt_issuer: "tim-issuer" spring_profiles_active: "dev" logging_level_root: "DEBUG" @@ -31,11 +30,11 @@ tim: legacy_referer_marker: "NA" legacy_url: "NA" oauth2_client_id: "your-client-id" - oauth2_client_secret: "my-secret-value" oauth2_client_scope: "read,write" oauth2_user_auth_uri: "https://tara-test.ria.ee/oidc/authorize" oauth2_access_token_uri: "https://tara-test.ria.ee/oidc/token" oauth2_jwk_uri: "https://tara-test.ria.ee/oidc/jwks" + resources: limits: cpu: "500m" diff --git a/kubernetes/charts/database/Chart.lock b/kubernetes/charts/database/Chart.lock deleted file mode 100644 index 641f6d08..00000000 --- a/kubernetes/charts/database/Chart.lock +++ /dev/null @@ -1,6 +0,0 @@ -dependencies: -- name: postgresql - repository: https://charts.bitnami.com/bitnami - version: 12.2.6 -digest: sha256:6f50554d914d878d490c46307f120b87d39854e42f81411b13ffdd23aad21cb6 -generated: "2025-12-02T13:43:50.4497212+05:30" diff --git a/kubernetes/charts/database/Chart.yaml b/kubernetes/charts/database/Chart.yaml index 2facc943..9612978c 100644 --- a/kubernetes/charts/database/Chart.yaml +++ b/kubernetes/charts/database/Chart.yaml @@ -1,12 +1,6 @@ apiVersion: v2 name: database -description: PostgreSQL databases for RAG +description: PostgreSQL databases for RAG Module using pure PostgreSQL type: application -version: 0.1.0 - -dependencies: - - name: postgresql - version: 12.2.6 - repository: https://charts.bitnami.com/bitnami - alias: rag-search-db +version: 0.2.0 \ No newline at end of file diff --git a/kubernetes/charts/database/templates/secret.yaml b/kubernetes/charts/database/templates/secret.yaml new file mode 100644 index 00000000..244a4504 --- /dev/null +++ b/kubernetes/charts/database/templates/secret.yaml @@ -0,0 +1,12 @@ +{{- range .Values.databases }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }}-secret + labels: + app: {{ .name }} +type: Opaque +data: + password: {{ .password | b64enc | quote }} +--- +{{- end }} diff --git a/kubernetes/charts/database/templates/service.yaml b/kubernetes/charts/database/templates/service.yaml new file mode 100644 index 00000000..2a1a5393 --- /dev/null +++ b/kubernetes/charts/database/templates/service.yaml @@ -0,0 +1,34 @@ +{{- range .Values.databases }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .name }} + labels: + app: {{ .name }} +spec: + type: ClusterIP + selector: + app: {{ .name }} + ports: + - name: postgres + port: {{ $.Values.service.port }} + targetPort: {{ $.Values.service.port }} +--- +# Headless service for StatefulSet +apiVersion: v1 +kind: Service +metadata: + name: {{ .name }}-headless + labels: + app: {{ .name }} +spec: + type: ClusterIP + clusterIP: None + selector: + app: {{ .name }} + ports: + - name: postgres + port: {{ $.Values.service.port }} + targetPort: {{ $.Values.service.port }} +--- +{{- end }} diff --git a/kubernetes/charts/database/templates/statefulset.yaml b/kubernetes/charts/database/templates/statefulset.yaml new file mode 100644 index 00000000..4ff65816 --- /dev/null +++ b/kubernetes/charts/database/templates/statefulset.yaml @@ -0,0 +1,66 @@ +{{- range .Values.databases }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .name }} + labels: + app: {{ .name }} +spec: + serviceName: {{ .name }}-headless + replicas: 1 + selector: + matchLabels: + app: {{ .name }} + template: + metadata: + labels: + app: {{ 
.name }} + spec: + securityContext: + fsGroup: 999 + terminationGracePeriodSeconds: 30 + containers: + - name: postgresql + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: POSTGRES_USER + value: "{{ .username }}" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .name }}-secret + key: password + - name: POSTGRES_DB + value: "{{ .db }}" + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + ports: + - name: postgres + containerPort: {{ $.Values.service.port }} + livenessProbe: + tcpSocket: + port: {{ $.Values.service.port }} + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + tcpSocket: + port: {{ $.Values.service.port }} + initialDelaySeconds: 15 + periodSeconds: 10 + volumeMounts: + - name: data + mountPath: /var/lib/postgresql/data + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: {{ toYaml $.Values.persistence.accessModes | nindent 8 }} + resources: + requests: + storage: {{ .storage }} + {{- if $.Values.persistence.storageClass }} + storageClassName: {{ $.Values.persistence.storageClass }} + {{- end }} +--- +{{- end }} diff --git a/kubernetes/charts/database/values.yaml b/kubernetes/charts/database/values.yaml index d7841e56..8c43f0f5 100644 --- a/kubernetes/charts/database/values.yaml +++ b/kubernetes/charts/database/values.yaml @@ -1,14 +1,25 @@ -rag-search-db: - fullnameOverride: rag-search-db - image: - tag: latest - auth: - postgresPassword: dbadmin +# Centralized database configuration using pure PostgreSQL +databases: + - name: rag-search-db username: postgres - password: dbadmin - database: rag-search - primary: - persistence: - enabled: true - size: 8Gi + password: "{{ ragSearchDB.password }}" + db: rag-search + storage: 8Gi + - name: tim-postgresql + username: tim + password: "{{ TIMDB.password }}" + db: tim + storage: 1Gi + +image: + repository: postgres + tag: "14.1" + pullPolicy: IfNotPresent + +service: + port: 5432 + +persistence: + storageClass: "" # specify your own + accessModes: ["ReadWriteOnce"] diff --git a/kubernetes/charts/minio/templates/deployment-minio.yaml b/kubernetes/charts/minio/templates/deployment-minio.yaml index 2012d5bf..1ba2cc74 100644 --- a/kubernetes/charts/minio/templates/deployment-minio.yaml +++ b/kubernetes/charts/minio/templates/deployment-minio.yaml @@ -44,11 +44,17 @@ spec: - containerPort: {{ .Values.ports.console }} name: console protocol: TCP + # Non-sensitive env's from values.yaml env: -{{- range $key, $value := .Values.env }} + {{- range $key, $value := .Values.env }} - name: {{ $key }} value: "{{ $value }}" -{{- end }} + {{- end }} + # Sensitive env's from Kubernetes Secret + {{- if .Values.envFrom }} + envFrom: + {{- toYaml .Values.envFrom | nindent 12 }} + {{- end }} volumeMounts: - name: minio-data mountPath: /data diff --git a/kubernetes/charts/minio/templates/secret.yaml b/kubernetes/charts/minio/templates/secret.yaml new file mode 100644 index 00000000..a3d9c65f --- /dev/null +++ b/kubernetes/charts/minio/templates/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: minio-secrets + labels: + app: "{{ .Values.release_name }}" +type: Opaque +stringData: + MINIO_ROOT_USER: "" + MINIO_ROOT_PASSWORD: "" diff --git a/kubernetes/charts/minio/values.yaml b/kubernetes/charts/minio/values.yaml index 5919e678..f2f7c7de 100644 --- a/kubernetes/charts/minio/values.yaml +++ b/kubernetes/charts/minio/values.yaml @@ -16,10 +16,13 @@ resources: cpu: "500m" env: - 
MINIO_ROOT_USER: "minioadmin" - MINIO_ROOT_PASSWORD: "minioadmin" MINIO_BROWSER_REDIRECT_URL: "http://localhost:9001" +# Reference to Kubernetes Secret +envFrom: + - secretRef: + name: minio-secrets + volumes: minio_data: type: pvc diff --git a/kubernetes/dashboard-admin.yaml b/kubernetes/dashboard-admin.yaml deleted file mode 100644 index 04855539..00000000 --- a/kubernetes/dashboard-admin.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kubernetes-dashboard ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: ServiceAccount - name: admin-user - namespace: kubernetes-dashboard \ No newline at end of file diff --git a/kubernetes/values.yaml b/kubernetes/values.yaml deleted file mode 100644 index 92c78531..00000000 --- a/kubernetes/values.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# Global configuration for RAG Module -global: - domain: "rag-module.local" - namespace: "rag-module" - storageClass: "standard" - -# Individual service configurations -database: - enabled: true - -TIM-database: - enabled: true - -resql: - enabled: true - -ruuter-public: - enabled: true - -ruuter-private: - enabled: true - -data-mapper: - enabled: true - -TIM: - enabled: true - -Authentication-Layer: - enabled: true - -CronManager: - enabled: true - -GUI: - enabled: true - -Loki: - enabled: true - -Grafana: - enabled: true - -S3-Ferry: - enabled: true - -minio: - enabled: true - -Redis: - enabled: true - -Qdrant: - enabled: true - -ClickHouse: - enabled: true - -Langfuse-Web: - enabled: true - -Langfuse-Worker: - enabled: true - -Vault: - enabled: true - -Vault-Init: - enabled: true - -Vault-Agent-LLM: - enabled: true - -LLM-Orchestration-Service: - enabled: true - -Liquibase: - enabled: true