- Overlap between consecutive chunks
+ Overlap between consecutive chunks. Defaults to 1/8 of chunk size if empty.
@@ -450,14 +539,15 @@ const GraphRAGConfig = () => {
+ <Select value={semanticMethod} onValueChange={(v) => setSemanticMethod(v)}>
-
+
<SelectItem value="percentile">Percentile</SelectItem>
<SelectItem value="standard_deviation">Standard Deviation</SelectItem>
<SelectItem value="interquartile">Interquartile</SelectItem>
+ <SelectItem value="gradient">Gradient</SelectItem>
diff --git a/graphrag-ui/src/pages/setup/LLMConfig.tsx b/graphrag-ui/src/pages/setup/LLMConfig.tsx
index aa5596f..5050845 100644
--- a/graphrag-ui/src/pages/setup/LLMConfig.tsx
+++ b/graphrag-ui/src/pages/setup/LLMConfig.tsx
@@ -64,7 +64,7 @@ const PROVIDER_FIELDS: Record = {
{ key: "AWS_SECRET_ACCESS_KEY", label: "AWS Secret Access Key", type: "password", required: true }
],
configFields: [
- { key: "region_name", label: "AWS Region", type: "text", required: true, placeholder: "us-east-1" }
+ { key: "region_name", label: "AWS Region", type: "text", required: false, placeholder: "us-east-1" }
]
},
groq: {
@@ -107,6 +107,20 @@ const PROVIDER_FIELDS: Record = {
}
};
+// Single provider list shared across all service Select dropdowns
+const LLM_PROVIDERS = [
+ { value: "openai", label: "OpenAI" },
+ { value: "azure", label: "Azure OpenAI" },
+ { value: "genai", label: "Google GenAI (Gemini)" },
+ { value: "vertexai", label: "Google Vertex AI" },
+ { value: "bedrock", label: "AWS Bedrock" },
+ { value: "groq", label: "Groq" },
+ { value: "ollama", label: "Ollama" },
+ { value: "sagemaker", label: "AWS SageMaker" },
+ { value: "huggingface", label: "HuggingFace" },
+ { value: "watsonx", label: "IBM WatsonX" },
+] as const;
+
const LLMConfig = () => {
const [selectedGraph, setSelectedGraph] = useState(sessionStorage.getItem("selectedGraph") || "");
const [availableGraphs, setAvailableGraphs] = useState([]);
@@ -119,13 +133,14 @@ const LLMConfig = () => {
const [messageType, setMessageType] = useState<"success" | "error" | "">("");
const [testResults, setTestResults] = useState(null);
const [connectionTested, setConnectionTested] = useState(false);
-
+
// Single provider state
const [singleProvider, setSingleProvider] = useState("openai");
const [singleConfig, setSingleConfig] = useState<Record<string, string>>({});
const [singleDefaultModel, setSingleDefaultModel] = useState("");
const [singleEmbeddingModel, setSingleEmbeddingModel] = useState("");
const [multimodalModel, setMultimodalModel] = useState("");
+ const [useCustomMultimodal, setUseCustomMultimodal] = useState(false);
// Multi-provider state
const [completionProvider, setCompletionProvider] = useState("openai");
@@ -183,124 +198,129 @@ const LLMConfig = () => {
const llmConfig = data.llm_config;
setLlmConfigAccess(data.llm_config_access === "chatbot_only" ? "chatbot_only" : "full");
+ // Store graph overrides when in per-graph scope
+ if (data.graph_overrides) {
+ setGraphOverrides(data.graph_overrides);
+ } else {
+ setGraphOverrides({});
+ }
+
+ // Detect providers (needed by chat/multimodal fallback below)
+ const completionProv = llmConfig.completion_service?.llm_service?.toLowerCase();
+ const embeddingProv = llmConfig.embedding_service?.embedding_model_service?.toLowerCase();
+ const multimodalProv = llmConfig.multimodal_service?.llm_service?.toLowerCase();
+ const chatProv = llmConfig.chat_service?.llm_service?.toLowerCase();
+ const defaultProv = completionProv || "openai";
+
+ // All config field keys that any provider might use
+ const allConfigKeys = ["base_url", "azure_deployment", "region_name", "project", "location", "endpoint_name", "endpoint_url"];
+
+ // Build the base config: top-level auth + completion_service fields.
+ // Every service inherits missing keys from this base.
+ const baseConfig: Record<string, string> = {};
+ // Layer 1: top-level auth
+ if (llmConfig.authentication_configuration) {
+ for (const [key, value] of Object.entries(llmConfig.authentication_configuration)) {
+ if (typeof value === "string") baseConfig[key] = value;
+ }
+ }
+ // Layer 2: completion_service config fields + auth
+ if (llmConfig.completion_service) {
+ for (const key of allConfigKeys) {
+ if (llmConfig.completion_service[key]) baseConfig[key] = llmConfig.completion_service[key];
+ }
+ if (llmConfig.completion_service.authentication_configuration) {
+ for (const [key, value] of Object.entries(llmConfig.completion_service.authentication_configuration)) {
+ if (typeof value === "string") baseConfig[key] = value;
+ }
+ }
+ }
+
+ // Helper: load a service config, inheriting all missing keys from baseConfig
+ const loadServiceConfigResolved = (svc: any) => {
+ // Start with base config as defaults
+ const cfg: Record<string, string> = { ...baseConfig };
+ // Override with service-specific config fields
+ if (svc) {
+ for (const key of allConfigKeys) {
+ if (svc[key]) cfg[key] = svc[key];
+ }
+ // Override with service-specific auth
+ if (svc.authentication_configuration) {
+ for (const [key, value] of Object.entries(svc.authentication_configuration)) {
+ if (typeof value === "string") cfg[key] = value;
+ }
+ }
+ }
+ return cfg;
+ };
+
// Parse per-graph chatbot config (chatbot_only mode)
if (data.global_chat_info) {
setGlobalChatInfo(data.global_chat_info);
}
if (data.chatbot_config) {
setUseCustomChatbot(true);
- setChatbotProvider(data.chatbot_config.llm_service?.toLowerCase() || "openai");
+ setChatbotProvider(data.chatbot_config.llm_service?.toLowerCase() || defaultProv);
setChatbotModelName(data.chatbot_config.llm_model || "");
setChatbotTemperature(String(data.chatbot_config.model_kwargs?.temperature ?? "0"));
- // Load provider-specific config fields + masked auth
- const cfg: Record<string, string> = {};
- for (const key of ["base_url", "azure_deployment", "region_name", "project", "location", "endpoint_name", "endpoint_url"]) {
- if (data.chatbot_config[key]) cfg[key] = data.chatbot_config[key];
- }
- if (data.chatbot_config.authentication_configuration) {
- for (const [key, value] of Object.entries(data.chatbot_config.authentication_configuration)) {
- if (typeof value === "string") cfg[key] = value;
- }
- }
- setChatbotProviderConfig(cfg);
+ // Resolve chatbot config: base config + chatbot overrides
+ setChatbotProviderConfig(loadServiceConfigResolved(data.chatbot_config));
} else {
setUseCustomChatbot(false);
}
- // Store graph overrides when in per-graph scope
- if (data.graph_overrides) {
- setGraphOverrides(data.graph_overrides);
- } else {
- setGraphOverrides({});
- }
-
const currentDefaultModel = llmConfig.completion_service?.llm_model || "";
setSingleDefaultModel(currentDefaultModel);
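+ // Single-provider mode: embedding must match the completion provider, and
+ // multimodal/chat must either be unset (inherited) or match it as well.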
+ const allSameProvider =
+ completionProv === embeddingProv &&
+ (!multimodalProv || completionProv === multimodalProv) &&
+ (!chatProv || completionProv === chatProv);
+
+ setUseMultipleProviders(!allSameProvider);
+
// Load chat_service config for full mode (superadmin)
+ // Chat inherits from base (completion) when not explicitly set
if (llmConfig.chat_service) {
setUseCustomChatbot(true);
- setChatbotProvider(llmConfig.chat_service.llm_service?.toLowerCase() || "openai");
+ setChatbotProvider(chatProv || defaultProv);
setChatbotModelName(llmConfig.chat_service.llm_model || "");
setChatbotTemperature(String(llmConfig.chat_service.model_kwargs?.temperature ?? "0"));
- const chatCfg: Record<string, string> = {};
- for (const key of ["base_url", "azure_deployment", "region_name", "project", "location", "endpoint_name", "endpoint_url"]) {
- if (llmConfig.chat_service[key]) chatCfg[key] = llmConfig.chat_service[key];
- }
- if (llmConfig.chat_service.authentication_configuration) {
- for (const [key, value] of Object.entries(llmConfig.chat_service.authentication_configuration)) {
- if (typeof value === "string") chatCfg[key] = value;
- }
- }
- setChatbotProviderConfig(chatCfg);
+ setChatbotProviderConfig(loadServiceConfigResolved(llmConfig.chat_service));
} else {
setUseCustomChatbot(false);
- setChatbotProvider("openai");
+ setChatbotProvider(defaultProv);
setChatbotModelName("");
setChatbotTemperature("0");
- setChatbotProviderConfig({});
+ setChatbotProviderConfig({ ...baseConfig });
}
- // Detect if using multiple providers
- const completionProv = llmConfig.completion_service?.llm_service?.toLowerCase();
- const embeddingProv = llmConfig.embedding_service?.embedding_model_service?.toLowerCase();
- const multimodalProv = llmConfig.multimodal_service?.llm_service?.toLowerCase();
- const chatProv = llmConfig.chat_service?.llm_service?.toLowerCase();
-
- const allSameProvider =
- completionProv === embeddingProv &&
- (!multimodalProv || completionProv === multimodalProv) &&
- (!chatProv || completionProv === chatProv);
-
- setUseMultipleProviders(!allSameProvider);
-
- // Helper: load config fields + masked auth fields from a service config
- const loadServiceConfig = (svc: any, configKeys: string[]) => {
- const cfg: Record<string, string> = {};
- for (const key of configKeys) {
- if (svc?.[key]) cfg[key] = svc[key];
- }
- // Load masked auth fields from authentication_configuration
- if (svc?.authentication_configuration) {
- for (const [key, value] of Object.entries(svc.authentication_configuration)) {
- if (typeof value === "string") cfg[key] = value;
- }
- }
- return cfg;
- };
-
- const completionConfigKeys = ["base_url", "azure_deployment", "region_name", "project", "location", "endpoint_name", "endpoint_url"];
- const embeddingConfigKeys = ["base_url", "azure_deployment", "region_name"];
-
- if (!allSameProvider) {
- // Multi-provider mode - Load from backend
- setCompletionProvider(completionProv || "openai");
- setCompletionDefaultModel(llmConfig.completion_service?.llm_model || "");
- setCompletionConfig(loadServiceConfig(llmConfig.completion_service, completionConfigKeys));
-
- setEmbeddingProvider(embeddingProv || "openai");
- setEmbeddingModel(llmConfig.embedding_service?.model_name || "");
- setEmbeddingConfig(loadServiceConfig(llmConfig.embedding_service, embeddingConfigKeys));
-
- setMultimodalProvider(multimodalProv || "openai");
- setMultimodalModelName(llmConfig.multimodal_service?.llm_model || "");
- setMultimodalConfig(loadServiceConfig(llmConfig.multimodal_service, ["azure_deployment"]));
- } else {
- // Single provider mode - Load from backend
- setSingleProvider(completionProv || "openai");
- setSingleDefaultModel(llmConfig.completion_service?.llm_model || "");
- setSingleEmbeddingModel(llmConfig.embedding_service?.model_name || "");
- setMultimodalModel(llmConfig.multimodal_service?.llm_model || "");
- // Load config + auth from completion_service (single provider shares auth)
- const singleCfg = loadServiceConfig(llmConfig.completion_service, completionConfigKeys);
- // Also load top-level authentication_configuration (used in single-provider mode)
- if (llmConfig.authentication_configuration) {
- for (const [key, value] of Object.entries(llmConfig.authentication_configuration)) {
- if (typeof value === "string" && !singleCfg[key]) singleCfg[key] = value;
- }
- }
- setSingleConfig(singleCfg);
- }
+ // Always populate both single and multi-provider state from the same
+ // backend config. The toggle only switches which UI is shown.
+
+ // Single-provider state: uses base config (top-level auth + completion_service)
+ setSingleProvider(completionProv || "openai");
+ setSingleDefaultModel(llmConfig.completion_service?.llm_model || "");
+ setSingleEmbeddingModel(llmConfig.embedding_service?.model_name || "");
+ const mmModel = llmConfig.multimodal_service?.llm_model || "";
+ setMultimodalModel(mmModel);
+ // Multimodal is "custom" if it has an explicit model or a different provider
+ setUseCustomMultimodal(!!mmModel || !!multimodalProv);
+ setSingleConfig(loadServiceConfigResolved(llmConfig.completion_service));
+
+ // Multi-provider state: per-service configs, each inheriting from base
+ setCompletionProvider(completionProv || "openai");
+ setCompletionDefaultModel(llmConfig.completion_service?.llm_model || "");
+ setCompletionConfig(loadServiceConfigResolved(llmConfig.completion_service));
+
+ setEmbeddingProvider(embeddingProv || completionProv || "openai");
+ setEmbeddingModel(llmConfig.embedding_service?.model_name || "");
+ setEmbeddingConfig(loadServiceConfigResolved(llmConfig.embedding_service));
+
+ setMultimodalProvider(multimodalProv || completionProv || "openai");
+ setMultimodalModelName(llmConfig.multimodal_service?.llm_model || "");
+ setMultimodalConfig(loadServiceConfigResolved(llmConfig.multimodal_service));
} catch (error: any) {
console.error("Error fetching config:", error);
setMessage(`Failed to load configuration: ${error.message}`);
@@ -439,14 +459,20 @@ const LLMConfig = () => {
authentication_configuration: buildAuthConfig(embeddingProvider, embeddingConfig),
...buildServiceConfig(embeddingProvider, embeddingConfig)
},
- multimodal_service: {
+ };
+
+ // Save multimodal_service if not inheriting from completion service
+ if (useCustomMultimodal && multimodalModelName) {
+ llmConfigData.multimodal_service = {
llm_service: multimodalProvider,
llm_model: multimodalModelName,
authentication_configuration: buildAuthConfig(multimodalProvider, multimodalConfig),
model_kwargs: { temperature: 0 },
...buildServiceConfig(multimodalProvider, multimodalConfig)
- },
- };
+ };
+ } else {
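+ // Explicit null (rather than omitting the key) should clear any
+ // previously saved multimodal override on the backend.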
+ llmConfigData.multimodal_service = null;
+ }
// Save chat_service if not inheriting from completion service
if (useCustomChatbot) {
@@ -477,18 +503,21 @@ const LLMConfig = () => {
embedding_model_service: singleProvider,
model_name: singleEmbeddingModel,
},
- multimodal_service: {
- llm_service: singleProvider,
- llm_model: multimodalModel,
- model_kwargs: { temperature: 0 },
- ...buildServiceConfig(singleProvider, singleConfig)
- },
};
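+ // Model-only override: provider and auth are not sent here, so the backend
+ // is expected to resolve them from the single-provider completion settings.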
+ if (multimodalModel.trim()) {
+ llmConfigData.multimodal_service = {
+ llm_model: multimodalModel,
+ };
+ } else {
+ llmConfigData.multimodal_service = null;
+ }
- // Save chat_service with just the model name (same provider as completion)
- if (chatbotModelName.trim()) {
+ // Save chat_service if model or temperature differs from defaults
+ const chatTemp = parseFloat(chatbotTemperature) || 0;
+ if (chatbotModelName.trim() || chatTemp !== 0) {
llmConfigData.chat_service = {
- llm_model: chatbotModelName,
+ ...(chatbotModelName.trim() ? { llm_model: chatbotModelName } : {}),
+ model_kwargs: { temperature: chatTemp },
};
} else {
llmConfigData.chat_service = null;
@@ -553,38 +582,39 @@ const LLMConfig = () => {
return null;
};
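+ // Shared helper: report a validation failure and abort the connection test.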
+ const failValidation = (msg: string) => {
+ setMessage(`❌ ${msg}`);
+ setMessageType("error");
+ setIsTesting(false);
+ };
+
if (useMultipleProviders) {
const completionError = validateProvider(completionProvider, completionConfig, "Completion Service");
- if (completionError) {
- setMessage(`❌ ${completionError}`);
- setMessageType("error");
- setIsTesting(false);
- return;
- }
-
+ if (completionError) { failValidation(completionError); return; }
+ if (!completionDefaultModel.trim()) { failValidation("Model Name is required for Completion Service"); return; }
+
const embeddingError = validateProvider(embeddingProvider, embeddingConfig, "Embedding Service");
- if (embeddingError) {
- setMessage(`❌ ${embeddingError}`);
- setMessageType("error");
- setIsTesting(false);
- return;
+ if (embeddingError) { failValidation(embeddingError); return; }
+ if (!embeddingModel.trim()) { failValidation("Model Name is required for Embedding Service"); return; }
+
+ if (useCustomMultimodal) {
+ const multimodalError = validateProvider(multimodalProvider, multimodalConfig, "Multimodal Service");
+ if (multimodalError) { failValidation(multimodalError); return; }
+ if (!multimodalModelName.trim()) { failValidation("Model Name is required for Multimodal Service"); return; }
}
- const multimodalError = validateProvider(multimodalProvider, multimodalConfig, "Multimodal Service");
- if (multimodalError) {
- setMessage(`❌ ${multimodalError}`);
- setMessageType("error");
- setIsTesting(false);
- return;
+ if (useCustomChatbot) {
+ const chatbotError = validateProvider(chatbotProvider, chatbotProviderConfig, "Chatbot Service");
+ if (chatbotError) { failValidation(chatbotError); return; }
+ if (!chatbotModelName.trim()) { failValidation("Model Name is required for Chatbot Service"); return; }
}
} else {
const singleError = validateProvider(singleProvider, singleConfig, singleProvider);
- if (singleError) {
- setMessage(`❌ ${singleError}`);
- setMessageType("error");
- setIsTesting(false);
- return;
- }
+ if (singleError) { failValidation(singleError); return; }
+ if (!singleDefaultModel.trim()) { failValidation("Completion Model is required"); return; }
+ if (!singleEmbeddingModel.trim()) { failValidation("Embedding Model is required"); return; }
+ if (useCustomMultimodal && !multimodalModel.trim()) { failValidation("Multimodal Model is required when not inheriting from completion"); return; }
+ if (useCustomChatbot && !chatbotModelName.trim()) { failValidation("Chatbot Model is required when not inheriting from completion"); return; }
}
const creds = sessionStorage.getItem("creds");
@@ -607,12 +637,17 @@ const LLMConfig = () => {
},
};
- llmConfigData.multimodal_service = {
- llm_service: multimodalProvider,
- llm_model: multimodalModelName,
- authentication_configuration: buildAuthConfig(multimodalProvider, multimodalConfig),
- ...buildServiceConfig(multimodalProvider, multimodalConfig)
- };
+ if (useCustomMultimodal && multimodalModelName) {
+ llmConfigData.multimodal_service = {
+ llm_service: multimodalProvider,
+ llm_model: multimodalModelName,
+ authentication_configuration: buildAuthConfig(multimodalProvider, multimodalConfig),
+ ...buildServiceConfig(multimodalProvider, multimodalConfig)
+ };
+ } else {
+ // Inherit from completion — send empty to trigger test with resolved config
+ llmConfigData.multimodal_service = {};
+ }
} else {
llmConfigData = {
graphname: selectedGraph || undefined,
@@ -626,26 +661,20 @@ const LLMConfig = () => {
embedding_model_service: singleProvider,
model_name: singleEmbeddingModel,
},
- multimodal_service: {
- llm_service: singleProvider,
- llm_model: multimodalModel,
- ...buildServiceConfig(singleProvider, singleConfig)
- },
};
-
+ if (multimodalModel.trim()) {
+ llmConfigData.multimodal_service = {
+ llm_model: multimodalModel,
+ };
+ } else {
+ // Inherit from completion — send empty to trigger test with resolved config
+ llmConfigData.multimodal_service = {};
+ }
}
- // Add chat_service to test config if custom chatbot is configured
- // Add chat_service to test config if not inheriting
+ // Add chat_service to test config
if (useCustomChatbot) {
if (useMultipleProviders) {
- const chatbotError = validateProvider(chatbotProvider, chatbotProviderConfig, "Chatbot Service");
- if (chatbotError) {
- setMessage(`❌ ${chatbotError}`);
- setMessageType("error");
- setIsTesting(false);
- return;
- }
llmConfigData.chat_service = {
llm_service: chatbotProvider,
llm_model: chatbotModelName,
@@ -919,16 +948,9 @@ const LLMConfig = () => {
- <SelectItem value="openai">OpenAI</SelectItem>
- <SelectItem value="azure">Azure OpenAI</SelectItem>
- <SelectItem value="genai">Google GenAI (Gemini)</SelectItem>
- <SelectItem value="vertexai">Google Vertex AI</SelectItem>
- <SelectItem value="bedrock">AWS Bedrock</SelectItem>
- <SelectItem value="groq">Groq</SelectItem>
- <SelectItem value="ollama">Ollama</SelectItem>
- <SelectItem value="sagemaker">AWS SageMaker</SelectItem>
- <SelectItem value="huggingface">HuggingFace</SelectItem>
- <SelectItem value="watsonx">IBM WatsonX</SelectItem>
+ {LLM_PROVIDERS.map((p) => (
+   <SelectItem key={p.value} value={p.value}>{p.label}</SelectItem>
+ ))}
@@ -1101,7 +1123,27 @@ const LLMConfig = () => {
id="multiProvider"
checked={useMultipleProviders}
onChange={(e) => {
- setUseMultipleProviders(e.target.checked);
+ const toMulti = e.target.checked;
+ setUseMultipleProviders(toMulti);
+ if (toMulti) {
+ // Sync single-provider values → multi-provider state
+ setCompletionProvider(singleProvider);
+ setCompletionDefaultModel(singleDefaultModel);
+ setCompletionConfig({ ...singleConfig });
+ setEmbeddingProvider(singleProvider);
+ setEmbeddingModel(singleEmbeddingModel);
+ setEmbeddingConfig({ ...singleConfig });
+ setMultimodalProvider(singleProvider);
+ setMultimodalModelName(multimodalModel);
+ setMultimodalConfig({ ...singleConfig });
+ } else {
+ // Sync multi-provider values → single-provider state
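+ // (single mode shares one config object, so only the completion config carries over)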
+ setSingleProvider(completionProvider);
+ setSingleDefaultModel(completionDefaultModel);
+ setSingleConfig({ ...completionConfig });
+ setSingleEmbeddingModel(embeddingModel);
+ setMultimodalModel(multimodalModelName);
+ }
clearTestResults();
}}
className="h-4 w-4 rounded border-gray-300 dark:border-[#3D3D3D]"
@@ -1137,16 +1179,13 @@ const LLMConfig = () => {
- Only providers supporting both completion and embedding services are shown
+ This provider will be used for all services (completion, embedding, multimodal)
@@ -1154,7 +1193,7 @@ const LLMConfig = () => {
- Multimodal Service
+ Embedding Service
- Configure the provider for processing images and multimodal content (vision tasks).
+ Configure the provider for generating embeddings.
@@ -1473,35 +1584,31 @@ const LLMConfig = () => {
Provider
- <Select value={multimodalProvider} onValueChange={(value) => handleProviderChange(value, 'multimodal')}>
+ <Select value={embeddingProvider} onValueChange={(value) => handleProviderChange(value, 'embedding')}>
- <SelectItem value="openai">OpenAI</SelectItem>
- <SelectItem value="azure">Azure OpenAI</SelectItem>
- <SelectItem value="genai">Google GenAI (Gemini)</SelectItem>
- <SelectItem value="vertexai">Google Vertex AI</SelectItem>
+ {LLM_PROVIDERS.map((p) => (
+   <SelectItem key={p.value} value={p.value}>{p.label}</SelectItem>
+ ))}
-
- Only OpenAI, Azure, GenAI, VertexAI support vision
-
- {renderProviderFields(multimodalProvider, multimodalConfig, setMultimodalConfig)}
+ {renderProviderFields(embeddingProvider, embeddingConfig, setEmbeddingConfig)}
- Model Name
+ Model Name *
<Input onChange={(e) => {
- setMultimodalModelName(e.target.value);
+ setEmbeddingModel(e.target.value);
clearTestResults();
}}
/>
@@ -1535,7 +1642,7 @@ const LLMConfig = () => {
? "bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300"
: "bg-red-50 dark:bg-red-900/20 text-red-700 dark:text-red-300"
}`}>
- Default LLM Model: {testResults.completion.message}
+ Completion Model: {testResults.completion.message}
)}
@@ -1545,27 +1652,27 @@ const LLMConfig = () => {
? "bg-green-50 dark:bg-green-900/20 text-green-700 dark:text-green-300"
: "bg-red-50 dark:bg-red-900/20 text-red-700 dark:text-red-300"
}`}>
- Chatbot LLM Model: {testResults.chatbot.message}
+ Chatbot Model: {testResults.chatbot.message}
)}
-
- {testResults.embedding && testResults.embedding.status !== "not_tested" && (
+
+ {testResults.multimodal && testResults.multimodal.status !== "not_tested" && (