diff --git a/client/src/api.js b/client/src/api.js
index b1c45da..e698d6f 100644
--- a/client/src/api.js
+++ b/client/src/api.js
@@ -331,10 +331,13 @@ export async function stopModelInference() {
}
}
-export async function queryChatBot(query) {
+export async function queryChatBot(query, conversationId) {
try {
- const res = await axios.post(`${BASE_URL}/chat/query`, { query });
- return res.data?.response;
+ const res = await axios.post(`${BASE_URL}/chat/query`, {
+ query,
+ conversationId,
+ });
+ return res.data;
} catch (error) {
handleError(error);
}
@@ -348,6 +351,75 @@ export async function clearChat() {
}
}
+// ── Conversation history endpoints ───────────────────────────────────────────
+
+export async function listConversations() {
+ try {
+ const res = await axios.get(`${BASE_URL}/chat/conversations`);
+ return res.data;
+ } catch (error) {
+ handleError(error);
+ }
+}
+
+export async function createConversation() {
+ try {
+ const res = await axios.post(`${BASE_URL}/chat/conversations`);
+ return res.data;
+ } catch (error) {
+ handleError(error);
+ }
+}
+
+export async function getConversation(convoId) {
+ try {
+ const res = await axios.get(`${BASE_URL}/chat/conversations/${convoId}`);
+ return res.data;
+ } catch (error) {
+ handleError(error);
+ }
+}
+
+export async function deleteConversation(convoId) {
+ try {
+ await axios.delete(`${BASE_URL}/chat/conversations/${convoId}`);
+ } catch (error) {
+ handleError(error);
+ }
+}
+
+export async function updateConversationTitle(convoId, title) {
+ try {
+ const res = await axios.patch(`${BASE_URL}/chat/conversations/${convoId}`, {
+ title,
+ });
+ return res.data;
+ } catch (error) {
+ handleError(error);
+ }
+}
+
+export async function queryHelperChat(taskKey, query, fieldContext) {
+ try {
+ const res = await axios.post(`${BASE_URL}/chat/helper/query`, {
+ taskKey,
+ query,
+ fieldContext,
+ });
+ return res.data?.response;
+ } catch (error) {
+ handleError(error);
+ }
+}
+
+export async function clearHelperChat(taskKey) {
+ try {
+ await axios.post(`${BASE_URL}/chat/helper/clear`, { taskKey });
+ } catch (error) {
+ handleError(error);
+ }
+}
+
export async function getConfigPresets() {
return makeApiRequest("pytc/configs", "get");
}
diff --git a/client/src/components/Chatbot.js b/client/src/components/Chatbot.js
index 0aa8a25..1787503 100644
--- a/client/src/components/Chatbot.js
+++ b/client/src/components/Chatbot.js
@@ -1,70 +1,148 @@
-import React, { useEffect, useState, useRef } from "react";
-import { Button, Input, List, Typography, Space, Spin, Popconfirm } from "antd";
-import { SendOutlined, CloseOutlined, DeleteOutlined } from "@ant-design/icons";
-import { queryChatBot, clearChat } from "../api";
+import React, { useCallback, useEffect, useState, useRef } from "react";
+import {
+ Button,
+ Input,
+ List,
+ Typography,
+ Space,
+ Spin,
+ Popconfirm,
+ Tooltip,
+} from "antd";
+import {
+ SendOutlined,
+ CloseOutlined,
+ DeleteOutlined,
+ PlusOutlined,
+ MessageOutlined,
+ MenuFoldOutlined,
+ MenuUnfoldOutlined,
+} from "@ant-design/icons";
+import {
+ queryChatBot,
+ clearChat,
+ listConversations,
+ getConversation,
+ deleteConversation,
+} from "../api";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
const { TextArea } = Input;
const { Text } = Typography;
-const initialMessage = [
- {
- id: 1,
- text: "Hello! I'm your AI assistant, built to help you navigate PyTC Client. How can I help you today?",
- isUser: false,
- },
-];
+
+const GREETING = {
+ role: "assistant",
+ content:
+ "Hello! I'm your AI assistant, built to help you navigate PyTC Client. How can I help you today?",
+};
+
+/* ─── helper: truncate a string to `n` chars ─────────────────────────────── */
+const truncate = (str, n = 50) =>
+ str.length > n ? str.slice(0, n).trimEnd() + "…" : str;
+
+/* ═══════════════════════════════════════════════════════════════════════════ */
function Chatbot({ onClose }) {
- const [messages, setMessages] = useState(() => {
- const saved = localStorage.getItem("chatMessages");
- return saved ? JSON.parse(saved) : initialMessage;
- });
+ /* ── state ─────────────────────────────────────────────────────────────── */
+ const [conversations, setConversations] = useState([]);
+ const [activeConvoId, setActiveConvoId] = useState(null);
+ const [messages, setMessages] = useState([GREETING]);
const [inputValue, setInputValue] = useState("");
const [isSending, setIsSending] = useState(false);
+ const [sidebarOpen, setSidebarOpen] = useState(true);
+ const [isLoadingConvo, setIsLoadingConvo] = useState(false);
+
const lastMessageRef = useRef(null);
- const scrollToLastMessage = () => {
+ /* ── scroll ────────────────────────────────────────────────────────────── */
+ const scrollToBottom = useCallback(() => {
setTimeout(() => {
- if (lastMessageRef.current) {
- lastMessageRef.current.scrollIntoView({
- behavior: "smooth",
- block: "start",
- });
- }
+ lastMessageRef.current?.scrollIntoView({
+ behavior: "smooth",
+ block: "start",
+ });
}, 0);
- };
+ }, []);
useEffect(() => {
- localStorage.setItem("chatMessages", JSON.stringify(messages));
- }, [messages]);
+ scrollToBottom();
+ }, [messages, isSending, scrollToBottom]);
+ /* ── load conversation list on mount ────────────────────────────────────── */
useEffect(() => {
- scrollToLastMessage();
- }, [messages, isSending]);
+ let cancelled = false;
+ (async () => {
+ try {
+ const convos = await listConversations();
+ if (!cancelled && convos) setConversations(convos);
+ } catch {
+ // server may not be ready yet
+ }
+ })();
+ return () => {
+ cancelled = true;
+ };
+ }, []);
+
+ /* ── switch conversation ───────────────────────────────────────────────── */
+ const loadConversation = async (convoId) => {
+ if (convoId === activeConvoId) return;
+ setIsLoadingConvo(true);
+ try {
+ const convo = await getConversation(convoId);
+ if (!convo) return;
+ await clearChat(); // reset LangChain in-memory state
+ setActiveConvoId(convo.id);
+ const dbMessages =
+ convo.messages?.map((m) => ({ role: m.role, content: m.content })) ??
+ [];
+ setMessages([GREETING, ...dbMessages]);
+ } finally {
+ setIsLoadingConvo(false);
+ }
+ };
+
+ /* ── new chat ──────────────────────────────────────────────────────────── */
+ const handleNewChat = async () => {
+ await clearChat();
+ setActiveConvoId(null);
+ setMessages([GREETING]);
+ setInputValue("");
+ };
+ /* ── send message ──────────────────────────────────────────────────────── */
const handleSendMessage = async () => {
if (!inputValue.trim() || isSending) return;
const query = inputValue;
setInputValue("");
- const userMessage = { id: messages.length + 1, text: query, isUser: true };
- setMessages((prev) => [...prev, userMessage]);
+ setMessages((prev) => [...prev, { role: "user", content: query }]);
setIsSending(true);
try {
- const responseText = await queryChatBot(query);
- const botMessage = {
- id: userMessage.id + 1,
- text: responseText || "Sorry, I could not generate a response.",
- isUser: false,
- };
- setMessages((prev) => [...prev, botMessage]);
+ const data = await queryChatBot(query, activeConvoId);
+ const response =
+ data?.response || "Sorry, I could not generate a response.";
+ const returnedConvoId = data?.conversationId ?? activeConvoId;
+
+ setMessages((prev) => [
+ ...prev,
+ { role: "assistant", content: response },
+ ]);
+
+ // If this was the first message in a brand-new chat, we now have a convoId
+ if (!activeConvoId && returnedConvoId) {
+ setActiveConvoId(returnedConvoId);
+ }
+
+ // Refresh sidebar so the new / updated conversation appears
+ const convos = await listConversations();
+ if (convos) setConversations(convos);
} catch (e) {
setMessages((prev) => [
...prev,
{
- id: prev.length + 1,
- text: e.message || "Error contacting chatbot.",
- isUser: false,
+ role: "assistant",
+ content: e.message || "Error contacting chatbot.",
},
]);
} finally {
@@ -79,190 +157,335 @@ function Chatbot({ onClose }) {
}
};
- const handleClearChat = async () => {
- try {
- await clearChat();
- setMessages(initialMessage);
- localStorage.setItem("chatMessages", JSON.stringify(initialMessage));
- } catch (e) {
- console.error("Failed to clear chat:", e);
+ /* ── delete conversation ───────────────────────────────────────────────── */
+ const handleDeleteConvo = async (convoId, e) => {
+ if (e) e.stopPropagation();
+ await deleteConversation(convoId);
+ setConversations((prev) => prev.filter((c) => c.id !== convoId));
+ if (activeConvoId === convoId) {
+ await handleNewChat();
}
};
- return (
-
-
(
+
+ ),
+ ol: ({ children }) => (
+
{children}
+ ),
+ table: ({ children }) => (
+
+ ),
+ thead: ({ children }) => (
+
{children}
+ ),
+ th: ({ children }) => (
+
- AI Assistant
-
-
+ ),
+ td: ({ children }) => (
+ |
+ {children}
+ |
+ ),
+ code: ({ inline, children }) =>
+ inline ? (
+
+ {children}
+
+ ) : (
+
+ {children}
+
+ ),
+ pre: ({ children }) => <>{children}>,
+ };
+
+ /* ═══════════════════════════════════════════════════════════════════════ */
+ /* RENDER */
+ /* ═══════════════════════════════════════════════════════════════════════ */
+ return (
+
+ {/* ── Sidebar ─────────────────────────────────────────────────────── */}
+ {sidebarOpen && (
+
+ {/* header */}
+
- } size="small" />
-
- }
- onClick={onClose}
- size="small"
- />
-
-
+
+ Chats
+
+
+
+ }
+ onClick={handleNewChat}
+ />
+
+
+ }
+ onClick={() => setSidebarOpen(false)}
+ />
+
+
+
+
+ {/* conversation list */}
+
+ {conversations.length === 0 && (
+
+ No past chats yet
+
+ )}
+ {conversations.map((c) => (
+ loadConversation(c.id)}
+ style={{
+ padding: "8px",
+ margin: "2px 0",
+ borderRadius: 6,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ gap: 8,
+ background:
+ c.id === activeConvoId ? "#e6f4ff" : "transparent",
+ border:
+ c.id === activeConvoId
+ ? "1px solid #91caff"
+ : "1px solid transparent",
+ transition: "background 0.15s",
+ }}
+ onMouseEnter={(e) => {
+ if (c.id !== activeConvoId)
+ e.currentTarget.style.background = "#f0f0f0";
+ }}
+ onMouseLeave={(e) => {
+ if (c.id !== activeConvoId)
+ e.currentTarget.style.background = "transparent";
+ }}
+ >
+
+
+ {truncate(c.title)}
+
+ handleDeleteConvo(c.id, e)}
+ onCancel={(e) => e?.stopPropagation()}
+ okText="Delete"
+ cancelText="Cancel"
+ >
+ }
+ onClick={(e) => e.stopPropagation()}
+ style={{
+ flexShrink: 0,
+ opacity: 0.4,
+ transition: "opacity 0.15s",
+ }}
+ onMouseEnter={(e) => (e.currentTarget.style.opacity = "1")}
+ onMouseLeave={(e) =>
+ (e.currentTarget.style.opacity = "0.4")
+ }
+ />
+
+
+ ))}
+
+
+ )}
+
+ {/* ── Main chat area ──────────────────────────────────────────────── */}
- {
- const isLastMessage = index === messages.length - 1;
- return (
-
-
- {message.isUser ? (
- {message.text}
- ) : (
- (
-
- ),
- ol: ({ children }) => (
-
- {children}
-
- ),
- table: ({ children }) => (
-
- ),
- thead: ({ children }) => (
-
- {children}
-
- ),
- th: ({ children }) => (
-
- {children}
- |
- ),
- td: ({ children }) => (
-
- {children}
- |
- ),
- code: ({ inline, children }) =>
- inline ? (
-
- {children}
-
- ) : (
-
- {children}
-
- ),
- pre: ({ children }) => <>{children}>,
+ {/* header */}
+
+
+ {!sidebarOpen && (
+
+ }
+ onClick={() => setSidebarOpen(true)}
+ />
+
+ )}
+ AI Assistant
+
+
+
+ }
+ onClick={handleNewChat}
+ size="small"
+ />
+
+ }
+ onClick={onClose}
+ size="small"
+ />
+
+
+
+ {/* messages */}
+
+ {isLoadingConvo ? (
+
+
+
+ ) : (
+ {
+ const isLast = index === messages.length - 1;
+ const isUser = message.role === "user";
+ return (
+
+
- {message.text}
-
- )}
-
-
- );
- }}
- />
- {isSending && }
-
-
-
-
+ {isUser ? (
+
+ {message.content}
+
+ ) : (
+
+ {message.content}
+
+ )}
+
+
+ );
+ }}
+ />
+ )}
+ {isSending && }
+
+
+ {/* input */}
+
+
+
+
);
diff --git a/client/src/components/InlineHelpChat.js b/client/src/components/InlineHelpChat.js
new file mode 100644
index 0000000..ba0b6ca
--- /dev/null
+++ b/client/src/components/InlineHelpChat.js
@@ -0,0 +1,470 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+import ReactDOM from "react-dom";
+import { Button, Input, Space, Spin, Typography } from "antd";
+import { QuestionCircleOutlined, SendOutlined } from "@ant-design/icons";
+import ReactMarkdown from "react-markdown";
+import remarkGfm from "remark-gfm";
+import { queryHelperChat } from "../api";
+
+const { TextArea } = Input;
+const { Text } = Typography;
+
+/**
+ * Build the automatic first-message prompt that explains a field and asks the
+ * helper agent for a recommendation.
+ */
+const buildInitialPrompt = ({
+ label,
+ yamlKey,
+ value,
+ projectContext,
+ taskContext,
+}) => {
+ return [
+ `Explain this setting and recommend a concrete value if possible:`,
+ `- Label: ${label}`,
+ yamlKey ? `- YAML key: ${yamlKey}` : null,
+ value !== undefined && value !== null && value !== ""
+ ? `- Current value: ${JSON.stringify(value)}`
+ : null,
+ projectContext ? `- Project context: ${projectContext}` : null,
+ taskContext ? `- Task context: ${taskContext}` : null,
+ `Use plain language for non-technical users and give a recommended setting.`,
+ ]
+ .filter(Boolean)
+ .join("\n");
+};
+
+const clamp = (value, min, max) => Math.min(Math.max(value, min), max);
+
+/**
+ * A small "?" button that opens a floating, draggable chat panel connected to
+ * the button by a dashed SVG line. The panel auto-fires an initial prompt on
+ * first open and lets users ask follow-up questions.
+ *
+ * Props:
+ * taskKey – unique key for the helper chat session (e.g. "inference")
+ * label – human-readable field name (e.g. "Input Image")
+ * yamlKey – optional YAML config key (e.g. "DATASET.INPUT_IMAGE")
+ * value – current value of the field
+ * projectContext – short project description for the LLM
+ * taskContext – short task description for the LLM
+ */
+function InlineHelpChat({
+ taskKey,
+ label,
+ yamlKey,
+ value,
+ projectContext,
+ taskContext,
+}) {
+ const anchorRef = useRef(null);
+ const panelRef = useRef(null);
+ const dragState = useRef({ dragging: false, offsetX: 0, offsetY: 0 });
+ const messagesEndRef = useRef(null);
+
+ const [open, setOpen] = useState(false);
+ const [messages, setMessages] = useState([]);
+ const [inputValue, setInputValue] = useState("");
+ const [isSending, setIsSending] = useState(false);
+ const [panelPos, setPanelPos] = useState({
+ top: 0,
+ left: 0,
+ width: 360,
+ height: 300,
+ });
+ const [anchorPoint, setAnchorPoint] = useState({ x: 0, y: 0 });
+
+ const initialPrompt = useMemo(
+ () =>
+ buildInitialPrompt({
+ label,
+ yamlKey,
+ value,
+ projectContext,
+ taskContext,
+ }),
+ [label, yamlKey, value, projectContext, taskContext],
+ );
+
+ // Build a field-context string passed to the backend so the LLM knows
+ // exactly which field the user is asking about.
+ const fieldContext = useMemo(() => {
+ const parts = [`Field: "${label}"`];
+ if (yamlKey) parts.push(`YAML key: ${yamlKey}`);
+ if (taskContext) parts.push(`Task: ${taskContext}`);
+ return parts.join(". ");
+ }, [label, yamlKey, taskContext]);
+
+  // Scroll to bottom when messages change or while a reply is pending
+ useEffect(() => {
+ if (messagesEndRef.current) {
+ messagesEndRef.current.scrollIntoView({ behavior: "smooth" });
+ }
+ }, [messages, isSending]);
+
+ // ------- Send a message to the helper backend -------
+ const sendMessage = async (text, options = {}) => {
+ const { hideUser = false } = options;
+ if (!text.trim() || isSending) return;
+ setIsSending(true);
+ if (!hideUser) {
+ setMessages((prev) => [...prev, { text, isUser: true }]);
+ }
+ try {
+ const helperTaskKey = `${taskKey}:${label}`;
+ const responseText = await queryHelperChat(
+ helperTaskKey,
+ text,
+ fieldContext,
+ );
+ setMessages((prev) => [
+ ...prev,
+ {
+ text: responseText || "Sorry, I could not generate a response.",
+ isUser: false,
+ },
+ ]);
+ } catch (error) {
+ setMessages((prev) => [
+ ...prev,
+ {
+ text: error.message || "Error contacting helper chatbot.",
+ isUser: false,
+ },
+ ]);
+ } finally {
+ setIsSending(false);
+ }
+ };
+
+ // ------- Open the floating panel -------
+ const openPanel = () => {
+ if (!anchorRef.current) return;
+ const rect = anchorRef.current.getBoundingClientRect();
+ const anchor = {
+ x: rect.left + rect.width / 2,
+ y: rect.top + rect.height / 2,
+ };
+ const viewportW = window.innerWidth;
+ const viewportH = window.innerHeight;
+ const width = panelPos.width || 360;
+ const height = panelPos.height || 300;
+
+ // Prefer placing panel to the right of the "?" icon
+ let left = rect.right + 12;
+ let top = rect.top - 12;
+ if (left + width > viewportW) {
+ left = rect.left - width - 12;
+ }
+ if (left < 16) {
+ left = clamp(rect.left, 16, viewportW - width - 16);
+ }
+ if (top + height > viewportH) {
+ top = clamp(viewportH - height - 16, 16, viewportH - height - 16);
+ }
+ if (top < 16) top = 16;
+
+ setAnchorPoint(anchor);
+ setPanelPos((prev) => ({ ...prev, top, left }));
+ setOpen(true);
+
+ // Auto-fire the initial explainer prompt on first open
+ if (messages.length === 0) {
+ sendMessage(initialPrompt, { hideUser: true });
+ }
+ };
+
+ // ------- Follow-up send -------
+ const handleSend = async () => {
+ if (!inputValue.trim()) return;
+ const query = inputValue;
+ setInputValue("");
+ await sendMessage(query);
+ };
+
+ // ------- Drag handling -------
+ useEffect(() => {
+ if (!open) return;
+ const onMove = (event) => {
+ if (!dragState.current.dragging) return;
+ const nextLeft = clamp(
+ event.clientX - dragState.current.offsetX,
+ 8,
+ window.innerWidth - panelPos.width - 8,
+ );
+ const nextTop = clamp(
+ event.clientY - dragState.current.offsetY,
+ 8,
+ window.innerHeight - panelPos.height - 8,
+ );
+ setPanelPos((prev) => ({ ...prev, left: nextLeft, top: nextTop }));
+ };
+ const onUp = () => {
+ dragState.current.dragging = false;
+ };
+ window.addEventListener("mousemove", onMove);
+ window.addEventListener("mouseup", onUp);
+ return () => {
+ window.removeEventListener("mousemove", onMove);
+ window.removeEventListener("mouseup", onUp);
+ };
+ }, [open, panelPos.width, panelPos.height]);
+
+ const startDrag = (event) => {
+ if (!panelRef.current) return;
+ dragState.current.dragging = true;
+ dragState.current.offsetX = event.clientX - panelPos.left;
+ dragState.current.offsetY = event.clientY - panelPos.top;
+ };
+
+ // Track panel resize via CSS resize: both
+ const handlePanelMouseUp = () => {
+ const newRect = panelRef.current?.getBoundingClientRect();
+ if (newRect) {
+ setPanelPos((prev) => ({
+ ...prev,
+ width: newRect.width,
+ height: newRect.height,
+ }));
+ }
+ };
+
+ // ------- Render the floating panel via portal -------
+ const panel = open ? (
+ <>
+ {/* SVG tether line from "?" icon to the panel */}
+
+
+ {/* The floating panel */}
+
+ {/* Draggable header */}
+
+ {label}
+
+
+
+ {/* Messages area */}
+
+ {messages.map((msg, index) => (
+
+ {msg.isUser ? (
+ {msg.text}
+ ) : (
+ (
+ {children}
+ ),
+ p: ({ children }) => (
+
+ {children}
+
+ ),
+ ul: ({ children }) => (
+
+ ),
+ ol: ({ children }) => (
+
+ {children}
+
+ ),
+ table: ({ children }) => (
+
+ ),
+ th: ({ children }) => (
+
+ {children}
+ |
+ ),
+ td: ({ children }) => (
+
+ {children}
+ |
+ ),
+ code: ({ inline, children }) =>
+ inline ? (
+
+ {children}
+
+ ) : (
+
+ {children}
+
+ ),
+ }}
+ >
+ {msg.text}
+
+ )}
+
+ ))}
+ {isSending && (
+
+
+
+ )}
+
+
+
+ {/* Input area */}
+
+
+
+
+
+ >
+ ) : null;
+
+ return (
+ <>
+ }
+ onClick={() => (open ? setOpen(false) : openPanel())}
+ style={{ color: "#8c8c8c" }}
+ />
+ {open ? ReactDOM.createPortal(panel, document.body) : null}
+ >
+ );
+}
+
+export default InlineHelpChat;
diff --git a/client/src/components/InputSelector.js b/client/src/components/InputSelector.js
index 77da078..0598374 100644
--- a/client/src/components/InputSelector.js
+++ b/client/src/components/InputSelector.js
@@ -1,12 +1,20 @@
-import { Form } from "antd";
+import { Form, Space } from "antd";
import React, { useContext } from "react";
import { AppContext } from "../contexts/GlobalContext";
import UnifiedFileInput from "./UnifiedFileInput";
+import InlineHelpChat from "./InlineHelpChat";
function InputSelector(props) {
const context = useContext(AppContext);
const { type } = props;
+ const projectContext =
+ "Biomedical image segmentation using PyTorch Connectomics.";
+ const taskContext =
+ type === "training"
+ ? "Model training configuration — Step 1: Set Inputs."
+ : "Model inference configuration — Step 1: Set Inputs.";
+
const handleLogPathChange = (value) => {
context.setLogPath(value);
};
@@ -45,7 +53,21 @@ function InputSelector(props) {
span: 14,
}}
>
-
+
+ Input Image
+
+
+ }
+ >
-
+
+ Input Label
+
+
+ }
+ >
{type === "training" ? (
-
+
+ Output Path
+
+
+ }
+ >
) : (
+ Output Path
+
+
+ }
help="Directory where inference results will be saved"
>
)}
{type === "training" ? (
-
+
+ Log Path
+
+
+ }
+ >
) : (
+ Checkpoint Path
+
+
+ }
help="Path to trained model file (.pth.tar)"
>
str:
)
return supervisor, reset_search_counter
+
+
+# ---------------------------------------------------------------------------
+# Helper chat: lightweight RAG-only agent for inline "?" help popovers.
+# Has access to search_documentation only — cannot start training/inference.
+# ---------------------------------------------------------------------------
+
+HELPER_PROMPT = """You are a concise UI helper for PyTorch Connectomics (PyTC Client).
+
+You answer questions about a SPECIFIC field or setting the user is looking at.
+You have access to the application documentation via the search_documentation tool.
+
+RULES:
+1. Lead with a concrete recommendation or explanation (2-4 sentences max).
+2. Use plain, non-technical language — the user has no programming knowledge.
+3. Describe things in terms of what users can see and click in the interface.
+4. If you have enough context, recommend a specific value or action.
+5. Do NOT mention API endpoints, code, environment variables, or internal implementation.
+6. You may call search_documentation up to 2 times per question — then answer with what you have.
+7. You CANNOT start training or inference jobs. If the user asks, tell them to use the main AI Chat panel instead."""
+
+
+def build_helper_chain():
+ """Build a lightweight RAG-only agent for inline field help.
+
+ Shares the same FAISS vectorstore and keyword-fallback docs as the main
+ chatbot but has NO access to training/inference sub-agents.
+ Returns ``(agent, reset_search_counter)`` — same interface as ``build_chain``.
+ """
+ ollama_base_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+ ollama_model = os.getenv("OLLAMA_MODEL", "gpt-oss:20b")
+ ollama_embed_model = os.getenv("OLLAMA_EMBED_MODEL", "qwen3-embedding:8b")
+ llm = ChatOllama(model=ollama_model, base_url=ollama_base_url, temperature=0)
+ embeddings = OllamaEmbeddings(model=ollama_embed_model, base_url=ollama_base_url)
+ faiss_path = process_path("server_api/chatbot/faiss_index")
+ vectorstore = FAISS.load_local(
+ faiss_path,
+ embeddings,
+ allow_dangerous_deserialization=True,
+ )
+ retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
+
+ # Keyword fallback docs (same pattern as main chatbot)
+ summaries_dir = Path(process_path("server_api/chatbot/file_summaries"))
+ _all_docs = {}
+ for md_file in summaries_dir.rglob("*.md"):
+ _all_docs[md_file.name] = md_file.read_text(encoding="utf-8")
+
+ _search_call_count = [0]
+
+ def reset_search_counter():
+ _search_call_count[0] = 0
+
+ @tool
+ def search_documentation(query: str) -> str:
+ """Search PyTC documentation for UI guides, field explanations, and feature descriptions.
+
+ Args:
+ query: The user's question
+
+ Returns:
+ Relevant documentation content
+ """
+ _search_call_count[0] += 1
+ if _search_call_count[0] > 2:
+ return (
+ "Search limit reached. Answer with the documentation already retrieved."
+ )
+
+ docs = retriever.invoke(query)
+ if docs:
+ return "\n\n".join([doc.page_content for doc in docs])
+
+ # Keyword fallback
+ query_lower = query.lower()
+ query_words = [w for w in query_lower.split() if len(w) > 2]
+ scored = []
+ for filename, content in _all_docs.items():
+ content_lower = content.lower()
+ name_lower = filename.replace(".md", "").lower()
+ word_hits = sum(1 for w in query_words if w in content_lower)
+ name_hits = sum(3 for w in query_words if w in name_lower)
+ score = word_hits + name_hits
+ if score > 0:
+ scored.append((score, filename, content))
+ scored.sort(key=lambda x: x[0], reverse=True)
+ if scored:
+ return "\n\n".join([s[2] for s in scored[:3]])
+
+ return "No relevant documentation found."
+
+ helper_agent = create_agent(
+ model=llm,
+ tools=[search_documentation],
+ system_prompt=HELPER_PROMPT,
+ )
+
+ return helper_agent, reset_search_counter
diff --git a/server_api/main.py b/server_api/main.py
index 62c3f50..d0af24f 100644
--- a/server_api/main.py
+++ b/server_api/main.py
@@ -7,11 +7,14 @@
import requests
import uvicorn
-from fastapi import FastAPI, HTTPException, Request, UploadFile
+from fastapi import Depends, FastAPI, HTTPException, Request, UploadFile
from fastapi.middleware.cors import CORSMiddleware
+from sqlalchemy.orm import Session
from server_api.utils.io import readVol
from server_api.utils.utils import process_path
from server_api.auth import models, database, router as auth_router
+from server_api.auth.database import get_db
+from server_api.auth.router import get_current_user
from server_api.synanno import router as synanno_router
from server_api.ehtool import router as ehtool_router
@@ -21,14 +24,23 @@
# Chatbot is optional; keep the server running if dependencies or model endpoints
# are unavailable. We initialize lazily on demand.
try:
- from server_api.chatbot.chatbot import build_chain
+ from server_api.chatbot.chatbot import build_chain, build_helper_chain
except Exception as exc: # pragma: no cover - exercised indirectly via endpoints
build_chain = None
+ build_helper_chain = None
_chatbot_error = exc
chain = None
_reset_search = None
-_chat_history = []
+
+# In-memory LangChain history for the main chatbot, paired with the ID of the
+# conversation it mirrors (_active_convo_id). Rebuilt from the DB on switch.
+_active_convo_id: Optional[int] = None
+_chat_history: list = []
+
+# Helper chat (inline "?" popovers) — keyed by taskKey for isolated sessions
+_helper_chains = {} # taskKey -> (agent, reset_fn)
+_helper_histories = {} # taskKey -> list of messages
def _ensure_chatbot():
@@ -508,9 +520,136 @@ async def check_files(req: Request):
return {"error": str(e)}
+# ── Chat history persistence endpoints ─────────────────────────────────────────
+
+
+@app.get("/chat/conversations", response_model=List[models.ConversationResponse])
+def list_conversations(
+ user: models.User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+):
+ """Return all conversations for the current user, newest first."""
+ return (
+ db.query(models.Conversation)
+ .filter(models.Conversation.user_id == user.id)
+ .order_by(models.Conversation.updated_at.desc())
+ .all()
+ )
+
+
+@app.post("/chat/conversations", response_model=models.ConversationDetailResponse)
+def create_conversation(
+ user: models.User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+):
+ """Create a new empty conversation."""
+ convo = models.Conversation(user_id=user.id, title="New Chat")
+ db.add(convo)
+ db.commit()
+ db.refresh(convo)
+ return convo
+
+
+@app.get(
+ "/chat/conversations/{convo_id}",
+ response_model=models.ConversationDetailResponse,
+)
+def get_conversation(
+ convo_id: int,
+ user: models.User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+):
+ """Return a conversation and all its messages."""
+ convo = (
+ db.query(models.Conversation)
+ .filter(
+ models.Conversation.id == convo_id,
+ models.Conversation.user_id == user.id,
+ )
+ .first()
+ )
+ if not convo:
+ raise HTTPException(status_code=404, detail="Conversation not found")
+ return convo
+
+
+@app.delete("/chat/conversations/{convo_id}")
+def delete_conversation(
+ convo_id: int,
+ user: models.User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+):
+ """Delete a conversation and all its messages."""
+ convo = (
+ db.query(models.Conversation)
+ .filter(
+ models.Conversation.id == convo_id,
+ models.Conversation.user_id == user.id,
+ )
+ .first()
+ )
+ if not convo:
+ raise HTTPException(status_code=404, detail="Conversation not found")
+ db.delete(convo)
+ db.commit()
+ # Clear in-memory history if we just deleted the active conversation
+ global _active_convo_id, _chat_history
+ if _active_convo_id == convo_id:
+ _active_convo_id = None
+ _chat_history.clear()
+ return {"message": "Conversation deleted"}
+
+
+@app.patch(
+ "/chat/conversations/{convo_id}",
+ response_model=models.ConversationResponse,
+)
+def update_conversation(
+ convo_id: int,
+ req_body: dict,
+ user: models.User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+):
+ """Update a conversation's title."""
+ convo = (
+ db.query(models.Conversation)
+ .filter(
+ models.Conversation.id == convo_id,
+ models.Conversation.user_id == user.id,
+ )
+ .first()
+ )
+ if not convo:
+ raise HTTPException(status_code=404, detail="Conversation not found")
+ if "title" in req_body:
+ convo.title = req_body["title"][:120] # cap at 120 chars
+ db.commit()
+ db.refresh(convo)
+ return convo
+
+
+def _load_history_for_convo(convo_id: int, db: Session):
+ """Rebuild the in-memory _chat_history from the DB for a given conversation."""
+ global _active_convo_id, _chat_history
+ if _active_convo_id == convo_id:
+ return # already loaded
+ msgs = (
+ db.query(models.ChatMessage)
+ .filter(models.ChatMessage.conversation_id == convo_id)
+ .order_by(models.ChatMessage.created_at)
+ .all()
+ )
+ _chat_history = [{"role": m.role, "content": m.content} for m in msgs]
+ _active_convo_id = convo_id
+
+
# Chatbot endpoints
@app.post("/chat/query")
-async def chat_query(req: Request):
+async def chat_query(
+ req: Request,
+ user: models.User = Depends(get_current_user),
+ db: Session = Depends(get_db),
+):
if not _ensure_chatbot():
detail = "Chatbot is not configured"
if "_chatbot_error" in globals():
@@ -518,21 +657,64 @@ async def chat_query(req: Request):
raise HTTPException(status_code=503, detail=detail)
body = await req.json()
query = body.get("query")
+ convo_id = body.get("conversationId")
if not isinstance(query, str) or not query.strip():
raise HTTPException(status_code=400, detail="Query must be a non-empty string.")
+
+ # Auto-create a conversation if none supplied
+ if not convo_id:
+ convo = models.Conversation(user_id=user.id, title="New Chat")
+ db.add(convo)
+ db.commit()
+ db.refresh(convo)
+ convo_id = convo.id
+ else:
+ convo = (
+ db.query(models.Conversation)
+ .filter(
+ models.Conversation.id == convo_id,
+ models.Conversation.user_id == user.id,
+ )
+ .first()
+ )
+ if not convo:
+ raise HTTPException(status_code=404, detail="Conversation not found")
+
+ # Rebuild in-memory history from DB when switching conversations
+ _load_history_for_convo(convo_id, db)
+
if _reset_search is not None:
_reset_search()
all_messages = _chat_history + [{"role": "user", "content": query}]
result = chain.invoke({"messages": all_messages})
messages = result.get("messages", [])
response = messages[-1].content if messages else "No response generated"
+
+ # Persist to DB
+ db.add(models.ChatMessage(conversation_id=convo_id, role="user", content=query))
+ db.add(
+ models.ChatMessage(conversation_id=convo_id, role="assistant", content=response)
+ )
+
+ # Auto-title: first user message becomes the title (truncated)
+ if convo.title == "New Chat":
+ convo.title = query[:120].strip() or "New Chat"
+
+ db.commit()
+
+ # Update in-memory history
_chat_history.append({"role": "user", "content": query})
_chat_history.append({"role": "assistant", "content": response})
- return {"response": response}
+
+ return {"response": response, "conversationId": convo_id}
@app.post("/chat/clear")
-async def clear_chat():
+async def clear_chat(
+ user: models.User = Depends(get_current_user),
+):
+ """Reset the in-memory LangChain context (does NOT delete DB messages)."""
+ global _active_convo_id, _chat_history
if not _ensure_chatbot():
detail = "Chatbot is not configured"
if "_chatbot_error" in globals():
@@ -541,6 +723,7 @@ async def clear_chat():
if _reset_search is not None:
_reset_search()
_chat_history.clear()
+ _active_convo_id = None
return {"message": "Chat session reset"}
@@ -553,6 +736,74 @@ async def chat_status():
return {"configured": configured, "error": detail}
+# ---------------------------------------------------------------------------
+# Helper chat endpoints (inline "?" popovers — RAG only, no training/inference)
+# ---------------------------------------------------------------------------
+
+
+def _ensure_helper_chat(task_key: str):
+ """Lazily build a helper agent for *task_key*, reusing it on subsequent calls."""
+ global _chatbot_error
+ if task_key in _helper_chains:
+ return True
+ if build_helper_chain is None:
+ return False
+ try:
+ agent, reset_fn = build_helper_chain()
+ _helper_chains[task_key] = (agent, reset_fn)
+ _helper_histories[task_key] = []
+ return True
+ except Exception as exc:
+ _chatbot_error = exc
+ return False
+
+
+@app.post("/chat/helper/query")
+async def chat_helper_query(req: Request):
+ body = await req.json()
+ task_key = body.get("taskKey")
+ query = body.get("query")
+ field_context = body.get("fieldContext", "")
+
+ if not task_key:
+ raise HTTPException(status_code=400, detail="taskKey is required")
+ if not isinstance(query, str) or not query.strip():
+ raise HTTPException(status_code=400, detail="query must be a non-empty string.")
+
+ if not _ensure_helper_chat(task_key):
+ detail = "Helper chatbot is not configured"
+ if "_chatbot_error" in globals():
+ detail = f"{detail}: {_chatbot_error}"
+ raise HTTPException(status_code=503, detail=detail)
+
+ agent, reset_fn = _helper_chains[task_key]
+ history = _helper_histories[task_key]
+
+    # Prepend the field context to every user message so the LLM always knows
+    # which field the user is asking about.
+ user_content = (
+ f"[Field context: {field_context}]\n\n{query}" if field_context else query
+ )
+
+ reset_fn()
+ all_messages = history + [{"role": "user", "content": user_content}]
+ result = agent.invoke({"messages": all_messages})
+ messages = result.get("messages", [])
+ response = messages[-1].content if messages else "No response generated"
+ history.append({"role": "user", "content": user_content})
+ history.append({"role": "assistant", "content": response})
+ return {"response": response}
+
+
+@app.post("/chat/helper/clear")
+async def chat_helper_clear(req: Request):
+ body = await req.json()
+ task_key = body.get("taskKey")
+ if task_key and task_key in _helper_histories:
+ _helper_histories[task_key].clear()
+ return {"message": "Helper chat cleared"}
+
+
def run():
uvicorn.run(
app,
|