From dbebd4114505ec46e628595705e9d4c0888e65c3 Mon Sep 17 00:00:00 2001
From: linuztx
Date: Tue, 18 Nov 2025 18:49:57 +0800
Subject: [PATCH 001/436] agent: Retry on critical error
---
agent.py | 38 +++++++++++++++++++++++++++++---
prompts/fw.msg_critical_error.md | 1 +
2 files changed, 36 insertions(+), 3 deletions(-)
create mode 100644 prompts/fw.msg_critical_error.md
diff --git a/agent.py b/agent.py
index 594dc37bc5..97205f214e 100644
--- a/agent.py
+++ b/agent.py
@@ -354,6 +354,7 @@ def __init__(
asyncio.run(self.call_extensions("agent_init"))
async def monologue(self):
+ error_retries = 0 # counter for critical error retries
while True:
try:
# loop data dictionary to pass to extensions
@@ -453,6 +454,7 @@ async def stream_callback(chunk: str, full: str):
# exceptions inside message loop:
except InterventionException as e:
+ error_retries = 0 # reset retry counter on user intervention
pass # intervention message has been handled in handle_intervention(), proceed with conversation loop
except RepairableException as e:
# Forward repairable errors to the LLM, maybe it can fix them
@@ -462,8 +464,10 @@ async def stream_callback(chunk: str, full: str):
PrintStyle(font_color="red", padding=True).print(msg["message"])
self.context.log.log(type="error", content=msg["message"])
except Exception as e:
- # Other exception kill the loop
- self.handle_critical_exception(e)
+ # Retry critical exceptions before failing
+ error_retries = await self.retry_critical_exception(
+ e, error_retries
+ )
finally:
# call message_loop_end extensions
@@ -473,9 +477,13 @@ async def stream_callback(chunk: str, full: str):
# exceptions outside message loop:
except InterventionException as e:
+ error_retries = 0 # reset retry counter on user intervention
pass # just start over
except Exception as e:
- self.handle_critical_exception(e)
+ # Retry critical exceptions before failing
+ error_retries = await self.retry_critical_exception(
+ e, error_retries
+ )
finally:
self.context.streaming_agent = None # unset current streamer
# call monologue_end extensions
@@ -532,6 +540,30 @@ async def prepare_prompt(self, loop_data: LoopData) -> list[BaseMessage]:
return full_prompt
+ async def retry_critical_exception(
+ self, e: Exception, error_retries: int, delay: int = 3, max_retries: int = 1
+ ) -> int:
+ if error_retries >= max_retries:
+ self.handle_critical_exception(e)
+
+ error_message = errors.format_error(e)
+
+ self.context.log.log(
+ type="warning", content="Critical error occurred, retrying..."
+ )
+ PrintStyle(font_color="orange", padding=True).print(
+ "Critical error occurred, retrying..."
+ )
+ await asyncio.sleep(delay)
+ agent_facing_error = self.read_prompt(
+ "fw.msg_critical_error.md", error_message=error_message
+ )
+ self.hist_add_warning(message=agent_facing_error)
+ PrintStyle(font_color="orange", padding=True).print(
+ agent_facing_error
+ )
+ return error_retries + 1
+
def handle_critical_exception(self, exception: Exception):
if isinstance(exception, HandledException):
raise exception # Re-raise the exception to kill the loop
diff --git a/prompts/fw.msg_critical_error.md b/prompts/fw.msg_critical_error.md
new file mode 100644
index 0000000000..0bdeda139e
--- /dev/null
+++ b/prompts/fw.msg_critical_error.md
@@ -0,0 +1 @@
+This error has occurred: {{error_message}}. Proceed with your original task if possible.
\ No newline at end of file
From 784fe5589a769ebc61a7a8308dc6d42680c7a068 Mon Sep 17 00:00:00 2001
From: Rafael Uzarowski
Date: Wed, 19 Nov 2025 12:50:43 +0100
Subject: [PATCH 002/436] Fix: proper task cancellation in scheduler and
leakage in defer.py
* **Mechanism**: `EventLoopThread.terminate()` now correctly stops the asyncio loop and joins the thread, removing it from the global registry.
* **Cleanup**: `DeferredTask.kill(terminate_thread=True)` now invokes `_drain_event_loop_tasks()`, which runs **inside** the target thread to explicitly cancel and await all pending tasks (including monologue loops) before killing the thread. This prevents "Task was destroyed but it is pending" warnings and ensures clean exits.
* **Tracking**: The scheduler now maintains a live registry (`_running_deferred_tasks`) of active `DeferredTask` objects, protected by a reentrant lock.
* **State Management**: The `run_task` wrapper uses `asyncio.shield` to ensure that even when a task is cancelled (e.g., by user action), the task state is reliably reset to `IDLE` in the database, preventing tasks from getting stuck in `RUNNING` state.
The fix is correctly propagated to all relevant destruction points using `terminate_thread=True`:
* **Dedicated Context**: `scheduler_task_delete.py` cancels the specific running task and terminates its thread.
* **Shared/Dedicated Context**: Both `chat_remove.py` (Delete Chat) and `chat_reset.py` (Reset Chat) now call `scheduler.cancel_tasks_by_context(...)`. This ensures that if a scheduler task is running in a chat window (monologue), resetting that chat immediately kills the background thread and stops the agent loop.
---
python/api/chat_remove.py | 4 +-
python/api/chat_reset.py | 4 ++
python/api/scheduler_task_delete.py | 1 +
python/helpers/defer.py | 84 +++++++++++++++++++----------
python/helpers/task_scheduler.py | 67 +++++++++++++++++++++--
5 files changed, 127 insertions(+), 33 deletions(-)
diff --git a/python/api/chat_remove.py b/python/api/chat_remove.py
index c7b75f02a4..671e43d9ea 100644
--- a/python/api/chat_remove.py
+++ b/python/api/chat_remove.py
@@ -8,6 +8,9 @@ class RemoveChat(ApiHandler):
async def process(self, input: Input, request: Request) -> Output:
ctxid = input.get("context", "")
+ scheduler = TaskScheduler.get()
+ scheduler.cancel_tasks_by_context(ctxid, terminate_thread=True)
+
context = AgentContext.use(ctxid)
if context:
# stop processing any tasks
@@ -16,7 +19,6 @@ async def process(self, input: Input, request: Request) -> Output:
AgentContext.remove(ctxid)
persist_chat.remove_chat(ctxid)
- scheduler = TaskScheduler.get()
await scheduler.reload()
tasks = scheduler.get_tasks_by_context_id(ctxid)
diff --git a/python/api/chat_reset.py b/python/api/chat_reset.py
index a5ce1284e8..668b08e268 100644
--- a/python/api/chat_reset.py
+++ b/python/api/chat_reset.py
@@ -2,12 +2,16 @@
from python.helpers import persist_chat
+from python.helpers.task_scheduler import TaskScheduler
class Reset(ApiHandler):
async def process(self, input: Input, request: Request) -> Output:
ctxid = input.get("context", "")
+ # attempt to stop any scheduler tasks bound to this context
+ TaskScheduler.get().cancel_tasks_by_context(ctxid, terminate_thread=True)
+
# context instance - get or create
context = self.use_context(ctxid)
context.reset()
diff --git a/python/api/scheduler_task_delete.py b/python/api/scheduler_task_delete.py
index f9b52ac031..5e41a0bd61 100644
--- a/python/api/scheduler_task_delete.py
+++ b/python/api/scheduler_task_delete.py
@@ -34,6 +34,7 @@ async def process(self, input: Input, request: Request) -> Output:
# If the task is running, update its state to IDLE first
if task.state == TaskState.RUNNING:
+ scheduler.cancel_running_task(task_id, terminate_thread=True)
if context:
context.reset()
# Update the state to IDLE so any ongoing processes know to terminate
diff --git a/python/helpers/defer.py b/python/helpers/defer.py
index dc96efe5aa..8c2c7e86ef 100644
--- a/python/helpers/defer.py
+++ b/python/helpers/defer.py
@@ -6,8 +6,9 @@
T = TypeVar("T")
+
class EventLoopThread:
- _instances = {}
+ _instances: dict[str, "EventLoopThread"] = {}
_lock = threading.Lock()
def __init__(self, thread_name: str = "Background") -> None:
@@ -38,8 +39,29 @@ def _run_event_loop(self):
self.loop.run_forever()
def terminate(self):
- if self.loop and self.loop.is_running():
- self.loop.stop()
+ loop = getattr(self, "loop", None)
+ thread = getattr(self, "thread", None)
+
+ if not loop:
+ return
+
+ if loop.is_running():
+ if thread and thread is threading.current_thread():
+ loop.stop()
+ else:
+ loop.call_soon_threadsafe(loop.stop)
+ if thread:
+ thread.join()
+ elif thread and thread.is_alive() and thread is not threading.current_thread():
+ thread.join()
+
+ if not loop.is_closed():
+ loop.close()
+
+ with self.__class__._lock:
+ if self.thread_name in self.__class__._instances:
+ del self.__class__._instances[self.thread_name]
+
self.loop = None
self.thread = None
@@ -79,6 +101,12 @@ def __del__(self):
def _start_task(self):
self._future = self.event_loop_thread.run_coroutine(self._run())
+ if self._future:
+ self._future.add_done_callback(self._on_task_done)
+
+ def _on_task_done(self, _future: Future):
+ # Ensure child background tasks are always cleaned up once the parent finishes
+ self.kill_children()
async def _run(self):
return await self.func(*self.args, **self.kwargs)
@@ -120,30 +148,16 @@ def kill(self, terminate_thread: bool = False) -> None:
if self._future and not self._future.done():
self._future.cancel()
- if (
- terminate_thread
- and self.event_loop_thread.loop
- and self.event_loop_thread.loop.is_running()
- ):
-
- def cleanup():
- tasks = [
- t
- for t in asyncio.all_tasks(self.event_loop_thread.loop)
- if t is not asyncio.current_task(self.event_loop_thread.loop)
- ]
- for task in tasks:
- task.cancel()
- try:
- # Give tasks a chance to cleanup
- if self.event_loop_thread.loop:
- self.event_loop_thread.loop.run_until_complete(
- asyncio.gather(task, return_exceptions=True)
- )
- except Exception:
- pass # Ignore cleanup errors
-
- self.event_loop_thread.loop.call_soon_threadsafe(cleanup)
+ if terminate_thread and self.event_loop_thread.loop:
+ if self.event_loop_thread.loop.is_running():
+ try:
+ cleanup_future = asyncio.run_coroutine_threadsafe(
+ self._drain_event_loop_tasks(), self.event_loop_thread.loop
+ )
+ cleanup_future.result()
+ except Exception:
+ pass
+
self.event_loop_thread.terminate()
def kill_children(self) -> None:
@@ -196,3 +210,19 @@ async def wrapped():
asyncio.run_coroutine_threadsafe(wrapped(), self.event_loop_thread.loop)
return asyncio.wrap_future(future)
+
+ @staticmethod
+ async def _drain_event_loop_tasks():
+ """Cancel and await all pending tasks on the current event loop."""
+ loop = asyncio.get_running_loop()
+ current_task = asyncio.current_task(loop=loop)
+ pending = [
+ task
+ for task in asyncio.all_tasks(loop=loop)
+ if task is not current_task
+ ]
+ if not pending:
+ return
+ for task in pending:
+ task.cancel()
+ await asyncio.gather(*pending, return_exceptions=True)
diff --git a/python/helpers/task_scheduler.py b/python/helpers/task_scheduler.py
index a707777af4..249945bb27 100644
--- a/python/helpers/task_scheduler.py
+++ b/python/helpers/task_scheduler.py
@@ -619,6 +619,8 @@ class TaskScheduler:
_tasks: SchedulerTaskList
_printer: PrintStyle
_instance = None
+ _running_deferred_tasks: Dict[str, DeferredTask]
+ _running_tasks_lock: threading.RLock
@classmethod
def get(cls) -> "TaskScheduler":
@@ -631,8 +633,38 @@ def __init__(self):
if not hasattr(self, '_initialized'):
self._tasks = SchedulerTaskList.get()
self._printer = PrintStyle(italic=True, font_color="green", padding=False)
+ self._running_deferred_tasks = {}
+ self._running_tasks_lock = threading.RLock()
self._initialized = True
+ def _register_running_task(self, task_uuid: str, deferred_task: DeferredTask) -> None:
+ with self._running_tasks_lock:
+ self._running_deferred_tasks[task_uuid] = deferred_task
+
+ def _unregister_running_task(self, task_uuid: str) -> None:
+ with self._running_tasks_lock:
+ self._running_deferred_tasks.pop(task_uuid, None)
+
+ def cancel_running_task(self, task_uuid: str, terminate_thread: bool = False) -> bool:
+ with self._running_tasks_lock:
+ deferred_task = self._running_deferred_tasks.get(task_uuid)
+ if not deferred_task:
+ return False
+ self._printer.print(f"Scheduler cancelling task {task_uuid}")
+ deferred_task.kill(terminate_thread=terminate_thread)
+ return True
+
+ def cancel_tasks_by_context(self, context_id: str, terminate_thread: bool = False) -> bool:
+ cancelled_any = False
+ with self._running_tasks_lock:
+ running_tasks = list(self._running_deferred_tasks.keys())
+ for task_uuid in running_tasks:
+ task = self.get_task_by_uuid(task_uuid)
+ if task and task.context_id == context_id:
+ if self.cancel_running_task(task_uuid, terminate_thread=terminate_thread):
+ cancelled_any = True
+ return cancelled_any
+
async def reload(self):
await self._tasks.reload()
@@ -774,19 +806,23 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
task_snapshot: Union[ScheduledTask, AdHocTask, PlannedTask] | None = self.get_task_by_uuid(task_uuid)
if task_snapshot is None:
self._printer.print(f"Scheduler Task with UUID '{task_uuid}' not found")
+ self._unregister_running_task(task_uuid)
return
if task_snapshot.state == TaskState.RUNNING:
self._printer.print(f"Scheduler Task '{task_snapshot.name}' already running, skipping")
+ self._unregister_running_task(task_uuid)
return
# Atomically fetch and check the task's current state
current_task = await self.update_task_checked(task_uuid, lambda task: task.state != TaskState.RUNNING, state=TaskState.RUNNING)
if not current_task:
self._printer.print(f"Scheduler Task with UUID '{task_uuid}' not found or updated by another process")
+ self._unregister_running_task(task_uuid)
return
if current_task.state != TaskState.RUNNING:
# This means the update failed due to state conflict
self._printer.print(f"Scheduler Task '{current_task.name}' state is '{current_task.state}', skipping")
+ self._unregister_running_task(task_uuid)
return
await current_task.on_run()
@@ -868,6 +904,13 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
self._printer.print(f"Fixing task state consistency: '{current_task.name}' state is not IDLE after success")
await self.update_task(task_uuid, state=TaskState.IDLE)
+ except asyncio.CancelledError:
+ self._printer.print(f"Scheduler Task '{current_task.name}' cancelled by user")
+ try:
+ await asyncio.shield(self.update_task(task_uuid, state=TaskState.IDLE))
+ except Exception:
+ pass
+ raise
except Exception as e:
# Error
self._printer.print(f"Scheduler Task '{current_task.name}' failed: {e}")
@@ -884,17 +927,31 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
agent.handle_critical_exception(e)
finally:
# Call on_finish for task-specific cleanup
- await current_task.on_finish()
+ try:
+ await asyncio.shield(current_task.on_finish())
+ except asyncio.CancelledError:
+ pass
+ except Exception:
+ pass
# Make one final save to ensure all states are persisted
- await self._tasks.save()
+ try:
+ await asyncio.shield(self._tasks.save())
+ except asyncio.CancelledError:
+ pass
+ except Exception:
+ pass
+
+ self._unregister_running_task(task_uuid)
deferred_task = DeferredTask(thread_name=self.__class__.__name__)
+ self._register_running_task(task.uuid, deferred_task)
deferred_task.start_task(_run_task_wrapper, task.uuid, task_context)
- # Ensure background execution doesn't exit immediately on async await, especially in script contexts
- # This helps prevent premature exits when running from non-event-loop contexts
- asyncio.create_task(asyncio.sleep(0.1))
+ # Ensure background execution doesn't exit immediately on async await, especially in script contexts.
+ # Yielding briefly keeps callers like CLI scripts alive long enough for the DeferredTask thread to spin up
+ # without leaving stray pending tasks that trigger \"Task was destroyed\" warnings when the loop shuts down.
+ await asyncio.sleep(0.1)
def serialize_all_tasks(self) -> list[Dict[str, Any]]:
"""
From 51c8451696ac52e43864a535d173a502cfe13b67 Mon Sep 17 00:00:00 2001
From: Rafael Uzarowski
Date: Wed, 19 Nov 2025 13:00:14 +0100
Subject: [PATCH 003/436] Scheduler: use convenience methods for logging of
special messages
---
python/helpers/task_scheduler.py | 46 ++++++++++++++++----------------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/python/helpers/task_scheduler.py b/python/helpers/task_scheduler.py
index 249945bb27..5f9321754a 100644
--- a/python/helpers/task_scheduler.py
+++ b/python/helpers/task_scheduler.py
@@ -215,7 +215,7 @@ async def on_error(self, error: str):
last_result=f"ERROR: {error}"
)
if not updated_task:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
f"Failed to update task {self.uuid} state to ERROR after error: {error}"
)
await scheduler.save() # Force save after update
@@ -231,7 +231,7 @@ async def on_success(self, result: str):
last_result=result
)
if not updated_task:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
f"Failed to update task {self.uuid} state to IDLE after success"
)
await scheduler.save() # Force save after update
@@ -504,12 +504,12 @@ async def save(self) -> "SchedulerTaskList":
for task in self.tasks:
if isinstance(task, AdHocTask):
if task.token is None or task.token == "":
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.warning(
f"WARNING: AdHocTask {task.name} ({task.uuid}) has a null or empty token before saving: '{task.token}'"
)
# Generate a new token to prevent errors
task.token = str(random.randint(1000000000000000000, 9999999999999999999))
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.info(
f"Fixed: Generated new token '{task.token}' for task {task.name}"
)
@@ -522,7 +522,7 @@ async def save(self) -> "SchedulerTaskList":
# Debug: check if 'null' appears as token value in JSON
if '"type": "adhoc"' in json_data and '"token": null' in json_data:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
"ERROR: Found null token in JSON output for an adhoc task"
)
@@ -532,7 +532,7 @@ async def save(self) -> "SchedulerTaskList":
if exists(path):
loaded_json = read_file(path)
if '"type": "adhoc"' in loaded_json and '"token": null' in loaded_json:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
"ERROR: Null token persisted in JSON file for an adhoc task"
)
@@ -650,7 +650,7 @@ def cancel_running_task(self, task_uuid: str, terminate_thread: bool = False) ->
deferred_task = self._running_deferred_tasks.get(task_uuid)
if not deferred_task:
return False
- self._printer.print(f"Scheduler cancelling task {task_uuid}")
+ PrintStyle.info(f"Scheduler cancelling task {task_uuid}")
deferred_task.kill(terminate_thread=terminate_thread)
return True
@@ -719,7 +719,7 @@ async def run_task_by_uuid(self, task_uuid: str, task_context: str | None = None
# If the task is in error state, reset it to IDLE first
if task.state == TaskState.ERROR:
- self._printer.print(f"Resetting task '{task.name}' from ERROR to IDLE state before running")
+ PrintStyle.info(f"Resetting task '{task.name}' from ERROR to IDLE state before running")
await self.update_task(task_uuid, state=TaskState.IDLE)
# Force a reload to ensure we have the updated state
await self._tasks.reload()
@@ -782,13 +782,13 @@ async def _get_chat_context(self, task: Union[ScheduledTask, AdHocTask, PlannedT
if context:
assert isinstance(context, AgentContext)
- self._printer.print(
+ PrintStyle.info(
f"Scheduler Task {task.name} loaded from task {task.uuid}, context ok"
)
save_tmp_chat(context)
return context
else:
- self._printer.print(
+ PrintStyle.warning(
f"Scheduler Task {task.name} loaded from task {task.uuid} but context not found"
)
return await self.__new_context(task)
@@ -805,23 +805,23 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
# preflight checks with a snapshot of the task
task_snapshot: Union[ScheduledTask, AdHocTask, PlannedTask] | None = self.get_task_by_uuid(task_uuid)
if task_snapshot is None:
- self._printer.print(f"Scheduler Task with UUID '{task_uuid}' not found")
+ PrintStyle.error(f"Scheduler Task with UUID '{task_uuid}' not found")
self._unregister_running_task(task_uuid)
return
if task_snapshot.state == TaskState.RUNNING:
- self._printer.print(f"Scheduler Task '{task_snapshot.name}' already running, skipping")
+ PrintStyle.warning(f"Scheduler Task '{task_snapshot.name}' already running, skipping")
self._unregister_running_task(task_uuid)
return
# Atomically fetch and check the task's current state
current_task = await self.update_task_checked(task_uuid, lambda task: task.state != TaskState.RUNNING, state=TaskState.RUNNING)
if not current_task:
- self._printer.print(f"Scheduler Task with UUID '{task_uuid}' not found or updated by another process")
+ PrintStyle.error(f"Scheduler Task with UUID '{task_uuid}' not found or updated by another process")
self._unregister_running_task(task_uuid)
return
if current_task.state != TaskState.RUNNING:
# This means the update failed due to state conflict
- self._printer.print(f"Scheduler Task '{current_task.name}' state is '{current_task.state}', skipping")
+ PrintStyle.warning(f"Scheduler Task '{current_task.name}' state is '{current_task.state}', skipping")
self._unregister_running_task(task_uuid)
return
@@ -831,7 +831,7 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
agent = None
try:
- self._printer.print(f"Scheduler Task '{current_task.name}' started")
+ PrintStyle.info(f"Scheduler Task '{current_task.name}' started")
context = await self._get_chat_context(current_task)
AgentContext.use(context.id)
@@ -854,9 +854,9 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
if url.scheme in ["http", "https", "ftp", "ftps", "sftp"]:
attachment_filenames.append(attachment)
else:
- self._printer.print(f"Skipping attachment: [{attachment}]")
+ PrintStyle.warning(f"Skipping attachment: [{attachment}]")
except Exception:
- self._printer.print(f"Skipping attachment: [{attachment}]")
+ PrintStyle.warning(f"Skipping attachment: [{attachment}]")
self._printer.print("User message:")
self._printer.print(f"> {current_task.prompt}")
@@ -893,7 +893,7 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
result = await agent.monologue()
# Success
- self._printer.print(f"Scheduler Task '{current_task.name}' completed: {result}")
+ PrintStyle.success(f"Scheduler Task '{current_task.name}' completed: {result}")
await self._persist_chat(current_task, context)
await current_task.on_success(result)
@@ -901,11 +901,11 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
await self._tasks.reload()
updated_task = self.get_task_by_uuid(task_uuid)
if updated_task and updated_task.state != TaskState.IDLE:
- self._printer.print(f"Fixing task state consistency: '{current_task.name}' state is not IDLE after success")
+ PrintStyle.warning(f"Fixing task state consistency: '{current_task.name}' state is not IDLE after success")
await self.update_task(task_uuid, state=TaskState.IDLE)
except asyncio.CancelledError:
- self._printer.print(f"Scheduler Task '{current_task.name}' cancelled by user")
+ PrintStyle.warning(f"Scheduler Task '{current_task.name}' cancelled by user")
try:
await asyncio.shield(self.update_task(task_uuid, state=TaskState.IDLE))
except Exception:
@@ -913,14 +913,14 @@ async def _run_task_wrapper(task_uuid: str, task_context: str | None = None):
raise
except Exception as e:
# Error
- self._printer.print(f"Scheduler Task '{current_task.name}' failed: {e}")
+ PrintStyle.error(f"Scheduler Task '{current_task.name}' failed: {e}")
await current_task.on_error(str(e))
# Explicitly verify task was updated in storage after error
await self._tasks.reload()
updated_task = self.get_task_by_uuid(task_uuid)
if updated_task and updated_task.state != TaskState.ERROR:
- self._printer.print(f"Fixing task state consistency: '{current_task.name}' state is not ERROR after failure")
+ PrintStyle.warning(f"Fixing task state consistency: '{current_task.name}' state is not ERROR after failure")
await self.update_task(task_uuid, state=TaskState.ERROR)
if agent:
@@ -1094,7 +1094,7 @@ def parse_task_plan(plan_data: Dict[str, Any]) -> TaskPlan:
done=done_dates_cast
)
except Exception as e:
- PrintStyle(italic=True, font_color="red", padding=False).print(
+ PrintStyle.error(
f"Error parsing task plan: {e}"
)
# Return empty plan instead of failing
From 94cfa49d87e9bad70b5f4da8fd484f210db4a6cb Mon Sep 17 00:00:00 2001
From: frdel <38891707+frdel@users.noreply.github.com>
Date: Thu, 20 Nov 2025 13:25:30 +0100
Subject: [PATCH 004/436] fix file name download issue
---
python/api/download_work_dir_file.py | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/python/api/download_work_dir_file.py b/python/api/download_work_dir_file.py
index 454ae5a12f..747c6a4940 100644
--- a/python/api/download_work_dir_file.py
+++ b/python/api/download_work_dir_file.py
@@ -7,6 +7,8 @@
from python.helpers.api import ApiHandler, Input, Output, Request
from python.helpers import files, runtime
from python.api import file_info
+from urllib.parse import quote
+
def stream_file_download(file_source, download_name, chunk_size=8192):
@@ -63,7 +65,7 @@ def generate():
content_type=content_type,
direct_passthrough=True, # Prevent Flask from buffering the response
headers={
- 'Content-Disposition': f'attachment; filename="{download_name}"',
+ 'Content-Disposition': make_disposition(download_name),
'Content-Length': str(file_size), # Critical for browser progress bars
'Cache-Control': 'no-cache',
'X-Accel-Buffering': 'no', # Disable nginx buffering
@@ -74,6 +76,15 @@ def generate():
return response
+def make_disposition(download_name: str) -> str:
+ # Basic ASCII fallback (strip or replace weird chars)
+ ascii_fallback = download_name.encode("ascii", "ignore").decode("ascii") or "download"
+ utf8_name = quote(download_name) # URL-encode UTF-8 bytes
+
+ # RFC 5987: filename* with UTF-8
+ return f'attachment; filename="{ascii_fallback}"; filename*=UTF-8\'\'{utf8_name}'
+
+
class DownloadFile(ApiHandler):
@classmethod
From 11d1cb0e645f942c83cc5b9a597319d837f0e391 Mon Sep 17 00:00:00 2001
From: frdel <38891707+frdel@users.noreply.github.com>
Date: Wed, 26 Nov 2025 09:07:30 +0100
Subject: [PATCH 005/436] subagents preparation
---
agent.py | 139 +++++---
agents/agent0/agent.json | 5 +
agents/default/agent.json | 5 +
agents/developer/agent.json | 5 +
agents/hacker/agent.json | 5 +
agents/researcher/agent.json | 5 +
prompts/agent.system.tool.call_sub.md | 4 +-
prompts/agent.system.tool.call_sub.py | 49 +--
prompts/agent.system.tools.py | 6 +-
python/api/subagents.py | 58 +++
.../agent_init/_15_load_profile_settings.py | 74 ++--
python/helpers/extension.py | 54 +--
python/helpers/files.py | 62 +++-
python/helpers/projects.py | 51 ++-
python/helpers/subagents.py | 329 ++++++++++++++++++
python/tools/behaviour_adjustment.py | 2 +-
16 files changed, 703 insertions(+), 150 deletions(-)
create mode 100644 agents/agent0/agent.json
create mode 100644 agents/default/agent.json
create mode 100644 agents/developer/agent.json
create mode 100644 agents/hacker/agent.json
create mode 100644 agents/researcher/agent.json
create mode 100644 python/api/subagents.py
create mode 100644 python/helpers/subagents.py
diff --git a/agent.py b/agent.py
index 594dc37bc5..b86fb25550 100644
--- a/agent.py
+++ b/agent.py
@@ -11,8 +11,16 @@
import uuid
import models
-from python.helpers import extract_tools, files, errors, history, tokens, context as context_helper
-from python.helpers import dirty_json
+from python.helpers import (
+ extract_tools,
+ files,
+ errors,
+ history,
+ tokens,
+ context as context_helper,
+ dirty_json,
+ subagents
+)
from python.helpers.print_style import PrintStyle
from langchain_core.prompts import (
@@ -69,9 +77,10 @@ def __init__(
# initialize state
self.name = name
self.config = config
+ self.data = data or {}
+ self.output_data = output_data or {}
self.log = log or Log.Log()
self.log.context = self
- self.agent0 = agent0 or Agent(0, self.config, self)
self.paused = paused
self.streaming_agent = streaming_agent
self.task: DeferredTask | None = None
@@ -80,10 +89,9 @@ def __init__(
AgentContext._counter += 1
self.no = AgentContext._counter
self.last_message = last_message or datetime.now(timezone.utc)
- self.data = data or {}
- self.output_data = output_data or {}
-
+ # initialize agent at last (context is complete now)
+ self.agent0 = agent0 or Agent(0, self.config, self)
@staticmethod
def get(id: str):
@@ -100,7 +108,7 @@ def use(id: str):
@staticmethod
def current():
- ctxid = context_helper.get_context_data("agent_context_id","")
+ ctxid = context_helper.get_context_data("agent_context_id", "")
if not ctxid:
return None
return AgentContext.get(ctxid)
@@ -122,7 +130,8 @@ def all():
@staticmethod
def generate_id():
def generate_short_id():
- return ''.join(random.choices(string.ascii_letters + string.digits, k=8))
+ return "".join(random.choices(string.ascii_letters + string.digits, k=8))
+
while True:
short_id = generate_short_id()
if short_id not in AgentContext._contexts:
@@ -132,6 +141,7 @@ def generate_short_id():
def get_notification_manager(cls):
if cls._notification_manager is None:
from python.helpers.notification import NotificationManager # type: ignore
+
cls._notification_manager = NotificationManager()
return cls._notification_manager
@@ -269,7 +279,6 @@ async def _process_chain(self, agent: "Agent", msg: "UserMessage|str", user=True
agent.handle_critical_exception(e)
-
@dataclass
class AgentConfig:
chat_model: models.ModelConfig
@@ -280,7 +289,9 @@ class AgentConfig:
profile: str = ""
memory_subdir: str = ""
knowledge_subdirs: list[str] = field(default_factory=lambda: ["default", "custom"])
- browser_http_headers: dict[str, str] = field(default_factory=dict) # Custom HTTP headers for browser requests
+ browser_http_headers: dict[str, str] = field(
+ default_factory=dict
+ ) # Custom HTTP headers for browser requests
code_exec_ssh_enabled: bool = True
code_exec_ssh_addr: str = "localhost"
code_exec_ssh_port: int = 55022
@@ -380,7 +391,9 @@ async def monologue(self):
prompt = await self.prepare_prompt(loop_data=self.loop_data)
# call before_main_llm_call extensions
- await self.call_extensions("before_main_llm_call", loop_data=self.loop_data)
+ await self.call_extensions(
+ "before_main_llm_call", loop_data=self.loop_data
+ )
async def reasoning_callback(chunk: str, full: str):
await self.handle_intervention()
@@ -389,7 +402,9 @@ async def reasoning_callback(chunk: str, full: str):
# Pass chunk and full data to extensions for processing
stream_data = {"chunk": chunk, "full": full}
await self.call_extensions(
- "reasoning_stream_chunk", loop_data=self.loop_data, stream_data=stream_data
+ "reasoning_stream_chunk",
+ loop_data=self.loop_data,
+ stream_data=stream_data,
)
# Stream masked chunk after extensions processed it
if stream_data.get("chunk"):
@@ -405,7 +420,9 @@ async def stream_callback(chunk: str, full: str):
# Pass chunk and full data to extensions for processing
stream_data = {"chunk": chunk, "full": full}
await self.call_extensions(
- "response_stream_chunk", loop_data=self.loop_data, stream_data=stream_data
+ "response_stream_chunk",
+ loop_data=self.loop_data,
+ stream_data=stream_data,
)
# Stream masked chunk after extensions processed it
if stream_data.get("chunk"):
@@ -570,27 +587,15 @@ async def get_system_prompt(self, loop_data: LoopData) -> list[str]:
return system_prompt
def parse_prompt(self, _prompt_file: str, **kwargs):
- dirs = [files.get_abs_path("prompts")]
- if (
- self.config.profile
- ): # if agent has custom folder, use it and use default as backup
- prompt_dir = files.get_abs_path("agents", self.config.profile, "prompts")
- dirs.insert(0, prompt_dir)
+ dirs = subagents.get_agent_paths_chain(self, "prompts")
prompt = files.parse_file(
- _prompt_file, _directories=dirs, **kwargs
+ _prompt_file, _directories=dirs, _agent=self, **kwargs
)
return prompt
def read_prompt(self, file: str, **kwargs) -> str:
- dirs = [files.get_abs_path("prompts")]
- if (
- self.config.profile
- ): # if agent has custom folder, use it and use default as backup
- prompt_dir = files.get_abs_path("agents", self.config.profile, "prompts")
- dirs.insert(0, prompt_dir)
- prompt = files.read_prompt_file(
- file, _directories=dirs, **kwargs
- )
+ dirs = subagents.get_agent_paths_chain(self, "prompts")
+ prompt = files.read_prompt_file(file, _directories=dirs, _agent=self, **kwargs)
prompt = files.remove_code_fences(prompt)
return prompt
@@ -606,8 +611,12 @@ def hist_add_message(
self.last_message = datetime.now(timezone.utc)
# Allow extensions to process content before adding to history
content_data = {"content": content}
- asyncio.run(self.call_extensions("hist_add_before", content_data=content_data, ai=ai))
- return self.history.add_message(ai=ai, content=content_data["content"], tokens=tokens)
+ asyncio.run(
+ self.call_extensions("hist_add_before", content_data=content_data, ai=ai)
+ )
+ return self.history.add_message(
+ ai=ai, content=content_data["content"], tokens=tokens
+ )
def hist_add_user_message(self, message: UserMessage, intervention: bool = False):
self.history.new_topic() # user message starts a new topic in history
@@ -720,7 +729,9 @@ async def stream_callback(chunk: str, total: str):
system_message=call_data["system"],
user_message=call_data["message"],
response_callback=stream_callback if call_data["callback"] else None,
- rate_limiter_callback=self.rate_limiter_callback if not call_data["background"] else None,
+ rate_limiter_callback=(
+ self.rate_limiter_callback if not call_data["background"] else None
+ ),
)
return response
@@ -742,7 +753,9 @@ async def call_chat_model(
messages=messages,
reasoning_callback=reasoning_callback,
response_callback=response_callback,
- rate_limiter_callback=self.rate_limiter_callback if not background else None,
+ rate_limiter_callback=(
+ self.rate_limiter_callback if not background else None
+ ),
)
return response, reasoning
@@ -817,11 +830,15 @@ async def process_tools(self, msg: str):
# Fallback to local get_tool if MCP tool was not found or MCP lookup failed
if not tool:
tool = self.get_tool(
- name=tool_name, method=tool_method, args=tool_args, message=msg, loop_data=self.loop_data
+ name=tool_name,
+ method=tool_method,
+ args=tool_args,
+ message=msg,
+ loop_data=self.loop_data,
)
if tool:
- self.loop_data.current_tool = tool # type: ignore
+ self.loop_data.current_tool = tool # type: ignore
try:
await self.handle_intervention()
@@ -830,14 +847,20 @@ async def process_tools(self, msg: str):
await self.handle_intervention()
# Allow extensions to preprocess tool arguments
- await self.call_extensions("tool_execute_before", tool_args=tool_args or {}, tool_name=tool_name)
+ await self.call_extensions(
+ "tool_execute_before",
+ tool_args=tool_args or {},
+ tool_name=tool_name,
+ )
response = await tool.execute(**tool_args)
await self.handle_intervention()
# Allow extensions to postprocess tool response
- await self.call_extensions("tool_execute_after", response=response, tool_name=tool_name)
-
+ await self.call_extensions(
+ "tool_execute_after", response=response, tool_name=tool_name
+ )
+
await tool.after_execution(response)
await self.handle_intervention()
@@ -889,34 +912,40 @@ async def handle_response_stream(self, stream: str):
pass
def get_tool(
- self, name: str, method: str | None, args: dict, message: str, loop_data: LoopData | None, **kwargs
+ self,
+ name: str,
+ method: str | None,
+ args: dict,
+ message: str,
+ loop_data: LoopData | None,
+ **kwargs,
):
from python.tools.unknown import Unknown
from python.helpers.tool import Tool
classes = []
- # try agent tools first
- if self.config.profile:
+ # search for tools in agent's folder hierarchy
+ paths = subagents.get_agent_paths_chain(self, "tools", name + ".py", default_root="python")
+ for path in paths:
try:
- classes = extract_tools.load_classes_from_file(
- "agents/" + self.config.profile + "/tools/" + name + ".py", Tool # type: ignore[arg-type]
- )
+ classes = extract_tools.load_classes_from_file(path, Tool) # type: ignore[arg-type]
+ break
except Exception:
- pass
+ continue
- # try default tools
- if not classes:
- try:
- classes = extract_tools.load_classes_from_file(
- "python/tools/" + name + ".py", Tool # type: ignore[arg-type]
- )
- except Exception as e:
- pass
tool_class = classes[0] if classes else Unknown
return tool_class(
- agent=self, name=name, method=method, args=args, message=message, loop_data=loop_data, **kwargs
+ agent=self,
+ name=name,
+ method=method,
+ args=args,
+ message=message,
+ loop_data=loop_data,
+ **kwargs,
)
async def call_extensions(self, extension_point: str, **kwargs) -> Any:
- return await call_extensions(extension_point=extension_point, agent=self, **kwargs)
+ return await call_extensions(
+ extension_point=extension_point, agent=self, **kwargs
+ )
diff --git a/agents/agent0/agent.json b/agents/agent0/agent.json
new file mode 100644
index 0000000000..4fa2cb2c12
--- /dev/null
+++ b/agents/agent0/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Agent 0",
+ "description": "Main agent of the system communicating directly with the user.",
+ "context": ""
+}
diff --git a/agents/default/agent.json b/agents/default/agent.json
new file mode 100644
index 0000000000..846d2a679f
--- /dev/null
+++ b/agents/default/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Default prompts",
+ "description": "Default prompt file templates. Should be inherited and overridden by specialized prompt profiles.",
+ "context": ""
+}
diff --git a/agents/developer/agent.json b/agents/developer/agent.json
new file mode 100644
index 0000000000..8680176e36
--- /dev/null
+++ b/agents/developer/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Developer",
+ "description": "Agent specialized in complex software development.",
+ "context": "Use this agent for software development tasks, including writing code, debugging, refactoring, and architectural design."
+}
diff --git a/agents/hacker/agent.json b/agents/hacker/agent.json
new file mode 100644
index 0000000000..cde645d798
--- /dev/null
+++ b/agents/hacker/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Hacker",
+ "description": "Agent specialized in cyber security and penetration testing.",
+ "context": "Use this agent for cybersecurity tasks such as penetration testing, vulnerability analysis, and security auditing."
+}
diff --git a/agents/researcher/agent.json b/agents/researcher/agent.json
new file mode 100644
index 0000000000..e06a9639b5
--- /dev/null
+++ b/agents/researcher/agent.json
@@ -0,0 +1,5 @@
+{
+ "title": "Researcher",
+ "description": "Agent specialized in research, data analysis and reporting.",
+ "context": "Use this agent for information gathering, data analysis, topic research, and generating comprehensive reports."
+}
diff --git a/prompts/agent.system.tool.call_sub.md b/prompts/agent.system.tool.call_sub.md
index b2e267f932..c5c22dc75d 100644
--- a/prompts/agent.system.tool.call_sub.md
+++ b/prompts/agent.system.tool.call_sub.md
@@ -1,3 +1,4 @@
+{{if agent_profiles}}
### call_subordinate
you can use subordinates for subtasks
@@ -31,4 +32,5 @@ example usage
- you might be part of long chain of subordinates, avoid slow and expensive rewriting subordinate responses, instead use `§§include()` alias to include the response as is
**available profiles:**
-{{agent_profiles}}
\ No newline at end of file
+{{agent_profiles}}
+{{endif}}
\ No newline at end of file
diff --git a/prompts/agent.system.tool.call_sub.py b/prompts/agent.system.tool.call_sub.py
index e840cca60e..946bd7f321 100644
--- a/prompts/agent.system.tool.call_sub.py
+++ b/prompts/agent.system.tool.call_sub.py
@@ -1,31 +1,34 @@
import json
-from typing import Any
+from typing import Any, TYPE_CHECKING
from python.helpers.files import VariablesPlugin
-from python.helpers import files
+from python.helpers import files, projects, subagents
from python.helpers.print_style import PrintStyle
+if TYPE_CHECKING:
+ from agent import Agent
-class CallSubordinate(VariablesPlugin):
- def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]:
- # collect all prompt profiles from subdirectories (_context.md file)
- profiles = []
- agent_subdirs = files.get_subdirectories("agents", exclude=["_example"])
- for agent_subdir in agent_subdirs:
- try:
- context = files.read_prompt_file(
- "_context.md",
- [files.get_abs_path("agents", agent_subdir)]
- )
- profiles.append({"name": agent_subdir, "context": context})
- except Exception as e:
- PrintStyle().error(f"Error loading agent profile '{agent_subdir}': {e}")
+class CallSubordinate(VariablesPlugin):
+ def get_variables(
+ self, file: str, backup_dirs: list[str] | None = None, **kwargs
+ ) -> dict[str, Any]:
- # in case of no profiles
- if not profiles:
- # PrintStyle().error("No agent profiles found")
- profiles = [
- {"name": "default", "context": "Default Agent-Zero AI Assistant"}
- ]
+ # current agent instance
+ agent: Agent | None = kwargs.get("_agent", None)
+ # current project
+ project = projects.get_context_project_name(agent.context) if agent else None
+ # available agents in project (or global)
+ agents = subagents.get_available_agents_dict(project)
- return {"agent_profiles": profiles}
+ if agents:
+ profiles = {}
+ for name, subagent in agents.items():
+ profiles[name] = {
+ "title": subagent.title,
+ "description": subagent.description,
+ "context": subagent.context,
+ }
+ return {"agent_profiles": profiles}
+ else:
+ return {"agent_profiles": None}
+
diff --git a/prompts/agent.system.tools.py b/prompts/agent.system.tools.py
index bfbe150779..f94544b304 100644
--- a/prompts/agent.system.tools.py
+++ b/prompts/agent.system.tools.py
@@ -5,8 +5,8 @@
from python.helpers.print_style import PrintStyle
-class CallSubordinate(VariablesPlugin):
- def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict[str, Any]:
+class BuidToolsPrompt(VariablesPlugin):
+ def get_variables(self, file: str, backup_dirs: list[str] | None = None, **kwargs) -> dict[str, Any]:
# collect all prompt folders in order of their priority
folder = files.get_abs_path(os.path.dirname(file))
@@ -22,7 +22,7 @@ def get_variables(self, file: str, backup_dirs: list[str] | None = None) -> dict
tools = []
for prompt_file in prompt_files:
try:
- tool = files.read_prompt_file(prompt_file)
+ tool = files.read_prompt_file(prompt_file, **kwargs)
tools.append(tool)
except Exception as e:
PrintStyle().error(f"Error loading tool '{prompt_file}': {e}")
diff --git a/python/api/subagents.py b/python/api/subagents.py
new file mode 100644
index 0000000000..6f501ac76b
--- /dev/null
+++ b/python/api/subagents.py
@@ -0,0 +1,58 @@
+from python.helpers.api import ApiHandler, Input, Output, Request, Response
+from python.helpers import subagents
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from python.helpers import projects
+
+class Subagents(ApiHandler):
+ async def process(self, input: Input, request: Request) -> Output:
+ action = input.get("action", "")
+ ctxid = input.get("context_id", None)
+
+ if ctxid:
+ _context = self.use_context(ctxid)
+
+ try:
+ if action == "list":
+ data = self.get_subagents_list()
+ elif action == "load":
+ data = self.load_agent(input.get("name", None))
+ elif action == "save":
+ data = self.save_agent(input.get("name", None), input.get("data", None))
+ elif action == "delete":
+ data = self.delete_agent(input.get("name", None))
+ else:
+ raise Exception("Invalid action")
+
+ return {
+ "ok": True,
+ "data": data,
+ }
+ except Exception as e:
+ return {
+ "ok": False,
+ "error": str(e),
+ }
+
+ def get_subagents_list(self):
+ return subagents.get_agents_list()
+
+ def load_agent(self, name: str|None):
+ if name is None:
+ raise Exception("Subagent name is required")
+ return subagents.load_agent_data(name)
+
+ def save_agent(self, name:str|None, data: dict|None):
+ if name is None:
+ raise Exception("Subagent name is required")
+ if data is None:
+ raise Exception("Subagent data is required")
+ subagent = subagents.SubAgent(**data)
+ subagents.save_agent_data(name, subagent)
+ return subagents.load_agent_data(name)
+
+ def delete_agent(self, name: str|None):
+ if name is None:
+ raise Exception("Subagent name is required")
+ subagents.delete_agent_data(name)
\ No newline at end of file
diff --git a/python/extensions/agent_init/_15_load_profile_settings.py b/python/extensions/agent_init/_15_load_profile_settings.py
index 342639535a..d4261d7b9d 100644
--- a/python/extensions/agent_init/_15_load_profile_settings.py
+++ b/python/extensions/agent_init/_15_load_profile_settings.py
@@ -1,5 +1,5 @@
from initialize import initialize_agent
-from python.helpers import dirty_json, files
+from python.helpers import dirty_json, files, subagents, projects
from python.helpers.extension import Extension
@@ -10,44 +10,44 @@ async def execute(self, **kwargs) -> None:
if not self.agent or not self.agent.config.profile:
return
- settings_path = files.get_abs_path("agents", self.agent.config.profile, "settings.json")
- if files.exists(settings_path):
- try:
- override_settings_str = files.read_file(settings_path)
- override_settings = dirty_json.parse(override_settings_str)
-
- if isinstance(override_settings, dict):
- # Preserve the original memory_subdir unless it's explicitly overridden
- current_memory_subdir = self.agent.config.memory_subdir
-
- new_config = initialize_agent(override_settings=override_settings)
-
- if (
- "agent_memory_subdir" not in override_settings
- and current_memory_subdir != "default"
- ):
- new_config.memory_subdir = current_memory_subdir
-
- self.agent.config = new_config
-
+ config_files = subagents.get_agent_paths_chain(self.agent, "settings.json", include_default=False)
+
+ settings_override = {}
+ for settings_path in config_files:
+ if files.exists(settings_path):
+ try:
+ override_settings_str = files.read_file(settings_path)
+ override_settings = dirty_json.try_parse(override_settings_str)
+ if isinstance(override_settings, dict):
+ settings_override.update(override_settings)
+ else:
+ raise Exception(
+ f"Subordinate settings in {settings_path} must be a JSON object."
+ )
+ except Exception as e:
self.agent.context.log.log(
- type="info",
+ type="error",
content=(
- "Loaded custom settings for agent "
- f"{self.agent.number} with profile '{self.agent.config.profile}'."
+ f"Error loading subordinate settings from {settings_path} for "
+ f"profile '{self.agent.config.profile}': {e}"
),
)
- else:
- raise Exception(
- f"Subordinate settings in {settings_path} "
- "must be a JSON object."
- )
- except Exception as e:
- self.agent.context.log.log(
- type="error",
- content=(
- "Error loading subordinate settings for "
- f"profile '{self.agent.config.profile}': {e}"
- ),
- )
+ if settings_override:
+ # Preserve the original memory_subdir unless it's explicitly overridden
+ current_memory_subdir = self.agent.config.memory_subdir
+ new_config = initialize_agent(override_settings=settings_override)
+ if (
+ "agent_memory_subdir" not in settings_override
+ and current_memory_subdir != "default"
+ ):
+ new_config.memory_subdir = current_memory_subdir
+ self.agent.config = new_config
+ # self.agent.context.log.log(
+ # type="info",
+ # content=(
+ # "Loaded custom settings for agent "
+ # f"{self.agent.number} with profile '{self.agent.config.profile}'."
+ # ),
+ # )
+
diff --git a/python/helpers/extension.py b/python/helpers/extension.py
index 5c12d48066..d28e86c4ec 100644
--- a/python/helpers/extension.py
+++ b/python/helpers/extension.py
@@ -1,14 +1,22 @@
from abc import abstractmethod
from typing import Any
-from python.helpers import extract_tools, files
+from python.helpers import extract_tools, files
from typing import TYPE_CHECKING
+
if TYPE_CHECKING:
from agent import Agent
+
+DEFAULT_EXTENSIONS_FOLDER = "python/extensions"
+USER_EXTENSIONS_FOLDER = "usr/extensions"
+
+_cache: dict[str, list[type["Extension"]]] = {}
+
+
class Extension:
def __init__(self, agent: "Agent|None", **kwargs):
- self.agent: "Agent" = agent # type: ignore < here we ignore the type check as there are currently no extensions without an agent
+ self.agent: "Agent" = agent # type: ignore < here we ignore the type check as there are currently no extensions without an agent
self.kwargs = kwargs
@abstractmethod
@@ -16,25 +24,26 @@ async def execute(self, **kwargs) -> Any:
pass
-async def call_extensions(extension_point: str, agent: "Agent|None" = None, **kwargs) -> Any:
-
- # get default extensions
- defaults = await _get_extensions("python/extensions/" + extension_point)
- classes = defaults
+async def call_extensions(
+ extension_point: str, agent: "Agent|None" = None, **kwargs
+) -> Any:
+ from python.helpers import projects, subagents
- # get agent extensions
- if agent and agent.config.profile:
- agentics = await _get_extensions("agents/" + agent.config.profile + "/extensions/" + extension_point)
- if agentics:
- # merge them, agentics overwrite defaults
- unique = {}
- for cls in defaults + agentics:
- unique[_get_file_from_module(cls.__module__)] = cls
+ # search for extension folders in all agent's paths
+ paths = subagents.get_agent_paths_chain(agent, "extensions", extension_point, default_root="python")
+ all_exts = [cls for path in paths for cls in _get_extensions(path)]
- # sort by name
- classes = sorted(unique.values(), key=lambda cls: _get_file_from_module(cls.__module__))
+ # merge: first occurrence of file name is the override
+ unique = {}
+ for cls in all_exts:
+ file = _get_file_from_module(cls.__module__)
+ if file not in unique:
+ unique[file] = cls
+ classes = sorted(
+ unique.values(), key=lambda cls: _get_file_from_module(cls.__module__)
+ )
- # call extensions
+ # execute unique extensions
for cls in classes:
await cls(agent=agent).execute(**kwargs)
@@ -42,8 +51,8 @@ async def call_extensions(extension_point: str, agent: "Agent|None" = None, **kw
def _get_file_from_module(module_name: str) -> str:
return module_name.split(".")[-1]
-_cache: dict[str, list[type[Extension]]] = {}
-async def _get_extensions(folder:str):
+
+def _get_extensions(folder: str):
global _cache
folder = files.get_abs_path(folder)
if folder in _cache:
@@ -51,10 +60,7 @@ async def _get_extensions(folder:str):
else:
if not files.exists(folder):
return []
- classes = extract_tools.load_classes_from_folder(
- folder, "*", Extension
- )
+ classes = extract_tools.load_classes_from_folder(folder, "*", Extension)
_cache[folder] = classes
return classes
-
diff --git a/python/helpers/files.py b/python/helpers/files.py
index 0d74b892bb..0ed9cb06d6 100644
--- a/python/helpers/files.py
+++ b/python/helpers/files.py
@@ -15,6 +15,7 @@
import inspect
import glob
import mimetypes
+from simpleeval import simple_eval
class VariablesPlugin(ABC):
@@ -138,6 +139,9 @@ def read_prompt_file(
variables = load_plugin_variables(_file, _directories, **kwargs) or {} # type: ignore
variables.update(kwargs)
+ # evaluate conditions
+ content = evaluate_text_conditions(content, **variables)
+
# Replace placeholders with values from kwargs
content = replace_placeholders_text(content, **variables)
@@ -152,6 +156,53 @@ def read_prompt_file(
return content
+def evaluate_text_conditions(_content: str, **kwargs):
+ # search for {{if ...}} ... {{endif}} blocks and evaluate conditions with nesting support
+ if_pattern = re.compile(r"{{\s*if\s+(.*?)}}", flags=re.DOTALL)
+ token_pattern = re.compile(r"{{\s*(if\b.*?|endif)\s*}}", flags=re.DOTALL)
+
+ def _process(text: str) -> str:
+ m_if = if_pattern.search(text)
+ if not m_if:
+ return text
+
+ depth = 1
+ pos = m_if.end()
+ while True:
+ m = token_pattern.search(text, pos)
+ if not m:
+ # Unterminated if-block, do not modify text
+ return text
+ token = m.group(1)
+ depth += 1 if token.startswith("if ") else -1
+ if depth == 0:
+ break
+ pos = m.end()
+
+ before = text[: m_if.start()]
+ condition = m_if.group(1).strip()
+ inner = text[m_if.end() : m.start()]
+ after = text[m.end() :]
+
+ try:
+ result = simple_eval(condition, names=kwargs)
+ except Exception:
+ # On evaluation error, do not modify this block
+ return text
+
+ if result:
+ # Keep inner content (processed recursively), remove if/endif markers
+ kept = before + _process(inner)
+ else:
+ # Skip entire block, including inner content and markers
+ kept = before
+
+ # Continue processing the remaining text after this block
+ return kept + _process(after)
+
+ return _process(_content)
+
+
def read_file(relative_path: str, encoding="utf-8"):
# Try to get the absolute path for the file from the original directory or backup directories
absolute_path = get_abs_path(relative_path)
@@ -192,8 +243,9 @@ def replace_placeholders_json(_content: str, **kwargs):
# Replace placeholders with values from kwargs
for key, value in kwargs.items():
placeholder = "{{" + key + "}}"
- strval = json.dumps(value)
- _content = _content.replace(placeholder, strval)
+ if placeholder in _content:
+ strval = json.dumps(value)
+ _content = _content.replace(placeholder, strval)
return _content
@@ -508,7 +560,7 @@ def safe_file_name(filename: str) -> str:
def read_text_files_in_dir(
- dir_path: str, max_size: int = 1024 * 1024
+ dir_path: str, max_size: int = 1024 * 1024, pattern: str = "*"
) -> dict[str, str]:
abs_path = get_abs_path(dir_path)
@@ -519,7 +571,9 @@ def read_text_files_in_dir(
try:
if not os.path.isfile(file_path):
continue
- if os.path.getsize(file_path) > max_size:
+ if not fnmatch(os.path.basename(file_path), pattern):
+ continue
+ if max_size > 0 and os.path.getsize(file_path) > max_size:
continue
mime, _ = mimetypes.guess_type(file_path)
if mime is not None and not mime.startswith("text"):
diff --git a/python/helpers/projects.py b/python/helpers/projects.py
index e649706194..6e25738c6e 100644
--- a/python/helpers/projects.py
+++ b/python/helpers/projects.py
@@ -25,7 +25,9 @@ class FileStructureInjectionSettings(TypedDict):
max_lines: int
gitignore: str
-
+class SubAgentSettings(TypedDict):
+ enabled: bool
+
class BasicProjectData(TypedDict):
title: str
description: str
@@ -36,13 +38,14 @@ class BasicProjectData(TypedDict):
] # in the future we can add cutom and point to another existing folder
file_structure: FileStructureInjectionSettings
-
class EditProjectData(BasicProjectData):
name: str
instruction_files_count: int
knowledge_files_count: int
variables: str
secrets: str
+ subagents: dict[str, SubAgentSettings]
+
def get_projects_parent_folder():
@@ -128,6 +131,7 @@ def _normalizeEditData(data: EditProjectData):
"file_structure",
_default_file_structure_settings(),
),
+ subagents=data.get("subagents", {}),
)
@@ -152,6 +156,7 @@ def update_project(name: str, data: EditProjectData):
# save secrets
save_project_variables(name, current["variables"])
save_project_secrets(name, current["secrets"])
+ save_project_subagents(name, current["subagents"])
reactivate_project_in_chats(name)
return name
@@ -170,6 +175,7 @@ def load_edit_project_data(name: str) -> EditProjectData:
) # for additional info
variables = load_project_variables(name)
secrets = load_project_secrets_masked(name)
+ subagents = load_project_subagents(name)
knowledge_files_count = get_knowledge_files_count(name)
data = EditProjectData(
**data,
@@ -178,6 +184,7 @@ def load_edit_project_data(name: str) -> EditProjectData:
knowledge_files_count=knowledge_files_count,
variables=variables,
secrets=secrets,
+ subagents=subagents,
)
data = _normalizeEditData(data)
return data
@@ -314,6 +321,46 @@ def save_project_variables(name: str, variables: str):
files.write_file(abs_path, variables)
+def load_project_subagents(name: str) -> dict[str, SubAgentSettings]:
+ try:
+ abs_path = files.get_abs_path(get_project_meta_folder(name), "agents.json")
+ data = dirty_json.parse(files.read_file(abs_path))
+ if isinstance(data, dict):
+ return _normalize_subagents(data) # type: ignore[arg-type,return-value]
+ return {}
+ except Exception:
+ return {}
+
+
+def save_project_subagents(name: str, subagents_data: dict[str, SubAgentSettings]):
+ abs_path = files.get_abs_path(get_project_meta_folder(name), "agents.json")
+ normalized = _normalize_subagents(subagents_data)
+ content = dirty_json.stringify(normalized)
+ files.write_file(abs_path, content)
+
+
+def _normalize_subagents(
+ subagents_data: dict[str, SubAgentSettings]
+) -> dict[str, SubAgentSettings]:
+ from python.helpers import subagents
+
+ agents_dict = subagents.get_agents_dict()
+
+ normalized: dict[str, SubAgentSettings] = {}
+ for key, value in subagents_data.items():
+ agent = agents_dict.get(key)
+ if not agent:
+ continue
+
+ enabled = bool(value["enabled"])
+ if agent.enabled == enabled:
+ continue
+
+ normalized[key] = {"enabled": enabled}
+
+ return normalized
+
+
def load_project_secrets_masked(name: str, merge_with_global=False):
from python.helpers import secrets
diff --git a/python/helpers/subagents.py b/python/helpers/subagents.py
new file mode 100644
index 0000000000..fb8886a4a1
--- /dev/null
+++ b/python/helpers/subagents.py
@@ -0,0 +1,329 @@
+from python.helpers import files
+from typing import TypedDict, TYPE_CHECKING
+from pydantic import BaseModel, model_validator
+import json
+from typing import Literal
+
+GLOBAL_DIR = "."
+USER_DIR = "usr"
+DEFAULT_AGENTS_DIR = "agents"
+USER_AGENTS_DIR = "usr/agents"
+
+type Origin = Literal["default", "user", "project"]
+
+if TYPE_CHECKING:
+ from agent import Agent
+
+
+class SubAgentListItem(BaseModel):
+ name: str = ""
+ title: str = ""
+ description: str = ""
+ context: str = ""
+ origin: list[Origin] = []
+ enabled: bool = True
+
+ @model_validator(mode="after")
+ def post_validator(self):
+ if self.title == "":
+ self.title = self.name
+ return self
+
+
+class SubAgent(SubAgentListItem):
+ prompts: dict[str, str] = {}
+
+
+def get_agents_list(project_name: str | None = None) -> list[SubAgentListItem]:
+ return list(get_agents_dict(project_name).values())
+
+
+def get_agents_dict(
+ project_name: str | None = None,
+) -> dict[str, SubAgentListItem]:
+ def _merge_agent_dicts(
+ base: dict[str, SubAgentListItem],
+ overrides: dict[str, SubAgentListItem],
+ ) -> dict[str, SubAgentListItem]:
+ merged: dict[str, SubAgentListItem] = dict(base)
+ for name, override in overrides.items():
+ base_agent = merged.get(name)
+ merged[name] = (
+ _merge_agent_list_items(base_agent, override)
+ if base_agent
+ else override
+ )
+ return merged
+
+ # load default and custom agents and merge
+ default_agents = _get_agents_list_from_dir(DEFAULT_AGENTS_DIR, origin="default")
+ custom_agents = _get_agents_list_from_dir(USER_AGENTS_DIR, origin="user")
+ merged = _merge_agent_dicts(default_agents, custom_agents)
+
+ # merge with project agents if possible
+ if project_name:
+ from python.helpers import projects
+
+ project_agents_dir = projects.get_project_meta_folder(project_name, "agents")
+ project_agents = _get_agents_list_from_dir(project_agents_dir, origin="project")
+ merged = _merge_agent_dicts(merged, project_agents)
+
+ return merged
+
+
+def _get_agents_list_from_dir(dir: str, origin: Origin) -> dict[str, SubAgentListItem]:
+ result: dict[str, SubAgentListItem] = {}
+ subdirs = files.get_subdirectories(dir)
+
+ for subdir in subdirs:
+ try:
+ agent_json = files.read_file(files.get_abs_path(dir, subdir, "agent.json"))
+ agent_data = SubAgentListItem.model_validate_json(agent_json)
+ name = agent_data.name or subdir
+ agent_data.name = name
+ agent_data.origin = [origin]
+ result[name] = agent_data
+ except Exception:
+ continue
+
+ return result
+
+
+def load_agent_data(name: str, project_name: str | None = None) -> SubAgent:
+ def _merge_agent(
+ original: SubAgent | None, override: SubAgent | None = None
+ ) -> SubAgent | None:
+ if original and override:
+ return _merge_agents(original, override)
+ elif original:
+ return original
+ return override
+
+ # load default and user agents and merge
+ default_agent = _load_agent_data_from_dir(
+ DEFAULT_AGENTS_DIR, name, origin="default"
+ )
+ user_agent = _load_agent_data_from_dir(USER_AGENTS_DIR, name, origin="user")
+ merged = _merge_agent(default_agent, user_agent)
+
+ # merge with project agent if possible
+ if project_name:
+ from python.helpers import projects
+
+ project_agents_dir = projects.get_project_meta_folder(project_name, "agents")
+ project_agent = _load_agent_data_from_dir(
+ project_agents_dir, name, origin="project"
+ )
+ merged = _merge_agent(merged, project_agent)
+
+ if merged is None:
+ raise FileNotFoundError(
+ f"Agent '{name}' not found in default or custom directories"
+ )
+
+ return merged
+
+
+def save_agent_data(name: str, subagent: SubAgent) -> None:
+ # write agent.json in custom directory
+ agent_dir = f"{USER_AGENTS_DIR}/{name}"
+ agent_json = {
+ "title": subagent.title,
+ "description": subagent.description,
+ "context": subagent.context,
+ "enabled": subagent.enabled,
+ }
+ files.write_file(f"{agent_dir}/agent.json", json.dumps(agent_json, indent=2))
+
+ # replace prompts in custom directory
+ prompts_dir = f"{agent_dir}/prompts"
+ # clear existing custom prompts directory (if any)
+ files.delete_dir(prompts_dir)
+
+ prompts = subagent.prompts or {}
+ for name, content in prompts.items():
+ safe_name = files.safe_file_name(name)
+ if not safe_name.endswith(".md"):
+ safe_name += ".md"
+ files.write_file(f"{prompts_dir}/{safe_name}", content)
+
+
+def delete_agent_data(name: str) -> None:
+ files.delete_dir(f"{USER_AGENTS_DIR}/{name}")
+
+
+def _load_agent_data_from_dir(dir: str, name: str, origin: Origin) -> SubAgent | None:
+ try:
+ subagent_json = files.read_file(files.get_abs_path(dir, name, "agent.json"))
+ subagent = SubAgent.model_validate_json(subagent_json)
+ except Exception:
+ # backward compatibility (before agent.json existed)
+ try:
+ context_file = files.read_file(files.get_abs_path(dir, name, "_context.md"))
+ except Exception:
+ context_file = ""
+ subagent = SubAgent(
+ name=name,
+ title=name,
+ description="",
+ context=context_file,
+ origin=[origin],
+ prompts={},
+ )
+
+ # non-stored fields
+ subagent.name = name
+ subagent.origin = [origin]
+
+ prompts_dir = f"{dir}/{name}/prompts"
+ try:
+ prompts = files.read_text_files_in_dir(prompts_dir, pattern="*.md")
+ except Exception:
+ prompts = {}
+
+ subagent.prompts = prompts or {}
+ return subagent
+
+
+def _merge_agents(base: SubAgent | None, override: SubAgent | None) -> SubAgent | None:
+ if base is None:
+ return override
+ if override is None:
+ return base
+
+ merged_prompts: dict[str, str] = {}
+ merged_prompts.update(base.prompts or {})
+ merged_prompts.update(override.prompts or {})
+
+ return SubAgent(
+ name=override.name,
+ title=override.title,
+ description=override.description,
+ context=override.context,
+ origin=_merge_origins(base.origin, override.origin),
+ prompts=merged_prompts,
+ )
+
+
+def _merge_agent_list_items(
+ base: SubAgentListItem, override: SubAgentListItem
+) -> SubAgentListItem:
+ return SubAgentListItem(
+ name=override.name or base.name,
+ title=override.title or base.title,
+ description=override.description or base.description,
+ context=override.context or base.context,
+ origin=_merge_origins(base.origin, override.origin),
+ )
+
+
+def _merge_origins(base: list[Origin], override: list[Origin]) -> list[Origin]:
+ return base + override
+
+
+def get_default_promp_file_names() -> list[str]:
+ return files.list_files("prompts", filter="*.md")
+
+
+def get_available_agents_dict(
+ project_name: str | None,
+) -> dict[str, SubAgentListItem]:
+ # all available agents
+ all_agents = get_agents_dict()
+ # filter by project settings
+ from python.helpers import projects
+
+ project_settings = (
+ projects.load_project_subagents(project_name) if project_name else {}
+ )
+
+ filtered_agents: dict[str, SubAgentListItem] = {}
+ for name, agent in all_agents.items():
+ if name in project_settings:
+ agent.enabled = project_settings[name]["enabled"]
+ if agent.enabled:
+ filtered_agents[name] = agent
+ return filtered_agents
+
+
+def get_agent_paths(
+ agent: "Agent", *subpaths, must_exist_completely: bool = True
+) -> list[str]:
+ """Returns list of possible paths for the given agent and subpaths. Order is from lowest priority (global)."""
+
+ if not agent or not agent.config.profile:
+ return []
+ from python.helpers import projects
+
+ project_name = projects.get_context_project_name(agent.context)
+ return get_agent_profile_paths(
+ agent.config.profile,
+ project_name,
+ *subpaths,
+ must_exist_completely=must_exist_completely,
+ )
+
+
+def get_agent_paths_chain(
+ agent: "Agent|None",
+ *subpaths,
+ must_exist_completely: bool = True,
+ include_project: bool = True,
+ include_user: bool = True,
+ include_default: bool = True,
+ default_root: str = "",
+) -> list[str]:
+ """Returns list of file paths for the given agent and subpaths, searched in order of priority:
+ project/agents/, usr/agents/, agents/, project/, usr/, default."""
+ from python.helpers import projects
+
+ if agent and agent.config.profile:
+ project_name = projects.get_context_project_name(agent.context)
+ paths = get_agent_profile_paths(
+ agent.config.profile,
+ project_name,
+ *subpaths,
+ must_exist_completely=must_exist_completely,
+ )
+ list.reverse(paths) # reverse for proper priority
+ else:
+ paths = []
+ project_name = ""
+
+ if include_project and project_name:
+ path = projects.get_project_meta_folder(project_name, *subpaths)
+ if (not must_exist_completely) or files.exists(path):
+ paths.append(path)
+ if include_user:
+ path = files.get_abs_path(USER_DIR, *subpaths)
+ if (not must_exist_completely) or files.exists(path):
+ paths.append(path)
+ if include_default:
+ path = files.get_abs_path(default_root, *subpaths)
+ if (not must_exist_completely) or files.exists(path):
+ paths.append(path)
+ return paths
+
+
+def get_agent_profile_paths(
+ name: str,
+ project_name: str | None = None,
+ *subpaths,
+ must_exist_completely: bool = True,
+) -> list[str]:
+ result = []
+ check_subpaths = subpaths if must_exist_completely else []
+
+ if files.exists(files.get_abs_path(DEFAULT_AGENTS_DIR, name, *check_subpaths)):
+ result.append(files.get_abs_path(DEFAULT_AGENTS_DIR, name, *subpaths))
+ if files.exists(files.get_abs_path(USER_AGENTS_DIR, name, *check_subpaths)):
+ result.append(files.get_abs_path(USER_AGENTS_DIR, name, *subpaths))
+ if project_name:
+ from python.helpers import projects
+
+ project_agent_dir = projects.get_project_meta_folder(
+ project_name, "agents", name
+ )
+ if files.exists(files.get_abs_path(project_agent_dir, *check_subpaths)):
+ result.append(files.get_abs_path(project_agent_dir, *subpaths))
+ return result
diff --git a/python/tools/behaviour_adjustment.py b/python/tools/behaviour_adjustment.py
index e9c60bb05c..31fe67cd70 100644
--- a/python/tools/behaviour_adjustment.py
+++ b/python/tools/behaviour_adjustment.py
@@ -58,7 +58,7 @@ def get_custom_rules_file(agent: Agent):
def read_rules(agent: Agent):
rules_file = get_custom_rules_file(agent)
if files.exists(rules_file):
- rules = files.read_prompt_file(rules_file)
+ rules = agent.read_prompt(rules_file)
return agent.read_prompt("agent.system.behaviour.md", rules=rules)
else:
rules = agent.read_prompt("agent.system.behaviour_default.md")
From 00168a41e7ef1ab9a883f22087261a739135ed69 Mon Sep 17 00:00:00 2001
From: pUrGe12
Date: Wed, 26 Nov 2025 16:14:04 +0530
Subject: [PATCH 006/436] moving from double quotes to single quotes because
that is breaking the fstring
---
python/api/csrf_token.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/python/api/csrf_token.py b/python/api/csrf_token.py
index 37bb8453a5..0e46a4d46e 100644
--- a/python/api/csrf_token.py
+++ b/python/api/csrf_token.py
@@ -29,7 +29,7 @@ async def process(self, input: Input, request: Request) -> Output:
if not origin_check["ok"]:
return {
"ok": False,
- "error": f"Origin {self.get_origin_from_request(request)} not allowed when login is disabled. Set login and password or add your URL to ALLOWED_ORIGINS env variable. Currently allowed origins: {",".join(origin_check['allowed_origins'])}",
+ "error": f"Origin {self.get_origin_from_request(request)} not allowed when login is disabled. Set login and password or add your URL to ALLOWED_ORIGINS env variable. Currently allowed origins: {','.join(origin_check['allowed_origins'])}",
}
# generate a csrf token if it doesn't exist
From a92b2627c8011267a7df7f8ea5971b546d1473a4 Mon Sep 17 00:00:00 2001
From: frdel <38891707+frdel@users.noreply.github.com>
Date: Tue, 2 Dec 2025 12:36:42 +0100
Subject: [PATCH 007/436] parse LLM chunk fix, alpine store state save/load
---
models.py | 4 +--
webui/js/AlpineStore.js | 76 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 78 insertions(+), 2 deletions(-)
diff --git a/models.py b/models.py
index fbc2694dfd..55c58a369d 100644
--- a/models.py
+++ b/models.py
@@ -813,7 +813,7 @@ def _parse_chunk(chunk: Any) -> ChatChunk:
message.get("content", "")
if isinstance(message, dict)
else getattr(message, "content", "")
- )
+ ) or ""
reasoning_delta = (
delta.get("reasoning_content", "")
if isinstance(delta, dict)
@@ -822,7 +822,7 @@ def _parse_chunk(chunk: Any) -> ChatChunk:
message.get("reasoning_content", "")
if isinstance(message, dict)
else getattr(message, "reasoning_content", "")
- )
+ ) or ""
return ChatChunk(reasoning_delta=reasoning_delta, response_delta=response_delta)
diff --git a/webui/js/AlpineStore.js b/webui/js/AlpineStore.js
index fea2d457f4..8ad68f9eda 100644
--- a/webui/js/AlpineStore.js
+++ b/webui/js/AlpineStore.js
@@ -44,4 +44,80 @@ export function createStore(name, initialState) {
*/
export function getStore(name) {
return /** @type {T | undefined} */ (stores.get(name));
+}
+
+/**
+ * Save current state of a store into a plain object, with optional include/exclude filters.
+ * If exclude (blacklist) is provided and non-empty, everything except excluded keys is saved.
+ * Otherwise, if include (whitelist) is provided and non-empty, only included keys are saved.
+ * If both are empty, all own enumerable properties are saved.
+ * @param {object} store
+ * @param {string[]} [include]
+ * @param {string[]} [exclude]
+ * @returns {object}
+ */
+export function saveState(store, include = [], exclude = []) {
+ const hasExclude = Array.isArray(exclude) && exclude.length > 0;
+ const hasInclude = !hasExclude && Array.isArray(include) && include.length > 0;
+
+ /** @type {Record<string, any>} */
+ const snapshot = {};
+
+ for (const key of Object.keys(store)) {
+ if (hasExclude) {
+ if (exclude.includes(key)) continue;
+ } else if (hasInclude) {
+ if (!include.includes(key)) continue;
+ }
+
+ const value = store[key];
+ if (typeof value === "function") continue;
+
+ if (Array.isArray(value)) {
+ snapshot[key] = value.map((item) =>
+ typeof item === "object" && item !== null ? { ...item } : item
+ );
+ } else if (typeof value === "object" && value !== null) {
+ snapshot[key] = { ...value };
+ } else {
+ snapshot[key] = value;
+ }
+ }
+
+ return snapshot;
+}
+
+/**
+ * Load a previously saved state object back into a store, honoring include/exclude filters.
+ * Filtering rules are the same as in saveState.
+ * @param {object} store
+ * @param {object} state
+ * @param {string[]} [include]
+ * @param {string[]} [exclude]
+ */
+export function loadState(store, state, include = [], exclude = []) {
+ if (!state) return;
+
+ const hasExclude = Array.isArray(exclude) && exclude.length > 0;
+ const hasInclude = !hasExclude && Array.isArray(include) && include.length > 0;
+
+ for (const key of Object.keys(state)) {
+ if (hasExclude) {
+ if (exclude.includes(key)) continue;
+ } else if (hasInclude) {
+ if (!include.includes(key)) continue;
+ }
+
+ const value = state[key];
+
+ if (Array.isArray(value)) {
+ store[key] = value.map((item) =>
+ typeof item === "object" && item !== null ? { ...item } : item
+ );
+ } else if (typeof value === "object" && value !== null) {
+ store[key] = { ...value };
+ } else {
+ store[key] = value;
+ }
+ }
}
\ No newline at end of file
From 2400dd5882007d250740286ac1e5d5257207df25 Mon Sep 17 00:00:00 2001
From: deci
Date: Tue, 9 Dec 2025 16:47:16 -0600
Subject: [PATCH 008/436] #81 - .env vars applied to settings
---
README.md | 1 +
docs/development.md | 14 +++
docs/installation.md | 48 ++++++++++-
python/helpers/settings.py | 172 ++++++++++++++++++++++---------------
4 files changed, 164 insertions(+), 71 deletions(-)
diff --git a/README.md b/README.md
index 8c0bb596ac..d2c0d9b4bc 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,7 @@ Agent Zero now supports **Projects** – isolated workspaces with their own prom
- The framework does not guide or limit the agent in any way. There are no hard-coded rails that agents have to follow.
- Every prompt, every small message template sent to the agent in its communication loop can be found in the **prompts/** folder and changed.
- Every default tool can be found in the **python/tools/** folder and changed or copied to create new predefined tools.
+- **Automated configuration** via `A0_SET_` environment variables for deployment automation and easy setup.

diff --git a/docs/development.md b/docs/development.md
index 54fe39580e..9faa1805bd 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -149,6 +149,20 @@ You're now ready to contribute to Agent Zero, create custom extensions, or modif
- See [extensibility](extensibility.md) for instructions on how to create custom extensions.
- See [contribution](contribution.md) for instructions on how to contribute to the framework.
+## Configuration via Environment Variables
+
+For development and testing, you can override default settings using the `.env` file with `A0_SET_` prefixed variables:
+
+```env
+# Add to your .env file
+A0_SET_chat_model_provider=ollama
+A0_SET_chat_model_name=llama3.2
+A0_SET_chat_model_api_base=http://localhost:11434
+A0_SET_memory_recall_interval=5
+```
+
+These environment variables automatically override the hardcoded defaults in `get_default_settings()` without modifying code. Useful for testing different configurations or multi-environment setups.
+
## Want to build your docker image?
- You can use the `DockerfileLocal` to build your docker image.
- Navigate to your project root in the terminal and run `docker build -f DockerfileLocal -t agent-zero-local --build-arg CACHE_DATE=$(date +%Y-%m-%d:%H:%M:%S) .`
diff --git a/docs/installation.md b/docs/installation.md
index b8688f0919..26bdab10f9 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -93,9 +93,55 @@ The following user guide provides instructions for installing and running Agent
- `/tmp/settings.json` - Your Agent Zero settings
> [!TIP]
-> Choose a location that's easy to access and backup. All your Agent Zero data
+> Choose a location that's easy to access and backup. All your Agent Zero data
> will be directly accessible in this directory.
+### Automated Configuration via Environment Variables
+
+Agent Zero settings can be automatically configured using environment variables with the `A0_SET_` prefix in your `.env` file. This enables automated deployments without manual configuration.
+
+**Usage:**
+Add variables to your `.env` file in the format:
+```
+A0_SET_{setting_name}={value}
+```
+
+**Examples:**
+```env
+# Model configuration
+A0_SET_chat_model_provider=anthropic
+A0_SET_chat_model_name=claude-3-5-sonnet-20241022
+A0_SET_chat_model_ctx_length=200000
+
+# Memory settings
+A0_SET_memory_recall_enabled=true
+A0_SET_memory_recall_interval=5
+
+# Agent configuration
+A0_SET_agent_profile=custom
+A0_SET_agent_memory_subdir=production
+```
+
+**Docker usage:**
+When running Docker, you can pass these as environment variables:
+```bash
+docker run -p 50080:80 \
+ -e A0_SET_chat_model_provider=anthropic \
+ -e A0_SET_chat_model_name=claude-3-5-sonnet-20241022 \
+ agent0ai/agent-zero
+```
+
+**Type conversion:**
+- Strings are used as-is
+- Numbers are automatically converted (e.g., "100000" becomes integer 100000)
+- Booleans accept: true/false, 1/0, yes/no, on/off (case-insensitive)
+- Dictionaries must be valid JSON (e.g., `{"temperature": "0"}`)
+
+**Notes:**
+- These override default values in settings.json
+- Sensitive settings (API keys, passwords) use their existing environment variables
+- Container/process restart required for changes to take effect
+
2.3. Run the container:
- In Docker Desktop, go back to the "Images" tab
- Click the `Run` button next to the `agent0ai/agent-zero` image
diff --git a/python/helpers/settings.py b/python/helpers/settings.py
index 9e71b7956f..b6ca2df0b6 100644
--- a/python/helpers/settings.py
+++ b/python/helpers/settings.py
@@ -4,7 +4,7 @@
import os
import re
import subprocess
-from typing import Any, Literal, TypedDict, cast
+from typing import Any, Literal, TypedDict, cast, TypeVar
import models
from python.helpers import runtime, whisper, defer, git
@@ -15,6 +15,38 @@
from python.helpers import dirty_json
+T = TypeVar('T')
+
+def get_default_value(name: str, value: T) -> T:
+ """
+ Load setting value from .env with A0_SET_ prefix, falling back to default.
+
+ Args:
+ name: Setting name (will be prefixed with A0_SET_)
+ value: Default value to use if env var not set
+
+ Returns:
+ Environment variable value (type-normalized) or default value
+ """
+ env_value = dotenv.get_dotenv_value(f"A0_SET_{name}")
+
+ if env_value is None:
+ return value
+
+ # Normalize type to match value param type
+ try:
+ if isinstance(value, bool):
+ return env_value.lower() in ('true', '1', 'yes', 'on') # type: ignore
+ elif isinstance(value, dict):
+ return json.loads(env_value) # type: ignore
+ elif isinstance(value, str):
+ return str(env_value).strip() # type: ignore
+ else:
+ return type(value)(env_value) # type: ignore
+ except (ValueError, TypeError, json.JSONDecodeError):
+ return value
+
+
class Settings(TypedDict):
version: str
@@ -1456,83 +1488,83 @@ def _write_sensitive_settings(settings: Settings):
def get_default_settings() -> Settings:
return Settings(
version=_get_version(),
- chat_model_provider="openrouter",
- chat_model_name="openai/gpt-4.1",
- chat_model_api_base="",
- chat_model_kwargs={"temperature": "0"},
- chat_model_ctx_length=100000,
- chat_model_ctx_history=0.7,
- chat_model_vision=True,
- chat_model_rl_requests=0,
- chat_model_rl_input=0,
- chat_model_rl_output=0,
- util_model_provider="openrouter",
- util_model_name="openai/gpt-4.1-mini",
- util_model_api_base="",
- util_model_ctx_length=100000,
- util_model_ctx_input=0.7,
- util_model_kwargs={"temperature": "0"},
- util_model_rl_requests=0,
- util_model_rl_input=0,
- util_model_rl_output=0,
- embed_model_provider="huggingface",
- embed_model_name="sentence-transformers/all-MiniLM-L6-v2",
- embed_model_api_base="",
- embed_model_kwargs={},
- embed_model_rl_requests=0,
- embed_model_rl_input=0,
- browser_model_provider="openrouter",
- browser_model_name="openai/gpt-4.1",
- browser_model_api_base="",
- browser_model_vision=True,
- browser_model_rl_requests=0,
- browser_model_rl_input=0,
- browser_model_rl_output=0,
- browser_model_kwargs={"temperature": "0"},
- browser_http_headers={},
- memory_recall_enabled=True,
- memory_recall_delayed=False,
- memory_recall_interval=3,
- memory_recall_history_len=10000,
- memory_recall_memories_max_search=12,
- memory_recall_solutions_max_search=8,
- memory_recall_memories_max_result=5,
- memory_recall_solutions_max_result=3,
- memory_recall_similarity_threshold=0.7,
- memory_recall_query_prep=True,
- memory_recall_post_filter=True,
- memory_memorize_enabled=True,
- memory_memorize_consolidation=True,
- memory_memorize_replace_threshold=0.9,
+ chat_model_provider=get_default_value("chat_model_provider", "openrouter"),
+ chat_model_name=get_default_value("chat_model_name", "openai/gpt-4.1"),
+ chat_model_api_base=get_default_value("chat_model_api_base", ""),
+ chat_model_kwargs=get_default_value("chat_model_kwargs", {"temperature": "0"}),
+ chat_model_ctx_length=get_default_value("chat_model_ctx_length", 100000),
+ chat_model_ctx_history=get_default_value("chat_model_ctx_history", 0.7),
+ chat_model_vision=get_default_value("chat_model_vision", True),
+ chat_model_rl_requests=get_default_value("chat_model_rl_requests", 0),
+ chat_model_rl_input=get_default_value("chat_model_rl_input", 0),
+ chat_model_rl_output=get_default_value("chat_model_rl_output", 0),
+ util_model_provider=get_default_value("util_model_provider", "openrouter"),
+ util_model_name=get_default_value("util_model_name", "openai/gpt-4.1-mini"),
+ util_model_api_base=get_default_value("util_model_api_base", ""),
+ util_model_ctx_length=get_default_value("util_model_ctx_length", 100000),
+ util_model_ctx_input=get_default_value("util_model_ctx_input", 0.7),
+ util_model_kwargs=get_default_value("util_model_kwargs", {"temperature": "0"}),
+ util_model_rl_requests=get_default_value("util_model_rl_requests", 0),
+ util_model_rl_input=get_default_value("util_model_rl_input", 0),
+ util_model_rl_output=get_default_value("util_model_rl_output", 0),
+ embed_model_provider=get_default_value("embed_model_provider", "huggingface"),
+ embed_model_name=get_default_value("embed_model_name", "sentence-transformers/all-MiniLM-L6-v2"),
+ embed_model_api_base=get_default_value("embed_model_api_base", ""),
+ embed_model_kwargs=get_default_value("embed_model_kwargs", {}),
+ embed_model_rl_requests=get_default_value("embed_model_rl_requests", 0),
+ embed_model_rl_input=get_default_value("embed_model_rl_input", 0),
+ browser_model_provider=get_default_value("browser_model_provider", "openrouter"),
+ browser_model_name=get_default_value("browser_model_name", "openai/gpt-4.1"),
+ browser_model_api_base=get_default_value("browser_model_api_base", ""),
+ browser_model_vision=get_default_value("browser_model_vision", True),
+ browser_model_rl_requests=get_default_value("browser_model_rl_requests", 0),
+ browser_model_rl_input=get_default_value("browser_model_rl_input", 0),
+ browser_model_rl_output=get_default_value("browser_model_rl_output", 0),
+ browser_model_kwargs=get_default_value("browser_model_kwargs", {"temperature": "0"}),
+ browser_http_headers=get_default_value("browser_http_headers", {}),
+ memory_recall_enabled=get_default_value("memory_recall_enabled", True),
+ memory_recall_delayed=get_default_value("memory_recall_delayed", False),
+ memory_recall_interval=get_default_value("memory_recall_interval", 3),
+ memory_recall_history_len=get_default_value("memory_recall_history_len", 10000),
+ memory_recall_memories_max_search=get_default_value("memory_recall_memories_max_search", 12),
+ memory_recall_solutions_max_search=get_default_value("memory_recall_solutions_max_search", 8),
+ memory_recall_memories_max_result=get_default_value("memory_recall_memories_max_result", 5),
+ memory_recall_solutions_max_result=get_default_value("memory_recall_solutions_max_result", 3),
+ memory_recall_similarity_threshold=get_default_value("memory_recall_similarity_threshold", 0.7),
+ memory_recall_query_prep=get_default_value("memory_recall_query_prep", True),
+ memory_recall_post_filter=get_default_value("memory_recall_post_filter", True),
+ memory_memorize_enabled=get_default_value("memory_memorize_enabled", True),
+ memory_memorize_consolidation=get_default_value("memory_memorize_consolidation", True),
+ memory_memorize_replace_threshold=get_default_value("memory_memorize_replace_threshold", 0.9),
api_keys={},
auth_login="",
auth_password="",
root_password="",
- agent_profile="agent0",
- agent_memory_subdir="default",
- agent_knowledge_subdir="custom",
- rfc_auto_docker=True,
- rfc_url="localhost",
+ agent_profile=get_default_value("agent_profile", "agent0"),
+ agent_memory_subdir=get_default_value("agent_memory_subdir", "default"),
+ agent_knowledge_subdir=get_default_value("agent_knowledge_subdir", "custom"),
+ rfc_auto_docker=get_default_value("rfc_auto_docker", True),
+ rfc_url=get_default_value("rfc_url", "localhost"),
rfc_password="",
- rfc_port_http=55080,
- rfc_port_ssh=55022,
- shell_interface="local" if runtime.is_dockerized() else "ssh",
- stt_model_size="base",
- stt_language="en",
- stt_silence_threshold=0.3,
- stt_silence_duration=1000,
- stt_waiting_timeout=2000,
- tts_kokoro=True,
- mcp_servers='{\n "mcpServers": {}\n}',
- mcp_client_init_timeout=10,
- mcp_client_tool_timeout=120,
- mcp_server_enabled=False,
+ rfc_port_http=get_default_value("rfc_port_http", 55080),
+ rfc_port_ssh=get_default_value("rfc_port_ssh", 55022),
+ shell_interface=get_default_value("shell_interface", "local" if runtime.is_dockerized() else "ssh"),
+ stt_model_size=get_default_value("stt_model_size", "base"),
+ stt_language=get_default_value("stt_language", "en"),
+ stt_silence_threshold=get_default_value("stt_silence_threshold", 0.3),
+ stt_silence_duration=get_default_value("stt_silence_duration", 1000),
+ stt_waiting_timeout=get_default_value("stt_waiting_timeout", 2000),
+ tts_kokoro=get_default_value("tts_kokoro", True),
+ mcp_servers=get_default_value("mcp_servers", '{\n "mcpServers": {}\n}'),
+ mcp_client_init_timeout=get_default_value("mcp_client_init_timeout", 10),
+ mcp_client_tool_timeout=get_default_value("mcp_client_tool_timeout", 120),
+ mcp_server_enabled=get_default_value("mcp_server_enabled", False),
mcp_server_token=create_auth_token(),
- a2a_server_enabled=False,
+ a2a_server_enabled=get_default_value("a2a_server_enabled", False),
variables="",
secrets="",
- litellm_global_kwargs={},
- update_check_enabled=True,
+ litellm_global_kwargs=get_default_value("litellm_global_kwargs", {}),
+ update_check_enabled=get_default_value("update_check_enabled", True),
)
From 01096c181338c0f39d659281004e1b1817d375ae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Tom=C3=A1=C5=A1ek?=
<38891707+frdel@users.noreply.github.com>
Date: Wed, 10 Dec 2025 10:27:44 +0100
Subject: [PATCH 009/436] Update settings.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
python/helpers/settings.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/python/helpers/settings.py b/python/helpers/settings.py
index b6ca2df0b6..866ab53065 100644
--- a/python/helpers/settings.py
+++ b/python/helpers/settings.py
@@ -36,7 +36,7 @@ def get_default_value(name: str, value: T) -> T:
# Normalize type to match value param type
try:
if isinstance(value, bool):
- return env_value.lower() in ('true', '1', 'yes', 'on') # type: ignore
+ return env_value.strip().lower() in ('true', '1', 'yes', 'on') # type: ignore
elif isinstance(value, dict):
return json.loads(env_value) # type: ignore
elif isinstance(value, str):
From e52b0ecf434f09303d2ab77ca93862136e0b6840 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Tom=C3=A1=C5=A1ek?=
<38891707+frdel@users.noreply.github.com>
Date: Wed, 10 Dec 2025 10:28:00 +0100
Subject: [PATCH 010/436] Update settings.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
python/helpers/settings.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/python/helpers/settings.py b/python/helpers/settings.py
index 866ab53065..48232054a8 100644
--- a/python/helpers/settings.py
+++ b/python/helpers/settings.py
@@ -38,7 +38,7 @@ def get_default_value(name: str, value: T) -> T:
if isinstance(value, bool):
return env_value.strip().lower() in ('true', '1', 'yes', 'on') # type: ignore
elif isinstance(value, dict):
- return json.loads(env_value) # type: ignore
+ return json.loads(env_value.strip()) # type: ignore
elif isinstance(value, str):
return str(env_value).strip() # type: ignore
else:
From 860948cf98a6aaf31bdc238a379fe060c95c9d41 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Tom=C3=A1=C5=A1ek?=
<38891707+frdel@users.noreply.github.com>
Date: Wed, 10 Dec 2025 10:29:33 +0100
Subject: [PATCH 011/436] Update settings.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
python/helpers/settings.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/python/helpers/settings.py b/python/helpers/settings.py
index 48232054a8..573fdadcd0 100644
--- a/python/helpers/settings.py
+++ b/python/helpers/settings.py
@@ -43,7 +43,10 @@ def get_default_value(name: str, value: T) -> T:
return str(env_value).strip() # type: ignore
else:
return type(value)(env_value) # type: ignore
- except (ValueError, TypeError, json.JSONDecodeError):
+ except (ValueError, TypeError, json.JSONDecodeError) as e:
+ PrintStyle(background_color="yellow", font_color="black").print(
+ f"Warning: Invalid value for A0_SET_{name}='{env_value}': {e}. Using default: {value}"
+ )
return value
From 0508eeb61635ce9299298bcf96e9066d8fd28a09 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Tom=C3=A1=C5=A1ek?=
<38891707+frdel@users.noreply.github.com>
Date: Wed, 10 Dec 2025 10:30:03 +0100
Subject: [PATCH 012/436] Update settings.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
python/helpers/settings.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/python/helpers/settings.py b/python/helpers/settings.py
index 573fdadcd0..d8358c2946 100644
--- a/python/helpers/settings.py
+++ b/python/helpers/settings.py
@@ -42,7 +42,7 @@ def get_default_value(name: str, value: T) -> T:
elif isinstance(value, str):
return str(env_value).strip() # type: ignore
else:
- return type(value)(env_value) # type: ignore
+ return type(value)(env_value.strip()) # type: ignore
except (ValueError, TypeError, json.JSONDecodeError) as e:
PrintStyle(background_color="yellow", font_color="black").print(
f"Warning: Invalid value for A0_SET_{name}='{env_value}': {e}. Using default: {value}"
From df2513393393500b2456e827985316ea0600609e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Tom=C3=A1=C5=A1ek?=
<38891707+frdel@users.noreply.github.com>
Date: Wed, 10 Dec 2025 10:31:30 +0100
Subject: [PATCH 013/436] Update installation.md
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
docs/installation.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/installation.md b/docs/installation.md
index 26bdab10f9..c611b1b798 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -138,7 +138,7 @@ docker run -p 50080:80 \
- Dictionaries must be valid JSON (e.g., `{"temperature": "0"}`)
**Notes:**
-- These override default values in settings.json
+- These provide initial default values when settings.json doesn't exist or when new settings are added to the application. Once a value is saved in settings.json, it takes precedence over these environment variables.
- Sensitive settings (API keys, passwords) use their existing environment variables
- Container/process restart required for changes to take effect
From d1be7f25319ffae55d92d47a6c6042810b3403b1 Mon Sep 17 00:00:00 2001
From: deci
Date: Wed, 10 Dec 2025 16:05:53 -0600
Subject: [PATCH 014/436] #84 - Projects support in MCP, A2A, API
---
docs/connectivity.md | 93 ++++++++++++
python/api/api_message.py | 15 +-
python/helpers/fasta2a_server.py | 30 +++-
python/helpers/mcp_server.py | 139 ++++++++++++++++--
.../settings/a2a/a2a-connection.html | 53 ++++++-
.../settings/external/api-examples.html | 62 +++++++-
.../settings/mcp/server/example.html | 81 +++++++---
7 files changed, 434 insertions(+), 39 deletions(-)
diff --git a/docs/connectivity.md b/docs/connectivity.md
index 8cfbe250ec..a6f465eaba 100644
--- a/docs/connectivity.md
+++ b/docs/connectivity.md
@@ -25,6 +25,7 @@ Send messages to Agent Zero and receive responses. Supports text messages, file
* `message` (string, required): The message to send
* `attachments` (array, optional): Array of `{filename, base64}` objects
* `lifetime_hours` (number, optional): Chat lifetime in hours (default: 24)
+* `project` (string, optional): Project name to activate (only on first message)
**Headers:**
* `X-API-KEY` (required)
@@ -169,6 +170,63 @@ async function sendWithAttachment() {
sendWithAttachment();
```
+#### Project Usage Example
+
+```javascript
+// Working with projects
+async function sendMessageWithProject() {
+ try {
+ // First message - activate project
+ const response = await fetch('YOUR_AGENT_ZERO_URL/api_message', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-API-KEY': 'YOUR_API_KEY'
+ },
+ body: JSON.stringify({
+ message: "Analyze the project structure",
+ project: "my-web-app" // Activates this project
+ })
+ });
+
+ const data = await response.json();
+
+ if (response.ok) {
+ console.log('✅ Project activated!');
+ console.log('Context ID:', data.context_id);
+ console.log('Response:', data.response);
+
+ // Continue conversation - project already set
+ const followUp = await fetch('YOUR_AGENT_ZERO_URL/api_message', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-API-KEY': 'YOUR_API_KEY'
+ },
+ body: JSON.stringify({
+ context_id: data.context_id,
+ message: "What files are in the project?"
+ // Do NOT include project field here - already set on first message
+ })
+ });
+
+ const followUpData = await followUp.json();
+ console.log('Follow-up response:', followUpData.response);
+ return followUpData;
+ } else {
+ console.error('❌ Error:', data.error);
+ return null;
+ }
+ } catch (error) {
+ console.error('❌ Request failed:', error);
+ return null;
+ }
+}
+
+// Call the function
+sendMessageWithProject();
+```
+
---
## `GET/POST /api_log_get`
@@ -568,6 +626,30 @@ Below is an example of a `mcp.json` configuration file that a client could use t
}
```
+### Project Support in MCP
+
+You can specify a project for MCP connections by including it in the URL path:
+
+```json
+{
+ "mcpServers": {
+ "agent-zero-with-project": {
+ "type": "sse",
+ "url": "YOUR_AGENT_ZERO_URL/mcp/t-YOUR_API_TOKEN/p-my-project-name/sse"
+ },
+ "agent-zero-http-with-project": {
+ "type": "streamable-http",
+ "url": "YOUR_AGENT_ZERO_URL/mcp/t-YOUR_API_TOKEN/p-my-project-name/http/"
+ }
+ }
+}
+```
+
+When a project is specified in the URL:
+- All new chats will be created within that project context
+- The agent will have access to project-specific instructions, knowledge, and file structure
+- Attempting to use an existing chat_id from a different project will result in an error
+
---
## A2A (Agent-to-Agent) Connectivity
@@ -583,3 +665,14 @@ To connect another agent to your Agent Zero instance, use the following URL form
```
YOUR_AGENT_ZERO_URL/a2a/t-YOUR_API_TOKEN
```
+
+To connect with a specific project active:
+
+```
+YOUR_AGENT_ZERO_URL/a2a/t-YOUR_API_TOKEN/p-PROJECT_NAME
+```
+
+When a project is specified:
+- All A2A conversations will run in the context of that project
+- The agent will have access to project-specific resources, instructions, and knowledge
+- This enables project-isolated agent-to-agent communication
diff --git a/python/api/api_message.py b/python/api/api_message.py
index 385d556dd2..f955024d2d 100644
--- a/python/api/api_message.py
+++ b/python/api/api_message.py
@@ -3,7 +3,7 @@
from datetime import datetime, timedelta
from agent import AgentContext, UserMessage, AgentContextType
from python.helpers.api import ApiHandler, Request, Response
-from python.helpers import files
+from python.helpers import files, projects
from python.helpers.print_style import PrintStyle
from werkzeug.utils import secure_filename
from initialize import initialize_agent
@@ -33,6 +33,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
message = input.get("message", "")
attachments = input.get("attachments", [])
lifetime_hours = input.get("lifetime_hours", 24) # Default 24 hours
+ project = input.get("project", None) # Optional project name
if not message:
return Response('{"error": "Message is required"}', status=400, mimetype="application/json")
@@ -71,12 +72,24 @@ async def process(self, input: dict, request: Request) -> dict | Response:
context = AgentContext.use(context_id)
if not context:
return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
+
+ # Validation: if project is provided but context already has different project
+ existing_project = context.get_data(projects.CONTEXT_DATA_KEY_PROJECT)
+ if project and existing_project and existing_project != project:
+ return Response('{"error": "Project can only be set on first message"}', status=400, mimetype="application/json")
else:
config = initialize_agent()
context = AgentContext(config=config, type=AgentContextType.USER)
AgentContext.use(context.id)
context_id = context.id
+ # Activate project if provided
+ if project:
+ try:
+ projects.activate_project(context_id, project)
+ except Exception as e:
+ return Response(f'{{"error": "Failed to activate project: {str(e)}"}}', status=400, mimetype="application/json")
+
# Update chat lifetime
with self._cleanup_lock:
self._chat_lifetimes[context_id] = datetime.now() + timedelta(hours=lifetime_hours)
diff --git a/python/helpers/fasta2a_server.py b/python/helpers/fasta2a_server.py
index d058d1b683..50d4a4908c 100644
--- a/python/helpers/fasta2a_server.py
+++ b/python/helpers/fasta2a_server.py
@@ -5,8 +5,9 @@
from typing import Any, List
import contextlib
import threading
+import contextvars
-from python.helpers import settings
+from python.helpers import settings, projects
from starlette.requests import Request
# Local imports
@@ -60,6 +61,9 @@ async def update_task(self, **kwargs):
_PRINTER = PrintStyle(italic=True, font_color="purple", padding=False)
+# Context variable to store project name from URL
+_a2a_project_name: contextvars.ContextVar[str | None] = contextvars.ContextVar('a2a_project_name', default=None)
+
class AgentZeroWorker(Worker): # type: ignore[misc]
"""Agent Zero implementation of FastA2A Worker."""
@@ -84,6 +88,16 @@ async def run_task(self, params: Any) -> None: # params: TaskSendParams
cfg = initialize_agent()
context = AgentContext(cfg, type=AgentContextType.BACKGROUND)
+ # Activate project if specified in URL
+ project_name = _a2a_project_name.get()
+ if project_name:
+ try:
+ projects.activate_project(context.id, project_name)
+ _PRINTER.print(f"[A2A] Activated project: {project_name}")
+ except Exception as e:
+ _PRINTER.print(f"[A2A] Failed to activate project: {e}")
+ raise Exception(f"Failed to activate project: {str(e)}")
+
# Log user message so it appears instantly in UI chat window
context.log.log(
type="user", # type: ignore[arg-type]
@@ -424,6 +438,9 @@ async def __call__(self, scope, receive, send):
if path.startswith('/a2a'):
path = path[4:] # Remove '/a2a' prefix
+ # Initialize project name
+ project_name = None
+
# Check if path matches token pattern /t-{token}/
if path.startswith('/t-'):
# Extract token from path
@@ -431,6 +448,14 @@ async def __call__(self, scope, receive, send):
path_parts = path[3:].split('/', 1) # Remove '/t-' prefix
request_token = path_parts[0]
remaining_path = '/' + path_parts[1] if len(path_parts) > 1 else '/'
+
+ # Check for project pattern /p-{project}/
+ if remaining_path.startswith('/p-'):
+ project_parts = remaining_path[3:].split('/', 1)
+ if project_parts[0]:
+ project_name = project_parts[0]
+ remaining_path = '/' + project_parts[1] if len(project_parts) > 1 else '/'
+ _PRINTER.print(f"[A2A] Extracted project from URL: {project_name}")
else:
request_token = path[3:]
remaining_path = '/'
@@ -452,6 +477,9 @@ async def __call__(self, scope, receive, send):
})
return
+ # Set project name in context variable for use by worker
+ _a2a_project_name.set(project_name)
+
# Update scope with cleaned path
scope = dict(scope)
scope['path'] = remaining_path
diff --git a/python/helpers/mcp_server.py b/python/helpers/mcp_server.py
index 4c080da69c..466cc2277e 100644
--- a/python/helpers/mcp_server.py
+++ b/python/helpers/mcp_server.py
@@ -3,23 +3,31 @@
from urllib.parse import urlparse
from openai import BaseModel
from pydantic import Field
-from fastmcp import FastMCP
+from fastmcp import FastMCP # type: ignore
+import contextvars
from agent import AgentContext, AgentContextType, UserMessage
from python.helpers.persist_chat import remove_chat
from initialize import initialize_agent
from python.helpers.print_style import PrintStyle
-from python.helpers import settings
+from python.helpers import settings, projects
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.types import ASGIApp, Receive, Scope, Send
-from fastmcp.server.http import create_sse_app
+from fastmcp.server.http import create_sse_app # type: ignore
from starlette.requests import Request
import threading
_PRINTER = PrintStyle(italic=True, font_color="green", padding=False)
+# Context variable to store project name from URL (per-request)
+_mcp_project_name: contextvars.ContextVar[str | None] = contextvars.ContextVar('mcp_project_name', default=None)
+
+# Session storage for project names (persists across SSE tool calls)
+# Key: connection identifier, Value: project name
+_mcp_project_sessions: dict[str, str | None] = {}
+_mcp_session_lock = threading.Lock()
mcp_server: FastMCP = FastMCP(
name="Agent Zero integrated MCP Server",
@@ -127,6 +135,25 @@ async def send_message(
description="The response from the remote Agent Zero Instance", title="response"
),
]:
+ # Get project name from session storage (persists across SSE connection)
+ # First try context variable (for HTTP requests), then session storage (for SSE tool calls)
+ project_name = _mcp_project_name.get()
+
+ # If not in context variable, try session storage using current token
+ if not project_name:
+ cfg = settings.get_settings()
+ current_token = cfg.get("mcp_server_token")
+ _PRINTER.print(f"[MCP] send_message - Looking for project. Token: '{current_token}' (type: {type(current_token).__name__})")
+ if current_token:
+ with _mcp_session_lock:
+ _PRINTER.print(f"[MCP] Session storage keys: {list(_mcp_project_sessions.keys())}")
+ _PRINTER.print(f"[MCP] Session storage: {_mcp_project_sessions}")
+ project_name = _mcp_project_sessions.get(current_token)
+ if project_name:
+ _PRINTER.print(f"[MCP] Retrieved project from session: {project_name}")
+ else:
+ _PRINTER.print(f"[MCP] No project found in session for token: {current_token}")
+
context: AgentContext | None = None
if chat_id:
context = AgentContext.get(chat_id)
@@ -137,10 +164,27 @@ async def send_message(
# whether we should save the chat or delete it afterwards
# If we continue a conversation, it must be persistent
persistent_chat = True
+
+ # Validation: if project is in URL but context has different project
+ if project_name:
+ existing_project = context.get_data(projects.CONTEXT_DATA_KEY_PROJECT)
+ if existing_project and existing_project != project_name:
+ return ToolError(
+ error=f"Chat belongs to project '{existing_project}' but URL specifies '{project_name}'",
+ chat_id=chat_id
+ )
else:
config = initialize_agent()
context = AgentContext(config=config, type=AgentContextType.BACKGROUND)
+ # Activate project if specified in URL
+ if project_name:
+ try:
+ projects.activate_project(context.id, project_name)
+ _PRINTER.print(f"[MCP] Activated project: {project_name}")
+ except Exception as e:
+ return ToolError(error=f"Failed to activate project: {str(e)}", chat_id="")
+
if not message:
return ToolError(
error="Message is required", chat_id=context.id if persistent_chat else ""
@@ -325,10 +369,10 @@ def reconfigure(self, token: str):
def _create_custom_http_app(self, streamable_http_path, auth_server_provider, auth_settings, debug, routes):
"""Create a custom HTTP app that manages the session manager manually."""
- from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app
- from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
+ from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app # type: ignore
+ from mcp.server.streamable_http_manager import StreamableHTTPSessionManager # type: ignore
from starlette.routing import Mount
- from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware
+ from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware # type: ignore
import anyio
server_routes = []
@@ -408,12 +452,47 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
# Route based on path
path = scope.get("path", "")
- if f"/t-{self.token}/sse" in path or f"t-{self.token}/messages" in path:
- # Route to SSE app
- await sse_app(scope, receive, send)
- elif f"/t-{self.token}/http" in path:
- # Route to HTTP app
- await http_app(scope, receive, send)
+ # Check for token in path (with or without project segment)
+ # Patterns: /t-{token}/sse, /t-{token}/p-{project}/sse, etc.
+ has_token = f"/t-{self.token}/" in path or f"t-{self.token}/" in path
+
+ # Extract project from path BEFORE cleaning (for session storage)
+ project_name = None
+ if "/p-" in path:
+ try:
+ parts = path.split("/p-")
+ if len(parts) > 1:
+ project_part = parts[1].split("/")[0]
+ if project_part:
+ project_name = project_part
+ _PRINTER.print(f"[MCP] Proxy extracted project from URL: {project_name}")
+ except Exception as e:
+ _PRINTER.print(f"[MCP] Failed to extract project in proxy: {e}")
+
+ # Store project in session (persists across SSE connection)
+ if self.token and project_name:
+ with _mcp_session_lock:
+ _mcp_project_sessions[self.token] = project_name
+ _PRINTER.print(f"[MCP] Stored project '{project_name}' for token '{self.token}' (type: {type(self.token).__name__}) in proxy")
+
+ # Strip project segment from path if present (e.g., /p-project_name/)
+ # This is needed because the underlying MCP apps were configured without project paths
+ cleaned_path = path
+ if "/p-" in path:
+ # Remove /p-{project}/ segment: /t-TOKEN/p-PROJECT/sse -> /t-TOKEN/sse
+ import re
+ cleaned_path = re.sub(r'/p-[^/]+/', '/', path)
+
+ # Update scope with cleaned path for the underlying app
+ modified_scope = dict(scope)
+ modified_scope['path'] = cleaned_path
+
+ if has_token and ("/sse" in path or "/messages" in path):
+ # Route to SSE app with cleaned path
+ await sse_app(modified_scope, receive, send)
+ elif has_token and "/http" in path:
+ # Route to HTTP app with cleaned path
+ await http_app(modified_scope, receive, send)
else:
raise StarletteHTTPException(
status_code=403, detail="MCP forbidden"
@@ -430,4 +509,40 @@ async def mcp_middleware(request: Request, call_next):
status_code=403, detail="MCP server is disabled in settings."
)
+ # Extract project from URL path if present (pattern: /mcp/t-{token}/p-{project}/...)
+ path = request.url.path
+ project_name = None
+ token = None
+
+ # Extract token from path
+ if "/t-" in path:
+ token_parts = path.split("/t-")
+ if len(token_parts) > 1:
+ token = token_parts[1].split("/")[0]
+
+ # Extract project if present
+ if "/p-" in path:
+ try:
+ parts = path.split("/p-")
+ if len(parts) > 1:
+ project_part = parts[1].split("/")[0]
+ if project_part:
+ project_name = project_part
+ _PRINTER.print(f"[MCP] Extracted project from URL: {project_name}")
+ except Exception as e:
+ _PRINTER.print(f"[MCP] Failed to extract project from URL: {e}")
+
+ # Debug logging
+ _PRINTER.print(f"[MCP] Middleware - Path: {path}, Token: {token}, Project: {project_name}")
+
+ # Store project in session dict ONLY if we found one (don't overwrite with None)
+ # The proxy already handles project extraction before path cleaning
+ if token and project_name:
+ with _mcp_session_lock:
+ _mcp_project_sessions[token] = project_name
+ _PRINTER.print(f"[MCP] Middleware stored project '{project_name}' for token session")
+
+ # Also set in context variable for backwards compatibility
+ _mcp_project_name.set(project_name)
+
return await call_next(request)
diff --git a/webui/components/settings/a2a/a2a-connection.html b/webui/components/settings/a2a/a2a-connection.html
index c71c575f26..fc640a68d6 100644
--- a/webui/components/settings/a2a/a2a-connection.html
+++ b/webui/components/settings/a2a/a2a-connection.html
@@ -21,10 +21,21 @@ API Token Infor
A2A Connection URL
+
+
+
+
+
+
+
diff --git a/webui/components/settings/external/api-examples.html b/webui/components/settings/external/api-examples.html
index 08569a1e91..6155946826 100644
--- a/webui/components/settings/external/api-examples.html
+++ b/webui/components/settings/external/api-examples.html
@@ -34,7 +34,8 @@ API Referen
• context_id (string, optional): Existing chat context ID
• message (string, required): The message to send
• attachments (array, optional): Array of {filename, base64} objects
- • lifetime_hours (number, optional): Chat lifetime in hours (default: 24)
+ • lifetime_hours (number, optional): Chat lifetime in hours (default: 24)
+ • project (string, optional): Project name to activate (only on first message)
Headers: X-API-KEY (required), Content-Type: application/json
@@ -52,6 +53,9 @@
Conversation Continuation Example
File Attachment Example
+
+ Project Usage Example
+
@@ -602,11 +606,66 @@ Example Title
// Run the complete workflow
attachmentWorkflow();`;
+ // Project usage example
+ const projectExample = `// Working with projects
+async function sendMessageWithProject() {
+ try {
+ // First message - activate project
+ const response = await fetch('${url}/api_message', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-API-KEY': '${token}'
+ },
+ body: JSON.stringify({
+ message: "Analyze the project structure",
+ project: "my-web-app" // Activates this project
+ })
+ });
+
+ const data = await response.json();
+
+ if (response.ok) {
+ console.log('✅ Project activated!');
+ console.log('Context ID:', data.context_id);
+ console.log('Response:', data.response);
+
+ // Continue conversation - project already set
+ const followUp = await fetch('${url}/api_message', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-API-KEY': '${token}'
+ },
+ body: JSON.stringify({
+ context_id: data.context_id,
+ message: "What files are in the project?"
+ // Do NOT include project field here - already set on first message
+ })
+ });
+
+ const followUpData = await followUp.json();
+ console.log('Follow-up response:', followUpData.response);
+ return followUpData;
+ } else {
+ console.error('❌ Error:', data.error);
+ return null;
+ }
+ } catch (error) {
+ console.error('❌ Request failed:', error);
+ return null;
+ }
+}
+
+// Call the function
+sendMessageWithProject();`;
+
// Initialize ACE editors
const editors = [
{ id: "api-basic-example", content: basicExample },
{ id: "api-continuation-example", content: continuationExample },
{ id: "api-attachment-example", content: attachmentExample },
+ { id: "api-project-example", content: projectExample },
{ id: "api-log-get-example", content: logGetExample },
{ id: "api-log-post-example", content: logPostExample },
{ id: "api-terminate-example", content: terminateExample },
@@ -635,6 +694,7 @@ Example Title
#api-basic-example,
#api-continuation-example,
#api-attachment-example,
+ #api-project-example,
#api-log-get-example,
#api-log-post-example,
#api-terminate-example,
diff --git a/webui/components/settings/mcp/server/example.html b/webui/components/settings/mcp/server/example.html
index 4a1e033a63..db50343f47 100644
--- a/webui/components/settings/mcp/server/example.html
+++ b/webui/components/settings/mcp/server/example.html
@@ -20,37 +20,78 @@ API Token Infor
Example MCP Server Configuration JSON
+
+
+
+
+
+
+
From 99e8c76f5f522b3daae0bb8096aa4c6453c3c369 Mon Sep 17 00:00:00 2001
From: frdel <38891707+frdel@users.noreply.github.com>
Date: Thu, 11 Dec 2025 12:50:50 +0100
Subject: [PATCH 015/436] support uppercase A0_SET_* in .env
---
python/helpers/settings.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/python/helpers/settings.py b/python/helpers/settings.py
index d8358c2946..f09124bd70 100644
--- a/python/helpers/settings.py
+++ b/python/helpers/settings.py
@@ -28,7 +28,7 @@ def get_default_value(name: str, value: T) -> T:
Returns:
Environment variable value (type-normalized) or default value
"""
- env_value = dotenv.get_dotenv_value(f"A0_SET_{name}")
+ env_value = dotenv.get_dotenv_value(f"A0_SET_{name}", dotenv.get_dotenv_value(f"A0_SET_{name.upper()}", None))
if env_value is None:
return value
From 3abcf1965119967d64f2c63d1cc8b8926eb5d16a Mon Sep 17 00:00:00 2001
From: deci
Date: Thu, 11 Dec 2025 10:07:55 -0600
Subject: [PATCH 016/436] #84 - Fix MCP/A2A project activation
---
python/helpers/fasta2a_server.py | 24 ++++++++---
python/helpers/mcp_server.py | 74 ++++----------------------------
2 files changed, 26 insertions(+), 72 deletions(-)
diff --git a/python/helpers/fasta2a_server.py b/python/helpers/fasta2a_server.py
index 50d4a4908c..6936632580 100644
--- a/python/helpers/fasta2a_server.py
+++ b/python/helpers/fasta2a_server.py
@@ -5,7 +5,7 @@
from typing import Any, List
import contextlib
import threading
-import contextvars
+from collections import deque
from python.helpers import settings, projects
from starlette.requests import Request
@@ -61,8 +61,10 @@ async def update_task(self, **kwargs):
_PRINTER = PrintStyle(italic=True, font_color="purple", padding=False)
-# Context variable to store project name from URL
-_a2a_project_name: contextvars.ContextVar[str | None] = contextvars.ContextVar('a2a_project_name', default=None)
+# FIFO queue to pass project names from request context to worker context
+# Each request appends project (or None), worker pops in same order
+_a2a_project_queue: deque[str | None] = deque()
+_a2a_project_lock = threading.Lock()
class AgentZeroWorker(Worker): # type: ignore[misc]
@@ -88,8 +90,14 @@ async def run_task(self, params: Any) -> None: # params: TaskSendParams
cfg = initialize_agent()
context = AgentContext(cfg, type=AgentContextType.BACKGROUND)
- # Activate project if specified in URL
- project_name = _a2a_project_name.get()
+ # Retrieve project from queue (FIFO matches task processing order)
+ project_name = None
+ with _a2a_project_lock:
+ if _a2a_project_queue:
+ project_name = _a2a_project_queue.popleft()
+ _PRINTER.print(f"[A2A] Retrieved project from queue: {project_name}")
+
+ # Activate project if specified
if project_name:
try:
projects.activate_project(context.id, project_name)
@@ -477,8 +485,10 @@ async def __call__(self, scope, receive, send):
})
return
- # Set project name in context variable for use by worker
- _a2a_project_name.set(project_name)
+ # Store project in queue for worker to retrieve (maintains FIFO order)
+ with _a2a_project_lock:
+ _a2a_project_queue.append(project_name) # None is valid (no project)
+ _PRINTER.print(f"[A2A] Appended project to queue: {project_name}")
# Update scope with cleaned path
scope = dict(scope)
diff --git a/python/helpers/mcp_server.py b/python/helpers/mcp_server.py
index 466cc2277e..8db6913eee 100644
--- a/python/helpers/mcp_server.py
+++ b/python/helpers/mcp_server.py
@@ -24,11 +24,6 @@
# Context variable to store project name from URL (per-request)
_mcp_project_name: contextvars.ContextVar[str | None] = contextvars.ContextVar('mcp_project_name', default=None)
-# Session storage for project names (persists across SSE tool calls)
-# Key: connection identifier, Value: project name
-_mcp_project_sessions: dict[str, str | None] = {}
-_mcp_session_lock = threading.Lock()
-
mcp_server: FastMCP = FastMCP(
name="Agent Zero integrated MCP Server",
instructions="""
@@ -135,24 +130,10 @@ async def send_message(
description="The response from the remote Agent Zero Instance", title="response"
),
]:
- # Get project name from session storage (persists across SSE connection)
- # First try context variable (for HTTP requests), then session storage (for SSE tool calls)
+ # Get project name from context variable (set in proxy __call__)
project_name = _mcp_project_name.get()
-
- # If not in context variable, try session storage using current token
- if not project_name:
- cfg = settings.get_settings()
- current_token = cfg.get("mcp_server_token")
- _PRINTER.print(f"[MCP] send_message - Looking for project. Token: '{current_token}' (type: {type(current_token).__name__})")
- if current_token:
- with _mcp_session_lock:
- _PRINTER.print(f"[MCP] Session storage keys: {list(_mcp_project_sessions.keys())}")
- _PRINTER.print(f"[MCP] Session storage: {_mcp_project_sessions}")
- project_name = _mcp_project_sessions.get(current_token)
- if project_name:
- _PRINTER.print(f"[MCP] Retrieved project from session: {project_name}")
- else:
- _PRINTER.print(f"[MCP] No project found in session for token: {current_token}")
+ if project_name:
+ _PRINTER.print(f"[MCP] send_message using project: {project_name}")
context: AgentContext | None = None
if chat_id:
@@ -456,7 +437,7 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
# Patterns: /t-{token}/sse, /t-{token}/p-{project}/sse, etc.
has_token = f"/t-{self.token}/" in path or f"t-{self.token}/" in path
- # Extract project from path BEFORE cleaning (for session storage)
+ # Extract project from path BEFORE cleaning and set in context variable
project_name = None
if "/p-" in path:
try:
@@ -469,11 +450,10 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
except Exception as e:
_PRINTER.print(f"[MCP] Failed to extract project in proxy: {e}")
- # Store project in session (persists across SSE connection)
- if self.token and project_name:
- with _mcp_session_lock:
- _mcp_project_sessions[self.token] = project_name
- _PRINTER.print(f"[MCP] Stored project '{project_name}' for token '{self.token}' (type: {type(self.token).__name__}) in proxy")
+ # Store project in context variable (will be available in send_message)
+ _mcp_project_name.set(project_name)
+ if project_name:
+ _PRINTER.print(f"[MCP] Set project in context variable: {project_name}")
# Strip project segment from path if present (e.g., /p-project_name/)
# This is needed because the underlying MCP apps were configured without project paths
@@ -500,7 +480,7 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
async def mcp_middleware(request: Request, call_next):
-
+ """Middleware to check if MCP server is enabled."""
# check if MCP server is enabled
cfg = settings.get_settings()
if not cfg["mcp_server_enabled"]:
@@ -509,40 +489,4 @@ async def mcp_middleware(request: Request, call_next):
status_code=403, detail="MCP server is disabled in settings."
)
- # Extract project from URL path if present (pattern: /mcp/t-{token}/p-{project}/...)
- path = request.url.path
- project_name = None
- token = None
-
- # Extract token from path
- if "/t-" in path:
- token_parts = path.split("/t-")
- if len(token_parts) > 1:
- token = token_parts[1].split("/")[0]
-
- # Extract project if present
- if "/p-" in path:
- try:
- parts = path.split("/p-")
- if len(parts) > 1:
- project_part = parts[1].split("/")[0]
- if project_part:
- project_name = project_part
- _PRINTER.print(f"[MCP] Extracted project from URL: {project_name}")
- except Exception as e:
- _PRINTER.print(f"[MCP] Failed to extract project from URL: {e}")
-
- # Debug logging
- _PRINTER.print(f"[MCP] Middleware - Path: {path}, Token: {token}, Project: {project_name}")
-
- # Store project in session dict ONLY if we found one (don't overwrite with None)
- # The proxy already handles project extraction before path cleaning
- if token and project_name:
- with _mcp_session_lock:
- _mcp_project_sessions[token] = project_name
- _PRINTER.print(f"[MCP] Middleware stored project '{project_name}' for token session")
-
- # Also set in context variable for backwards compatibility
- _mcp_project_name.set(project_name)
-
return await call_next(request)
From 10bc874f05180a3b3655b74279615b1d80862907 Mon Sep 17 00:00:00 2001
From: linuztx
Date: Fri, 12 Dec 2025 10:11:12 +0800
Subject: [PATCH 017/436] refactor: migrate user data to usr/ + update frontend
paths
---
initialize.py | 6 +
python/api/api_files_get.py | 2 +-
python/api/api_message.py | 4 +-
python/api/message.py | 4 +-
python/api/upload.py | 2 +-
python/helpers/backup.py | 26 +----
python/helpers/dotenv.py | 2 +-
python/helpers/email_client.py | 2 +-
python/helpers/files.py | 4 +
python/helpers/memory.py | 22 +++-
python/helpers/migration.py | 110 ++++++++++++++++++
python/helpers/persist_chat.py | 2 +-
python/helpers/secrets.py | 2 +-
python/helpers/settings.py | 8 +-
python/helpers/task_scheduler.py | 2 +-
python/tools/browser_agent.py | 2 +-
run_ui.py | 3 +
.../chat/attachments/attachmentsStore.js | 4 +-
.../settings/external/api-examples.html | 8 +-
19 files changed, 170 insertions(+), 45 deletions(-)
create mode 100644 python/helpers/migration.py
diff --git a/initialize.py b/initialize.py
index ec26227fa9..1ef2d0f886 100644
--- a/initialize.py
+++ b/initialize.py
@@ -140,6 +140,12 @@ def initialize_preload():
import preload
return defer.DeferredTask().start_task(preload.preload)
+def initialize_migration():
+ from python.helpers import migration
+ # run migration
+ migration.migrate_user_data()
+ # reload settings to ensure new paths are picked up
+ settings.reload_settings()
def _args_override(config):
# update config with runtime args
diff --git a/python/api/api_files_get.py b/python/api/api_files_get.py
index e021af60fe..4d6dc20594 100644
--- a/python/api/api_files_get.py
+++ b/python/api/api_files_get.py
@@ -50,7 +50,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
if path.startswith("/a0/tmp/uploads/"):
# Internal path - convert to external
filename = path.replace("/a0/tmp/uploads/", "")
- external_path = files.get_abs_path("tmp/uploads", filename)
+ external_path = files.get_abs_path("usr/uploads", filename)
filename = os.path.basename(external_path)
elif path.startswith("/a0/"):
# Other internal Agent Zero paths
diff --git a/python/api/api_message.py b/python/api/api_message.py
index 385d556dd2..e3fd7e06b8 100644
--- a/python/api/api_message.py
+++ b/python/api/api_message.py
@@ -40,8 +40,8 @@ async def process(self, input: dict, request: Request) -> dict | Response:
# Handle attachments (base64 encoded)
attachment_paths = []
if attachments:
- upload_folder_int = "/a0/tmp/uploads"
- upload_folder_ext = files.get_abs_path("tmp/uploads")
+ upload_folder_int = "/a0/usr/uploads"
+ upload_folder_ext = files.get_abs_path("usr/uploads")
os.makedirs(upload_folder_ext, exist_ok=True)
for attachment in attachments:
diff --git a/python/api/message.py b/python/api/message.py
index bd378e4f79..e328b06606 100644
--- a/python/api/message.py
+++ b/python/api/message.py
@@ -29,8 +29,8 @@ async def communicate(self, input: dict, request: Request):
attachments = request.files.getlist("attachments")
attachment_paths = []
- upload_folder_int = "/a0/tmp/uploads"
- upload_folder_ext = files.get_abs_path("tmp/uploads") # for development environment
+ upload_folder_int = "/a0/usr/uploads"
+ upload_folder_ext = files.get_abs_path("usr/uploads") # for development environment
if attachments:
os.makedirs(upload_folder_ext, exist_ok=True)
diff --git a/python/api/upload.py b/python/api/upload.py
index 4a596a577b..14b656e04f 100644
--- a/python/api/upload.py
+++ b/python/api/upload.py
@@ -14,7 +14,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
for file in file_list:
if file and self.allowed_file(file.filename): # Check file type
filename = secure_filename(file.filename) # type: ignore
- file.save(files.get_abs_path("tmp/upload", filename))
+ file.save(files.get_abs_path("usr/upload", filename))
saved_filenames.append(filename)
return {"filenames": saved_filenames} # Return saved filenames
diff --git a/python/helpers/backup.py b/python/helpers/backup.py
index 4e4873371d..8ebc070339 100644
--- a/python/helpers/backup.py
+++ b/python/helpers/backup.py
@@ -60,28 +60,12 @@ def _get_default_patterns(self) -> str:
# Ensure paths don't have double slashes
agent_root = self.agent_zero_root.rstrip('/')
- return f"""# Agent Zero Knowledge (excluding defaults)
-{agent_root}/knowledge/**
-!{agent_root}/knowledge/default/**
-
-# Agent Zero Instruments (excluding defaults)
-{agent_root}/instruments/**
-!{agent_root}/instruments/default/**
-
-# Memory (excluding embeddings cache)
-{agent_root}/memory/**
-!{agent_root}/memory/**/embeddings/**
-
-# Configuration and Settings (CRITICAL)
-{agent_root}/.env
-{agent_root}/tmp/settings.json
-{agent_root}/tmp/secrets.env
-{agent_root}/tmp/chats/**
-{agent_root}/tmp/scheduler/**
-{agent_root}/tmp/uploads/**
-
-# User data
+ return f"""# User data
+# All persistent user data is now centralized in /usr for easier backup and restore
{agent_root}/usr/**
+
+# Explicitly include .env
+{agent_root}/usr/.env
"""
def _get_agent_zero_version(self) -> str:
diff --git a/python/helpers/dotenv.py b/python/helpers/dotenv.py
index 07ef0942be..3ce4d938fb 100644
--- a/python/helpers/dotenv.py
+++ b/python/helpers/dotenv.py
@@ -15,7 +15,7 @@ def load_dotenv():
def get_dotenv_file_path():
- return get_abs_path(".env")
+ return get_abs_path("usr/.env")
def get_dotenv_value(key: str, default: Any = None):
# load_dotenv()
diff --git a/python/helpers/email_client.py b/python/helpers/email_client.py
index 741e008991..45e795ebed 100644
--- a/python/helpers/email_client.py
+++ b/python/helpers/email_client.py
@@ -537,7 +537,7 @@ async def read_messages(
port: int = 993,
username: str = "",
password: str = "",
- download_folder: str = "tmp/email",
+ download_folder: str = "usr/email",
options: Optional[Dict[str, Any]] = None,
filter: Optional[Dict[str, Any]] = None,
) -> List[Message]:
diff --git a/python/helpers/files.py b/python/helpers/files.py
index 0ed9cb06d6..8fcf799c8f 100644
--- a/python/helpers/files.py
+++ b/python/helpers/files.py
@@ -410,6 +410,10 @@ def move_dir(old_path: str, new_path: str):
abs_new = get_abs_path(new_path)
if not os.path.isdir(abs_old):
return # nothing to rename
+
+ # ensure parent directory exists
+ os.makedirs(os.path.dirname(abs_new), exist_ok=True)
+
try:
os.rename(abs_old, abs_new)
except Exception:
diff --git a/python/helpers/memory.py b/python/helpers/memory.py
index 8c8785c5af..d18af54530 100644
--- a/python/helpers/memory.py
+++ b/python/helpers/memory.py
@@ -139,7 +139,7 @@ def initialize(
log_item.stream(progress="\nInitializing VectorDB")
em_dir = files.get_abs_path(
- "memory/embeddings"
+ "tmp/memory/embeddings"
) # just caching, no need to parameterize
db_dir = abs_db_dir(memory_subdir)
@@ -333,6 +333,16 @@ def _preload_knowledge_folders(
recursive=True,
)
+ # load custom instruments descriptions
+ index = knowledge_import.load_knowledge(
+ log_item,
+ files.get_abs_path("usr/instruments"),
+ index,
+ {"area": Memory.Area.INSTRUMENTS.value},
+ filename_pattern="**/*.md",
+ recursive=True,
+ )
+
return index
def get_document_by_id(self, id: str) -> Document | None:
@@ -483,7 +493,7 @@ def get_timestamp():
def get_custom_knowledge_subdir_abs(agent: Agent) -> str:
for dir in agent.config.knowledge_subdirs:
if dir != "default":
- return files.get_abs_path("knowledge", dir)
+ return files.get_abs_path("usr/knowledge", dir)
raise Exception("No custom knowledge subdir set")
@@ -499,7 +509,7 @@ def abs_db_dir(memory_subdir: str) -> str:
return files.get_abs_path(get_project_meta_folder(memory_subdir[9:]), "memory")
# standard subdirs
- return files.get_abs_path("memory", memory_subdir)
+ return files.get_abs_path("usr/memory", memory_subdir)
def abs_knowledge_dir(knowledge_subdir: str, *sub_dirs: str) -> str:
@@ -511,7 +521,9 @@ def abs_knowledge_dir(knowledge_subdir: str, *sub_dirs: str) -> str:
get_project_meta_folder(knowledge_subdir[9:]), "knowledge", *sub_dirs
)
# standard subdirs
- return files.get_abs_path("knowledge", knowledge_subdir, *sub_dirs)
+ if knowledge_subdir == "default":
+ return files.get_abs_path("knowledge", *sub_dirs)
+ return files.get_abs_path("usr/knowledge", knowledge_subdir, *sub_dirs)
def get_memory_subdir_abs(agent: Agent) -> str:
@@ -546,7 +558,7 @@ def get_existing_memory_subdirs() -> list[str]:
)
# Get subdirectories from memory folder
- subdirs = files.get_subdirectories("memory", exclude="embeddings")
+ subdirs = files.get_subdirectories("usr/memory")
project_subdirs = files.get_subdirectories(get_projects_parent_folder())
for project_subdir in project_subdirs:
diff --git a/python/helpers/migration.py b/python/helpers/migration.py
new file mode 100644
index 0000000000..b49fe6ac4d
--- /dev/null
+++ b/python/helpers/migration.py
@@ -0,0 +1,110 @@
+import os
+from python.helpers import files
+from python.helpers.print_style import PrintStyle
+
+def migrate_user_data() -> None:
+ """
+ Migrate user data from /tmp and other locations to /usr.
+ """
+
+ PrintStyle().print("Checking for data migration...")
+
+ # --- Migrate Directories -------------------------------------------------------
+ # Move directories from tmp/ or other source locations to usr/
+
+ _move_dir("tmp/chats", "usr/chats")
+ _move_dir("tmp/scheduler", "usr/scheduler")
+ _move_dir("tmp/uploads", "usr/uploads")
+ _move_dir("tmp/upload", "usr/upload")
+ _move_dir("tmp/downloads", "usr/downloads")
+ _move_dir("tmp/email", "usr/email")
+ _move_dir("knowledge/custom", "usr/knowledge")
+ _move_dir("instruments/custom", "usr/instruments")
+
+ # --- Migrate Files -------------------------------------------------------------
+ # Move specific configuration files to usr/
+
+ _move_file("tmp/settings.json", "usr/settings.json")
+ _move_file("tmp/secrets.env", "usr/secrets.env")
+ _move_file(".env", "usr/.env")
+
+ # --- Special Migration Cases ---------------------------------------------------
+
+ # Migrate Memory
+ _migrate_memory()
+
+ # Flatten default directories (knowledge/default -> knowledge/, etc.)
+ # We use _merge_dir_contents because we want to move the *contents* of default/
+ # into the parent directory, not move the default directory itself.
+ _merge_dir_contents("knowledge/default", "knowledge")
+ _merge_dir_contents("instruments/default", "instruments")
+
+ # --- Cleanup -------------------------------------------------------------------
+
+ # Remove obsolete directories after migration
+ _cleanup_obsolete()
+
+ PrintStyle().print("Migration check complete.")
+
+# --- Helper Functions ----------------------------------------------------------
+
+def _move_dir(src: str, dst: str) -> None:
+ """
+ Move a directory from src to dst if src exists and dst does not.
+ """
+ if files.exists(src) and not files.exists(dst):
+ PrintStyle().print(f"Migrating {src} to {dst}...")
+ files.move_dir(src, dst)
+
+def _move_file(src: str, dst: str) -> None:
+ """
+ Move a file from src to dst if src exists and dst does not.
+ """
+ if files.exists(src) and not files.exists(dst):
+ PrintStyle().print(f"Migrating {src} to {dst}...")
+ files.move_file(src, dst)
+
+def _migrate_memory(base_path: str = "memory") -> None:
+ """
+ Migrate memory subdirectories.
+ """
+ subdirs = files.get_subdirectories(base_path)
+ for subdir in subdirs:
+ if subdir == "embeddings":
+ # Special case: Embeddings
+ _move_dir("memory/embeddings", "tmp/memory/embeddings")
+ else:
+ # Move other memory items to usr/memory
+ dst = f"usr/memory/{subdir}"
+ _move_dir(f"memory/{subdir}", dst)
+
+def _merge_dir_contents(src_parent: str, dst_parent: str) -> None:
+ """
+ Moves all subdirectories from src_parent to dst_parent.
+ Useful for flattening structures like 'knowledge/default/*' -> 'knowledge/*'.
+ """
+ if not files.exists(src_parent):
+ return
+
+ # Iterate over subdirectories in the source parent
+ subdirs = files.get_subdirectories(src_parent)
+ for subdir in subdirs:
+ src = f"{src_parent}/{subdir}"
+ dst = f"{dst_parent}/{subdir}"
+
+ # Move the subdirectory if it doesn't exist in destination
+ _move_dir(src, dst)
+
+def _cleanup_obsolete() -> None:
+ """
+ Remove directories that are no longer needed.
+ """
+ to_remove = [
+ "knowledge/default",
+ "instruments/default",
+ "memory"
+ ]
+ for path in to_remove:
+ if files.exists(path):
+ PrintStyle().print(f"Removing {path}...")
+ files.delete_dir(path)
diff --git a/python/helpers/persist_chat.py b/python/helpers/persist_chat.py
index 55867e6fe5..9cb7083564 100644
--- a/python/helpers/persist_chat.py
+++ b/python/helpers/persist_chat.py
@@ -9,7 +9,7 @@
from python.helpers.log import Log, LogItem
-CHATS_FOLDER = "tmp/chats"
+CHATS_FOLDER = "usr/chats"
LOG_SIZE = 1000
CHAT_FILE_NAME = "chat.json"
diff --git a/python/helpers/secrets.py b/python/helpers/secrets.py
index 53ee85d9b0..e87faca88b 100644
--- a/python/helpers/secrets.py
+++ b/python/helpers/secrets.py
@@ -15,7 +15,7 @@
# New alias-based placeholder format §§secret(KEY)
ALIAS_PATTERN = r"§§secret\(([A-Za-z_][A-Za-z0-9_]*)\)"
-DEFAULT_SECRETS_FILE = "tmp/secrets.env"
+DEFAULT_SECRETS_FILE = "usr/secrets.env"
def alias_for_key(key: str, placeholder: str = "§§secret({key})") -> str:
diff --git a/python/helpers/settings.py b/python/helpers/settings.py
index 9e71b7956f..151c1f36ba 100644
--- a/python/helpers/settings.py
+++ b/python/helpers/settings.py
@@ -161,7 +161,7 @@ class SettingsOutput(TypedDict):
PASSWORD_PLACEHOLDER = "****PSWD****"
API_KEY_PLACEHOLDER = "************"
-SETTINGS_FILE = files.get_abs_path("tmp/settings.json")
+SETTINGS_FILE = files.get_abs_path("usr/settings.json")
_settings: Settings | None = None
@@ -1343,6 +1343,12 @@ def get_settings() -> Settings:
return norm
+def reload_settings() -> Settings:
+ global _settings
+ _settings = None
+ return get_settings()
+
+
def set_settings(settings: Settings, apply: bool = True):
global _settings
previous = _settings
diff --git a/python/helpers/task_scheduler.py b/python/helpers/task_scheduler.py
index 5f9321754a..f5c3ed0c2c 100644
--- a/python/helpers/task_scheduler.py
+++ b/python/helpers/task_scheduler.py
@@ -26,7 +26,7 @@
import pytz
from typing import Annotated
-SCHEDULER_FOLDER = "tmp/scheduler"
+SCHEDULER_FOLDER = "usr/scheduler"
# ----------------------
# Task Models
diff --git a/python/tools/browser_agent.py b/python/tools/browser_agent.py
index 6d5f085b26..d9a53f822a 100644
--- a/python/tools/browser_agent.py
+++ b/python/tools/browser_agent.py
@@ -56,7 +56,7 @@ async def _initialize(self):
disable_security=True,
chromium_sandbox=False,
accept_downloads=True,
- downloads_path=files.get_abs_path("tmp/downloads"),
+ downloads_path=files.get_abs_path("usr/downloads"),
allowed_domains=["*", "http://*", "https://*"],
executable_path=pw_binary,
keep_alive=True,
diff --git a/run_ui.py b/run_ui.py
index 1691f69e74..9872fbbb39 100644
--- a/run_ui.py
+++ b/run_ui.py
@@ -190,6 +190,9 @@ async def serve_index():
def run():
PrintStyle().print("Initializing framework...")
+ # migrate data before anything else
+ initialize.initialize_migration()
+
# Suppress only request logs but keep the startup messages
from werkzeug.serving import WSGIRequestHandler
from werkzeug.serving import make_server
diff --git a/webui/components/chat/attachments/attachmentsStore.js b/webui/components/chat/attachments/attachmentsStore.js
index 7e17b9b554..08b7815fde 100644
--- a/webui/components/chat/attachments/attachmentsStore.js
+++ b/webui/components/chat/attachments/attachmentsStore.js
@@ -259,11 +259,11 @@ const model = {
// Generate server-side API URL for file (for device sync)
getServerImgUrl(filename) {
- return `/image_get?path=/a0/tmp/uploads/${encodeURIComponent(filename)}`;
+ return `/image_get?path=/a0/usr/uploads/${encodeURIComponent(filename)}`;
},
getServerFileUrl(filename) {
- return `/a0/tmp/uploads/${encodeURIComponent(filename)}`;
+ return `/a0/usr/uploads/${encodeURIComponent(filename)}`;
},
// Check if file is an image based on extension
diff --git a/webui/components/settings/external/api-examples.html b/webui/components/settings/external/api-examples.html
index 08569a1e91..f0a2ce7f6e 100644
--- a/webui/components/settings/external/api-examples.html
+++ b/webui/components/settings/external/api-examples.html
@@ -146,7 +146,7 @@ API Referen
Parameters:
- • paths (array, required): Array of file paths to retrieve (e.g., ["/a0/tmp/uploads/file.txt"])
+ • paths (array, required): Array of file paths to retrieve (e.g., ["/a0/usr/uploads/file.txt"])
Headers: X-API-KEY (required), Content-Type: application/json
@@ -562,8 +562,8 @@
Example Title
// Example 1: Get specific files
const filePaths = [
- "/a0/tmp/uploads/document.txt",
- "/a0/tmp/uploads/data.json"
+ "/a0/usr/uploads/document.txt",
+ "/a0/usr/uploads/data.json"
];
getFiles(filePaths);
@@ -590,7 +590,7 @@
Example Title
console.log('Message sent with attachment');
// Step 2: Retrieve the uploaded file
- const retrievedFiles = await getFiles(["/a0/tmp/uploads/test.txt"]);
+ const retrievedFiles = await getFiles(["/a0/usr/uploads/test.txt"]);
if (retrievedFiles && retrievedFiles["test.txt"]) {
const originalContent = atob(retrievedFiles["test.txt"]);
From 5b6fbea1ed81698ba5c13bb1e536781ad3458c96 Mon Sep 17 00:00:00 2001
From: linuztx
Date: Fri, 12 Dec 2025 11:45:56 +0800
Subject: [PATCH 018/436] migration: add overwrite support for .env migration
---
python/helpers/migration.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/python/helpers/migration.py b/python/helpers/migration.py
index b49fe6ac4d..ea50c10c2f 100644
--- a/python/helpers/migration.py
+++ b/python/helpers/migration.py
@@ -26,7 +26,7 @@ def migrate_user_data() -> None:
_move_file("tmp/settings.json", "usr/settings.json")
_move_file("tmp/secrets.env", "usr/secrets.env")
- _move_file(".env", "usr/.env")
+ _move_file(".env", "usr/.env", overwrite=True)
# --- Special Migration Cases ---------------------------------------------------
@@ -48,19 +48,21 @@ def migrate_user_data() -> None:
# --- Helper Functions ----------------------------------------------------------
-def _move_dir(src: str, dst: str) -> None:
+def _move_dir(src: str, dst: str, overwrite: bool = False) -> None:
"""
Move a directory from src to dst if src exists and dst does not.
"""
- if files.exists(src) and not files.exists(dst):
+ if files.exists(src) and (not files.exists(dst) or overwrite):
PrintStyle().print(f"Migrating {src} to {dst}...")
+ if overwrite and files.exists(dst):
+ files.delete_dir(dst)
files.move_dir(src, dst)
-def _move_file(src: str, dst: str) -> None:
+def _move_file(src: str, dst: str, overwrite: bool = False) -> None:
"""
Move a file from src to dst if src exists and dst does not.
"""
- if files.exists(src) and not files.exists(dst):
+ if files.exists(src) and (not files.exists(dst) or overwrite):
PrintStyle().print(f"Migrating {src} to {dst}...")
files.move_file(src, dst)
From 4d11a2620d066a513235e1d2063d3493498e1f1d Mon Sep 17 00:00:00 2001
From: linuztx
Date: Fri, 12 Dec 2025 13:05:21 +0800
Subject: [PATCH 019/436] migration: reload .env after moving to usr/ to update
config
---
initialize.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/initialize.py b/initialize.py
index 1ef2d0f886..eb6120fdde 100644
--- a/initialize.py
+++ b/initialize.py
@@ -141,9 +141,11 @@ def initialize_preload():
return defer.DeferredTask().start_task(preload.preload)
def initialize_migration():
- from python.helpers import migration
+ from python.helpers import migration, dotenv
# run migration
migration.migrate_user_data()
+ # reload .env as it might have been moved
+ dotenv.load_dotenv()
# reload settings to ensure new paths are picked up
settings.reload_settings()
From 2b0c97f654f65c0f2794a311f2a2ccf168b2bd2b Mon Sep 17 00:00:00 2001
From: linuztx
Date: Fri, 12 Dec 2025 13:54:30 +0800
Subject: [PATCH 020/436] migration: force overwrite for scheduler, knowledge,
and instruments dirs
---
python/helpers/migration.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/python/helpers/migration.py b/python/helpers/migration.py
index ea50c10c2f..e27a419bc3 100644
--- a/python/helpers/migration.py
+++ b/python/helpers/migration.py
@@ -13,13 +13,13 @@ def migrate_user_data() -> None:
# Move directories from tmp/ or other source locations to usr/
_move_dir("tmp/chats", "usr/chats")
- _move_dir("tmp/scheduler", "usr/scheduler")
+ _move_dir("tmp/scheduler", "usr/scheduler", overwrite=True)
_move_dir("tmp/uploads", "usr/uploads")
_move_dir("tmp/upload", "usr/upload")
_move_dir("tmp/downloads", "usr/downloads")
_move_dir("tmp/email", "usr/email")
- _move_dir("knowledge/custom", "usr/knowledge")
- _move_dir("instruments/custom", "usr/instruments")
+ _move_dir("knowledge/custom", "usr/knowledge", overwrite=True)
+ _move_dir("instruments/custom", "usr/instruments", overwrite=True)
# --- Migrate Files -------------------------------------------------------------
# Move specific configuration files to usr/
From 844af16748c429c2be99c4fb4793212f77504e9e Mon Sep 17 00:00:00 2001
From: linuztx
Date: Fri, 12 Dec 2025 15:23:57 +0800
Subject: [PATCH 021/436] migration: correct custom knowledge directory path
mapping
---
python/helpers/memory.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/python/helpers/memory.py b/python/helpers/memory.py
index d18af54530..49ec7db4ce 100644
--- a/python/helpers/memory.py
+++ b/python/helpers/memory.py
@@ -493,6 +493,8 @@ def get_timestamp():
def get_custom_knowledge_subdir_abs(agent: Agent) -> str:
for dir in agent.config.knowledge_subdirs:
if dir != "default":
+ if dir == "custom":
+ return files.get_abs_path("usr/knowledge")
return files.get_abs_path("usr/knowledge", dir)
raise Exception("No custom knowledge subdir set")
@@ -523,6 +525,8 @@ def abs_knowledge_dir(knowledge_subdir: str, *sub_dirs: str) -> str:
# standard subdirs
if knowledge_subdir == "default":
return files.get_abs_path("knowledge", *sub_dirs)
+ if knowledge_subdir == "custom":
+ return files.get_abs_path("usr/knowledge", *sub_dirs)
return files.get_abs_path("usr/knowledge", knowledge_subdir, *sub_dirs)
From 2c87e17f18ace3c67468ea793fef0b63895a891b Mon Sep 17 00:00:00 2001
From: deci
Date: Fri, 12 Dec 2025 11:19:58 -0600
Subject: [PATCH 022/436] #84 - A2A passing the project via message_id-based
 dictionary instead of FIFO queue
---
python/helpers/fasta2a_server.py | 73 ++++++++++++++++++++++++++------
1 file changed, 60 insertions(+), 13 deletions(-)
diff --git a/python/helpers/fasta2a_server.py b/python/helpers/fasta2a_server.py
index 6936632580..613f8fb320 100644
--- a/python/helpers/fasta2a_server.py
+++ b/python/helpers/fasta2a_server.py
@@ -5,7 +5,6 @@
from typing import Any, List
import contextlib
import threading
-from collections import deque
from python.helpers import settings, projects
from starlette.requests import Request
@@ -61,9 +60,9 @@ async def update_task(self, **kwargs):
_PRINTER = PrintStyle(italic=True, font_color="purple", padding=False)
-# FIFO queue to pass project names from request context to worker context
-# Each request appends project (or None), worker pops in same order
-_a2a_project_queue: deque[str | None] = deque()
+# Map message_id to project name (thread-safe, no race conditions)
+# Each message has unique ID that correlates request to worker
+_a2a_message_projects: dict[str, str] = {}
_a2a_project_lock = threading.Lock()
@@ -90,12 +89,15 @@ async def run_task(self, params: Any) -> None: # params: TaskSendParams
cfg = initialize_agent()
context = AgentContext(cfg, type=AgentContextType.BACKGROUND)
- # Retrieve project from queue (FIFO matches task processing order)
+ # Retrieve project by message_id (direct lookup, no race conditions)
+ # Note: Request has messageId (camelCase), but FastA2A converts it to message_id (snake_case)
project_name = None
- with _a2a_project_lock:
- if _a2a_project_queue:
- project_name = _a2a_project_queue.popleft()
- _PRINTER.print(f"[A2A] Retrieved project from queue: {project_name}")
+ message_id = message.get('message_id') # FastA2A converts camelCase to snake_case
+ if message_id:
+ with _a2a_project_lock:
+ project_name = _a2a_message_projects.pop(message_id, None)
+ if project_name:
+ _PRINTER.print(f"[A2A] Retrieved project for message {message_id}: {project_name}")
# Activate project if specified
if project_name:
@@ -485,10 +487,55 @@ async def __call__(self, scope, receive, send):
})
return
- # Store project in queue for worker to retrieve (maintains FIFO order)
- with _a2a_project_lock:
- _a2a_project_queue.append(project_name) # None is valid (no project)
- _PRINTER.print(f"[A2A] Appended project to queue: {project_name}")
+ # If project specified, we need to extract message_id from request body
+ # to correlate project with the specific message (no race conditions)
+ if project_name:
+ # Buffer all messages for replay
+ received_messages = []
+ replay_index = 0
+ message_id_extracted = False
+ original_receive = receive
+
+ async def receive_wrapper():
+ nonlocal replay_index, message_id_extracted
+
+ # If replaying buffered messages, return them in order
+ if replay_index < len(received_messages):
+ msg = received_messages[replay_index]
+ replay_index += 1
+ return msg
+
+ # Otherwise, receive and buffer the next message
+ message = await original_receive()
+ received_messages.append(message)
+
+ # Parse message_id when we get the complete body
+ if message['type'] == 'http.request':
+ # If this is the last chunk, parse the full body
+ if not message.get('more_body', False) and not message_id_extracted:
+ message_id_extracted = True
+ try:
+ import json
+ # Reconstruct full body from all buffered messages
+ body_parts = [msg.get('body', b'') for msg in received_messages if msg['type'] == 'http.request']
+ full_body = b''.join(body_parts)
+ data = json.loads(full_body)
+ # Handle JSON-RPC format: params.message.messageId (camelCase!)
+ if 'params' in data and 'message' in data['params']:
+ msg = data['params']['message']
+ message_id = msg.get('messageId') # camelCase in raw JSON
+ if message_id:
+ with _a2a_project_lock:
+ _a2a_message_projects[message_id] = project_name
+ _PRINTER.print(f"[A2A] Stored project '{project_name}' for message {message_id}")
+ # Reset replay index so FastA2A can replay from start
+ replay_index = 0
+ except Exception as e:
+ _PRINTER.print(f"[A2A] Failed to parse message_id: {e}")
+
+ return message
+
+ receive = receive_wrapper
# Update scope with cleaned path
scope = dict(scope)
From 31daa48a2a54ddc3383b8822188402e9da723165 Mon Sep 17 00:00:00 2001
From: deci
Date: Fri, 12 Dec 2025 12:22:41 -0600
Subject: [PATCH 023/436] #84 - adjusted A2A project passing via
message.metadata
---
python/helpers/fasta2a_server.py | 90 ++++++++++++++------------------
1 file changed, 40 insertions(+), 50 deletions(-)
diff --git a/python/helpers/fasta2a_server.py b/python/helpers/fasta2a_server.py
index 613f8fb320..5ee1e21547 100644
--- a/python/helpers/fasta2a_server.py
+++ b/python/helpers/fasta2a_server.py
@@ -60,11 +60,6 @@ async def update_task(self, **kwargs):
_PRINTER = PrintStyle(italic=True, font_color="purple", padding=False)
-# Map message_id to project name (thread-safe, no race conditions)
-# Each message has unique ID that correlates request to worker
-_a2a_message_projects: dict[str, str] = {}
-_a2a_project_lock = threading.Lock()
-
class AgentZeroWorker(Worker): # type: ignore[misc]
"""Agent Zero implementation of FastA2A Worker."""
@@ -89,15 +84,11 @@ async def run_task(self, params: Any) -> None: # params: TaskSendParams
cfg = initialize_agent()
context = AgentContext(cfg, type=AgentContextType.BACKGROUND)
- # Retrieve project by message_id (direct lookup, no race conditions)
- # Note: Request has messageId (camelCase), but FastA2A converts it to message_id (snake_case)
- project_name = None
- message_id = message.get('message_id') # FastA2A converts camelCase to snake_case
- if message_id:
- with _a2a_project_lock:
- project_name = _a2a_message_projects.pop(message_id, None)
- if project_name:
- _PRINTER.print(f"[A2A] Retrieved project for message {message_id}: {project_name}")
+ # Retrieve project from message.metadata (standard A2A pattern)
+ metadata = message.get('metadata', {}) or {}
+ project_name = metadata.get('project')
+ if project_name:
+ _PRINTER.print(f"[A2A] Retrieved project from message.metadata: {project_name}")
# Activate project if specified
if project_name:
@@ -487,51 +478,50 @@ async def __call__(self, scope, receive, send):
})
return
- # If project specified, we need to extract message_id from request body
- # to correlate project with the specific message (no race conditions)
+ # If project specified, inject it into the request payload
if project_name:
- # Buffer all messages for replay
+ # Buffer messages and modify before returning the complete body
received_messages = []
- replay_index = 0
- message_id_extracted = False
+ body_modified = False
original_receive = receive
async def receive_wrapper():
- nonlocal replay_index, message_id_extracted
-
- # If replaying buffered messages, return them in order
- if replay_index < len(received_messages):
- msg = received_messages[replay_index]
- replay_index += 1
- return msg
+ nonlocal body_modified
- # Otherwise, receive and buffer the next message
+ # Receive and buffer the next message
message = await original_receive()
received_messages.append(message)
- # Parse message_id when we get the complete body
- if message['type'] == 'http.request':
- # If this is the last chunk, parse the full body
- if not message.get('more_body', False) and not message_id_extracted:
- message_id_extracted = True
- try:
- import json
- # Reconstruct full body from all buffered messages
- body_parts = [msg.get('body', b'') for msg in received_messages if msg['type'] == 'http.request']
- full_body = b''.join(body_parts)
- data = json.loads(full_body)
- # Handle JSON-RPC format: params.message.messageId (camelCase!)
- if 'params' in data and 'message' in data['params']:
- msg = data['params']['message']
- message_id = msg.get('messageId') # camelCase in raw JSON
- if message_id:
- with _a2a_project_lock:
- _a2a_message_projects[message_id] = project_name
- _PRINTER.print(f"[A2A] Stored project '{project_name}' for message {message_id}")
- # Reset replay index so FastA2A can replay from start
- replay_index = 0
- except Exception as e:
- _PRINTER.print(f"[A2A] Failed to parse message_id: {e}")
+ # When we get the complete body, inject project into JSON
+ if message['type'] == 'http.request' and not message.get('more_body', False) and not body_modified:
+ body_modified = True
+ try:
+ import json
+ # Reconstruct full body from all buffered messages
+ body_parts = [msg.get('body', b'') for msg in received_messages if msg['type'] == 'http.request']
+ full_body = b''.join(body_parts)
+ data = json.loads(full_body)
+
+ # INJECT project into message.metadata (standard A2A pattern)
+ if 'params' in data and 'message' in data['params']:
+ msg_data = data['params']['message']
+ # Initialize metadata if it doesn't exist
+ if 'metadata' not in msg_data or msg_data['metadata'] is None:
+ msg_data['metadata'] = {}
+ msg_data['metadata']['project'] = project_name
+ _PRINTER.print(f"[A2A] Injected project '{project_name}' into message.metadata")
+
+ # Serialize back to JSON
+ modified_body = json.dumps(data).encode('utf-8')
+
+ # Return modified message IMMEDIATELY (before FastA2A processes it)
+ return {
+ 'type': 'http.request',
+ 'body': modified_body,
+ 'more_body': False
+ }
+ except Exception as e:
+ _PRINTER.print(f"[A2A] Failed to inject project into payload: {e}")
return message
From 079ef0698ddba6395f38ceb7281edf5bd9942e5d Mon Sep 17 00:00:00 2001
From: Eddie Pick
Date: Sun, 14 Dec 2025 12:32:48 -0500
Subject: [PATCH 024/436] Add Z.AI provider support for general and coding APIs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Add Z.AI provider with base URL https://api.z.ai/api/paas/v4
- Add Z.AI Coding provider with base URL https://api.z.ai/api/coding/paas/v4
- Both use OpenAI-compatible protocol via LiteLLM
Available models: glm-4.6, glm-4.6v (vision), glm-4.5, glm-4.5-air
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Opus 4.5
---
conf/model_providers.yaml | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/conf/model_providers.yaml b/conf/model_providers.yaml
index e805376e3c..999b499e81 100644
--- a/conf/model_providers.yaml
+++ b/conf/model_providers.yaml
@@ -84,6 +84,16 @@ chat:
xai:
name: xAI
litellm_provider: xai
+ zai:
+ name: Z.AI
+ litellm_provider: openai
+ kwargs:
+ api_base: https://api.z.ai/api/paas/v4
+ zai_coding:
+ name: Z.AI Coding
+ litellm_provider: openai
+ kwargs:
+ api_base: https://api.z.ai/api/coding/paas/v4
other:
name: Other OpenAI compatible
litellm_provider: openai
From f5fef3fbe5d8cada3ec400bf447e3567bc6bc05f Mon Sep 17 00:00:00 2001
From: Eddie Pick
Date: Sun, 14 Dec 2025 13:33:58 -0500
Subject: [PATCH 025/436] Add AWS Bedrock provider support for chat and
embeddings
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Add Bedrock to chat providers with native LiteLLM support
- Add Bedrock to embedding providers
Available models (from AWS docs):
Claude:
- anthropic.claude-opus-4-5-20251101-v1:0 (Opus 4.5)
- anthropic.claude-sonnet-4-5-20250929-v1:0 (Sonnet 4.5)
- anthropic.claude-haiku-4-5-20251001-v1:0 (Haiku 4.5)
Amazon Nova:
- amazon.nova-pro-v1:0, amazon.nova-lite-v1:0
- amazon.nova-2-lite-v1:0, amazon.nova-2-sonic-v1:0
Qwen:
- qwen.qwen3-235b-a22b-2507-v1:0
- qwen.qwen3-coder-480b-a35b-v1:0
Others: MiniMax M2, Mistral Large 3, DeepSeek R1/V3,
Llama 3.3, Titan Embeddings
Auth: Set BEDROCK_API_KEY (new AWS Bedrock API key)
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Opus 4.5
---
conf/model_providers.yaml | 6 ++++++
requirements.txt | 1 +
2 files changed, 7 insertions(+)
diff --git a/conf/model_providers.yaml b/conf/model_providers.yaml
index e805376e3c..45c9647158 100644
--- a/conf/model_providers.yaml
+++ b/conf/model_providers.yaml
@@ -64,6 +64,9 @@ chat:
azure:
name: OpenAI Azure
litellm_provider: azure
+ bedrock:
+ name: AWS Bedrock
+ litellm_provider: bedrock
openrouter:
name: OpenRouter
litellm_provider: openrouter
@@ -110,6 +113,9 @@ embedding:
azure:
name: OpenAI Azure
litellm_provider: azure
+ bedrock:
+ name: AWS Bedrock
+ litellm_provider: bedrock
# TODO: OpenRouter not yet supported by LiteLLM, replace with native litellm_provider openrouter and remove api_base when ready
openrouter:
name: OpenRouter
diff --git a/requirements.txt b/requirements.txt
index 07be99756a..a14a853393 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -45,5 +45,6 @@ soundfile==0.13.1
imapclient>=3.0.1
html2text>=2024.2.26
beautifulsoup4>=4.12.3
+boto3>=1.35.0
exchangelib>=5.4.3
pywinpty==3.0.2; sys_platform == "win32"
\ No newline at end of file
From 9c905cdbbdac54af566c16fa3251edb75066bd06 Mon Sep 17 00:00:00 2001
From: frdel <38891707+frdel@users.noreply.github.com>
Date: Mon, 15 Dec 2025 14:44:32 +0100
Subject: [PATCH 026/436] cleanup prints in A2A/MCP
---
python/helpers/fasta2a_server.py | 12 ++----------
python/helpers/mcp_server.py | 5 -----
2 files changed, 2 insertions(+), 15 deletions(-)
diff --git a/python/helpers/fasta2a_server.py b/python/helpers/fasta2a_server.py
index 5ee1e21547..000c5a39df 100644
--- a/python/helpers/fasta2a_server.py
+++ b/python/helpers/fasta2a_server.py
@@ -87,17 +87,10 @@ async def run_task(self, params: Any) -> None: # params: TaskSendParams
# Retrieve project from message.metadata (standard A2A pattern)
metadata = message.get('metadata', {}) or {}
project_name = metadata.get('project')
- if project_name:
- _PRINTER.print(f"[A2A] Retrieved project from message.metadata: {project_name}")
-
+
# Activate project if specified
if project_name:
- try:
- projects.activate_project(context.id, project_name)
- _PRINTER.print(f"[A2A] Activated project: {project_name}")
- except Exception as e:
- _PRINTER.print(f"[A2A] Failed to activate project: {e}")
- raise Exception(f"Failed to activate project: {str(e)}")
+ projects.activate_project(context.id, project_name)
# Log user message so it appears instantly in UI chat window
context.log.log(
@@ -509,7 +502,6 @@ async def receive_wrapper():
if 'metadata' not in msg_data or msg_data['metadata'] is None:
msg_data['metadata'] = {}
msg_data['metadata']['project'] = project_name
- _PRINTER.print(f"[A2A] Injected project '{project_name}' into message.metadata")
# Serialize back to JSON
modified_body = json.dumps(data).encode('utf-8')
diff --git a/python/helpers/mcp_server.py b/python/helpers/mcp_server.py
index 8db6913eee..3c0308ed9c 100644
--- a/python/helpers/mcp_server.py
+++ b/python/helpers/mcp_server.py
@@ -132,8 +132,6 @@ async def send_message(
]:
# Get project name from context variable (set in proxy __call__)
project_name = _mcp_project_name.get()
- if project_name:
- _PRINTER.print(f"[MCP] send_message using project: {project_name}")
context: AgentContext | None = None
if chat_id:
@@ -162,7 +160,6 @@ async def send_message(
if project_name:
try:
projects.activate_project(context.id, project_name)
- _PRINTER.print(f"[MCP] Activated project: {project_name}")
except Exception as e:
return ToolError(error=f"Failed to activate project: {str(e)}", chat_id="")
@@ -452,8 +449,6 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
# Store project in context variable (will be available in send_message)
_mcp_project_name.set(project_name)
- if project_name:
- _PRINTER.print(f"[MCP] Set project in context variable: {project_name}")
# Strip project segment from path if present (e.g., /p-project_name/)
# This is needed because the underlying MCP apps were configured without project paths
From dd018d66a49e79259d443b7655375eadd885f5e4 Mon Sep 17 00:00:00 2001
From: frdel <38891707+frdel@users.noreply.github.com>
Date: Fri, 19 Dec 2025 11:30:08 +0100
Subject: [PATCH 027/436] pipx, subagent paths, alpine directives, CSRF allowed
origins autoset
---
agent.py | 6 +-
docker/base/fs/ins/install_python.sh | 2 +-
python/api/csrf_token.py | 40 ++++++-
.../agent_init/_15_load_profile_settings.py | 2 +-
python/helpers/extension.py | 2 +-
python/helpers/subagents.py | 101 +++++++-----------
webui/js/initFw.js | 55 ++++++++--
7 files changed, 128 insertions(+), 80 deletions(-)
diff --git a/agent.py b/agent.py
index f5e0602f8c..f6001098c1 100644
--- a/agent.py
+++ b/agent.py
@@ -619,14 +619,14 @@ async def get_system_prompt(self, loop_data: LoopData) -> list[str]:
return system_prompt
def parse_prompt(self, _prompt_file: str, **kwargs):
- dirs = subagents.get_agent_paths_chain(self, "prompts")
+ dirs = subagents.get_paths(self, "prompts")
prompt = files.parse_file(
_prompt_file, _directories=dirs, _agent=self, **kwargs
)
return prompt
def read_prompt(self, file: str, **kwargs) -> str:
- dirs = subagents.get_agent_paths_chain(self, "prompts")
+ dirs = subagents.get_paths(self, "prompts")
prompt = files.read_prompt_file(file, _directories=dirs, _agent=self, **kwargs)
prompt = files.remove_code_fences(prompt)
return prompt
@@ -958,7 +958,7 @@ def get_tool(
classes = []
# search for tools in agent's folder hierarchy
- paths = subagents.get_agent_paths_chain(self, "tools", name + ".py", default_root="python")
+ paths = subagents.get_paths(self, "tools", name + ".py", default_root="python")
for path in paths:
try:
classes = extract_tools.load_classes_from_file(path, Tool) # type: ignore[arg-type]
diff --git a/docker/base/fs/ins/install_python.sh b/docker/base/fs/ins/install_python.sh
index 82f2fc3832..417395ebcc 100644
--- a/docker/base/fs/ins/install_python.sh
+++ b/docker/base/fs/ins/install_python.sh
@@ -20,7 +20,7 @@ python3.13 -m venv /opt/venv
source /opt/venv/bin/activate
# upgrade pip and install static packages
-pip install --no-cache-dir --upgrade pip ipython requests
+pip install --no-cache-dir --upgrade pip pipx ipython requests
echo "====================PYTHON PYVENV===================="
diff --git a/python/api/csrf_token.py b/python/api/csrf_token.py
index 0e46a4d46e..f4d1d63c0f 100644
--- a/python/api/csrf_token.py
+++ b/python/api/csrf_token.py
@@ -11,6 +11,8 @@
from python.helpers import runtime, dotenv, login
import fnmatch
+ALLOWED_ORIGINS_KEY = "ALLOWED_ORIGINS"
+
class GetCsrfToken(ApiHandler):
@@ -44,9 +46,11 @@ async def process(self, input: Input, request: Request) -> Output:
}
async def check_allowed_origin(self, request: Request):
- # if login is required, this che
+ # if login is required, this check is unnecessary
if login.is_login_required():
return {"ok": True, "origin": "", "allowed_origins": ""}
+ # initialize allowed origins if not yet set
+ self.initialize_allowed_origins(request)
# otherwise, check if the origin is allowed
return await self.is_allowed_origin(request)
@@ -66,6 +70,7 @@ async def is_allowed_origin(self, request: Request):
)
return {"ok": match, "origin": origin, "allowed_origins": allowed_origins}
+
def get_origin_from_request(self, request: Request):
# get from origin
r = request.headers.get("Origin") or request.environ.get("HTTP_ORIGIN")
@@ -88,7 +93,7 @@ async def get_allowed_origins(self) -> list[str]:
# get the allowed origins from the environment
allowed_origins = [
origin.strip()
- for origin in (dotenv.get_dotenv_value("ALLOWED_ORIGINS") or "").split(",")
+ for origin in (dotenv.get_dotenv_value(ALLOWED_ORIGINS_KEY) or "").split(",")
if origin.strip()
]
@@ -110,3 +115,34 @@ async def get_allowed_origins(self) -> list[str]:
def get_default_allowed_origins(self) -> list[str]:
return ["*://localhost:*", "*://127.0.0.1:*", "*://0.0.0.0:*"]
+
+ def initialize_allowed_origins(self, request: Request):
+ """
+ If A0 is hosted on a server, add the first visit origin to ALLOWED_ORIGINS.
+ This simplifies deployment process as users can access their new instance without
+ additional setup while keeping it secure.
+ """
+ # dotenv value is already set, do nothing
+ denv = dotenv.get_dotenv_value(ALLOWED_ORIGINS_KEY)
+ if denv:
+ return
+
+ # get the origin from the request
+ req_origin = self.get_origin_from_request(request)
+ if not req_origin:
+ return
+
+ # check if the origin is allowed by default
+ allowed_origins = self.get_default_allowed_origins()
+ match = any(
+ fnmatch.fnmatch(req_origin, allowed_origin)
+ for allowed_origin in allowed_origins
+ )
+ if match:
+ return
+
+ # if not, add it to the allowed origins
+ allowed_origins.append(req_origin)
+ dotenv.save_dotenv_value(ALLOWED_ORIGINS_KEY, ",".join(allowed_origins))
+
+
\ No newline at end of file
diff --git a/python/extensions/agent_init/_15_load_profile_settings.py b/python/extensions/agent_init/_15_load_profile_settings.py
index d4261d7b9d..d4c9b5ab42 100644
--- a/python/extensions/agent_init/_15_load_profile_settings.py
+++ b/python/extensions/agent_init/_15_load_profile_settings.py
@@ -10,7 +10,7 @@ async def execute(self, **kwargs) -> None:
if not self.agent or not self.agent.config.profile:
return
- config_files = subagents.get_agent_paths_chain(self.agent, "settings.json", include_default=False)
+ config_files = subagents.get_paths(self.agent, "settings.json", include_default=False)
settings_override = {}
for settings_path in config_files:
diff --git a/python/helpers/extension.py b/python/helpers/extension.py
index d28e86c4ec..186099cc02 100644
--- a/python/helpers/extension.py
+++ b/python/helpers/extension.py
@@ -30,7 +30,7 @@ async def call_extensions(
from python.helpers import projects, subagents
# search for extension folders in all agent's paths
- paths = subagents.get_agent_paths_chain(agent, "extensions", extension_point, default_root="python")
+ paths = subagents.get_paths(agent, "extensions", extension_point, default_root="python")
all_exts = [cls for path in paths for cls in _get_extensions(path)]
# merge: first ocurrence of file name is the override
diff --git a/python/helpers/subagents.py b/python/helpers/subagents.py
index fb8886a4a1..66f320f001 100644
--- a/python/helpers/subagents.py
+++ b/python/helpers/subagents.py
@@ -246,84 +246,63 @@ def get_available_agents_dict(
return filtered_agents
-def get_agent_paths(
- agent: "Agent", *subpaths, must_exist_completely: bool = True
-) -> list[str]:
- """Returns list of possible paths for the given agent and subpaths. Order is from lowest priority (global)."""
-
- if not agent or not agent.config.profile:
- return []
- from python.helpers import projects
-
- project_name = projects.get_context_project_name(agent.context)
- return get_agent_profile_paths(
- agent.config.profile,
- project_name,
- *subpaths,
- must_exist_completely=must_exist_completely,
- )
-
-
-def get_agent_paths_chain(
+def get_paths(
agent: "Agent|None",
*subpaths,
- must_exist_completely: bool = True,
+ must_exist_completely: bool = True,
include_project: bool = True,
include_user: bool = True,
include_default: bool = True,
default_root: str = "",
) -> list[str]:
"""Returns list of file paths for the given agent and subpaths, searched in order of priority:
- project/agents/, usr/agents/, agents/, project/, usr/, default."""
- from python.helpers import projects
+ project/agents/, project/, usr/agents/, agents/, usr/, default."""
+ paths: list[str] = []
+ check_subpaths = subpaths if must_exist_completely else []
+ profile_name = agent.config.profile if agent and agent.config.profile else ""
+ project_name = ""
- if agent and agent.config.profile:
- project_name = projects.get_context_project_name(agent.context)
- paths = get_agent_profile_paths(
- agent.config.profile,
- project_name,
- *subpaths,
- must_exist_completely=must_exist_completely,
- )
- list.reverse(paths) # reverse for proper priority
- else:
- paths = []
- project_name = ""
+ if include_project and agent:
+ from python.helpers import projects
- if include_project and project_name:
- path = projects.get_project_meta_folder(project_name, *subpaths)
- if (not must_exist_completely) or files.exists(path):
+ project_name = projects.get_context_project_name(agent.context) or ""
+
+ if project_name and profile_name:
+ # project/agents/<profile>/...
+ project_agent_dir = projects.get_project_meta_folder(
+ project_name, "agents", profile_name
+ )
+ if files.exists(files.get_abs_path(project_agent_dir, *check_subpaths)):
+ paths.append(files.get_abs_path(project_agent_dir, *subpaths))
+
+ if project_name:
+ # project/.a0proj/...
+ path = projects.get_project_meta_folder(project_name, *subpaths)
+ if (not must_exist_completely) or files.exists(path):
+ paths.append(path)
+
+ if profile_name:
+
+ # usr/agents/<profile>/...
+ path = files.get_abs_path(USER_AGENTS_DIR, profile_name, *subpaths)
+ if (not must_exist_completely) or files.exists(files.get_abs_path(USER_AGENTS_DIR, profile_name, *check_subpaths)):
paths.append(path)
+
+ # agents/<profile>/...
+ path = files.get_abs_path(DEFAULT_AGENTS_DIR, profile_name, *subpaths)
+ if (not must_exist_completely) or files.exists(files.get_abs_path(DEFAULT_AGENTS_DIR, profile_name, *check_subpaths)):
+ paths.append(path)
+
if include_user:
+ # usr/...
path = files.get_abs_path(USER_DIR, *subpaths)
if (not must_exist_completely) or files.exists(path):
paths.append(path)
+
if include_default:
+ # default_root/...
path = files.get_abs_path(default_root, *subpaths)
if (not must_exist_completely) or files.exists(path):
paths.append(path)
- return paths
-
-def get_agent_profile_paths(
- name: str,
- project_name: str | None = None,
- *subpaths,
- must_exist_completely: bool = True,
-) -> list[str]:
- result = []
- check_subpaths = subpaths if must_exist_completely else []
-
- if files.exists(files.get_abs_path(DEFAULT_AGENTS_DIR, name, *check_subpaths)):
- result.append(files.get_abs_path(DEFAULT_AGENTS_DIR, name, *subpaths))
- if files.exists(files.get_abs_path(USER_AGENTS_DIR, name, *check_subpaths)):
- result.append(files.get_abs_path(USER_AGENTS_DIR, name, *subpaths))
- if project_name:
- from python.helpers import projects
-
- project_agent_dir = projects.get_project_meta_folder(
- project_name, "agents", name
- )
- if files.exists(files.get_abs_path(project_agent_dir, *check_subpaths)):
- result.append(files.get_abs_path(project_agent_dir, *subpaths))
- return result
+ return paths
diff --git a/webui/js/initFw.js b/webui/js/initFw.js
index 69abde220f..c758b6d6c1 100644
--- a/webui/js/initFw.js
+++ b/webui/js/initFw.js
@@ -10,15 +10,48 @@ await import("../vendor/alpine/alpine.min.js");
// add x-destroy directive to alpine
Alpine.directive(
- "destroy",
- (el, { expression }, { evaluateLater, cleanup }) => {
- const onDestroy = evaluateLater(expression);
- cleanup(() => onDestroy());
- }
-);
+ "destroy",
+ (_el, { expression }, { evaluateLater, cleanup }) => {
+ const onDestroy = evaluateLater(expression);
+ cleanup(() => onDestroy());
+ }
+ );
-// add x-create directive to alpine
-Alpine.directive("create", (_el, { expression }, { evaluateLater }) => {
- const onCreate = evaluateLater(expression);
- onCreate();
-});
+ // add x-create directive to alpine
+ Alpine.directive(
+ "create",
+ (_el, { expression }, { evaluateLater }) => {
+ const onCreate = evaluateLater(expression);
+ onCreate();
+ }
+ );
+
+ // run every second if the component is active
+ Alpine.directive(
+ "every-second",
+ (_el, { expression }, { evaluateLater, cleanup }) => {
+ const onTick = evaluateLater(expression);
+ const intervalId = setInterval(() => onTick(), 1000);
+ cleanup(() => clearInterval(intervalId));
+ }
+ );
+
+ // run every minute if the component is active
+ Alpine.directive(
+ "every-minute",
+ (_el, { expression }, { evaluateLater, cleanup }) => {
+ const onTick = evaluateLater(expression);
+ const intervalId = setInterval(() => onTick(), 60_000);
+ cleanup(() => clearInterval(intervalId));
+ }
+ );
+
+ // run every hour if the component is active
+ Alpine.directive(
+ "every-hour",
+ (_el, { expression }, { evaluateLater, cleanup }) => {
+ const onTick = evaluateLater(expression);
+ const intervalId = setInterval(() => onTick(), 3_600_000);
+ cleanup(() => clearInterval(intervalId));
+ }
+ );
From 5a63cf7bfbca1ab9fe2e174d9a220d063152de60 Mon Sep 17 00:00:00 2001
From: "dipi.evil"
Date: Fri, 19 Dec 2025 10:08:32 -0300
Subject: [PATCH 028/436] feat: add support for agent and project on API
---
python/api/api_message.py | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/python/api/api_message.py b/python/api/api_message.py
index 385d556dd2..6ccea07b29 100644
--- a/python/api/api_message.py
+++ b/python/api/api_message.py
@@ -5,6 +5,7 @@
from python.helpers.api import ApiHandler, Request, Response
from python.helpers import files
from python.helpers.print_style import PrintStyle
+from python.helpers.projects import activate_project
from werkzeug.utils import secure_filename
from initialize import initialize_agent
import threading
@@ -33,6 +34,13 @@ async def process(self, input: dict, request: Request) -> dict | Response:
message = input.get("message", "")
attachments = input.get("attachments", [])
lifetime_hours = input.get("lifetime_hours", 24) # Default 24 hours
+ project_name = input.get("project_name", None)
+ agent_profile = input.get("agent_profile", None)
+
+ # Initialize agent if profile provided
+ override_settings = {}
+ if agent_profile:
+ override_settings["agent_profile"] = agent_profile
if not message:
return Response('{"error": "Message is required"}', status=400, mimetype="application/json")
@@ -72,11 +80,14 @@ async def process(self, input: dict, request: Request) -> dict | Response:
if not context:
return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
else:
- config = initialize_agent()
+ config = initialize_agent(override_settings=override_settings)
context = AgentContext(config=config, type=AgentContextType.USER)
AgentContext.use(context.id)
context_id = context.id
+ if project_name:
+ activate_project(context_id, project_name)
+
# Update chat lifetime
with self._cleanup_lock:
self._chat_lifetimes[context_id] = datetime.now() + timedelta(hours=lifetime_hours)
From 8639ef19e6dce415b22f6d48308ab934d7f92baa Mon Sep 17 00:00:00 2001
From: "dipi.evil"
Date: Fri, 19 Dec 2025 10:34:51 -0300
Subject: [PATCH 029/436] chore: fix comments
---
python/api/api_message.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/python/api/api_message.py b/python/api/api_message.py
index 6ccea07b29..de08593878 100644
--- a/python/api/api_message.py
+++ b/python/api/api_message.py
@@ -37,7 +37,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
project_name = input.get("project_name", None)
agent_profile = input.get("agent_profile", None)
- # Initialize agent if profile provided
+ # Set an agent if profile provided
override_settings = {}
if agent_profile:
override_settings["agent_profile"] = agent_profile
@@ -85,6 +85,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
AgentContext.use(context.id)
context_id = context.id
+ # Activate project if provided
if project_name:
activate_project(context_id, project_name)
From a96387d54767d0c128974aa9ea3adb603526f972 Mon Sep 17 00:00:00 2001
From: "dipi.evil"
Date: Fri, 19 Dec 2025 11:17:29 -0300
Subject: [PATCH 030/436] fix: validate existing agent for context
---
python/api/api_message.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/python/api/api_message.py b/python/api/api_message.py
index de08593878..8eb5b53ef8 100644
--- a/python/api/api_message.py
+++ b/python/api/api_message.py
@@ -76,7 +76,9 @@ async def process(self, input: dict, request: Request) -> dict | Response:
# Get or create context
if context_id:
- context = AgentContext.use(context_id)
+ if agent_profile:
+ return Response('{"error": "Cannot override agent profile on existing context"}', status=400, mimetype="application/json")
+ context = AgentContext.use(context_id)
if not context:
return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
else:
From 07daf31b395831b43d2bbddfdf26a84c84c7713c Mon Sep 17 00:00:00 2001
From: "dipi.evil"
Date: Fri, 19 Dec 2025 11:18:10 -0300
Subject: [PATCH 031/436] fix: check if project exists when activating
---
python/api/api_message.py | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/python/api/api_message.py b/python/api/api_message.py
index 8eb5b53ef8..f36fa1fafe 100644
--- a/python/api/api_message.py
+++ b/python/api/api_message.py
@@ -86,10 +86,26 @@ async def process(self, input: dict, request: Request) -> dict | Response:
context = AgentContext(config=config, type=AgentContextType.USER)
AgentContext.use(context.id)
context_id = context.id
-
- # Activate project if provided
- if project_name:
- activate_project(context_id, project_name)
+ # Activate project if provided
+ if project_name:
+ try:
+ activate_project(context_id, project_name)
+ except Exception as e:
+ # Handle non-existent project or context errors more gracefully
+ error_msg = str(e)
+ PrintStyle.error(f"Failed to activate project '{project_name}' for context '{context_id}': {error_msg}")
+ # If the error message suggests a missing resource, return 404; otherwise, 500
+ if "not found" in error_msg.lower() or "does not exist" in error_msg.lower():
+ return Response(
+ f'{{"error": "Project \\"{project_name}\\" not found"}}',
+ status=404,
+ mimetype="application/json",
+ )
+ return Response(
+ f'{{"error": "Failed to activate project \\"{project_name}\\""}}',
+ status=500,
+ mimetype="application/json",
+ )
# Update chat lifetime
with self._cleanup_lock:
From 265e31e4bbb832f2b5b650906e0d88ff20e9c815 Mon Sep 17 00:00:00 2001
From: "dipi.evil"
Date: Fri, 19 Dec 2025 11:34:14 -0300
Subject: [PATCH 032/436] fix: improve error handling for project activation in
existing context
---
python/api/api_message.py | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/python/api/api_message.py b/python/api/api_message.py
index f36fa1fafe..9fb433335b 100644
--- a/python/api/api_message.py
+++ b/python/api/api_message.py
@@ -78,7 +78,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
if context_id:
if agent_profile:
return Response('{"error": "Cannot override agent profile on existing context"}', status=400, mimetype="application/json")
- context = AgentContext.use(context_id)
+ context = AgentContext.use(context_id)
if not context:
return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
else:
@@ -91,16 +91,9 @@ async def process(self, input: dict, request: Request) -> dict | Response:
try:
activate_project(context_id, project_name)
except Exception as e:
- # Handle non-existent project or context errors more gracefully
+ # Handle project or context errors more gracefully
error_msg = str(e)
PrintStyle.error(f"Failed to activate project '{project_name}' for context '{context_id}': {error_msg}")
- # If the error message suggests a missing resource, return 404; otherwise, 500
- if "not found" in error_msg.lower() or "does not exist" in error_msg.lower():
- return Response(
- f'{{"error": "Project \\"{project_name}\\" not found"}}',
- status=404,
- mimetype="application/json",
- )
return Response(
f'{{"error": "Failed to activate project \\"{project_name}\\""}}',
status=500,
From c07bbba4da552576183aec9617f58b069d021172 Mon Sep 17 00:00:00 2001
From: Wabifocus
Date: Sun, 21 Dec 2025 01:09:23 -0800
Subject: [PATCH 033/436] InlineButtonConfirmations
---
.../modals/file-browser/file-browser-store.js | 5 +-
.../modals/file-browser/file-browser.html | 43 ++------------
.../settings/memory/memory-dashboard-store.js | 8 ---
.../settings/memory/memory-dashboard.html | 4 +-
.../settings/memory/memory-detail-modal.html | 2 +-
.../components/sidebar/chats/chats-list.html | 25 ++------
.../components/sidebar/tasks/tasks-list.html | 29 ++++-----
webui/css/buttons.css | 48 +++++++++++++++
webui/js/confirmClick.js | 59 +++++++++++++++++++
webui/js/initFw.js | 4 ++
webui/js/scheduler.js | 5 --
11 files changed, 139 insertions(+), 93 deletions(-)
create mode 100644 webui/js/confirmClick.js
diff --git a/webui/components/modals/file-browser/file-browser-store.js b/webui/components/modals/file-browser/file-browser-store.js
index 3764f72152..7825b37b01 100644
--- a/webui/components/modals/file-browser/file-browser-store.js
+++ b/webui/components/modals/file-browser/file-browser-store.js
@@ -172,7 +172,6 @@ const model = {
// --- File actions --------------------------------------------------------
async deleteFile(file) {
- if (!confirm(`Are you sure you want to delete ${file.name}?`)) return;
try {
const resp = await fetchApi("/delete_work_dir_file", {
method: "POST",
@@ -186,9 +185,9 @@ const model = {
this.browser.entries = this.browser.entries.filter(
(e) => e.path !== file.path
);
- alert("File deleted successfully.");
+ window.toastFrontendSuccess("File deleted successfully", "File Deleted");
} else {
- alert(`Error deleting file: ${await resp.text()}`);
+ window.toastFrontendError(`Error deleting file: ${await resp.text()}`, "Delete Error");
}
} catch (e) {
window.toastFrontendError(
diff --git a/webui/components/modals/file-browser/file-browser.html b/webui/components/modals/file-browser/file-browser.html
index b9689fb531..0ccec66a69 100644
--- a/webui/components/modals/file-browser/file-browser.html
+++ b/webui/components/modals/file-browser/file-browser.html
@@ -48,11 +48,11 @@
-
@@ -279,47 +279,12 @@
.btn-upload:active {
background-color: #2b309c;
}
- /* Delete Button Styles */
- .delete-button {
- background: none;
- border: none;
- color: var(--color-primary);
- cursor: pointer;
- width: 32px;
- padding: 4px 8px;
- border-radius: 4px;
- transition: opacity 0.2s, background-color 0.2s;
- }
- .delete-button:hover {
- color: #ff7878;
- }
- .delete-button:active {
- opacity: 0.6;
- }
/* File Actions */
.file-actions {
display: flex;
gap: var(--spacing-xs);
}
- .action-button {
- background: none;
- border: none;
- cursor: pointer;
- width: 32px;
- padding: 6px 8px;
- border-radius: 4px;
- transition: background-color 0.2s;
- }
- .download-button {
- color: var(--color-primary);
- }
- .download-button:hover {
- background-color: var(--color-border);
- }
- .light-mode .download-button:hover {
- background-color: #c6d4de;
- }
/* Responsive Design */
@media (max-width: 768px) {
.file-header,
diff --git a/webui/components/settings/memory/memory-dashboard-store.js b/webui/components/settings/memory/memory-dashboard-store.js
index bf246f5e8a..fe83173703 100644
--- a/webui/components/settings/memory/memory-dashboard-store.js
+++ b/webui/components/settings/memory/memory-dashboard-store.js
@@ -554,14 +554,6 @@ ${memory.content_full}
},
async deleteMemory(memory) {
- if (
- !confirm(
- `Are you sure you want to delete this memory from ${memory.area}?`
- )
- ) {
- return;
- }
-
try {
// Check if this is the memory currently being viewed in detail modal
const isViewingThisMemory =
diff --git a/webui/components/settings/memory/memory-dashboard.html b/webui/components/settings/memory/memory-dashboard.html
index aaaeac15d2..887f32117c 100644
--- a/webui/components/settings/memory/memory-dashboard.html
+++ b/webui/components/settings/memory/memory-dashboard.html
@@ -235,8 +235,8 @@
content_copy
- $store.memoryDashboardStore.deleteMemory(memory))"
title="Delete Memory">
delete
diff --git a/webui/components/settings/memory/memory-detail-modal.html b/webui/components/settings/memory/memory-detail-modal.html
index 0bd45ba4c9..0905bff234 100644
--- a/webui/components/settings/memory/memory-detail-modal.html
+++ b/webui/components/settings/memory/memory-detail-modal.html
@@ -35,7 +35,7 @@
title="Edit Memory">
edit_document
-
diff --git a/webui/components/sidebar/chats/chats-list.html b/webui/components/sidebar/chats/chats-list.html
index 7a11ec98fd..ab464d58f8 100644
--- a/webui/components/sidebar/chats/chats-list.html
+++ b/webui/components/sidebar/chats/chats-list.html
@@ -22,7 +22,9 @@
- X
+ $store.chats.killChat(context.id))">
+ close
+
@@ -110,28 +112,9 @@
font-size: var(--font-size-small);
}
- .edit-button {
+ .chat-container .btn-icon-action {
flex-shrink: 0;
margin-right: 8px;
- background-color: transparent;
- border: 1px solid var(--color-border);
- border-radius: 0.1875rem;
- color: var(--color-primary);
- cursor: pointer;
- padding: 0.125rem 0.5rem;
- transition: all var(--transition-speed) ease-in-out;
- width: 2rem;
- height: 2rem;
- }
-
- .edit-button:hover {
- border-color: var(--color-primary);
- background-color: #32455690;
- }
-
- .edit-button:active {
- background-color: #131a2090;
- color: rgba(253, 253, 253, 0.35);
}
.empty-list-message {
diff --git a/webui/components/sidebar/tasks/tasks-list.html b/webui/components/sidebar/tasks/tasks-list.html
index bf51bfabe1..0344fb44f8 100644
--- a/webui/components/sidebar/tasks/tasks-list.html
+++ b/webui/components/sidebar/tasks/tasks-list.html
@@ -38,20 +38,20 @@
-
-
-
-
-
-
- X
+
+
+ info
+
+
+ refresh
+
+ $store.tasks.deleteTask(task.id))">
+ close
+
+
@@ -138,6 +138,7 @@
.light-mode .task-name:hover { background-color: rgba(0,0,0,0.05); }
.task-info-line { display: flex; justify-content: space-between; align-items: center; width: 100%; margin-top: 2px; }
+ .task-actions { display: flex; gap: 0.375rem; margin-left: auto; }
.task-container {
width: 100%;
diff --git a/webui/css/buttons.css b/webui/css/buttons.css
index 8836591fd2..db8331488e 100644
--- a/webui/css/buttons.css
+++ b/webui/css/buttons.css
@@ -61,3 +61,51 @@
padding-left: 0.75rem;
padding-right: 0.75rem;
}
+
+/* Inline Confirmation State */
+.confirming {
+ border-color: var(--color-highlight) !important;
+ background-color: var(--color-highlight) !important;
+ color: #fff !important;
+}
+
+.confirming:hover {
+ filter: brightness(1.1);
+}
+
+/* Standard icon-only action button for destructive actions */
+.btn-icon-action {
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ background: transparent;
+ border: 1px solid var(--color-border);
+ border-radius: 0.25rem;
+ color: var(--color-primary);
+ cursor: pointer;
+ padding: 0.25rem;
+ width: 1.75rem;
+ height: 1.75rem;
+ transition: all 0.18s cubic-bezier(0.4, 0, 0.2, 1);
+ flex-shrink: 0;
+}
+
+.btn-icon-action .material-symbols-outlined,
+.btn-icon-action .material-icons-outlined {
+ font-size: 1rem;
+}
+
+.btn-icon-action:hover {
+ border-color: var(--color-primary);
+ background-color: rgba(50, 69, 86, 0.56);
+}
+
+.btn-icon-action:active {
+ background-color: rgba(19, 26, 32, 0.56);
+ color: rgba(253, 253, 253, 0.35);
+}
+
+.btn-icon-action.danger:hover {
+ border-color: #e57373;
+ color: #e57373;
+}
diff --git a/webui/js/confirmClick.js b/webui/js/confirmClick.js
new file mode 100644
index 0000000000..6e5f2cdf39
--- /dev/null
+++ b/webui/js/confirmClick.js
@@ -0,0 +1,59 @@
+// Inline button two-click confirmation for destructive actions.
+// First click arms, second click confirms, timeout resets.
+
+const CONFIRM_TIMEOUT = 2000;
+const CONFIRM_CLASS = 'confirming';
+const CONFIRM_ICON = 'check';
+
+const buttonStates = new WeakMap();
+
+// Handles inline two-click confirmation for a button.
+export function confirmClick(event, action) {
+ const button = event.currentTarget;
+ if (!button) return;
+
+ const state = buttonStates.get(button);
+
+ if (state?.confirming) {
+ clearTimeout(state.timeoutId);
+ resetButton(button, state);
+ buttonStates.delete(button);
+ action();
+ } else {
+ const iconEl = button.querySelector('.material-symbols-outlined, .material-icons-outlined');
+ const originalIcon = iconEl?.textContent?.trim();
+
+ const newState = {
+ confirming: true,
+ originalIcon,
+ timeoutId: setTimeout(() => {
+ resetButton(button, newState);
+ buttonStates.delete(button);
+ }, CONFIRM_TIMEOUT)
+ };
+
+ buttonStates.set(button, newState);
+
+ // Apply confirming state
+ button.classList.add(CONFIRM_CLASS);
+ if (iconEl) {
+ iconEl.textContent = CONFIRM_ICON;
+ }
+ }
+}
+
+// Reset button to original state
+function resetButton(button, state) {
+ button.classList.remove(CONFIRM_CLASS);
+ const iconEl = button.querySelector('.material-symbols-outlined, .material-icons-outlined');
+ if (iconEl && state.originalIcon) {
+ iconEl.textContent = state.originalIcon;
+ }
+}
+
+// Register Alpine magic helper
+export function registerAlpineMagic() {
+ if (globalThis.Alpine) {
+ Alpine.magic('confirmClick', () => confirmClick);
+ }
+}
diff --git a/webui/js/initFw.js b/webui/js/initFw.js
index c758b6d6c1..f56ba15d33 100644
--- a/webui/js/initFw.js
+++ b/webui/js/initFw.js
@@ -1,6 +1,7 @@
import * as initializer from "./initializer.js";
import * as _modals from "./modals.js";
import * as _components from "./components.js";
+import { registerAlpineMagic } from "./confirmClick.js";
// initialize required elements
await initializer.initialize();
@@ -8,6 +9,9 @@ await initializer.initialize();
// import alpine library
await import("../vendor/alpine/alpine.min.js");
+// register $confirmClick magic helper for inline button confirmations
+registerAlpineMagic();
+
// add x-destroy directive to alpine
Alpine.directive(
"destroy",
diff --git a/webui/js/scheduler.js b/webui/js/scheduler.js
index 96cb548dde..63d5b65ba4 100644
--- a/webui/js/scheduler.js
+++ b/webui/js/scheduler.js
@@ -1090,11 +1090,6 @@ const fullComponentImplementation = function() {
// Delete a task
async deleteTask(taskId) {
- // Confirm deletion
- if (!confirm('Are you sure you want to delete this task? This action cannot be undone.')) {
- return;
- }
-
try {
// if we delete selected context, switch to another first
From 6d31f86f13f576511f2aaf06f9e8677f4e85783d Mon Sep 17 00:00:00 2001
From: Wabifocus
Date: Sun, 21 Dec 2025 08:37:40 -0800
Subject: [PATCH 034/436] css & html cleanup
---
webui/components/sidebar/tasks/tasks-list.html | 2 --
webui/css/buttons.css | 5 -----
2 files changed, 7 deletions(-)
diff --git a/webui/components/sidebar/tasks/tasks-list.html b/webui/components/sidebar/tasks/tasks-list.html
index 0344fb44f8..e1aa7f1fc9 100644
--- a/webui/components/sidebar/tasks/tasks-list.html
+++ b/webui/components/sidebar/tasks/tasks-list.html
@@ -68,8 +68,6 @@
-
-
+
diff --git a/webui/components/sidebar/tasks/tasks-store.js b/webui/components/sidebar/tasks/tasks-store.js
index f014207f0c..bedacd27fb 100644
--- a/webui/components/sidebar/tasks/tasks-store.js
+++ b/webui/components/sidebar/tasks/tasks-store.js
@@ -1,5 +1,7 @@
import { createStore } from "/js/AlpineStore.js";
import { store as chatsStore } from "/components/sidebar/chats/chats-store.js";
+import { store as schedulerStore } from "/components/settings/scheduler/scheduler-store.js";
+import { store as settingsStore } from "/components/settings/settings-store.js";
// Tasks sidebar store: tasks list and selected task id
const model = {
@@ -50,8 +52,9 @@ const model = {
},
openDetail(taskId) {
- if (globalThis.openTaskDetail) {
- globalThis.openTaskDetail(taskId);
+ // Use the new settings modal store to open scheduler task detail
+ if (settingsStore?.openSchedulerTaskDetail) {
+ settingsStore.openSchedulerTaskDetail(taskId);
}
},
@@ -60,8 +63,8 @@ const model = {
},
deleteTask(taskId) {
- if (globalThis.deleteTaskGlobal) {
- globalThis.deleteTaskGlobal(taskId);
+ if (schedulerStore?.deleteTaskFromSidebar) {
+ schedulerStore.deleteTaskFromSidebar(taskId);
}
},
};
diff --git a/webui/components/sidebar/top-section/quick-actions.html b/webui/components/sidebar/top-section/quick-actions.html
index a4d76e147e..8b807e9ec7 100644
--- a/webui/components/sidebar/top-section/quick-actions.html
+++ b/webui/components/sidebar/top-section/quick-actions.html
@@ -3,6 +3,9 @@
@@ -13,7 +16,7 @@
Load Chat
Save Chat
Restart
-