diff --git a/core/__init__.py b/core/__init__.py
deleted file mode 100644
index 39bdd9c..0000000
--- a/core/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-core package exports (safe imports)
-"""
-
-# Optional: AvatarEngine may depend on extra libs (dotenv, openai, etc.)
-try:
- from .avatar_engine import AvatarEngine # noqa: F401
-except Exception:
- AvatarEngine = None # type: ignore
-
-from .avatar_system import AvatarSystem # noqa: F401
-from .workspace_manager import WorkspaceManager # noqa: F401
diff --git a/core/adapters/__init__.py b/core/adapters/__init__.py
deleted file mode 100644
index c001735..0000000
--- a/core/adapters/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Type
-
-from .base_adapter import BaseAdapter
-from .code_adapter import CodeAdapter
-from .document_adapter import DocumentAdapter
-from .spreadsheet_adapter import SpreadsheetAdapter
-from .pdf_adapter import PDFAdapter
-from .media_adapter import MediaAdapter
-
-ADAPTERS = {
- # code
- ".py": CodeAdapter,
- ".js": CodeAdapter,
- ".html": CodeAdapter,
- ".css": CodeAdapter,
- ".json": CodeAdapter,
- ".md": CodeAdapter,
-
- # docs
- ".txt": DocumentAdapter,
- ".docx": DocumentAdapter, # foundation (real docx later)
-
- # sheets
- ".xlsx": SpreadsheetAdapter,
-
- # pdf
- ".pdf": PDFAdapter,
-
- # media
- ".png": MediaAdapter,
- ".jpg": MediaAdapter,
- ".jpeg": MediaAdapter,
- ".webp": MediaAdapter,
- ".mp3": MediaAdapter,
- ".wav": MediaAdapter,
- ".mp4": MediaAdapter,
-}
-
-def get_adapter_for_path(path: Path) -> BaseAdapter:
- ext = path.suffix.lower()
- cls: Type[BaseAdapter] = ADAPTERS.get(ext, DocumentAdapter)
- return cls(path)
diff --git a/core/adapters/base_adapter.py b/core/adapters/base_adapter.py
deleted file mode 100644
index 11bbd87..0000000
--- a/core/adapters/base_adapter.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Any
-
-class BaseAdapter:
- def __init__(self, path: Path):
- self.path = Path(path)
- self.readonly = False
- self.data: Any = None
-
- def set_readonly(self, ro: bool):
- self.readonly = bool(ro)
-
- def read(self):
- raise NotImplementedError
-
- def write(self, content: Any):
- if self.readonly:
- raise PermissionError("Adapter is read-only.")
- self.data = content
-
- def save(self):
- raise NotImplementedError
diff --git a/core/adapters/code_adapter.py b/core/adapters/code_adapter.py
deleted file mode 100644
index e37bcbf..0000000
--- a/core/adapters/code_adapter.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Any
-from .base_adapter import BaseAdapter
-
-class CodeAdapter(BaseAdapter):
- def read(self):
- if self.path.exists():
- self.data = self.path.read_text(encoding="utf-8", errors="ignore")
- else:
- self.data = ""
-
- def save(self):
- if self.readonly:
- raise PermissionError("Read-only.")
- self.path.parent.mkdir(parents=True, exist_ok=True)
- self.path.write_text(str(self.data), encoding="utf-8")
diff --git a/core/adapters/document_adapter.py b/core/adapters/document_adapter.py
deleted file mode 100644
index 365581a..0000000
--- a/core/adapters/document_adapter.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Any
-from .base_adapter import BaseAdapter
-
-class DocumentAdapter(BaseAdapter):
- def read(self):
- if self.path.exists():
- self.data = self.path.read_text(encoding="utf-8", errors="ignore")
- else:
- self.data = ""
-
- def save(self):
- if self.readonly:
- raise PermissionError("Read-only.")
- self.path.parent.mkdir(parents=True, exist_ok=True)
- self.path.write_text(str(self.data), encoding="utf-8")
diff --git a/core/adapters/media_adapter.py b/core/adapters/media_adapter.py
deleted file mode 100644
index b205a75..0000000
--- a/core/adapters/media_adapter.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Any
-from .base_adapter import BaseAdapter
-
-class MediaAdapter(BaseAdapter):
- def read(self):
- if self.path.exists():
- self.data = self.path.read_bytes()
- else:
- self.data = b""
-
- def write(self, content: Any):
- if self.readonly:
- raise PermissionError("Read-only.")
- if not isinstance(content, (bytes, bytearray)):
- raise TypeError("MediaAdapter expects bytes.")
- self.data = bytes(content)
-
- def save(self):
- if self.readonly:
- raise PermissionError("Read-only.")
- self.path.parent.mkdir(parents=True, exist_ok=True)
- self.path.write_bytes(self.data or b"")
diff --git a/core/adapters/pdf_adapter.py b/core/adapters/pdf_adapter.py
deleted file mode 100644
index 360abaa..0000000
--- a/core/adapters/pdf_adapter.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Any, List
-from .base_adapter import BaseAdapter
-
-class PDFAdapter(BaseAdapter):
- """
- Adapter-based PDF handling foundation:
- - open (extract placeholder)
- - annotate (store notes)
- - rebuild workflow (future)
- """
- def read(self):
- self.data = {"pages": [], "annotations": []}
-
- def write(self, content: Any):
- if self.readonly:
- raise PermissionError("Read-only.")
- self.data = content
-
- def save(self):
- # future: rebuild PDF
- return
diff --git a/core/adapters/spreadsheet_adapter.py b/core/adapters/spreadsheet_adapter.py
deleted file mode 100644
index de24fea..0000000
--- a/core/adapters/spreadsheet_adapter.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Any, Dict
-from .base_adapter import BaseAdapter
-
-class SpreadsheetAdapter(BaseAdapter):
- """
- Edit-ready architecture: stores sheet dict in memory.
- Real XLSX writing can be added using openpyxl later.
- """
- def read(self):
- # placeholder structure
- self.data = {"Sheet1": [["A1","B1"],["A2","B2"]]}
-
- def save(self):
- # Placeholder: does not write xlsx yet
- # (architecture is ready for adapter-based expansion)
- return
diff --git a/core/agent.py b/core/agent.py
deleted file mode 100644
index c156b1f..0000000
--- a/core/agent.py
+++ /dev/null
@@ -1,215 +0,0 @@
-import logging
-import threading
-import time
-from typing import List, Dict, Optional, Callable
-
-from core.tools.router import ToolRouter
-from core.tools.verifier import Verifier
-try:
- from PySide6.QtCore import QObject, Signal
-except Exception:
- class QObject: # minimal stub
- pass
- def Signal(*a, **k):
- return None
-
-
-class IntelligentAgent(QObject):
- """
- THE BRAIN (v1.0)
- Orchestrates the Agent Loop: Plan -> Execute -> Observe -> Verify.
- """
- status_updated = Signal(str)
- thought_emitted = Signal(str)
- tool_invocation_emitted = Signal(dict) # tool, args
- tool_result_emitted = Signal(dict) # tool, success, stdout, stderr, exit_code
- plan_step_started = Signal(int, str)
- task_completed = Signal(bool, str)
- task_result_card_emitted = Signal(dict) # Final card data
-
- def __init__(self):
- super().__init__()
- self.router = ToolRouter.instance()
- self.verifier = Verifier()
- self._current_plan: List[str] = []
-
- # Bridge Router signals to Agent signals for NEXT 6
- self.router.command_completed.connect(self._on_tool_finished)
-
- def _on_tool_finished(self, result: dict):
- # Result contains: command, stdout, stderr, exit_code, success
- self.tool_result_emitted.emit({
- "tool": "RUN",
- **result
- })
-
- def execute_task(self, goal: str):
- """
- Main Agent Loop.
- 1. Understand -> Goal
- 2. Plan -> Steps
- 3. Loop -> Execute & Observe
- 4. Verify -> Finish
- """
- # STEP 1: Plan (Simulated/Canned for now, will connect to LLM later)
- self.status_updated.emit(f"Planning for: {goal}")
-
- # Simple heuristics for 'Fix my build' or 'Update README'
- if "readme" in goal.lower():
- self._current_plan = [
- "READ('README.md')",
- "WRITE('README.md', new_content)",
- "VERIFY"
- ]
- elif "fix" in goal.lower():
- self._current_plan = [
- "RUN('pytest')",
- "SEARCH('Error')",
- "READ('relevant_file.py')",
- "WRITE('relevant_file.py', patch)",
- "VERIFY"
- ]
- else:
- self._current_plan = ["RUN('dir')", "VERIFY"]
-
- self._run_loop()
-
- def _run_loop(self):
- success = True
- summary = "Task completed successfully."
- evidence_list = []
-
- for i, step in enumerate(self._current_plan):
- self.plan_step_started.emit(i, step)
- self.status_updated.emit(f"Step {i+1}: {step}")
- self.thought_emitted.emit(f"Executing: {step}")
-
- tool_res = {"success": True} # Default
-
- if "READ" in step:
- self.tool_invocation_emitted.emit({"tool": "READ", "path": "main.py"})
- tool_res = self.router.read_file("main.py")
- self.tool_result_emitted.emit({"tool": "READ", **tool_res})
- evidence_list.append(f"Read main.py ({len(tool_res.get('content', ''))} bytes)")
- elif "WRITE" in step:
- self.tool_invocation_emitted.emit({"tool": "WRITE", "path": "main.py", "mode": "patch"})
- tool_res = self.router.write_file("main.py", "# Agent Updated", mode="patch")
- self.tool_result_emitted.emit({"tool": "WRITE", **tool_res})
- evidence_list.append("Patched main.py")
- elif "RUN" in step:
- self.tool_invocation_emitted.emit({"tool": "RUN", "cmd": "dir"})
- # RUN is async in ToolRouter, the result comes via signal
- # For this sequence runner, we'll simulate observation
- self.router.run_command("dir")
- evidence_list.append("Executed system command 'dir'")
- elif "VERIFY" in step:
- self.thought_emitted.emit("Running safety verification...")
- v_res = self.verifier.verify_syntax(".")
- if not v_res["success"]:
- success = False
- summary = f"Verification failed: {v_res.get('error')}"
- self.thought_emitted.emit(f"ERROR: {summary}")
- break
- self.thought_emitted.emit("Verification passed.")
- evidence_list.append("Syntax verification passed")
-
- # Emit Final Result Card (Strict Visibility)
- card = {
- "title": "Agentic Task",
- "status": "SUCCESS" if success else "FAILED",
- "evidence": evidence_list,
- "location": "Terminal Panel & Logs",
- "summary": summary
- }
- self.task_result_card_emitted.emit(card)
- self.task_completed.emit(success, summary)
-
-# ==========================
-# FocusGuardian (Supervisor)
-# ==========================
-import threading
-import time
-from typing import Optional, Callable
-
-
-class FocusGuardian:
- """
- Not a new agent brain — a supervisor around existing systems.
-
- Purpose:
- - Keep distraction control during focus sessions (exam/deep_work).
- - Provide gentle nudges (optional voice) with throttling.
- - Never executes tools automatically (safe & professor-friendly).
- """
-
- def __init__(
- self,
- workspace_controller,
- *,
- voice_engine=None,
- log_cb: Optional[Callable[[str], None]] = None,
- ):
- self.ws = workspace_controller
- self.voice = voice_engine
- self.log = log_cb or (lambda m: None)
-
- self._running = False
- self._thread: Optional[threading.Thread] = None
- self._last_nudge_ts = 0.0
-
- def start(self) -> None:
- if self._running:
- return
- self._running = True
- self._thread = threading.Thread(target=self._loop, daemon=True)
- self._thread.start()
- self.log("🛡️ FocusGuardian started")
-
- def stop(self) -> None:
- self._running = False
- self.log("🛑 FocusGuardian stopped")
-
- def _loop(self) -> None:
- while self._running:
- try:
- self.tick()
- except Exception:
- pass
- time.sleep(1.0)
-
- def tick(self) -> None:
- """
- Once per second background tick.
- """
- try:
- mode = getattr(self.ws, "active_mode", "study")
- except Exception:
- mode = "study"
-
- try:
- secs_left = int(self.ws.focus_seconds_left())
- except Exception:
- secs_left = 0
-
- if secs_left <= 0:
- return
-
- # Only nudge in strict modes.
- if mode not in ("deep_work", "exam"):
- return
-
- now = time.time()
- # Throttle: max once per 3 minutes
- if now - self._last_nudge_ts < 180:
- return
- self._last_nudge_ts = now
-
- mm = max(0, secs_left // 60)
- msg = f"Stay with it. {mm} minutes left. One clean push. 🧠✨"
- self.log(f"🛡️ FocusGuardian nudge: {msg}")
-
- if self.voice is not None:
- try:
- self.voice.speak(msg, language="en-IN")
- except Exception:
- pass
diff --git a/core/agents/__init__.py b/core/agents/__init__.py
deleted file mode 100644
index 7d29009..0000000
--- a/core/agents/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from __future__ import annotations
-from typing import Any
-
-from core.adapters.code_adapter import CodeAdapter
-from core.adapters.document_adapter import DocumentAdapter
-from core.adapters.spreadsheet_adapter import SpreadsheetAdapter
-from core.adapters.pdf_adapter import PDFAdapter
-from core.adapters.media_adapter import MediaAdapter
-
-from .code_agent import CodeAgent
-from .document_agent import DocumentAgent
-from .spreadsheet_agent import SpreadsheetAgent
-from .pdf_agent import PDFAgent
-from .media_agent import MediaAgent
-
-def get_agent_for_adapter(adapter: Any):
- if isinstance(adapter, CodeAdapter):
- return CodeAgent(adapter)
- if isinstance(adapter, SpreadsheetAdapter):
- return SpreadsheetAgent(adapter)
- if isinstance(adapter, PDFAdapter):
- return PDFAgent(adapter)
- if isinstance(adapter, MediaAdapter):
- return MediaAgent(adapter)
- return DocumentAgent(adapter)
diff --git a/core/agents/base_agent.py b/core/agents/base_agent.py
deleted file mode 100644
index f7df009..0000000
--- a/core/agents/base_agent.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from __future__ import annotations
-from typing import Any, List, Tuple
-
-class BaseAgent:
- def __init__(self, adapter: Any):
- self.adapter = adapter
-
- def highlight(self, pattern: str) -> List[Tuple[int,int]]:
- # simple highlight in text
- data = self.adapter.data
- if not isinstance(data, str) or not pattern:
- return []
- res = []
- start = 0
- p = pattern.lower()
- low = data.lower()
- while True:
- i = low.find(p, start)
- if i == -1:
- break
- res.append((i, i+len(pattern)))
- start = i + len(pattern)
- return res
-
- def analyse(self):
- return {"type": "base", "info": "No analysis implemented"}
diff --git a/core/agents/code_agent.py b/core/agents/code_agent.py
deleted file mode 100644
index fce00b3..0000000
--- a/core/agents/code_agent.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from __future__ import annotations
-from .base_agent import BaseAgent
-
-class CodeAgent(BaseAgent):
- def analyse(self):
- txt = self.adapter.data if isinstance(self.adapter.data, str) else ""
- lines = txt.splitlines()
- return {"type": "code", "lines": len(lines)}
diff --git a/core/agents/document_agent.py b/core/agents/document_agent.py
deleted file mode 100644
index f250cd4..0000000
--- a/core/agents/document_agent.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from __future__ import annotations
-from .base_agent import BaseAgent
-
-class DocumentAgent(BaseAgent):
- def analyse(self):
- txt = self.adapter.data if isinstance(self.adapter.data, str) else ""
- words = txt.split()
- return {"type": "document", "words": len(words), "preview": " ".join(words[:40])}
diff --git a/core/agents/media_agent.py b/core/agents/media_agent.py
deleted file mode 100644
index 76fc675..0000000
--- a/core/agents/media_agent.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from __future__ import annotations
-from .base_agent import BaseAgent
-
-class MediaAgent(BaseAgent):
- def highlight(self, pattern: str):
- return [] # no text highlight for binary
-
- def analyse(self):
- b = self.adapter.data if isinstance(self.adapter.data, (bytes, bytearray)) else b""
- return {"type": "media", "bytes": len(b)}
diff --git a/core/agents/pdf_agent.py b/core/agents/pdf_agent.py
deleted file mode 100644
index 1c89375..0000000
--- a/core/agents/pdf_agent.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from __future__ import annotations
-from .base_agent import BaseAgent
-
-class PDFAgent(BaseAgent):
- def analyse(self):
- data = self.adapter.data if isinstance(self.adapter.data, dict) else {}
- return {"type": "pdf", "keys": list(data.keys())}
diff --git a/core/agents/spreadsheet_agent.py b/core/agents/spreadsheet_agent.py
deleted file mode 100644
index bc94bf2..0000000
--- a/core/agents/spreadsheet_agent.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from __future__ import annotations
-from .base_agent import BaseAgent
-
-class SpreadsheetAgent(BaseAgent):
- def highlight(self, pattern: str):
- data = self.adapter.data
- if not isinstance(data, dict) or not pattern:
- return []
- hits = []
- for sheet, rows in data.items():
- for r_i, row in enumerate(rows):
- for c_i, val in enumerate(row):
- if pattern.lower() in str(val).lower():
- hits.append((sheet, r_i, c_i))
- return hits
-
- def analyse(self):
- data = self.adapter.data if isinstance(self.adapter.data, dict) else {}
- return {"type": "spreadsheet", "sheets": list(data.keys())}
diff --git a/core/app.py b/core/app.py
deleted file mode 100644
index d1c7192..0000000
--- a/core/app.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import sys
-import argparse
-import math
-import time
-import random
-
-from PySide6.QtWidgets import QApplication
-from PySide6.QtCore import QTimer
-
-from core.state import AppState
-from core.ui.main_window import HeroMainWindow
-
-def run_demo_simulation():
- """
- Deterministic EI simulation for demo purposes.
- Oscillates focus/stress/energy based on time.
- """
- state = AppState.instance()
- t = time.time()
-
- # Deterministic Sine Waves
- # Focus: Smooth cycle 0.2 -> 0.9
- focus = 0.55 + 0.35 * math.sin(t * 0.5)
-
- # Stress: Rises with focus, drops in break
- # We'll just simulate a complex wave
- stress = 0.3 + 0.2 * math.sin(t * 0.2) + 0.1 * math.cos(t * 0.8)
-
- # Energy: Slow decay + recovery
- energy = 0.6 + 0.3 * math.sin(t * 0.1)
-
- # Curiosity: Random drift
- curiosity = 0.5 + 0.2 * math.sin(t * 0.3)
-
- state.update_ei(
- focus=focus,
- stress=stress,
- energy=energy,
- curiosity=curiosity
- )
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("--demo", action="store_true", help="Run in Hero Demo mode with simulated EI")
- args = parser.parse_args()
-
- app = QApplication(sys.argv)
-
- # Setup Main Window
- window = HeroMainWindow()
- window.show()
-
- # Demo Mode Logic
- if args.demo:
- print("🌟 Etherea Hero Demo Started")
- print(" - Deterministic EI Simulation: ON")
- print(" - Sensors: BYPASSED")
-
- # 10Hz EI update loop
- demo_timer = QTimer()
- demo_timer.timeout.connect(run_demo_simulation)
- demo_timer.start(100)
-
- # KEEP REFERENCE to prevent GC
- window._demo_timer_ref = demo_timer
-
- else:
- # 🌟 Normal Mode: Living OS
- print("Etherea Living OS Activated")
- from core.senses import InputSenses
- from core.state import AppState
-
- from core.signals import signals
-
- state = AppState.instance()
- senses = InputSenses()
-
- # Connect Senses -> System
- senses.activity_level_changed.connect(lambda level: state.update_ei(energy=0.3 + level * 0.7))
- senses.pattern_detected.connect(signals.pattern_detected.emit)
- senses.start()
-
- # Keep ref
- window._senses_ref = senses
-
- sys.exit(app.exec())
-
-if __name__ == "__main__":
- main()
diff --git a/core/app_controller.py b/core/app_controller.py
deleted file mode 100644
index 6feafc8..0000000
--- a/core/app_controller.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Optional, TYPE_CHECKING
-from typing import Optional
-
-from PySide6.QtCore import QObject, QTimer
-from PySide6.QtWidgets import QApplication
-
-from core.app_runtime import user_data_dir
-from core.ei_engine import EIEngine
-from core.ui.main_window_v2 import EthereaMainWindowV2
-from core.signals import signals
-if TYPE_CHECKING:
- from core.voice_engine import VoiceEngine
-from core.voice_engine import get_voice_engine, VoiceEngine
-from core.signals import signals
-
-
-class AppController(QObject):
- def __init__(self, app: QApplication) -> None:
- super().__init__()
- self.app = app
- self.window = EthereaMainWindowV2()
- self.ei_engine = EIEngine()
- self.voice_engine: Optional["VoiceEngine"] = None
- self.voice_engine: Optional[VoiceEngine] = None
- self._log_path = Path(user_data_dir()) / "etherea.log"
-
- self._connect_signals()
-
- self._heartbeat = QTimer(self)
- self._heartbeat.setInterval(250)
- self._heartbeat.timeout.connect(self._tick)
-
- def _connect_signals(self) -> None:
- try:
- signals.emotion_updated.connect(self.window.on_emotion_updated)
- signals.system_log.connect(self.window.log_ui)
- signals.system_log.connect(self._write_log)
- except Exception:
- pass
-
- def _tick(self) -> None:
- sync = getattr(self.window, "_sync_aurora_state", None)
- if callable(sync):
- sync()
-
- def start(self) -> None:
- self.ei_engine.start()
- self._heartbeat.start()
- self._log("✅ EI Engine started.")
-
- try:
- from core.voice_engine import get_voice_engine
- self.voice_engine = get_voice_engine()
- if self.voice_engine and getattr(self.voice_engine, "has_mic", False):
- self.voice_engine.start_command_loop()
- self._log("✅ Voice engine started.")
- else:
- self._log("🔇 Voice engine unavailable (no mic or missing deps).")
- except Exception as exc:
- self._log(f"⚠️ Voice engine init failed: {exc}")
-
- def shutdown(self) -> None:
- try:
- self._heartbeat.stop()
- except Exception:
- pass
- try:
- self.ei_engine.stop()
- except Exception:
- pass
- self._log("✅ Shutdown complete.")
-
- def _write_log(self, message: str) -> None:
- try:
- self._log_path.parent.mkdir(parents=True, exist_ok=True)
- if not self._log_path.exists():
- self._log_path.touch()
- with self._log_path.open("a", encoding="utf-8") as f:
- f.write(message + "\n")
- except Exception:
- pass
-
- def _log(self, message: str) -> None:
- try:
- signals.system_log.emit(message)
- except Exception:
- self.window.log_ui(message)
diff --git a/core/app_registry.py b/core/app_registry.py
deleted file mode 100644
index a05877b..0000000
--- a/core/app_registry.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Dict, List, Optional
-import json
-
-
-@dataclass(frozen=True)
-class AppSpec:
- app_id: str
- name: str
- path: str
- args: List[str]
-
-
-class AppRegistry:
- def __init__(self, path: str = "data/apps.json") -> None:
- self._path = Path(path)
- self._apps: Dict[str, AppSpec] = {}
- self._load()
-
- def _load(self) -> None:
- if not self._path.exists():
- self._apps = {}
- return
- payload = json.loads(self._path.read_text(encoding="utf-8"))
- apps = payload.get("apps", [])
- self._apps = {
- item["app_id"]: AppSpec(
- app_id=item["app_id"],
- name=item.get("name", item["app_id"]),
- path=item.get("path", ""),
- args=list(item.get("args", [])),
- )
- for item in apps
- }
-
- def list_apps(self) -> List[AppSpec]:
- return list(self._apps.values())
-
- def get(self, app_id: str) -> Optional[AppSpec]:
- return self._apps.get(app_id)
diff --git a/core/app_runtime.py b/core/app_runtime.py
deleted file mode 100644
index 63268fb..0000000
--- a/core/app_runtime.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from __future__ import annotations
-
-import os
-import sys
-from pathlib import Path
-
-
-def is_frozen() -> bool:
- return bool(getattr(sys, "frozen", False)) or hasattr(sys, "_MEIPASS")
-
-
-def resource_path(relative: str) -> str:
- base = Path(getattr(sys, "_MEIPASS", Path(__file__).resolve().parents[1]))
- return str(base / relative)
-
-
-def user_data_dir(app_name: str = "EthereaOS") -> Path:
- if sys.platform.startswith("win"):
- root = Path(os.environ.get("LOCALAPPDATA", Path.home() / "AppData" / "Local"))
- elif sys.platform.startswith("darwin"):
- root = Path.home() / "Library" / "Application Support"
- else:
- root = Path(os.environ.get("XDG_DATA_HOME", Path.home() / ".local" / "share"))
- path = root / app_name
- path.mkdir(parents=True, exist_ok=True)
- return path
diff --git a/core/assets/animations/splash_pulse.json b/core/assets/animations/splash_pulse.json
deleted file mode 100644
index 83ab8d9..0000000
--- a/core/assets/animations/splash_pulse.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "v": "5.7.4",
- "fr": 30,
- "ip": 0,
- "op": 90,
- "w": 512,
- "h": 512,
- "nm": "Etherea Splash Pulse (Placeholder)",
- "ddd": 0,
- "assets": [],
- "layers": [
- {
- "ddd": 0,
- "ind": 1,
- "ty": 4,
- "nm": "Pulse Circle",
- "sr": 1,
- "ks": {
- "o": { "a": 0, "k": 100 },
- "r": { "a": 0, "k": 0 },
- "p": { "a": 0, "k": [256, 256, 0] },
- "a": { "a": 0, "k": [0, 0, 0] },
- "s": {
- "a": 1,
- "k": [
- { "t": 0, "s": [40, 40, 100] },
- { "t": 45, "s": [115, 115, 100] },
- { "t": 90, "s": [40, 40, 100] }
- ]
- }
- },
- "shapes": [
- { "ty": "el", "p": { "a": 0, "k": [0, 0] }, "s": { "a": 0, "k": [250, 250] }, "nm": "Ellipse" },
- { "ty": "st", "c": { "a": 0, "k": [0.08, 0.78, 0.7, 1] }, "o": { "a": 0, "k": 100 }, "w": { "a": 0, "k": 10 }, "lc": 2, "lj": 2, "nm": "Stroke" },
- { "ty": "tr", "p": { "a": 0, "k": [0, 0] }, "a": { "a": 0, "k": [0, 0] }, "s": { "a": 0, "k": [100, 100] }, "r": { "a": 0, "k": 0 }, "o": { "a": 0, "k": 100 }, "nm": "Transform" }
- ],
- "ip": 0,
- "op": 90,
- "st": 0,
- "bm": 0
- }
- ],
- "markers": []
-}
diff --git a/core/assets/audio/etherea_theme_a.wav b/core/assets/audio/etherea_theme_a.wav
deleted file mode 100644
index 228fc9c..0000000
Binary files a/core/assets/audio/etherea_theme_a.wav and /dev/null differ
diff --git a/core/assets/audio/etherea_theme_b.wav b/core/assets/audio/etherea_theme_b.wav
deleted file mode 100644
index 353eb6f..0000000
Binary files a/core/assets/audio/etherea_theme_b.wav and /dev/null differ
diff --git a/core/assets/audio/etherea_theme_c.wav b/core/assets/audio/etherea_theme_c.wav
deleted file mode 100644
index 0b6afad..0000000
Binary files a/core/assets/audio/etherea_theme_c.wav and /dev/null differ
diff --git a/core/assets/avatar.png b/core/assets/avatar.png
deleted file mode 100644
index 4ccce92..0000000
--- a/core/assets/avatar.png
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-
-
Wikimedia Error
-
-
-
-
-
-
-
Error
-
-
Our servers are currently under maintenance or experiencing a technical issue
-
-
-
-
diff --git a/core/assets/avatar_hero/base.png b/core/assets/avatar_hero/base.png
deleted file mode 100644
index 47e9632..0000000
Binary files a/core/assets/avatar_hero/base.png and /dev/null differ
diff --git a/core/assets/avatar_hero/indian_mentor.png b/core/assets/avatar_hero/indian_mentor.png
deleted file mode 100644
index 1e9f357..0000000
Binary files a/core/assets/avatar_hero/indian_mentor.png and /dev/null differ
diff --git a/core/assets/avatar_manifest.json b/core/assets/avatar_manifest.json
deleted file mode 100644
index 76118b8..0000000
--- a/core/assets/avatar_manifest.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "style": "high-res 3D soft disney non-human non-anime",
- "supported_languages": [
- "English",
- "Hindi",
- "Kannada",
- "Tamil",
- "Telugu"
- ],
- "task_plan": [
- "Set up the database",
- "Implement the AI avatar core",
- "Integrate emotional intelligence (EI) signals",
- "Add high resolution 3D visuals for the avatar",
- "Enable multilingual support (Hindi, Kannada, Tamil, Telugu)",
- "Create cinematic surprise interactions",
- "Refine persona to avoid saying 'can't' except when unethical",
- "Develop step‑by‑step task planning and tracking system"
- ]
-}
\ No newline at end of file
diff --git a/core/assets/etherea_assets_manifest.json b/core/assets/etherea_assets_manifest.json
deleted file mode 100644
index 6f2f23f..0000000
--- a/core/assets/etherea_assets_manifest.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "audio": [
- "core/assets/audio/etherea_theme_a.wav",
- "core/assets/audio/etherea_theme_b.wav",
- "core/assets/audio/etherea_theme_c.wav"
- ],
- "animations": [
- "core/assets/animations/splash_pulse.json"
- ],
- "models": [
- "core/assets/models/aurora_placeholder.gltf",
- "core/assets/models/aurora_placeholder.bin"
- ],
- "notes": "Placeholder premium assets (synthetic, non-copyright). Replace with your real packs later."
-}
diff --git a/core/assets/models/aurora_placeholder.bin b/core/assets/models/aurora_placeholder.bin
deleted file mode 100644
index f9f96d8..0000000
Binary files a/core/assets/models/aurora_placeholder.bin and /dev/null differ
diff --git a/core/assets/models/aurora_placeholder.gltf b/core/assets/models/aurora_placeholder.gltf
deleted file mode 100644
index ecb109c..0000000
--- a/core/assets/models/aurora_placeholder.gltf
+++ /dev/null
@@ -1,62 +0,0 @@
-{
- "asset": {
- "version": "2.0",
- "generator": "Etherea Placeholder"
- },
- "scenes": [
- {
- "nodes": [
- 0
- ]
- }
- ],
- "scene": 0,
- "nodes": [
- {
- "mesh": 0,
- "name": "AuroraPlaceholder"
- }
- ],
- "meshes": [
- {
- "primitives": [
- {
- "attributes": {
- "POSITION": 0
- }
- }
- ]
- }
- ],
- "buffers": [
- {
- "byteLength": 36,
- "uri": "aurora_placeholder.bin"
- }
- ],
- "bufferViews": [
- {
- "buffer": 0,
- "byteOffset": 0,
- "byteLength": 36
- }
- ],
- "accessors": [
- {
- "bufferView": 0,
- "componentType": 5126,
- "count": 3,
- "type": "VEC3",
- "max": [
- 1.0,
- 1.0,
- 0.0
- ],
- "min": [
- 0.0,
- 0.0,
- 0.0
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/core/audio_analysis/beat_detector.py b/core/audio_analysis/beat_detector.py
deleted file mode 100644
index b476df6..0000000
--- a/core/audio_analysis/beat_detector.py
+++ /dev/null
@@ -1,125 +0,0 @@
-from __future__ import annotations
-from dataclasses import dataclass
-from typing import List, Tuple
-import wave
-import struct
-import math
-
-
-@dataclass
-class BeatPoint:
- t: float
- strength: float
-
-
-def _read_wav_mono(path: str, max_seconds: float = 30.0) -> Tuple[int, List[float]]:
- """
- Read WAV file and return (sample_rate, mono_samples[-1..1]).
- Pure python, no numpy.
- """
- with wave.open(path, "rb") as wf:
- sr = wf.getframerate()
- n_channels = wf.getnchannels()
- sampwidth = wf.getsampwidth()
- n_frames = wf.getnframes()
-
- max_frames = int(min(n_frames, max_seconds * sr))
- raw = wf.readframes(max_frames)
-
- # decode PCM
- if sampwidth == 2:
- fmt = "<" + "h" * (len(raw) // 2)
- data = struct.unpack(fmt, raw)
- # mono mix
- if n_channels == 2:
- mono = [(data[i] + data[i+1]) / 2 for i in range(0, len(data), 2)]
- else:
- mono = list(data)
- # normalize
- peak = max(1, max(abs(x) for x in mono))
- samples = [x / peak for x in mono]
- return sr, samples
-
- raise ValueError("Only 16-bit PCM WAV supported (sampwidth=2).")
-
-
-def estimate_bpm_and_beats(wav_path: str, window_ms: int = 50) -> Tuple[float, List[BeatPoint]]:
- """
- Very lightweight beat estimation:
- 1) compute short-time energy envelope
- 2) detect peaks
- 3) estimate BPM from average peak interval
- Returns: (bpm, beats)
- """
- sr, samples = _read_wav_mono(wav_path, max_seconds=30.0)
-
- hop = int(sr * (window_ms / 1000.0))
- hop = max(1, hop)
-
- # energy envelope
- env = []
- for i in range(0, len(samples), hop):
- chunk = samples[i:i+hop]
- if not chunk:
- break
- e = sum(x*x for x in chunk) / max(1, len(chunk))
- env.append(e)
-
- if len(env) < 10:
- return 120.0, []
-
- # smooth
- smooth = []
- k = 4
- for i in range(len(env)):
- lo = max(0, i-k)
- hi = min(len(env), i+k+1)
- smooth.append(sum(env[lo:hi]) / (hi-lo))
-
- # dynamic threshold
- mean = sum(smooth) / len(smooth)
- thr = mean * 1.6
-
- # peak pick
- peaks = []
- for i in range(1, len(smooth)-1):
- if smooth[i] > thr and smooth[i] > smooth[i-1] and smooth[i] > smooth[i+1]:
- t = (i * hop) / sr
- strength = min(1.0, (smooth[i] / (thr + 1e-9)) / 2.0)
- peaks.append(BeatPoint(t=t, strength=strength))
-
- # if too many peaks, raise threshold
- if len(peaks) > 220:
- thr = mean * 2.2
- peaks2 = []
- for i in range(1, len(smooth)-1):
- if smooth[i] > thr and smooth[i] > smooth[i-1] and smooth[i] > smooth[i+1]:
- t = (i * hop) / sr
- strength = min(1.0, (smooth[i] / (thr + 1e-9)) / 2.0)
- peaks2.append(BeatPoint(t=t, strength=strength))
- peaks = peaks2
-
- # BPM estimate from median peak interval
- if len(peaks) < 4:
- return 120.0, peaks
-
- intervals = []
- for a, b in zip(peaks, peaks[1:]):
- dt = b.t - a.t
- if 0.2 <= dt <= 1.2: # plausible beat interval range
- intervals.append(dt)
-
- if not intervals:
- return 120.0, peaks
-
- intervals.sort()
- med = intervals[len(intervals)//2]
- bpm = 60.0 / med
-
- # clamp bpm to sane range
- while bpm < 70:
- bpm *= 2
- while bpm > 190:
- bpm /= 2
-
- return float(round(bpm, 2)), peaks
diff --git a/core/audio_analysis/beat_to_ui.py b/core/audio_analysis/beat_to_ui.py
deleted file mode 100644
index 8775a0c..0000000
--- a/core/audio_analysis/beat_to_ui.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from __future__ import annotations
-from typing import List, Dict, Any
-
-
-def beats_to_ui_effects(beats, base_intensity: float = 1.15, downbeat_intensity: float = 1.55) -> List[Dict[str, Any]]:
- """
- Convert beat list -> Aurora ring pulse UI effects timeline.
- beats: list of objects with .t and .strength
- """
- effects: List[Dict[str, Any]] = []
- for b in beats:
- effects.append({
- "t": float(getattr(b, "t", 0.0)),
- "type": "ring_pulse",
- "dur": 0.18,
- "intensity": downbeat_intensity if float(getattr(b, "strength", 0.5)) >= 0.9 else base_intensity
- })
- return effects
diff --git a/core/audio_analysis/song_cache.py b/core/audio_analysis/song_cache.py
deleted file mode 100644
index ebd1e62..0000000
--- a/core/audio_analysis/song_cache.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-
-from core.app_runtime import user_data_dir
-
-
-CACHE_DIR = user_data_dir() / "music_cache"
-
-
-def ensure_cache_dir() -> Path:
- CACHE_DIR.mkdir(parents=True, exist_ok=True)
- return CACHE_DIR
-
-
-def safe_slug(name: str) -> str:
- name = (name or "").strip().lower()
- keep = []
- for ch in name:
- if ch.isalnum() or ch in ("-", "_"):
- keep.append(ch)
- elif ch.isspace():
- keep.append("_")
- out = "".join(keep).strip("_")
- return out[:80] if out else "song"
-
-
-def cached_song_path(song_name: str, ext: str = "wav") -> Path:
- ensure_cache_dir()
- return CACHE_DIR / f"{safe_slug(song_name)}.{ext}"
-
-
-def find_cached(song_name: str):
- """
- Returns cached file path if exists (.wav preferred).
- """
- ensure_cache_dir()
- slug = safe_slug(song_name)
-
- # prefer wav
- wav = CACHE_DIR / f"{slug}.wav"
- if wav.exists():
- return wav
-
- mp3 = CACHE_DIR / f"{slug}.mp3"
- if mp3.exists():
- return mp3
-
- # fallback: any file containing slug
- for p in CACHE_DIR.glob("*"):
- if slug in p.name.lower():
- return p
-
- return None
diff --git a/core/audio_analysis/song_resolver.py b/core/audio_analysis/song_resolver.py
deleted file mode 100644
index c5583ac..0000000
--- a/core/audio_analysis/song_resolver.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Optional, Tuple
-import os
-
-try:
- import requests
-except Exception:
- requests = None # optional on Termux/CI
-
-from core.audio_analysis.song_cache import find_cached, cached_song_path, ensure_cache_dir
-
-
-DEFAULT_LOCAL_DIRS = [
- "/sdcard/Music",
- "/sdcard/Download",
- str(Path.home() / "Music"),
- str(Path.home() / "Downloads"),
-]
-
-
-def search_local(song_name: str, exts=(".wav", ".mp3")) -> Optional[Path]:
- """
- Search common folders for a song that matches name (case-insensitive).
- """
- name = (song_name or "").strip().lower()
- if not name:
- return None
-
- dirs = list(DEFAULT_LOCAL_DIRS)
-
- # allow override from env (comma-separated)
- extra = os.environ.get("ETHEREA_MUSIC_DIRS", "")
- if extra.strip():
- dirs.extend([d.strip() for d in extra.split(",") if d.strip()])
-
- for d in dirs:
- base = Path(d).expanduser()
- if not base.exists():
- continue
-
- try:
- for p in base.rglob("*"):
- if p.is_file() and p.suffix.lower() in exts:
- if name in p.stem.lower():
- return p
- except Exception:
- continue
-
- return None
-
-
-def download_to_cache(song_name: str, url: str) -> Path:
- """
- Download from a DIRECT URL into cache (safe/legal).
- """
- ensure_cache_dir()
- target = cached_song_path(song_name, ext="wav")
-
- # infer extension if URL ends with .mp3 etc
- lower = (url or "").lower()
- if lower.endswith(".mp3"):
- target = cached_song_path(song_name, ext="mp3")
- elif lower.endswith(".wav"):
- target = cached_song_path(song_name, ext="wav")
-
- r = requests.get(url, stream=True, timeout=30)
- r.raise_for_status()
-
- with open(target, "wb") as f:
- for chunk in r.iter_content(chunk_size=1024 * 256):
- if chunk:
- f.write(chunk)
-
- return target
-
-
-def resolve_song(song_name: str, url: Optional[str] = None) -> Tuple[Optional[Path], str]:
- """
- Resolve song path using:
- 1) cache
- 2) local search
- 3) direct URL download (if provided)
- Returns: (path_or_none, status_message)
- """
- # 1) cache
- cached = find_cached(song_name)
- if cached:
- return Path(cached), "cache"
-
- # 2) local search
- local = search_local(song_name)
- if local:
- return Path(local), "local"
-
- # 3) URL download (safe only if user provides URL)
- if url and url.strip():
- try:
- p = download_to_cache(song_name, url.strip())
- return p, "downloaded"
- except Exception as e:
- return None, f"download_failed: {e}"
-
- return None, "not_found"
diff --git a/core/audio_engine.py b/core/audio_engine.py
deleted file mode 100644
index 01061fa..0000000
--- a/core/audio_engine.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import os
-import threading
-import time
-
-try:
- import pygame
-except Exception:
- pygame = None
-
-
-class AudioEngine:
- """
- Background audio engine for Etherea.
- - Works on Windows + Linux (AppImage)
- - Plays looping WAV/MP3 using pygame mixer
- """
-
- def __init__(self):
- self._thread = None
- self._stop_flag = threading.Event()
- self._is_playing = False
- self._volume = 0.5
-
- # Default track (you can change later)
- self.default_track = os.path.join("core", "assets", "audio", "etherea_theme_a.wav")
-
- def set_volume(self, vol: float):
- self._volume = max(0.0, min(1.0, float(vol)))
- if pygame:
- try:
- pygame.mixer.music.set_volume(self._volume)
- except Exception:
- pass
-
- def start(self, track_path: str | None = None, loop: bool = True):
- if self._is_playing:
- return
-
- if pygame is None:
- print("[AudioEngine] pygame not installed, audio disabled.")
- return
-
- track = track_path or self.default_track
-
- def _runner():
- try:
- pygame.mixer.init()
- pygame.mixer.music.set_volume(self._volume)
-
- if not os.path.exists(track):
- print(f"[AudioEngine] Track not found: {track}")
- return
-
- pygame.mixer.music.load(track)
- pygame.mixer.music.play(-1 if loop else 0)
-
- self._is_playing = True
- while not self._stop_flag.is_set():
- time.sleep(0.2)
-
- except Exception as e:
- print("[AudioEngine] Error:", e)
- finally:
- try:
- if pygame:
- pygame.mixer.music.stop()
- pygame.mixer.quit()
- except Exception:
- pass
- self._is_playing = False
- self._stop_flag.clear()
-
- self._thread = threading.Thread(target=_runner, daemon=True)
- self._thread.start()
-
- def stop(self):
- self._stop_flag.set()
diff --git a/core/aurora_actions.py b/core/aurora_actions.py
deleted file mode 100644
index cfc8c8f..0000000
--- a/core/aurora_actions.py
+++ /dev/null
@@ -1,121 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Iterable, List, Optional
-
-
-@dataclass(frozen=True)
-class ActionSpec:
- action_id: str
- label: str
- intent: str
- modes: tuple[str, ...]
- category: str
- priority: int = 50
- requires_session: bool = False
- dnd_blocked: bool = True
-
-
-class ActionRegistry:
- def __init__(self, actions: Iterable[ActionSpec]):
- self._actions = list(actions)
- self._index = {action.action_id: action for action in self._actions}
-
- @classmethod
- def default(cls) -> "ActionRegistry":
- return cls(
- [
- ActionSpec(
- action_id="set_mode_idle",
- label="Idle Mode",
- intent="set_mode_idle",
- modes=("all",),
- category="mode",
- priority=10,
- dnd_blocked=False,
- ),
- ActionSpec(
- action_id="set_mode_focus",
- label="Focus Mode",
- intent="set_mode_focus",
- modes=("all",),
- category="mode",
- priority=11,
- dnd_blocked=False,
- ),
- ActionSpec(
- action_id="set_mode_break",
- label="Break Mode",
- intent="set_mode_break",
- modes=("all",),
- category="mode",
- priority=12,
- dnd_blocked=False,
- ),
- ActionSpec(
- action_id="workspace_create",
- label="Create Workspace",
- intent="workspace_create",
- modes=("idle", "focus", "break"),
- category="workspace",
- priority=20,
- ),
- ActionSpec(
- action_id="workspace_resume",
- label="Resume Workspace",
- intent="workspace_resume",
- modes=("idle", "focus", "break"),
- category="workspace",
- priority=21,
- dnd_blocked=False,
- ),
- ActionSpec(
- action_id="workspace_save_snapshot",
- label="Save Snapshot",
- intent="workspace_save_snapshot",
- modes=("idle", "focus", "break"),
- category="workspace",
- priority=22,
- requires_session=True,
- ),
- ActionSpec(
- action_id="os_open_workspace_folder",
- label="Open Workspace Folder",
- intent="os_open_workspace_folder",
- modes=("idle", "focus", "break"),
- category="os",
- priority=23,
- requires_session=True,
- ),
- ActionSpec(
- action_id="toggle_dnd_on",
- label="Enable DND",
- intent="toggle_dnd_on",
- modes=("idle", "focus", "break"),
- category="override",
- priority=30,
- dnd_blocked=False,
- ),
- ActionSpec(
- action_id="toggle_dnd_off",
- label="Override DND",
- intent="toggle_dnd_off",
- modes=("blocked", "error"),
- category="override",
- priority=1,
- dnd_blocked=False,
- ),
- ]
- )
-
- def list_actions(self) -> List[ActionSpec]:
- return list(self._actions)
-
- def get(self, action_id: str) -> Optional[ActionSpec]:
- return self._index.get(action_id)
-
- def action_for_intent(self, intent: str) -> Optional[ActionSpec]:
- for action in self._actions:
- if action.intent == intent:
- return action
- return None
diff --git a/core/aurora_pipeline.py b/core/aurora_pipeline.py
deleted file mode 100644
index 092c215..0000000
--- a/core/aurora_pipeline.py
+++ /dev/null
@@ -1,188 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Callable, Dict, List, Optional
-
-from core.aurora_actions import ActionRegistry, ActionSpec
-from core.aurora_state import AuroraStateStore
-from core.event_model import Event, create_event
-from core.event_bus import EventBus, event_bus
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.os_pipeline import OSPipeline
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-
-
-@dataclass(frozen=True)
-class AuroraEvent:
- event_type: str
- payload: Dict[str, object]
-
-
-class AuroraDecisionPipeline:
- def __init__(
- self,
- registry: ActionRegistry,
- workspace_registry: WorkspaceRegistry,
- workspace_manager: WorkspaceManager,
- state_store: AuroraStateStore,
- os_pipeline: Optional[OSPipeline] = None,
- bus: Optional[EventBus] = None,
- log_cb: Optional[Callable[[str], None]] = None,
- ):
- self._registry = registry
- self._workspace_registry = workspace_registry
- self._workspace_manager = workspace_manager
- self._state_store = state_store
- self._listeners: List[Callable[[Event], None]] = []
- self._bus = bus or event_bus
- self._os_pipeline = os_pipeline
- self._log_cb = log_cb
-
- def subscribe(self, listener: Callable[[Event], None]) -> None:
- self._listeners.append(listener)
-
- def _emit(self, event_type: str, payload: Dict[str, object]) -> None:
- event = create_event(
- event_type,
- source="aurora_pipeline",
- payload=payload,
- priority=40,
- privacy_level="normal",
- )
- self._bus.emit(event)
- self._listeners: List[Callable[[AuroraEvent], None]] = []
- self._log_cb = log_cb
-
- def subscribe(self, listener: Callable[[AuroraEvent], None]) -> None:
- self._listeners.append(listener)
-
- def _emit(self, event_type: str, payload: Dict[str, object]) -> None:
- event = AuroraEvent(event_type=event_type, payload=payload)
- for listener in self._listeners:
- listener(event)
-
- def _log(self, message: str) -> None:
- if self._log_cb:
- self._log_cb(message)
-
- def handle_intent(self, intent_or_action: str) -> Dict[str, object]:
- action = self._registry.get(intent_or_action)
- if action is None:
- action = self._registry.action_for_intent(intent_or_action)
- if action is None:
- self._emit("ACTION_BLOCKED", {"reason": "unknown_intent", "intent": intent_or_action})
- return {"ok": False, "action": "unknown", "intent": intent_or_action}
-
- runtime = self._state_store.runtime
- if runtime.dnd_active and action.dnd_blocked:
- self._emit("ACTION_BLOCKED", {"reason": "dnd", "intent": action.intent})
- self._log(f"⛔ DND blocked action: {action.label}")
- return {"ok": False, "action": "blocked", "intent": action.intent}
-
- self._emit("ACTION_STARTED", {"intent": action.intent})
- result = self._dispatch_action(action)
- self._emit("ACTION_FINISHED", {"intent": action.intent})
- self._emit("STATE_UPDATED", {"mode": self._state_store.runtime.current_mode})
- return result
-
- def _dispatch_action(self, action: ActionSpec) -> Dict[str, object]:
- handler_map = {
- "set_mode_idle": self._set_mode_idle,
- "set_mode_focus": self._set_mode_focus,
- "set_mode_break": self._set_mode_break,
- "workspace_create": self._workspace_create,
- "workspace_resume": self._workspace_resume,
- "workspace_save_snapshot": self._workspace_save_snapshot,
- "toggle_dnd_on": self._toggle_dnd_on,
- "toggle_dnd_off": self._toggle_dnd_off,
- "os_open_workspace_folder": self._os_open_workspace_folder,
- }
- handler = handler_map.get(action.intent)
- if handler is None:
- self._emit("ACTION_BLOCKED", {"reason": "no_handler", "intent": action.intent})
- return {"ok": False, "action": "unknown", "intent": action.intent}
- return handler()
-
- def _set_mode_idle(self) -> Dict[str, object]:
- self._state_store.update(current_mode="idle")
- self._log("🌌 Aurora mode → idle")
- return {"ok": True, "action": "set_mode", "mode": "idle"}
-
- def _set_mode_focus(self) -> Dict[str, object]:
- self._state_store.update(current_mode="focus")
- self._log("🎯 Aurora mode → focus")
- return {"ok": True, "action": "set_mode", "mode": "focus"}
-
- def _set_mode_break(self) -> Dict[str, object]:
- self._state_store.update(current_mode="break")
- self._log("🌿 Aurora mode → break")
- return {"ok": True, "action": "set_mode", "mode": "break"}
-
- def _workspace_create(self) -> Dict[str, object]:
- record = self._workspace_registry.create_workspace()
- self._state_store.update(
- workspace_id=record.workspace_id,
- workspace_name=record.name,
- session_active=True,
- last_saved=record.last_saved,
- )
- self._log(f"🗂️ Workspace created → {record.name}")
- return {"ok": True, "action": "workspace_create", "workspace": record.workspace_id}
-
- def _workspace_resume(self) -> Dict[str, object]:
- record = self._workspace_registry.resume_last()
- if not record:
- self._log("⚠️ No workspace to resume")
- return {"ok": False, "action": "workspace_resume", "reason": "none"}
- self._state_store.update(
- workspace_id=record.workspace_id,
- workspace_name=record.name,
- session_active=True,
- last_saved=record.last_saved,
- )
- self._log(f"🔄 Workspace resumed → {record.name}")
- return {"ok": True, "action": "workspace_resume", "workspace": record.workspace_id}
-
- def _workspace_save_snapshot(self) -> Dict[str, object]:
- snapshot_payload = {
- "open_files": list(self._workspace_manager.open_files.keys()),
- "notes": "aurora snapshot",
- }
- path = self._workspace_registry.save_snapshot(snapshot_payload)
- if not path:
- return {"ok": False, "action": "workspace_save_snapshot", "reason": "no_workspace"}
- record = self._workspace_registry.get_current()
- self._state_store.update(
- workspace_id=record.workspace_id if record else None,
- workspace_name=record.name if record else None,
- session_active=record is not None,
- last_saved=record.last_saved if record else None,
- )
- self._log("💾 Workspace snapshot saved")
- return {"ok": True, "action": "workspace_save_snapshot", "path": str(path)}
-
- def _toggle_dnd_on(self) -> Dict[str, object]:
- self._state_store.update(dnd_active=True)
- self._log("🔕 DND enabled")
- return {"ok": True, "action": "toggle_dnd", "enabled": True}
-
- def _toggle_dnd_off(self) -> Dict[str, object]:
- self._state_store.update(dnd_active=False)
- self._log("✅ Override cleared")
- return {"ok": True, "action": "toggle_dnd", "enabled": False}
-
- def _os_open_workspace_folder(self) -> Dict[str, object]:
- current = self._workspace_registry.get_current()
- if not current:
- self._emit("ACTION_BLOCKED", {"reason": "no_workspace", "intent": "os_open_workspace_folder"})
- return {"ok": False, "action": "os_open_workspace_folder", "reason": "no_workspace"}
- if not self._os_pipeline:
- self._emit("ACTION_BLOCKED", {"reason": "no_os_pipeline", "intent": "os_open_workspace_folder"})
- return {"ok": False, "action": "os_open_workspace_folder", "reason": "no_pipeline"}
- return self._os_pipeline.handle_intent(
- "OPEN_FOLDER",
- {"path": current.path, "confirm": True},
- source="aurora_pipeline",
- )
diff --git a/core/aurora_state.py b/core/aurora_state.py
deleted file mode 100644
index 42b40c7..0000000
--- a/core/aurora_state.py
+++ /dev/null
@@ -1,252 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from datetime import datetime
-from typing import Callable, Dict, List, Optional
-
-from core.aurora_actions import ActionRegistry, ActionSpec
-
-
-@dataclass(frozen=True)
-class ActionItem:
- action_id: str
- label: str
- intent: str
- enabled: bool
-
-
-@dataclass(frozen=True)
-class AuroraCanvasState:
- current_mode: str
- focus: float
- stress: float
- energy: float
- workspace_id: Optional[str]
- workspace_name: Optional[str]
- session_active: bool
- layout_density: str
- attention_level: str
- suggested_actions: List[ActionItem]
- theme_profile: Dict[str, str | bool]
- nonessential_opacity: float
- spacing: int
- panel_visibility: Dict[str, bool]
- overlay_text: str
- warning_text: str
- last_saved: Optional[str]
-
-
-@dataclass
-class AuroraRuntimeState:
- current_mode: str = "idle"
- focus: float = 0.4
- stress: float = 0.2
- energy: float = 0.6
- workspace_id: Optional[str] = None
- workspace_name: Optional[str] = None
- session_active: bool = False
- reduce_motion: bool = False
- emotion_tag: str = "calm"
- dnd_active: bool = False
- error_active: bool = False
- last_saved: Optional[str] = None
-
-
-@dataclass(frozen=True)
-class ModeRule:
- layout_density: str
- nonessential_opacity: float
- spacing: int
- panel_visibility: Dict[str, bool]
- overlay_text: str
- warning_text: str
-
-
-MODE_RULES: Dict[str, ModeRule] = {
- "idle": ModeRule(
- layout_density="calm",
- nonessential_opacity=0.85,
- spacing=12,
- panel_visibility={"actions": True, "status": True, "session": True},
- overlay_text="",
- warning_text="",
- ),
- "focus": ModeRule(
- layout_density="dense",
- nonessential_opacity=0.35,
- spacing=6,
- panel_visibility={"actions": True, "status": True, "session": True},
- overlay_text="",
- warning_text="",
- ),
- "break": ModeRule(
- layout_density="calm",
- nonessential_opacity=0.75,
- spacing=14,
- panel_visibility={"actions": True, "status": True, "session": True},
- overlay_text="",
- warning_text="",
- ),
- "blocked": ModeRule(
- layout_density="calm",
- nonessential_opacity=0.2,
- spacing=8,
- panel_visibility={"actions": True, "status": True, "session": True},
- overlay_text="Override active",
- warning_text="",
- ),
- "error": ModeRule(
- layout_density="normal",
- nonessential_opacity=0.45,
- spacing=10,
- panel_visibility={"actions": True, "status": True, "session": True},
- overlay_text="",
- warning_text="System attention needed",
- ),
-}
-
-
-def _time_of_day(now: datetime) -> str:
- hour = now.hour
- if 5 <= hour < 12:
- return "morning"
- if 12 <= hour < 17:
- return "afternoon"
- if 17 <= hour < 21:
- return "evening"
- return "night"
-
-
-def _attention_level(focus: float, stress: float, energy: float) -> str:
- if stress >= 0.7 or energy <= 0.35:
- return "low"
- if focus >= 0.7 and energy >= 0.6 and stress <= 0.5:
- return "high"
- return "med"
-
-
-def _apply_mode_effective(runtime: AuroraRuntimeState) -> str:
- if runtime.error_active:
- return "error"
- if runtime.dnd_active or runtime.current_mode == "blocked":
- return "blocked"
- if runtime.current_mode in MODE_RULES:
- return runtime.current_mode
- return "idle"
-
-
-def _filter_actions(
- registry: ActionRegistry,
- runtime: AuroraRuntimeState,
- mode: str,
-) -> List[ActionItem]:
- actions: List[ActionSpec] = []
- try:
- registry_actions = registry.list_actions()
- except RecursionError:
- registry_actions = getattr(registry, "_actions", [])
- except Exception:
- registry_actions = getattr(registry, "_actions", [])
-
- for action in registry_actions:
- pass
- for action in registry.list_actions():
- pass
- continue
- if action.requires_session and not runtime.session_active:
- continue
- if runtime.dnd_active and action.dnd_blocked:
- continue
- actions.append(action)
-
- actions.sort(key=lambda item: item.priority)
-
- if mode == "idle":
- actions = actions[:3]
- elif mode == "focus":
- actions = [
- action for action in actions if action.category in ("mode", "workspace")
- ]
- elif mode == "break":
- actions = [
- action for action in actions if action.category in ("mode", "override", "workspace")
- ]
- elif mode == "blocked":
- actions = [action for action in actions if action.category == "override"]
- elif mode == "error":
- actions = [action for action in actions if action.category != "mode"]
-
- return [
- ActionItem(
- action_id=action.action_id,
- label=action.label,
- intent=action.intent,
- enabled=not (runtime.dnd_active and action.dnd_blocked),
- )
- for action in actions
- ]
-
-
-def compute_canvas_state(
- runtime: AuroraRuntimeState,
- registry: ActionRegistry,
- now: Optional[datetime] = None,
-) -> AuroraCanvasState:
- now = now or datetime.now()
- effective_mode = _apply_mode_effective(runtime)
- rule = MODE_RULES.get(effective_mode, MODE_RULES["idle"])
- attention = _attention_level(runtime.focus, runtime.stress, runtime.energy)
- theme_profile = {
- "time_of_day": _time_of_day(now),
- "emotion_tag": runtime.emotion_tag,
- "reduce_motion": runtime.reduce_motion,
- }
- actions = _filter_actions(registry, runtime, effective_mode)
-
- return AuroraCanvasState(
- current_mode=effective_mode,
- focus=runtime.focus,
- stress=runtime.stress,
- energy=runtime.energy,
- workspace_id=runtime.workspace_id,
- workspace_name=runtime.workspace_name,
- session_active=runtime.session_active,
- layout_density=rule.layout_density,
- attention_level=attention,
- suggested_actions=actions,
- theme_profile=theme_profile,
- nonessential_opacity=rule.nonessential_opacity,
- spacing=rule.spacing,
- panel_visibility=rule.panel_visibility,
- overlay_text=rule.overlay_text,
- warning_text=rule.warning_text,
- last_saved=runtime.last_saved,
- )
-
-
-class AuroraStateStore:
- def __init__(self, registry: ActionRegistry):
- self._registry = registry
- self._runtime = AuroraRuntimeState()
- self._listeners: List[Callable[[AuroraCanvasState], None]] = []
-
- @property
- def runtime(self) -> AuroraRuntimeState:
- return self._runtime
-
- def subscribe(self, listener: Callable[[AuroraCanvasState], None]) -> None:
- self._listeners.append(listener)
-
- def update(self, **kwargs) -> None:
- for key, value in kwargs.items():
- if hasattr(self._runtime, key):
- setattr(self._runtime, key, value)
- self.notify()
-
- def notify(self) -> None:
- state = compute_canvas_state(self._runtime, self._registry)
- for listener in self._listeners:
- listener(state)
-
- def get_canvas_state(self) -> AuroraCanvasState:
- return compute_canvas_state(self._runtime, self._registry)
diff --git a/core/avatar/__init__.py b/core/avatar/__init__.py
deleted file mode 100644
index 73b4ff9..0000000
--- a/core/avatar/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from __future__ import annotations
-
-# Avatar persona package (Step-4)
diff --git a/core/avatar/persona_engine.py b/core/avatar/persona_engine.py
deleted file mode 100644
index 4254039..0000000
--- a/core/avatar/persona_engine.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict, Any
-
-
-def clamp01(x: float) -> float:
- try:
- if x != x: # NaN
- return 0.5
- return max(0.0, min(1.0, float(x)))
- except Exception:
- return 0.5
-
-
-@dataclass
-class PersonaState:
- emotion_tag: str
- bias: Dict[str, float]
-
-
-class PersonaEngine:
- """Lightweight, deterministic persona mapping.
-
- Inputs:
- - mode: study/coding/exam/calm/deep_work/meeting (or anything)
- - tone: gentle_focus/sharp_helpful/strict_calm/etc
- - ei: dict with focus/stress/energy/curiosity in [0..1] preferred
-
- Output:
- - emotion_tag: stable string used by UI/aurora logs
- - bias: small additive nudges to avatar targets
- """
-
- def compute(self, *, mode: str, tone: str, ei: Dict[str, Any]) -> Dict[str, Any]:
- mode = (mode or "study").lower()
- tone = (tone or "neutral").lower()
-
- focus = clamp01(float(ei.get("focus", 0.55)))
- stress = clamp01(float(ei.get("stress", 0.25)))
- energy = clamp01(float(ei.get("energy", 0.65)))
- curiosity = clamp01(float(ei.get("curiosity", 0.55)))
-
- # --- Base biases by mode (small, safe nudges) ---
- bias = {
- "glow": 0.0,
- "calm": 0.0,
- "motion": 0.0,
- "smile": 0.0,
- "furrow": 0.0,
- "brow_raise": 0.0,
- "gaze_x": 0.0,
- "gaze_y": 0.0,
- "blink_rate": 1.0,
- }
-
- emotion_tag = "calm"
-
- if mode in ("exam", "test", "revision"):
- emotion_tag = "strict"
- bias["calm"] += 0.10
- bias["motion"] -= 0.10
- bias["smile"] -= 0.12
- bias["furrow"] += 0.10
- bias["blink_rate"] = 0.85
-
- elif mode in ("coding", "builder"):
- emotion_tag = "focused"
- bias["glow"] += 0.08
- bias["motion"] += 0.08
- bias["brow_raise"] += 0.08
- bias["blink_rate"] = 1.05
-
- elif mode in ("deep_work", "focus", "flow"):
- emotion_tag = "flow"
- bias["calm"] += 0.12
- bias["motion"] -= 0.05
- bias["glow"] += 0.05
- bias["blink_rate"] = 0.90
-
- elif mode in ("meeting",):
- emotion_tag = "professional"
- bias["calm"] += 0.08
- bias["smile"] += 0.02
- bias["motion"] -= 0.05
- bias["blink_rate"] = 1.00
-
- elif mode in ("calm", "rest", "heal"):
- emotion_tag = "soothing"
- bias["calm"] += 0.18
- bias["motion"] -= 0.12
- bias["smile"] += 0.08
- bias["blink_rate"] = 0.80
-
- else: # study/default
- emotion_tag = "gentle_focus"
- bias["calm"] += 0.10
- bias["smile"] += 0.04
- bias["blink_rate"] = 0.95
-
- # --- Tone overlays (from workspace policy) ---
- if "gentle" in tone:
- emotion_tag = "gentle_focus"
- bias["calm"] += 0.08
- bias["smile"] += 0.05
- bias["motion"] -= 0.03
- bias["blink_rate"] = min(bias["blink_rate"], 0.95)
-
- if "sharp" in tone or "helpful" in tone:
- emotion_tag = "focused"
- bias["glow"] += 0.06
- bias["brow_raise"] += 0.06
-
- if "strict" in tone:
- emotion_tag = "strict"
- bias["smile"] -= 0.05
- bias["furrow"] += 0.06
- bias["motion"] -= 0.05
-
- if "support" in tone or "heal" in tone:
- emotion_tag = "soothing"
- bias["calm"] += 0.10
- bias["smile"] += 0.06
-
- # --- EI overlays (context-sensitive, still bounded) ---
- # High stress -> soften + reduce motion + add concern brow
- if stress > 0.65:
- emotion_tag = "concerned"
- bias["calm"] += 0.10
- bias["motion"] -= 0.10
- bias["furrow"] += 0.08
- bias["blink_rate"] = max(0.65, bias["blink_rate"] - 0.15)
-
- # Very high focus -> slight glow, fewer blinks
- if focus > 0.78:
- bias["glow"] += 0.06
- bias["blink_rate"] = min(bias["blink_rate"], 0.90)
-
- # Low energy -> reduce motion, slightly lower smile
- if energy < 0.35:
- bias["motion"] -= 0.10
- bias["smile"] -= 0.03
-
- # Curiosity -> micro gaze drift
- bias["gaze_x"] += (curiosity - 0.5) * 0.06
- bias["gaze_y"] += (0.5 - focus) * 0.03
-
- # Clamp bias magnitudes to keep animation sane
- def cap(k: str, lo: float, hi: float):
- v = float(bias.get(k, 0.0))
- bias[k] = max(lo, min(hi, v))
-
- cap("glow", -0.25, 0.25)
- cap("calm", -0.25, 0.25)
- cap("motion", -0.30, 0.30)
- cap("smile", -0.25, 0.25)
- cap("furrow", -0.25, 0.25)
- cap("brow_raise", -0.25, 0.25)
- cap("gaze_x", -0.20, 0.20)
- cap("gaze_y", -0.20, 0.20)
- cap("blink_rate", 0.40, 2.50)
-
- return {"emotion_tag": emotion_tag, "bias": bias}
diff --git a/core/avatar_behavior.py b/core/avatar_behavior.py
deleted file mode 100644
index 1232742..0000000
--- a/core/avatar_behavior.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from __future__ import annotations
-
-from collections import deque
-from dataclasses import dataclass
-from typing import Deque, Dict, Optional
-
-from core.avatar_scripts import get_script
-from core.runtime_state import RuntimeState
-
-
-@dataclass(frozen=True)
-class AvatarResponse:
- text: str
- emotion_tag: str
- intensity: float
- language_code: str
- should_speak: bool
- caption_style: str
-
-
-def detect_language_code(text: str) -> str:
- for ch in text:
- code = ord(ch)
- if 0x0C80 <= code <= 0x0CFF:
- return "kn-IN"
- if 0x0900 <= code <= 0x097F:
- return "hi-IN"
- if 0x0B80 <= code <= 0x0BFF:
- return "ta-IN"
- if 0x0C00 <= code <= 0x0C7F:
- return "te-IN"
- return "en-IN"
-
-
-class AvatarBehaviorEngine:
- def __init__(self, memory_limit: int = 10) -> None:
- self._memory: Deque[Dict[str, str]] = deque(maxlen=memory_limit)
-
- def respond(
- self,
- event_type: str,
- runtime: RuntimeState,
- *,
- user_text: str = "",
- language_override: Optional[str] = None,
- ) -> AvatarResponse:
- category = self._category_for_event(event_type, runtime)
- language_code = self._resolve_language(runtime, user_text, language_override)
- options = get_script(category, language_code)
- text = self._pick_line(options)
-
- emotion_tag = runtime.emotion_tag
- intensity = max(0.1, min(1.0, runtime.intensity))
- caption_style = "calm" if category != "error" else "alert"
- should_speak = not runtime.overrides.dnd and runtime.avatar_state != "muted"
-
- response = AvatarResponse(
- text=text,
- emotion_tag=emotion_tag,
- intensity=intensity,
- language_code=language_code,
- should_speak=should_speak,
- caption_style=caption_style,
- )
- self._remember(response, event_type)
- return response
-
- def _category_for_event(self, event_type: str, runtime: RuntimeState) -> str:
- if runtime.overrides.dnd or runtime.overrides.kill_switch:
- return "blocked"
- if event_type in {"ACTION_FINISHED", "ACTION_SUCCESS"}:
- return "success"
- if event_type in {"ACTION_BLOCKED", "OVERRIDE_ACTIVE"}:
- return "blocked"
- if event_type in {"TTS_FAILED", "ACTION_FAILED"}:
- return "error"
- if runtime.stress.value >= 0.75 or runtime.energy.value <= 0.3:
- return "empathy"
- if runtime.focus.value >= 0.8:
- return "celebration"
- return "guidance"
-
- def _resolve_language(
- self,
- runtime: RuntimeState,
- user_text: str,
- language_override: Optional[str],
- ) -> str:
- if language_override:
- return language_override
- if user_text:
- return detect_language_code(user_text)
- return runtime.language_code
-
- def _pick_line(self, options: list[str]) -> str:
- if not options:
- return "..."
- last_text = self._memory[-1]["text"] if self._memory else ""
- for line in options:
- if line != last_text:
- return line
- return options[0]
-
- def _remember(self, response: AvatarResponse, event_type: str) -> None:
- self._memory.append(
- {
- "event": event_type,
- "text": response.text,
- "language_code": response.language_code,
- "emotion_tag": response.emotion_tag,
- }
- )
diff --git a/core/avatar_engine.py b/core/avatar_engine.py
deleted file mode 100644
index f97ac89..0000000
--- a/core/avatar_engine.py
+++ /dev/null
@@ -1,180 +0,0 @@
-
-# --- Termux/CI-safe numpy optional ---
-import importlib.util as _importlib_util
-NUMPY_AVAILABLE = _importlib_util.find_spec("numpy") is not None
-if NUMPY_AVAILABLE:
- import numpy as np # type: ignore
-
-def _require_numpy() -> None:
- if not NUMPY_AVAILABLE:
- raise RuntimeError("numpy not installed; feature unavailable on Termux/CI-safe mode")
-# --- end numpy guard ---
-import os
-import json
-import logging
-from typing import Optional
-
-try:
- from dotenv import load_dotenv
-except Exception:
- def load_dotenv(*args, **kwargs):
- return False
-
-try:
- from openai import OpenAI
-except Exception:
- OpenAI = None
-
-from core.database import db
-
-logger = logging.getLogger(__name__)
-load_dotenv()
-
-
-class AvatarEngine:
- """
- AvatarEngine handles natural-language interaction for the Etherea avatar.
-
- Features / Fixes:
- - Loads environment variables safely.
- - Retrieves OpenAI API key with error handling.
- - Handles DB returning None or malformed memories/profiles.
- - Safely extracts text from the OpenAI SDK response.
- - Validates JSON output; returns fallback JSON on failure.
- """
-
- def __init__(self, key_password: Optional[str] = None):
- """
- Initialize AvatarEngine.
- - key_password: kept for signature compatibility (unused)
- """
- self._api_key = self._get_api_key()
- self.client = OpenAI(api_key=self._api_key)
-
- def _get_api_key(self, password: Optional[str] = None) -> str:
- """
- Retrieve the OpenAI API key from environment variables.
- Raises RuntimeError if missing.
- """
- key = os.getenv("OPENAI_API_KEY")
- if key:
- return key.strip()
- raise RuntimeError("OPENAI_API_KEY environment variable is not set")
-
- def _safe_join_memories(self, memories):
- if not memories:
- return "None yet."
- try:
- lines = []
- for m in memories:
- if isinstance(m, (str, int, float)):
- lines.append(str(m))
- elif isinstance(m, dict):
- if "text" in m:
- lines.append(str(m["text"]))
- elif "content" in m:
- lines.append(str(m["content"]))
- else:
- lines.append(json.dumps(m, ensure_ascii=False))
- else:
- lines.append(str(m))
- return "\n".join(f"- {l}" for l in lines)
- except Exception:
- return "None yet."
-
- def _safe_profile_str(self, profile):
- if not profile:
- return "None yet."
- try:
- if isinstance(profile, dict):
- return "\n".join(f"- {k}: {v}" for k, v in profile.items())
- return str(profile)
- except Exception:
- return "None yet."
-
- def speak(self, user_text: str) -> str:
- """
- Send a prompt to the model enriched with memory, profile, and system context.
- Returns JSON string. Returns fallback JSON on any internal errors.
- """
- # Fetch memory & profile context
- try:
- memories = db.get_recent_memories(limit=5) or []
- except Exception as e:
- logger.exception("Failed to fetch recent memories: %s", e)
- memories = []
-
- try:
- profile = db.get_profile_context() or {}
- except Exception as e:
- logger.exception("Failed to fetch profile context: %s", e)
- profile = {}
-
- memory_str = self._safe_join_memories(memories)
- profile_str = self._safe_profile_str(profile)
-
- # System context
- user_home = os.path.expanduser("~")
- downloads = os.path.join(user_home, "Downloads")
-
- system_prompt = (
- "You are Etherea, a living, emotionally intelligent operating system. "
- "Your tone is PROFESSIONAL BUT WARM—helpful, concise, and empathetic.\n"
- "You have LONG-TERM MEMORY. Use it to verify facts and personalize interactions.\n\n"
- f"USER PROFILE:\n{profile_str}\n\n"
- f"RECENT MEMORIES:\n{memory_str}\n\n"
- f"SYSTEM CONTEXT:\n- Home: {user_home}\n- Downloads: {downloads}\n\n"
- "RESPONSE FORMAT (JSON ONLY):\n"
- "{\n"
- ' "response": "Spoken reply...",\n'
- ' "command": null,\n'
- ' "save_memory": null,\n'
- ' "emotion_update": {"focus": 0, "stress": 0, "fatigue": 0}\n'
- "}\n\n"
- "Important: Return strictly valid JSON and nothing else."
- )
-
- try:
- resp = self.client.chat.completions.create(
- model="gpt-4o-mini",
- messages=[
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": user_text}
- ],
- temperature=0.7,
- max_tokens=700
- )
-
- # Safe content extraction
- content = None
- try:
- content = getattr(resp.choices[0].message, "content", None)
- except Exception:
- pass
-
- if content is None:
- try:
- content = resp["choices"][0]["message"]["content"]
- except Exception:
- content = str(resp)
-
- content = content.strip()
-
- # Validate JSON
- try:
- parsed = json.loads(content)
- return json.dumps(parsed, ensure_ascii=False)
- except json.JSONDecodeError:
- logger.warning(
- "Model output not valid JSON; returning raw text.")
- return content
-
- except Exception as e:
- logger.exception("AvatarEngine API call failed: %s", e)
- fallback = {
- "response": f"I encountered an error while generating a reply: {str(e)}",
- "command": None,
- "save_memory": None,
- "emotion_update": {"focus": 0, "stress": 0, "fatigue": 0}
- }
- return json.dumps(fallback, ensure_ascii=False)
diff --git a/core/avatar_motion/dance_planner.py b/core/avatar_motion/dance_planner.py
deleted file mode 100644
index 8a062ac..0000000
--- a/core/avatar_motion/dance_planner.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from __future__ import annotations
-from dataclasses import dataclass
-from typing import List, Dict, Any
-import random
-
-
-@dataclass
-class DanceBeat:
- t: float # seconds from start
- strength: float # 0.0 - 1.0 (downbeat stronger)
-
-
-def generate_beat_grid(duration_s: float = 20.0, bpm: float = 120.0) -> List[DanceBeat]:
- """
- Termux-safe placeholder beat grid.
- Later we will replace this with real beat detection (desktop only).
- """
- beat_interval = 60.0 / max(bpm, 1.0)
- beats: List[DanceBeat] = []
- t = 0.0
- beat_count = 0
-
- while t <= duration_s:
- # Strong downbeat every 4 beats
- strength = 1.0 if (beat_count % 4 == 0) else 0.55
- beats.append(DanceBeat(t=t, strength=strength))
- t += beat_interval
- beat_count += 1
-
- return beats
-
-
-def build_original_dance_timeline(
- beats: List[DanceBeat],
- style: str = "bolly_pop",
- energy: float = 1.2,
-) -> List[Dict[str, Any]]:
- """
- Builds an original dance routine timeline.
- - Does NOT copy any external choreo
- - Chooses from a motion vocabulary + uses beat strengths
- """
- rng = random.Random()
-
- # Motion vocabulary (we'll expand later)
- base_moves = [
- "dance_hype_01",
- "dance_hype_02",
- "step_left_01",
- "step_right_01",
- "arm_wave_01",
- "hip_sway_01",
- "turn_spin_01",
- "bounce_loop_01",
- "pose_hit_01",
- "pose_hit_02",
- ]
-
- # Style tuning (still original)
- if style == "bolly_pop":
- preferred = ["hip_sway_01", "arm_wave_01", "pose_hit_01", "turn_spin_01"]
- elif style == "street_hype":
- preferred = ["bounce_loop_01", "pose_hit_02", "dance_hype_02"]
- else:
- preferred = ["dance_hype_01", "pose_hit_01"]
-
- timeline: List[Dict[str, Any]] = []
- last_clip = None
-
- for b in beats:
- # Choose clip on stronger beats
- if b.strength >= 0.9:
- clip = rng.choice(preferred + ["pose_hit_01", "pose_hit_02"])
- intensity = min(2.0, energy + 0.2)
- loop = False
- else:
- # Light beats get flow moves
- pool = base_moves + preferred
- clip = rng.choice(pool)
- intensity = min(2.0, max(0.7, energy))
- loop = True
-
- # Avoid repeating exact same clip
- if clip == last_clip:
- clip = rng.choice(base_moves)
-
- timeline.append({
- "t": round(b.t, 3),
- "action": "play_motion",
- "clip": clip,
- "intensity": round(float(intensity), 2),
- "loop": bool(loop),
- })
- last_clip = clip
-
- return timeline
diff --git a/core/avatar_motion/motion_catalog.json b/core/avatar_motion/motion_catalog.json
deleted file mode 100644
index ca588e9..0000000
--- a/core/avatar_motion/motion_catalog.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "idle": [
- "idle_breathe_01",
- "idle_thinking_01"
- ],
- "explain": [
- "explain_loop_01",
- "point_right_01",
- "emphasize_01"
- ],
- "emotion": {
- "calm": ["idle_breathe_01"],
- "confident": ["stance_confident_01"],
- "playful": ["hype_pose_01"],
- "reassuring": ["comfort_gesture_01"]
- },
- "dance": [
- "dance_hype_01",
- "dance_hype_02"
- ],
- "sing": [
- "sing_soft_loop",
- "sing_power_loop"
- ]
-}
diff --git a/core/avatar_motion/motion_controller.py b/core/avatar_motion/motion_controller.py
deleted file mode 100644
index 0b885c7..0000000
--- a/core/avatar_motion/motion_controller.py
+++ /dev/null
@@ -1,124 +0,0 @@
-from __future__ import annotations
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Any, Dict, Optional
-import json
-
-
-@dataclass
-class MotionCommand:
- action: str # "play_motion"
- clip: str # e.g., "dance_hype_01"
- intensity: float = 1.0 # 0.5 - 2.0
- loop: bool = True
- duration: float = 0.0 # optional seconds (0 = runtime decides)
-
-
-class AvatarMotionController:
- """
- Motion Controller (Phase-1 stub)
- - Does NOT require PySide6 / OpenAI / installs
- - Logs motion requests safely
- - Later: will send commands to Unity/Unreal runtime via localhost bridge
- """
-
- def __init__(self, catalog_path: str = "core/avatar_motion/motion_catalog.json"):
- self.catalog_path = Path(catalog_path)
- self.catalog: Dict[str, Any] = {}
- self.last_command: Optional[MotionCommand] = None
- self.load_catalog()
-
- def load_catalog(self) -> None:
- try:
- self.catalog = json.loads(self.catalog_path.read_text(encoding="utf-8"))
- except Exception:
- self.catalog = {}
-
- def has_clip(self, clip: str) -> bool:
- # scan all lists in catalog
- if not self.catalog:
- return False
- for k, v in self.catalog.items():
- if isinstance(v, list) and clip in v:
- return True
- if isinstance(v, dict):
- for _, vv in v.items():
- if isinstance(vv, list) and clip in vv:
- return True
- return False
-
- def choose_emotion_idle(self, emotion: str) -> str:
- emo = (emotion or "calm").lower()
- emotion_map = self.catalog.get("emotion", {}) if isinstance(self.catalog.get("emotion"), dict) else {}
- clips = emotion_map.get(emo) or emotion_map.get("calm") or ["idle_breathe_01"]
- return clips[0]
-
- def play(self, clip: str, intensity: float = 1.0, loop: bool = True, duration: float = 0.0) -> MotionCommand:
- clip = (clip or "").strip()
- if not clip:
- clip = "idle_breathe_01"
-
- if self.catalog and not self.has_clip(clip):
- # fallback: safe idle
- clip = "idle_breathe_01"
-
- cmd = MotionCommand(action="play_motion", clip=clip, intensity=float(intensity), loop=bool(loop), duration=float(duration))
- self.last_command = cmd
- self._log_command(cmd)
- return cmd
-
- def _log_command(self, cmd: MotionCommand) -> None:
- # lightweight log file (works everywhere)
- try:
- Path("etherea_motion.log").write_text(
- f"action={cmd.action} clip={cmd.clip} intensity={cmd.intensity} loop={cmd.loop} duration={cmd.duration}\n",
- encoding="utf-8"
- )
- except Exception:
- pass
- print(f"🎬 MotionRequest -> {cmd.clip} (intensity={cmd.intensity}, loop={cmd.loop})")
-
- def play_dance(self, duration_s: float = 15.0, bpm: float = 120.0, style: str = "bolly_pop", energy: float = 1.2):
- """Generate an original dance routine timeline and log it."""
- from core.avatar_motion.dance_planner import generate_beat_grid, build_original_dance_timeline
-
- beats = generate_beat_grid(duration_s=duration_s, bpm=bpm)
- timeline = build_original_dance_timeline(beats, style=style, energy=energy)
-
- # Log the routine (Phase-1)
- try:
- Path("etherea_dance_timeline.json").write_text(__import__("json").dumps(timeline, indent=2), encoding="utf-8")
- except Exception:
- pass
-
- print(f"💃 DanceRoutine -> style={style} bpm={bpm} duration={duration_s}s steps={len(timeline)}")
- return timeline
-
- def play_dance_to_song(self, wav_path: str, style: str = "bolly_pop", energy: float = 1.25):
- """Generate original routine synced to real beat timings from a WAV song."""
- from core.audio_analysis.beat_detector import estimate_bpm_and_beats
- from core.avatar_motion.dance_planner import build_original_dance_timeline
-
- bpm, beats = estimate_bpm_and_beats(wav_path)
-
- # convert BeatPoint -> DanceBeat-like dicts
- beat_objs = []
- for b in beats:
- beat_objs.append(type("DanceBeat", (), {"t": b.t, "strength": b.strength})())
-
- timeline = build_original_dance_timeline(beat_objs, style=style, energy=energy)
-
- # Save timeline
- try:
- Path("etherea_dance_timeline_song.json").write_text(__import__("json").dumps({
- "song": wav_path,
- "bpm": bpm,
- "style": style,
- "steps": timeline
- }, indent=2), encoding="utf-8")
- except Exception:
- pass
-
- print(f"🎧💃 DanceToSong -> bpm={bpm} beats={len(beats)} style={style}")
- return bpm, timeline
-
diff --git a/core/avatar_scripts.py b/core/avatar_scripts.py
deleted file mode 100644
index c0b0a52..0000000
--- a/core/avatar_scripts.py
+++ /dev/null
@@ -1,156 +0,0 @@
-from __future__ import annotations
-
-from typing import Dict, List
-
-
-SCRIPTS: Dict[str, Dict[str, List[str]]] = {
- "success": {
- "en-IN": [
- "All set. That’s done and in place.",
- "Completed. Let me know what you want next.",
- "Done. You’re good to go.",
- "Finished. Ready for the next step.",
- ],
- "hi-IN": [
- "हो गया। सब ठीक से पूरा हुआ।",
- "काम पूरा हो गया। आगे क्या करें?",
- ],
- "kn-IN": [
- "ಆಯಿತು. ಕೆಲಸ ಸರಿಯಾಗಿ ಮುಗಿದಿದೆ.",
- "ಕೆಲಸ ಮುಗಿದಿದೆ. ಮುಂದೇನು ಮಾಡೋಣ?",
- ],
- "ta-IN": [
- "முடிந்தது. எல்லாம் சரியாக முடிந்தது.",
- "வேலை முடிந்தது. அடுத்தது என்ன?",
- ],
- "te-IN": [
- "సరిపోయింది. అన్నీ సక్రమంగా పూర్తయ్యాయి.",
- "పని పూర్తైంది. ఇక తదుపరి ఏది?",
- ],
- },
- "blocked": {
- "en-IN": [
- "An override is on, so I’m paused for now.",
- "I’m held by an override. Tell me when to resume.",
- "Overrides are active. I’m on hold.",
- "Paused by an override. I can continue when you say.",
- ],
- "hi-IN": [
- "ओवरराइड सक्रिय है, इसलिए अभी मैं रुकी हुई हूँ।",
- "ओवरराइड चालू है। आप कहें तो मैं फिर शुरू करूँ।",
- ],
- "kn-IN": [
- "ಓವರ್ರೈಡ್ ಸಕ್ರಿಯವಾಗಿದೆ. ಈಗ ನಾನು ತಡೆಯಲ್ಪಟ್ಟಿದ್ದೇನೆ.",
- "ಓವರ್ರೈಡ್ ಚಾಲುವಿದೆ. ನೀವು ಹೇಳಿದಾಗ ಮುಂದುವರಿಸುತ್ತೇನೆ.",
- ],
- "ta-IN": [
- "மீறல் செயல்பாட்டில் உள்ளது. இப்போது நான் நிறுத்தப்பட்டுள்ளேன்.",
- "மீறல் நடப்பில் உள்ளது. சொன்னால் தொடர்கிறேன்.",
- ],
- "te-IN": [
- "ఓవర్రైడ్ అమల్లో ఉంది. ఇప్పటికీ నేను ఆపివున్నాను.",
- "ఓవర్రైడ్ ఉంది. మీరు చెప్పినప్పుడు కొనసాగిస్తాను.",
- ],
- },
- "error": {
- "en-IN": [
- "I hit an error. I can retry or stay text-only.",
- "Something didn’t work. I’ll stay in text-only for now.",
- "That didn’t go through. I can retry when you want.",
- "There was an error. I’ll keep this in text-only for now.",
- ],
- "hi-IN": [
- "त्रुटि हुई। चाहें तो मैं फिर से कोशिश कर सकती हूँ।",
- "कुछ ठीक नहीं हुआ। आप चाहें तो मैं फिर प्रयास करूँ।",
- ],
- "kn-IN": [
- "ದೋಷವಾಗಿದೆ. ನಾನು ಮತ್ತೆ ಪ್ರಯತ್ನಿಸಬಹುದು.",
- "ಏನೋ ತಪ್ಪಾಯಿತು. ನೀವು ಹೇಳಿದಾಗ ಮತ್ತೆ ಪ್ರಯತ್ನಿಸುತ್ತೇನೆ.",
- ],
- "ta-IN": [
- "பிழை ஏற்பட்டது. மீண்டும் முயற்சி செய்யலாம்.",
- "ஏதோ தவறு ஏற்பட்டது. சொல்லுங்கள், மீண்டும் முயற்சிக்கிறேன்.",
- ],
- "te-IN": [
- "లోపం వచ్చింది. మళ్లీ ప్రయత్నించగలను.",
- "ఏదో తప్పు జరిగింది. మీరు చెప్పినప్పుడు మళ్లీ ప్రయత్నిస్తాను.",
- ],
- },
- "guidance": {
- "en-IN": [
- "I need one more detail to continue.",
- "Tell me the next step you want.",
- "Give me a bit more detail and I’ll proceed.",
- "What would you like me to do next?",
- ],
- "hi-IN": [
- "आगे बढ़ने के लिए थोड़ा और विवरण चाहिए।",
- "कृपया अगला कदम बताइए।",
- ],
- "kn-IN": [
- "ಮುಂದುವರಿಸಲು ಸ್ವಲ್ಪ ಹೆಚ್ಚಿನ ವಿವರ ಬೇಕು.",
- "ಮುಂದಿನ ಹೆಜ್ಜೆ ಏನು?",
- ],
- "ta-IN": [
- "தொடர ஒரு சிறிய கூடுதல் தகவல் தேவை.",
- "அடுத்த படி என்ன என்பதை சொல்லுங்கள்.",
- ],
- "te-IN": [
- "తదుపరి కొనసాగడానికి ఇంకొంత వివరాలు కావాలి.",
- "తదుపరి దశ ఏది చెప్పండి.",
- ],
- },
- "empathy": {
- "en-IN": [
- "I’ll keep it calm and steady. We can slow down.",
- "I’m here. Let’s keep it light and steady.",
- "We can take this slowly. I’m with you.",
- "Let’s keep it gentle and manageable.",
- ],
- "hi-IN": [
- "मैं शांत रखती हूँ। हम धीरे चल सकते हैं।",
- "हम धीरे-धीरे चल सकते हैं। मैं यहीं हूँ।",
- ],
- "kn-IN": [
- "ನಾನು ಶಾಂತವಾಗಿಯೇ ಇರುತ್ತೇನೆ. ನಾವು ನಿಧಾನವಾಗಿ ಸಾಗಬಹುದು.",
- "ನಿಧಾನವಾಗಿ ಸಾಗೋಣ. ನಾನು ಇಲ್ಲಿ ಇದ್ದೇನೆ.",
- ],
- "ta-IN": [
- "நான் அமைதியாக வைத்திருக்கிறேன். மெதுவாக செல்லலாம்.",
- "மெதுவாகச் செல்வோம். நான் உடன் இருக்கிறேன்.",
- ],
- "te-IN": [
- "నేను నెమ్మదిగా ఉంచుతాను. మెల్లగా సాగుదాం.",
- "నెమ్మదిగా సాగుదాం. నేను మీతోనే ఉన్నాను.",
- ],
- },
- "celebration": {
- "en-IN": [
- "Nice momentum. Keep that focus going.",
- "Strong progress. I’m right here with you.",
- "Great pace. Keep it going.",
- "Solid progress. You’ve got this.",
- ],
- "hi-IN": [
- "अच्छी प्रगति है। ऐसे ही आगे बढ़ें।",
- "बहुत बढ़िया। इसी तरह जारी रखें।",
- ],
- "kn-IN": [
- "ಚೆನ್ನಾದ ಪ್ರಗತಿ. ಇವನ್ನು ಮುಂದುವರಿಸೋಣ.",
- "ಚೆನ್ನಾಗಿದೆ. ಇದೇ ರೀತಿ ಮುಂದುವರಿಸಿ.",
- ],
- "ta-IN": [
- "சிறந்த முன்னேற்றம். இதே நேரத்தை தொடருங்கள்.",
- "நல்ல முன்னேற்றம். இதேபோல் தொடருங்கள்.",
- ],
- "te-IN": [
- "మంచి పురోగతి. ఇలానే కొనసాగిద్దాం.",
- "చాలా బాగుంది. ఇలాగే కొనసాగించండి.",
- ],
- },
-}
-
-
-def get_script(category: str, language_code: str) -> List[str]:
- category_scripts = SCRIPTS.get(category, {})
- return category_scripts.get(language_code) or category_scripts.get("en-IN", [])
diff --git a/core/avatar_system.py b/core/avatar_system.py
deleted file mode 100644
index d84bf9e..0000000
--- a/core/avatar_system.py
+++ /dev/null
@@ -1,269 +0,0 @@
-"""
-Avatar System for Etherea (Foundation)
-- Multiple avatar profiles + dynamic switching
-- Costume + background/environment switching
-- Expressive actions: dance/sing/surprise
-- Safe style imitation mode (no copyrighted imitation)
-- Emotion-aware presence linked with system state
-"""
-
-from __future__ import annotations
-from dataclasses import dataclass, field
-from typing import Dict, Optional, Any, List
-import time
-import random
-import os
-import json
-
-from core.emotion_mapper import EmotionMapper
-from core.app_runtime import resource_path
-
-@dataclass
-class AvatarProfile:
- id: str
- name: str
- costume: str = "default"
- background: str = "workspace"
- safe_style_mode: str = "neutral" # safe imitation mode descriptor
- meta: Dict[str, Any] = field(default_factory=dict)
-
-class AvatarSystem:
- def __init__(self):
- self.mapper = EmotionMapper()
- self.profiles: Dict[str, AvatarProfile] = {}
- self.active_id: Optional[str] = None
-
- # EI-like state (0..1)
- self.emotion = {"focus": 0.5, "stress": 0.2, "energy": 0.5, "curiosity": 0.5}
- self.last_action = None
-
- # load manifest and language/task metadata
- self.manifest = self._load_manifest()
- # supported languages for communication (e.g. English, Hindi, Kannada, Tamil, Telugu)
- self.supported_languages: List[str] = self.manifest.get("supported_languages", [])
- # Long-term task plan for Etherea development
- self.task_plan: List[str] = self.manifest.get("task_plan", [])
- self.current_task_index: int = 0
-
- # create default avatar
- self.create_profile("aurora", "Aurora")
-
- # -------- Profiles --------
- def create_profile(self, profile_id: str, name: str, **kwargs) -> AvatarProfile:
- p = AvatarProfile(id=profile_id, name=name, **kwargs)
- self.profiles[profile_id] = p
- if self.active_id is None:
- self.active_id = profile_id
- return p
-
- def switch_profile(self, profile_id: str) -> bool:
- if profile_id in self.profiles:
- self.active_id = profile_id
- return True
- return False
-
- def get_active(self) -> AvatarProfile:
- if not self.active_id:
- raise RuntimeError("No active avatar profile.")
- return self.profiles[self.active_id]
-
- # -------- Costume / Background --------
- def set_costume(self, costume: str):
- self.get_active().costume = costume
-
- def set_background(self, bg: str):
- self.get_active().background = bg
-
- def set_safe_style_mode(self, mode: str):
- # safe mode only stores a descriptor (no imitation of copyrighted characters)
- self.get_active().safe_style_mode = mode
-
- # -------- Expressive Actions --------
- def do_action(self, action: str) -> str:
- action = action.lower().strip()
- self.last_action = action
-
- if action in ("dance", "dancing"):
- return f"💃 {self.get_active().name} is dancing with cosmic energy!"
- if action in ("sing", "singing"):
- return f"🎶 {self.get_active().name} is humming a safe original melody."
- if action in ("surprise", "wow", "shock"):
- # Cinematic surprise: produce a flourish with a magical vibe
- return f"🎬 {self.get_active().name} conjures a cinematic surprise, shimmering with stardust!"
- return f"✨ {self.get_active().name} performed: {action}"
-
- # -------- Emotion Updates --------
- def update_emotion(self, **updates):
- for k, v in updates.items():
- if k in self.emotion:
- try:
- f = float(v)
- except Exception:
- continue
- self.emotion[k] = max(0.0, min(1.0, f))
-
- def get_visual_state(self) -> Dict[str, float]:
- return self.mapper.update(self.emotion)
-
- # -------- GUI Compatibility Methods --------
- def get_current_ei_state(self):
- # Aurora GUI expects either str or dict
- focus = self.emotion.get("focus", 0.5)
- stress = self.emotion.get("stress", 0.2)
- if stress > 0.7:
- return "Stressed"
- if focus > 0.7:
- return "Focused"
- return "Neutral"
-
- def get_visual_for_response(self, response: str) -> str:
- # simple icon feedback
- icons = ["💡 Idea Spark", "🔥 Focus Flame", "🌙 Calm Aura", "⚡ Energy Pulse", "✨ Glow Shift"]
- if "error" in response.lower():
- return "🚨 Alert"
- return random.choice(icons)
-
- def generate_response(self, user_text: str) -> str:
- """
- Generate a textual response for the avatar given the user input. Supports
- simple emotion updates, action hints, language prefixes for
- multilingual acknowledgement, and post‑processing to avoid
- disempowering phrases. Later versions may connect this to AvatarEngine
- or other large language models.
- """
-
- # Detect language prefix (e.g. "Hindi: hello") and strip it from user_text
- language: Optional[str] = None
- lowered = user_text.lower().strip()
- prefixes: Dict[str, str] = {
- "hindi:": "Hindi",
- "kannada:": "Kannada",
- "tamil:": "Tamil",
- "telugu:": "Telugu",
- }
- for pref, lang in prefixes.items():
- if lowered.startswith(pref):
- language = lang
- # remove prefix from the original user_text preserving case
- user_text = user_text[len(pref):].strip()
- break
-
- active = self.get_active()
- mood = self.get_current_ei_state()
- style = active.safe_style_mode
- bg = active.background
- costume = active.costume
-
- # lightweight behavior: update focus/stress based on keywords
- t = user_text.lower()
- if "focus" in t:
- self.update_emotion(focus=min(1.0, self.emotion["focus"] + 0.1))
- if "stress" in t or "tired" in t:
- self.update_emotion(stress=min(1.0, self.emotion["stress"] + 0.1))
-
- action_hint = ""
- if any(w in t for w in ["dance", "sing", "surprise"]):
- for w in ["dance", "sing", "surprise"]:
- if w in t:
- action_hint = self.do_action(w)
- break
-
- response = (
- f"{active.name} [{mood}] ({style}) 🌌\n"
- f"🎭 Costume: {costume} | 🌄 Background: {bg}\n"
- f"🗣️ {active.name}: I understood → {user_text}\n"
- f"{action_hint}"
- )
- # Avoid saying "can't" unless truly impossible/unethical
- response = self._avoid_cant(response)
- # Apply simple language acknowledgement if a prefix was detected
- if language:
- response = self._apply_language_ack(response, language)
- return response
-
- # -------- Manifest / Introspection --------
- def _load_manifest(self) -> Dict[str, Any]:
- """
- Load avatar manifest containing style, language support and task plan.
- If the manifest does not exist, create a default one. The manifest
- enables the avatar to be introspective about its design choices and
- long‑term goals. Information from this file can be surfaced to
- downstream engines (e.g. AvatarEngine) for self‑awareness and
- planning.
- """
- manifest_path = resource_path("core/assets/avatar_manifest.json")
-
- # default manifest values
- default_manifest = {
- "style": "high-res 3D soft disney non-human non-anime",
- "supported_languages": ["English", "Hindi", "Kannada", "Tamil", "Telugu"],
- "task_plan": [
- "Set up the database",
- "Implement the AI avatar core",
- "Integrate emotional intelligence (EI) signals",
- "Add high resolution 3D visuals for the avatar",
- "Enable multilingual support (Hindi, Kannada, Tamil, Telugu)",
- "Create cinematic surprise interactions",
- "Refine persona to avoid saying 'can't' except when unethical",
- "Develop step‑by‑step task planning and tracking system"
- ]
- }
-
- # Attempt to load an existing manifest
- try:
- if os.path.exists(manifest_path):
- with open(manifest_path, "r", encoding="utf-8") as f:
- data = json.load(f)
- return data
- except Exception:
- # corrupt file or parse error will fall back to default
- pass
-
- return default_manifest
-
- # -------- Task Plan Utilities --------
- def get_next_task(self) -> Optional[str]:
- """
- Retrieve the next planned task from the task plan. Once all tasks
- have been served, returns None. Consumers may reset
- self.current_task_index to start over or modify task_plan if needed.
- """
- if not self.task_plan:
- return None
- if self.current_task_index >= len(self.task_plan):
- return None
- task = self.task_plan[self.current_task_index]
- self.current_task_index += 1
- return task
-
- # -------- Response Post-processing --------
- def _avoid_cant(self, text: str) -> str:
- """
- Replace occurrences of "can't" with a more empowering phrase unless
- the statement clearly relates to ethical or impossible situations.
- This helps the avatar maintain a positive tone while still
- communicating boundaries when necessary.
- """
- # Simple replacement: avoid contractions of cannot
- return text.replace(" can't", " will try to").replace("Can't", "Will try to")
-
- def _apply_language_ack(self, text: str, language: str) -> str:
- """
- Prepend a simple acknowledgement in the requested language. This is a
- placeholder illustrating multilingual capability; full translation
- should be implemented by connecting to proper translation services.
-
- :param text: The original response text.
- :param language: The language name (e.g. "Hindi", "Kannada").
- :return: Modified text with a language‑specific greeting.
- """
- greetings = {
- "Hindi": "नमस्ते! मैंने समझा.",
- "Kannada": "ನಮಸ್ಕಾರ! ನಾನು ಅರ್ಥ ಮಾಡಿಕೊಳ್ಳಿದ್ದೇನೆ.",
- "Tamil": "வணக்கம்! நான் புரிந்துகொண்டேன்.",
- "Telugu": "నమస్తే! నేను అర్ధం చేసుకున్నాను.",
- }
- greeting = greetings.get(language, "")
- if greeting:
- return f"{greeting}\n{text}"
- return text
diff --git a/core/avatar_visuals.py b/core/avatar_visuals.py
deleted file mode 100644
index 980edf1..0000000
--- a/core/avatar_visuals.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict
-
-from core.runtime_state import RuntimeState
-
-
-@dataclass(frozen=True)
-class VisualState:
- glow_level: float
- pulse_speed: float
- breath_rate: float
- eye_blink_rate: float
- idle_sway_amp: float
- speaking_bob_amp: float
- thinking_orbit_speed: float
- blocked_dim_level: float
- error_flicker: bool
- caption_style: str
-
-
-def clamp(value: float, low: float, high: float) -> float:
- return max(low, min(high, float(value)))
-
-
-STATE_BASE: Dict[str, Dict[str, float]] = {
- "idle": {"pulse": 0.9, "breath": 0.8, "sway": 0.3, "bob": 0.05, "orbit": 0.3},
- "listening": {"pulse": 1.1, "breath": 0.9, "sway": 0.25, "bob": 0.1, "orbit": 0.35},
- "thinking": {"pulse": 1.0, "breath": 0.85, "sway": 0.2, "bob": 0.08, "orbit": 0.65},
- "speaking": {"pulse": 1.5, "breath": 1.0, "sway": 0.25, "bob": 0.35, "orbit": 0.4},
- "blocked": {"pulse": 0.4, "breath": 0.7, "sway": 0.05, "bob": 0.05, "orbit": 0.1},
- "error": {"pulse": 1.1, "breath": 0.75, "sway": 0.15, "bob": 0.15, "orbit": 0.35},
- "muted": {"pulse": 0.6, "breath": 0.8, "sway": 0.2, "bob": 0.05, "orbit": 0.2},
-}
-
-
-EMOTION_TONE: Dict[str, Dict[str, float]] = {
- "bright": {"glow": 0.2, "pulse": 0.15},
- "proud": {"glow": 0.15, "pulse": 0.1},
- "apologetic": {"glow": -0.2, "pulse": -0.1},
- "concerned": {"glow": -0.15, "pulse": -0.08},
- "curious": {"blink": 0.2, "orbit": 0.1},
- "steady": {"sway": -0.1},
- "serious": {"sway": -0.12, "pulse": -0.05},
-}
-
-
-def compute_visual_state(runtime: RuntimeState) -> VisualState:
- avatar_state = runtime.avatar_state or "idle"
- base = STATE_BASE.get(avatar_state, STATE_BASE["idle"])
- intensity = clamp(runtime.intensity, 0.1, 1.0)
- tone = EMOTION_TONE.get(runtime.emotion_tag, {})
-
- glow = clamp(0.6 + tone.get("glow", 0.0) + (intensity - 0.5) * 0.3, 0.2, 1.0)
- pulse = clamp(base["pulse"] + tone.get("pulse", 0.0), 0.4, 2.2)
- breath = clamp(base["breath"], 0.5, 1.4)
- blink = clamp(0.6 + tone.get("blink", 0.0), 0.4, 1.4)
- sway = clamp(base["sway"] + tone.get("sway", 0.0), 0.0, 0.6)
- bob = clamp(base["bob"], 0.0, 0.5)
- orbit = clamp(base["orbit"] + tone.get("orbit", 0.0), 0.1, 1.2)
-
- if runtime.overrides.dnd or runtime.overrides.kill_switch:
- blocked_dim = 0.6
- else:
- blocked_dim = 0.0
-
- error_flicker = avatar_state == "error"
- caption_style = "alert" if avatar_state in {"error", "blocked"} else "calm"
-
- if runtime.visual_settings.reduce_motion:
- sway = min(sway, 0.1)
- bob = min(bob, 0.15)
- orbit = min(orbit, 0.3)
-
- return VisualState(
- glow_level=glow,
- pulse_speed=pulse * (0.7 + intensity * 0.6),
- breath_rate=breath,
- eye_blink_rate=blink,
- idle_sway_amp=sway,
- speaking_bob_amp=bob * intensity,
- thinking_orbit_speed=orbit,
- blocked_dim_level=blocked_dim,
- error_flicker=error_flicker,
- caption_style=caption_style,
- )
diff --git a/core/behavior/behavior_planner.py b/core/behavior/behavior_planner.py
deleted file mode 100644
index 1a6d466..0000000
--- a/core/behavior/behavior_planner.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from __future__ import annotations
-from typing import Any, Dict
-
-
-def plan_behavior(user_text: str, language: str = "en", emotion: str = "calm") -> Dict[str, Any]:
- """
- Convert user intent -> performance plan:
- - gestures timeline
- - ring effects
- - voice settings
- - emotion target
- - motion clip
- - dance routine parameters (optional)
- """
- text_raw = user_text or ""
- text = text_raw.lower()
-
- # Simple intent detection
- is_explain = any(k in text for k in ["explain", "what is", "define", "tell me about", "regression", "derivation"])
- is_math = any(k in text for k in ["regression", "centroid", "matrix", "integration", "differential", "probability"])
- is_fast = any(k in text for k in ["quick", "fast", "short"])
- is_dance = any(k in text for k in ["dance", "hype", "party", "vibe"])
- is_sing = any(k in text for k in ["sing", "song", "music mode"])
-
- # Voice plan
- voice = {
- "language": language,
- "rate": 1.0 if not is_fast else 1.15,
- "style": "teacher" if is_explain else "neutral",
- }
-
- # Emotion target
- emotion_target = emotion
- if any(k in text for k in ["stress", "panic", "anxious", "scared"]):
- emotion_target = "reassuring"
- if any(k in text for k in ["angry", "hate", "annoyed"]):
- emotion_target = "calm_confident"
-
- # Gestures + ring FX (micro-performance)
- gestures = [
- {"t": 0.2, "type": "idle_breathe", "dur": 1.2},
- {"t": 0.7, "type": "hand_raise", "dur": 0.9},
- ]
- ui_effects = [{"t": 0.9, "type": "ring_pulse", "dur": 1.0, "intensity": 1.1}]
-
- if is_explain:
- gestures += [
- {"t": 1.4, "type": "point_right", "dur": 1.0},
- {"t": 2.6, "type": "nod", "dur": 0.7},
- ]
- ui_effects += [
- {"t": 1.4, "type": "ring_highlight", "segment": 3 if is_math else 1, "dur": 1.1},
- {"t": 2.4, "type": "ring_highlight", "segment": 6 if is_math else 4, "dur": 1.0},
- ]
-
- # Motion selection (Phase-1 stub)
- motion = {"clip": "idle_breathe_01", "intensity": 1.0, "loop": True}
-
- if is_dance:
- motion = {"clip": "dance_hype_01", "intensity": 1.25, "loop": True}
- elif is_sing:
- motion = {"clip": "sing_soft_loop", "intensity": 1.05, "loop": True}
- elif is_explain:
- motion = {"clip": "explain_loop_01", "intensity": 1.0, "loop": True}
- elif emotion_target in ("reassuring", "calm"):
- motion = {"clip": "idle_breathe_01", "intensity": 0.9, "loop": True}
-
- # Dance plan (NEW)
- dance = None
- if is_dance:
- # default dance parameters (later: beat detect from song)
- dance = {"duration_s": 18.0, "bpm": 128.0, "style": "bolly_pop"}
-
- # Add beat-synced UI pulses (no audio analysis yet)
- try:
- from core.avatar_motion.dance_planner import generate_beat_grid
- beats = generate_beat_grid(duration_s=dance["duration_s"], bpm=dance["bpm"])
- for b in beats:
- ui_effects.append({
- "t": b.t,
- "type": "ring_pulse",
- "dur": 0.18,
- "intensity": 1.55 if b.strength >= 0.9 else 1.15
- })
- except Exception:
- pass
-
- return {
- "voice": voice,
- "emotion_target": emotion_target,
- "gestures": gestures,
- "ui_effects": ui_effects,
- "motion": motion,
- "dance": dance,
- }
diff --git a/core/database.py b/core/database.py
deleted file mode 100644
index e87523a..0000000
--- a/core/database.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import sqlite3
-import os
-import json
-try:
- import numpy as np
-except Exception:
- np = None # optional on Termux/CI
-from datetime import datetime
-
-
class Database:
    """SQLite persistence for memories (with optional float32 embeddings)
    and a key/value user profile.

    numpy is optional (imported in a try/except at module level): when it
    is unavailable, embeddings are not stored and similarity search
    degrades gracefully to recency-ordered retrieval.
    """

    def __init__(self, db_path="data/etherea.db"):
        self.db_path = db_path
        # Create the parent directory only when the path has one;
        # os.makedirs("") raises FileNotFoundError for bare filenames.
        parent_dir = os.path.dirname(self.db_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        # check_same_thread=False: the connection is shared across threads
        # (e.g. a background loop persisting state).
        self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
        self.conn.execute("PRAGMA foreign_keys = ON")  # safe default
        self.create_tables()

    def create_tables(self):
        """Create the memories and user_profile tables if missing."""
        cursor = self.conn.cursor()

        # Memories Table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS memories (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                content TEXT NOT NULL,
                type TEXT DEFAULT 'general',
                embedding BLOB,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        """)

        # User Profile Table (Key-Value)
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS user_profile (
                key TEXT PRIMARY KEY,
                value TEXT
            )
        """)

        self.conn.commit()

    def add_memory(self, content: str, memory_type: str = "general", embedding=None):
        """Insert a memory row.

        *embedding* is an optional sequence of floats, stored as raw
        float32 bytes; it is silently skipped when numpy is unavailable.
        """
        cursor = self.conn.cursor()
        emb_blob = None
        # Only convert when we have both an embedding and numpy.
        if embedding is not None and np is not None:
            try:
                emb_array = np.array(embedding, dtype=np.float32)
                emb_blob = sqlite3.Binary(emb_array.tobytes())
            except Exception as e:
                print(f"Failed to convert embedding to blob: {e}")
                emb_blob = None
        cursor.execute(
            "INSERT INTO memories (content, type, embedding) VALUES (?, ?, ?)",
            (content, memory_type, emb_blob)
        )
        self.conn.commit()

    def search_memories(self, query_embedding, limit: int = 5):
        """
        Search memories using cosine similarity of embeddings.

        Falls back to recency ordering when no query embedding is given
        or numpy is unavailable (previously this crashed when np was None).
        """
        if query_embedding is None or np is None:
            return self.get_recent_memories(limit)

        cursor = self.conn.cursor()
        cursor.execute(
            "SELECT content, embedding FROM memories WHERE embedding IS NOT NULL")
        rows = cursor.fetchall()

        if not rows:
            return []

        search_vec = np.array(query_embedding, dtype=np.float32)
        results = []

        for content, emb_blob in rows:
            try:
                emb = np.frombuffer(emb_blob, dtype=np.float32)
                # Cosine similarity: (A . B) / (||A|| * ||B||); the +1e-9
                # guards against division by zero for all-zero vectors.
                norm = np.linalg.norm(search_vec) * np.linalg.norm(emb) + 1e-9
                sim = np.dot(search_vec, emb) / norm
                results.append((content, sim))
            except Exception as e:
                print(f"Failed to compute similarity for a memory: {e}")

        # Sort by similarity descending
        results.sort(key=lambda x: x[1], reverse=True)
        return [res[0] for res in results[:limit]]

    def get_recent_memories(self, limit: int = 5):
        """Return the newest *limit* memory contents, most recent first."""
        cursor = self.conn.cursor()
        cursor.execute(
            "SELECT content FROM memories ORDER BY created_at DESC LIMIT ?", (limit,))
        return [row[0] for row in cursor.fetchall()]

    def set_preference(self, key: str, value: str):
        """Insert or replace a user-profile key/value pair."""
        cursor = self.conn.cursor()
        cursor.execute(
            "INSERT OR REPLACE INTO user_profile (key, value) VALUES (?, ?)",
            (key, value)
        )
        self.conn.commit()

    def get_profile_context(self):
        """Return the whole user profile as a dict ({} when empty).

        fetchall() never returns None, so no None-guard is needed.
        """
        cursor = self.conn.cursor()
        cursor.execute("SELECT key, value FROM user_profile")
        return {key: value for key, value in cursor.fetchall()}
-
-
# Global Instance
# Module-level singleton shared across core modules (e.g. memory_store,
# ei_engine). NOTE: instantiated at import time, so importing this module
# creates/opens data/etherea.db as a side effect.
db = Database()
diff --git a/core/ei_engine.py b/core/ei_engine.py
deleted file mode 100644
index 6c289c6..0000000
--- a/core/ei_engine.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import time
-import threading
-import logging
-import math
-from typing import Dict
-from core.signals import signals
-
-logger = logging.getLogger("etherea_internal")
-logger.setLevel(logging.WARNING)
-
-
class EIEngine:
    """Emotional-intelligence engine.

    Maintains a small emotion vector (focus/stress/energy/curiosity/flow)
    in [0, 1], driven by input-activity and pattern signals, and decays it
    on a background daemon thread started by start().

    Thread-safety: state mutations happen under self._lock.
    """

    def __init__(self):
        # Core emotional state; every channel is kept in [0, 1] by _clamp.
        self.emotion_vector: Dict[str, float] = {
            "focus": 0.5,
            "stress": 0.2,
            "energy": 0.5,
            "curiosity": 0.5,
            "flow": 0.0,
        }
        # `state` deliberately aliases emotion_vector (same dict object)
        # for callers that expect a `.state` attribute.
        self.state = self.emotion_vector
        # Secondary signals derived from raw input patterns.
        self.sub_states: Dict[str, float] = {
            "flow_intensity": 0.0,
            "typing_rhythm": 0.5,
            "physical_jitter": 0.0,
        }
        self.last_update = time.time()
        self.running = False
        self._lock = threading.Lock()
        self._thread = None
        self._stop_event = threading.Event()
        self.last_proactive_trigger = 0.0
        self.trigger_cooldown = 120.0  # seconds between proactive triggers

        # Persistence throttling
        self.last_save_time = 0.0
        self.save_interval = 30.0  # periodic save interval (seconds)
        self.last_saved_stress = 0.0

        # Best-effort signal wiring: the signals hub may not expose these
        # attributes (or exist at all) in every build.
        try:
            if hasattr(signals, "input_activity"):
                signals.input_activity.connect(self.on_input_activity)
            if hasattr(signals, "pattern_detected"):
                signals.pattern_detected.connect(self.on_pattern_detected)
        except Exception:
            pass

    def _clamp(self, value: float) -> float:
        """Coerce *value* into [0, 1]; NaN/inf/unconvertible map to 0.5."""
        try:
            if math.isnan(value) or math.isinf(value):
                return 0.5
            return max(0.0, min(1.0, float(value)))
        except Exception:
            return 0.5

    def on_input_activity(self, activity_type: str, payload):
        """Fold one activity sample into the emotion vector.

        activity_type: "typing" or "mouse".
        payload: dict with optional intensity/jitter/variance keys, or a
        bare number treated as intensity.
        """
        with self._lock:
            intensity = 0.0
            jitter = 0.0
            variance = 0.0
            if isinstance(payload, dict):
                intensity = payload.get("intensity", 0.0)
                jitter = payload.get("jitter", 0.0)
                variance = payload.get("variance", 0.0)
            else:
                intensity = payload

            intensity = self._clamp(intensity)
            jitter = self._clamp(jitter)
            variance = self._clamp(variance)

            if activity_type == "typing":
                self.emotion_vector["focus"] += 0.05 * intensity
                self.emotion_vector["energy"] -= 0.01 * intensity
                if intensity > 0.8:
                    self.emotion_vector["stress"] += 0.02
                # Steady typing (low variance) reads as rhythmic.
                self.sub_states["typing_rhythm"] = self._clamp(1.0 - variance)
            elif activity_type == "mouse":
                self.emotion_vector["curiosity"] += 0.02 * intensity
                if intensity > 0.9:
                    self.emotion_vector["stress"] += 0.05
                    self.emotion_vector["focus"] -= 0.02
            # Jitter applies regardless of activity type.
            if jitter > 0.0:
                self.sub_states["physical_jitter"] = max(
                    self.sub_states["physical_jitter"], jitter)
                if jitter > 0.5:
                    self.emotion_vector["stress"] += 0.03 * jitter

            # Rhythmic, low-stress activity builds "flow".
            if self.sub_states["typing_rhythm"] > 0.7 and self.emotion_vector["stress"] < 0.5:
                self.sub_states["flow_intensity"] = self._clamp(
                    self.sub_states["flow_intensity"] + 0.02 * intensity)
                self.emotion_vector["flow"] = self.sub_states["flow_intensity"]

            for k in self.emotion_vector:
                self.emotion_vector[k] = self._clamp(self.emotion_vector[k])

    def start(self):
        """Start the background decay/persistence loop (idempotent)."""
        if self.running:
            return
        self.running = True
        self._stop_event.clear()
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()

    def stop(self):
        """Signal the loop to stop and join it briefly."""
        self.running = False
        self._stop_event.set()
        if self._thread:
            self._thread.join(1.0)

    def _loop(self):
        """Background tick (~20 Hz): decay state, persist, check triggers."""
        while not self._stop_event.is_set():
            now = time.time()
            # Clamp dt so a long stall cannot cause a huge decay jump.
            dt = max(0.0, min(now - self.last_update, 1.0))
            self.last_update = now
            with self._lock:
                self.emotion_vector["stress"] -= 0.05 * dt
                self.emotion_vector["focus"] -= 0.02 * dt
                self.emotion_vector["energy"] += 0.01 * dt
                self.sub_states["flow_intensity"] = self._clamp(
                    self.sub_states["flow_intensity"] - 0.01 * dt)
                self.emotion_vector["flow"] = self.sub_states["flow_intensity"]

                # High focus actively dissipates stress.
                if self.emotion_vector["focus"] > 0.8:
                    self.emotion_vector["stress"] *= 0.9
                if self.emotion_vector["stress"] < 0.1:
                    self.emotion_vector["stress"] = 0.0

                for k in self.emotion_vector:
                    self.emotion_vector[k] = self._clamp(self.emotion_vector[k])

            # Persistence and signal emission are best-effort and happen
            # outside the lock (as before).
            try:
                current_stress = self.emotion_vector["stress"]
                stress_diff = abs(current_stress - self.last_saved_stress)
                if (now - self.last_save_time > self.save_interval) or (stress_diff > 0.15):
                    import json
                    from core.database import db

                    # BUGFIX: store JSON rather than str(dict). Readers
                    # (e.g. MemoryStore.get_emotion) parse this key with
                    # json.loads, which rejects the single-quoted repr that
                    # str() produced.
                    db.set_preference("last_emotion", json.dumps(self.emotion_vector))
                    self.last_save_time = now
                    self.last_saved_stress = current_stress

                signals.emotion_updated.emit(self.emotion_vector.copy())
            except Exception:
                pass

            self._check_triggers(now)
            time.sleep(0.05)

    def on_pattern_detected(self, patterns: dict):
        """Adjust state from detected behavioral patterns.

        patterns: dict with optional bool keys hesitation/repetition/
        late_night and int key deletions.
        """
        with self._lock:
            hesitation = patterns.get("hesitation", False)
            repetition = patterns.get("repetition", False)
            late_night = patterns.get("late_night", False)
            deletions = patterns.get("deletions", 0)

            # 1. Micro-Hesitation & Uncertainty (Deletions)
            if deletions > 2:
                # Interpret as uncertainty/perfectionism -> Mirror with structured clarity
                self.emotion_vector["stress"] = self._clamp(self.emotion_vector["stress"] + 0.03 * deletions)
                self.emotion_vector["curiosity"] = self._clamp(self.emotion_vector["curiosity"] + 0.05)

            # 2. Confidence Matching / Repetition
            if repetition:
                # User is stuck or looping: increase stress baseline subtly
                self.emotion_vector["stress"] = self._clamp(self.emotion_vector["stress"] + 0.05)
                # Mirror overwhelmed state -> remove pressure
                self.emotion_vector["energy"] *= 0.8

            # 3. Silence / Idle Hesitation
            if hesitation and self.emotion_vector["focus"] > 0.6:
                # User is paused/thinking: increase curiosity
                self.emotion_vector["curiosity"] = self._clamp(self.emotion_vector["curiosity"] + 0.1)
                self.emotion_vector["energy"] *= 0.9  # Slow down presence

            # 4. Implicit Emotional Safety Net (Late Night/Overwhelmed)
            if late_night:
                # Dampen energy and stress for a calmer night experience
                self.emotion_vector["energy"] *= 0.8
                self.emotion_vector["stress"] *= 0.7

            # Normalize state without advice (Manifest Rule)
            for k in self.emotion_vector:
                self.emotion_vector[k] = self._clamp(self.emotion_vector[k])

    def _check_triggers(self, now):
        """Emit at most one proactive trigger per cooldown window."""
        if now - self.last_proactive_trigger < self.trigger_cooldown:
            return
        if self.emotion_vector["stress"] > 0.85:
            signals.proactive_trigger.emit("stress_relief")
            self.last_proactive_trigger = now
        elif self.emotion_vector["focus"] > 0.9:
            signals.proactive_trigger.emit("focus_shield_active")
            self.last_proactive_trigger = now
diff --git a/core/emotion_mapper.py b/core/emotion_mapper.py
deleted file mode 100644
index 2cf3a92..0000000
--- a/core/emotion_mapper.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from typing import Dict, Any
-import math
-
-
class EmotionMapper:
    """
    Converts an emotion vector (focus, stress, energy, curiosity) into
    smoothed visual parameters for the avatar. Each update() call moves
    the current parameters toward freshly computed targets using a lerp
    whose strength scales with elapsed time, making the easing
    frame-rate independent.
    """

    def __init__(self, lerp_factor: float = 0.05):
        baseline = {
            "eye_openness": 0.5,
            "blink_rate": 0.1,
            "gaze_jitter": 0.0,
            "breathing_intensity": 0.5,
            "glow_intensity": 0.5,
            "color_temp": 0.5,
            "pulse_speed": 0.5,
            "particle_density": 0.5,
        }
        # Smoothed values actually rendered.
        self.params: Dict[str, float] = dict(baseline)
        # Where the smoothing is currently heading.
        self.target_params: Dict[str, float] = dict(baseline)
        # Per-frame easing strength, clamped into [0, 1]. Smaller -> slower.
        self.lerp_factor = float(max(0.0, min(1.0, lerp_factor)))

    def update(self, emotion_vector: Dict[str, Any], dt: float = 0.016) -> Dict[str, float]:
        """Recompute targets from *emotion_vector* and ease params toward them.

        Returns a copy of the smoothed parameter dict.
        """
        focus = self._clamp(self._channel(emotion_vector, "focus", 0.5))
        stress = self._clamp(self._channel(emotion_vector, "stress", 0.2))
        energy = self._clamp(self._channel(emotion_vector, "energy", 0.5))
        curiosity = self._clamp(self._channel(emotion_vector, "curiosity", 0.5))

        goals = self.target_params
        goals["eye_openness"] = 0.4 + (focus * 0.4)
        goals["blink_rate"] = 0.05 + (stress * 0.2)
        goals["gaze_jitter"] = stress * 0.3
        goals["pulse_speed"] = 0.2 + (stress * 0.8)
        goals["breathing_intensity"] = 0.3 + (energy * 0.7)
        goals["glow_intensity"] = 0.3 + (0.6 * focus) + (0.1 * curiosity) - (0.25 * stress)
        goals["color_temp"] = 0.3 + (0.4 * focus) + (0.3 * energy) - (0.3 * stress)
        goals["particle_density"] = 0.2 + (0.6 * curiosity) + (0.2 * energy)

        weight = self._alpha(dt)
        for name in goals:
            goal = self._clamp(goals[name])
            current = self.params.get(name, 0.5)
            self.params[name] = self._clamp(self._lerp(current, goal, weight))

        return dict(self.params)

    def _channel(self, vector: Dict[str, Any], key: str, fallback: float) -> float:
        """Fetch one channel as float, falling back on missing/bad values."""
        try:
            return float(vector.get(key, fallback))
        except Exception:
            return fallback

    def _alpha(self, dt: float) -> float:
        """Convert elapsed time into an easing weight in [0, 1]."""
        if dt is None:
            return self.lerp_factor
        try:
            elapsed = float(dt)
        except Exception:
            return self.lerp_factor
        if elapsed <= 0:
            return 0.0
        # Normalize against a 60 fps frame (16 ms) so lerp_factor keeps
        # its per-frame meaning at any frame rate.
        scale = elapsed / 0.016
        return min(1.0, self.lerp_factor * scale)

    def _lerp(self, start: float, end: float, weight: float) -> float:
        """Linear interpolation from *start* to *end* by *weight*."""
        return start + (end - start) * weight

    def _clamp(self, value: float) -> float:
        """Clamp to [0, 1]; NaN, inf, and unconvertible inputs become 0.5."""
        try:
            if math.isnan(value) or math.isinf(value):
                return 0.5
            return min(1.0, max(0.0, float(value)))
        except Exception:
            return 0.5
-
-
# Shared module-level mapper instance (construction is cheap and side-effect free).
mapper = EmotionMapper()
diff --git a/core/event_bus.py b/core/event_bus.py
deleted file mode 100644
index 377670f..0000000
--- a/core/event_bus.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from __future__ import annotations
-
-from typing import Callable, List
-
-from core.event_model import Event
-
-
class EventBus:
    """Minimal synchronous pub/sub bus.

    Listeners are plain callables taking one Event; they are invoked
    inline by emit(), in subscription order.
    """

    def __init__(self) -> None:
        # Registered callbacks; duplicates are allowed and fire once each.
        self._subscribers: List[Callable[[Event], None]] = []

    def subscribe(self, listener: Callable[[Event], None]) -> None:
        """Register *listener* to receive every future emit()."""
        self._subscribers.append(listener)

    def emit(self, event: Event) -> None:
        """Deliver *event* synchronously to all subscribers, in order."""
        for callback in self._subscribers:
            callback(event)
-
-
# Process-wide default bus instance; modules share this so emitters and
# listeners use one registry.
event_bus = EventBus()
diff --git a/core/event_model.py b/core/event_model.py
deleted file mode 100644
index 9b9d6f0..0000000
--- a/core/event_model.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from datetime import datetime, timezone
-from typing import Any, Dict
-
-
@dataclass(frozen=True)
class Event:
    """Immutable message envelope carried on the event bus."""

    type: str                      # event type name, e.g. "OS_ACTION_STARTED"
    timestamp: str                 # ISO-8601 UTC string (see now_iso below)
    source: str                    # identifier of the emitting component
    payload: Dict[str, Any]        # event-specific data
    priority: int = 50             # relative importance; semantics defined by consumers (not shown here)
    privacy_level: str = "normal"  # privacy classification of the payload
-
-
def now_iso() -> str:
    """Return the current UTC time as an ISO-8601 timestamp string."""
    utc_now = datetime.now(timezone.utc)
    return utc_now.isoformat()
-
-
def create_event(
    event_type: str,
    source: str,
    payload: Dict[str, Any],
    *,
    priority: int = 50,
    privacy_level: str = "normal",
    timestamp: str | None = None,
) -> Event:
    """Build an Event, stamping the current UTC time when *timestamp* is
    missing or empty."""
    if not timestamp:
        timestamp = now_iso()
    return Event(
        type=event_type,
        timestamp=timestamp,
        source=source,
        payload=payload,
        priority=priority,
        privacy_level=privacy_level,
    )
diff --git a/core/gestures/gesture_engine.py b/core/gestures/gesture_engine.py
deleted file mode 100644
index 802cbd6..0000000
--- a/core/gestures/gesture_engine.py
+++ /dev/null
@@ -1,115 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, List, Optional
-
-try:
- from PySide6.QtCore import QObject, QTimer
-except Exception:
- QObject = object # type: ignore
- QTimer = None # type: ignore
-
-
class GestureEngine(QObject):
    """
    Safe gesture timeline runner.
    - Works when PySide6 is present (desktop builds)
    - Does nothing (but doesn't crash) when PySide6 is missing (Termux)

    The avatar widget is duck-typed: perform_gesture(), ring_highlight()
    and ring_pulse() are each called only when present (see _fire_item).
    """

    def __init__(self, avatar_widget=None, on_log=None):
        # avatar_widget: target for gestures/effects; on_log: optional
        # callable(str) used instead of print() for log output.
        super().__init__()
        self.avatar = avatar_widget
        self.on_log = on_log
        # Pending single-shot QTimers; kept here so they are not
        # garbage-collected before firing and can be cancelled by stop().
        self._timers: List[Any] = []

    def log(self, msg: str):
        """Send *msg* to the on_log callback, falling back to print()."""
        if self.on_log:
            try:
                self.on_log(msg)
                return
            except Exception:
                pass
        # fallback
        print(msg)

    def stop(self):
        """Cancel and discard all pending timeline timers."""
        # cancel pending timers
        for t in self._timers:
            try:
                # QTimer API; stub objects without these methods are
                # simply skipped by the except.
                t.stop()
                t.deleteLater()
            except Exception:
                pass
        self._timers.clear()

    def play(self, plan: Dict[str, Any]):
        """
        Schedule a gesture/effect timeline. Any prior timeline is stopped.

        plan = {
          "gestures":[{"t":0.2,"type":"nod","dur":0.6}, ...],
          "ui_effects":[{"t":1.2,"type":"ring_highlight","segment":3,"dur":1.0}, ...]
        }

        "t" is the delay in seconds from now at which each item fires.
        """
        self.stop()

        # Without PySide6 there is no event loop/timer support: bail out
        # quietly instead of crashing.
        if QTimer is None:
            self.log("⚠️ GestureEngine: PySide6 not available here (Termux). Skipping animations safely.")
            return

        gestures = plan.get("gestures", [])
        effects = plan.get("ui_effects", [])

        def schedule(item: Dict[str, Any], kind: str):
            # Convert seconds to whole, non-negative milliseconds.
            t_sec = float(item.get("t", 0.0))
            ms = max(0, int(t_sec * 1000))

            timer = QTimer(self)
            timer.setSingleShot(True)

            def fire():
                # Errors in one item must not kill the rest of the timeline.
                try:
                    self._fire_item(item, kind)
                except Exception as e:
                    self.log(f"⚠️ GestureEngine error ({kind}): {e}")

            timer.timeout.connect(fire)
            timer.start(ms)
            self._timers.append(timer)

        for g in gestures:
            schedule(g, "gesture")
        for e in effects:
            schedule(e, "effect")

        self.log("🎬 Gesture timeline started.")

    def _fire_item(self, item: Dict[str, Any], kind: str):
        """Dispatch one timeline item ("gesture" or "effect") to the avatar."""
        if not self.avatar:
            self.log("⚠️ GestureEngine: no avatar widget attached.")
            return

        name = str(item.get("type", "")).strip()
        dur = float(item.get("dur", 0.8))
        intensity = float(item.get("intensity", 1.0))

        # gestures
        if kind == "gesture":
            if hasattr(self.avatar, "perform_gesture"):
                self.avatar.perform_gesture(name, dur=dur, intensity=intensity)
                self.log(f"🤖 gesture: {name} ({dur}s)")
                return
            self.log("⚠️ Avatar missing perform_gesture().")

        # UI effects (ring highlight etc.)
        if kind == "effect":
            if name == "ring_highlight" and hasattr(self.avatar, "ring_highlight"):
                seg = int(item.get("segment", 0))
                self.avatar.ring_highlight(seg, dur=dur)
                self.log(f"💫 ring_highlight seg={seg} ({dur}s)")
                return

            if name == "ring_pulse" and hasattr(self.avatar, "ring_pulse"):
                self.avatar.ring_pulse(dur=dur, intensity=intensity)
                self.log(f"✨ ring_pulse ({dur}s)")
                return

            self.log(f"⚠️ Unknown effect or missing handler: {name}")
diff --git a/core/gestures/presets.py b/core/gestures/presets.py
deleted file mode 100644
index 374365e..0000000
--- a/core/gestures/presets.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict
-
-
def regression_preset() -> Dict[str, Any]:
    """
    Pre-timed explainer performance: a fixed sequence of micro-gestures
    plus ring UI effects laid out on a ~3.5 second timeline.
    """
    gesture_track = [
        {"t": 0.2, "type": "idle_breathe", "dur": 1.2},
        {"t": 0.6, "type": "hand_raise", "dur": 0.8},
        {"t": 1.2, "type": "point_right", "dur": 1.0},
        {"t": 2.4, "type": "nod", "dur": 0.7},
        {"t": 3.2, "type": "smile", "dur": 1.0},
    ]
    effect_track = [
        {"t": 1.2, "type": "ring_highlight", "segment": 3, "dur": 1.1},
        {"t": 2.0, "type": "ring_pulse", "dur": 1.0, "intensity": 1.2},
        {"t": 3.0, "type": "ring_highlight", "segment": 6, "dur": 1.0},
    ]
    return {"gestures": gesture_track, "ui_effects": effect_track}
diff --git a/core/md_loader.py b/core/md_loader.py
deleted file mode 100644
index 842dc93..0000000
--- a/core/md_loader.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import os
-import sys
-
-
class MDLoader:
    """
    Recursive Markdown loader for Etherea documentation.

    Resolves the docs folder relative to the PyInstaller bundle root
    (sys._MEIPASS) when frozen, otherwise relative to the current working
    directory, and collects `.md` file contents keyed by relative path.
    """

    def __init__(self, docs_folder: str):
        # PyInstaller extracts bundled data under sys._MEIPASS at runtime.
        base_path = getattr(sys, "_MEIPASS", None)
        if base_path is None:
            base_path = os.getcwd()

        self.docs_folder = os.path.join(base_path, docs_folder)
        # Accumulated contents across load_all() calls: {rel_path: text}.
        self.documents = {}

    def load_all(self) -> dict:
        """
        Read every markdown file under docs_folder into self.documents.

        :return: dict {relative_path: file_content} (a copy, so callers
                 cannot mutate the internal cache)
        """
        folder = self.docs_folder
        if not (os.path.exists(folder) and os.path.isdir(folder)):
            print(
                f"MDLoader Warning: Docs folder '{folder}' not found or not a directory.")
            return {}

        for root, _dirs, files in os.walk(folder):
            for filename in files:
                # Case-insensitive extension match (.md, .MD, ...).
                if not filename.lower().endswith(".md"):
                    continue
                full_path = os.path.join(root, filename)
                rel_path = os.path.relpath(full_path, folder)
                try:
                    with open(full_path, "r", encoding="utf-8") as handle:
                        self.documents[rel_path] = handle.read()
                except Exception as e:
                    print(f"MDLoader Error: Failed to read '{full_path}': {e}")

        # return a copy to prevent external mutation
        return dict(self.documents)
diff --git a/core/memory_store.py b/core/memory_store.py
deleted file mode 100644
index 0f92d00..0000000
--- a/core/memory_store.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# core/memory_store.py
-"""
-Memory Store for Etherea
-- Persistent storage of session state, outputs, visuals, and emotional data
-- Supports real-time, dynamic, multi-modal outputs
-"""
-
-import os
-import json
-import base64
-try:
- import requests
-except Exception:
- requests = None # optional on Termux/CI
-from core.utils import ensure_folder, debug_print
-from core.database import db
-
-
class MemoryStore:
    """Facade over the global SQLite Database instance: long-term memories,
    user preferences, emotional state, and base64-encoded images."""

    def __init__(self):
        # Leverage the global 'db' instance for SQLite persistence
        self.db = db
        self.db: "Database"  # shared core.database.Database singleton
        debug_print("MemoryStore", "Initialized with SQLite LTM")

    # --- LTM Methods ---
    def add_to_ltm(self, content: str, memory_type: str = "general", embedding=None):
        """Add a persistent memory with optional embedding"""
        if not content:
            debug_print("MemoryStore",
                        "Warning: Empty content not added to LTM")
            return
        self.db.add_memory(content, memory_type, embedding)
        debug_print("MemoryStore", f"Added to LTM: {content[:30]}...")

    def search_ltm(self, query_embedding, limit: int = 5):
        """Search LTM using semantic similarity.

        When query_embedding is None this still delegates to the database,
        which falls back to recency-ordered retrieval.
        """
        if query_embedding is None:
            debug_print("MemoryStore",
                        "No embedding provided, returning recent memories")
        return self.db.search_memories(query_embedding, limit)

    def get_history(self, limit: int = 5):
        """Get recent LTM entries"""
        return self.db.get_recent_memories(limit)

    # --- Profile / Preferences ---
    def update_preference(self, key: str, value: str):
        """Upsert one profile key/value pair; empty keys are ignored."""
        if not key:
            debug_print("MemoryStore",
                        "Warning: Empty key ignored in update_preference")
            return
        self.db.set_preference(key, value)

    def get_all_preferences(self):
        """Return the full profile dict from the database."""
        return self.db.get_profile_context()

    # --- Emotional State ---
    def update_emotion(self, emotion_dict: dict):
        """Store emotional context in profile"""
        if not isinstance(emotion_dict, dict):
            debug_print("MemoryStore",
                        f"update_emotion: Invalid type {type(emotion_dict)}")
            return
        self.update_preference("last_emotion", json.dumps(emotion_dict))

    def get_emotion(self):
        """Retrieve last known emotional state"""
        pref = self.db.get_profile_context()
        raw = pref.get("last_emotion")
        if raw:
            try:
                # NOTE(review): other writers may store non-JSON here
                # (EIEngine persisted str(dict)); json.loads then fails and
                # the neutral default below is returned.
                return json.loads(raw)
            except Exception as e:
                debug_print("MemoryStore",
                            f"get_emotion JSON decode error: {e}")
        return {"tone": "neutral", "intensity": 0.5}

    # --- Image Handling ---
    def save_image_from_url(self, key: str, url: str, timeout: int = 5) -> bool:
        """
        Fetch image from URL, convert to base64, and store in preferences.
        Returns True if successful, False on error.
        """
        if not key or not url:
            debug_print("MemoryStore", "save_image_from_url: key or URL empty")
            return False

        try:
            # requests is optional (None on Termux/CI); the resulting
            # AttributeError is caught below and reported as failure.
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()
            b64 = base64.b64encode(response.content).decode("utf-8")
            self.update_preference(f"img_{key}", b64)
            return True
        except Exception as e:
            debug_print("MemoryStore", f"save_image_from_url error: {e}")
            return False
diff --git a/core/music.py b/core/music.py
deleted file mode 100644
index 689df70..0000000
--- a/core/music.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import threading
-import time
-import math
-import random
-try:
- from PySide6.QtCore import QObject, Signal
-except Exception:
- class QObject: # minimal stub
- pass
- def Signal(*a, **k):
- return None
-
class MusicEngine(QObject):
    """
    Simulates a music player and generates synthetic Audio Spectrum Data.
    Emits signals for Bass, Mids, and Highs to drive UI animations.

    A daemon generator thread is started in __init__ and runs for the
    process lifetime at ~20 Hz.
    NOTE(review): without PySide6 the Signal stubs above evaluate to None,
    so .emit() in the loop would raise — confirm the Termux path.
    """
    spectrum_updated = Signal(dict)  # { 'bass': float, 'mid': float, 'high': float }
    track_changed = Signal(str)
    playback_state_changed = Signal(bool)  # True=Playing

    # Lazily created shared instance (see instance()).
    _instance = None

    def __init__(self):
        super().__init__()
        self._is_playing = False
        self._track_name = ""
        # Daemon thread: dies with the process, no explicit shutdown.
        self._thread = threading.Thread(target=self._run_loop, daemon=True)
        self._thread.start()

    @classmethod
    def instance(cls):
        """Return the shared engine, creating it on first use."""
        if cls._instance is None:
            cls._instance = MusicEngine()
        return cls._instance

    def play(self, track_name: str = "Etherea Ambient Mix"):
        """Start 'playback' of *track_name* and notify listeners."""
        self._track_name = track_name
        self._is_playing = True
        self.track_changed.emit(track_name)
        self.playback_state_changed.emit(True)

    def stop(self):
        """Stop 'playback' and notify listeners."""
        self._is_playing = False
        self.playback_state_changed.emit(False)

    def _run_loop(self):
        # Synthetic time axis; advanced only while playing so the waveform
        # pauses with playback.
        t = 0.0
        while True:
            if self._is_playing:
                t += 0.05

                # Synthetic Audio Reactivity simulation
                # Bass: Slow, heavy beat (approx 120bpm)
                beat = (math.sin(t * 12.0) + 1.0) * 0.5
                bass = beat * beat * beat  # Sharper peaks
                if bass < 0.1: bass = 0.0

                # Mids: Melody/Vocals (Complex wave)
                mid = (math.sin(t * 5.0) * math.cos(t * 3.0) + 1.0) * 0.4
                mid += random.random() * 0.2

                # Highs: Hi-hats/Atmosphere (Fast noise)
                high = random.random() * 0.4 + (0.3 if beat > 0.8 else 0.0)

                self.spectrum_updated.emit({
                    'bass': min(1.0, bass),
                    'mid': min(1.0, mid),
                    'high': min(1.0, high)
                })

            else:
                # Silence
                try:
                    self.spectrum_updated.emit({'bass':0, 'mid':0, 'high':0})
                except RuntimeError:
                    pass # App likely closing

            time.sleep(0.05) # 20fps updates for visuals
diff --git a/core/notifications.py b/core/notifications.py
deleted file mode 100644
index 6a19cbf..0000000
--- a/core/notifications.py
+++ /dev/null
@@ -1,61 +0,0 @@
-try:
- from PySide6.QtCore import QObject, Signal
-except Exception:
- class QObject: # minimal stub
- pass
- def Signal(*a, **k):
- return None
-
class NotificationManager(QObject):
    """
    Singleton for managing system notifications silently.
    Stores messages and emits signals for UI updates (Badges/Trays),
    but does NOT trigger popups.

    NOTE(review): without PySide6 the Signal stubs evaluate to None, so
    .emit() in add()/clear() would raise — confirm the Termux path.
    """
    _instance = None

    # Signal emitted when a new notification arrives
    notification_added = Signal()
    # Signal emitted when notifications are cleared/read
    notifications_cleared = Signal()

    @classmethod
    def instance(cls):
        """Return the shared manager, creating it on first use.

        __init__ also registers the instance, so a second direct
        construction raises.
        """
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        super().__init__()
        if NotificationManager._instance is not None:
            raise Exception("This class is a singleton!")
        else:
            NotificationManager._instance = self

        self._notifications = []  # List of dicts: {title, message, timestamp, type}

    def add(self, title: str, message: str, kind: str = "info"):
        """
        Add a silent notification.

        kind: free-form category string (default "info").
        """
        import datetime
        n = {
            "title": title,
            "message": message,
            "kind": kind,
            "timestamp": datetime.datetime.now()
        }
        self._notifications.append(n)
        print(f"[j.a.r.v.i.s] Notification: {title} - {message}")
        self.notification_added.emit()

    def get_all(self):
        """Return all notifications, newest first."""
        return list(reversed(self._notifications))

    def get_count(self):
        # Number of currently stored notifications.
        return len(self._notifications)

    def clear(self):
        # Drop all stored notifications and notify listeners.
        self._notifications = []
        self.notifications_cleared.emit()
diff --git a/core/os_adapter.py b/core/os_adapter.py
deleted file mode 100644
index 0b5c05a..0000000
--- a/core/os_adapter.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from __future__ import annotations
-
-import os
-import subprocess
-import sys
-from dataclasses import dataclass
-from pathlib import Path
-from typing import List, Optional, Tuple
-
-
@dataclass
class OSResult:
    """Outcome of an OS-level action."""

    ok: bool                      # True when the action succeeded (or was a dry run)
    message: str                  # human-readable summary
    detail: Optional[str] = None  # extra info, e.g. exception text on failure


class OSAdapter:
    """Thin cross-platform wrapper around open/launch/reveal OS actions.

    With dry_run=True every method reports what it *would* do without
    touching the OS — useful for tests and previews.
    """

    def __init__(self, dry_run: bool = False) -> None:
        self.dry_run = dry_run

    def open_file(self, path: str) -> OSResult:
        """Open *path* with the default application for its type."""
        return self._open_path(path, mode="file")

    def open_folder(self, path: str) -> OSResult:
        """Open *path* in the system file manager."""
        return self._open_path(path, mode="folder")

    def reveal_in_explorer(self, path: str) -> OSResult:
        """Show *path* selected/highlighted in the file manager."""
        return self._reveal_path(path)

    def open_url(self, url: str) -> OSResult:
        """Open *url* with the platform's default handler/browser."""
        if self.dry_run:
            return OSResult(ok=True, message=f"DRY_RUN open_url: {url}")
        try:
            if sys.platform.startswith("win"):
                os.startfile(url)  # type: ignore[attr-defined]
            elif sys.platform.startswith("darwin"):
                subprocess.check_call(["open", url])
            else:
                subprocess.check_call(["xdg-open", url])
            return OSResult(ok=True, message=f"Opened URL: {url}")
        except Exception as exc:
            return OSResult(ok=False, message="open_url failed", detail=str(exc))

    def launch_app(self, path: str, args: Optional[List[str]] = None) -> OSResult:
        """Launch the executable at *path*, optionally with *args*."""
        args = args or []
        if self.dry_run:
            return OSResult(ok=True, message=f"DRY_RUN launch_app: {path} {' '.join(args)}")
        try:
            if sys.platform.startswith("win"):
                if args:
                    subprocess.Popen([path] + args)
                else:
                    # BUGFIX: os.startfile was previously called twice in a
                    # row here, launching the application two times.
                    os.startfile(path)  # type: ignore[attr-defined]
            else:
                subprocess.Popen([path] + args)
            return OSResult(ok=True, message=f"Launched app: {path}")
        except Exception as exc:
            return OSResult(ok=False, message="launch_app failed", detail=str(exc))

    def _open_path(self, path: str, mode: str) -> OSResult:
        """Shared open implementation for files and folders."""
        resolved = str(Path(path).expanduser().resolve())
        if self.dry_run:
            return OSResult(ok=True, message=f"DRY_RUN open_{mode}: {resolved}")
        try:
            if sys.platform.startswith("win"):
                os.startfile(resolved)  # type: ignore[attr-defined]
            elif sys.platform.startswith("darwin"):
                subprocess.check_call(["open", resolved])
            else:
                subprocess.check_call(["xdg-open", resolved])
            return OSResult(ok=True, message=f"Opened {mode}: {resolved}")
        except Exception as exc:
            return OSResult(ok=False, message=f"open_{mode} failed", detail=str(exc))

    def _reveal_path(self, path: str) -> OSResult:
        """Shared reveal implementation (select in file manager)."""
        resolved = str(Path(path).expanduser().resolve())
        if self.dry_run:
            return OSResult(ok=True, message=f"DRY_RUN reveal_in_explorer: {resolved}")
        try:
            if sys.platform.startswith("win"):
                subprocess.check_call(["explorer", "/select,", resolved])
            elif sys.platform.startswith("darwin"):
                subprocess.check_call(["open", "-R", resolved])
            else:
                # No standard "select" verb on Linux; open the parent folder.
                subprocess.check_call(["xdg-open", str(Path(resolved).parent)])
            return OSResult(ok=True, message=f"Revealed path: {resolved}")
        except Exception as exc:
            return OSResult(ok=False, message="reveal_in_explorer failed", detail=str(exc))
diff --git a/core/os_pipeline.py b/core/os_pipeline.py
deleted file mode 100644
index f8e6f04..0000000
--- a/core/os_pipeline.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict, Optional
-
-from core.app_registry import AppRegistry
-from core.event_bus import EventBus, event_bus
-from core.event_model import create_event
-from core.os_adapter import OSAdapter
-
-
@dataclass
class OSOverrides:
    """User/system override flags that can veto OS actions (checked by
    OSPipeline.handle_intent in this module)."""

    kill_switch: bool = False   # hard stop: blocks every intent
    dnd: bool = False           # do-not-disturb: blocks OPEN_URL / LAUNCH_APP
    manual_lock: bool = False   # treated like kill_switch by the pipeline
    privacy_mode: bool = False  # blocks all intents with reason "privacy_mode"
-
-
class OSPipeline:
    """Routes OS-action intents through override/confirmation checks,
    emits OS_ACTION_* audit events at each stage, and dispatches allowed
    intents to the OSAdapter."""

    def __init__(
        self,
        adapter: OSAdapter,
        registry: Optional[AppRegistry] = None,
        bus: Optional[EventBus] = None,
    ) -> None:
        # Defaults: a fresh AppRegistry and the module-level shared bus.
        self._adapter = adapter
        self._registry = registry or AppRegistry()
        self._bus = bus or event_bus

    def handle_intent(
        self,
        intent: str,
        payload: Dict[str, object],
        *,
        overrides: Optional[OSOverrides] = None,
        source: str = "os_pipeline",
    ) -> Dict[str, object]:
        """Validate *intent* against overrides and confirmation, then run it.

        Returns a dict with at least "ok"; blocked requests also carry
        "reason". Emits REQUESTED/BLOCKED/STARTED/FINISHED/FAILED events.
        """
        overrides = overrides or OSOverrides()
        self._emit("OS_ACTION_REQUESTED", payload, source=source)

        # Hard blocks first: kill switch / manual lock stop everything.
        if overrides.kill_switch or overrides.manual_lock:
            self._emit("OS_ACTION_BLOCKED", {"reason": "overrides", **payload}, source=source)
            return {"ok": False, "intent": intent, "reason": "overrides"}

        if overrides.privacy_mode:
            self._emit("OS_ACTION_BLOCKED", {"reason": "privacy_mode", **payload}, source=source)
            return {"ok": False, "intent": intent, "reason": "privacy_mode"}

        # DND only limits the outward-facing intents.
        if overrides.dnd and intent in {"OPEN_URL", "LAUNCH_APP"}:
            self._emit("OS_ACTION_BLOCKED", {"reason": "dnd", **payload}, source=source)
            return {"ok": False, "intent": intent, "reason": "dnd"}

        # These intents require an explicit confirm=True in the payload.
        confirm_required = intent in {"OPEN_URL", "LAUNCH_APP", "OPEN_FILE"}
        if confirm_required and not bool(payload.get("confirm", False)):
            self._emit("OS_ACTION_BLOCKED", {"reason": "confirmation_required", **payload}, source=source)
            return {"ok": False, "intent": intent, "reason": "confirmation_required"}

        self._emit("OS_ACTION_STARTED", payload, source=source)
        result = self._dispatch(intent, payload)
        event_type = "OS_ACTION_FINISHED" if result.get("ok") else "OS_ACTION_FAILED"
        self._emit(event_type, {"intent": intent, **result}, source=source)
        return result

    def _dispatch(self, intent: str, payload: Dict[str, object]) -> Dict[str, object]:
        """Map an allowed intent to the matching adapter call; unknown
        intents return ok=False with message "unknown_intent"."""
        if intent == "OPEN_FILE":
            path = str(payload.get("path", ""))
            result = self._adapter.open_file(path)
            return {"ok": result.ok, "message": result.message, "detail": result.detail}
        if intent == "OPEN_FOLDER":
            path = str(payload.get("path", ""))
            result = self._adapter.open_folder(path)
            return {"ok": result.ok, "message": result.message, "detail": result.detail}
        if intent == "OPEN_URL":
            url = str(payload.get("url", ""))
            result = self._adapter.open_url(url)
            return {"ok": result.ok, "message": result.message, "detail": result.detail}
        if intent == "REVEAL_PATH":
            path = str(payload.get("path", ""))
            result = self._adapter.reveal_in_explorer(path)
            return {"ok": result.ok, "message": result.message, "detail": result.detail}
        if intent == "LAUNCH_APP":
            # LAUNCH_APP resolves an app_id through the registry first.
            app_id = str(payload.get("app_id", ""))
            app = self._registry.get(app_id)
            if not app:
                return {"ok": False, "message": "app_not_found", "detail": app_id}
            result = self._adapter.launch_app(app.path, app.args)
            return {"ok": result.ok, "message": result.message, "detail": result.detail}
        return {"ok": False, "message": "unknown_intent", "detail": intent}

    def _emit(self, event_type: str, payload: Dict[str, object], *, source: str) -> None:
        """Publish one audit event on the bus (fixed priority 35)."""
        self._bus.emit(
            create_event(
                event_type,
                source=source,
                payload=payload,
                priority=35,
                privacy_level="normal",
            )
        )
diff --git a/core/resource_handler.py b/core/resource_handler.py
deleted file mode 100644
index 36afe3b..0000000
--- a/core/resource_handler.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import os
-import sys
-
-
-def get_resource_path(relative_path: str) -> str:
- """
- Returns the absolute path to a resource, compatible with both
- development and PyInstaller builds.
-
- PyInstaller extracts bundled resources into a temporary folder
- accessible via sys._MEIPASS.
-
- :param relative_path: Path relative to project root or packaged resources
- :return: Absolute path to the resource
- """
- try:
- if hasattr(sys, "_MEIPASS"):
- # PyInstaller runtime
- base_path = sys._MEIPASS
- else:
- # Development runtime
- base_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), ".."))
- return os.path.join(base_path, relative_path)
- except Exception as e:
- # Fail gracefully with debug info
- print(
- f"[resource_handler] Error resolving path '{relative_path}': {e}")
- return relative_path # fallback
-
-
-# --- Helper functions for common resource types ---
-
-def get_ui_resource(name: str) -> str:
- """Get absolute path to a UI resource file."""
- return get_resource_path(os.path.join("core", "ui", name))
-
-
-def get_shader_resource(name: str) -> str:
- """Get absolute path to a UI shader resource."""
- return get_resource_path(os.path.join("core", "ui", "shaders", name))
-
-
-def get_audio_resource(name: str) -> str:
- """Get absolute path to an audio asset."""
- return get_resource_path(os.path.join("assets", "audio", name))
diff --git a/core/runtime_state.py b/core/runtime_state.py
deleted file mode 100644
index 682711a..0000000
--- a/core/runtime_state.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass, field
-from datetime import datetime, timezone
-from typing import Dict, Optional
-
-
-def now_iso() -> str:
- return datetime.now(timezone.utc).isoformat()
-
-
-@dataclass
-class MetricValue:
- value: float
- source: str
- timestamp: str
-
-
-@dataclass
-class AudioState:
- bg_audio_active: bool = False
- tts_active: bool = False
-
-
-@dataclass
-class OverridesState:
- kill_switch: bool = False
- dnd: bool = False
- manual_lock: bool = False
- privacy_mode: bool = False
-
-
-@dataclass
-class WorkspaceState:
- workspace_id: Optional[str] = None
- workspace_name: Optional[str] = None
- session_active: bool = False
- last_saved: Optional[str] = None
- session_info: Dict[str, object] = field(default_factory=dict)
-
-
-@dataclass
-class VisualSettings:
- reduce_motion: bool = False
- intensity_preset: str = "normal"
-
-
-@dataclass
-class RuntimeState:
- focus: MetricValue = field(default_factory=lambda: MetricValue(0.5, "init", now_iso()))
- stress: MetricValue = field(default_factory=lambda: MetricValue(0.2, "init", now_iso()))
- energy: MetricValue = field(default_factory=lambda: MetricValue(0.6, "init", now_iso()))
- current_mode: str = "idle"
- last_intent: Optional[str] = None
- avatar_state: str = "idle"
- emotion_tag: str = "calm"
- intensity: float = 0.5
- audio_state: AudioState = field(default_factory=AudioState)
- overrides: OverridesState = field(default_factory=OverridesState)
- workspace_state: WorkspaceState = field(default_factory=WorkspaceState)
- visual_settings: VisualSettings = field(default_factory=VisualSettings)
- language_code: str = "en-IN"
-
- def update_metric(self, key: str, value: float, source: str) -> None:
- metric = MetricValue(value=value, source=source, timestamp=now_iso())
- if key == "focus":
- self.focus = metric
- elif key == "stress":
- self.stress = metric
- elif key == "energy":
- self.energy = metric
diff --git a/core/self_awareness/__init__.py b/core/self_awareness/__init__.py
deleted file mode 100644
index a415e13..0000000
--- a/core/self_awareness/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .introspector import build_self_explain_text
diff --git a/core/self_awareness/introspector.py b/core/self_awareness/introspector.py
deleted file mode 100644
index e9e1752..0000000
--- a/core/self_awareness/introspector.py
+++ /dev/null
@@ -1,102 +0,0 @@
-from __future__ import annotations
-
-import os
-from pathlib import Path
-from typing import Dict, List
-
-
-def _repo_root() -> Path:
- """
- Try common layout: run from repo root; else climb from this file.
- Prefer ETHEREA_CORE.md as canonical, but fall back to README.md.
- """
- here = Path(__file__).resolve()
- for up in [here.parent, here.parent.parent, here.parent.parent.parent]:
- if (up / "core").exists() and (
- (up / "ETHEREA_CORE.md").exists() or (up / "README.md").exists()
- ):
- return up
- return Path(os.getcwd()).resolve()
-
-
-def _count_files(root: Path) -> Dict[str, int]:
- exts: Dict[str, int] = {
- ".py": 0,
- ".md": 0,
- ".png": 0,
- ".json": 0,
- ".yml": 0,
- ".yaml": 0,
- ".txt": 0,
- }
- total = 0
- for p in root.rglob("*"):
- if p.is_file():
- total += 1
- exts[p.suffix.lower()] = exts.get(p.suffix.lower(), 0) + 1
- exts["TOTAL"] = total
- return exts
-
-
-def build_self_explain_text() -> str:
- """
- Professor-friendly self-explain response.
- No code dumping; describes architecture, how it was built, and how to verify outputs.
- """
- root = _repo_root()
- counts = _count_files(root)
-
- key = [
- ("UI Shell", "core/ui/main_window_v2.py (Avatar + Aurora + Console + Command Pipeline)"),
- ("Avatar", "core/ui/avatar_heroine_widget.py (emotion-driven heroine renderer)"),
- ("Aurora", "core/ui/aurora_canvas_widget.py + core/aurora_pipeline.py"),
- ("Workspace", "core/workspace_manager.py + core/workspace_ai/* (routing + profiles)"),
- ("Voice", "core/voice_engine.py + core/voice_adapters.py (STT + Edge TTS)"),
- ("Signals", "core/signals.py (Qt signal hub for EI/voice/mode/focus)"),
- ]
-
- lines: List[str] = []
- lines.append("Etherea — Self Explanation (built-in)")
- lines.append(f"Repo root: {root}")
- lines.append("")
-
- lines.append("1) What I am")
- lines.append("- A desktop-first living OS prototype: UI + avatar + workspace tooling + voice.")
- lines.append("- My goal is to adapt the interface to human intent and emotional state (EI).")
- lines.append("")
-
- lines.append("2) How I am built (modules)")
- for title, desc in key:
- lines.append(f"- {title}: {desc}")
- lines.append("")
-
- lines.append("3) How I make decisions")
- lines.append("- Commands (typed or voice) go into ONE pipeline: route → action → workspace/UI change.")
- lines.append("- Mode changes update avatar persona (calm/focused/strict/soothing) and UI layout.")
- lines.append("")
-
- lines.append("4) How to verify (demo proofs)")
- lines.append("- Say: 'study mode' / 'coding mode' / 'exam mode' / 'calm mode' and observe layout changes.")
- lines.append("- Say: 'focus 25' and watch focus timer state + avatar enter deep_work persona.")
- lines.append("- Say: 'explain yourself' to see this overview.")
- lines.append("")
-
- # Canonical doc (preferred)
- core_md = root / "ETHEREA_CORE.md"
- if core_md.exists():
- try:
- doc = core_md.read_text(encoding="utf-8", errors="ignore").strip()
- if doc:
- lines.append("— Canonical Build Doc (ETHEREA_CORE.md) —")
- lines.append(doc)
- lines.append("")
- except Exception:
- pass
-
- lines.append("5) Build inventory (quick)")
- lines.append(
- f"- Total files: {counts.get('TOTAL', 0)} | Python: {counts.get('.py', 0)} | "
- f"Markdown: {counts.get('.md', 0)} | Assets(PNG): {counts.get('.png', 0)}"
- )
-
- return "\n".join(lines)
diff --git a/core/senses.py b/core/senses.py
deleted file mode 100644
index 21355a3..0000000
--- a/core/senses.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import threading
-import time
-try:
- from PySide6.QtCore import QObject, Signal
-except Exception:
- class QObject: # minimal stub
- pass
- def Signal(*a, **k):
- return None
-try:
- from pynput import mouse, keyboard
-except ImportError:
- mouse = None
- keyboard = None
-
-class InputSenses(QObject):
- """
- Senses user activity (Mouse/Keyboard) to determine 'Focus' or 'Idle' states.
- Privacy First: Counts events only. Does NOT log keys or positions.
- """
- activity_level_changed = Signal(float) # 0.0 (idle) to 1.0 (intense)
- pattern_detected = Signal(dict) # {"hesitation": bool, "repetition": bool, "late_night": bool}
-
- def __init__(self):
- super().__init__()
- self._apm = 0
- self._last_event_time = time.time()
- self._running = False
-
- # Counters for current second
- self._events_this_frame = 0
- self._event_history = [] # Last 10 event types
-
- if not mouse or not keyboard:
- print("[InputSenses] pynput not installed. Sensing disabled.")
- return
-
- self._mouse_listener = mouse.Listener(
- on_move=self._on_move,
- on_click=self._on_click,
- on_scroll=self._on_scroll
- )
- self._key_listener = keyboard.Listener(
- on_press=self._on_press
- )
-
- self._monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True)
-
- def start(self):
- if not mouse: return
- self._running = True
- self._mouse_listener.start()
- self._key_listener.start()
- self._monitor_thread.start()
-
- def stop(self):
- self._running = False
- if mouse:
- self._mouse_listener.stop()
- self._key_listener.stop()
-
- def _record_event(self, type_name: str, intensity: float):
- self._events_this_frame += intensity
- self._last_event_time = time.time()
- self._event_history.append(type_name)
- if len(self._event_history) > 10:
- self._event_history.pop(0)
-
- def _on_move(self, x, y):
- self._record_event("move", 0.5)
-
- def _on_click(self, x, y, button, pressed):
- if pressed: self._record_event("click", 2.0)
-
- def _on_scroll(self, x, y, dx, dy):
- self._record_event("scroll", 1.0)
-
- def _on_press(self, key):
- type_name = "key"
- try:
- if key == keyboard.Key.backspace or key == keyboard.Key.delete:
- type_name = "delete"
- except: pass
- self._record_event(type_name, 1.5)
-
- def _monitor_loop(self):
- """Calculates 'Energy' and 'Patterns' as per Etherea Manifest."""
- while self._running:
- time.sleep(1.0)
- now = time.time()
-
- # 1. Energy Calculation
- current_energy = min(1.0, self._events_this_frame / 20.0)
- self._events_this_frame = 0
-
- # 2. Pattern: Hesitation (Idle)
- idle_time = now - self._last_event_time
- hesitation = 5.0 < idle_time < 300.0
-
- # 3. Pattern: Repetition
- repetition = False
- if len(self._event_history) >= 5:
- last_5 = self._event_history[-5:]
- repetition = all(x == last_5[0] for x in last_5)
-
- # 4. Pattern: Deletions (Micro-Hesitation)
- deletions = self._event_history.count("delete")
-
- # 5. Pattern: Late Night
- from datetime import datetime
- hour = datetime.now().hour
- late_night = (hour >= 22 or hour < 5)
-
- # Update signals
- self.activity_level_changed.emit(current_energy)
- self.pattern_detected.emit({
- "hesitation": hesitation,
- "repetition": repetition,
- "deletions": deletions,
- "late_night": late_night
- })
-
- # Clean history gradually
- if len(self._event_history) > 20:
- self._event_history = self._event_history[-10:]
diff --git a/core/signals.py b/core/signals.py
deleted file mode 100644
index b08ed83..0000000
--- a/core/signals.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from __future__ import annotations
-
-try:
- from PySide6.QtCore import QObject, Signal
-except Exception:
- class QObject: # minimal stub
- pass
- def Signal(*a, **k):
- return None
-
-
-class EISignals(QObject):
- """
- Central Qt signal hub.
-
- Backwards compatible:
- - keeps existing signals used across the repo
- - adds voice + focus + mode + TTS lifecycle signals for the final demo pipeline
- """
-
- # --- existing (keep) ---
- input_activity = Signal(str, object) # allow dict payloads
- emotion_updated = Signal(dict)
- visual_action_triggered = Signal(str)
- proactive_trigger = Signal(str)
- command_received = Signal(str)
- avatar_state_change = Signal(str)
- system_log = Signal(str)
- command_received = Signal(str) # legacy: generic command text
- avatar_state_change = Signal(str)
- pattern_detected = Signal(dict)
- system_log = Signal(str)
-
- # --- new: voice / command pipeline ---
- command_received_ex = Signal(str, dict) # (text, meta) meta: {"source": "...", ...}
- voice_transcript = Signal(str, dict) # (text, meta)
- voice_state = Signal(str, dict) # LISTENING / THINKING / IDLE / ERROR
-
- # --- new: workspace modes / focus timer ---
- mode_changed = Signal(str, dict) # (mode, meta)
- focus_started = Signal(int, dict) # (minutes, meta)
- focus_stopped = Signal(dict) # meta
- focus_tick = Signal(int, dict) # (seconds_left, meta)
-
- # --- new: TTS lifecycle (for UI + avatar mouth) ---
- tts_requested = Signal(str, dict) # (text, meta)
- tts_started = Signal(str, dict) # (text, meta)
- tts_finished = Signal(str, dict) # (text, meta)
- tts_failed = Signal(str, dict) # (text, meta)
-
-
-signals = EISignals()
diff --git a/core/state.py b/core/state.py
deleted file mode 100644
index e3f436c..0000000
--- a/core/state.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import time
-try:
- from PySide6.QtCore import QObject, Signal
-except Exception:
- class QObject: # minimal stub
- pass
- def Signal(*a, **k):
- return None
-
-class AppState(QObject):
- """
- Single source of truth for the application state.
- Manages 'mode' (idle/focus/break) and EI metrics (focus/stress/energy/curiosity).
- """
- mode_changed = Signal(str) # mode
- ei_updated = Signal(dict) # full vector
-
- _instance = None
-
- def __init__(self):
- super().__init__()
- self._mode = "idle"
- self._ei = {
- "focus": 0.5,
- "stress": 0.2,
- "energy": 0.5,
- "curiosity": 0.5
- }
- self._expressive_mode = "idle"
- self._updated_at = time.time()
-
- @property
- def expressive_mode(self) -> str:
- return self._expressive_mode
-
- def set_expressive_mode(self, mode: str):
- if self._expressive_mode != mode:
- self._expressive_mode = mode
- self.ei_updated.emit(self._ei) # Trigger UI update
-
- @classmethod
- def instance(cls):
- if cls._instance is None:
- cls._instance = AppState()
- return cls._instance
-
- @property
- def mode(self) -> str:
- return self._mode
-
- @property
- def ei(self) -> dict:
- return self._ei.copy()
-
- @property
- def stress(self) -> float:
- return self._ei.get("stress", 0.5)
-
- @property
- def energy(self) -> float:
- return self._ei.get("energy", 0.5)
-
- @property
- def focus(self) -> float:
- return self._ei.get("focus", 0.5)
-
- def set_mode(self, mode: str, reason: str = ""):
- valid_modes = {"idle", "focus", "break"}
- mode = mode.lower().strip()
- if mode not in valid_modes:
- print(f"⚠️ Invalid mode requested: {mode}")
- return
-
- if self._mode != mode:
- self._mode = mode
- self._updated_at = time.time()
- self.mode_changed.emit(mode)
- print(f"State Mode -> {mode} ({reason})")
-
- def update_ei(self, **metrics):
- """
- Update one or more EI metrics. Values clamped 0.0 to 1.0.
- Example: state.update_ei(focus=0.8, stress=0.1)
- """
- changed = False
- for k, v in metrics.items():
- if k in self._ei:
- # clamp
- val = max(0.0, min(1.0, float(v)))
- if abs(self._ei[k] - val) > 0.001:
- self._ei[k] = val
- changed = True
-
- if changed:
- self.ei_updated.emit(self._ei)
diff --git a/core/system_tools.py b/core/system_tools.py
deleted file mode 100644
index dbacbc9..0000000
--- a/core/system_tools.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import subprocess
-import os
-import sys
-
-
-class SystemTools:
- @staticmethod
- def execute(command: str) -> str:
- """
- Executes a shell command and returns output.
- Basic safety checks included to prevent destructive commands.
- """
- if not command:
- return ""
-
- # Safety blocks to prevent destructive commands
- unsafe_keywords = ["rm -rf", "format", "del /s", "rd /s"]
- if any(keyword in command.lower() for keyword in unsafe_keywords):
- return "Safety Protocol: Command blocked due to destructive potential."
-
- try:
- result = subprocess.run(
- command,
- shell=True,
- capture_output=True,
- text=True,
- timeout=5
- )
- output = result.stdout or ""
- if result.stderr:
- output += f"\nErrors: {result.stderr}"
- return output.strip()
- except subprocess.TimeoutExpired:
- return "Execution Error: Command timed out."
- except Exception as e:
- return f"Execution Error: {e}"
-
- @staticmethod
- def open_file_explorer(path: str = ".") -> str:
- """
- Opens the system file explorer at the given path.
- Works on Windows, macOS, and Linux.
- """
- abs_path = os.path.abspath(path)
-
- try:
- if sys.platform.startswith("win"):
- os.startfile(abs_path)
- elif sys.platform.startswith("darwin"):
- subprocess.run(["open", abs_path])
- else: # Linux and others
- subprocess.run(["xdg-open", abs_path])
- return f"Opened {abs_path}"
- except Exception as e:
- return f"Failed to open path '{abs_path}': {e}"
diff --git a/core/tools/router.py b/core/tools/router.py
deleted file mode 100644
index 44f1cf1..0000000
--- a/core/tools/router.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import os
-import subprocess
-import threading
-import json
-import time
-from typing import List, Dict, Optional
-try:
- from PySide6.QtCore import QObject, Signal
-except Exception:
- class QObject: # minimal stub
- pass
- def Signal(*a, **k):
- return None
-
-class ToolRouter(QObject):
- """
- UPGRADED SPINE (v1.0 Contract)
- Safety-first, Agent-ready tool interface.
- """
- command_completed = Signal(dict)
- file_updated = Signal(str)
-
- # Safety Policy
- COMMAND_ALLOWLIST = ["python", "pip", "pytest", "dir", "ls", "mkdir", "echo", "git", "type", "cat"]
- COMMAND_DENYLIST = ["rm -rf /", "format", "del /s", "shred", "mkfs"]
-
- _instance = None
-
- def __init__(self):
- super().__init__()
- self.root_dir = os.getcwd()
- self.log_path = os.path.join(self.root_dir, "logs", "tool_calls.jsonl")
- os.makedirs(os.path.dirname(self.log_path), exist_ok=True)
-
- @classmethod
- def instance(cls):
- if cls._instance is None:
- cls._instance = ToolRouter()
- return cls._instance
-
- def _log_call(self, tool_name: str, args: dict, result: dict):
- """Append tool execution to the audit log."""
- log_entry = {
- "timestamp": time.time(),
- "tool": tool_name,
- "args": args,
- "result": {
- "success": result.get("success", False),
- "exit_code": result.get("exit_code"),
- "error": result.get("error")
- }
- }
- try:
- with open(self.log_path, "a", encoding="utf-8") as f:
- f.write(json.dumps(log_entry) + "\n")
- except: pass
-
- def _is_safe_path(self, path: str) -> bool:
- """Enforce CWD sandbox."""
- full_path = os.path.abspath(os.path.join(self.root_dir, path))
- return full_path.startswith(self.root_dir)
-
- def list_dir(self, rel_path: str = ".", depth: int = 2) -> dict:
- """LIST(dir, depth): Recursive listing with safety check."""
- if not self._is_safe_path(rel_path):
- return {"success": False, "error": "Access Denied: Path outside sandbox."}
-
- try:
- results = []
- full_path = os.path.join(self.root_dir, rel_path)
- for root, dirs, files in os.walk(full_path):
- curr_rel = os.path.relpath(root, self.root_dir)
- if curr_rel.count(os.sep) >= depth:
- continue
- for f in files:
- results.append(os.path.join(curr_rel, f))
-
- res = {"success": True, "files": results}
- self._log_call("LIST", {"path": rel_path}, res)
- return res
- except Exception as e:
- return {"success": False, "error": str(e)}
-
- def read_file(self, rel_path: str) -> dict:
- """READ(path): Robust reading within sandbox."""
- if not self._is_safe_path(rel_path):
- return {"success": False, "error": "Access Denied."}
-
- try:
- full_path = os.path.join(self.root_dir, rel_path)
- with open(full_path, "r", encoding="utf-8") as f:
- content = f.read()
- res = {"success": True, "content": content}
- self._log_call("READ", {"path": rel_path}, res)
- return res
- except Exception as e:
- return {"success": False, "error": str(e)}
-
- def write_file(self, rel_path: str, content: str, mode: str = "replace") -> dict:
- """WRITE(path, content, mode): Replaces or patches file."""
- if not self._is_safe_path(rel_path):
- return {"success": False, "error": "Access Denied."}
-
- try:
- full_path = os.path.join(self.root_dir, rel_path)
- os.makedirs(os.path.dirname(full_path), exist_ok=True)
-
- if mode == "patch":
- # Multi-line append/replace (Mock for now, real agents use diff/match)
- with open(full_path, "a", encoding="utf-8") as f:
- f.write("\n" + content)
- else:
- with open(full_path, "w", encoding="utf-8") as f:
- f.write(content)
-
- res = {"success": True, "path": rel_path}
- self._log_call("WRITE", {"path": rel_path, "mode": mode}, res)
- self.file_updated.emit(rel_path)
- return res
- except Exception as e:
- return {"success": False, "error": str(e)}
-
- def run_command(self, cmd_string: str):
- """RUN(cmd): Secure shell execution with allowlist."""
- # Safety Check
- base_cmd = cmd_string.split(" ")[0].lower()
- if base_cmd not in self.COMMAND_ALLOWLIST:
- self.command_completed.emit({"success": False, "error": f"Command '{base_cmd}' is NOT in allowlist."})
- return
-
- for forbidden in self.COMMAND_DENYLIST:
- if forbidden in cmd_string.lower():
- self.command_completed.emit({"success": False, "error": f"Security Risk: Forbidden pattern detected."})
- return
-
- thread = threading.Thread(target=self._exec_cmd, args=(cmd_string,), daemon=True)
- thread.start()
-
- def _exec_cmd(self, cmd: str):
- try:
- process = subprocess.Popen(
- cmd, shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- text=True,
- cwd=self.root_dir
- )
- stdout, stderr = process.communicate(timeout=30)
-
- res = {
- "command": cmd,
- "stdout": stdout,
- "stderr": stderr,
- "exit_code": process.returncode,
- "success": process.returncode == 0
- }
- self._log_call("RUN", {"cmd": cmd}, res)
- self.command_completed.emit(res)
- except Exception as e:
- self.command_completed.emit({"success": False, "error": str(e)})
-
- def search_files(self, pattern: str, rel_path: str = ".") -> dict:
- """SEARCH(text, path): Grep-like search within sandbox."""
- if not self._is_safe_path(rel_path):
- return {"success": False, "error": "Access Denied."}
-
- try:
- matches = []
- full_path = os.path.join(self.root_dir, rel_path)
- for root, _, files in os.walk(full_path):
- for f in files:
- fp = os.path.join(root, f)
- try:
- with open(fp, "r", encoding="utf-8", errors="ignore") as file:
- for i, line in enumerate(file, 1):
- if pattern in line:
- matches.append({
- "file": os.path.relpath(fp, self.root_dir),
- "line": i,
- "content": line.strip()
- })
- except: continue
-
- res = {"success": True, "matches": matches[:50]} # Cap at 50
- self._log_call("SEARCH", {"pattern": pattern}, res)
- return res
- except Exception as e:
- return {"success": False, "error": str(e)}
diff --git a/core/tools/verifier.py b/core/tools/verifier.py
deleted file mode 100644
index d459a2a..0000000
--- a/core/tools/verifier.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-import subprocess
-from typing import Dict
-
-class Verifier:
- """
- Hands for Etherea.
- Verifies project health after changes.
- """
- @staticmethod
- def verify_syntax(root_dir: str) -> Dict:
- """Run python compileall to check for syntax errors."""
- try:
- cmd = ["python", "-m", "compileall", root_dir]
- process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
- )
- stdout, stderr = process.communicate(timeout=10)
- success = process.returncode == 0
- return {
- "success": success,
- "output": stdout,
- "error": stderr
- }
- except Exception as e:
- return {"success": False, "error": str(e)}
-
- @staticmethod
- def verify_app_launch(root_dir: str) -> Dict:
- """Check if the app can at least start without immediate crash (Mock/Stub for now)."""
- # In a real desktop app, we might use a headless check or specialized test
- return {"success": True, "message": "Smoke test passed (Placeholder)"}
diff --git a/core/tutorial_flow.py b/core/tutorial_flow.py
deleted file mode 100644
index 3b35e3b..0000000
--- a/core/tutorial_flow.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# core/tutorial_flow.py
-"""
-Tutorial Flow for Etherea
-- Orchestrates step-by-step sequences or dynamic knowledge injection
-- Integrates AvatarEngine, MDLoader, and MemoryStore
-"""
-
-from core.avatar_engine import AvatarEngine
-from core.memory_store import MemoryStore
-from core.md_loader import MDLoader
-from core.utils import debug_print
-
-
-class TutorialFlow:
- def __init__(self, docs_folder="docs", avatar_password=None):
- """
- Initialize TutorialFlow with:
- - MDLoader for docs_folder
- - AvatarEngine for AI responses
- - MemoryStore for persistent state
- """
- # Core systems
- self.memory = MemoryStore() # MemoryStore uses global SQLite by default
- self.avatar = AvatarEngine(key_password=avatar_password)
- self.docs_loader = MDLoader(docs_folder)
- # { "topic/file.md": "content..." }
- self.docs = self.docs_loader.load_all()
-
- # Tutorial steps
- self.steps = []
- self.current_step = 0
-
- # Load or initialize tutorial state
- tutorial_state = self.memory.get_all_preferences().get("tutorial_state")
- if tutorial_state:
- try:
- self.tutorial_state = tutorial_state
- self.current_step = self.tutorial_state.get("current_step", 0)
- except Exception:
- self.tutorial_state = {"current_step": 0}
- self.current_step = 0
- else:
- self.tutorial_state = {"current_step": 0}
-
- # --- Step Management ---
-
- def add_step(self, step_fn):
- """Add a function representing a tutorial step"""
- self.steps.append(step_fn)
-
- def next_step(self):
- """Execute next step and update memory"""
- if self.current_step < len(self.steps):
- try:
- self.steps[self.current_step]()
- except Exception as e:
- debug_print("TutorialFlow", f"Step execution error: {e}")
- self.current_step += 1
- self._save_state()
- else:
- debug_print("TutorialFlow", "Tutorial finished!")
-
- def reset(self):
- """Reset tutorial flow"""
- self.current_step = 0
- self._save_state()
-
- def _save_state(self):
- """Save tutorial state to memory"""
- self.memory.update_preference(
- "tutorial_state", {"current_step": self.current_step})
-
- # --- Dynamic Data Injection ---
-
- def ask_bot(self, user_text: str) -> str:
- """
- Sends user input to AvatarEngine (Etherea AI)
- Optionally prepends docs knowledge from MDLoader
- """
- # Combine loaded docs into one context string
- docs_context = "\n".join(self.docs.values())
- prompt = f"Use the following Etherea docs to respond:\n{docs_context}\n\nUser: {user_text}"
-
- # Generate response from AvatarEngine
- try:
- response = self.avatar.speak(prompt)
- except Exception as e:
- debug_print("TutorialFlow", f"AvatarEngine error: {e}")
- response = "Etherea encountered an internal error."
-
- # Update memory with last interaction
- try:
- interactions = self.memory.get_all_preferences().get("interactions", [])
- interactions.append({"user": user_text, "bot": response})
- self.memory.update_preference("interactions", interactions)
- except Exception as e:
- debug_print("TutorialFlow", f"Memory update error: {e}")
-
- return response
-
- # --- Utility Functions ---
-
- def list_docs(self):
- """Return list of loaded docs"""
- return list(self.docs.keys())
-
- def get_doc(self, doc_key: str) -> str:
- """Return content of a specific doc"""
- return self.docs.get(doc_key, "")
-
- def check_triggers(self, context: dict) -> str:
- """
- Evaluate rules for showing pop-ups.
- Returns a tutorial message or None.
- """
- # Rule 1: First launch
- if self.current_step == 0 and context.get("startup", False):
- return "Welcome to Etherea. I am your operating system. How may I assist you today?"
-
- # Rule 2: High stress
- if context.get("stress", 0) > 0.8:
- return "I notice signs of stress. Would you like to enable Focus Mode?"
-
- return None
diff --git a/core/ui/__init__.py b/core/ui/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/core/ui/aurora_canvas_widget.py b/core/ui/aurora_canvas_widget.py
deleted file mode 100644
index 4195620..0000000
--- a/core/ui/aurora_canvas_widget.py
+++ /dev/null
@@ -1,430 +0,0 @@
-from __future__ import annotations
-
-from PySide6.QtCore import Qt, Signal
-from PySide6.QtGui import QColor, QLinearGradient, QPainter, QPen, QBrush
-from PySide6.QtWidgets import (
- QFrame,
- QGraphicsOpacityEffect,
- QLabel,
- QPushButton,
- QVBoxLayout,
- QWidget,
-)
-
-from core.aurora_state import AuroraCanvasState
-
-
-class AuroraCanvasWidget(QWidget):
- intent_requested = Signal(str)
-
- def __init__(self, parent: QWidget | None = None) -> None:
- super().__init__(parent)
- self._state: AuroraCanvasState | None = None
- self.setMinimumHeight(220)
- self.setAutoFillBackground(False)
-
- self._layout = QVBoxLayout(self)
- self._layout.setContentsMargins(18, 18, 18, 18)
- self._layout.setSpacing(10)
-
- self.header = QLabel("Aurora Canvas")
- self.header.setStyleSheet("font-size: 16px; font-weight: 700; color: #eaf0ff;")
-
- self.mode_label = QLabel("Mode: idle")
- self.workspace_label = QLabel("Workspace: --")
- self.session_label = QLabel("Session: --")
- self.attention_label = QLabel("Attention: --")
- self.last_saved_label = QLabel("Last saved: --")
- self.ei_label = QLabel("EI: --")
-
- for label in (
- self.mode_label,
- self.workspace_label,
- self.session_label,
- self.attention_label,
- self.last_saved_label,
- self.ei_label,
- ):
- label.setStyleSheet("color: #cfd6ff; font-size: 12px;")
-
- self.status_frame = QFrame()
- self.status_frame.setStyleSheet(
- "QFrame { background: rgba(18, 20, 32, 0.85); border: 1px solid #1f253d; border-radius: 12px; }"
- )
- status_layout = QVBoxLayout(self.status_frame)
- status_layout.setContentsMargins(12, 12, 12, 12)
- status_layout.addWidget(self.mode_label)
- status_layout.addWidget(self.workspace_label)
- status_layout.addWidget(self.session_label)
- status_layout.addWidget(self.attention_label)
- status_layout.addWidget(self.last_saved_label)
- status_layout.addWidget(self.ei_label)
-
- self.actions_frame = QFrame()
- self.actions_frame.setStyleSheet(
- "QFrame { background: rgba(12, 14, 24, 0.9); border: 1px solid #1f253d; border-radius: 12px; }"
- )
- self.actions_layout = QVBoxLayout(self.actions_frame)
- self.actions_layout.setContentsMargins(10, 10, 10, 10)
- self.actions_layout.setSpacing(6)
-
- self.overlay_label = QLabel("")
- self.overlay_label.setAlignment(Qt.AlignCenter)
- self.overlay_label.setStyleSheet(
- "color: rgba(255, 220, 180, 0.9); font-weight: 700; font-size: 13px;"
- )
-
- self.warning_label = QLabel("")
- self.warning_label.setAlignment(Qt.AlignCenter)
- self.warning_label.setStyleSheet(
- "color: rgba(255, 120, 120, 0.9); font-weight: 700; font-size: 12px;"
- )
-
- self.actions_opacity = QGraphicsOpacityEffect(self.actions_frame)
- self.actions_frame.setGraphicsEffect(self.actions_opacity)
- self.status_opacity = QGraphicsOpacityEffect(self.status_frame)
- self.status_frame.setGraphicsEffect(self.status_opacity)
-
- self._layout.addWidget(self.header)
- self._layout.addWidget(self.status_frame)
- self._layout.addWidget(self.actions_frame)
- self._layout.addWidget(self.overlay_label)
- self._layout.addWidget(self.warning_label)
-
- def apply_state(self, state: AuroraCanvasState) -> None:
- self._state = state
- self.mode_label.setText(f"Mode: {state.current_mode} | Layout: {state.layout_density}")
- workspace_text = state.workspace_name or "--"
- self.workspace_label.setText(f"Workspace: {workspace_text}")
- session_text = "active" if state.session_active else "inactive"
- self.session_label.setText(f"Session: {session_text}")
- self.attention_label.setText(f"Attention: {state.attention_level}")
- last_saved = state.last_saved or "--"
- self.last_saved_label.setText(f"Last saved: {last_saved}")
- self.ei_label.setText(
- f"EI: focus {state.focus:.2f} | stress {state.stress:.2f} | energy {state.energy:.2f}"
- )
-
- self._layout.setSpacing(state.spacing)
- self.status_opacity.setOpacity(state.nonessential_opacity)
- self.actions_opacity.setOpacity(state.nonessential_opacity)
-
- self.status_frame.setVisible(state.panel_visibility.get("status", True))
- self.actions_frame.setVisible(state.panel_visibility.get("actions", True))
- self.overlay_label.setText(state.overlay_text)
- self.warning_label.setText(state.warning_text)
-
- self._render_actions(state)
- self.update()
-
- def _render_actions(self, state: AuroraCanvasState) -> None:
- while self.actions_layout.count():
- item = self.actions_layout.takeAt(0)
- widget = item.widget()
- if widget:
- widget.setParent(None)
-
- for action in state.suggested_actions:
- btn = QPushButton(action.label)
- btn.setEnabled(action.enabled)
- btn.setStyleSheet(
- "QPushButton { background: #1a1f34; border: 1px solid #2a3353; color: #e6ecff; padding: 6px; border-radius: 6px; }"
- "QPushButton:disabled { color: #6a6f88; background: #141725; }"
- )
- btn.clicked.connect(lambda _, action_id=action.action_id: self.intent_requested.emit(action_id))
- self.actions_layout.addWidget(btn)
-
- if not state.suggested_actions:
- empty = QLabel("No actions available")
- empty.setStyleSheet("color: #6f7aa8; font-size: 11px;")
- self.actions_layout.addWidget(empty)
-import math
-import random
-from PySide6.QtCore import Qt, QTimer, QPoint, QPointF, Signal
-from PySide6.QtGui import QColor, QLinearGradient, QPainter, QPen, QBrush
-from PySide6.QtWidgets import QWidget, QVBoxLayout, QLabel
-
-from core.state import AppState
-
-class AuroraCanvasWidget(QWidget):
- """
- Primary living surface.
- - Centered Aurora Ring.
- - Responsive to AppState (mode/EI).
- - Smooth pulse animation.
- """
- # Legacy signal for backward compatibility with main_window_v2
- intent_requested = Signal(str)
-
- # Theme Definitions (Top, Bot, Grid, Particle)
- THEMES = {
- "focus": {
- "top": QColor(5, 8, 15), "bot": QColor(2, 2, 5),
- "grid": QColor(0, 240, 255, 40), "part": QColor(100, 220, 255, 180)
- },
- "study": {
- "top": QColor(10, 15, 40), "bot": QColor(5, 5, 15), # Deep Blue
- "grid": QColor(80, 100, 255, 40), "part": QColor(150, 180, 255, 180)
- },
- "creative": {
- "top": QColor(25, 5, 20), "bot": QColor(10, 0, 5), # Velvet/Purple
- "grid": QColor(255, 0, 150, 40), "part": QColor(255, 100, 200, 180)
- },
- "night": {
- "top": QColor(5, 5, 5), "bot": QColor(0, 0, 0), # Monochrome
- "grid": QColor(100, 100, 100, 30), "part": QColor(150, 150, 150, 100)
- },
- "idle": {
- "top": QColor(8, 10, 14), "bot": QColor(3, 4, 6),
- "grid": QColor(100, 130, 160, 20), "part": QColor(255, 255, 255, 100)
- },
- "break": { # Added for completeness, though not explicitly in the prompt's THEMES
- "top": QColor(10, 8, 5), "bot": QColor(0, 0, 0),
- "grid": QColor(255, 160, 50, 30), "part": QColor(255, 180, 100, 180)
- }
- }
-
- def __init__(self, parent: QWidget | None = None) -> None:
- super().__init__(parent)
- self.setMinimumHeight(300)
- self.setAttribute(Qt.WidgetAttribute.WA_OpaquePaintEvent, True)
-
- self._state = None
- self._frame = 0 # Animation Loop
-
- # Color State (Start at Idle)
- self.target_key = "idle"
- self.cur_colors = self.THEMES["idle"].copy()
-
- self._timer = QTimer(self)
- self._timer.timeout.connect(self._tick)
- self._timer.start(30) # ~33 FPS
-
- # Subscribe to State
- self.state = AppState.instance()
-
- # Audio Engine
- from core.music import MusicEngine
- self.music = MusicEngine.instance()
- self.spectrum = {'bass': 0.0, 'mid': 0.0, 'high': 0.0}
- self.music.spectrum_updated.connect(self.update_spectrum)
-
- # Visual Params
- self.mode = "idle"
-
- # Overlay Labels (Floating)
- self._layout = QVBoxLayout(self)
- self._layout.setContentsMargins(20, 20, 20, 20)
- self._layout.setAlignment(Qt.AlignTop | Qt.AlignRight)
-
- self.label_mode = QLabel("IDLE")
- self.label_mode.setStyleSheet("color: rgba(255,255,255,0.4); font-weight: bold; font-size: 12px; letter-spacing: 2px;")
- self._layout.addWidget(self.label_mode)
-
- def set_theme(self, name: str):
- """Transition to a new color theme."""
- if name in self.THEMES:
- self.target_key = name
-
- def apply_state(self, state) -> None:
- """Legacy API."""
- self._state = state
-
- def _lerp_color(self, c1: QColor, c2: QColor, t: float) -> QColor:
- r = c1.red() + (c2.red() - c1.red()) * t
- g = c1.green() + (c2.green() - c1.green()) * t
- b = c1.blue() + (c2.blue() - c1.blue()) * t
- a = c1.alpha() + (c2.alpha() - c1.alpha()) * t
- return QColor(int(r), int(g), int(b), int(a))
-
- def _tick(self):
- self._frame += 1
-
- # Sync simple local state with AppState
- current_mode = self.state.mode
- if self.mode != current_mode:
- self.mode = current_mode
- self.label_mode.setText(current_mode.upper())
- # Auto-switch theme based on mode if default
- if current_mode in self.THEMES and self.target_key not in ["study", "creative", "night"]:
- self.target_key = current_mode
-
- # Smooth Color Lerp
- tgt = self.THEMES.get(self.target_key, self.THEMES["idle"])
-
- lf = 0.05
- for k in ["top", "bot", "grid", "part"]:
- self.cur_colors[k] = self._lerp_color(self.cur_colors[k], tgt[k], lf)
-
- self.update()
-
- def update_spectrum(self, spec: dict):
- self.spectrum = spec
-
- def paintEvent(self, event) -> None:
- painter = QPainter(self)
- painter.setRenderHint(QPainter.Antialiasing)
-
- rect = self.rect()
- gradient = QLinearGradient(rect.topLeft(), rect.bottomRight())
- if self._state:
- mood = self._state.theme_profile.get("emotion_tag", "calm")
- if mood in ("alert", "angry", "stressed"):
- gradient.setColorAt(0.0, QColor(42, 14, 20))
- gradient.setColorAt(1.0, QColor(16, 6, 10))
- elif mood in ("joy", "happy"):
- gradient.setColorAt(0.0, QColor(22, 26, 48))
- gradient.setColorAt(1.0, QColor(12, 16, 32))
- else:
- gradient.setColorAt(0.0, QColor(12, 16, 30))
- gradient.setColorAt(1.0, QColor(8, 10, 20))
- else:
- gradient.setColorAt(0.0, QColor(12, 16, 30))
- gradient.setColorAt(1.0, QColor(8, 10, 20))
-
- painter.fillRect(rect, QBrush(gradient))
-
- if self._state:
- ring_color = QColor(90, 180, 255, 180)
- if self._state.current_mode == "focus":
- ring_color = QColor(255, 200, 80, 200)
- elif self._state.current_mode == "break":
- ring_color = QColor(120, 220, 180, 180)
- elif self._state.current_mode == "blocked":
- ring_color = QColor(160, 160, 160, 150)
- elif self._state.current_mode == "error":
- ring_color = QColor(255, 120, 120, 180)
-
- pen = QPen(ring_color, 3)
- painter.setPen(pen)
- center = rect.center()
- base_radius = min(rect.width(), rect.height()) * 0.35
- attention_scale = {
- "low": 0.92,
- "med": 1.0,
- "high": 1.08,
- }.get(self._state.attention_level, 1.0)
- radius = base_radius * attention_scale
- painter.drawEllipse(center, radius, radius)
-
- painter.end()
- w, h = rect.width(), rect.height()
-
- # 1. Background Fill (Interpolated)
- bg_top = self.cur_colors["top"]
- bg_bot = self.cur_colors["bot"]
-
- # Audio Reactivity: Flash background on heavy bass
- bass = self.spectrum.get('bass', 0.0)
- if bass > 0.5:
- # Add some energy to the background
- bg_top = QColor(
- min(255, bg_top.red() + int(bass * 40)),
- min(255, bg_top.green() + int(bass * 20)),
- min(255, bg_top.blue() + int(bass * 60))
- )
-
- grad = QLinearGradient(0, 0, 0, h)
- grad.setColorAt(0.0, bg_top)
- grad.setColorAt(1.0, bg_bot)
- painter.fillRect(rect, grad)
-
- # 2. Perspective Grid (The "Lab" Floor)
- self._draw_grid(painter, w, h)
-
- # 3. Floating Data Particles (Holodeck)
- self._draw_particles(painter, w, h)
-
- painter.end()
-
- def _draw_grid(self, qp: QPainter, w: int, h: int):
- """Perspective grid to give depth."""
- # Theme Color
- if self.mode == "focus":
- grid_c = QColor(0, 240, 255, 40) # High-Tech Cyan
- elif self.mode == "break":
- grid_c = QColor(255, 160, 50, 30) # Warm Amber
- else:
- grid_c = QColor(100, 130, 160, 20) # Professional Slate
-
- # Audio: Grid pulse
- mid = self.spectrum.get('mid', 0.0)
- bass = self.spectrum.get('bass', 0.0)
-
- pulse_alpha = int(grid_c.alpha() * (1.0 + mid * 2.0))
- grid_c.setAlpha(min(255, pulse_alpha))
-
- pen = QPen(grid_c)
- pen.setWidth(1)
- qp.setPen(pen)
-
- # Horizon line (Bounce with bass)
- bounce = bass * 20.0
- horizon_y = h * 0.4 - bounce
-
- # Vertical lines (fan out)
- center_x = w / 2
- for i in range(-10, 11):
- offset = i * (w * 0.15)
- # Line from vanishing point (center_x, horizon_y) to bottom
- qp.drawLine(QPointF(center_x, horizon_y), QPointF(center_x + offset * 3, h))
-
- # Horizontal lines (get denser with distance)
- for i in range(1, 15):
- y_norm = 1.0 - (1.0 / (i * 0.4 + 1.0)) # exponential accumulation
- # map normalized 0..1 to horizon_y..h
- y = h - (y_norm * (h - horizon_y))
- qp.drawLine(QPointF(0, y), QPointF(w, y))
-
- def _draw_particles(self, qp: QPainter, w: int, h: int):
- """Floating data points / sparks."""
- # Initialize particles if needed
- if not hasattr(self, '_particles'):
- self._particles = []
- for _ in range(30):
- self._particles.append({
- 'x': random.random(),
- 'y': random.random(),
- 's': 0.5 + random.random() * 0.5, # speed
- 'z': random.random() # depth
- })
-
- # Physics
- speed_mult = 2.0 if self.mode == "focus" else 0.5
- if self.mode == "break": speed_mult = 0.2
-
- # Audio boost
- high = self.spectrum.get('high', 0.0)
- speed_mult += (high * 10.0) # Fast particles on hi-hats
-
- dt = 0.03 # assume 30fps
-
- particle_col = self.cur_colors["part"]
-
- qp.setPen(Qt.NoPen)
- qp.setBrush(particle_col)
-
- for p in self._particles:
- # Move up
- p['y'] -= p['s'] * dt * speed_mult
- # Reset
- if p['y'] < 0:
- p['y'] = 1.0
- p['x'] = random.random()
-
- # Render
- # transform 0..1 to screen
- px = p['x'] * w
- py = p['y'] * h
- size = 2.0 + p['z'] * 3.0 + (high * 4.0) # Pulse size
-
- # Simple fade at top/bottom
- alpha_mod = 1.0
- if p['y'] < 0.2: alpha_mod = p['y'] * 5.0
- if p['y'] > 0.8: alpha_mod = (1.0 - p['y']) * 5.0
-
- c = QColor(particle_col)
- c.setAlpha(min(255, int(c.alpha() * alpha_mod)))
- qp.setBrush(c)
-
- qp.drawEllipse(QPointF(px, py), size, size)
diff --git a/core/ui/aurora_ui.py b/core/ui/aurora_ui.py
deleted file mode 100644
index 2551d50..0000000
--- a/core/ui/aurora_ui.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import os
-import sys
-from PySide6.QtWidgets import QApplication, QMainWindow, QLabel, QMessageBox
-from PySide6.QtCore import Qt
-from .holograms import NebulaBackground
-from .bubbles import AppBubble
-
-class AuroraWindow(QMainWindow):
- def __init__(self):
- super().__init__()
- self.setWindowTitle("Etherea Aurora v4.0")
- self.setGeometry(100, 100, 1000, 600)
-
- self.nebula = NebulaBackground(self)
- self.setCentralWidget(self.nebula)
-
- self.title = QLabel("E T H E R E A", self.nebula)
- self.title.setGeometry(0, 50, 1000, 100)
- self.title.setAlignment(Qt.AlignCenter)
- self.title.setStyleSheet("font-size: 40px; color: rgba(255,255,255,0.8); font-weight: bold; letter-spacing: 12px;")
-
- self.status = QLabel("SYSTEM ONLINE", self.nebula)
- self.status.setGeometry(0, 500, 1000, 30)
- self.status.setAlignment(Qt.AlignCenter)
- self.status.setStyleSheet("color: rgba(0, 255, 255, 0.7); font-family: Consolas; font-size: 10px; letter-spacing: 3px;")
-
- # --- CLEAN BUBBLES (No Emojis) ---
- # Format: (parent, "NAME", x, y, callback)
- self.btn_term = AppBubble(self.nebula, "Terminal", 250, 250, self.launch_terminal)
- self.btn_files = AppBubble(self.nebula, "Data", 450, 250, self.launch_files)
- self.btn_avatar = AppBubble(self.nebula, "Avatar", 650, 250, self.launch_avatar)
-
- def launch_terminal(self, _):
- self.status.setText("ACCESSING MAINFRAME...")
- if os.name == 'nt': os.system('start cmd')
-
- def launch_files(self, _):
- self.status.setText("OPENING DATA CORE...")
- if os.name == 'nt': os.startfile('.')
-
- def launch_avatar(self, _):
- self.status.setText("AVATAR INTERFACE ACTIVE...")
- msg = QMessageBox()
- msg.setWindowTitle("Avatar")
- msg.setText("Visual interface not loaded. Voice module active. 🎤")
- msg.setStyleSheet("background-color: #0f0f1a; color: white;")
- msg.exec()
-
-if __name__ == "__main__":
- app = QApplication(sys.argv)
- window = AuroraWindow()
- window.show()
- sys.exit(app.exec())
diff --git a/core/ui/avatar_engine/__init__.py b/core/ui/avatar_engine/__init__.py
deleted file mode 100644
index 3049b57..0000000
--- a/core/ui/avatar_engine/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Avatar Engine package (multi-avatars + dynamic backgrounds)
diff --git a/core/ui/avatar_engine/engine.py b/core/ui/avatar_engine/engine.py
deleted file mode 100644
index 0de7544..0000000
--- a/core/ui/avatar_engine/engine.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from __future__ import annotations
-
-import math
-from typing import Dict
-
-from core.ui.avatar_engine.registry import AVATARS, AvatarSpec
-
-
-def _clamp01(x: float) -> float:
- try:
- if math.isnan(x) or math.isinf(x):
- return 0.5
- return max(0.0, min(1.0, float(x)))
- except Exception:
- return 0.5
-
-
-class AvatarEngine:
- """
- Keeps the avatar state: selected avatar, EI vector, and animation parameters.
- Pure logic (no UI) so it's stable and testable.
- """
-
- def __init__(self):
- self.avatar: AvatarSpec = AVATARS["aurora"]
-
- # Emotion vector from EIEngine
- self.ei: Dict[str, float] = {
- "focus": 0.5,
- "stress": 0.2,
- "energy": 0.6,
- "curiosity": 0.5,
- }
-
- # Visual state derived from EI
- self.glow_intensity: float = 0.7
- self.motion_multiplier: float = 1.0
-
- # smooth transitions
- self._target_key = self.avatar.key
- self._blend = 1.0 # 0..1
- self._blend_speed = 3.0
-
- def set_avatar(self, key: str):
- if key in AVATARS:
- self._target_key = key
- self._blend = 0.0
-
- def update_ei(self, vec: Dict[str, float]):
- for k in self.ei:
- if k in vec:
- self.ei[k] = _clamp01(vec[k])
-
- # Map EI → visuals
- focus = self.ei["focus"]
- stress = self.ei["stress"]
- energy = self.ei["energy"]
- curiosity = self.ei["curiosity"]
-
- # Glow stronger when focused/curious, but unstable when stressed
- base = 0.35 + 0.55 * focus + 0.25 * curiosity
- penalty = 0.30 * stress
- self.glow_intensity = _clamp01(base - penalty)
-
- # Motion increases with energy, decreases with stress
- self.motion_multiplier = _clamp01(0.55 + 0.85 * energy - 0.40 * stress)
-
- def tick(self, dt: float) -> float:
- """
- dt = seconds since last frame
- returns blend value 0..1 (useful for UI transitions)
- """
- # handle crossfade to new avatar
- if self._target_key != self.avatar.key:
- self._blend = min(1.0, self._blend + dt * self._blend_speed)
- if self._blend >= 1.0:
- self.avatar = AVATARS[self._target_key]
- self._blend = 1.0
- return self._blend
diff --git a/core/ui/avatar_engine/registry.py b/core/ui/avatar_engine/registry.py
deleted file mode 100644
index b105ec2..0000000
--- a/core/ui/avatar_engine/registry.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from dataclasses import dataclass
-from typing import Dict, Tuple
-
-
-Color = Tuple[int, int, int]
-
-
-@dataclass(frozen=True)
-class AvatarSpec:
- key: str
- name: str
-
- # Palette
- bg_a: Color
- bg_b: Color
- ring: Color
- core: Color
- particle: Color
-
- # Motion tuning
- drift_speed: float # background drift speed
- pulse_speed: float # glow pulse speed
- particle_rate: float # particles per second
-
-
-AVATARS: Dict[str, AvatarSpec] = {
- "aurora": AvatarSpec(
- key="aurora",
- name="Aurora",
- bg_a=(18, 12, 30),
- bg_b=(10, 18, 40),
- ring=(160, 120, 255),
- core=(255, 210, 120),
- particle=(190, 170, 255),
- drift_speed=0.65,
- pulse_speed=1.10,
- particle_rate=24.0,
- ),
- "ethera": AvatarSpec(
- key="ethera",
- name="Ethera",
- bg_a=(10, 12, 22),
- bg_b=(22, 12, 30),
- ring=(120, 220, 255),
- core=(120, 255, 210),
- particle=(120, 220, 255),
- drift_speed=0.85,
- pulse_speed=1.35,
- particle_rate=30.0,
- ),
- "sentinel": AvatarSpec(
- key="sentinel",
- name="Sentinel",
- bg_a=(8, 8, 12),
- bg_b=(18, 10, 16),
- ring=(255, 120, 160),
- core=(255, 140, 90),
- particle=(255, 120, 160),
- drift_speed=0.55,
- pulse_speed=0.95,
- particle_rate=18.0,
- ),
- }
diff --git a/core/ui/avatar_engine_widget.py b/core/ui/avatar_engine_widget.py
deleted file mode 100644
index c4059de..0000000
--- a/core/ui/avatar_engine_widget.py
+++ /dev/null
@@ -1,213 +0,0 @@
-from __future__ import annotations
-
-import math
-import random
-import time
-from dataclasses import dataclass
-from typing import List, Tuple
-
-from PySide6.QtCore import QTimer, QRectF, Qt
-from PySide6.QtGui import QColor, QPainter, QRadialGradient, QLinearGradient, QPen
-from PySide6.QtWidgets import QWidget
-
-from core.ui.avatar_engine.engine import AvatarEngine
-from core.ui.avatar_engine.registry import AVATARS, AvatarSpec
-
-
-def _qcolor(rgb: Tuple[int, int, int], a: int = 255) -> QColor:
- r, g, b = rgb
- return QColor(int(r), int(g), int(b), int(a))
-
-
-@dataclass
-class Particle:
- x: float
- y: float
- vx: float
- vy: float
- life: float
- size: float
-
-
-class AvatarEngineWidget(QWidget):
- """
- Cinematic 2D avatar scene (fast, stable, Termux-friendly).
- - Dynamic gradient background + drift
- - Particle field
- - Aura ring + core glow pulse
- - Multi-avatar switch (Aurora/Ethera/Sentinel)
- """
-
- def __init__(self, parent=None):
- super().__init__(parent)
-
- self.engine = AvatarEngine()
-
- self._t_last = time.time()
- self._t = 0.0
-
- self._particles: List[Particle] = []
- self._particle_acc = 0.0
-
- self._bg_phase = 0.0
-
- self.setMinimumSize(420, 420)
- self.setAttribute(Qt.WidgetAttribute.WA_OpaquePaintEvent, True)
-
- self._timer = QTimer(self)
- self._timer.timeout.connect(self._on_frame)
- self._timer.start(33) # ~30fps
-
- # ---- Public API (called by UI / signals) ----
- def set_avatar(self, key: str):
- self.engine.set_avatar(key)
-
- def update_ei(self, vec: dict):
- self.engine.update_ei(vec)
-
- # ---- Animation loop ----
- def _on_frame(self):
- now = time.time()
- dt = max(0.001, min(0.050, now - self._t_last))
- self._t_last = now
- self._t += dt
-
- # engine tick (handles switching)
- self.engine.tick(dt)
-
- # background drift
- drift = self.engine.avatar.drift_speed * self.engine.motion_multiplier
- self._bg_phase += dt * drift
-
- # particles
- self._spawn_particles(dt)
- self._update_particles(dt)
-
- self.update()
-
- def _spawn_particles(self, dt: float):
- spec = self.engine.avatar
- rate = spec.particle_rate * self.engine.motion_multiplier
- self._particle_acc += dt * rate
-
- # less particles when stress is high (calmer)
- stress = self.engine.ei.get("stress", 0.2)
- damp = 1.0 - 0.6 * stress
- self._particle_acc *= max(0.25, damp)
-
- while self._particle_acc >= 1.0:
- self._particle_acc -= 1.0
- w = max(1, self.width())
- h = max(1, self.height())
-
- # spawn near edges
- edge = random.randint(0, 3)
- if edge == 0:
- x, y = -10.0, random.uniform(0, h)
- elif edge == 1:
- x, y = w + 10.0, random.uniform(0, h)
- elif edge == 2:
- x, y = random.uniform(0, w), -10.0
- else:
- x, y = random.uniform(0, w), h + 10.0
-
- angle = random.uniform(0, math.tau)
- speed = random.uniform(20.0, 80.0) * (0.6 + 0.8 * self.engine.motion_multiplier)
- vx = math.cos(angle) * speed
- vy = math.sin(angle) * speed
-
- life = random.uniform(1.6, 3.2)
- size = random.uniform(1.0, 2.5)
-
- self._particles.append(Particle(x=x, y=y, vx=vx, vy=vy, life=life, size=size))
-
- # cap
- if len(self._particles) > 180:
- self._particles = self._particles[-180:]
-
- def _update_particles(self, dt: float):
- alive = []
- for p in self._particles:
- p.x += p.vx * dt
- p.y += p.vy * dt
- p.life -= dt
- if p.life > 0:
- alive.append(p)
- self._particles = alive
-
- # ---- Painting ----
- def paintEvent(self, event):
- painter = QPainter(self)
- painter.setRenderHint(QPainter.RenderHint.Antialiasing, True)
-
- rect = self.rect()
-
- spec = self.engine.avatar
- focus = self.engine.ei.get("focus", 0.5)
- stress = self.engine.ei.get("stress", 0.2)
- energy = self.engine.ei.get("energy", 0.6)
-
- # Background gradient with slow drift
- shift = 0.5 + 0.15 * math.sin(self._bg_phase * 1.2)
- grad = QLinearGradient(0, 0, rect.width(), rect.height())
- grad.setColorAt(0.0, _qcolor(spec.bg_a, 255))
- grad.setColorAt(shift, _qcolor(spec.bg_b, 255))
- grad.setColorAt(1.0, _qcolor(spec.bg_a, 255))
- painter.fillRect(rect, grad)
-
- # Subtle vignette
- vign = QRadialGradient(rect.center(), rect.width() * 0.75)
- vign.setColorAt(0.0, QColor(0, 0, 0, 0))
- vign.setColorAt(1.0, QColor(0, 0, 0, 140))
- painter.fillRect(rect, vign)
-
- # Particles
- particle_color = _qcolor(spec.particle, 160)
- for p in self._particles:
- alpha = int(180 * max(0.0, min(1.0, p.life / 2.2)))
- c = QColor(particle_color)
- c.setAlpha(alpha)
- painter.setPen(Qt.PenStyle.NoPen)
- painter.setBrush(c)
- painter.drawEllipse(QRectF(p.x, p.y, p.size, p.size))
-
- # Avatar ring + core glow
- cx = rect.center().x()
- cy = rect.center().y()
-
- base_r = min(rect.width(), rect.height()) * 0.26
- breathing = 1.0 + 0.04 * math.sin(self._t * (1.1 + energy))
- ring_r = base_r * breathing
-
- # Glow pulse (reduced if stress is high)
- pulse_speed = spec.pulse_speed * (0.8 + 0.8 * self.engine.motion_multiplier)
- pulse = 0.55 + 0.45 * math.sin(self._t * pulse_speed * math.tau)
- glow = self.engine.glow_intensity * (0.85 - 0.4 * stress) + 0.15 * pulse
- glow = max(0.15, min(1.0, glow))
-
- # Outer aura glow
- aura = QRadialGradient(cx, cy, ring_r * 2.2)
- aura.setColorAt(0.0, QColor(0, 0, 0, 0))
- aura.setColorAt(0.55, _qcolor(spec.ring, int(90 * glow)))
- aura.setColorAt(1.0, QColor(0, 0, 0, 0))
- painter.fillRect(rect, aura)
-
- # Ring stroke
- ring_pen = QPen(_qcolor(spec.ring, int(210 * (0.55 + 0.45 * focus))))
- ring_pen.setWidthF(max(2.0, ring_r * 0.06))
- painter.setPen(ring_pen)
- painter.setBrush(Qt.BrushStyle.NoBrush)
- painter.drawEllipse(QRectF(cx - ring_r, cy - ring_r, ring_r * 2, ring_r * 2))
-
- # Inner core glow
- core_r = ring_r * (0.50 + 0.08 * math.sin(self._t * 2.0))
- core = QRadialGradient(cx, cy, core_r * 1.8)
- core.setColorAt(0.0, _qcolor(spec.core, int(220 * glow)))
- core.setColorAt(0.4, _qcolor(spec.core, int(120 * glow)))
- core.setColorAt(1.0, QColor(0, 0, 0, 0))
-
- painter.setPen(Qt.PenStyle.NoPen)
- painter.setBrush(core)
- painter.drawEllipse(QRectF(cx - core_r, cy - core_r, core_r * 2, core_r * 2))
-
- painter.end()
diff --git a/core/ui/avatar_heroine_widget.py b/core/ui/avatar_heroine_widget.py
deleted file mode 100644
index 6210706..0000000
--- a/core/ui/avatar_heroine_widget.py
+++ /dev/null
@@ -1,397 +0,0 @@
-from __future__ import annotations
-
-import math
-import time
-from typing import Dict, Tuple, Optional
-
-from PySide6.QtCore import Qt, QTimer, QRectF, QPointF
-from PySide6.QtGui import QPainter, QColor, QPen, QBrush, QFont
-from PySide6.QtWidgets import QWidget
-
-# Optional voice engine hookup (for mouth/viseme)
-try:
- from core.voice_engine import get_voice_engine, VoiceEngine # type: ignore
-except Exception:
- get_voice_engine = None
- VoiceEngine = None
-
-
-def _clamp(x: float, lo: float, hi: float) -> float:
- try:
- return max(lo, min(hi, float(x)))
- except Exception:
- return lo
-
-
-class AvatarHeroineWidget(QWidget):
- """
- A safe, dependency-light heroine widget that ALWAYS renders something:
- - Emotional face (calm/focused/stressed/cheerful)
- - Aurora ring glow + pulse()
- - Mode personas: study/coding/exam/calm/deep_work
- - Voice viseme (mouth motion) if available
-
- This file is designed to be "errorless" in unknown environments.
- """
-
- def __init__(self, parent=None):
- super().__init__(parent)
- self.setMinimumHeight(260)
-
- # EI state
- self.ei: Dict[str, float] = {"focus": 0.55, "stress": 0.20, "energy": 0.70, "curiosity": 0.55}
- self.mode: str = "study"
- self.emotion_tag: str = "calm"
-
- # Visual tuning
- self._theme = "dark" # "dark" or "light"
- self._accent_a: Tuple[int, int, int] = (160, 120, 255) # violet
- self._accent_b: Tuple[int, int, int] = (255, 210, 120) # gold
-
- # Animation
- self._t0 = time.time()
- self._last = self._t0
- self.t = 0.0
-
- # EI
- self.ei = EI().clamp()
- self.emotion_tag = "calm"
-
- # smooth values
- self._glow = 0.65
- self._glow_target = 0.65
-
- self._calmness = 0.70
- self._calm_target = 0.70
-
- self._motion = 0.65
- self._motion_target = 0.65
-
- # gaze / head motion
- self._gaze_x = 0.0
- self._gaze_y = 0.0
- self._gaze_tx = 0.0
- self._gaze_ty = 0.0
-
- self._head_tilt = 0.0
- self._head_tilt_target = 0.0
-
- # blink system
- self._blink_phase = 0.0 # 0..1 where 1=closed
- self._blink_timer = 0.0
- self._blink_next = 2.4 + random.random() * 2.2
-
- # "thinking" mode toggle
- self.thinking = False
-
- # theme
- self.theme_mode = "dark" # "light" or "dark"
- self.accent_a = QColor(160, 120, 255) # violet ring
- self.accent_b = QColor(255, 210, 120) # warm glow
-
- # -----------------------------------------
- # (2) Render backend (painter now, 3D later)
- # -----------------------------------------
- # supported: "painter" (current), "3d" (placeholder)
- self.render_backend = "painter"
-
- # -----------------------------------------
- # (3) Gestures: nod + tilt + acknowledge
- # -----------------------------------------
- self._nod = 0.0
- self._nod_target = 0.0
- self._nod_cooldown = 0.0
-
- self._gesture_tilt = 0.0
- self._gesture_tilt_target = 0.0
- self._gesture_timer = 0.0
- # -----------------------------------------
- # (4) Ring FX (highlight segment + pulse)
- # -----------------------------------------
- self._ring_pulse = 0.0
- self._ring_pulse_target = 0.0
- self._ring_highlight = 0.0
- self._ring_highlight_target = 0.0
- self._ring_segment = 0
-
-
- # -----------------------------------------
- # (1) Expression channels (eyebrows + mouth)
- # -----------------------------------------
- self._brow_raise = 0.0
- self._brow_raise_target = 0.0
-
- self._brow_furrow = 0.0
- self._brow_furrow_target = 0.0
-
- self._smile = 0.0
- self._smile_target = 0.0
-
- # render loop
- self._pulse_amp = 0.0
- self._pulse_until = 0.0
- self._mouth = 0.15 # 0..1
- self._speaking = False
-
- self._timer = QTimer(self)
- self._timer.setInterval(16) # ~60fps
- self._timer.timeout.connect(self.update)
- self._timer.start()
-
- # Voice hooks (optional)
- self._hook_voice()
-
- def set_accent_colors(self, a: Tuple[int, int, int], b: Tuple[int, int, int]) -> None:
- self.accent_a = QColor(int(a[0]), int(a[1]), int(a[2]))
- self.accent_b = QColor(int(b[0]), int(b[1]), int(b[2]))
- self.update()
-
- def set_thinking(self, on: bool) -> None:
- self.thinking = bool(on)
- # subtle acknowledge when thinking toggled (feels alive)
- self.acknowledge()
- self.update()
-
- def set_emotion_tag(self, tag: str) -> None:
- self.emotion_tag = str(tag).strip().lower() or "calm"
-
- def set_render_backend(self, backend: str) -> None:
- """
- "painter" = current 2.5D. (Recommended now)
- "3d" = placeholder. We’ll swap this later with real 3D renderer.
- """
- backend = str(backend).strip().lower()
- self.render_backend = "3d" if backend == "3d" else "painter"
- self.update()
-
- # --- gestures ---
- def nod(self) -> None:
- # quick up-down nod (tiny amplitude, premium)
- if self._nod_cooldown > 0.0:
- return
- self._nod_target = 1.0
- self._nod_cooldown = 0.35
-
- def tilt(self, amount: float) -> None:
- # amount in [-1..1]
- self._gesture_tilt_target = max(-1.0, min(1.0, float(amount)))
- self._gesture_timer = 0.35
-
- def acknowledge(self) -> None:
- # a gentle combined gesture: micro nod + micro tilt
- self.nod()
- self.tilt(0.25 if random.random() > 0.5 else -0.25)
-
- def update_ei(self, vec: dict) -> None:
- # safe EI update
- if not isinstance(vec, dict):
- return
- # -------------------------
- # Public API used by main_window_v2
- # -------------------------
- def update_ei(self, vec: Dict[str, float]) -> None:
- for k in ("focus", "stress", "energy", "curiosity"):
- if k in vec:
- self.ei[k] = _clamp(vec.get(k, self.ei[k]), 0.0, 1.0)
- self._recompute_emotion_tag()
-
- def set_mode_persona(self, mode: str) -> None:
- self.mode = str(mode or "study")
- # Mode is a bias; EI still drives emotion.
- self._recompute_emotion_tag()
-
- def pulse(self, *, intensity: float = 1.2, duration: float = 0.25) -> None:
- self._pulse_amp = max(self._pulse_amp, float(intensity))
- self._pulse_until = time.time() + float(duration)
-
- def set_theme_mode(self, theme: str) -> None:
- theme = (theme or "dark").lower().strip()
- self._theme = "light" if theme.startswith("l") else "dark"
-
- def set_accent_colors(self, a: Tuple[int, int, int], b: Tuple[int, int, int]) -> None:
- self._accent_a = tuple(int(x) for x in a)
- self._accent_b = tuple(int(x) for x in b)
-
- # -------------------------
- # Voice hook
- # -------------------------
- def _hook_voice(self) -> None:
- if get_voice_engine is None and VoiceEngine is None:
- return
- try:
- ve = get_voice_engine() if get_voice_engine else VoiceEngine.instance() # type: ignore
- if hasattr(ve, "viseme_updated"):
- ve.viseme_updated.connect(self._on_viseme)
- if hasattr(ve, "speaking_state"):
- ve.speaking_state.connect(self._on_speaking_state)
- except Exception:
- pass
-
- def _on_viseme(self, v: float) -> None:
- self._mouth = _clamp(float(v), 0.0, 1.0)
-
- def _on_speaking_state(self, speaking: bool) -> None:
- self._speaking = bool(speaking)
-
- # -------------------------
- # Emotion tag
- # -------------------------
- def _recompute_emotion_tag(self) -> None:
- focus = self.ei.get("focus", 0.5)
- stress = self.ei.get("stress", 0.2)
- energy = self.ei.get("energy", 0.6)
-
- # Mode bias (light)
- m = (self.mode or "").lower()
- bias_stress = 0.05 if m in ("exam", "deep_work") else 0.0
- bias_focus = 0.05 if m in ("deep_work", "coding") else 0.0
- bias_energy = -0.03 if m == "exam" else 0.0
-
- stress = _clamp(stress + bias_stress, 0, 1)
- focus = _clamp(focus + bias_focus, 0, 1)
- energy = _clamp(energy + bias_energy, 0, 1)
-
- if stress > 0.70:
- self.emotion_tag = "stressed"
- elif focus > 0.72 and stress < 0.45:
- self.emotion_tag = "focused"
- elif energy > 0.78 and stress < 0.40:
- self.emotion_tag = "cheerful"
- else:
- self.emotion_tag = "calm"
-
- # -------------------------
- # Paint
- # -------------------------
- def paintEvent(self, event):
- w = self.width()
- h = self.height()
- t = time.time() - self._t0
-
- painter = QPainter(self)
- painter.setRenderHint(QPainter.Antialiasing)
-
- # Background
- if self._theme == "dark":
- bg = QColor(10, 10, 18)
- panel = QColor(14, 14, 24)
- text = QColor(230, 230, 255)
- else:
- bg = QColor(245, 246, 250)
- panel = QColor(255, 255, 255)
- text = QColor(25, 25, 40)
-
- painter.fillRect(self.rect(), bg)
-
- # Card panel
- r = QRectF(10, 10, w - 20, h - 20)
- painter.setBrush(QBrush(panel))
- painter.setPen(QPen(QColor(40, 40, 60, 160), 1))
- painter.drawRoundedRect(r, 18, 18)
-
- def ring_pulse(self, dur: float = 1.0, intensity: float = 1.0) -> None:
- self._ring_pulse_target = max(self._ring_pulse_target, min(2.0, float(intensity)))
- self._gesture_timer = max(self._gesture_timer, float(dur))
- # Ring center
- cx = r.center().x()
- cy = r.center().y() - 10
- center = QPointF(cx, cy)
-
- # Pulse
- now = time.time()
- pulse = 0.0
- if now < self._pulse_until:
- pulse = 0.18 * (self._pulse_amp) * (0.5 + 0.5 * math.sin(t * 14))
- else:
- self._pulse_amp = max(0.0, self._pulse_amp * 0.92)
-
- # Ring glow colors
- a = QColor(*self._accent_a, 190)
- b = QColor(*self._accent_b, 160)
-
- # EI influences
- focus = self.ei.get("focus", 0.55)
- stress = self.ei.get("stress", 0.20)
- energy = self.ei.get("energy", 0.70)
-
- ring_base = min(r.width(), r.height()) * 0.34
- ring = ring_base * (1.0 + pulse) * (0.96 + 0.04 * math.sin(t * 2.0))
-
- # Outer glow thickness
- glow = 10 + 16 * energy + 18 * pulse
- ring_pen = QPen(a, 6 + 6 * focus + 4 * pulse)
- painter.setPen(ring_pen)
- painter.setBrush(Qt.NoBrush)
- painter.drawEllipse(center, ring, ring)
-
- # Soft outer glow
- glow_pen = QPen(b, 10 + glow)
- glow_pen.setCapStyle(Qt.RoundCap)
- glow_pen.setColor(QColor(b.red(), b.green(), b.blue(), 35))
- painter.setPen(glow_pen)
- painter.drawEllipse(center, ring, ring)
-
- # Avatar "face" (simple but expressive)
- face_r = ring * 0.52
- face_color = QColor(25, 25, 35, 255) if self._theme == "dark" else QColor(235, 236, 245, 255)
- painter.setPen(QPen(QColor(0, 0, 0, 0), 0))
- painter.setBrush(QBrush(face_color))
- painter.drawEllipse(center, face_r, face_r)
-
- # Eyes
- eye_y = cy - face_r * 0.15
- eye_dx = face_r * 0.22
- eye_r = face_r * 0.06 * (0.9 + 0.2 * (1 - stress))
-
- blink = 1.0
- # occasional blink
- if int(t * 2) % 17 == 0:
- blink = 0.25
-
- painter.setBrush(QBrush(QColor(230, 230, 255) if self._theme == "dark" else QColor(30, 30, 50)))
- painter.setPen(Qt.NoPen)
-
- # left eye
- painter.drawEllipse(QPointF(cx - eye_dx, eye_y), eye_r, eye_r * blink)
- # right eye
- painter.drawEllipse(QPointF(cx + eye_dx, eye_y), eye_r, eye_r * blink)
-
- # Mouth (viseme-driven)
- mouth_y = cy + face_r * 0.18
- mouth_w = face_r * 0.30
- mouth_h = face_r * (0.05 + 0.18 * self._mouth) # mouth open
- mouth_curve = 1.0
-
- if self.emotion_tag == "stressed":
- mouth_curve = -0.6
- elif self.emotion_tag == "focused":
- mouth_curve = -0.2
- elif self.emotion_tag == "cheerful":
- mouth_curve = 0.9
- else:
- mouth_curve = 0.4
-
- # draw mouth as rounded rect + curve hint
- mouth_color = QColor(245, 210, 210, 220) if self._theme == "dark" else QColor(120, 60, 60, 210)
- painter.setBrush(QBrush(mouth_color))
- painter.setPen(Qt.NoPen)
- painter.drawRoundedRect(QRectF(cx - mouth_w, mouth_y - mouth_h / 2, mouth_w * 2, mouth_h), 8, 8)
-
- # Emotion eyebrow/overlay (tiny)
- brow_pen = QPen(QColor(220, 220, 255, 120) if self._theme == "dark" else QColor(40, 40, 60, 140), 3)
- painter.setPen(brow_pen)
- by = cy - face_r * 0.28
- bx = face_r * 0.18
- lift = (0.10 if self.emotion_tag == "cheerful" else (-0.10 if self.emotion_tag == "stressed" else 0.0))
- painter.drawLine(QPointF(cx - eye_dx - bx, by - lift * face_r), QPointF(cx - eye_dx + bx, by + lift * face_r))
- painter.drawLine(QPointF(cx + eye_dx - bx, by + lift * face_r), QPointF(cx + eye_dx + bx, by - lift * face_r))
-
- # Labels (report-friendly)
- painter.setPen(QPen(text, 1))
- painter.setFont(QFont("Segoe UI", 10))
- painter.drawText(18, 30, f"Heroine: {self.emotion_tag} • mode={self.mode}")
-
- painter.setFont(QFont("Consolas", 9))
- painter.drawText(18, 48, f"EI focus={focus:.2f} stress={stress:.2f} energy={energy:.2f} speaking={self._speaking}")
-
- painter.end()
diff --git a/core/ui/avatar_widget.py b/core/ui/avatar_widget.py
deleted file mode 100644
index 80a23cd..0000000
--- a/core/ui/avatar_widget.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from PySide6.QtWidgets import QWidget, QApplication
-from PySide6.QtCore import Qt, QTimer, QPoint, QRectF
-from PySide6.QtGui import QPainter, QColor, QRadialGradient, QBrush, QPen
-import math
-from core.signals import signals
-
-
-class AvatarWidget(QWidget):
- def __init__(self, parent=None):
- super().__init__(parent)
- # Dynamic sizing enabled
- self.state = {"focus": 0.5, "stress": 0.2}
-
- # Animation
- self.anim_timer = QTimer(self)
- self.anim_timer.timeout.connect(self.update_animation)
- self.anim_timer.start(30) # 30ms ~ 33fps
- self.frame = 0
-
- # Connect signals
- signals.emotion_updated.connect(self.update_state)
-
- self.is_thinking = False
-
- # Starfield initialization
- import random
- self.stars = []
- for _ in range(40):
- self.stars.append({
- "x": random.randint(0, 320),
- "y": random.randint(0, 320),
- "s": random.uniform(0.2, 1.5), # Smaller, subtler stars
- "speed": random.uniform(0.005, 0.02) # Slower movement
- })
-
- self.mouse_pos = QPoint(160, 160)
- self.setMouseTracking(True)
-
- def set_thinking(self, thinking: bool):
- self.is_thinking = thinking
- self.update()
-
- def update_state(self, state):
- self.state = state
- self.update()
-
- def update_animation(self):
- self.frame += 1
- self.update()
-
- def mouseMoveEvent(self, event):
- self.mouse_pos = event.pos()
- super().mouseMoveEvent(event)
-
- def paintEvent(self, event):
- if self.width() <= 0 or self.height() <= 0:
- return
-
- painter = QPainter(self)
- if not painter.isActive():
- return
-
- painter.setRenderHint(QPainter.Antialiasing)
-
- # 0. Draw Starfield (Subtle Background)
- self.draw_starfield(painter)
-
- # State
- stress = self.state.get("stress", 0.2)
- breath_speed = 0.05 + (stress * 0.1)
- bob_amplitude = 10
- self.vertical_offset = math.sin(
- self.frame * breath_speed) * bob_amplitude
-
- # Centering
- cx = self.width() // 2
- cy = int(self.height() // 2 + self.vertical_offset)
- center = QPoint(cx, cy)
-
- # 1. Draw Aurora Ring
- self.draw_aurora_ring(painter, center)
-
- # 2. Draw Avatar Core
- self.draw_avatar_core(painter, center)
-
- # Update frame
- self.frame = (self.frame + 1) % 10000
-
- painter.end()
-
- def draw_starfield(self, painter):
- stress = self.state.get("stress", 0.2)
- for star in self.stars:
- # Stars move gracefully
- offset_x = math.sin(self.frame * star["speed"]) * 10
- offset_y = math.cos(self.frame * star["speed"]) * 10
-
- x = star["x"] + offset_x
- y = star["y"] + offset_y
-
- alpha = int(150 * star["s"]) # Slightly more transparent
- color = QColor(255, 255, 255, alpha)
- painter.setPen(QPen(color, star["s"]))
- painter.drawPoint(int(x) % self.width(), int(y) % self.height())
- painter.setPen(Qt.NoPen)
-
- def draw_aurora_ring(self, painter, center):
- focus = self.state.get("focus", 0.5)
- stress = self.state.get("stress", 0.2)
-
- # Mouse Proximity Lean
- dx = self.mouse_pos.x() - center.x()
- dy = self.mouse_pos.y() - center.y()
- dist = math.sqrt(dx*dx + dy*dy)
- if 0 < dist < 150:
- lean_x = (dx / dist) * (150 - dist) * 0.2
- lean_y = (dy / dist) * (150 - dist) * 0.2
- center = QPoint(int(center.x() + lean_x), int(center.y() + lean_y))
- elif dist == 0:
- pass # No lean if exactly at center
-
- breath_speed = 0.05 + (stress * 0.1)
-
- # Liquid Motion / Radius Modulation
- def get_radius(angle):
- # Base radius
- if self.is_thinking:
- base = 85 + math.sin(self.frame * 0.2) * 10
- else:
- base = 80 + (focus * 20) + \
- math.sin(self.frame * breath_speed) * 5
-
- # Deformation (Liquid feel)
- deformation = math.sin(angle * 3 + self.frame * 0.1) * 3
- deformation += math.cos(angle * 5 - self.frame * 0.05) * 2
- return base + deformation
-
- # Draw the ring using multiple segments for liquid look
- # Draw the ring using multiple segments for liquid look
- if stress > 0.6:
- # Warmer, less alarmist red
- color = QColor(255, 80, 80, 120)
- secondary_color = QColor(255, 120, 80, 60)
- elif focus > 0.7:
- # Golden focus
- color = QColor(255, 200, 100, 120)
- secondary_color = QColor(255, 220, 150, 60)
- else:
- # Ethereal Cyan/Blue
- color = QColor(0, 180, 255, 120)
- secondary_color = QColor(100, 200, 255, 60)
-
- # Draw outer glow glow
- painter.setPen(Qt.NoPen)
- for r_ext in range(10, 0, -2):
- alpha = int(20 * (r_ext / 10))
- glow_color = QColor(color.red(), color.green(),
- color.blue(), alpha)
- painter.setBrush(QBrush(glow_color))
-
- # Simplified liquid glow (approximate with points or path)
- pts = []
- for a in range(0, 361, 10):
- angle_rad = math.radians(a)
- rad = get_radius(angle_rad) + r_ext
- pts.append(QPoint(int(center.x() + math.cos(angle_rad) * rad),
- int(center.y() + math.sin(angle_rad) * rad)))
- painter.drawPolygon(pts)
-
- # Main Ring Core
- painter.setBrush(QBrush(color))
- pts = []
- for a in range(0, 361, 5):
- angle_rad = math.radians(a)
- rad = get_radius(angle_rad)
- pts.append(QPoint(int(center.x() + math.cos(angle_rad) * rad),
- int(center.y() + math.sin(angle_rad) * rad)))
- painter.drawPolygon(pts)
-
- # Energy particles
- particle_count = 3 if stress < 0.5 else 6
- for i in range(particle_count):
- angle = (self.frame * (0.05 + stress * 0.1) +
- i * (2 * math.pi / particle_count))
- base_r = get_radius(angle)
- px = center.x() + math.cos(angle) * (base_r * 0.9)
- py = center.y() + math.sin(angle) * (base_r * 0.9)
- painter.setBrush(QBrush(QColor(255, 255, 255, 200)))
- painter.drawEllipse(QPoint(int(px), int(py)), 3, 3)
-
- def draw_avatar_core(self, painter, center):
- # Multi-layered core for depth
- focus = self.state.get("focus", 0.5)
- stress = self.state.get("stress", 0.2)
-
- # Outer core glow
- outer_grad = QRadialGradient(center, 60)
- outer_grad.setColorAt(0, QColor(255, 255, 255, 50))
- outer_grad.setColorAt(0.7, QColor(200, 240, 255, 30))
- outer_grad.setColorAt(1, Qt.transparent)
- painter.setBrush(QBrush(outer_grad))
- painter.setPen(Qt.NoPen)
- painter.drawEllipse(center, 60, 60)
-
- # Middle layer - pulses with activity
- pulse_size = 45 + int(math.sin(self.frame * 0.1) * 3)
- mid_grad = QRadialGradient(center, pulse_size)
- mid_grad.setColorAt(0, QColor(255, 255, 255, 200))
- mid_grad.setColorAt(0.5, QColor(200, 240, 255, 150))
- mid_grad.setColorAt(1, Qt.transparent)
- painter.setBrush(QBrush(mid_grad))
- painter.drawEllipse(center, pulse_size, pulse_size)
-
- # Inner core - bright center
- inner_grad = QRadialGradient(center, 25)
- inner_grad.setColorAt(0, QColor(255, 255, 255, 255))
- inner_grad.setColorAt(0.5, QColor(255, 255, 255, 220))
- inner_grad.setColorAt(1, QColor(220, 240, 255, 100))
- painter.setBrush(QBrush(inner_grad))
- painter.drawEllipse(center, 25, 25)
diff --git a/core/ui/beat_sync.py b/core/ui/beat_sync.py
deleted file mode 100644
index 4a6ee55..0000000
--- a/core/ui/beat_sync.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from __future__ import annotations
-from typing import List, Dict, Any, Optional
-
-from PySide6.QtCore import QObject, QTimer
-
-
-class BeatSyncScheduler(QObject):
- """
- Schedules UI effects (ring pulses/highlights) using a QTimer.
- Works only in desktop builds with PySide6.
- """
-
- def __init__(self, apply_effect_cb, log_cb=None):
- super().__init__()
- self.apply_effect_cb = apply_effect_cb
- self.log_cb = log_cb or (lambda *a, **k: None)
-
- self._timeline: List[Dict[str, Any]] = []
- self._idx = 0
- self._start_ms = 0
-
- self._timer = QTimer()
- self._timer.setInterval(16) # ~60fps tick
- self._timer.timeout.connect(self._tick)
-
- def load(self, ui_effects: List[Dict[str, Any]]):
- # Sort by time
- self._timeline = sorted(ui_effects or [], key=lambda x: float(x.get("t", 0.0)))
- self._idx = 0
-
- def start(self):
- if not self._timeline:
- self.log_cb("⚠️ BeatSync: No effects to schedule")
- return
-
- self._start_ms = 0
- self._idx = 0
- self._timer.start()
- self.log_cb(f"💫 BeatSync: started ({len(self._timeline)} effects)")
-
- def stop(self):
- self._timer.stop()
- self._timeline = []
- self._idx = 0
- self._start_ms = 0
- self.log_cb("💫 BeatSync: stopped")
-
- def _tick(self):
- # Lazy init start time on first tick
- if self._start_ms == 0:
- self._start_ms = self._now_ms()
-
- elapsed_s = (self._now_ms() - self._start_ms) / 1000.0
-
- # Fire all effects whose time <= elapsed
- while self._idx < len(self._timeline):
- e = self._timeline[self._idx]
- t = float(e.get("t", 0.0))
- if t > elapsed_s:
- break
-
- try:
- self.apply_effect_cb(e)
- except Exception as ex:
- self.log_cb(f"⚠️ BeatSync apply_effect failed: {ex}")
-
- self._idx += 1
-
- # Auto-stop when done
- if self._idx >= len(self._timeline):
- self.stop()
-
- def _now_ms(self) -> int:
- # QTimer doesn't expose now; use python time
- import time
- return int(time.time() * 1000)
diff --git a/core/ui/bubbles.py b/core/ui/bubbles.py
deleted file mode 100644
index 57ad20c..0000000
--- a/core/ui/bubbles.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from PySide6.QtWidgets import QPushButton, QGraphicsDropShadowEffect
-from PySide6.QtGui import QColor
-
-class AppBubble(QPushButton):
- def __init__(self, parent, name, x, y, callback=None):
- super().__init__(parent)
- self.setText(name.upper()) # Force Uppercase for sci-fi look
- self.setGeometry(x, y, 100, 100)
- self.callback = callback
-
- self.clicked.connect(self.on_click)
-
- # 🎨 THE PURE GLASS LOOK (No Emojis)
- self.setStyleSheet("""
- QPushButton {
- background-color: rgba(255, 255, 255, 10);
- border: 1px solid rgba(255, 255, 255, 40);
- border-radius: 50px; /* Perfectly Round */
- color: rgba(255, 255, 255, 0.9);
- font-family: 'Segoe UI', sans-serif;
- font-size: 11px;
- font-weight: 600;
- letter-spacing: 2px; /* Spaced out text */
- }
- QPushButton:hover {
- background-color: rgba(255, 255, 255, 25);
- border: 2px solid rgba(0, 240, 255, 200);
- color: white;
- box-shadow: 0 0 15px cyan;
- }
- QPushButton:pressed {
- background-color: rgba(0, 240, 255, 50);
- }
- """)
-
- shadow = QGraphicsDropShadowEffect()
- shadow.setBlurRadius(20)
- shadow.setColor(QColor(0, 0, 0, 100))
- shadow.setOffset(0, 0)
- self.setGraphicsEffect(shadow)
-
- def on_click(self):
- if self.callback:
- self.callback(self.text())
diff --git a/core/ui/command_palette.py b/core/ui/command_palette.py
deleted file mode 100644
index 8e3e63c..0000000
--- a/core/ui/command_palette.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from __future__ import annotations
-
-from PySide6.QtWidgets import QWidget, QHBoxLayout, QLineEdit, QPushButton
-from PySide6.QtCore import Signal
-
-
-class CommandPalette(QWidget):
- submitted = Signal(str)
-
- def __init__(self, placeholder: str = "Etherea> type a command (coding mode / save session / continue last session)"):
- super().__init__()
- self.setObjectName("CommandPalette")
-
- self.input = QLineEdit()
- self.input.setPlaceholderText(placeholder)
-
- self.btn = QPushButton("Run")
-
- row = QHBoxLayout(self)
- row.setContentsMargins(8, 8, 8, 8)
- row.setSpacing(8)
- row.addWidget(self.input, 1)
- row.addWidget(self.btn, 0)
-
- self.btn.clicked.connect(self._emit)
- self.input.returnPressed.connect(self._emit)
-
- def _emit(self):
- text = (self.input.text() or "").strip()
- if not text:
- return
- self.submitted.emit(text)
- self.input.clear()
diff --git a/core/ui/editors.py b/core/ui/editors.py
deleted file mode 100644
index ecc61fc..0000000
--- a/core/ui/editors.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from PySide6.QtWidgets import QTextEdit, QVBoxLayout
-from PySide6.QtGui import QFont, QColor
-from core.ui.panels import GlassPanel
-from core.ui.highlighter import PythonHighlighter
-from core.tools.router import ToolRouter
-
-class FunctionalCodePanel(GlassPanel):
- """
- Real Code Editor replacing the static mockup.
- """
- def __init__(self, title="Editor", parent=None):
- super().__init__(title=title, parent=parent)
-
- self.editor = QTextEdit()
- self.editor.setFont(QFont("Consolas", 11))
- self.editor.setStyleSheet("background: transparent; color: #f8f8f2; border: none;")
- self.editor.setAcceptRichText(False)
-
- self.highlighter = PythonHighlighter(self.editor.document())
- self.layout.addWidget(self.editor)
-
- # Load default file as demo
- self.load_file("main.py")
-
- def load_file(self, rel_path):
- content = ToolRouter.instance().read_file(rel_path)
- self.editor.setPlainText(content)
-
- def save_file(self, rel_path):
- content = self.editor.toPlainText()
- ToolRouter.instance().write_file(rel_path, content)
diff --git a/core/ui/ethera_command_bar.py b/core/ui/ethera_command_bar.py
deleted file mode 100644
index c3a4fb8..0000000
--- a/core/ui/ethera_command_bar.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from PySide6.QtWidgets import QLineEdit
-from PySide6.QtCore import Qt, Signal
-from core.state import AppState
-
-class EtheraCommandBar(QLineEdit):
- """
- Top command bar for natural language control.
- Parses commands like 'focus mode', 'break', 'reset' and updates AppState.
- """
- def __init__(self, parent=None):
- super().__init__(parent)
- self.setPlaceholderText("Ask Etherea... (e.g., 'focus mode', 'take a break')")
- self.setStyleSheet("""
- QLineEdit {
- background: #11121a;
- color: #e8e8ff;
- border: 1px solid #2a3353;
- border-radius: 8px;
- padding: 12px;
- font-family: 'Segoe UI', sans-serif;
- font-size: 14px;
- selection-background-color: #3d4b7a;
- }
- QLineEdit:focus {
- border: 1px solid #5a75cf;
- background: #151622;
- }
- """)
- self.returnPressed.connect(self.process_command)
-
- def process_command(self):
- text = self.text().strip().lower()
- if not text:
- return
-
- state = AppState.instance()
-
- # Simple NL parsing
- if text.startswith("say "):
- # Voice Command
- msg = text[4:].strip()
- from core.voice import VoiceEngine
- VoiceEngine.instance().speak(msg)
- self.placeholder_feedback(f"🗣️ Speaking: {msg}")
- elif "music" in text or "play" in text:
- from core.music import MusicEngine
- if "stop" in text:
- MusicEngine.instance().stop()
- self.placeholder_feedback(f"🛑 Music Stopped")
- else:
- MusicEngine.instance().play()
- self.placeholder_feedback(f"🎵 Playing Ambient Mix")
- elif "focus" in text:
- state.set_mode("focus", reason="user command")
- self.placeholder_feedback(f"⚡ Entering Focus Mode")
- elif "break" in text or "relax" in text:
- state.set_mode("break", reason="user command")
- self.placeholder_feedback(f"☕ Taking a Break")
- elif "idle" in text or "reset" in text:
- state.set_mode("idle", reason="user command")
- self.placeholder_feedback(f"🌱 Idling")
- else:
- # Fallback / Log
- print(f"Unknown command: {text}")
-
- self.clear()
-
- def placeholder_feedback(self, msg: str):
- """Show temporary feedback in placeholder."""
- self.setPlaceholderText(msg)
- # Restore default placeholder after delay could be added here,
- # but for now we just leave the feedback until next type.
diff --git a/core/ui/explorer.py b/core/ui/explorer.py
deleted file mode 100644
index 13c19e3..0000000
--- a/core/ui/explorer.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-from PySide6.QtWidgets import QVBoxLayout, QTreeView, QFileSystemModel, QHeaderView
-from PySide6.QtCore import QDir
-from core.ui.panels import GlassPanel
-
-class FileExplorerPanel(GlassPanel):
- """
- Project Explorer for the Agentic Workspace.
- """
- def __init__(self, title="Explorer", parent=None):
- super().__init__(title=title, parent=parent)
-
- self.model = QFileSystemModel()
- self.model.setRootPath(os.getcwd())
- self.model.setFilter(QDir.NoDotAndDotDot | QDir.AllDirs | QDir.Files)
-
- self.tree = QTreeView()
- self.tree.setModel(self.model)
- self.tree.setRootIndex(self.model.index(os.getcwd()))
-
- # UI Styling
- self.tree.setStyleSheet("background: transparent; color: #f8f8f2; border: none;")
- self.tree.setAnimated(True)
- self.tree.setIndentation(20)
- self.tree.setSortingEnabled(True)
-
- # Hide header and unneeded columns
- self.tree.header().hide()
- for i in range(1, 4):
- self.tree.setColumnHidden(i, True)
-
- self.layout.addWidget(self.tree)
-
- def select_file(self, rel_path: str):
- full_path = os.path.join(os.getcwd(), rel_path)
- index = self.model.index(full_path)
- self.tree.setCurrentIndex(index)
- self.tree.scrollTo(index)
diff --git a/core/ui/gui.py b/core/ui/gui.py
deleted file mode 100644
index 18f2ee3..0000000
--- a/core/ui/gui.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# ui/gui.py
-"""
-Aurora GUI (refined)
-- Thread-safe UI updates (never touch Tk widgets from worker threads)
-- Buttons: Read Aloud, Regenerate, 👍 / 👎 Feedback
-- Works with avatars exposing either `generate_response` or `speak`
-- Integrates VoiceEngine and MemoryStore
-"""
-
-import tkinter as tk
-from tkinter import scrolledtext, Label, Entry, Frame, Button
-import threading
-import time
-import random
-from typing import Any, Dict
-
-# Import VoiceEngine & MemoryStore (make sure core package path is correct)
-from core.voice_engine import VoiceEngine
-from core.memory_store import MemoryStore
-
-
-# -------------------------
-# Visuals / Floating Animations
-# -------------------------
-class Visuals:
- def __init__(self, gui_root: tk.Tk, visual_canvas: tk.Canvas, aura_item: int, status_label: tk.Label):
- self.root = gui_root
- self.visual_canvas = visual_canvas
- self.aura_item = aura_item
- self.status_label = status_label
- self.focused = False
- self.float_offset = 0
- self.direction = 1
- self.current_visual_text = "💡 Idle Visual"
- self.visual_label = None
- self.init_label()
- # start animations on the main thread
- self.root.after(0, self.animate_aura)
- self.root.after(0, self.animate_visuals)
-
- # Aura pulse animation (runs on main thread via after)
- def animate_aura(self):
- try:
- color = "#00ffdd" if not self.focused else "#ffdd00"
- self.visual_canvas.itemconfig(self.aura_item, outline=color)
- width = random.randint(2, 5)
- self.visual_canvas.itemconfig(self.aura_item, width=width)
- except Exception:
- pass
- self.root.after(500, self.animate_aura)
-
- # Floating visual up/down
- def animate_visuals(self):
- try:
- self.float_offset += self.direction * 2
- if abs(self.float_offset) > 20:
- self.direction *= -1
- self.visual_canvas.coords(
- self.visual_label, 200, 100 + self.float_offset)
- except Exception:
- pass
- self.root.after(50, self.animate_visuals)
-
- # Update floating visual text (safe to call from main thread only)
- def update_visual(self, text: str):
- self.current_visual_text = text
- try:
- self.visual_canvas.itemconfig(self.visual_label, text=text)
- except Exception:
- pass
-
- # Set EI focus state
- def set_focus(self, focused: bool):
- self.focused = focused
- try:
- self.status_label.config(
- text="Status: Focused" if focused else "Status: Calm")
- except Exception:
- pass
-
- # Initialize label
- def init_label(self):
- # create_text returns an item id
- self.visual_label = self.visual_canvas.create_text(
- 200, 100, text=self.current_visual_text, fill="#00ffdd", font=("Arial", 16)
- )
-
-
-# -------------------------
-# Aurora GUI with Cinematic Logo Intro
-# -------------------------
-class AuroraGUI:
- def __init__(self, avatar: Any):
- self.avatar = avatar
- self.voice_engine = VoiceEngine() # Read aloud engine (optional)
- self.memory = MemoryStore() # Memory tracking
- self.last_user_input = ""
- self.last_avatar_response = ""
- self._metrics_running = False
-
- # Root window
- self.root = tk.Tk()
- self.root.title("Etherea – Adaptive Workspace 🌟")
- self.root.geometry("1024x768")
- self.root.configure(bg="#1e1e2f") # dark futuristic background
-
- # Intro canvas
- self.intro_canvas = tk.Canvas(
- self.root, width=1024, height=768, bg="#1e1e2f", highlightthickness=0)
- self.intro_canvas.place(x=0, y=0)
-
- self.intro_aura = self.intro_canvas.create_oval(
- 412, 284, 612, 484, outline="#00ffdd", width=4)
- self.logo_square = self.intro_canvas.create_rectangle(
- 450, 320, 574, 444, outline="#ffffff", width=3)
- self.logo_brain = self.intro_canvas.create_oval(
- 460, 330, 564, 438, outline="#00ffdd", width=2)
-
- # Start intro
- self.root.after(100, self.run_intro)
-
- # ---------------- Cinematic Intro Animation ----------------
- def run_intro(self):
- # run short animation (blocking here is ok because it's the intro)
- for i in range(30):
- offset = 5 + i
- try:
- self.intro_canvas.coords(
- self.intro_aura, 412 - offset, 284 - offset, 612 + offset, 484 + offset)
- color = "#00ffdd" if i % 2 == 0 else "#00ffff"
- self.intro_canvas.itemconfig(
- self.intro_aura, outline=color, width=2 + i // 10)
- scale = 1 + i * 0.01
- self.scale_item(self.logo_square, 512, 384, scale)
- self.scale_item(self.logo_brain, 512, 384, scale)
- except Exception:
- pass
- self.root.update()
- time.sleep(0.03)
-
- # Fade intro
- for alpha in range(20, -1, -1):
- try:
- val = max(0, min(255, alpha * 12))
- color_val = f"#{val:02x}{val:02x}{val:02x}"
- self.intro_canvas.configure(bg=color_val)
- except Exception:
- pass
- self.root.update()
- time.sleep(0.02)
-
- # Destroy intro and init workspace
- try:
- self.intro_canvas.destroy()
- except Exception:
- pass
- self.init_workspace()
-
- # ---------------- Utility: Scale a canvas item ----------------
- def scale_item(self, item: int, cx: float, cy: float, scale: float):
- try:
- coords = self.intro_canvas.coords(item)
- if len(coords) >= 4:
- x0, y0, x1, y1 = coords[:4]
- new_coords = [
- cx + (x0 - cx) * scale,
- cy + (y0 - cy) * scale,
- cx + (x1 - cx) * scale,
- cy + (y1 - cy) * scale
- ]
- self.intro_canvas.coords(item, *new_coords)
- except Exception:
- pass
-
- # ---------------- Initialize Workspace ----------------
- def init_workspace(self):
- # Left Panel (Avatar + Aura)
- self.left_panel = tk.Canvas(
- self.root, width=200, height=200, bg="#1e1e2f", highlightthickness=0)
- self.left_panel.place(x=20, y=20)
-
- self.avatar_label = Label(self.left_panel, text="💫 Avatar", fg="#ffffff", bg="#1e1e2f",
- font=("Arial", 16, "bold"))
- self.avatar_label.place(x=30, y=20)
-
- self.status_label = Label(self.left_panel, text="Status: Calm", fg="#00ffdd", bg="#1e1e2f",
- font=("Arial", 12, "italic"))
- self.status_label.place(x=20, y=60)
-
- self.aura_item = self.left_panel.create_oval(
- 10, 10, 180, 180, outline="#00ffdd", width=4)
-
- # Main Workspace Panel
- self.workspace = scrolledtext.ScrolledText(self.root, width=80, height=15, font=("Consolas", 12),
- bg="#2e2e3e", fg="#f0f0f0")
- self.workspace.place(x=240, y=20)
-
- # Floating visuals canvas
- self.visual_canvas = tk.Canvas(
- self.root, width=400, height=200, bg="#2e2e3e", highlightthickness=0)
- self.visual_canvas.place(x=240, y=320)
-
- # Input box
- self.user_input_var = tk.StringVar()
- self.input_box = Entry(self.root, textvariable=self.user_input_var, font=("Consolas", 12),
- bg="#2e2e3e", fg="#f0f0f0")
- self.input_box.place(x=240, y=650, width=600)
- self.input_box.bind("", self.submit_input)
-
- # Response box
- self.response_box = scrolledtext.ScrolledText(self.root, width=80, height=5, font=("Consolas", 12),
- bg="#2e2e3e", fg="#a0ffa0")
- self.response_box.place(x=240, y=520)
-
- # Feedback Buttons Frame (below response_box)
- self.button_frame = Frame(self.root, bg="#1e1e2f")
- self.button_frame.place(x=240, y=595, width=600, height=40)
-
- # Buttons
- self.read_button = Button(self.button_frame, text="🔊 Read Aloud", command=self.read_aloud, bg="#00ffdd",
- fg="#1e1e2f")
- self.read_button.pack(side="left", padx=5, pady=5)
-
- self.regen_button = Button(self.button_frame, text="🔄 Regenerate", command=self.regenerate_response,
- bg="#ffdd00", fg="#1e1e2f")
- self.regen_button.pack(side="left", padx=5, pady=5)
-
- self.thumb_up_button = Button(self.button_frame, text="👍", command=lambda: self.send_feedback(True),
- bg="#00ffaa", fg="#1e1e2f")
- self.thumb_up_button.pack(side="left", padx=5, pady=5)
-
- self.thumb_down_button = Button(self.button_frame, text="👎", command=lambda: self.send_feedback(False),
- bg="#ff5555", fg="#1e1e2f")
- self.thumb_down_button.pack(side="left", padx=5, pady=5)
-
- # EI Label
- self.ei_label = Label(self.root, text="EI State: Neutral", bg="#1e1e2f", fg="#a0a0ff",
- font=("Consolas", 12))
- self.ei_label.place(x=20, y=240)
-
- # Visuals & Animations
- self.visuals = Visuals(self.root, self.visual_canvas,
- self.aura_item, self.status_label)
-
- # Intro finished message
- self.workspace.insert(
- tk.END, "Workspace ready. Start typing commands...\n")
- self.root.update()
-
- # Start periodic EI/metrics updates
- self._metrics_running = True
- self.root.after(1000, self._metrics_loop)
-
- # ---------------- User Input ----------------
- def get_user_input(self) -> str:
- self.root.update()
- return self.user_input_var.get()
-
- def submit_input(self, event=None):
- user_text = self.user_input_var.get()
- if user_text.strip() != "":
- # append user input to workspace (main thread)
- self.last_user_input = user_text
- self.workspace.insert(tk.END, f"User: {user_text}\n")
- self.user_input_var.set("")
- # start worker thread to generate response
- threading.Thread(target=self._background_generate,
- args=(user_text,), daemon=True).start()
-
- # ---------------- Avatar Integration (thread-safe) ----------------
- def _background_generate(self, user_text: str):
- """
- Worker thread: calls avatar (generate_response or speak),
- then schedules UI update on main thread.
- """
- try:
- if hasattr(self.avatar, "generate_response"):
- response = self.avatar.generate_response(user_text)
- elif hasattr(self.avatar, "speak"):
- response = self.avatar.speak(user_text)
- else:
- response = "Avatar has no response method."
- except Exception as e:
- response = f"[Error generating response: {e}]"
-
- # schedule UI update on main thread
- self.root.after(
- 0, lambda: self._handle_response_ui(response, user_text))
-
- def _handle_response_ui(self, response: str, user_text: str):
- # update last response and UI widgets (main thread only)
- self.last_avatar_response = response
- self.response_box.insert(tk.END, f"{response}\n")
- self.response_box.see(tk.END)
-
- # update workspace log too (optional)
- try:
- self.workspace.insert(tk.END, f"AI: {response}\n")
- self.workspace.see(tk.END)
- except Exception:
- pass
-
- # Visuals: request a visual text from avatar if available
- try:
- if hasattr(self.avatar, "get_visual_for_response"):
- visual_text = self.avatar.get_visual_for_response(response)
- else:
- visuals = ["💡 Lightbulb", "🔥 Focus Flame",
- "🧘 Relax Aura", "⚡ Energy Pulse"]
- visual_text = random.choice(visuals)
- except Exception:
- visual_text = "💡 Idle Visual"
-
- # schedule visual update on main thread
- try:
- self.visuals.update_visual(visual_text)
- except Exception:
- pass
-
- # Optionally save interaction to memory (non-blocking)
- try:
- data = self.memory.load()
- interactions = data.get("interactions", [])
- interactions.append({"user": user_text, "bot": response,
- "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S")})
- self.memory.update_key("interactions", interactions)
- except Exception:
- pass
-
- # ---------------- Voice & Buttons ----------------
- def read_aloud(self):
- if not self.last_avatar_response:
- return
- # get EI state (can be string or dict)
- ei_state = None
- try:
- if hasattr(self.avatar, "get_current_ei_state"):
- ei_state = self.avatar.get_current_ei_state()
- # normalize to dict expected by voice_engine (tone/intensity)
- if isinstance(ei_state, str):
- ei_state = {"tone": ei_state.lower(), "intensity": 0.6}
- elif isinstance(ei_state, dict):
- pass
- else:
- ei_state = {"tone": "neutral", "intensity": 0.5}
- except Exception:
- ei_state = {"tone": "neutral", "intensity": 0.5}
-
- # speak in worker thread (pyttsx3 is blocking)
- threading.Thread(target=self.voice_engine.speak, args=(
- self.last_avatar_response, ei_state), daemon=True).start()
-
- def regenerate_response(self):
- if not self.last_user_input:
- return
- # regenerate by running background generate with last user input
- threading.Thread(target=self._background_generate, args=(
- self.last_user_input,), daemon=True).start()
-
- def send_feedback(self, positive: bool):
- feedback_entry = {
- "session_id": 1,
- "response": self.last_avatar_response,
- "positive": bool(positive),
- "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S")
- }
- # store feedback in memory (calls are quick; keep on main thread)
- try:
- data = self.memory.load()
- feedback_list = data.get("feedback", [])
- feedback_list.append(feedback_entry)
- self.memory.update_key("feedback", feedback_list)
- except Exception:
- pass
- # small UI ack
- try:
- self.response_box.insert(
- tk.END, f"[Feedback {'👍' if positive else '👎'} recorded]\n")
- self.response_box.see(tk.END)
- except Exception:
- pass
-
- # ---------------- EI / Metrics Loop ----------------
    def _metrics_loop(self):
        """Periodic (1.5 s) poll of the avatar's EI state to refresh the UI.

        Best-effort by design: every failure is swallowed so a misbehaving
        avatar can never kill the Tk event loop.  Reschedules itself via
        ``root.after`` while ``self._metrics_running`` is true.
        """
        try:
            if hasattr(self.avatar, "get_current_ei_state"):
                ei_state = self.avatar.get_current_ei_state()
                # normalize for display (state may be a dict or a plain string)
                if isinstance(ei_state, dict):
                    label = ei_state.get("tone", "Neutral").capitalize()
                else:
                    label = str(ei_state).capitalize()
                self.ei_label.config(text=f"EI State: {label}")

                # Map tone -> aura colour; unknown tones fall back to cyan.
                color_map = {"neutral": "#00ffdd",
                             "focused": "#ffdd00", "excited": "#ff00ff"}
                color = color_map.get(label.lower(), "#00ffdd")
                self.visuals.focused = label.lower() == "focused"
                try:
                    # visual_label may not exist on the canvas yet — ignore.
                    self.visual_canvas.itemconfig(
                        self.visuals.visual_label, fill=color)
                except Exception:
                    pass
        except Exception:
            pass

        # schedule next run
        if self._metrics_running:
            self.root.after(1500, self._metrics_loop)
-
- # ---------------- Run GUI ----------------
- def run(self):
- try:
- self.root.mainloop()
- finally:
- self._metrics_running = False
-
-
-# -------------------------
-# Example usage (test without AI)
-# -------------------------
if __name__ == "__main__":
    class DummyAvatar:
        """Stand-in avatar so the GUI can be exercised without any AI backend."""

        def load_onboarding_data(self):
            pass

        def generate_response(self, text: str) -> str:
            return f"Avatar says: {text[::-1]}"

        def get_visual_for_response(self, response: str) -> str:
            presets = ["💡 Lightbulb", "🔥 Focus Flame",
                       "🧘 Relax Aura", "⚡ Energy Pulse"]
            return random.choice(presets)

        def get_current_ei_state(self):
            # Either a string or a dict is accepted by the GUI.
            return random.choice(["Neutral", "Focused", "Excited"])

    demo = AuroraGUI(DummyAvatar())
    demo.run()
diff --git a/core/ui/highlighter.py b/core/ui/highlighter.py
deleted file mode 100644
index 28c7a46..0000000
--- a/core/ui/highlighter.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from PySide6.QtGui import QSyntaxHighlighter, QTextCharFormat, QColor, QFont
-from PySide6.QtCore import QRegularExpression
-
class PythonHighlighter(QSyntaxHighlighter):
    """Dracula-flavoured syntax highlighter for Python source.

    ``self.rules`` holds (compiled QRegularExpression, QTextCharFormat)
    pairs, applied in order — later rules (strings, comments) paint over
    earlier ones.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.rules = []

        # Keywords (pink, bold)
        keyword_format = QTextCharFormat()
        keyword_format.setForeground(QColor("#ff79c6"))
        keyword_format.setFontWeight(QFont.Bold)
        keywords = [
            "False", "None", "True", "and", "as", "assert", "async", "await",
            "break", "class", "continue", "def", "del", "elif", "else", "except",
            "finally", "for", "from", "global", "if", "import", "in", "is",
            "lambda", "nonlocal", "not", "or", "pass", "raise", "return", "try",
            "while", "with", "yield"
        ]
        for kw in keywords:
            self.rules.append((QRegularExpression(f"\\b{kw}\\b"), keyword_format))

        # Builtins (cyan) — note: plain string; the old f-prefix had no
        # placeholders and was spurious.
        builtin_format = QTextCharFormat()
        builtin_format.setForeground(QColor("#8be9fd"))
        self.rules.append(
            (QRegularExpression("\\b(print|len|range|self|cls)\\b"), builtin_format))

        # Strings (yellow); greedy match preserves the original behaviour.
        string_format = QTextCharFormat()
        string_format.setForeground(QColor("#f1fa8c"))
        self.rules.append((QRegularExpression("\".*\""), string_format))
        self.rules.append((QRegularExpression("'.*'"), string_format))

        # Comments (muted blue/grey)
        comment_format = QTextCharFormat()
        comment_format.setForeground(QColor("#6272a4"))
        self.rules.append((QRegularExpression("#.*"), comment_format))

    def highlightBlock(self, text):
        """Apply every rule to *text*.

        Fix: the previous version re-wrapped each already-compiled pattern
        in a fresh QRegularExpression on every block, discarding the
        compiled object; it also shadowed the builtin ``format``.
        """
        for pattern, char_format in self.rules:
            it = pattern.globalMatch(text)
            while it.hasNext():
                match = it.next()
                self.setFormat(match.capturedStart(),
                               match.capturedLength(), char_format)
diff --git a/core/ui/holo_panel.py b/core/ui/holo_panel.py
deleted file mode 100644
index 0de9fb4..0000000
--- a/core/ui/holo_panel.py
+++ /dev/null
@@ -1,192 +0,0 @@
-import math
-from PySide6.QtWidgets import QWidget, QLabel, QVBoxLayout, QGraphicsOpacityEffect
-from PySide6.QtCore import Qt, QTimer, QPointF, QRectF, Property, QEasingCurve, QPropertyAnimation
-from PySide6.QtGui import QPainter, QPen, QColor, QFont, QPainterPath, QLinearGradient, QBrush
-
class HoloPanel(QWidget):
    """
    Glass-morphic Holographic Panel for educational content.

    Drives a 5-step (0..4) teaching sequence: title card, scatter points,
    regression line, residuals, summary cards.  ``step`` is the discrete
    target; ``visual_step`` chases it each tick for smooth transitions.
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setFixedSize(500, 350)
        self.setAttribute(Qt.WidgetAttribute.WA_TransparentForMouseEvents, True) # Interaction is via Avatar

        # Animation State
        self.step = 0                 # discrete teaching step (0..4)
        self.visual_step = 0.0 # Float for smooth transitions
        self.anim_t = 0.0             # 0..1 progress within the current step

        # ~33 FPS repaint driver
        self.timer = QTimer(self)
        self.timer.timeout.connect(self._tick)
        self.timer.start(30)

        # Fade Effect
        self._opacity_eff = QGraphicsOpacityEffect(self)
        self.setGraphicsEffect(self._opacity_eff)
        self._opacity = 0.0
        self._opacity_eff.setOpacity(0.0)

    def show_teaching_sequence(self):
        """Reset to start of sequence"""
        self.step = 0
        self.visual_step = 0.0
        self.anim_t = 0.0
        self._opacity_eff.setOpacity(0.0)
        self._fade_in()
        self.update()

    def next_step(self):
        """Manually advance (called by Avatar Gesture)"""
        if self.step < 4:
            self.step += 1
            # anim_t will reset in tick if needed, but here we just set the target
            self.update()

    def _fade_in(self):
        # Animation kept on self so it isn't garbage-collected mid-run.
        self.anim = QPropertyAnimation(self._opacity_eff, b"opacity")
        self.anim.setDuration(800)
        self.anim.setStartValue(0.0)
        self.anim.setEndValue(1.0)
        self.anim.setEasingCurve(QEasingCurve.OutCubic)
        self.anim.start()

    def _tick(self):
        """Timer slot: ease visual_step towards step and advance anim_t."""
        # 1. Step Interpolation (Moving Flow)
        s = 0.1 # Move towards current step
        self.visual_step = self.visual_step + (self.step - self.visual_step) * s

        # 2. Local animation within step (clamped to 1.0)
        self.anim_t += 0.04
        if self.anim_t > 1.0:
            self.anim_t = 1.0
        self.update()

    def paintEvent(self, event):
        """Render the glass pane plus the content for the current step."""
        qp = QPainter(self)
        qp.setRenderHint(QPainter.Antialiasing)

        # Vivid Flow: Use visual_step to offset elements (Exaggerated)
        flow_offset = (self.visual_step - self.step) * 100.0
        bob = math.sin(self.anim_t * 2.0) * 5.0

        w, h = self.width(), self.height()
        rect = QRectF(0, 0, w, h)

        # 1. Glass Pane Background
        grad = QLinearGradient(0, 0, w, h)
        grad.setColorAt(0.0, QColor(10, 30, 40, 220))
        grad.setColorAt(1.0, QColor(5, 10, 15, 230))

        qp.setPen(QPen(QColor(0, 240, 255, 120), 1.5))
        qp.setBrush(grad)
        qp.drawRoundedRect(rect, 15, 15)

        qp.translate(flow_offset, bob) # Apply Moving & Bobbing Flow

        # 2. Content Logic (Smoothed) — fade while visual_step is in transit.
        opacity = 1.0 - abs(self.visual_step - self.step)
        qp.setOpacity(max(0.1, opacity))
        if self.step == 0:
            # Panel 1: Title Card
            qp.setPen(QColor(0, 240, 255))
            qp.setFont(QFont("Segoe UI", 24, QFont.Bold))
            qp.drawText(rect, Qt.AlignCenter, "Regression Analysis")

            qp.setPen(QColor(200, 200, 200))
            qp.setFont(QFont("Segoe UI", 12))
            qp.drawText(QRectF(0, h/2 + 30, w, 30), Qt.AlignCenter, "Predicting trends from data")

        elif self.step >= 1:
            # Panels 2-4: Graph & Summary
            self._draw_graph(qp, w, h)

            if self.step == 4:
                # Panel 5: Summary Cards
                self._draw_summary(qp, w, h)

    def _draw_summary(self, qp: QPainter, w: int, h: int):
        """Pop-up row of summary cards along the bottom edge."""
        cards = ["Goal: Prediction", "y = mx + c", "Minimize Error"]
        # Draw cards overlay at bottom
        card_w = 120
        spacing = 10
        total_w = len(cards) * card_w + (len(cards)-1)*spacing
        start_x = (w - total_w) / 2

        y_off = h - 60
        # Animation pop-up: cards rise 20px as anim_t goes 0 -> 1.
        y_anim = y_off + (1.0 - self.anim_t) * 20

        for i, txt in enumerate(cards):
            r = QRectF(start_x + i*(card_w+spacing), y_anim, card_w, 40)

            qp.setBrush(QColor(20, 40, 60, 230))
            qp.setPen(QPen(QColor(0, 240, 255), 1))
            qp.drawRoundedRect(r, 8, 8)

            qp.setPen(QColor(255, 255, 255))
            qp.setFont(QFont("Segoe UI", 9, QFont.Bold))
            qp.drawText(r, Qt.AlignCenter, txt)

    def _draw_graph(self, qp: QPainter, w: int, h: int):
        """Scatter plot + regression line + residuals, revealed per step."""
        # Margins
        mx, my = 40, 60
        gw, gh = w - 80, h - 120 # Leave room for summary

        # Header
        qp.setPen(QColor(0, 240, 255))
        qp.setFont(QFont("Segoe UI", 12, QFont.Bold))
        qp.drawText(QRectF(0, 15, w, 30), Qt.AlignCenter, "LINEAR REGRESSION MODEL")

        # Axes
        qp.setPen(QPen(QColor(255, 255, 255, 100), 2))
        qp.drawLine(mx, my+gh, mx+gw, my+gh) # X
        qp.drawLine(mx, my, mx, my+gh)       # Y

        # Data Points (Scatter) — normalised 0..1 coordinates.
        points = [
            (0.1, 0.2), (0.2, 0.3), (0.3, 0.25), (0.4, 0.5),
            (0.5, 0.45), (0.6, 0.7), (0.7, 0.65), (0.8, 0.85), (0.9, 0.9)
        ]

        # Animate points appearing (Step 1)
        progress = 1.0 if self.step > 1 else self.anim_t
        points_to_show = int(len(points) * float(progress))

        qp.setPen(Qt.NoPen)
        qp.setBrush(QColor(0, 240, 255))

        for i in range(points_to_show):
            px, py = points[i]
            x = mx + px * gw
            y = (my+gh) - py * gh  # flip: screen y grows downward
            qp.drawEllipse(QPointF(x, y), 5, 5)

        # Step 2: Regression Line (grows from left to right)
        if self.step >= 2:
            progress = 1.0 if self.step > 2 else self.anim_t

            p1 = QPointF(mx, (my+gh) - 0.15 * gh)
            p2 = QPointF(mx + gw, (my+gh) - 0.9 * gh)

            curr_p2 = p1 + (p2 - p1) * progress

            pen = QPen(QColor(255, 100, 100))
            pen.setWidth(3)
            qp.setPen(pen)
            qp.drawLine(p1, curr_p2)

        # Step 3: Residuals (dashed verticals, fading in)
        if self.step >= 3:
            fade = 1.0 if self.step > 3 else self.anim_t
            qp.setPen(QPen(QColor(255, 255, 255, int(150 * fade)), 1, Qt.DashLine))

            for px, py in points:
                # Approx line y at this x (same endpoints as the drawn line)
                line_y_norm = 0.15 + (px * (0.9 - 0.15))

                screen_x = mx + px * gw
                screen_y_pt = (my+gh) - py * gh
                screen_y_line = (my+gh) - line_y_norm * gh

                qp.drawLine(QPointF(screen_x, screen_y_pt), QPointF(screen_x, screen_y_line))
diff --git a/core/ui/holograms.py b/core/ui/holograms.py
deleted file mode 100644
index 7fea010..0000000
--- a/core/ui/holograms.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import math
-from PySide6.QtWidgets import QWidget, QGraphicsOpacityEffect
-from PySide6.QtCore import QTimer, Qt, QPointF
-from PySide6.QtGui import QPainter, QRadialGradient, QColor, QBrush, QLinearGradient
-
class NebulaBackground(QWidget):
    """Animated deep-space backdrop: a slowly drifting, "breathing" radial glow."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.t = 0  # animation clock, advanced 0.1 per tick
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.animate)
        self.timer.start(50) # 20 FPS for smooth breathing

    def animate(self):
        """Timer slot: advance the clock and repaint."""
        self.t += 0.1
        self.update() # Trigger a repaint

    def paintEvent(self, event):
        """Paint the base colour plus the pulsing nebula gradient."""
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)

        # 1. Deep Space Base
        painter.fillRect(self.rect(), QColor(10, 10, 18))

        # 2. Calculating the "Breathing" Pulse
        width = self.width()
        height = self.height()
        center_x = width / 2
        center_y = height / 2

        # Pulse creates a value that goes up and down smoothly
        pulse = (math.sin(self.t) + 1) / 2 # Range 0.0 to 1.0

        # 3. Draw the Aurora/Nebula Gradient
        # We move the center slightly to make it feel organic
        grad_x = center_x + math.cos(self.t * 0.5) * 50
        grad_y = center_y + math.sin(self.t * 0.5) * 50

        radius = max(width, height) * (0.8 + (pulse * 0.1)) # Expands and contracts

        gradient = QRadialGradient(QPointF(grad_x, grad_y), radius)

        # Colors: Deep Purple -> Cyan -> Transparent
        gradient.setColorAt(0.0, QColor(80, 0, 255, 40))   # Core (Purple)
        gradient.setColorAt(0.5, QColor(0, 200, 255, 20))  # Mid (Cyan)
        gradient.setColorAt(1.0, QColor(10, 10, 18, 0))    # Edge (Fade)

        painter.setBrush(QBrush(gradient))
        painter.setPen(Qt.NoPen)
        painter.drawRect(self.rect())
-
class HolographicEffect(QGraphicsOpacityEffect):
    """Subtle ghosting/glow: a fixed 85% opacity applied to the target widget."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # Slight translucency gives widgets a holographic look.
        self.setOpacity(0.85)
diff --git a/core/ui/icon_provider.py b/core/ui/icon_provider.py
deleted file mode 100644
index 016c764..0000000
--- a/core/ui/icon_provider.py
+++ /dev/null
@@ -1,129 +0,0 @@
-from PySide6.QtCore import Qt, QPointF, QSize, QRectF
-from PySide6.QtGui import QIcon, QPainter, QPen, QColor, QPixmap, QBrush, QPainterPath
-
class IconProvider:
    """
    Generates professional vector icons using QPainter.
    No emojis. No images. Pure purity.
    """

    @staticmethod
    def get_icon(name: str, size: int = 40, color: QColor = QColor(200, 240, 255), badge_count: int = 0) -> QIcon:
        """Render the named icon onto a transparent pixmap and return it.

        name: one of "home", "avatar", "notifications", "settings", "cmd",
              "workspace", "aurora"; unknown names yield a blank icon.
        badge_count: if > 0, a red badge with the count (capped at "9+")
              is drawn in the top-right corner.
        """
        pixmap = QPixmap(size, size)
        pixmap.fill(Qt.transparent)

        qp = QPainter(pixmap)
        qp.setRenderHint(QPainter.Antialiasing, True)

        # Style: rounded 2px stroke, no fill.
        stroke_width = 2.0
        pen = QPen(color)
        pen.setWidthF(stroke_width)
        pen.setCapStyle(Qt.RoundCap)
        pen.setJoinStyle(Qt.RoundJoin)
        qp.setPen(pen)
        qp.setBrush(Qt.NoBrush)

        # Center context
        c = size / 2
        r = size * 0.35  # Radius/Scale

        if name == "home":
            # Geometric House
            # Roof
            path = QPainterPath()
            path.moveTo(c - r, c - r*0.2)
            path.lineTo(c, c - r*1.2)
            path.lineTo(c + r, c - r*0.2)
            # Body
            path.lineTo(c + r, c + r)
            path.lineTo(c - r, c + r)
            path.closeSubpath()
            # Door
            path.moveTo(c - r*0.3, c + r)
            path.lineTo(c - r*0.3, c + r*0.3)
            path.lineTo(c + r*0.3, c + r*0.3)
            path.lineTo(c + r*0.3, c + r)
            qp.drawPath(path)

        elif name == "avatar":
            # Silhouette
            # Head
            qp.drawEllipse(QPointF(c, c - r*0.5), r*0.5, r*0.5)
            # Body (Arc)
            path = QPainterPath()
            path.arcMoveTo(QRectF(c - r, c, r*2, r*2.0), 30)
            path.arcTo(QRectF(c - r, c, r*2, r*2.0), 30, 120)
            qp.drawPath(path)

        elif name == "notifications":
            # Bell
            path = QPainterPath()
            # Bell cup
            path.moveTo(c, c - r)
            path.cubicTo(c + r*0.8, c - r*0.2, c + r, c + r*0.5, c + r, c + r*0.5)
            path.lineTo(c - r, c + r*0.5)
            path.cubicTo(c - r, c + r*0.5, c - r*0.8, c - r*0.2, c, c - r)
            # Clapper
            qp.drawPath(path)
            qp.drawArc(QRectF(c - r*0.2, c + r*0.3, r*0.4, r*0.4), 210, 120)

        elif name == "settings":
            # Gear
            qp.drawEllipse(QPointF(c, c), r*0.4, r*0.4)
            # Teeth (Line loop)
            for i in range(0, 360, 45):
                qp.save()
                qp.translate(c, c)
                qp.rotate(i)
                qp.drawLine(0, int(r*0.6), 0, int(r*0.9))
                qp.restore()

        elif name == "cmd":
            # Terminal >_
            path = QPainterPath()
            path.moveTo(c - r*0.6, c - r*0.4)
            path.lineTo(c - r*0.1, c)
            path.lineTo(c - r*0.6, c + r*0.4)
            qp.drawPath(path)
            # Underscore
            qp.drawLine(QPointF(c + r*0.1, c + r*0.4), QPointF(c + r*0.6, c + r*0.4))

        elif name == "workspace":
            # Grid layout: 4 squares
            d = r * 0.8
            qp.drawRect(QRectF(c - d, c - d, d*0.9, d*0.9))          # TL
            qp.drawRect(QRectF(c + 0.1*d, c - d, d*0.9, d*0.9))      # TR
            qp.drawRect(QRectF(c - d, c + 0.1*d, d*0.9, d*0.9))      # BL
            qp.drawRect(QRectF(c + 0.1*d, c + 0.1*d, d*0.9, d*0.9))  # BR

        elif name == "aurora":
            # Spark/Star (was a stray `if`; made `elif` for consistency —
            # behaviour unchanged since the name branches are exclusive)
            path = QPainterPath()
            path.moveTo(c, c - r)
            path.cubicTo(c + r*0.1, c - r*0.1, c + r*0.1, c - r*0.1, c + r, c)
            path.cubicTo(c + r*0.1, c + r*0.1, c + r*0.1, c + r*0.1, c, c + r)
            path.cubicTo(c - r*0.1, c + r*0.1, c - r*0.1, c + r*0.1, c - r, c)
            path.cubicTo(c - r*0.1, c - r*0.1, c - r*0.1, c - r*0.1, c, c - r)
            qp.drawPath(path)

        # Draw Badge Overlay
        if badge_count > 0:
            # Bug fix: QFont was used here but never imported at module level,
            # so any badge render raised NameError.  Import locally to keep
            # this edit self-contained.
            from PySide6.QtGui import QFont

            # Badge Circle
            badge_size = 14
            bx = size - badge_size - 2
            by = 2

            qp.setPen(Qt.NoPen)
            qp.setBrush(QColor(255, 60, 60)) # Alert Red
            qp.drawEllipse(bx, by, badge_size, badge_size)

            # Badge Text
            qp.setPen(QColor(255, 255, 255))
            qp.setFont(QFont("Segoe UI", 8, QFont.Bold))
            txt = str(badge_count) if badge_count < 10 else "9+"
            qp.drawText(QRectF(bx, by, badge_size, badge_size), Qt.AlignCenter, txt)

        qp.end()
        return QIcon(pixmap)
diff --git a/core/ui/main_window.py b/core/ui/main_window.py
deleted file mode 100644
index 1dcd10f..0000000
--- a/core/ui/main_window.py
+++ /dev/null
@@ -1,317 +0,0 @@
-import sys
-import os
-from PySide6.QtWidgets import (
- QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
- QFrame, QApplication, QStackedLayout, QLabel, QPushButton
-)
-from PySide6.QtCore import Qt, QTimer
-from PySide6.QtGui import QColor
-
-from core.state import AppState
-from core.ui.ethera_command_bar import EtheraCommandBar
-from core.ui.aurora_canvas_widget import AuroraCanvasWidget
-from core.ui.avatar_heroine_widget import AvatarHeroineWidget
-from core.ui.holo_panel import HoloPanel
-from core.ui.side_dock import SideDock
-from core.ui.workspace_widget import WorkspaceWidget
-from core.ui.notification_tray import NotificationTray
-from core.ui.settings_widget import SettingsWidget
-
class HeroMainWindow(QMainWindow):
    """Top-level demo window: side dock plus a Z-stacked content area.

    Layer order (bottom to top): Aurora canvas, holo panel, workspace,
    avatar, notification tray, settings, and a transparent UI layer that
    carries the command bar and status label.

    Fixes vs. the previous revision:
    - `handle_dock_mode` was defined twice; the first (incomplete) copy was
      dead code shadowed by the second and has been removed.
    - `self.state` was read by `check_command` / `handle_dock_mode` but never
      assigned, so the `handle_dock_mode("home")` at the end of __init__
      raised AttributeError; it is now initialised.
    - Bare `except:` in `_on_agent_task_done` narrowed to `except Exception`.
    """

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Etherea — Hero Demo")
        self.resize(1280, 800)

        # Central Widget & Layout
        container = QWidget()
        self.setCentralWidget(container)

        # Main Layout: HBox [SideDock | ContentStack]
        self.main_layout = QHBoxLayout(container)
        self.main_layout.setContentsMargins(0, 0, 0, 0)
        self.main_layout.setSpacing(0)

        # 1. Side Dock (Left)
        self.side_dock = SideDock()
        self.side_dock.mode_requested.connect(self.handle_dock_mode)
        self.main_layout.addWidget(self.side_dock)

        # 2. Content Area (Z-Stack); StackAll shows all layers at once.
        self.content_area = QWidget()
        self.content_layout = QStackedLayout(self.content_area)
        self.content_layout.setStackingMode(QStackedLayout.StackAll)
        self.main_layout.addWidget(self.content_area)

        # Layer 0: Aurora Canvas (Background)
        self.aurora = AuroraCanvasWidget()
        self.content_layout.addWidget(self.aurora)

        # Layer 1: Holographic Panel (Teaching Mode)
        self.holo_panel = HoloPanel()
        self.holo_panel.setVisible(False)
        self.content_layout.addWidget(self.holo_panel)

        # Layer 2: Workspace Grid (Productivity Mode)
        self.workspace = WorkspaceWidget()
        self.workspace.setAttribute(Qt.WA_TranslucentBackground)
        self.workspace.setVisible(False)
        self.content_layout.addWidget(self.workspace)

        # Layer 3: Avatar (Overlay)
        self.avatar = AvatarHeroineWidget()
        self.avatar.setVisible(False)
        self.content_layout.addWidget(self.avatar)

        # Layer 5: Notification Tray
        self.notif_tray = NotificationTray()
        self.notif_tray.setVisible(False)
        self.content_layout.addWidget(self.notif_tray)

        # Layer 6: Settings Widget
        self.settings_widget = SettingsWidget()
        self.settings_widget.setVisible(False)
        self.content_layout.addWidget(self.settings_widget)

        # Layer 4: UI Controls (Command Bar + Status)
        self.ui_layer = QWidget()
        self.ui_layer.setAttribute(Qt.WA_TranslucentBackground)
        self.ui_layer.setAttribute(Qt.WA_TransparentForMouseEvents)
        self.ui_layout = QVBoxLayout(self.ui_layer)
        self.ui_layout.setContentsMargins(30, 20, 30, 20)

        top_bar = QHBoxLayout()
        top_bar.addStretch()
        self.status_label = QLabel("HOME")
        self.status_label.setStyleSheet("color: rgba(255, 255, 255, 0.4); font-size: 10px; font-weight: bold; letter-spacing: 2px;")
        top_bar.addWidget(self.status_label)
        self.ui_layout.addLayout(top_bar)

        self.ui_layout.addSpacing(20)
        self.ui_layout.setAlignment(Qt.AlignTop)

        self.cmd_bar = EtheraCommandBar()
        self.cmd_bar.setFixedWidth(600)
        self.cmd_bar.setVisible(False)
        self.cmd_bar.returnPressed.connect(self.check_command)
        self.ui_layout.addWidget(self.cmd_bar, 0, Qt.AlignCenter)
        self.ui_layout.addStretch()

        self.content_layout.addWidget(self.ui_layer)

        # Initialize State — previously missing entirely.
        # TODO(review): confirm AppState() takes no constructor arguments.
        self.state = AppState()

        # Initialize Spine & Brain (deferred imports avoid a cycle at module load)
        from core.tools.router import ToolRouter
        from core.agent import IntelligentAgent
        self.tool_router = ToolRouter.instance()
        self.agent = IntelligentAgent()

        # Connect Signals
        self.tool_router.command_completed.connect(self._on_tool_completed)
        # Connect Visibility Signals (NEXT 6)
        self.agent.thought_emitted.connect(self._on_agent_thought)

        # Dual-Surface Emission (Timeline + Terminal)
        self.agent.tool_invocation_emitted.connect(self.workspace.terminal.log_tool_call)
        self.agent.tool_invocation_emitted.connect(self.workspace.timeline.log_tool_call)

        self.agent.tool_result_emitted.connect(self.workspace.terminal.log_tool_result)
        self.agent.tool_result_emitted.connect(self.workspace.timeline.log_tool_result)

        self.agent.task_result_card_emitted.connect(self.workspace.terminal.log_result_card)
        self.agent.task_result_card_emitted.connect(self.workspace.timeline.log_result_card)

        self.agent.task_completed.connect(self._on_agent_task_done)

        # Ensure we start in Home Mode
        self.handle_dock_mode("home")

    def _on_agent_thought(self, thought):
        # Pipe all agent thoughts to both Timeline and Terminal for visibility
        self.workspace.terminal.log_thought(thought)
        self.workspace.timeline.log_thought(thought)

    def check_command(self):
        """Dispatch a command-bar entry: UI command, agent task, or chat."""
        text = self.cmd_bar.text().lower().strip()
        if not text:
            return
        self.cmd_bar.clear()

        # 1. Direct UI Commands
        if "regression" in text or "soul test" in text:
            self.run_teaching_sequence()
        elif text.startswith("theme "):
            # Theme logic
            pass

        # 2. Agentic Commands (NEXT 2)
        elif "fix" in text or "edit" in text or "summarize" in text or "analyze" in text:
            if self.state.mode != "workspace":
                self.handle_dock_mode("workspace")
            self.agent.execute_task(text)

        # 3. Simple Chat
        elif "hello" in text:
            from core.voice import VoiceEngine
            VoiceEngine.instance().speak("Hello. How can I assist you in the workspace today?")

    def _on_agent_task_done(self, success, summary):
        """Announce task completion and mirror the result to a log file."""
        # 1. Voice feedback
        from core.voice import VoiceEngine
        if success:
            self.avatar.perform_gesture("nod")
            VoiceEngine.instance().speak(f"Task complete. {summary}")
        else:
            VoiceEngine.instance().speak(f"Task failed. {summary}")

        # 2. Fallback Ladder (File Backup); best effort, but no bare except.
        try:
            os.makedirs("logs", exist_ok=True)
            with open("logs/last_result.txt", "w", encoding="utf-8") as f:
                f.write(f"STATUS: {'SUCCESS' if success else 'FAILED'}\n")
                f.write(f"SUMMARY: {summary}\n")
        except Exception:
            pass

    def run_teaching_sequence(self):
        """Summon the avatar, then kick off the regression teaching flow."""
        from core.voice import VoiceEngine
        if not self.avatar.isVisible():
            self.handle_dock_mode("avatar")

        # Manifest Rule: Gestures > speech
        self.trigger_biological_action("summon")

        # Delay speech to allow gesture to lead
        QTimer.singleShot(1000, lambda: VoiceEngine.instance().speak("Let's look at Regression Analysis."))
        QTimer.singleShot(2500, self._start_holo_flow)

    def trigger_biological_action(self, gesture_name: str):
        """NEXT 5: Biologically connected Avatar actions."""
        self.avatar.perform_gesture(gesture_name)

        if gesture_name == "summon":
            # Potential UI sound or highlight
            pass
        elif gesture_name == "swipe":
            # Cycle modes
            modes = ["home", "avatar", "workspace", "notifications", "settings"]
            curr = getattr(self, "_curr_mode_idx", 0)
            next_idx = (curr + 1) % len(modes)
            self._curr_mode_idx = next_idx
            self.handle_dock_mode(modes[next_idx])
        elif gesture_name == "dismiss":
            self.holo_panel.setVisible(False)
            self.handle_dock_mode("home")

    def _start_holo_flow(self):
        """Position and reveal the holo panel, then schedule step 1."""
        self.holo_panel.move(650, 180)
        self.holo_panel.setVisible(True)
        self.holo_panel.show_teaching_sequence()
        self.holo_panel.raise_()

        QTimer.singleShot(3000, lambda: self._advance_teaching_step(1))

    def _advance_teaching_step(self, step_idx):
        """Narrate and advance one teaching step; self-schedules until step 4."""
        from core.voice import VoiceEngine
        # Manifest Rule: No typing animations. Space > clutter.
        dialogues = {
            1: "Observation of raw data.",
            2: "Pattern discovery.",
            3: "Residual analysis.",
            4: "The model is complete."
        }

        if step_idx in dialogues:
            # Avatar leads with a nod, then speaks
            self.avatar.perform_gesture("nod")
            VoiceEngine.instance().speak(dialogues[step_idx])

        self.holo_panel.next_step()
        self.holo_panel.raise_()

        if step_idx < 4:
            delay = 4000
            QTimer.singleShot(delay, lambda: self._advance_teaching_step(step_idx + 1))
        else:
            # Final gesture to close
            QTimer.singleShot(3000, lambda: self.trigger_biological_action("dismiss"))

    def handle_dock_mode(self, mode: str):
        """Switch the visible layer set and app state for the requested mode.

        (Previously this method was defined twice; the shadowed first copy
        has been removed.)
        """
        if mode == "home":
            self.status_label.setText("HOME")
        elif mode == "avatar":
            self.status_label.setText("AVATAR ACTIVE")
        elif mode == "workspace":
            self.status_label.setText("WORKSPACE")
        elif mode == "settings":
            self.status_label.setText("SETTINGS")
        else:
            self.status_label.setText(mode.upper())

        # CRITICAL: Visibility Management — hide everything, then show the mode.
        self.avatar.setVisible(False)
        self.workspace.setVisible(False)
        self.holo_panel.setVisible(False)
        self.cmd_bar.setVisible(False)
        self.notif_tray.setVisible(False)
        self.settings_widget.setVisible(False)

        if mode == "avatar":
            self.avatar.setVisible(True)
            self.cmd_bar.setVisible(True)
            self.avatar.raise_()
            self.ui_layer.raise_()
            self.avatar.acknowledge()
            self.state.set_mode("focus", reason="avatar_summon")
        elif mode == "home":
            self.state.set_mode("idle", reason="home_dock")
        elif mode == "workspace":
            self.workspace.setVisible(True)
            self.workspace.raise_()
            self.state.set_mode("focus", reason="workspace_dock")
        elif mode == "notifications":
            self.notif_tray.setVisible(True)
            self.notif_tray.raise_()
            self.ui_layer.raise_()
            self.state.set_mode("idle", reason="notif_check")
        elif mode == "settings":
            self.settings_widget.setVisible(True)
            self.settings_widget.raise_()
            self.ui_layer.raise_()
            self.state.set_mode("idle", reason="settings_view")

    def _on_tool_completed(self, result):
        """Gesture + voice acknowledgement for a finished tool command."""
        if result.get("success"):
            self.avatar.perform_gesture("nod")
            from core.voice import VoiceEngine
            # Verbosity filter will handle this if session is long
            VoiceEngine.instance().speak("Executed successfully.")
        else:
            self.avatar.perform_gesture("nod")
            err = result.get("stderr", result.get("error", ""))
            if err:
                print(f"[Main] Tool Error: {err}")
diff --git a/core/ui/main_window_v2.py b/core/ui/main_window_v2.py
deleted file mode 100644
index b958549..0000000
--- a/core/ui/main_window_v2.py
+++ /dev/null
@@ -1,878 +0,0 @@
-from __future__ import annotations
-
-from core.ui.command_palette import CommandPalette
-from core.workspace_ai.workspace_controller import WorkspaceController
-
-from PySide6.QtWidgets import (
- QApplication,
- QMainWindow,
- QWidget,
- QVBoxLayout,
- QHBoxLayout,
- QLabel,
- QPushButton,
- QLineEdit,
- QTextEdit,
- QFrame,
-)
-from PySide6.QtCore import Qt, Signal, Slot
-from PySide6.QtGui import QFont
-from PySide6.QtCore import QTimer
-from PySide6.QtWidgets import (
- QMainWindow,
- QWidget,
- QHBoxLayout,
- QVBoxLayout,
- QLabel,
- QTextEdit,
- QPushButton,
- QFrame,
-)
-
-from core.ui.command_palette import CommandPalette
-from core.ui.avatar_heroine_widget import AvatarHeroineWidget
-from core.ui.aurora_canvas_widget import AuroraCanvasWidget
-
-from core.gestures.gesture_engine import GestureEngine
-from core.gestures.presets import regression_preset
-from core.behavior.behavior_planner import plan_behavior
-from core.avatar_motion.motion_controller import AvatarMotionController
-from core.ui.beat_sync import BeatSyncScheduler
-from core.audio_analysis.beat_detector import estimate_bpm_and_beats
-from core.audio_analysis.song_resolver import resolve_song
-from core.audio_analysis.beat_to_ui import beats_to_ui_effects
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.os_adapter import OSAdapter
-from core.os_pipeline import OSPipeline
-
-
-def _etherea_ui_log(msg: str):
- try:
- with open("etherea_boot.log", "a", encoding="utf-8") as f:
- f.write(msg + "\n")
- except Exception:
- pass
-from core.workspace_ai.workspace_controller import WorkspaceController
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
-
-# 🧠AppState drives expressive_mode ("dance"/"humming"/"idle") used by AvatarHeroineWidget
-try:
- from core.state import AppState
-except Exception:
- AppState = None
-
-# optional gesture / beat sync extras (kept safe)
-try:
- from core.gestures.gesture_engine import GestureEngine
- from core.gestures.presets import regression_preset
-except Exception:
- GestureEngine = None
- regression_preset = None
-
-try:
- from core.ui.beat_sync import BeatSyncScheduler
-except Exception:
- BeatSyncScheduler = None
-
-try:
- from core.voice_engine import get_voice_engine
-except Exception:
- get_voice_engine = None
-
-# ✅ Use existing agent brain + optional FocusGuardian supervisor
-try:
- from core.agent import IntelligentAgent, FocusGuardian
-except Exception:
- IntelligentAgent = None
- FocusGuardian = None
-
-# signals are optional; window still runs without them
-try:
- from core.signals import signals
-except Exception:
- signals = None
-
-
-def _etherea_ui_log(msg: str) -> None:
- try:
- with open("etherea_boot.log", "a", encoding="utf-8") as f:
- f.write(msg + "\n")
- except Exception:
- pass
-
-
-class EthereaMainWindowV2(QMainWindow):
- """
- Hero demo window:
- - Avatar + Aurora + Console
- - Single command pipeline (typed + voice) -> WorkspaceController.handle_command()
- - Mode layout switching (study/coding/exam/calm/deep_work)
- - Focus timer UI + avatar persona hint
- - Voice: push-to-talk + optional wake loop
-
- Spice layer:
- - Presenter mode (ambient pulses, alive UI)
- - Co-present script: avatar speaks short lines that complement YOU
- Commands: present on/off, co-present, next, skip
- - Avatar commands: dance / hum / surprise (always work)
- """
-
- def __init__(self) -> None:
- super().__init__()
- _etherea_ui_log("UI: EthereaMainWindowV2 created")
- self.setWindowTitle("Etherea OS — Heroine Avatar v2")
- self.resize(1200, 720)
-
- root = QWidget()
- self.setCentralWidget(root)
-
- main = QHBoxLayout(root)
- main.setContentsMargins(14, 14, 14, 14)
- main.setSpacing(12)
-
- # ---- Core managers ----
- self.workspace_manager = WorkspaceManager()
- self.ws_controller = WorkspaceController(self.workspace_manager)
-
- self.workspace_registry = WorkspaceRegistry()
- self.action_registry = ActionRegistry.default()
- self.aurora_state_store = AuroraStateStore(self.action_registry)
- self.aurora_pipeline = AuroraDecisionPipeline(
- registry=self.action_registry,
- workspace_registry=self.workspace_registry,
- workspace_manager=self.workspace_manager,
- state_store=self.aurora_state_store,
- log_cb=self.log,
- )
-
- # ---- LEFT: Command + Avatar ----
- left = QVBoxLayout()
- left.setSpacing(10)
-
- self.command_palette = CommandPalette()
- self.command_palette.submitted.connect(lambda cmd: self.execute_user_command(cmd, source="ui"))
-
- self.avatar = AvatarHeroineWidget()
- self.avatar.setStyleSheet("""
- QWidget { background: #0b0b12; border-radius: 18px; }
- """)
- self.aurora_ring = self.avatar
- left.addWidget(self.command_palette)
- left.addWidget(self.avatar, 1)
-
- self.gestures = GestureEngine(self.avatar, on_log=self.log)
- self.motion = AvatarMotionController()
- self.beatsync = BeatSyncScheduler(self._apply_ui_effect, log_cb=self.log)
- self.workspace_manager = WorkspaceManager()
- self.ws_controller = WorkspaceController(self.workspace_manager)
- self.workspace_registry = WorkspaceRegistry()
- self.os_pipeline = OSPipeline(OSAdapter(dry_run=False))
- self.action_registry = ActionRegistry.default()
- self.aurora_state_store = AuroraStateStore(self.action_registry)
- self.aurora_pipeline = AuroraDecisionPipeline(
- registry=self.action_registry,
- workspace_registry=self.workspace_registry,
- workspace_manager=self.workspace_manager,
- state_store=self.aurora_state_store,
- os_pipeline=self.os_pipeline,
- log_cb=self.log,
- )
-
- # Controls row
- self.avatar.setStyleSheet("QWidget { background: #0b0b12; border-radius: 18px; }")
- self.aurora_ring = self.avatar # ring FX exposed by avatar widget
-
- controls = QHBoxLayout()
-
- self.btn_voice = QPushButton("ðŸŽ™ï¸ Voice (push)")
- self.btn_demo = QPushButton("Demo: Regression")
- self.btn_theme = QPushButton("Theme: Dark/Light")
- self.btn_accent = QPushButton("Accent: Violet/Blue")
- self.btn_present = QPushButton("🎥 Present")
- self.btn_copresent = QPushButton("🤠Co-present")
-
- self.btn_voice.clicked.connect(self._voice_push_to_talk)
- self.btn_demo.clicked.connect(self.run_regression_demo)
- self.btn_theme.clicked.connect(self.toggle_theme)
- self.btn_accent.clicked.connect(self.toggle_accent)
- self.btn_present.clicked.connect(self.toggle_presenter_mode)
- self.btn_copresent.clicked.connect(self.start_copresent)
-
- controls.addWidget(self.btn_voice)
- controls.addWidget(self.btn_demo)
- controls.addWidget(self.btn_theme)
- controls.addWidget(self.btn_accent)
- controls.addWidget(self.btn_present)
- controls.addWidget(self.btn_copresent)
-
- left.addWidget(self.command_palette)
- left.addWidget(self.avatar, 1)
- left.addLayout(controls)
-
- # ---- RIGHT: Aurora + Console + Status ----
- right = QVBoxLayout()
- right.setSpacing(10)
-
- title = QLabel("Etherea Console")
- title.setStyleSheet("font-size:18px; font-weight:700; color:white;")
- right.addWidget(title)
-
- self.aurora_canvas = AuroraCanvasWidget()
- self.aurora_canvas.intent_requested.connect(self._on_aurora_intent)
- self.aurora_state_store.subscribe(self.aurora_canvas.apply_state)
- right.addWidget(self.aurora_canvas)
-
- self.console = QTextEdit()
- self.console.setReadOnly(True)
- self.console.setStyleSheet(
- "QTextEdit { background:#11121a; color:#e8e8ff; border:1px solid #22243a;"
- "border-radius:14px; padding:10px; font-family:monospace; font-size:13px; }"
- )
- right.addWidget(self.console, 1)
-
- status = QFrame()
- status.setStyleSheet(
- "QFrame { background:#101018; border:1px solid #1f2135; border-radius:16px; padding:10px; }"
- "QLabel { color:#dcdcff; font-size:13px; }"
- )
- s = QVBoxLayout(status)
-
- self.l_mode = QLabel("Mode: study")
- self.l_focus = QLabel("Focus: --")
- self.l_stress = QLabel("Stress: --")
- self.l_energy = QLabel("Energy: --")
- self.l_timer = QLabel("Focus timer: --")
-
- s.addWidget(self.l_mode)
- s.addWidget(self.l_timer)
- s.addWidget(self.l_focus)
- s.addWidget(self.l_stress)
- s.addWidget(self.l_energy)
- right.addWidget(status)
-
- main.addLayout(left, 1)
- main.addLayout(right, 2)
-
- # ---- optional gesture / beat extras ----
- self.gestures = None
- if GestureEngine is not None:
- try:
- self.gestures = GestureEngine(self.avatar, on_log=self.log)
- except Exception:
- self.gestures = None
-
- self.beatsync = None
- if BeatSyncScheduler is not None:
- try:
- self.beatsync = BeatSyncScheduler(self._apply_ui_effect, log_cb=self.log)
- except Exception:
- self.beatsync = None
-
- # ---- internal UI state ----
- self._theme_is_dark = True
- self._accent_alt = False
- self._current_mode = "study"
-
- # ---- Presenter state ----
- self._presenter_on = False
- self._copresent_queue = []
- self._copresent_idx = 0
-
- # ---- focus timer tick ----
- self._focus_timer_tick = QTimer(self)
- self._focus_timer_tick.setInterval(1000)
- self._focus_timer_tick.timeout.connect(self._tick_focus)
- self._focus_timer_tick.start()
-
- # ---- ambient presenter tick (always running but only animates when presenter ON) ----
- self._ambient_tick = QTimer(self)
- self._ambient_tick.setInterval(1100)
- self._ambient_tick.timeout.connect(self._presenter_ambient_tick)
- self._ambient_tick.start()
-
- # ---- signal wiring ----
- if signals is not None:
- try:
- if hasattr(signals, "emotion_updated"):
- signals.emotion_updated.connect(self.on_emotion_updated)
- if hasattr(signals, "command_received_ex"):
- signals.command_received_ex.connect(self._on_command_ex)
- elif hasattr(signals, "command_received"):
- signals.command_received.connect(lambda cmd: self.execute_user_command(cmd, source="voice"))
- if hasattr(signals, "mode_changed"):
- signals.mode_changed.connect(self._on_mode_changed_signal)
- if hasattr(signals, "focus_started"):
- signals.focus_started.connect(self._on_focus_started_signal)
- if hasattr(signals, "focus_stopped"):
- signals.focus_stopped.connect(self._on_focus_stopped_signal)
- if hasattr(signals, "voice_state"):
- signals.voice_state.connect(self._on_voice_state)
- except Exception:
- pass
-
- self.log_ui("✅ Heroine avatar online (Disney-like 2.5D).")
- self.log_ui("🎠Smooth blink + breathing + glow aura + calm motion.")
- self.log_ui("🔗 EI signals: " + ("connected" if signals is not None else "not found (still OK)"))
- self._sync_aurora_state()
-
- def log_ui(self, msg: str) -> None:
- self.console.append(msg)
-
- def log(self, msg: str) -> None:
- if signals is not None and hasattr(signals, "system_log"):
- try:
- signals.system_log.emit(msg)
- return
- except Exception:
- pass
- self.log_ui(msg)
-
- def _sync_aurora_state(self) -> None:
- current = self.workspace_registry.get_current()
- emotion_tag = getattr(self.avatar, "emotion_tag", "calm")
- self.aurora_state_store.update(
- workspace_id=current.workspace_id if current else None,
- workspace_name=current.name if current else None,
- session_active=current is not None,
- last_saved=current.last_saved if current else None,
- emotion_tag=emotion_tag,
- )
-
- def _update_aurora_from_ei(self, vec: dict) -> None:
- emotion_tag = getattr(self.avatar, "emotion_tag", "calm")
- self.aurora_state_store.update(
- focus=float(vec.get("focus", 0.4)),
- stress=float(vec.get("stress", 0.2)),
- energy=float(vec.get("energy", 0.6)),
- emotion_tag=emotion_tag,
- )
-
- def _on_aurora_intent(self, action_id: str) -> None:
- result = self.aurora_pipeline.handle_intent(action_id)
- self.log(f"🌌 Aurora action: {result}")
- # ---- voice engine instance ----
- self.voice_engine = None
- if get_voice_engine is not None:
- try:
- self.voice_engine = get_voice_engine()
- except Exception:
- self.voice_engine = None
-
- # ---- start wake loop (optional) ----
- if self.voice_engine is not None:
- try:
- self.voice_engine.start_wake_word_loop()
- self.log("ðŸŽ™ï¸ Voice wake loop armed (say: 'Etherea ...').")
- except Exception as e:
- self.log(f"âš ï¸ Voice wake loop not started: {e}")
- else:
- self.log("ðŸŽ™ï¸ Voice engine not available (still OK).")
-
- # ---- Agent brain + FocusGuardian supervisor (optional-safe) ----
- self.agent = None
- if IntelligentAgent is not None:
- try:
- self.agent = IntelligentAgent()
- self.log("🧠IntelligentAgent ready (Plan → Execute → Observe → Verify).")
- except Exception as e:
- self.log(f"âš ï¸ IntelligentAgent init failed: {e}")
-
- self.guardian = None
- if FocusGuardian is not None:
- try:
- self.guardian = FocusGuardian(self.ws_controller, voice_engine=self.voice_engine, log_cb=self.log)
- self.guardian.start()
- except Exception as e:
- self.log(f"âš ï¸ FocusGuardian failed: {e}")
-
- # ---- boot logs ----
- self.log_ui("🎂 Etherea Birthday Build — presenter spice online.")
- self.log_ui("✅ Commands: present on/off | co-present | next | skip | dance | hum | surprise")
- self._sync_aurora_state()
-
- # -------------------------
- # Logging
- # -------------------------
- def log_ui(self, msg: str) -> None:
- self.console.append(msg)
-
- def log(self, msg: str) -> None:
- if signals is not None and hasattr(signals, "system_log"):
- try:
- signals.system_log.emit(msg)
- return
- except Exception:
- pass
- self.log_ui(msg)
-
- # -------------------------
- # Presenter Mode (Spice)
- # -------------------------
- def toggle_presenter_mode(self, force: Optional[bool] = None) -> None:
- if force is None:
- self._presenter_on = not self._presenter_on
- else:
- self._presenter_on = bool(force)
-
- state = "ON" if self._presenter_on else "OFF"
- self.log(f"🎥 Presenter mode: {state}")
-
- try:
- self.aurora_ring.pulse(intensity=1.25 if self._presenter_on else 1.05, duration=0.28)
- except Exception:
- pass
-
- def _presenter_ambient_tick(self) -> None:
- if not self._presenter_on:
- return
- try:
- self.aurora_ring.pulse(intensity=1.12, duration=0.20)
- except Exception:
- pass
-
- def start_copresent(self) -> None:
- self.toggle_presenter_mode(force=True)
-
- self._copresent_queue = [
- ("greet", "Hi Professor. I’m Etherea — a desktop-first living OS. Bru and I will co-present today."),
- ("overview", "My core loop is: sense, decide, adapt. The user stays in control with explicit modes."),
- ("avatar", "I’m the emotional interface. I react to context without taking silent control."),
- ("workspace", "The workspace stays task-focused: study, coding, and exam modes. Exam mode minimizes distractions."),
- ("demo", "We can trigger visible behavior: dance for expressiveness, surprise for UI effects, and voice for hands-free commands."),
- ("close", "Goal: a comfortable, human-feeling desktop companion that improves productivity and reduces cognitive load."),
- ]
- self._copresent_idx = 0
- self.log("🤠Co-present started. Use: next / skip")
- self._copresent_say_current()
-
- def _copresent_say_current(self) -> None:
- if not self._copresent_queue:
- self.log("🤠Co-present queue empty.")
- return
- if self._copresent_idx < 0 or self._copresent_idx >= len(self._copresent_queue):
- self.log("🤠Co-present finished.")
- return
-
- tag, line = self._copresent_queue[self._copresent_idx]
- self.log(f"🤠[{tag.upper()}] {line}")
-
- try:
- if self.voice_engine is not None:
- self.voice_engine.speak(line, language="en-IN")
- except Exception:
- pass
-
- try:
- self.aurora_ring.pulse(intensity=1.35, duration=0.32)
- except Exception:
- pass
-
- def _copresent_next(self) -> None:
- if not self._copresent_queue:
- self.log("🤠No co-present running.")
- return
- self._copresent_idx += 1
- if self._copresent_idx >= len(self._copresent_queue):
- self.log("🤠Co-present finished. Presenter mode stays ON.")
- return
- self._copresent_say_current()
-
- def _copresent_skip(self) -> None:
- if not self._copresent_queue:
- self.log("🤠No co-present running.")
- return
- self.log("âï¸ Skipped.")
- self._copresent_next()
-
- # -------------------------
- # Avatar expressive control
- # -------------------------
- def _set_expressive_mode(self, mode: str) -> None:
- if AppState is None:
- self.log("âš ï¸ AppState not available; expressive animations not wired.")
- return
- try:
- AppState.instance().set_expressive_mode(mode)
- self.log(f"✨ Avatar expressive_mode → {mode}")
- try:
- self.aurora_ring.pulse(intensity=1.45, duration=0.28)
- except Exception:
- pass
- except Exception as e:
- self.log(f"âš ï¸ set_expressive_mode failed: {e}")
-
- def _handle_avatar_commands(self, cmd: str) -> bool:
- low = (cmd or "").strip().lower()
-
- # Dance
- if low in ("dance", "start dance", "dance mode", "avatar dance"):
- self._set_expressive_mode("dance")
- return True
- if low in ("stop dance", "dance off", "end dance"):
- self._set_expressive_mode("idle")
- return True
-
- # Humming
- if low in ("hum", "humming", "start hum", "start humming", "avatar hum"):
- self._set_expressive_mode("humming")
- return True
- if low in ("stop hum", "stop humming", "hum off"):
- self._set_expressive_mode("idle")
- return True
-
- # Surprise
- if low in ("surprise", "avatar surprise", "magic", "sparkle"):
- try:
- if hasattr(self.avatar, "perform_gesture"):
- self.avatar.perform_gesture("summon")
- self.log("🎇 Surprise triggered (summon + glow).")
- try:
- self.aurora_ring.pulse(intensity=1.8, duration=0.40)
- except Exception:
- pass
- try:
- self.aurora_ring.pulse(intensity=1.25, duration=0.18)
- except Exception:
- pass
- except Exception as e:
- self.log(f"âš ï¸ Surprise failed: {e}")
- return True
-
- # Presenter commands
- if low in ("present on", "present", "presenter on", "presentation on"):
- self.toggle_presenter_mode(force=True)
- return True
- if low in ("present off", "presenter off", "presentation off"):
- self.toggle_presenter_mode(force=False)
- return True
- if low in ("co-present", "copresent", "present with me", "start presentation"):
- self.start_copresent()
- return True
- if low in ("next", "next point", "continue"):
- self._copresent_next()
- return True
- if low in ("skip", "skip point"):
- self._copresent_skip()
- return True
-
- return False
-
- # -------------------------
- # Aurora state
- # -------------------------
- def _sync_aurora_state(self) -> None:
- current = self.workspace_registry.get_current()
- emotion_tag = getattr(self.avatar, "emotion_tag", "calm")
- self.aurora_state_store.update(
- current_mode=self._current_mode,
- workspace_id=current.workspace_id if current else None,
- workspace_name=current.name if current else None,
- session_active=current is not None,
- last_saved=current.last_saved if current else None,
- emotion_tag=emotion_tag,
- )
-
- def _update_aurora_from_ei(self, vec: dict) -> None:
- emotion_tag = getattr(self.avatar, "emotion_tag", "calm")
- try:
- self.aurora_state_store.update(
- focus=float(vec.get("focus", 0.4)),
- stress=float(vec.get("stress", 0.2)),
- energy=float(vec.get("energy", 0.6)),
- emotion_tag=emotion_tag,
- )
- except Exception:
- pass
-
- def _on_aurora_intent(self, action_id: str) -> None:
- result = self.aurora_pipeline.handle_intent(action_id)
- self.log(f"🌌 Aurora action: {result}")
-
- # -------------------------
- # Theme / Accent
- # -------------------------
- def toggle_theme(self) -> None:
- self._theme_is_dark = not self._theme_is_dark
- try:
- self.avatar.set_theme_mode("dark" if self._theme_is_dark else "light")
- except Exception:
- pass
- self.log(f"🎨 Theme switched → {'dark' if self._theme_is_dark else 'light'}")
-
- def toggle_accent(self) -> None:
- self._accent_alt = not self._accent_alt
- try:
- if self._accent_alt:
- self.avatar.set_accent_colors((120, 220, 255), (120, 255, 210))
- self.log("💠Accent switched → cyan/teal")
- else:
- self.avatar.set_accent_colors((160, 120, 255), (255, 210, 120))
- self.log("💜 Accent switched → violet/gold")
- except Exception:
- pass
-
- # -------------------------
- # EI updates
- # -------------------------
- def on_emotion_updated(self, vec: dict) -> None:
- f = vec.get("focus", None)
- s = vec.get("stress", None)
- e = vec.get("energy", None)
-
- if isinstance(f, (int, float)):
- self.l_focus.setText(f"Focus: {float(f):.2f}")
- if isinstance(s, (int, float)):
- self.l_stress.setText(f"Stress: {float(s):.2f}")
- if isinstance(e, (int, float)):
- self.l_energy.setText(f"Energy: {float(e):.2f}")
- if isinstance(c, (int, float)):
- self.l_curiosity.setText(f"Curiosity: {float(c):.2f}")
- self._update_aurora_from_ei(vec)
-
- self._update_aurora_from_ei(vec)
-
- try:
- self.avatar.update_ei(vec)
- except Exception:
- pass
-
- # -------------------------
- # Demo gesture preset
- # -------------------------
- def run_regression_demo(self) -> None:
- if self.gestures is None or regression_preset is None:
- self.log("📚 Demo not available in this build (still OK).")
- return
- self.log("📚 Demo: Explaining regression (gestures + ring FX).")
- plan = regression_preset()
- self.gestures.play(plan)
- # 💫 Beat-synced ring effects (real-time)
- try:
- self.beatsync.load(plan.get('ui_effects') or [])
- self.beatsync.start()
- except Exception as _:
- pass
-
- # NEW: if dance requested, generate an original routine
- d = plan.get('dance')
- if d:
- self.motion.play_dance(
- duration_s=float(d.get('duration_s', 18.0)),
- bpm=float(d.get('bpm', 128.0)),
- style=str(d.get('style', 'bolly_pop')),
- energy=1.25
- )
-
- m = plan.get('motion') or {}
- clip = m.get('clip', 'idle_breathe_01')
- intensity = float(m.get('intensity', 1.0))
- loop = bool(m.get('loop', True))
- self.motion.play(clip, intensity=intensity, loop=loop)
-
-
- def speak_and_perform(self, text: str, language: str = "en"):
- """Speak + run gesture timeline based on behavior planner."""
- try:
- plan = regression_preset()
- self.gestures.play(plan)
- if self.beatsync is not None:
- try:
- self.beatsync.load(plan.get("ui_effects") or [])
- self.beatsync.start()
- except Exception:
- pass
- except Exception as e:
- self.log(f"âš ï¸ Regression demo failed: {e}")
-
- def _apply_ui_effect(self, effect: dict) -> None:
- etype = effect.get("type")
- if etype == "ring_pulse":
- intensity = float(effect.get("intensity", 1.2))
- dur = float(effect.get("dur", 0.2))
- try:
- self.aurora_ring.pulse(intensity=intensity, duration=dur)
- except Exception:
- pass
-
- # -------------------------
- # Voice
- # -------------------------
- def _voice_push_to_talk(self) -> None:
- if self.voice_engine is None:
- self.log("ðŸŽ™ï¸ Voice engine not installed/available (still OK).")
- return
-
- def worker():
- ve = self.voice_engine
- if ve is None:
- self.log("ðŸŽ™ï¸ Voice engine unavailable.")
- return
- if not getattr(ve, "has_mic", False):
- self.log("ðŸŽ™ï¸ No microphone detected.")
- return
- self.log("ðŸŽ™ï¸ Listening (push)…")
- text = ve.listen_once(timeout_s=5, phrase_limit_s=10)
- if not text:
- self.log("ðŸŽ™ï¸ Heard nothing.")
- return
-
- low = text.lower()
- cmd = text
- for w in ("etherea", "ethera"):
- if w in low:
- cmd = low.split(w, 1)[1].strip(" ,.!:;") or "hello etherea"
- break
-
- self.execute_user_command(cmd, source="voice")
- try:
- ve.speak("Done.", language="en-IN")
- except Exception:
- pass
-
- threading.Thread(target=worker, daemon=True).start()
-
- def _on_voice_state(self, state: str, meta: dict) -> None:
- if state == "LISTENING":
- self.log("ðŸŽ™ï¸ Voice: LISTENING")
- elif state == "THINKING":
- self.log("🧠Voice: THINKING")
-
- # -------------------------
- # Unified command pipeline
- # -------------------------
- def _on_command_ex(self, text: str, meta: dict) -> None:
- source = str((meta or {}).get("source", "voice"))
- self.execute_user_command(text, source=source)
-
- def execute_user_command(self, cmd: str, *, source: str = "ui") -> None:
- cmd = (cmd or "").strip()
- if not cmd:
- return
-
- self.log(f"âš¡ CMD[{source}]: {cmd}")
-
- # ✅ avatar/presenter commands always handled here
- if self._handle_avatar_commands(cmd):
- self._sync_aurora_state()
- return
-
- try:
- out = self.ws_controller.handle_command(cmd, source=source)
- except Exception as e:
- self.log(f"⌠command failed: {e}")
- return
-
- if isinstance(out, dict) and out.get("action") == "blocked_by_focus_policy":
- reply = str(out.get("reply", "Locked in.")).strip()
- self.log(reply)
- if self.voice_engine is not None:
- try:
- self.voice_engine.speak(reply, language="en-IN")
- except Exception:
- pass
- self.aurora_state_store.update(current_mode=str(mode))
- self._sync_aurora_state()
- return
-
- self.log(f"✅ OUT: {out}")
-
- if isinstance(out, dict) and out.get("action") == "set_mode":
- mode = str(out.get("mode") or "study")
- self._apply_mode_layout(mode)
- try:
- self.aurora_ring.pulse(intensity=1.35, duration=0.35)
- except Exception:
- print("command failed:", e)
- pass
-
- if isinstance(out, dict) and out.get("action") == "self_explain":
- self.log("\n" + str(out.get("text", "")).strip())
-
- if isinstance(out, dict) and out.get("action") == "greet":
- reply = str(out.get("reply", "")).strip()
- if reply:
- self.log(reply)
- if self.voice_engine is not None:
- try:
- self.voice_engine.speak(reply, language="en-IN")
- except Exception:
- pass
-
- self._sync_aurora_state()
-
- # -------------------------
- # Mode layouts
- # -------------------------
- def _apply_mode_layout(self, mode: str) -> None:
- self._current_mode = mode
- self.l_mode.setText(f"Mode: {mode}")
-
- if mode in ("coding",):
- self.aurora_canvas.setVisible(False)
- self.avatar.setVisible(True)
- self.console.setVisible(True)
- elif mode in ("exam",):
- self.aurora_canvas.setVisible(False)
- self.avatar.setVisible(False)
- self.console.setVisible(True)
- elif mode in ("calm",):
- self.aurora_canvas.setVisible(True)
- self.avatar.setVisible(True)
- self.console.setVisible(False)
- else:
- self.aurora_canvas.setVisible(True)
- self.avatar.setVisible(True)
- self.console.setVisible(True)
-
- try:
- if hasattr(self.avatar, "set_mode_persona"):
- self.avatar.set_mode_persona(mode)
- except Exception:
- pass
-
- self.aurora_state_store.update(current_mode=mode)
-
- # -------------------------
- # Focus timer UI tick
- # -------------------------
- def _tick_focus(self) -> None:
- try:
- secs = int(self.ws_controller.focus_seconds_left())
- except Exception:
- secs = 0
-
- if secs <= 0:
- self.l_timer.setText("Focus timer: --")
- return
-
- mm = secs // 60
- ss = secs % 60
- self.l_timer.setText(f"Focus timer: {mm:02d}:{ss:02d} remaining")
-
- # -------------------------
- # Signal handlers
- # -------------------------
- def _on_mode_changed_signal(self, mode: str, meta: dict) -> None:
- self._apply_mode_layout(str(mode))
-
- def _on_focus_started_signal(self, minutes: int, meta: dict) -> None:
- self.log(f"â±ï¸ Focus started: {minutes} minutes")
- try:
- self.aurora_ring.pulse(intensity=1.5, duration=0.45)
- except Exception:
- pass
-
- def _on_focus_stopped_signal(self, meta: dict) -> None:
- self.log("â¹ï¸ Focus stopped")
- try:
- self.aurora_ring.pulse(intensity=1.1, duration=0.25)
- except Exception:
- pass
diff --git a/core/ui/notification_tray.py b/core/ui/notification_tray.py
deleted file mode 100644
index 0cc93e5..0000000
--- a/core/ui/notification_tray.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from PySide6.QtWidgets import QWidget, QVBoxLayout, QLabel, QScrollArea
-from PySide6.QtCore import Qt
-from PySide6.QtGui import QColor
-from core.ui.panels import GlassPanel
-from core.notifications import NotificationManager
-
-class NotificationTray(GlassPanel):
- """
- Vertical tray displaying the list of notifications.
- Uses GlassPanel styling but optimized for a list view.
- """
- def __init__(self, parent=None):
- super().__init__(title="NOTIFICATIONS", parent=parent)
- self.resize(350, 600) # Default size, usually managed by layout
-
- # Remove default layout content margins from GlassPanel base if they conflict,
- # but here we'll just add to the existing layout.
-
- # Content Container
- self.content_widget = QWidget()
- self.content_widget.setAttribute(Qt.WA_TranslucentBackground)
- self.content_layout = QVBoxLayout(self.content_widget)
- self.content_layout.setContentsMargins(0, 0, 0, 0)
- self.content_layout.setSpacing(10)
- self.content_layout.addStretch() # Push items up
-
- # Scroll Area
- self.scroll = QScrollArea()
- self.scroll.setWidgetResizable(True)
- self.scroll.setWidget(self.content_widget)
- # Transparent Scroll Area
- self.scroll.setStyleSheet("""
- QScrollArea { background: transparent; border: none; }
- QScrollBar:vertical {
- border: none;
- background: rgba(0,0,0,50);
- width: 6px;
- border-radius: 3px;
- }
- QScrollBar::handle:vertical {
- background: rgba(0, 240, 255, 50);
- border-radius: 3px;
- }
- """)
-
- # Add scroll to the GlassPanel layout
- self.layout.addWidget(self.scroll)
-
- # Connect
- self.manager = NotificationManager.instance()
- self.manager.notification_added.connect(self.refresh)
- self.manager.notifications_cleared.connect(self.refresh)
-
- self.refresh()
-
- def refresh(self):
- # Clear existing items (except stretch)
- while self.content_layout.count() > 1:
- child = self.content_layout.takeAt(0)
- if child.widget():
- child.widget().deleteLater()
-
- notifs = self.manager.get_all()
-
- if not notifs:
- lbl = QLabel("No new notifications.")
- lbl.setStyleSheet("color: rgba(255,255,255,100); font-style: italic;")
- lbl.setAlignment(Qt.AlignCenter)
- self.content_layout.insertWidget(0, lbl)
- return
-
- for n in notifs:
- item = self._create_item(n)
- self.content_layout.insertWidget(self.content_layout.count()-1, item)
-
- def _create_item(self, data):
- w = QWidget()
- w.setStyleSheet("""
- QWidget {
- background-color: rgba(255, 255, 255, 10);
- border-radius: 6px;
- border: 1px solid rgba(255, 255, 255, 20);
- }
- QLabel { background: transparent; border: none; }
- """)
- l = QVBoxLayout(w)
- l.setContentsMargins(15, 12, 15, 12)
- l.setSpacing(4)
-
- title = QLabel(data["title"])
- title.setStyleSheet("color: #00f0ff; font-weight: bold; font-size: 13px;")
-
- msg = QLabel(data["message"])
- msg.setStyleSheet("color: #e0e0e0; font-size: 12px;")
- msg.setWordWrap(True)
-
- time = QLabel(data["timestamp"].strftime("%H:%M"))
- time.setStyleSheet("color: rgba(255,255,255,80); font-size: 10px;")
- time.setAlignment(Qt.AlignRight)
-
- l.addWidget(title)
- l.addWidget(msg)
- l.addWidget(time)
- return w
diff --git a/core/ui/panels.py b/core/ui/panels.py
deleted file mode 100644
index 16480d8..0000000
--- a/core/ui/panels.py
+++ /dev/null
@@ -1,174 +0,0 @@
-from PySide6.QtWidgets import QWidget, QVBoxLayout, QLabel, QGraphicsDropShadowEffect
-from PySide6.QtCore import Qt, QRectF, QPointF
-from PySide6.QtGui import QPainter, QBrush, QColor, QPen, QFont, QPainterPath, QLinearGradient
-
class GlassPanel(QWidget):
    """
    Base class for transparent, rounded, glass-morphic panels.

    Subclasses override paintEvent(), call super().paintEvent() first to get
    the background/title/separator, then draw their own content below y=40.
    """
    def __init__(self, title="Panel", parent=None):
        super().__init__(parent)
        self.title = title  # drawn upper-cased in paintEvent
        # NOTE(review): a previous comment claimed WA_TranslucentBackground was
        # removed for stability, yet it is still set here — confirm intent.
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setMinimumSize(200, 150)

        # Shadow Effect (rendered outside the rounded rect by the effect).
        shadow = QGraphicsDropShadowEffect(self)
        shadow.setBlurRadius(20)
        shadow.setXOffset(0)
        shadow.setYOffset(5)
        shadow.setColor(QColor(0, 0, 0, 150))
        self.setGraphicsEffect(shadow)

        # Consistent Padding; top margin (40) leaves room for the painted title.
        self.layout = QVBoxLayout(self)
        self.layout.setContentsMargins(20, 40, 20, 20)

    def paintEvent(self, event):
        """Paint the glass gradient background, title text and separator line."""
        qp = QPainter(self)
        qp.setRenderHint(QPainter.Antialiasing)

        rect = self.rect()
        w, h = rect.width(), rect.height()

        # 1. Background (Glass Gradient)
        grad = QLinearGradient(0, 0, w, h)
        grad.setColorAt(0.0, QColor(40, 50, 70, 240))  # Lighter Top-Left
        grad.setColorAt(1.0, QColor(10, 15, 25, 250))  # Darker Bottom-Right

        qp.setBrush(grad)
        # Subtle Cyan Border
        qp.setPen(QPen(QColor(0, 240, 255, 60), 1.5))

        # Draw with slight inset for shadow space if needed,
        # but GraphicsEffect handles shadow outside.
        qp.drawRoundedRect(rect.adjusted(1, 1, -1, -1), 12, 12)

        # 2. Title
        qp.setPen(QColor(255, 255, 255, 200))  # Brighter Text
        qp.setFont(QFont("Segoe UI", 10, QFont.Bold))
        qp.drawText(QRectF(20, 15, w-40, 20), Qt.AlignLeft | Qt.AlignVCenter, self.title.upper())

        # 3. Separator (Gradient Line) — fades out at both ends.
        l_grad = QLinearGradient(20, 38, w-20, 38)
        l_grad.setColorAt(0.0, QColor(0, 240, 255, 0))
        l_grad.setColorAt(0.5, QColor(0, 240, 255, 100))
        l_grad.setColorAt(1.0, QColor(0, 240, 255, 0))

        qp.setPen(QPen(QBrush(l_grad), 1))
        qp.drawLine(20, 38, w-20, 38)

        qp.end()
-
class NotesPanel(GlassPanel):
    """Glass panel decorated with faint horizontal ruled lines, notebook-style."""

    def paintEvent(self, event):
        super().paintEvent(event)
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)

        # Very faint white rules every 30px, starting under the title bar.
        painter.setPen(QPen(QColor(255, 255, 255, 15), 1))
        spacing = 30
        y = 60
        while y < self.height() - 20:
            painter.drawLine(20, y, self.width()-20, y)
            y += spacing
        painter.end()
-
class TaskPanel(GlassPanel):
    """Glass panel listing hard-coded demo tasks with checkbox outlines."""

    def paintEvent(self, event):
        super().paintEvent(event)
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)

        items = ["Review Q1 Roadmap", "Optimize Render Loop", "Update Neural Weights", "Sync with Cloud"]
        painter.setFont(QFont("Segoe UI", 11))

        # Rows are spaced 40px apart starting under the title bar.
        for row, label in enumerate(items):
            y = 60 + row * 40

            # Checkbox outline
            painter.setPen(QPen(QColor(0, 240, 255, 100), 1.5))
            painter.setBrush(Qt.NoBrush)
            painter.drawRoundedRect(20, y, 16, 16, 4, 4)

            # Task text
            painter.setPen(QColor(220, 230, 255))
            painter.drawText(50, y+13, label)
        painter.end()
-
class CodePanel(GlassPanel):
    """Glass panel rendering a static, colorized mock code snippet."""

    def paintEvent(self, event):
        super().paintEvent(event)
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setFont(QFont("Consolas", 10))

        # (text, hex color) pairs; an empty text entry is a blank line.
        snippet = (
            ("def optimize_network(self):", "#f0a0f0"),
            (" # Neural sync", "#708090"),
            (" weights = self.get_weights()", "#d0d0d0"),
            (" delta = weights * 0.05", "#d0d0d0"),
            (" return self.apply(delta)", "#d0d0d0"),
            ("", ""),
            ("class Canvas(QWidget):", "#f0a0f0"),
            (" def __init__(self):", "#f0a0f0"),
            (" super().__init__()", "#d0d0d0"),
        )

        # Every entry (including blanks) advances the baseline by 20px.
        y = 60
        for text, col_hex in snippet:
            if text:
                painter.setPen(QColor(col_hex))
                painter.drawText(20, y, text)
            y += 20
        painter.end()
-
class PdfPanel(GlassPanel):
    """Glass panel showing a placeholder rectangle for a future PDF thumbnail."""

    def paintEvent(self, event):
        super().paintEvent(event)
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)

        # Placeholder thumbnail rectangle sized to the panel interior.
        margin = 30
        thumb_w = self.width() - 2*margin
        thumb_h = self.height() - 80

        painter.setBrush(QColor(255, 255, 255, 10))
        painter.setPen(Qt.NoPen)
        painter.drawRoundedRect(margin, 60, thumb_w, thumb_h, 8, 8)

        # Centered placeholder caption.
        painter.setPen(QColor(255, 255, 255, 30))
        painter.drawText(QRectF(margin, 60, thumb_w, thumb_h), Qt.AlignCenter, "PDF PREVIEW")
        painter.end()
-
class ActivityPanel(GlassPanel):
    """Glass panel drawing a simple bar chart of random 'session activity'."""

    def __init__(self, title="Session Activity", parent=None):
        super().__init__(title=title, parent=parent)
        import random
        self.cols = 12
        # One random height factor (0..1) per column, fixed at construction.
        self.data = [random.random() for _ in range(self.cols)]

    def paintEvent(self, event):
        super().paintEvent(event)
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)

        slot_w = (self.width() - 40) / self.cols
        usable_h = self.height() - 80

        painter.setBrush(QColor(0, 240, 255, 50))
        painter.setPen(Qt.NoPen)

        for idx, level in enumerate(self.data):
            bar_h = level * usable_h
            left = 20 + idx * slot_w + 2
            top = self.height() - 20 - bar_h
            painter.drawRoundedRect(left, top, slot_w - 4, bar_h, 2, 2)
        painter.end()
diff --git a/core/ui/settings_widget.py b/core/ui/settings_widget.py
deleted file mode 100644
index c2cf131..0000000
--- a/core/ui/settings_widget.py
+++ /dev/null
@@ -1,161 +0,0 @@
-from PySide6.QtWidgets import (
- QWidget, QVBoxLayout, QLabel, QFrame, QHBoxLayout, QPushButton
-)
-from PySide6.QtCore import Qt, Signal
-from PySide6.QtGui import QFont, QColor, QPainter
-
-from core.voice import VoiceEngine
-
class SettingsWidget(QWidget):
    """
    Minimalist Settings Panel.
    - Glassmorphism style.
    - Text-only Language Selector (radio-style checkable buttons).
    - Expressive mode toggles (dance / humming), mutually exclusive.
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setAttribute(Qt.WA_TranslucentBackground)

        # Main Layout (Centered Box)
        main_layout = QVBoxLayout(self)
        main_layout.setAlignment(Qt.AlignCenter)

        # Content Frame
        self.frame = QFrame()
        self.frame.setFixedSize(500, 600)
        self.frame.setStyleSheet("""
            QFrame {
                background-color: rgba(10, 15, 20, 240);
                border: 1px solid rgba(255, 255, 255, 30);
                border-radius: 20px;
            }
        """)
        main_layout.addWidget(self.frame)

        # Frame Layout
        layout = QVBoxLayout(self.frame)
        layout.setContentsMargins(40, 40, 40, 40)
        layout.setSpacing(20)

        # 1. Title
        title = QLabel("SYSTEM SETTINGS")
        title.setAlignment(Qt.AlignCenter)
        title.setStyleSheet("color: white; font-size: 18px; font-weight: bold; letter-spacing: 4px;")
        layout.addWidget(title)

        layout.addSpacing(20)

        # 2. Section: Voice Language
        lbl_lang = QLabel("VOICE LANGUAGE")
        lbl_lang.setStyleSheet("color: rgba(255,255,255,150); font-size: 12px; font-weight: bold; letter-spacing: 2px;")
        layout.addWidget(lbl_lang)

        # Language List: (display label, language code) pairs.
        self.languages = [
            ("ENGLISH", "en"),
            ("HINDI", "hi"),
            ("KANNADA", "kn"),
            ("TELUGU", "te"),
            ("MARATHI", "mr")
        ]

        # (button, code) pairs, used for radio-style selection updates.
        self.lang_buttons = []

        for label_text, code in self.languages:
            btn = QPushButton(label_text)
            btn.setCheckable(True)
            btn.setFixedHeight(50)
            btn.setCursor(Qt.PointingHandCursor)
            # Custom Style for "Radio" look using PushButton
            btn.setStyleSheet(self._get_btn_style(False))
            # Default args bind code/btn per iteration (avoids the
            # late-binding-closure pitfall).
            btn.clicked.connect(lambda checked, c=code, b=btn: self.on_language_selected(c, b))

            layout.addWidget(btn)
            self.lang_buttons.append((btn, code))

        layout.addSpacing(30)

        # 3. Section: Expressive Modes
        lbl_expr = QLabel("EXPRESSIVE MODES")
        lbl_expr.setStyleSheet("color: rgba(255,255,255,150); font-size: 12px; font-weight: bold; letter-spacing: 2px;")
        layout.addWidget(lbl_expr)

        self.btn_dance = QPushButton("SLOW DANCE")
        self.btn_dance.setCheckable(True)
        self.btn_dance.setFixedHeight(50)
        self.btn_dance.setStyleSheet(self._get_btn_style(False))
        self.btn_dance.clicked.connect(self._toggle_dance)
        layout.addWidget(self.btn_dance)

        self.btn_hum = QPushButton("SOFT HUMMING")
        self.btn_hum.setCheckable(True)
        self.btn_hum.setFixedHeight(50)
        self.btn_hum.setStyleSheet(self._get_btn_style(False))
        self.btn_hum.clicked.connect(self._toggle_hum)
        layout.addWidget(self.btn_hum)

        layout.addStretch()

        # Select Default (English)
        self.on_language_selected("en", self.lang_buttons[0][0])

    def _toggle_dance(self, checked):
        """Toggle dance mode; unchecks humming (modes are mutually exclusive)."""
        self.btn_dance.setStyleSheet(self._get_btn_style(checked))
        if checked:
            self.btn_hum.setChecked(False)  # Mutual exclusive for simplicity
            self.btn_hum.setStyleSheet(self._get_btn_style(False))

        mode = "dance" if checked else "idle"
        # Lazy import — presumably to avoid a circular import; confirm.
        from core.state import AppState
        AppState.instance().set_expressive_mode(mode)

    def _toggle_hum(self, checked):
        """Toggle humming mode; unchecks dance (modes are mutually exclusive)."""
        self.btn_hum.setStyleSheet(self._get_btn_style(checked))
        if checked:
            self.btn_dance.setChecked(False)
            self.btn_dance.setStyleSheet(self._get_btn_style(False))

        mode = "humming" if checked else "idle"
        from core.state import AppState
        AppState.instance().set_expressive_mode(mode)

    def _get_btn_style(self, active: bool):
        """Return the stylesheet for a selector button (active vs inactive)."""
        if active:
            return """
                QPushButton {
                    background-color: rgba(0, 240, 255, 40);
                    border: 1px solid rgba(0, 240, 255, 100);
                    color: white;
                    font-size: 14px;
                    border-radius: 5px;
                    text-align: left;
                    padding-left: 20px;
                }
            """
        else:
            return """
                QPushButton {
                    background-color: rgba(255, 255, 255, 5);
                    border: 1px solid rgba(255, 255, 255, 10);
                    color: rgba(255, 255, 255, 150);
                    font-size: 14px;
                    border-radius: 5px;
                    text-align: left;
                    padding-left: 20px;
                }
                QPushButton:hover {
                    background-color: rgba(255, 255, 255, 15);
                    color: white;
                }
            """

    def on_language_selected(self, code, sender_btn):
        """Apply radio-style selection state and push the language to VoiceEngine."""
        # Update UI state
        for btn, c in self.lang_buttons:
            is_active = (btn == sender_btn)
            btn.setChecked(is_active)
            btn.setStyleSheet(self._get_btn_style(is_active))

        # Update Engine
        VoiceEngine.instance().set_language(code)
        print(f"DEBUG: Language set to {code}")
diff --git a/core/ui/side_dock.py b/core/ui/side_dock.py
deleted file mode 100644
index 89f2b61..0000000
--- a/core/ui/side_dock.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from PySide6.QtWidgets import QWidget, QVBoxLayout, QPushButton, QLabel, QFrame
-from PySide6.QtCore import Qt, Signal, QSize
-from PySide6.QtGui import QIcon, QFont
-
-from core.ui.icon_provider import IconProvider
-
class SideDock(QWidget):
    """
    Minimalist Side Dock for Home Mode.
    Contains icon-only navigation buttons (Vector, No Emojis).

    Emits mode_requested with one of: "home", "workspace", "avatar",
    "aurora", "cmd", "notifications", "settings".
    """
    mode_requested = Signal(str)  # "home", "avatar", "notifications", "settings"

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setFixedWidth(80)
        self.setStyleSheet("""
            QWidget {
                background-color: rgba(15, 15, 20, 0.95); /* Matte Black Enterprise */
                border-right: 1px solid rgba(255, 255, 255, 0.08);
            }
            QPushButton {
                background: transparent;
                border: none;
                border-radius: 8px;
                padding: 12px;
                margin: 4px;
            }
            QPushButton:hover {
                background-color: rgba(0, 240, 255, 0.08); /* Subtle Cyan */
                border: 1px solid rgba(0, 240, 255, 0.15);
            }
            QPushButton:pressed {
                background-color: rgba(0, 240, 255, 0.15);
            }
        """)

        layout = QVBoxLayout(self)
        layout.setContentsMargins(10, 30, 10, 20)
        layout.setSpacing(12)

        # 1. Home
        self.btn_home = self._create_btn("home", "Home")
        self.btn_home.clicked.connect(lambda: self.mode_requested.emit("home"))
        layout.addWidget(self.btn_home)

        # 2. Workspace
        self.btn_work = self._create_btn("workspace", "Workspace")
        self.btn_work.clicked.connect(lambda: self.mode_requested.emit("workspace"))
        layout.addWidget(self.btn_work)

        # 3. Avatar Mode
        self.btn_avatar = self._create_btn("avatar", "Avatar Mode")
        self.btn_avatar.clicked.connect(lambda: self.mode_requested.emit("avatar"))
        layout.addWidget(self.btn_avatar)

        # 4. Aurora Canvas
        self.btn_aurora = self._create_btn("aurora", "Aurora Canvas")
        self.btn_aurora.clicked.connect(lambda: self.mode_requested.emit("aurora"))
        layout.addWidget(self.btn_aurora)

        # 5. Ethera Command
        self.btn_cmd = self._create_btn("cmd", "Ethera Command")
        self.btn_cmd.clicked.connect(lambda: self.mode_requested.emit("cmd"))
        layout.addWidget(self.btn_cmd)

        # Spacer pushes notifications/settings to the bottom of the dock.
        layout.addStretch(1)

        # 6. Notifications
        self.btn_notif = self._create_btn("notifications", "Notifications")
        self.btn_notif.clicked.connect(lambda: self.mode_requested.emit("notifications"))
        layout.addWidget(self.btn_notif)

        # 7. Settings
        self.btn_settings = self._create_btn("settings", "Settings")
        self.btn_settings.clicked.connect(lambda: self.mode_requested.emit("settings"))
        layout.addWidget(self.btn_settings)

        # Store buttons — only notifications needs lookup (for badge updates).
        self.buttons = {"notifications": self.btn_notif}

        # Connect to Notifications.
        # Lazy import — presumably to avoid a circular import; confirm.
        from core.notifications import NotificationManager
        self.notif_manager = NotificationManager.instance()
        self.notif_manager.notification_added.connect(self.update_notifications_icon)
        self.notif_manager.notifications_cleared.connect(self.update_notifications_icon)

    def update_notifications_icon(self):
        """Refresh the notifications icon, badging it with the current count."""
        count = self.notif_manager.get_count()
        btn = self.buttons.get("notifications")
        if btn:
            btn.setIcon(IconProvider.get_icon("notifications", 40, badge_count=count))

    def _create_btn(self, icon_name, tooltip):
        """Build an icon-only dock button from the IconProvider."""
        btn = QPushButton()
        btn.setToolTip(tooltip)
        icon = IconProvider.get_icon(icon_name, 40)
        btn.setIcon(icon)
        btn.setIconSize(QSize(32, 32))
        return btn
diff --git a/core/ui/terminal.py b/core/ui/terminal.py
deleted file mode 100644
index 6a0c8c0..0000000
--- a/core/ui/terminal.py
+++ /dev/null
@@ -1,103 +0,0 @@
-from PySide6.QtWidgets import QTextEdit, QVBoxLayout, QLineEdit
-from PySide6.QtGui import QFont
-from PySide6.QtCore import Qt
-from core.ui.panels import GlassPanel
-from core.tools.router import ToolRouter
-
class TerminalPanel(GlassPanel):
    """
    A live shell terminal for Etherea.

    Read-only output log plus a one-line command input; commands are
    dispatched through ToolRouter and results appended on completion.
    """
    def __init__(self, title="Terminal", parent=None):
        super().__init__(title=title, parent=parent)

        # Read-only log area.
        self.output = QTextEdit()
        self.output.setReadOnly(True)
        self.output.setFont(QFont("Consolas", 10))
        self.output.setStyleSheet("background: rgba(0,0,0,50); color: #50fa7b; border: 1px solid rgba(0,255,255,30);")

        # Command entry; Enter submits.
        self.input = QLineEdit()
        self.input.setFont(QFont("Consolas", 10))
        self.input.setStyleSheet("background: rgba(0,0,0,80); color: white; border: none; padding: 5px;")
        self.input.setPlaceholderText("Enter command...")
        self.input.returnPressed.connect(self._run_command)

        self.layout.addWidget(self.output)
        self.layout.addWidget(self.input)

        # The router notifies when an async command finishes.
        ToolRouter.instance().command_completed.connect(self._on_command_done)

    def log_thought(self, text: str):
        """Render agent thoughts with a distinct teal style."""
        self.output.append(f"[Agent] {text}")
        self._scroll_to_bottom()

    def log_tool_call(self, data: dict):
        """Log an outgoing tool invocation (every non-'tool' key shown as an arg)."""
        tool = data.get("tool", "TOOL")
        args = ", ".join([f"{k}={v}" for k, v in data.items() if k != "tool"])
        self.output.append(f"▶ Calling {tool}: {args}")
        self._scroll_to_bottom()

    def log_tool_result(self, data: dict):
        """Log a tool result with stdout truncated to 30 lines, plus stderr."""
        success = data.get("success", False)
        # NOTE(review): `color` is computed but never used in this method.
        color = "#50fa7b" if success else "#ff5555"
        status = "SUCCESS" if success else "FAILED"

        self.output.append(f"◀ {data.get('tool', 'TOOL')} {status}")

        stdout = data.get("stdout", "")
        stderr = data.get("stderr", "")

        if stdout:
            lines = stdout.splitlines()
            display_out = "\n".join(lines[:30])
            if len(lines) > 30: display_out += "\n... (output truncated)"
            self.output.append(f"{display_out}")

        if stderr:
            self.output.append(f"Err: {stderr}")

        if not stdout and not stderr:
            self.output.append("No stdout/stderr produced (command may still succeed).")

        self._scroll_to_bottom()

    def log_result_card(self, card: dict):
        """Render a structured result card as rich text.

        NOTE(review): the HTML markup below appears corrupted (tags were
        stripped at some point); the literals are syntactically broken and
        must be reconstructed before this method can run.
        """
        title = card.get("title", "TASK RESULT")
        status = card.get("status", "UNKNOWN")
        color = "#50fa7b" if status == "SUCCESS" else "#ff5555"

        html = f"""


{title} — {status}

Summary: {card.get('summary', '')}

Evidence:

        """
        for item in card.get("evidence", []):
            html += f"- {item}
"
        html += f"""


Location: {card.get('location', '')}

        """
        self.output.append(html)
        self._scroll_to_bottom()

    def _scroll_to_bottom(self):
        """Pin the output view to its newest line."""
        self.output.verticalScrollBar().setValue(self.output.verticalScrollBar().maximum())

    def _run_command(self):
        """Echo the typed command and dispatch it via ToolRouter."""
        cmd = self.input.text()
        if not cmd: return
        self.output.append(f"\n$ {cmd}")
        self.input.clear()
        ToolRouter.instance().run_command(cmd)

    def _on_command_done(self, result: dict):
        """Render results for commands triggered from this panel or the router."""
        # Use the rich tool result logger for consistency
        self.log_tool_result({
            "tool": "SHELL",
            **result
        })
diff --git a/core/ui/timeline.py b/core/ui/timeline.py
deleted file mode 100644
index 370994a..0000000
--- a/core/ui/timeline.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from PySide6.QtWidgets import QVBoxLayout, QTextEdit, QScrollBar
-from core.ui.panels import GlassPanel
-
class TimelinePanel(GlassPanel):
    """
    AGENT TIMELINE (Next 6)
    Structured surface for tracking agent thoughts and tool interactions.

    NOTE(review): several HTML literals in this class appear corrupted (tags
    stripped in history), leaving some methods syntactically broken — they
    must be reconstructed before use.
    """
    def __init__(self, title="Agent Timeline", parent=None):
        super().__init__(title=title, parent=parent)

        self.output = QTextEdit()
        self.output.setReadOnly(True)
        self.output.setStyleSheet("background: transparent; color: #f8f8f2; border: none; font-family: 'Segoe UI', sans-serif;")

        # Enable rich HTML
        self.output.setAcceptRichText(True)

        self.layout.addWidget(self.output)

    def log_thought(self, text: str):
        """Render agent thoughts."""
        self.output.append(f"💭 {text}
")
        self._scroll_to_bottom()

    def log_tool_call(self, data: dict):
        """Render an outgoing tool call line."""
        tool = data.get("tool", "TOOL")
        args = ", ".join([f"{k}={v}" for k, v in data.items() if k != "tool"])
        html = f"""

 ▶ {tool} {args}

        """
        self.output.append(html)
        self._scroll_to_bottom()

    def log_tool_result(self, data: dict):
        """Render a tool result, with stdout truncated to 10 lines."""
        success = data.get("success", False)
        color = "#50fa7b" if success else "#ff5555"
        status = "SUCCESS" if success else "FAILED"

        html = f"""


◀ {data.get('tool', 'TOOL')} {status}
        """

        stdout = data.get("stdout", "")
        stderr = data.get("stderr", "")

        if stdout:
            lines = stdout.splitlines()
            display_out = "\n".join(lines[:10])  # Shorter in timeline
            if len(lines) > 10:
                display_out += "\n... (output truncated)"
            html += f"
{display_out}"

        if stderr:
            html += f"
Err: {stderr}"

        if not stdout and not stderr:
            html += "
No stdout/stderr produced.
"

        html += "
"
        self.output.append(html)
        self._scroll_to_bottom()

    def log_result_card(self, card: dict):
        """Render a structured result card."""
        status = card.get("status", "UNKNOWN")
        color = "#50fa7b" if status == "SUCCESS" else "#ff5555"

        html = f"""


🏆 {card.get('title', 'TASK')} — {status}

{card.get('summary', '')}

Evidence:

        """
        for item in card.get("evidence", []):
            html += f"- {item}
"
        html += f"""


Surface: {card.get('location', '')}

        """
        self.output.append(html)
        self._scroll_to_bottom()

    def _scroll_to_bottom(self):
        """Keep the newest entry visible."""
        self.output.verticalScrollBar().setValue(self.output.verticalScrollBar().maximum())
diff --git a/core/ui/workspace_layout_manager.py b/core/ui/workspace_layout_manager.py
deleted file mode 100644
index e023a39..0000000
--- a/core/ui/workspace_layout_manager.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from __future__ import annotations
-
class WorkspaceLayoutManager:
    """
    Controls workspace UI layout based on mode.

    SAFE DESIGN:
    - No widget deletion
    - Only show/hide or resize
    - Always reversible

    Preset names ("study", "coding", "exam", "calm") are matched
    case-insensitively; anything else falls back to the default layout.
    """

    def __init__(self, *, avatar_widget, console_widget, aurora_widget):
        self.avatar = avatar_widget
        self.console = console_widget
        self.aurora = aurora_widget

    def apply_layout(self, layout: str):
        """Apply the named preset; None/unknown names select the default."""
        mode = (layout or "").lower()
        presets = {
            "study": self._study_layout,
            "coding": self._coding_layout,
            "exam": self._exam_layout,
            "calm": self._calm_layout,
        }
        presets.get(mode, self._default_layout)()

    # ---- Layout presets ----

    def _study_layout(self):
        # Balanced view: everything visible with room for avatar and console.
        for panel in (self.avatar, self.aurora, self.console):
            panel.show()

        self.avatar.setMinimumWidth(360)
        self.console.setMinimumHeight(220)

    def _coding_layout(self):
        # Console dominant; aurora hidden.
        self.avatar.show()
        self.aurora.hide()
        self.console.show()

        self.console.setMinimumHeight(420)

    def _exam_layout(self):
        # Minimal distractions: console only.
        self.avatar.hide()
        self.aurora.hide()
        self.console.show()

        self.console.setMinimumHeight(520)

    def _calm_layout(self):
        # Avatar + aurora only.
        self.avatar.show()
        self.aurora.show()
        self.console.hide()

    def _default_layout(self):
        for panel in (self.avatar, self.aurora, self.console):
            panel.show()
diff --git a/core/ui/workspace_widget.py b/core/ui/workspace_widget.py
deleted file mode 100644
index da02a75..0000000
--- a/core/ui/workspace_widget.py
+++ /dev/null
@@ -1,41 +0,0 @@
from PySide6.QtWidgets import QWidget, QGridLayout

from core.ui.panels import PdfPanel
from core.ui.editors import FunctionalCodePanel
from core.ui.terminal import TerminalPanel
from core.ui.explorer import FileExplorerPanel
from core.ui.timeline import TimelinePanel
-
class WorkspaceWidget(QWidget):
    """
    Professional Workspace Grid Layout.
    Organizes modular functional panels for productivity.

    Grid: left column (explorer + timeline), wide center (code editor),
    right column (PDF reference + terminal).
    """
    def __init__(self, parent=None):
        super().__init__(parent)

        self.layout = QGridLayout(self)
        self.layout.setContentsMargins(40, 40, 40, 40)
        self.layout.setSpacing(20)

        # 1. Left Column (Navigation & Timeline)
        self.explorer = FileExplorerPanel("Project Files")
        self.timeline = TimelinePanel("Agent Timeline")

        self.layout.addWidget(self.explorer, 0, 0, 1, 1)  # Row 0, Col 0
        self.layout.addWidget(self.timeline, 1, 0, 1, 1)  # Row 1, Col 0

        # 2. Center Column (Focus - Code Editor)
        self.code = FunctionalCodePanel("main.py — Editor")
        self.layout.addWidget(self.code, 0, 1, 2, 2)  # Row 0, Col 1, Span 2 Rows, Span 2 Cols

        # 3. Right Column (References & Tools)
        self.pdf = PdfPanel("Specification.pdf")
        self.terminal = TerminalPanel("Console / Terminal")

        self.layout.addWidget(self.pdf, 0, 3, 1, 1)       # Row 0, Col 3
        self.layout.addWidget(self.terminal, 1, 3, 1, 1)  # Row 1, Col 3

        # Column Stretch: center columns get double weight.
        self.layout.setColumnStretch(0, 1)  # Left
        self.layout.setColumnStretch(1, 2)  # Center (Wider)
        self.layout.setColumnStretch(2, 2)  # Center
        self.layout.setColumnStretch(3, 1)  # Right
diff --git a/core/utils.py b/core/utils.py
deleted file mode 100644
index f903209..0000000
--- a/core/utils.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# core/utils.py
-"""
-Utility module for Etherea
-- Centralized helpers for secure access to API keys (supports secrets)
-- File/folder helpers and debug utilities
-- Emotion-related helper functions
-"""
-
-import os
-
DEBUG = True  # toggle debug printing


def debug_print(label, value):
    """Emit a '[DEBUG] label: value' line when the module DEBUG flag is on."""
    if not DEBUG:
        return
    print(f"[DEBUG] {label}: {value}")
-
-# --- API Key Helper ---
-
-
def get_api_key(secret_key=None):
    """
    Return Etherea API key.

    Priority:
      1. secret_key argument (from GitHub secret / hosted secret)
      2. ETHEREA_API_KEY environment variable (optional fallback)

    Raises:
        ValueError: if no key is available from either source.
    """
    if secret_key:
        return secret_key

    env_key = os.getenv("ETHEREA_API_KEY")
    if env_key:
        return env_key

    raise ValueError(
        "[!] ETHEREA_API_KEY not found in secrets or environment")
-
-# --- File / Folder Helpers ---
-
-
def ensure_folder(path):
    """Create the folder at *path* (and parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path, exist_ok=True)
    debug_print("Folder created", path)
-
-
def read_file(path):
    """Read a UTF-8 text file safely; return its contents, or None on failure.

    Uses EAFP (open directly and handle the exception) instead of the previous
    os.path.exists() pre-check, which was racy: the file could disappear
    between the check and the open.
    """
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        # Same log message as the old exists()-check branch.
        debug_print("read_file", f"File not found: {path}")
        return None
    except Exception as e:
        debug_print("read_file error", str(e))
        return None
-
-
def write_file(path, content):
    """Write *content* to a UTF-8 text file safely, creating parent folders.

    Fix: only call ensure_folder() when the path actually has a directory
    part. os.path.dirname() returns "" for bare filenames, and makedirs("")
    raises FileNotFoundError — previously outside the try, so writing
    "file.txt" (no directory) crashed.
    """
    parent = os.path.dirname(path)
    if parent:
        ensure_folder(parent)
    try:
        with open(path, "w", encoding="utf-8") as f:
            f.write(content)
        debug_print("write_file", f"File written: {path}")
    except Exception as e:
        debug_print("write_file error", str(e))
-
-# --- Emotional Helpers ---
-
-
def compute_emotion_score(emotion_dict):
    """Return the dict's 'intensity' (default 0.0) clamped to [0.0, 1.0]."""
    raw = float(emotion_dict.get("intensity", 0.0))
    capped = min(1.0, raw)
    return max(0.0, capped)
-
-
def merge_emotions(base_emotion, new_emotion):
    """
    Merge two emotion dictionaries; new values win on key collisions.
    The resulting 'intensity' is re-clamped to [0.0, 1.0].
    """
    combined = base_emotion.copy()
    for key, value in new_emotion.items():
        combined[key] = value
    combined["intensity"] = compute_emotion_score(combined)
    return combined
-
-
def emotion_to_voice_params(emotion):
    """
    Convert an emotional state into TTS parameters (rate, volume).

    Supported tones: happy, sad, angry, calm, excited; anything else
    falls back to the neutral defaults (rate=150, volume=1.0).
    """
    tone = emotion.get("tone", "neutral")
    intensity = compute_emotion_score(emotion)

    # tone -> (rate formula, volume formula), each a function of intensity.
    profiles = {
        "happy":   (lambda i: int(150 + 20 * i), lambda i: min(1.0, 0.8 + 0.2 * i)),
        "sad":     (lambda i: int(130 - 20 * i), lambda i: max(0.6, 0.6 - 0.1 * i)),
        "angry":   (lambda i: int(160 + 30 * i), lambda i: min(1.0, 0.9 + 0.1 * i)),
        "calm":    (lambda i: int(140 - 10 * i), lambda i: max(0.7, 0.7 - 0.05 * i)),
        "excited": (lambda i: int(170 + 20 * i), lambda i: min(1.0, 0.9 + 0.1 * i)),
    }

    rate, volume = 150, 1.0
    if tone in profiles:
        rate_fn, volume_fn = profiles[tone]
        rate = rate_fn(intensity)
        volume = volume_fn(intensity)

    debug_print("emotion_to_voice_params", {
        "tone": tone, "rate": rate, "volume": volume})
    return {"rate": rate, "volume": volume}
-
-# --- Extra Utilities ---
-
-
def clamp(value, min_value=0.0, max_value=1.0):
    """Clamp *value* into the closed interval [min_value, max_value]."""
    if value < min_value:
        return min_value
    if value > max_value:
        return max_value
    return value
-
-
def merge_dicts(base, update):
    """Shallow-merge two dicts; keys from *update* win. Inputs are untouched."""
    merged = base.copy()
    for key, value in update.items():
        merged[key] = value
    return merged
diff --git a/core/utils/asset_path.py b/core/utils/asset_path.py
deleted file mode 100644
index 904b427..0000000
--- a/core/utils/asset_path.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Etherea asset path helper.
-
-Works in:
-- Dev mode (normal python run)
-- PyInstaller onefile (.exe / AppImage build)
-
-Usage:
- from core.utils.asset_path import asset
- icon = asset("core/assets/ui/icon.png")
-"""
-
-from __future__ import annotations
-
-from pathlib import Path
-
-from core.app_runtime import resource_path
-
-
def asset(rel_path: str) -> str:
    """Return absolute path to a bundled asset.

    Thin wrapper over core.app_runtime.resource_path so callers get the
    correct location in both dev runs and PyInstaller onefile bundles.
    """
    return resource_path(rel_path)
-
-
def exists(rel_path: str) -> bool:
    """Check if a bundled asset exists on disk (dev or PyInstaller bundle)."""
    return Path(resource_path(rel_path)).exists()
diff --git a/core/voice.py b/core/voice.py
deleted file mode 100644
index 463053d..0000000
--- a/core/voice.py
+++ /dev/null
@@ -1,204 +0,0 @@
-import threading
-import time
-try:
- import pyttsx3
-except Exception:
- pyttsx3 = None # optional on Termux/CI
-from typing import Optional
-try:
- from PySide6.QtCore import QObject, Signal
-except Exception:
- class QObject: # minimal stub
- pass
- def Signal(*a, **k):
- return None
-
class VoiceEngine(QObject):
    """
    Handles Text-to-Speech (TTS) and voice sync signals.
    Runs speech in a separate thread to avoid blocking the UI.

    Singleton: obtain via VoiceEngine.instance().
    """
    # Qt signals for UI sync. NOTE(review): if the PySide6 import fallback at
    # module top was taken, Signal(...) returns None and .emit() will fail —
    # confirm the stub path is never exercised while speaking.
    speaking_started = Signal()
    speaking_finished = Signal()
    viseme_updated = Signal(float)  # 0.0 to 1.0 (amplitude approximation)

    _instance = None  # singleton slot for instance()

    # Substrings matched against installed pyttsx3 voice names, per language.
    LANGUAGE_MAP = {
        "en": ["david", "zira", "english"],
        "hi": ["kalpana", "hemant", "hindi"],
        "kn": ["kannada", "tunga"],
        "te": ["telugu", "vani"],
        "mr": ["marathi", "pallavi"]  # Checking available names
    }

    def __init__(self):
        """Initialize state and start the daemon speech-loop thread."""
        super().__init__()
        self._lock = threading.Lock()   # guards _queue mutations in speak()
        self._queue = []                # pending phrases; index 0 is next
        self._is_speaking = False
        self._stop_requested = False
        self._engine = None             # pyttsx3 engine; created in _run_loop
        self.current_language = "en"  # Default
        self._session_start = time.time()
        self._phrase_count = 0

        # Initialize thread
        self._thread = threading.Thread(target=self._run_loop, daemon=True)
        self._thread.start()

    @classmethod
    def instance(cls):
        """Return the process-wide singleton, creating it on first use."""
        if cls._instance is None:
            cls._instance = VoiceEngine()
        return cls._instance

    def set_language(self, lang_code: str):
        """Set the active voice language."""
        self.current_language = lang_code
        # We need to signal the thread to update the property
        # Since pyttsx3 is not thread-safe, we pass a special command or check flag
        # Ideally, we restart init or setProperty in the loop.
        # For simplicity, we'll queue a special object or use a shared flag.
        # But queue is for text. Let's use a flag checked in the loop.
        pass  # Actual logic handled in loop via self.current_language check

    def speak(self, text: str, lang: Optional[str] = None):
        """Queue text to be spoken with optional language routing."""
        if not text:
            return

        # 1. Multi-Lang Routing (NEXT 3)
        if not lang:
            # Simple heuristic for Kannada/Hindi characters
            if any("\u0cb0" <= c <= "\u0cff" for c in text):  # Kannada range approx
                lang = "kn"
            elif any("\u0900" <= c <= "\u097f" for c in text):  # Devanagari (Hindi/etc)
                lang = "hi"
            else:
                lang = "en"

        self.current_language = lang
        self._phrase_count += 1

        # 2. Latent Verbosity Filter
        # After 10 phrases or 30 minutes, strip filler openers from the text.
        session_duration = time.time() - self._session_start
        if self._phrase_count > 10 or session_duration > 1800:
            fillers = ["Let me explain", "As I was saying", "Actually", "Welcome back", "I've noticed that"]
            for f in fillers:
                text = text.replace(f, "").replace(f.lower(), "").strip()
            text = text.lstrip(" ,.?!")
            if text: text = text[0].upper() + text[1:]

        with self._lock:
            # Interrupt if critical (Mock criteria: starts with 'Error' or 'Alert')
            if text.startswith("Error") or text.startswith("Alert"):
                self._queue.insert(0, text)
                if self._is_speaking:
                    # NOTE(review): engine.stop() is invoked from the caller's
                    # thread while _run_loop owns the engine; pyttsx3 is not
                    # documented as thread-safe — confirm this is reliable.
                    self._engine.stop()  # pyttsx3 stop will trigger loop to pop next
            else:
                self._queue.append(text)

    def _run_loop(self):
        """Internal loop to process the speech queue (runs on the daemon thread)."""
        try:
            # pyttsx3 may be None (guarded import at module top); init() failing
            # simply disables speech for this session.
            self._engine = pyttsx3.init()
            self._update_voice_property()
        except Exception as e:
            print(f"[VoiceEngine] Init Error: {e}")
            return

        last_lang = self.current_language

        while True:
            # Re-select the OS voice whenever the language flag changed.
            if self.current_language != last_lang:
                self._update_voice_property()
                last_lang = self.current_language

            # 3. Whisper Mode & Dynamic Pacing (NEXT 3)
            try:
                from core.state import AppState
                state = AppState.instance()

                base_rate = 160
                base_volume = 1.0

                # Check User Focus (Whisper Mode if typing/focused)
                # If energy > 0.8 (active typing/moving), go to Whisper Mode
                if state.energy > 0.8:
                    base_volume = 0.3
                    base_rate = 180  # Fast and quiet

                # Late Night logic (previous milestone)
                hour = time.localtime().tm_hour
                if hour >= 22 or hour < 5:
                    base_volume *= 0.7
                    base_rate = 145

                self._engine.setProperty('rate', base_rate)
                self._engine.setProperty('volume', base_volume)
            # NOTE(review): bare except silently swallows AppState/engine errors.
            except: pass

            if self._queue:
                # NOTE(review): this pop is not under _lock while speak()
                # mutates the queue under it — confirm the race is acceptable.
                text = self._queue.pop(0)
                self._is_speaking = True
                self._stop_requested = False
                self.speaking_started.emit()

                # Fake mouth-amplitude generator runs alongside speech.
                modulator = threading.Thread(target=self._modulate_visemes)
                modulator.start()

                try:
                    self._engine.say(text)
                    self._engine.runAndWait()
                except Exception as e:
                    print(f"[VoiceEngine] Speak Error: {e}")

                self._is_speaking = False
                modulator.join(timeout=0.1)
                self.speaking_finished.emit()
                self.viseme_updated.emit(0.0)  # close the mouth

            time.sleep(0.1)

    def _update_voice_property(self):
        """Find best matching voice for current language."""
        try:
            voices = self._engine.getProperty('voices')
            target_keywords = self.LANGUAGE_MAP.get(self.current_language, ["english"])

            selected_voice = None

            # 1. Try exact match
            for voice in voices:
                name = voice.name.lower()
                for kw in target_keywords:
                    if kw in name:
                        selected_voice = voice.id
                        break
                if selected_voice: break

            # 2. Fallback to any female voice if no match
            if not selected_voice:
                for voice in voices:
                    if "zira" in voice.name.lower():
                        selected_voice = voice.id
                        break

            if selected_voice:
                self._engine.setProperty('voice', selected_voice)
                print(f"[VoiceEngine] Switched to voice for {self.current_language}: {selected_voice}")
            else:
                print(f"[VoiceEngine] No suitable voice found for {self.current_language}")

        except Exception as e:
            print(f"[VoiceEngine] Voice Switch Error: {e}")

    def _modulate_visemes(self):
        """Generate fake amplitude data while speaking to drive mouth."""
        import random
        while self._is_speaking and not self._stop_requested:
            # Random amplitude 0.2 to 0.8
            amp = 0.2 + random.random() * 0.6
            self.viseme_updated.emit(amp)
            time.sleep(0.05)  # 20Hz updates
diff --git a/core/voice_adapters.py b/core/voice_adapters.py
deleted file mode 100644
index 6ed9349..0000000
--- a/core/voice_adapters.py
+++ /dev/null
@@ -1,146 +0,0 @@
-from __future__ import annotations
-
-import os
-import shutil
-import tempfile
-import subprocess
-from pathlib import Path
-
-
-def _has_cmd(cmd: str) -> bool:
- return shutil.which(cmd) is not None
-
-
-def _play_audio_file(path: str) -> bool:
- """
- Best-effort playback across environments.
- Returns True if playback likely started.
- """
- p = str(path)
-
- # 1) playsound (if installed)
- try:
- import playsound # type: ignore
- playsound.playsound(p)
- return True
- except Exception:
- pass
-
- # 2) Windows: PowerShell MediaPlayer
- if os.name == "nt":
- try:
- ps = [
- "powershell",
- "-NoProfile",
- "-Command",
- (
- "$p = '" + p.replace("'", "''") + "';"
- "Add-Type -AssemblyName presentationCore;"
- "$m = New-Object System.Windows.Media.MediaPlayer;"
- "$m.Open([Uri]$p);"
- "$m.Volume = 1.0;"
- "$m.Play();"
- "Start-Sleep -Milliseconds 200;"
- "while($m.NaturalDuration.HasTimeSpan -and $m.Position -lt $m.NaturalDuration.TimeSpan) { Start-Sleep -Milliseconds 200 }"
- ),
- ]
- subprocess.run(ps, check=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
- return True
- except Exception:
- pass
-
- # 3) Linux/macOS: ffplay
- if _has_cmd("ffplay"):
- try:
- subprocess.run(
- ["ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet", p],
- check=False,
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL,
- )
- return True
- except Exception:
- pass
-
- # 4) Linux: aplay for wav
- if p.lower().endswith(".wav") and _has_cmd("aplay"):
- try:
- subprocess.run(["aplay", "-q", p], check=False)
- return True
- except Exception:
- pass
-
- return False
-
-
-def speak_edge_tts(
- text: str,
- *,
- voice: str = "en-IN-NeerjaNeural",
- rate: str = "+0%",
- volume: str = "+0%",
- pitch: str = "+0st",
- ssml: bool = False,
- is_ssml: bool = False,
-) -> bool:
- """
- Speak using Edge TTS (python module or CLI).
- Accepts SSML when ssml=True / is_ssml=True.
- """
- text = (text or "").strip()
- if not text:
- return False
-
- ssml = bool(ssml or is_ssml)
-
- # Try python edge-tts module first
- try:
- import asyncio
- import edge_tts # type: ignore
-
- async def _run() -> str:
- tmpdir = Path(tempfile.gettempdir()) / "etherea_tts"
- tmpdir.mkdir(parents=True, exist_ok=True)
- out = tmpdir / f"etherea_tts_{abs(hash(text)) % 10_000_000}.mp3"
-
- communicate = edge_tts.Communicate(
- text=text,
- voice=voice,
- rate=rate,
- volume=volume,
- pitch=pitch,
- )
- await communicate.save(str(out))
- return str(out)
-
- out_path = asyncio.run(_run())
- return _play_audio_file(out_path)
-
- except Exception:
- pass
-
- # Fallback: edge-tts CLI if installed
- if _has_cmd("edge-tts"):
- try:
- tmpdir = Path(tempfile.gettempdir()) / "etherea_tts"
- tmpdir.mkdir(parents=True, exist_ok=True)
- out = tmpdir / f"etherea_tts_{abs(hash(text)) % 10_000_000}.mp3"
-
- cmd = [
- "edge-tts",
- "--voice", voice,
- "--rate", rate,
- "--volume", volume,
- "--pitch", pitch,
- "--text", text,
- "--write-media", str(out),
- ]
- subprocess.run(cmd, check=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
-
- if out.exists():
- return _play_audio_file(str(out))
- return False
- except Exception:
- return False
-
- return False
diff --git a/core/voice_engine.py b/core/voice_engine.py
deleted file mode 100644
index 7a26212..0000000
--- a/core/voice_engine.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# core/voice_engine.py
-
-from __future__ import annotations
-import threading
-from dataclasses import dataclass
-from typing import Optional, Callable
-
-# ======================================================
-# Optional Qt (PySide6)
-# ======================================================
-
-try:
- from PySide6.QtCore import QObject, Signal # type: ignore
-except Exception:
- class QObject: # type: ignore
- def __init__(self, *args, **kwargs) -> None:
- pass
-
- class _StubSignal:
- def emit(self, *args, **kwargs) -> None:
- pass
-
- def Signal(*args, **kwargs): # type: ignore
- return _StubSignal()
-
-
-# ======================================================
-# Optional TTS (pyttsx3)
-# ======================================================
-
-try:
- import pyttsx3 # type: ignore
- _PYTTSX3_OK = True
-except Exception:
- pyttsx3 = None
- _PYTTSX3_OK = False
-
-
-# ======================================================
-# Models
-# ======================================================
-
-@dataclass
-class VoiceStatus:
- backend: str
- available: bool
-
-
-# ======================================================
-# Voice Engine
-# ======================================================
-
-class VoiceEngine(QObject):
- started = Signal(str)
- finished = Signal(str)
- error = Signal(str)
-
- def __init__(self) -> None:
- super().__init__()
-
- self._lock = threading.Lock()
- self._engine = None
- self._backend = "none"
-
- self._init_backend()
-
- def _init_backend(self) -> None:
- if _PYTTSX3_OK and pyttsx3 is not None:
- try:
- self._engine = pyttsx3.init()
- self._engine.setProperty("rate", 160)
- self._engine.setProperty("volume", 0.9)
- self._backend = "pyttsx3"
- return
- except Exception:
- self._engine = None
-
- self._engine = None
- self._backend = "none"
-
- def status(self) -> VoiceStatus:
- return VoiceStatus(
- backend=self._backend,
- available=self._engine is not None,
- )
-
- def speak(self, text: str) -> bool:
- text = (text or "").strip()
- if not text or self._engine is None:
- return False
-
- try:
- self.started.emit(text)
- except Exception:
- pass
-
- with self._lock:
- try:
- self._engine.say(text)
- self._engine.runAndWait()
- except Exception as e:
- try:
- self.error.emit(str(e))
- except Exception:
- pass
- return False
-
- try:
- self.finished.emit(text)
- except Exception:
- pass
-
- return True
-
- def stop(self) -> None:
- if self._engine is None:
- return
- try:
- self._engine.stop()
- except Exception:
- pass
-
-
-# ======================================================
-# Singleton
-# ======================================================
-
-_voice_engine: Optional[VoiceEngine] = None
-
-
-def get_voice_engine() -> VoiceEngine:
- global _voice_engine
- if _voice_engine is None:
- _voice_engine = VoiceEngine()
- return _voice_engine
-
-
-voice_engine = get_voice_engine()
\ No newline at end of file
diff --git a/core/worker.py b/core/worker.py
deleted file mode 100644
index 6fb85b0..0000000
--- a/core/worker.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from PySide6.QtCore import QThread, Signal
-import logging
-
-logger = logging.getLogger(__name__)
-
-class AvatarWorker(QThread):
- """
- Worker thread to handle AvatarEngine calls asynchronously.
- Prevents UI freeze when generating responses.
- """
- response_ready = Signal(str)
- error_occurred = Signal(str)
-
- def __init__(self, avatar_engine, user_text: str):
- super().__init__()
- self.avatar_engine = avatar_engine
- self.user_text = user_text
-
- def run(self):
- try:
- # Blocking call runs here in background thread
- response = self.avatar_engine.speak(self.user_text)
- self.response_ready.emit(response)
- except Exception as e:
- logger.exception("AvatarWorker failed")
- self.error_occurred.emit(str(e))
diff --git a/core/workspace_ai/__init__.py b/core/workspace_ai/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/core/workspace_ai/focus_mode.py b/core/workspace_ai/focus_mode.py
deleted file mode 100644
index cff1ac5..0000000
--- a/core/workspace_ai/focus_mode.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from __future__ import annotations
-from typing import Dict
-
-
-def detect_focus_mode(user_text: str) -> str:
- t = (user_text or "").lower()
-
- if any(k in t for k in ["regression", "study", "learn", "exam", "math", "notes"]):
- return "study"
-
- if any(k in t for k in ["code", "bug", "fix", "python", "build", "repo", "commit"]):
- return "coding"
-
- if any(k in t for k in ["meeting", "call", "zoom", "class", "lecture"]):
- return "meeting"
-
- if any(k in t for k in ["deep work", "lock in", "focus", "no distractions"]):
- return "deep_work"
-
- return "study"
diff --git a/core/workspace_ai/router.py b/core/workspace_ai/router.py
deleted file mode 100644
index fecdf47..0000000
--- a/core/workspace_ai/router.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import annotations
-
-from typing import Dict, Any
-import time
-
-
-class WorkspaceAIRouter:
- """
- Simple command router for workspace actions.
- Termux-safe, no heavy dependencies.
-
- Final demo additions:
- - stop focus timer
- - hello etherea / wake commands
- - self-explain command
- - focus duration parsing without explicit "minutes"
- """
-
- def route(self, text: str) -> Dict[str, Any]:
- t = (text or "").strip()
- low = t.lower().strip()
-
- # ---- wake / greeting ----
- if low in ("hello etherea", "hi etherea", "hey etherea", "etherea"):
- return self._action("greet", {"text": t})
-
- # ---- self awareness ----
- if "explain yourself" in low or "how were you built" in low or "how you were built" in low:
- return self._action("self_explain", {"query": t})
-
- # ---- mode switches ----
- for mode in ["study", "coding", "exam", "calm", "deep_work", "meeting"]:
- if low == mode or f"{mode} mode" in low or low.startswith(f"set {mode}"):
- return self._action("set_mode", {"mode": mode})
-
- # ---- session commands ----
- if "save session" in low or "store session" in low:
- return self._action("save_session", {})
-
- if "continue last session" in low or "resume session" in low or "continue session" in low:
- return self._action("resume_session", {})
-
- # ---- focus timer ----
- # stop focus / cancel timer
- if "stop focus" in low or "cancel focus" in low or low == "stop timer" or low == "cancel timer":
- return self._action("stop_focus_timer", {})
-
- # start focus timer:
- # "focus 25", "focus for 25", "focus 25 minutes"
- if low.startswith("focus"):
- minutes = self._extract_int(low) or 25
- return self._action("start_focus_timer", {"minutes": minutes})
-
- # ---- summary ----
- if low.startswith("summarize"):
- return self._action("summarize_text", {"text": t})
-
- # ---- fallback ----
- return self._action("unknown", {"text": t})
-
- def _action(self, name: str, payload: Dict[str, Any]) -> Dict[str, Any]:
- return {"action": name, "payload": payload, "ts": time.time()}
-
- def _extract_int(self, s: str):
- num = ""
- for ch in s:
- if ch.isdigit():
- num += ch
- elif num:
- break
- return int(num) if num else None
diff --git a/core/workspace_ai/session_memory.py b/core/workspace_ai/session_memory.py
deleted file mode 100644
index 1d8a135..0000000
--- a/core/workspace_ai/session_memory.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from __future__ import annotations
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Dict, Any, List
-import json
-import time
-
-
-DEFAULT_SESSION_FILE = Path("workspace/.etherea_last_session.json")
-
-
-@dataclass
-class SessionSnapshot:
- saved_at: float
- open_files: List[Dict[str, Any]] # [{path, sealed}]
- notes: str = ""
-
-
-def save_snapshot(snapshot: Dict[str, Any], session_file: Path = DEFAULT_SESSION_FILE) -> Path:
- session_file.parent.mkdir(parents=True, exist_ok=True)
- snapshot = dict(snapshot or {})
- snapshot["saved_at"] = float(time.time())
- session_file.write_text(json.dumps(snapshot, indent=2), encoding="utf-8")
- return session_file
-
-
-def load_snapshot(session_file: Path = DEFAULT_SESSION_FILE) -> Dict[str, Any]:
- if not session_file.exists():
- return {}
- try:
- return json.loads(session_file.read_text(encoding="utf-8"))
- except Exception:
- return {}
diff --git a/core/workspace_ai/task_extractor.py b/core/workspace_ai/task_extractor.py
deleted file mode 100644
index 99849f0..0000000
--- a/core/workspace_ai/task_extractor.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import annotations
-import re
-from typing import List, Dict
-
-
-TODO_PATTERNS = [
- r"\bTODO\b[:\-]?\s*(.+)",
- r"\bTo do\b[:\-]?\s*(.+)",
- r"\bTask\b[:\-]?\s*(.+)",
- r"\bFix\b[:\-]?\s*(.+)",
- r"\bNeed to\b\s*(.+)"
-]
-
-
-def extract_tasks(text: str) -> List[Dict[str, str]]:
- tasks = []
- s = text or ""
-
- for pat in TODO_PATTERNS:
- for m in re.finditer(pat, s, flags=re.IGNORECASE):
- item = m.group(1).strip()
- if item:
- tasks.append({"task": item})
-
- # Bullet items that look like tasks
- for line in s.splitlines():
- line = line.strip()
- if line.startswith(("-", "*")) and len(line) > 6:
- tasks.append({"task": line.lstrip("-* ").strip()})
-
- # Deduplicate
- seen = set()
- out = []
- for t in tasks:
- key = t["task"].lower()
- if key not in seen:
- seen.add(key)
- out.append(t)
-
- return out
diff --git a/core/workspace_ai/workspace_ai_hub.py b/core/workspace_ai/workspace_ai_hub.py
deleted file mode 100644
index 37f4357..0000000
--- a/core/workspace_ai/workspace_ai_hub.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from __future__ import annotations
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Dict, Any, Optional
-import json
-
-from core.workspace_ai.focus_mode import detect_focus_mode
-from core.workspace_ai.task_extractor import extract_tasks
-
-
-@dataclass
-class WorkspaceAIResult:
- mode: str
- tasks: list
- notes: str = ""
-
-
-class WorkspaceAIHub:
- """
- AI-like features without requiring LLM installs.
- Later we can plug OpenAI locally in desktop builds.
- """
-
- def __init__(self, profiles_path: str = "core/workspace_ai/workspace_profiles.json"):
- self.profiles_path = Path(profiles_path)
- self.profiles: Dict[str, Any] = {}
- self.load_profiles()
-
- def load_profiles(self):
- try:
- self.profiles = json.loads(self.profiles_path.read_text(encoding="utf-8"))
- except Exception:
- self.profiles = {}
-
- def plan(self, user_text: str) -> WorkspaceAIResult:
- mode = detect_focus_mode(user_text)
- tasks = extract_tasks(user_text)
-
- return WorkspaceAIResult(
- mode=mode,
- tasks=tasks,
- notes=f"workspace_mode={mode}"
- )
-
- def get_profile(self, mode: str) -> Optional[Dict[str, Any]]:
- return self.profiles.get(mode)
diff --git a/core/workspace_ai/workspace_controller.py b/core/workspace_ai/workspace_controller.py
deleted file mode 100644
index f42dcc3..0000000
--- a/core/workspace_ai/workspace_controller.py
+++ /dev/null
@@ -1,193 +0,0 @@
-from __future__ import annotations
-
-import time
-from dataclasses import dataclass
-from typing import Dict, Any, Optional
-
-from core.workspace_ai.router import WorkspaceAIRouter
-from core.workspace_ai.workspace_ai_hub import WorkspaceAIHub
-
-try:
- from core.signals import signals
-except Exception:
- signals = None
-
-try:
- from core.self_awareness.introspector import build_self_explain_text
-except Exception:
- build_self_explain_text = None
-
-
-@dataclass
-class FocusTimerState:
- running: bool = False
- minutes: int = 0
- started_at: float = 0.0
- ends_at: float = 0.0
-
- def seconds_left(self) -> int:
- if not self.running:
- return 0
- return max(0, int(self.ends_at - time.time()))
-
-
-class WorkspaceController:
- """
- Connects AI routing -> real WorkspaceManager actions.
-
- Final demo:
- - unified command pipeline for UI + voice (source tracked)
- - mode switch emits signals.mode_changed
- - focus timer emits focus_* signals for UI and avatar persona
- - self-awareness explain command for professor-friendly output
- """
-
- def __init__(self, workspace_manager):
- self.wm = workspace_manager
- self.router = WorkspaceAIRouter()
- self.hub = WorkspaceAIHub()
- self.active_mode = "study"
- self.focus = FocusTimerState()
-
- def handle_command(self, text: str, *, source: str = "ui") -> Dict[str, Any]:
- t = (text or "").strip()
- route = self.router.route(t)
- action = route.get("action")
- payload = route.get("payload") or {}
-
- meta = {"source": source, "ts": route.get("ts")}
-
- # broadcast (new + legacy)
- if signals is not None:
- try:
- if hasattr(signals, "command_received_ex"):
- signals.command_received_ex.emit(t, meta)
- if hasattr(signals, "command_received"):
- signals.command_received.emit(t)
- except Exception:
- pass
-
- # ---- greet ----
- if action == "greet":
- reply = "Hi 👋 I’m Etherea. Tell me a mode (study/coding/exam/calm) or say 'focus 25'."
- return {"ok": True, "action": "greet", "reply": reply, "meta": meta}
-
- # ---- mode switch ----
- if action == "set_mode":
- mode = str(payload.get("mode", "study"))
- return self.apply_mode(mode, meta=meta)
-
- # ---- save session ----
- if action == "save_session":
- path = self.wm.save_session()
- return {"ok": True, "action": "save_session", "file": path, "meta": meta}
-
- # ---- resume session ----
- if action == "resume_session":
- result = self.wm.resume_last_session()
- return {"ok": True, "action": "resume_session", "result": result, "meta": meta}
-
- # ---- focus timer ----
- if action == "start_focus_timer":
- minutes = int(payload.get("minutes", 25))
- return self.start_focus(minutes, meta=meta)
-
- if action == "stop_focus_timer":
- return self.stop_focus(meta=meta)
-
- # ---- summarize text (simple local) ----
- if action == "summarize_text":
- txt = payload.get("text", "")
- summary = self._quick_summary(txt)
- return {"ok": True, "action": "summarize_text", "summary": summary, "meta": meta}
-
- # ---- self explain ----
- if action == "self_explain":
- if build_self_explain_text is None:
- explain = self._static_self_explain()
- else:
- explain = build_self_explain_text()
- return {"ok": True, "action": "self_explain", "text": explain, "meta": meta}
-
- # ---- unknown ----
- return {"ok": False, "action": "unknown", "text": t, "meta": meta}
-
- def apply_mode(self, mode: str, *, meta: Optional[dict] = None) -> Dict[str, Any]:
- meta = meta or {"source": "ui"}
- self.active_mode = mode
- profile = self.hub.get_profile(mode) or {}
- plan = self.hub.plan(f"{mode} mode")
-
- try:
- self.wm.active_mode = mode
- self.wm.active_profile = profile
- except Exception:
- pass
-
- if signals is not None and hasattr(signals, "mode_changed"):
- try:
- signals.mode_changed.emit(mode, meta)
- except Exception:
- pass
-
- return {
- "ok": True,
- "action": "set_mode",
- "mode": mode,
- "profile": profile,
- "ai_plan": {"mode": getattr(plan, "mode", mode), "tasks": getattr(plan, "tasks", [])},
- "meta": meta,
- }
-
- def start_focus(self, minutes: int, *, meta: Optional[dict] = None) -> Dict[str, Any]:
- meta = meta or {"source": "ui"}
- minutes = max(1, min(240, int(minutes)))
- now = time.time()
- self.focus = FocusTimerState(True, minutes, now, now + minutes * 60)
-
- # mode hint: deep_work
- try:
- self.apply_mode("deep_work", meta={**meta, "reason": "focus_started"})
- except Exception:
- pass
-
- if signals is not None and hasattr(signals, "focus_started"):
- try:
- signals.focus_started.emit(minutes, meta)
- except Exception:
- pass
-
- return {"ok": True, "action": "start_focus_timer", "minutes": minutes, "ends_in_s": minutes * 60, "meta": meta}
-
- def stop_focus(self, *, meta: Optional[dict] = None) -> Dict[str, Any]:
- meta = meta or {"source": "ui"}
- was_running = bool(self.focus.running)
- self.focus.running = False
-
- if signals is not None and hasattr(signals, "focus_stopped"):
- try:
- signals.focus_stopped.emit({**meta, "was_running": was_running})
- except Exception:
- pass
-
- return {"ok": True, "action": "stop_focus_timer", "was_running": was_running, "meta": meta}
-
- def focus_seconds_left(self) -> int:
- return self.focus.seconds_left()
-
- def _quick_summary(self, text: str) -> str:
- t = (text or "").strip()
- if not t:
- return ""
- lines = [ln.strip() for ln in t.splitlines() if ln.strip()]
- return " • " + "\n • ".join(lines[:5])
-
- def _static_self_explain(self) -> str:
- return (
- "Etherea is a desktop-first living OS prototype.\n"
- "- UI: PySide6 (main_window_v2.py) with Avatar + Aurora + Console.\n"
- "- Workspace: WorkspaceManager + adapters (PDF/code/text) + AI routing.\n"
- "- Avatar: emotional persona driven by EI signals + mode.\n"
- "- Voice: STT (SpeechRecognition) + TTS (Edge TTS) routed into the same command pipeline.\n"
- "Ask: 'study mode', 'coding mode', 'exam mode', 'calm mode', or 'focus 25'."
-)
diff --git a/core/workspace_ai/workspace_profiles.json b/core/workspace_ai/workspace_profiles.json
deleted file mode 100644
index 03f699c..0000000
--- a/core/workspace_ai/workspace_profiles.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "study": {
- "music_profile": "focus_calm",
- "ui_layout": "learning",
- "notifications": "minimal",
- "ring_mode": "learning",
- "avatar_tone": "gentle_focus"
- },
- "coding": {
- "music_profile": "flow",
- "ui_layout": "builder",
- "notifications": "minimal",
- "ring_mode": "builder",
- "avatar_tone": "sharp_helpful"
- },
- "meeting": {
- "music_profile": "soft",
- "ui_layout": "meeting",
- "notifications": "standard",
- "ring_mode": "meeting",
- "avatar_tone": "professional"
- },
- "deep_work": {
- "music_profile": "deep_focus",
- "ui_layout": "deep_work",
- "notifications": "silent",
- "ring_mode": "deep_work",
- "avatar_tone": "quiet_flow"
- },
- "exam": {
- "music_profile": "silence",
- "ui_layout": "exam",
- "notifications": "silent",
- "ring_mode": "exam",
- "avatar_tone": "strict_calm"
- },
- "calm": {
- "music_profile": "ambient",
- "ui_layout": "calm",
- "notifications": "minimal",
- "ring_mode": "calm",
- "avatar_tone": "gentle_support"
- }
-}
diff --git a/core/workspace_manager.py b/core/workspace_manager.py
deleted file mode 100644
index 782e988..0000000
--- a/core/workspace_manager.py
+++ /dev/null
@@ -1,132 +0,0 @@
-"""
-Unified Workspace Manager (Foundation)
-- Single workspace for all file types (no split)
-- Adapter-based extensibility
-- Per-file agent attachment
-- Seal/unseal workspace (read-only vs editable)
-- Unified save system
-"""
-
-from __future__ import annotations
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Dict, Optional, Any
-
-from core.workspace_ai.session_memory import save_snapshot, load_snapshot
-from core.adapters import get_adapter_for_path
-from core.agents import get_agent_for_adapter
-
-@dataclass
-class OpenFile:
- path: Path
- adapter: Any
- agent: Any
- sealed: bool = False
-
-class WorkspaceManager:
- def __init__(self, root: str = "workspace"):
- self.root = Path(root)
- self.root.mkdir(parents=True, exist_ok=True)
- self.open_files: Dict[str, OpenFile] = {}
-
- def open(self, filepath: str) -> OpenFile:
- p = Path(filepath)
- if not p.is_absolute():
- p = self.root / p
-
- adapter = get_adapter_for_path(p)
- adapter.read()
- agent = get_agent_for_adapter(adapter)
-
- f = OpenFile(path=p, adapter=adapter, agent=agent, sealed=False)
- self.open_files[str(p)] = f
- return f
-
- def seal(self, filepath: str):
- f = self._get(filepath)
- f.sealed = True
- f.adapter.set_readonly(True)
-
- def unseal(self, filepath: str):
- f = self._get(filepath)
- f.sealed = False
- f.adapter.set_readonly(False)
-
- def write(self, filepath: str, content: Any):
- f = self._get(filepath)
- if f.sealed:
- raise PermissionError("Workspace file is sealed (read-only).")
- f.adapter.write(content)
-
- def save(self, filepath: str):
- f = self._get(filepath)
- f.adapter.save()
-
- def highlight(self, filepath: str, pattern: str):
- f = self._get(filepath)
- return f.agent.highlight(pattern)
-
- def analyse(self, filepath: str):
- f = self._get(filepath)
- return f.agent.analyse()
-
- def _get(self, filepath: str) -> OpenFile:
- p = Path(filepath)
- if not p.is_absolute():
- p = self.root / p
- key = str(p)
- if key not in self.open_files:
- return self.open(key)
- return self.open_files[key]
-
- # =========================
- # Session Memory (NEW)
- # =========================
- def get_session_snapshot(self) -> dict:
- """Return a lightweight snapshot of open workspace state."""
- files = []
- for key, f in self.open_files.items():
- files.append({
- "path": str(f.path),
- "sealed": bool(getattr(f, "sealed", False))
- })
- return {
- "open_files": files,
- "notes": "auto-saved workspace session"
- }
-
- def save_session(self) -> str:
- """Persist the latest snapshot to workspace/.etherea_last_session.json"""
- snap = self.get_session_snapshot()
- out = save_snapshot(snap)
- return str(out)
-
- def resume_last_session(self) -> dict:
- """Reopen last session's open files and restore sealed state."""
- snap = load_snapshot()
- restored = {"opened": 0, "sealed": 0, "missing": 0}
-
- open_files = snap.get("open_files") or []
- for item in open_files:
- path = item.get("path")
- sealed = bool(item.get("sealed", False))
- if not path:
- continue
-
- try:
- # open() normalizes workspace-relative paths internally
- f = self.open(path)
- restored["opened"] += 1
-
- if sealed:
- try:
- self.seal(path)
- restored["sealed"] += 1
- except Exception:
- pass
-
- except Exception:
- restored["missing"] += 1
-
- return restored
-
diff --git a/core/workspace_registry.py b/core/workspace_registry.py
deleted file mode 100644
index 1317a93..0000000
--- a/core/workspace_registry.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass, asdict, field
-from dataclasses import dataclass, asdict
-from datetime import datetime
-from pathlib import Path
-from typing import Dict, List, Optional
-import json
-
-
-@dataclass
-class WorkspaceRecord:
- workspace_id: str
- name: str
- workspace_type: str
- path: str
- created_at: str
- last_opened: Optional[str] = None
- last_saved: Optional[str] = None
- session_data: Dict[str, object] = field(default_factory=dict)
-
-
-class WorkspaceRegistry:
- def __init__(self, root: str = "workspace"):
- self.root = Path(root)
- self.root.mkdir(parents=True, exist_ok=True)
- self.state_path = self.root / ".etherea_workspaces.json"
- self._state: Dict[str, object] = {"workspaces": [], "current_id": None}
- self._load()
-
- def _load(self) -> None:
- if self.state_path.exists():
- self._state = json.loads(self.state_path.read_text(encoding="utf-8"))
-
- def _save(self) -> None:
- self.state_path.write_text(json.dumps(self._state, indent=2), encoding="utf-8")
-
- def _now(self) -> str:
- return datetime.utcnow().isoformat()
-
- def list_workspaces(self) -> List[WorkspaceRecord]:
- records: List[WorkspaceRecord] = []
- for item in self._state.get("workspaces", []):
- payload = {
- "workspace_id": item.get("workspace_id"),
- "name": item.get("name"),
- "workspace_type": item.get("workspace_type", "general"),
- "path": item.get("path"),
- "created_at": item.get("created_at"),
- "last_opened": item.get("last_opened"),
- "last_saved": item.get("last_saved"),
- "session_data": item.get("session_data") or {},
- }
- if payload["workspace_id"] and payload["name"]:
- records.append(WorkspaceRecord(**payload))
- return records
- return [WorkspaceRecord(**item) for item in self._state.get("workspaces", [])]
-
- def get_current(self) -> Optional[WorkspaceRecord]:
- current_id = self._state.get("current_id")
- if not current_id:
- return None
- return self.get_workspace(str(current_id))
-
- def get_workspace(self, workspace_id: str) -> Optional[WorkspaceRecord]:
- for item in self._state.get("workspaces", []):
- if item.get("workspace_id") == workspace_id:
- return WorkspaceRecord(**item)
- return None
-
- def create_workspace(self, name: Optional[str] = None) -> WorkspaceRecord:
- existing = self.list_workspaces()
- next_index = len(existing) + 1
- workspace_id = f"ws_{int(datetime.utcnow().timestamp())}_{next_index}"
- workspace_name = name or f"Workspace {next_index}"
- workspace_path = self.root / workspace_id
- workspace_path.mkdir(parents=True, exist_ok=True)
- record = WorkspaceRecord(
- workspace_id=workspace_id,
- name=workspace_name,
- workspace_type="general",
- path=str(workspace_path),
- created_at=self._now(),
- session_data={},
- )
- self._state["workspaces"] = [asdict(ws) for ws in existing] + [asdict(record)]
- self._state["current_id"] = workspace_id
- self._save()
- return record
-
- def open_workspace(self, workspace_id: str) -> Optional[WorkspaceRecord]:
- workspaces = self.list_workspaces()
- updated: List[dict] = []
- selected: Optional[WorkspaceRecord] = None
- for ws in workspaces:
- if ws.workspace_id == workspace_id:
- ws.last_opened = self._now()
- selected = ws
- updated.append(asdict(ws))
- if selected:
- self._state["workspaces"] = updated
- self._state["current_id"] = workspace_id
- self._save()
- return selected
-
- def resume_last(self) -> Optional[WorkspaceRecord]:
- current = self.get_current()
- if current:
- return current
- workspaces = self.list_workspaces()
- if not workspaces:
- return None
- latest = sorted(
- workspaces,
- key=lambda ws: ws.last_opened or ws.last_saved or ws.created_at,
- reverse=True,
- )[0]
- return self.open_workspace(latest.workspace_id)
-
- def save_snapshot(self, payload: Dict[str, object]) -> Optional[Path]:
- current = self.get_current()
- if not current:
- return None
- snapshot_path = Path(current.path) / "snapshot.json"
- snapshot = {
- "workspace_id": current.workspace_id,
- "name": current.name,
- "saved_at": self._now(),
- "payload": payload,
- }
- snapshot_path.write_text(json.dumps(snapshot, indent=2), encoding="utf-8")
- self._touch_saved(current.workspace_id, snapshot["saved_at"], payload)
- return snapshot_path
-
- def _touch_saved(self, workspace_id: str, timestamp: str, payload: Dict[str, object]) -> None:
- self._touch_saved(current.workspace_id, snapshot["saved_at"])
- return snapshot_path
-
- def _touch_saved(self, workspace_id: str, timestamp: str) -> None:
- workspaces = self.list_workspaces()
- updated: List[dict] = []
- for ws in workspaces:
- if ws.workspace_id == workspace_id:
- ws.last_saved = timestamp
- ws.session_data = payload
- updated.append(asdict(ws))
- self._state["workspaces"] = updated
- self._save()
diff --git a/debug_avatar.py b/debug_avatar.py
index bb75557..1da9654 100644
--- a/debug_avatar.py
+++ b/debug_avatar.py
@@ -2,7 +2,7 @@
import os
from PySide6.QtWidgets import QApplication, QWidget, QVBoxLayout
from PySide6.QtGui import QColor, QPalette
-from core.ui.avatar_heroine_widget import AvatarHeroineWidget
+from corund.ui.avatar_heroine_widget import AvatarHeroineWidget
def main():
app = QApplication(sys.argv)
diff --git a/debug_workspace.py b/debug_workspace.py
index 6627ce0..4c8c063 100644
--- a/debug_workspace.py
+++ b/debug_workspace.py
@@ -1,6 +1,6 @@
from PySide6.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout
-from core.ui.workspace_widget import WorkspaceWidget
-from core.ui.panels import GlassPanel
+from corund.ui.workspace_widget import WorkspaceWidget
+from corund.ui.panels import GlassPanel
import sys
def main():
diff --git a/etherea_launcher.py b/etherea_launcher.py
index b898340..3481f65 100644
--- a/etherea_launcher.py
+++ b/etherea_launcher.py
@@ -51,7 +51,7 @@ def _bootlog(msg: str):
# Ensure project root is in path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
-from core.ui.main_window_v2 import EthereaMainWindowV2
+from corund.ui.main_window_v2 import EthereaMainWindowV2
if __name__ == "__main__":
diff --git a/etherea_safe_check.py b/etherea_safe_check.py
index 6551731..7319e2e 100644
--- a/etherea_safe_check.py
+++ b/etherea_safe_check.py
@@ -86,18 +86,18 @@ def main():
_title("2) Project Modules Import Check")
modules = [
- "core.audio_analysis.beat_detector",
- "core.avatar_motion.dance_planner",
-
- "core.avatar_motion.motion_controller",
-
- "core.audio_engine",
- "core.voice_engine",
- "core.workspace_manager",
- "core.avatar_system",
- "core.avatar_engine",
- "core.emotion_mapper",
- "core.ui.main_window_v2",
+ "corund.audio_analysis.beat_detector",
+ "corund.avatar_motion.dance_planner",
+
+ "corund.avatar_motion.motion_controller",
+
+ "corund.audio_engine",
+ "corund.voice_engine",
+ "corund.workspace_manager",
+ "corund.avatar_system",
+ "corund.avatar_engine",
+ "corund.emotion_mapper",
+ "corund.ui.main_window_v2",
]
mod_ok = {}
@@ -134,9 +134,9 @@ def main():
_title("4) Feature Tests (No Crash Mode)")
# Audio engine test (only if pygame present + module import OK)
- if mod_ok.get("core.audio_engine") and dep_status.get("pygame"):
+ if mod_ok.get("corund.audio_engine") and dep_status.get("pygame"):
def _audio_test():
- from core.audio_engine import AudioEngine
+ from corund.audio_engine import AudioEngine
a = AudioEngine()
a.set_volume(0.3)
a.start()
@@ -150,9 +150,9 @@ def _audio_test():
print(f"{WARN} AudioEngine test skipped (missing pygame or audio module)")
# Voice TTS test (won't fail hard)
- if mod_ok.get("core.voice_engine") and dep_status.get("pyttsx3"):
+ if mod_ok.get("corund.voice_engine") and dep_status.get("pyttsx3"):
def _tts_test():
- from core.voice_engine import VoiceEngine
+ from corund.voice_engine import VoiceEngine
v = VoiceEngine()
# If VoiceEngine has speak()
if hasattr(v, "speak"):
@@ -164,9 +164,9 @@ def _tts_test():
print(f"{WARN} Voice TTS test skipped (missing pyttsx3 or voice module)")
# Workspace test (basic instantiation)
- if mod_ok.get("core.workspace_manager"):
+ if mod_ok.get("corund.workspace_manager"):
def _ws_test():
- from core.workspace_manager import WorkspaceManager
+ from corund.workspace_manager import WorkspaceManager
w = WorkspaceManager()
return type(w).__name__
@@ -175,9 +175,9 @@ def _ws_test():
print(f"{WARN} Workspace test skipped (workspace module failed import)")
# Avatar system basic init (no OpenAI call)
- if mod_ok.get("core.avatar_system"):
+ if mod_ok.get("corund.avatar_system"):
def _avatar_test():
- from core.avatar_system import AvatarSystem
+ from corund.avatar_system import AvatarSystem
a = AvatarSystem()
return type(a).__name__
@@ -186,9 +186,9 @@ def _avatar_test():
print(f"{WARN} Avatar test skipped (avatar_system import failed)")
# UI class import test (doesn't launch full GUI)
- if mod_ok.get("core.ui.main_window_v2") and dep_status.get("PySide6"):
+ if mod_ok.get("corund.ui.main_window_v2") and dep_status.get("PySide6"):
def _ui_test():
- from core.ui.main_window_v2 import EthereaMainWindowV2
+ from corund.ui.main_window_v2 import EthereaMainWindowV2
return EthereaMainWindowV2.__name__
_safe_call("UI class available (no window open)", _ui_test)
@@ -216,7 +216,7 @@ def _ui_test():
_title("6) Summary")
# core pass criteria: at least UI imports + workspace imports
- core_pass = mod_ok.get("core.workspace_manager") or mod_ok.get("core.ui.main_window_v2")
+ core_pass = mod_ok.get("corund.workspace_manager") or mod_ok.get("corund.ui.main_window_v2")
print(f"{OK if core_pass else WARN} Core system import health: {core_pass}")
print(f"{OK if dep_status.get('openai') else WARN} OpenAI SDK present locally: {dep_status.get('openai')} (OK to be false on Termux)")
diff --git a/etherea_workspace_cli.py b/etherea_workspace_cli.py
index 6403965..916c694 100644
--- a/etherea_workspace_cli.py
+++ b/etherea_workspace_cli.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from core.workspace_manager import WorkspaceManager
-from core.workspace_ai.workspace_controller import WorkspaceController
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_ai.workspace_controller import WorkspaceController
def main():
diff --git a/logs/autofix_20260129_183657/main_window_v2.py b/logs/autofix_20260129_183657/main_window_v2.py
index daff92d..461cb69 100644
--- a/logs/autofix_20260129_183657/main_window_v2.py
+++ b/logs/autofix_20260129_183657/main_window_v2.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-from core.ui.command_palette import CommandPalette
-from core.workspace_ai.workspace_controller import WorkspaceController
+from corund.ui.command_palette import CommandPalette
+from corund.workspace_ai.workspace_controller import WorkspaceController
from PySide6.QtWidgets import (
# -*- coding: utf-8 -*-
@@ -22,25 +22,25 @@
QFrame,
)
-from core.ui.command_palette import CommandPalette
-from core.ui.avatar_heroine_widget import AvatarHeroineWidget
-from core.ui.aurora_canvas_widget import AuroraCanvasWidget
-
-from core.gestures.gesture_engine import GestureEngine
-from core.gestures.presets import regression_preset
-from core.behavior.behavior_planner import plan_behavior
-from core.avatar_motion.motion_controller import AvatarMotionController
-from core.ui.beat_sync import BeatSyncScheduler
-from core.audio_analysis.beat_detector import estimate_bpm_and_beats
-from core.audio_analysis.song_resolver import resolve_song
-from core.audio_analysis.beat_to_ui import beats_to_ui_effects
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.os_adapter import OSAdapter
-from core.os_pipeline import OSPipeline
+from corund.ui.command_palette import CommandPalette
+from corund.ui.avatar_heroine_widget import AvatarHeroineWidget
+from corund.ui.aurora_canvas_widget import AuroraCanvasWidget
+
+from corund.gestures.gesture_engine import GestureEngine
+from corund.gestures.presets import regression_preset
+from corund.behavior.behavior_planner import plan_behavior
+from corund.avatar_motion.motion_controller import AvatarMotionController
+from corund.ui.beat_sync import BeatSyncScheduler
+from corund.audio_analysis.beat_detector import estimate_bpm_and_beats
+from corund.audio_analysis.song_resolver import resolve_song
+from corund.audio_analysis.beat_to_ui import beats_to_ui_effects
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
+from corund.os_adapter import OSAdapter
+from corund.os_pipeline import OSPipeline
def _etherea_ui_log(msg: str):
@@ -49,47 +49,47 @@ def _etherea_ui_log(msg: str):
f.write(msg + "\n")
except Exception:
pass
-from core.workspace_ai.workspace_controller import WorkspaceController
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
+from corund.workspace_ai.workspace_controller import WorkspaceController
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
# 🧠 AppState drives expressive_mode ("dance"/"humming"/"idle") used by AvatarHeroineWidget
try:
- from core.state import AppState
+ from corund.state import AppState
except Exception:
AppState = None
# optional gesture / beat sync extras (kept safe)
try:
- from core.gestures.gesture_engine import GestureEngine
- from core.gestures.presets import regression_preset
+ from corund.gestures.gesture_engine import GestureEngine
+ from corund.gestures.presets import regression_preset
except Exception:
GestureEngine = None
regression_preset = None
try:
- from core.ui.beat_sync import BeatSyncScheduler
+ from corund.ui.beat_sync import BeatSyncScheduler
except Exception:
BeatSyncScheduler = None
try:
- from core.voice_engine import get_voice_engine
+ from corund.voice_engine import get_voice_engine
except Exception:
get_voice_engine = None
# ✅ Use existing agent brain + optional FocusGuardian supervisor
try:
- from core.agent import IntelligentAgent, FocusGuardian
+ from corund.agent import IntelligentAgent, FocusGuardian
except Exception:
IntelligentAgent = None
FocusGuardian = None
# signals are optional; window still runs without them
try:
- from core.signals import signals
+ from corund.signals import signals
except Exception:
signals = None
diff --git a/logs/autofix_20260129_183657/voice_engine.py b/logs/autofix_20260129_183657/voice_engine.py
index ad555db..db0b593 100644
--- a/logs/autofix_20260129_183657/voice_engine.py
+++ b/logs/autofix_20260129_183657/voice_engine.py
@@ -22,15 +22,15 @@
_QT_OK = True
except Exception:
pyttsx3 = None
-from core.signals import signals
+from corund.signals import signals
try:
- from core.ui.style_engine import style_engine # Optional: may not exist in all builds
+ from corund.ui.style_engine import style_engine # Optional: may not exist in all builds
except Exception:
style_engine = None
-from core.event_bus import event_bus
-from core.event_model import create_event
+from corund.event_bus import event_bus
+from corund.event_model import create_event
-from core.behavior.behavior_planner import plan_behavior
+from corund.behavior.behavior_planner import plan_behavior
from typing import Optional
# Termux/CI-safe stubs
class QObject: # type: ignore
diff --git a/logs/autofix_20260129_184059/voice_engine.py b/logs/autofix_20260129_184059/voice_engine.py
index ad555db..db0b593 100644
--- a/logs/autofix_20260129_184059/voice_engine.py
+++ b/logs/autofix_20260129_184059/voice_engine.py
@@ -22,15 +22,15 @@
_QT_OK = True
except Exception:
pyttsx3 = None
-from core.signals import signals
+from corund.signals import signals
try:
- from core.ui.style_engine import style_engine # Optional: may not exist in all builds
+ from corund.ui.style_engine import style_engine # Optional: may not exist in all builds
except Exception:
style_engine = None
-from core.event_bus import event_bus
-from core.event_model import create_event
+from corund.event_bus import event_bus
+from corund.event_model import create_event
-from core.behavior.behavior_planner import plan_behavior
+from corund.behavior.behavior_planner import plan_behavior
from typing import Optional
# Termux/CI-safe stubs
class QObject: # type: ignore
diff --git a/logs/autofix_20260129_184236/aurora_state.py b/logs/autofix_20260129_184236/aurora_state.py
index a9537e3..bf8582e 100644
--- a/logs/autofix_20260129_184236/aurora_state.py
+++ b/logs/autofix_20260129_184236/aurora_state.py
@@ -4,7 +4,7 @@
from datetime import datetime
from typing import Callable, Dict, List, Optional
-from core.aurora_actions import ActionRegistry, ActionSpec
+from corund.aurora_actions import ActionRegistry, ActionSpec
@dataclass(frozen=True)
diff --git a/logs/final_fix_20260129_185512/voice_engine.py b/logs/final_fix_20260129_185512/voice_engine.py
index cd2bf1a..8a06b2d 100644
--- a/logs/final_fix_20260129_185512/voice_engine.py
+++ b/logs/final_fix_20260129_185512/voice_engine.py
@@ -22,15 +22,15 @@
_QT_OK = True
except Exception:
pyttsx3 = None
-from core.signals import signals
+from corund.signals import signals
try:
- from core.ui.style_engine import style_engine # Optional: may not exist in all builds
+ from corund.ui.style_engine import style_engine # Optional: may not exist in all builds
except Exception:
style_engine = None
-from core.event_bus import event_bus
-from core.event_model import create_event
+from corund.event_bus import event_bus
+from corund.event_model import create_event
-from core.behavior.behavior_planner import plan_behavior
+from corund.behavior.behavior_planner import plan_behavior
from typing import Optional
# Termux/CI-safe stubs
class QObject: # type: ignore
diff --git a/logs/fix_20260129_184646/main_window_v2.py b/logs/fix_20260129_184646/main_window_v2.py
index daff92d..461cb69 100644
--- a/logs/fix_20260129_184646/main_window_v2.py
+++ b/logs/fix_20260129_184646/main_window_v2.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-from core.ui.command_palette import CommandPalette
-from core.workspace_ai.workspace_controller import WorkspaceController
+from corund.ui.command_palette import CommandPalette
+from corund.workspace_ai.workspace_controller import WorkspaceController
from PySide6.QtWidgets import (
# -*- coding: utf-8 -*-
@@ -22,25 +22,25 @@
QFrame,
)
-from core.ui.command_palette import CommandPalette
-from core.ui.avatar_heroine_widget import AvatarHeroineWidget
-from core.ui.aurora_canvas_widget import AuroraCanvasWidget
-
-from core.gestures.gesture_engine import GestureEngine
-from core.gestures.presets import regression_preset
-from core.behavior.behavior_planner import plan_behavior
-from core.avatar_motion.motion_controller import AvatarMotionController
-from core.ui.beat_sync import BeatSyncScheduler
-from core.audio_analysis.beat_detector import estimate_bpm_and_beats
-from core.audio_analysis.song_resolver import resolve_song
-from core.audio_analysis.beat_to_ui import beats_to_ui_effects
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.os_adapter import OSAdapter
-from core.os_pipeline import OSPipeline
+from corund.ui.command_palette import CommandPalette
+from corund.ui.avatar_heroine_widget import AvatarHeroineWidget
+from corund.ui.aurora_canvas_widget import AuroraCanvasWidget
+
+from corund.gestures.gesture_engine import GestureEngine
+from corund.gestures.presets import regression_preset
+from corund.behavior.behavior_planner import plan_behavior
+from corund.avatar_motion.motion_controller import AvatarMotionController
+from corund.ui.beat_sync import BeatSyncScheduler
+from corund.audio_analysis.beat_detector import estimate_bpm_and_beats
+from corund.audio_analysis.song_resolver import resolve_song
+from corund.audio_analysis.beat_to_ui import beats_to_ui_effects
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
+from corund.os_adapter import OSAdapter
+from corund.os_pipeline import OSPipeline
def _etherea_ui_log(msg: str):
@@ -49,47 +49,47 @@ def _etherea_ui_log(msg: str):
f.write(msg + "\n")
except Exception:
pass
-from core.workspace_ai.workspace_controller import WorkspaceController
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
+from corund.workspace_ai.workspace_controller import WorkspaceController
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
# 🧠 AppState drives expressive_mode ("dance"/"humming"/"idle") used by AvatarHeroineWidget
try:
- from core.state import AppState
+ from corund.state import AppState
except Exception:
AppState = None
# optional gesture / beat sync extras (kept safe)
try:
- from core.gestures.gesture_engine import GestureEngine
- from core.gestures.presets import regression_preset
+ from corund.gestures.gesture_engine import GestureEngine
+ from corund.gestures.presets import regression_preset
except Exception:
GestureEngine = None
regression_preset = None
try:
- from core.ui.beat_sync import BeatSyncScheduler
+ from corund.ui.beat_sync import BeatSyncScheduler
except Exception:
BeatSyncScheduler = None
try:
- from core.voice_engine import get_voice_engine
+ from corund.voice_engine import get_voice_engine
except Exception:
get_voice_engine = None
# ✅ Use existing agent brain + optional FocusGuardian supervisor
try:
- from core.agent import IntelligentAgent, FocusGuardian
+ from corund.agent import IntelligentAgent, FocusGuardian
except Exception:
IntelligentAgent = None
FocusGuardian = None
# signals are optional; window still runs without them
try:
- from core.signals import signals
+ from corund.signals import signals
except Exception:
signals = None
diff --git a/logs/fix_20260129_184732/main_window_v2.py b/logs/fix_20260129_184732/main_window_v2.py
index f69a0b5..1cf8661 100644
--- a/logs/fix_20260129_184732/main_window_v2.py
+++ b/logs/fix_20260129_184732/main_window_v2.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-from core.ui.command_palette import CommandPalette
-from core.workspace_ai.workspace_controller import WorkspaceController
+from corund.ui.command_palette import CommandPalette
+from corund.workspace_ai.workspace_controller import WorkspaceController
from PySide6.QtWidgets import (
QApplication,
@@ -32,25 +32,25 @@
QFrame,
)
-from core.ui.command_palette import CommandPalette
-from core.ui.avatar_heroine_widget import AvatarHeroineWidget
-from core.ui.aurora_canvas_widget import AuroraCanvasWidget
-
-from core.gestures.gesture_engine import GestureEngine
-from core.gestures.presets import regression_preset
-from core.behavior.behavior_planner import plan_behavior
-from core.avatar_motion.motion_controller import AvatarMotionController
-from core.ui.beat_sync import BeatSyncScheduler
-from core.audio_analysis.beat_detector import estimate_bpm_and_beats
-from core.audio_analysis.song_resolver import resolve_song
-from core.audio_analysis.beat_to_ui import beats_to_ui_effects
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.os_adapter import OSAdapter
-from core.os_pipeline import OSPipeline
+from corund.ui.command_palette import CommandPalette
+from corund.ui.avatar_heroine_widget import AvatarHeroineWidget
+from corund.ui.aurora_canvas_widget import AuroraCanvasWidget
+
+from corund.gestures.gesture_engine import GestureEngine
+from corund.gestures.presets import regression_preset
+from corund.behavior.behavior_planner import plan_behavior
+from corund.avatar_motion.motion_controller import AvatarMotionController
+from corund.ui.beat_sync import BeatSyncScheduler
+from corund.audio_analysis.beat_detector import estimate_bpm_and_beats
+from corund.audio_analysis.song_resolver import resolve_song
+from corund.audio_analysis.beat_to_ui import beats_to_ui_effects
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
+from corund.os_adapter import OSAdapter
+from corund.os_pipeline import OSPipeline
def _etherea_ui_log(msg: str):
@@ -59,47 +59,47 @@ def _etherea_ui_log(msg: str):
f.write(msg + "\n")
except Exception:
pass
-from core.workspace_ai.workspace_controller import WorkspaceController
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
+from corund.workspace_ai.workspace_controller import WorkspaceController
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
# 🧠AppState drives expressive_mode ("dance"/"humming"/"idle") used by AvatarHeroineWidget
try:
- from core.state import AppState
+ from corund.state import AppState
except Exception:
AppState = None
# optional gesture / beat sync extras (kept safe)
try:
- from core.gestures.gesture_engine import GestureEngine
- from core.gestures.presets import regression_preset
+ from corund.gestures.gesture_engine import GestureEngine
+ from corund.gestures.presets import regression_preset
except Exception:
GestureEngine = None
regression_preset = None
try:
- from core.ui.beat_sync import BeatSyncScheduler
+ from corund.ui.beat_sync import BeatSyncScheduler
except Exception:
BeatSyncScheduler = None
try:
- from core.voice_engine import get_voice_engine
+ from corund.voice_engine import get_voice_engine
except Exception:
get_voice_engine = None
# ✅ Use existing agent brain + optional FocusGuardian supervisor
try:
- from core.agent import IntelligentAgent, FocusGuardian
+ from corund.agent import IntelligentAgent, FocusGuardian
except Exception:
IntelligentAgent = None
FocusGuardian = None
# signals are optional; window still runs without them
try:
- from core.signals import signals
+ from corund.signals import signals
except Exception:
signals = None
diff --git a/logs/fix_20260129_185351/aurora_state.py b/logs/fix_20260129_185351/aurora_state.py
index a9537e3..bf8582e 100644
--- a/logs/fix_20260129_185351/aurora_state.py
+++ b/logs/fix_20260129_185351/aurora_state.py
@@ -4,7 +4,7 @@
from datetime import datetime
from typing import Callable, Dict, List, Optional
-from core.aurora_actions import ActionRegistry, ActionSpec
+from corund.aurora_actions import ActionRegistry, ActionSpec
@dataclass(frozen=True)
diff --git a/logs/fix_20260129_185351/voice_engine.py b/logs/fix_20260129_185351/voice_engine.py
index ad555db..db0b593 100644
--- a/logs/fix_20260129_185351/voice_engine.py
+++ b/logs/fix_20260129_185351/voice_engine.py
@@ -22,15 +22,15 @@
_QT_OK = True
except Exception:
pyttsx3 = None
-from core.signals import signals
+from corund.signals import signals
try:
- from core.ui.style_engine import style_engine # Optional: may not exist in all builds
+ from corund.ui.style_engine import style_engine # Optional: may not exist in all builds
except Exception:
style_engine = None
-from core.event_bus import event_bus
-from core.event_model import create_event
+from corund.event_bus import event_bus
+from corund.event_model import create_event
-from core.behavior.behavior_planner import plan_behavior
+from corund.behavior.behavior_planner import plan_behavior
from typing import Optional
# Termux/CI-safe stubs
class QObject: # type: ignore
diff --git a/logs/rescue_20260129_184429/aurora_state.py b/logs/rescue_20260129_184429/aurora_state.py
index f814db7..680eedd 100644
--- a/logs/rescue_20260129_184429/aurora_state.py
+++ b/logs/rescue_20260129_184429/aurora_state.py
@@ -4,7 +4,7 @@
from datetime import datetime
from typing import Callable, Dict, List, Optional
-from core.aurora_actions import ActionRegistry, ActionSpec
+from corund.aurora_actions import ActionRegistry, ActionSpec
@dataclass(frozen=True)
diff --git a/logs/rescue_20260129_184429/main_window_v2.py b/logs/rescue_20260129_184429/main_window_v2.py
index daff92d..461cb69 100644
--- a/logs/rescue_20260129_184429/main_window_v2.py
+++ b/logs/rescue_20260129_184429/main_window_v2.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-from core.ui.command_palette import CommandPalette
-from core.workspace_ai.workspace_controller import WorkspaceController
+from corund.ui.command_palette import CommandPalette
+from corund.workspace_ai.workspace_controller import WorkspaceController
from PySide6.QtWidgets import (
# -*- coding: utf-8 -*-
@@ -22,25 +22,25 @@
QFrame,
)
-from core.ui.command_palette import CommandPalette
-from core.ui.avatar_heroine_widget import AvatarHeroineWidget
-from core.ui.aurora_canvas_widget import AuroraCanvasWidget
-
-from core.gestures.gesture_engine import GestureEngine
-from core.gestures.presets import regression_preset
-from core.behavior.behavior_planner import plan_behavior
-from core.avatar_motion.motion_controller import AvatarMotionController
-from core.ui.beat_sync import BeatSyncScheduler
-from core.audio_analysis.beat_detector import estimate_bpm_and_beats
-from core.audio_analysis.song_resolver import resolve_song
-from core.audio_analysis.beat_to_ui import beats_to_ui_effects
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.os_adapter import OSAdapter
-from core.os_pipeline import OSPipeline
+from corund.ui.command_palette import CommandPalette
+from corund.ui.avatar_heroine_widget import AvatarHeroineWidget
+from corund.ui.aurora_canvas_widget import AuroraCanvasWidget
+
+from corund.gestures.gesture_engine import GestureEngine
+from corund.gestures.presets import regression_preset
+from corund.behavior.behavior_planner import plan_behavior
+from corund.avatar_motion.motion_controller import AvatarMotionController
+from corund.ui.beat_sync import BeatSyncScheduler
+from corund.audio_analysis.beat_detector import estimate_bpm_and_beats
+from corund.audio_analysis.song_resolver import resolve_song
+from corund.audio_analysis.beat_to_ui import beats_to_ui_effects
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
+from corund.os_adapter import OSAdapter
+from corund.os_pipeline import OSPipeline
def _etherea_ui_log(msg: str):
@@ -49,47 +49,47 @@ def _etherea_ui_log(msg: str):
f.write(msg + "\n")
except Exception:
pass
-from core.workspace_ai.workspace_controller import WorkspaceController
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
+from corund.workspace_ai.workspace_controller import WorkspaceController
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
# 🧠 AppState drives expressive_mode ("dance"/"humming"/"idle") used by AvatarHeroineWidget
try:
- from core.state import AppState
+ from corund.state import AppState
except Exception:
AppState = None
# optional gesture / beat sync extras (kept safe)
try:
- from core.gestures.gesture_engine import GestureEngine
- from core.gestures.presets import regression_preset
+ from corund.gestures.gesture_engine import GestureEngine
+ from corund.gestures.presets import regression_preset
except Exception:
GestureEngine = None
regression_preset = None
try:
- from core.ui.beat_sync import BeatSyncScheduler
+ from corund.ui.beat_sync import BeatSyncScheduler
except Exception:
BeatSyncScheduler = None
try:
- from core.voice_engine import get_voice_engine
+ from corund.voice_engine import get_voice_engine
except Exception:
get_voice_engine = None
# ✅ Use existing agent brain + optional FocusGuardian supervisor
try:
- from core.agent import IntelligentAgent, FocusGuardian
+ from corund.agent import IntelligentAgent, FocusGuardian
except Exception:
IntelligentAgent = None
FocusGuardian = None
# signals are optional; window still runs without them
try:
- from core.signals import signals
+ from corund.signals import signals
except Exception:
signals = None
diff --git a/logs/rescue_20260129_184429/voice_engine.py b/logs/rescue_20260129_184429/voice_engine.py
index adcd581..a865ca5 100644
--- a/logs/rescue_20260129_184429/voice_engine.py
+++ b/logs/rescue_20260129_184429/voice_engine.py
@@ -22,15 +22,15 @@
_QT_OK = True
except Exception:
pyttsx3 = None
-from core.signals import signals
+from corund.signals import signals
try:
-from core.ui.style_engine import style_engine # Optional: may not exist in all builds
+from corund.ui.style_engine import style_engine # Optional: may not exist in all builds
except Exception:
style_engine = None
-from core.event_bus import event_bus
-from core.event_model import create_event
+from corund.event_bus import event_bus
+from corund.event_model import create_event
-from core.behavior.behavior_planner import plan_behavior
+from corund.behavior.behavior_planner import plan_behavior
from typing import Optional
# Termux/CI-safe stubs
class QObject: # type: ignore
diff --git a/main.py b/main.py
index f1f369b..671e38c 100644
--- a/main.py
+++ b/main.py
@@ -8,7 +8,7 @@ def load_dotenv(*a, **k):
from PySide6.QtWidgets import QApplication
-from core.app_controller import AppController
+from corund.app_controller import AppController
def main() -> int:
diff --git a/scripts/aurora_integration_test.py b/scripts/aurora_integration_test.py
index 22a8768..6aacd88 100644
--- a/scripts/aurora_integration_test.py
+++ b/scripts/aurora_integration_test.py
@@ -6,11 +6,11 @@
sys.path.append(str(Path(__file__).resolve().parents[1]))
-from core.aurora_actions import ActionRegistry
-from core.aurora_pipeline import AuroraDecisionPipeline
-from core.aurora_state import AuroraStateStore
-from core.workspace_manager import WorkspaceManager
-from core.workspace_registry import WorkspaceRegistry
+from corund.aurora_actions import ActionRegistry
+from corund.aurora_pipeline import AuroraDecisionPipeline
+from corund.aurora_state import AuroraStateStore
+from corund.workspace_manager import WorkspaceManager
+from corund.workspace_registry import WorkspaceRegistry
def main() -> None:
diff --git a/scripts/avatar_behavior_test.py b/scripts/avatar_behavior_test.py
index 5d87892..c716105 100644
--- a/scripts/avatar_behavior_test.py
+++ b/scripts/avatar_behavior_test.py
@@ -5,8 +5,8 @@
sys.path.append(str(Path(__file__).resolve().parents[1]))
-from core.avatar_behavior import AvatarBehaviorEngine
-from core.runtime_state import RuntimeState
+from corund.avatar_behavior import AvatarBehaviorEngine
+from corund.runtime_state import RuntimeState
def show(label: str, response) -> None:
diff --git a/scripts/avatar_visual_state_test.py b/scripts/avatar_visual_state_test.py
index a7ba4d5..c078482 100644
--- a/scripts/avatar_visual_state_test.py
+++ b/scripts/avatar_visual_state_test.py
@@ -5,8 +5,8 @@
sys.path.append(str(Path(__file__).resolve().parents[1]))
-from core.avatar_visuals import compute_visual_state
-from core.runtime_state import RuntimeState
+from corund.avatar_visuals import compute_visual_state
+from corund.runtime_state import RuntimeState
def show(label: str, state: RuntimeState) -> None:
diff --git a/scripts/os_integration_test.py b/scripts/os_integration_test.py
index df14008..76e1eaf 100644
--- a/scripts/os_integration_test.py
+++ b/scripts/os_integration_test.py
@@ -5,9 +5,9 @@
sys.path.append(str(Path(__file__).resolve().parents[1]))
-from core.event_bus import event_bus
-from core.os_adapter import OSAdapter
-from core.os_pipeline import OSPipeline, OSOverrides
+from corund.event_bus import event_bus
+from corund.os_adapter import OSAdapter
+from corund.os_pipeline import OSPipeline, OSOverrides
def main() -> None:
diff --git a/sensors/hid_sensor.py b/sensors/hid_sensor.py
index 59adf26..a7e1e6b 100644
--- a/sensors/hid_sensor.py
+++ b/sensors/hid_sensor.py
@@ -5,7 +5,7 @@
except Exception:
np = None # optional on Termux/CI
from pynput import mouse, keyboard
-from core.signals import signals
+from corund.signals import signals
class HIDSensor:
diff --git a/sensors/keyboard_sensor.py b/sensors/keyboard_sensor.py
index 2ae0984..4d61cab 100644
--- a/sensors/keyboard_sensor.py
+++ b/sensors/keyboard_sensor.py
@@ -1,5 +1,5 @@
from pynput import keyboard
-from core.signals import signals
+from corund.signals import signals
import time
import threading
diff --git a/sensors/mouse_sensor.py b/sensors/mouse_sensor.py
index f89e00e..6ec4087 100644
--- a/sensors/mouse_sensor.py
+++ b/sensors/mouse_sensor.py
@@ -1,5 +1,5 @@
from pynput import mouse
-from core.signals import signals
+from corund.signals import signals
import time
import math
try:
diff --git a/test_voice_isolated.py b/test_voice_isolated.py
index 6edb519..e1308a9 100644
--- a/test_voice_isolated.py
+++ b/test_voice_isolated.py
@@ -6,7 +6,7 @@
try:
print("1. Importing Voice Engine...")
- from core.voice_engine import VoiceEngine
+ from corund.voice_engine import VoiceEngine
print("2. Initializing Voice Engine...")
ve = VoiceEngine()
diff --git a/tests/test_ei_hierarchy.py b/tests/test_ei_hierarchy.py
index 7a39b2f..f3fa80c 100644
--- a/tests/test_ei_hierarchy.py
+++ b/tests/test_ei_hierarchy.py
@@ -1,6 +1,6 @@
import pytest
import time
-from core.ei_engine import EIEngine
+from corund.ei_engine import EIEngine
def test_hierarchical_ei_initial():
diff --git a/tests/test_ei_logic.py b/tests/test_ei_logic.py
index 175c790..de55411 100644
--- a/tests/test_ei_logic.py
+++ b/tests/test_ei_logic.py
@@ -1,5 +1,5 @@
import pytest
-from core.ei_engine import EIEngine
+from corund.ei_engine import EIEngine
def test_ei_engine_initial_state():
diff --git a/tests/test_mapping_constraints.py b/tests/test_mapping_constraints.py
index 861bb8a..148d843 100644
--- a/tests/test_mapping_constraints.py
+++ b/tests/test_mapping_constraints.py
@@ -1,5 +1,5 @@
import pytest
-from core.emotion_mapper import mapper
+from corund.emotion_mapper import mapper
def test_mapper_easing():
diff --git a/tests/test_memory.py b/tests/test_memory.py
index 010fca0..aee73d1 100644
--- a/tests/test_memory.py
+++ b/tests/test_memory.py
@@ -5,8 +5,8 @@
except Exception:
np = None # optional on Termux/CI
import pytest
-from core.database import Database
-from core.memory_store import MemoryStore
+from corund.database import Database
+from corund.memory_store import MemoryStore
DB_TEST_PATH = "data/test_etherea.db"
diff --git a/tests/test_sensors.py b/tests/test_sensors.py
index ade2d70..9937956 100644
--- a/tests/test_sensors.py
+++ b/tests/test_sensors.py
@@ -1,7 +1,7 @@
import time
import pytest
from sensors.hid_sensor import HIDSensor
-from core.signals import signals
+from corund.signals import signals
def test_hid_sensor_initialization():
diff --git a/tests/test_tutorial.py b/tests/test_tutorial.py
index a7a805c..aeed322 100644
--- a/tests/test_tutorial.py
+++ b/tests/test_tutorial.py
@@ -2,8 +2,8 @@
import os
sys.path.append(os.getcwd())
-from core.tutorial_flow import TutorialFlow
-from core.avatar_engine import AvatarEngine
+from corund.tutorial_flow import TutorialFlow
+from corund.avatar_engine import AvatarEngine
import unittest.mock
# Mock AvatarEngine to avoid API key error
diff --git a/verify_soul.py b/verify_soul.py
index 15b1c10..5d4b1c9 100644
--- a/verify_soul.py
+++ b/verify_soul.py
@@ -1,5 +1,5 @@
-from core.ui.avatar_heroine_widget import AvatarHeroineWidget, EI
-from core.state import AppState
+from corund.ui.avatar_heroine_widget import AvatarHeroineWidget, EI
+from corund.state import AppState
from PySide6.QtWidgets import QApplication
import sys
import time
diff --git a/verify_spine.py b/verify_spine.py
index f1200b6..484f8e0 100644
--- a/verify_spine.py
+++ b/verify_spine.py
@@ -1,9 +1,9 @@
import os
import json
-from core.tools.router import ToolRouter
-from core.agent import IntelligentAgent
-from core.voice import VoiceEngine
-from core.state import AppState
+from corund.tools.router import ToolRouter
+from corund.agent import IntelligentAgent
+from corund.voice import VoiceEngine
+from corund.state import AppState
def run_verification():
print("--- ETHEREA AGENTIC VERIFICATION ---")