diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 0308c239..9ebace40 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,12 +5,20 @@ // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile "image": "mcr.microsoft.com/devcontainers/python:0-3.11", "features": { + "ghcr.io/devcontainers/features/docker-in-docker:2": { + "version": "latest" + }, "ghcr.io/devcontainers/features/python:1": {}, "ghcr.io/devcontainers-extra/features/black:2": {}, - "ghcr.io/devcontainers/features/azure-cli:1": {}, + "ghcr.io/devcontainers/features/azure-cli:1": { + "installBicep": true, + "installUsingPython": true, + "version": "2.72.0", + "bicepVersion": "latest" + }, "ghcr.io/devcontainers/features/terraform:1": {}, - "ghcr.io/devcontainers/features/powershell:1": {} - "ghcr.io/devcontainers-extra/features/black:2": {} + "ghcr.io/devcontainers/features/powershell:1": {}, + "ghcr.io/azure/azure-dev/azd:latest": {} }, // Features to add to the dev container. More info: https://containers.dev/features. // "features": {}, diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 7062c29a..3e6be37d 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,3 +1,7 @@ +--- +applyTo: '**' +--- + # REPO SPECIFIC INSTRUCTIONS --- diff --git a/.github/instructions/javascript-lang.instructions.md b/.github/instructions/javascript-lang.instructions.md new file mode 100644 index 00000000..43d67240 --- /dev/null +++ b/.github/instructions/javascript-lang.instructions.md @@ -0,0 +1,21 @@ +--- +applyTo: '**/*.js' +--- + +# JavaScript Language Guide + +- Files should start with a comment of the file name. Ex: `// functions_personal_agents.js` + +- Imports should be grouped at the top of the document after the module docstring, unless otherwise indicated by the user or for performance reasons in which case the import should be as close as possible to the usage with a documented note as to why the import is not at the top of the file. + +- Use 4 spaces per indentation level. No tabs. + +- Code and definitions should occur after the imports block. + +- Use camelCase for variable and function names. Ex: `myVariable`, `getUserData()` + +- Use PascalCase for class names. Ex: `MyClass` + +- Do not use display:none. Instead add and remove the d-none class when hiding or showing elements. + +- Prefer inline html notifications or toast messages using Bootstrap alert classes over browser alert() calls. 
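+
+A short illustrative snippet following the conventions above (the file name, element ID, and function names are hypothetical):
+
+```javascript
+// functions_example_notifications.js
+
+// Show a Bootstrap alert instead of calling browser alert().
+function showSaveNotification(message) {
+    const alertBox = document.getElementById('saveAlert');
+    alertBox.textContent = message;
+    alertBox.classList.remove('d-none'); // reveal by removing d-none, not by toggling display:none
+}
+
+// Hide the alert again by re-adding the d-none class.
+function hideSaveNotification() {
+    const alertBox = document.getElementById('saveAlert');
+    alertBox.classList.add('d-none');
+}
+```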
\ No newline at end of file diff --git a/.github/instructions/location_of_feature_documentation.instructions.md b/.github/instructions/location_of_feature_documentation.instructions.md index ced7cb42..57a5c05f 100644 --- a/.github/instructions/location_of_feature_documentation.instructions.md +++ b/.github/instructions/location_of_feature_documentation.instructions.md @@ -7,7 +7,7 @@ applyTo: '**' ## Documentation Directory All new feature documentation should be placed in: ``` -..\docs\features\ +..\docs\explanation\features\ ``` ## File Naming Convention diff --git a/.github/instructions/location_of_fix_documentation.instructions.md b/.github/instructions/location_of_fix_documentation.instructions.md index f3eaee3a..311db387 100644 --- a/.github/instructions/location_of_fix_documentation.instructions.md +++ b/.github/instructions/location_of_fix_documentation.instructions.md @@ -7,7 +7,7 @@ applyTo: '**' ## Documentation Directory All bug fixes and issue resolution documentation should be placed in: ``` -..\docs\fixes\ +..\docs\explanation\fixes\ ``` ## File Naming Convention diff --git a/.github/instructions/python-lang.instructions.md b/.github/instructions/python-lang.instructions.md new file mode 100644 index 00000000..eff15aef --- /dev/null +++ b/.github/instructions/python-lang.instructions.md @@ -0,0 +1,15 @@ +--- +applyTo: '**/*.py' +--- + +# Python Language Guide + +- Files should start with a comment of the file name. Ex: `# functions_personal_agents.py` + +- Imports should be grouped at the top of the document after the module docstring, unless otherwise indicated by the user or for performance reasons, in which case the import should be as close as possible to the usage, with a documented note as to why the import is not at the top of the file. + +- Use 4 spaces per indentation level. No tabs. + +- Code and definitions should occur after the imports block. + +- Prefer log_event from functions_appinsights.py for logging activities. \ No newline at end of file diff --git a/.github/instructions/santize_settings_for_frontend_routes.instructions.md b/.github/instructions/santize_settings_for_frontend_routes.instructions.md new file mode 100644 index 00000000..bb10fcf0 --- /dev/null +++ b/.github/instructions/santize_settings_for_frontend_routes.instructions.md @@ -0,0 +1,143 @@ +--- +applyTo: '**' +--- + +# Security: Sanitize Settings for Frontend Routes + +## Critical Security Requirement + +**NEVER send raw settings or configuration data directly to the frontend without sanitization.** + +## Rule: Always Sanitize Settings Before Sending to Browser + +When building or working with Python frontend routes (Flask routes that render templates or return JSON to the browser), **ALL settings data MUST be sanitized** before being sent to prevent exposure of: +- API keys +- Connection strings +- Secrets and passwords +- Internal configuration details +- Database credentials +- Any other sensitive information + +## Required Pattern + +### Exception: Admin Routes should NEVER be sanitized, as sanitizing them breaks many admin features.
+ +### ✅ CORRECT - Sanitize Before Sending +```python +from functions_settings import get_settings, sanitize_settings_for_user + +@app.route('/some-page') +def some_page(): + # Get raw settings + settings = get_settings() + + # Sanitize before sending to frontend + public_settings = sanitize_settings_for_user(settings) + + # Use sanitized settings in template + return render_template('some_page.html', + app_settings=public_settings, + settings=public_settings) +``` + +### ❌ INCORRECT - Never Send Raw Settings +```python +# DANGEROUS - Exposes secrets to browser! +@app.route('/some-page') +def some_page(): + settings = get_settings() + return render_template('some_page.html', + app_settings=settings) # ❌ NEVER DO THIS +``` + +## When This Rule Applies + +Apply this rule for: +- **Any route** that renders an HTML template (`render_template()`) +- **Any API endpoint** that returns JSON data containing settings (`jsonify()`) +- **Any frontend route** that passes configuration data to JavaScript +- **Dashboard/admin pages** that display configuration information +- **Settings/configuration pages** where users view system settings + +## Implementation Checklist + +When creating or modifying frontend routes: +1. ✅ Import `sanitize_settings_for_user` from `functions_settings` +2. ✅ Call `get_settings()` to retrieve raw settings +3. ✅ Call `sanitize_settings_for_user(settings)` to create safe version +4. ✅ Pass only the sanitized version to `render_template()` or `jsonify()` +5. ✅ Verify no raw settings objects bypass sanitization + +## Examples from Codebase + +### Control Center Route +```python +from functions_settings import get_settings, sanitize_settings_for_user + +@app.route('/admin/control-center', methods=['GET']) +@login_required +@admin_required +def control_center(): + # Get settings for configuration data + settings = get_settings() + public_settings = sanitize_settings_for_user(settings) + + # Get statistics + stats = get_control_center_statistics() + + # Send only sanitized settings to frontend + return render_template('control_center.html', + app_settings=public_settings, + settings=public_settings, + statistics=stats) +``` + +### API Endpoint Pattern +```python +@app.route('/api/get-config', methods=['GET']) +@login_required +def get_config(): + settings = get_settings() + public_settings = sanitize_settings_for_user(settings) + + return jsonify({ + 'success': True, + 'config': public_settings + }) +``` + +## What Gets Sanitized + +The `sanitize_settings_for_user()` function removes or masks: +- Azure OpenAI API keys +- Cosmos DB connection strings +- Azure Search admin keys +- Document Intelligence keys +- Authentication secrets +- Internal endpoint URLs +- Database credentials +- Any field containing 'key', 'secret', 'password', 'connection', etc. 
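+
+As a rough illustration of the masking rule described above, here is a minimal sketch. It is NOT the actual implementation; always call `sanitize_settings_for_user()` from `functions_settings.py` instead:
+
+```python
+# Illustrative sketch only - the real logic lives in functions_settings.py.
+SENSITIVE_MARKERS = ('key', 'secret', 'password', 'connection')
+
+def sanitize_example(settings: dict) -> dict:
+    """Return a copy of settings with sensitive-looking fields masked."""
+    safe = {}
+    for name, value in settings.items():
+        if isinstance(value, dict):
+            safe[name] = sanitize_example(value)  # recurse into nested settings
+        elif any(marker in name.lower() for marker in SENSITIVE_MARKERS):
+            safe[name] = '***REDACTED***'  # mask anything that looks like a credential
+        else:
+            safe[name] = value
+    return safe
+```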
+ +## Security Impact + +**Failure to sanitize settings can result in:** +- 🚨 Exposure of API keys in browser DevTools/Network tab +- 🚨 Secrets visible in HTML source code +- 🚨 Credentials leaked in JavaScript variables +- 🚨 Potential unauthorized access to Azure resources +- 🚨 Security vulnerabilities and data breaches + +## Code Review Checklist + +When reviewing code, verify: +- [ ] No `get_settings()` result is sent directly to frontend +- [ ] `sanitize_settings_for_user()` is called before rendering +- [ ] Template variables receiving settings use sanitized version +- [ ] API responses containing config use sanitized data +- [ ] No raw config objects in `render_template()` or `jsonify()` calls + +## Related Functions + +- `get_settings()` - Returns raw settings (DO NOT send to frontend) +- `sanitize_settings_for_user(settings)` - Returns safe settings (OK to send to frontend) +- Location: `functions_settings.py` \ No newline at end of file diff --git a/.github/instructions/update_release_notes.instructions.md b/.github/instructions/update_release_notes.instructions.md new file mode 100644 index 00000000..353cea48 --- /dev/null +++ b/.github/instructions/update_release_notes.instructions.md @@ -0,0 +1,90 @@ +--- +applyTo: '**' +--- + +# Release Notes Update Instructions + +## When to Update Release Notes + +After completing a code change (bug fix, new feature, enhancement, or breaking change), always ask the user: + +**"Would you like me to update the release notes in `docs/explanation/release_notes.md`?"** + +## If the User Confirms Yes + +Update the release notes file following these guidelines: + +### 1. Location +Release notes are located at: `docs/explanation/release_notes.md` + +### 2. Version Placement +- Add new entries under the **current version** from `config.py` +- If the version has changed, create a new version section at the TOP of the file +- Format: `### **(vX.XXX.XXX)**` + +### 3. Entry Categories + +Organize entries under the appropriate category: + +#### New Features +```markdown +#### New Features + +* **Feature Name** + * Brief description of what the feature does and its benefits. + * Additional details about functionality or configuration. + * (Ref: relevant files, components, or concepts) +``` + +#### Bug Fixes +```markdown +#### Bug Fixes + +* **Fix Name** + * Description of what was broken and how it was fixed. + * Impact or affected areas. + * (Ref: relevant files, functions, or components) +``` + +#### User Interface Enhancements +```markdown +#### User Interface Enhancements + +* **Enhancement Name** + * Description of UI/UX improvements. + * (Ref: relevant templates, CSS, or JavaScript files) +``` + +#### Breaking Changes +```markdown +#### Breaking Changes + +* **Change Name** + * Description of what changed and why. + * **Migration**: Steps users need to take (if any). +``` + +### 4. Entry Format Guidelines + +- **Bold the title** of each entry +- Use bullet points for details +- Include a `(Ref: ...)` line with relevant file names, functions, or concepts +- Keep descriptions concise but informative +- Focus on user-facing impact, not implementation details + +### 5. Example Entry + +```markdown +* **Custom Logo Display Fix** + * Fixed issue where custom logos uploaded via Admin Settings would only display on the admin page but not on other pages (chat, sidebar, landing page). + * Root cause was overly aggressive sanitization removing logo URLs from public settings. + * (Ref: logo display, settings sanitization, template conditionals) +``` + +### 6. 
Checklist Before Updating + +- [ ] Confirm the current version in `config.py` +- [ ] Determine the correct category (New Feature, Bug Fix, Enhancement, Breaking Change) +- [ ] Write a clear, user-focused description +- [ ] Include relevant file/component references +- [ ] Place entry under the correct version section diff --git a/.github/workflows/docker_image_publish.yml b/.github/workflows/docker_image_publish.yml index f9324f20..ef8732c3 100644 --- a/.github/workflows/docker_image_publish.yml +++ b/.github/workflows/docker_image_publish.yml @@ -1,4 +1,3 @@ - name: SimpleChat Docker Image Publish on: @@ -8,9 +7,7 @@ on: workflow_dispatch: jobs: - build: - runs-on: ubuntu-latest steps: @@ -18,26 +15,25 @@ jobs: uses: Azure/docker-login@v2 with: # Container registry username - username: ${{ secrets.ACR_USERNAME }} + username: ${{ secrets.MAIN_ACR_USERNAME }} # Container registry password - password: ${{ secrets.ACR_PASSWORD }} + password: ${{ secrets.MAIN_ACR_PASSWORD }} # Container registry server url - login-server: ${{ secrets.ACR_LOGIN_SERVER }} + login-server: ${{ secrets.MAIN_ACR_LOGIN_SERVER }} + - name: Normalize branch name for tag + run: | + REF="${GITHUB_REF_NAME}" + SAFE=$(echo "$REF" \ + | tr '[:upper:]' '[:lower:]' \ + | sed 's#[^a-z0-9._-]#-#g' \ + | sed 's/^-*//;s/-*$//' \ + | cut -c1-128) + echo "BRANCH_TAG=$SAFE" >> "$GITHUB_ENV" - uses: actions/checkout@v3 - - name: Set up Node.js - uses: actions/setup-node@v4 - with: - node-version: 20 - - name: Install Ajv - run: npm install ajv@^8.0.0 ajv-cli@^5.0.0 - - name: Install Ajv - run: npm install ajv@^8.0.0 ajv-formats - - name: Generate standalone JSON schema validators - run: node scripts/generate-validators.mjs - name: Build the Docker image run: - docker build . --file application/single_app/Dockerfile --tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER; - docker tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:latest; - docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER; - docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:latest; + docker build . 
--file application/single_app/Dockerfile --tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER; + docker tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:latest; + docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER; + docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat:latest; \ No newline at end of file diff --git a/.github/workflows/docker_image_publish_dev.yml b/.github/workflows/docker_image_publish_dev.yml index 9882527a..a78e0c71 100644 --- a/.github/workflows/docker_image_publish_dev.yml +++ b/.github/workflows/docker_image_publish_dev.yml @@ -1,17 +1,16 @@ -name: SimpleChat Docker Image Publish (dev branch) +name: SimpleChat Docker Image Publish (development/staging branch) on: push: branches: - Development + - Staging workflow_dispatch: jobs: - - build: - + build-tomain: runs-on: ubuntu-latest steps: @@ -25,18 +24,53 @@ jobs: # Container registry server url login-server: ${{ secrets.ACR_LOGIN_SERVER }} + - name: Normalize branch name for tag + run: | + REF="${GITHUB_REF_NAME}" + SAFE=$(echo "$REF" \ + | tr '[:upper:]' '[:lower:]' \ + | sed 's#[^a-z0-9._-]#-#g' \ + | sed 's/^-*//;s/-*$//' \ + | cut -c1-128) + echo "BRANCH_TAG=$SAFE" >> "$GITHUB_ENV" + - uses: actions/checkout@v3 - - name: Set up Node.js - uses: actions/setup-node@v4 - with: - node-version: 20 - - name: Install Ajv - run: npm install ajv@^8.0.0 ajv-formats - - name: Generate standalone JSON schema validators - run: node scripts/generate-validators.mjs - name: Build the Docker image run: - docker build . --file application/single_app/Dockerfile --tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER; - docker tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:latest; - docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER; + docker build . --file application/single_app/Dockerfile --tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER; + docker tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:latest; + docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER; docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:latest; + + build-nadoyle: + runs-on: ubuntu-latest + + steps: + - name: Azure Container Registry Login + uses: Azure/docker-login@v2 + with: + # Container registry username + username: ${{ secrets.ACR_USERNAME_NADOYLE }} + # Container registry password + password: ${{ secrets.ACR_PASSWORD_NADOYLE }} + # Container registry server url + login-server: ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }} + + - name: Normalize branch name for tag + run: | + REF="${GITHUB_REF_NAME}" + SAFE=$(echo "$REF" \ + | tr '[:upper:]' '[:lower:]' \ + | sed 's#[^a-z0-9._-]#-#g' \ + | sed 's/^-*//;s/-*$//' \ + | cut -c1-128) + echo "BRANCH_TAG=$SAFE" >> "$GITHUB_ENV" + + - uses: actions/checkout@v3 + - name: Build the Docker image + run: + docker build . 
--file application/single_app/Dockerfile --tag ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER; + docker tag ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:latest; + docker push ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER; + docker push ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:latest; + diff --git a/.github/workflows/docker_image_publish_nadoyle.yml b/.github/workflows/docker_image_publish_nadoyle.yml index c39f99e4..0dd56e09 100644 --- a/.github/workflows/docker_image_publish_nadoyle.yml +++ b/.github/workflows/docker_image_publish_nadoyle.yml @@ -5,6 +5,7 @@ on: push: branches: - nadoyle + - feature/aifoundryagents workflow_dispatch: @@ -19,24 +20,26 @@ jobs: uses: Azure/docker-login@v2 with: # Container registry username - username: ${{ secrets.ACR_USERNAME }} + username: ${{ secrets.ACR_USERNAME_NADOYLE }} # Container registry password - password: ${{ secrets.ACR_PASSWORD }} + password: ${{ secrets.ACR_PASSWORD_NADOYLE }} # Container registry server url - login-server: ${{ secrets.ACR_LOGIN_SERVER }} - + login-server: ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }} + + - name: Normalize branch name for tag + run: | + REF="${GITHUB_REF_NAME}" + SAFE=$(echo "$REF" \ + | tr '[:upper:]' '[:lower:]' \ + | sed 's#[^a-z0-9._-]#-#g' \ + | sed 's/^-*//;s/-*$//' \ + | cut -c1-128) + echo "BRANCH_TAG=$SAFE" >> "$GITHUB_ENV" + - uses: actions/checkout@v3 - - name: Set up Node.js - uses: actions/setup-node@v4 - with: - node-version: 20 - - name: Install Ajv - run: npm install ajv@^8.0.0 ajv-formats - - name: Generate standalone JSON schema validators - run: node scripts/generate-validators.mjs - name: Build the Docker image run: - docker build . --file application/single_app/Dockerfile --tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER; - docker tag ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:latest; - docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:$(date +'%Y-%m-%d')_$GITHUB_RUN_NUMBER; - docker push ${{ secrets.ACR_LOGIN_SERVER }}/simple-chat-dev:latest; + docker build . 
--file application/single_app/Dockerfile --tag ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER; + docker tag ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:latest; + docker push ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:$(date +'%Y-%m-%d')_${BRANCH_TAG}_$GITHUB_RUN_NUMBER; + docker push ${{ secrets.ACR_LOGIN_SERVER_NADOYLE }}/simple-chat-dev:latest; diff --git a/.github/workflows/enforce-branch-flow.yml b/.github/workflows/enforce-branch-flow.yml new file mode 100644 index 00000000..1f81da24 --- /dev/null +++ b/.github/workflows/enforce-branch-flow.yml @@ -0,0 +1,31 @@ +name: Enforce Branch Protection Flow (Development → Staging → Main) + +on: + pull_request: + types: + - opened + - reopened + - synchronize + +jobs: + enforce-branch-flow: + runs-on: ubuntu-latest + steps: + - name: Fail if PR→staging doesn't come from development + if: > + github.event.pull_request.base.ref == 'staging' && + github.event.pull_request.head.ref != 'development' + run: | + echo "::error ::Pull requests into 'staging' must originate from branch 'development'." + exit 1 + + - name: Fail if PR→main doesn't come from staging + if: > + github.event.pull_request.base.ref == 'main' && + github.event.pull_request.head.ref != 'staging' + run: | + echo "::error ::Pull requests into 'main' must originate from branch 'staging'." + exit 1 + + - name: Branch flow validated + run: echo "✅ Branch flow validation passed." diff --git a/.github/workflows/enforce-dev-to-main.yml b/.github/workflows/enforce-dev-to-main.yml deleted file mode 100644 index dda5da8d..00000000 --- a/.github/workflows/enforce-dev-to-main.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Enforce PRs to main only from development - -on: - pull_request: - types: - - opened - - reopened - - synchronize - -jobs: - require-dev-base: - runs-on: ubuntu-latest - steps: - - name: Fail if PR→main doesn’t come from development - if: > - github.event.pull_request.base.ref == 'main' && - github.event.pull_request.head.ref != 'development' - run: | - echo "::error ::Pull requests into 'main' must originate from branch 'development'." - exit 1 diff --git a/.github/workflows/python-syntax-check.yml b/.github/workflows/python-syntax-check.yml new file mode 100644 index 00000000..34527de9 --- /dev/null +++ b/.github/workflows/python-syntax-check.yml @@ -0,0 +1,51 @@ +name: Python Syntax Check + +on: + pull_request: + branches: + - main + - Development + paths: + - 'application/single_app/**.py' + - '.github/workflows/python-syntax-check.yml' + +jobs: + syntax-check: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Run Python compilation check + run: | + cd application/single_app + echo "🔍 Running Python compilation checks on all .py files..." + failed_files=() + + for file in *.py; do + echo "" + echo "=== Compiling $file ===" + if python -m py_compile "$file" 2>&1; then + echo "✓ $file - OK" + else + echo "✗ $file - FAILED" + failed_files+=("$file") + fi + done + + echo "" + echo "================================" + if [ ${#failed_files[@]} -eq 0 ]; then + echo "✅ All Python files compiled successfully!" 
+ exit 0 + else + echo "❌ ${#failed_files[@]} file(s) failed compilation:" + printf ' - %s\n' "${failed_files[@]}" + exit 1 + fi diff --git a/.github/workflows/release-notes-check.yml b/.github/workflows/release-notes-check.yml new file mode 100644 index 00000000..2eb7cee1 --- /dev/null +++ b/.github/workflows/release-notes-check.yml @@ -0,0 +1,205 @@ +name: Release Notes Check + +on: + pull_request: + branches: + - Development + types: + - opened + - reopened + - synchronize + - edited + +jobs: + check-release-notes: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Get changed files + id: changed-files + uses: tj-actions/changed-files@v44 + with: + files_yaml: | + code: + - 'application/single_app/**/*.py' + - 'application/single_app/**/*.js' + - 'application/single_app/**/*.html' + - 'application/single_app/**/*.css' + release_notes: + - 'docs/explanation/release_notes.md' + config: + - 'application/single_app/config.py' + + - name: Check for feature/fix keywords in PR + id: check-keywords + env: + PR_TITLE: ${{ github.event.pull_request.title }} + PR_BODY: ${{ github.event.pull_request.body }} + run: | + echo "🔍 Analyzing PR title and body for feature/fix indicators..." + + # Convert to lowercase for case-insensitive matching + title_lower=$(echo "$PR_TITLE" | tr '[:upper:]' '[:lower:]') + body_lower=$(echo "$PR_BODY" | tr '[:upper:]' '[:lower:]') + + # Check for feature indicators + if echo "$title_lower $body_lower" | grep -qE "(feat|feature|add|new|implement|introduce|enhancement|improve)"; then + echo "has_feature=true" >> $GITHUB_OUTPUT + echo "📦 Feature-related keywords detected" + else + echo "has_feature=false" >> $GITHUB_OUTPUT + fi + + # Check for fix indicators + if echo "$title_lower $body_lower" | grep -qE "(fix|bug|patch|resolve|correct|repair|hotfix|issue)"; then + echo "has_fix=true" >> $GITHUB_OUTPUT + echo "🐛 Fix-related keywords detected" + else + echo "has_fix=false" >> $GITHUB_OUTPUT + fi + + - name: Determine if release notes update is required + id: require-notes + env: + CODE_CHANGED: ${{ steps.changed-files.outputs.code_any_changed }} + CONFIG_CHANGED: ${{ steps.changed-files.outputs.config_any_changed }} + RELEASE_NOTES_CHANGED: ${{ steps.changed-files.outputs.release_notes_any_changed }} + HAS_FEATURE: ${{ steps.check-keywords.outputs.has_feature }} + HAS_FIX: ${{ steps.check-keywords.outputs.has_fix }} + run: | + echo "" + echo "================================" + echo "📋 PR Analysis Summary" + echo "================================" + echo "Code files changed: $CODE_CHANGED" + echo "Config changed: $CONFIG_CHANGED" + echo "Release notes updated: $RELEASE_NOTES_CHANGED" + echo "Feature keywords found: $HAS_FEATURE" + echo "Fix keywords found: $HAS_FIX" + echo "================================" + echo "" + + # Determine if this PR likely needs release notes + needs_notes="false" + reason="" + + if [[ "$HAS_FEATURE" == "true" ]]; then + needs_notes="true" + reason="Feature-related keywords detected in PR title/body" + elif [[ "$HAS_FIX" == "true" ]]; then + needs_notes="true" + reason="Fix-related keywords detected in PR title/body" + elif [[ "$CODE_CHANGED" == "true" && "$CONFIG_CHANGED" == "true" ]]; then + needs_notes="true" + reason="Both code and config.py were modified" + fi + + echo "needs_notes=$needs_notes" >> $GITHUB_OUTPUT + echo "reason=$reason" >> $GITHUB_OUTPUT + + - name: Validate release notes update + env: + CODE_CHANGED: ${{ steps.changed-files.outputs.code_any_changed 
}} + RELEASE_NOTES_CHANGED: ${{ steps.changed-files.outputs.release_notes_any_changed }} + NEEDS_NOTES: ${{ steps.require-notes.outputs.needs_notes }} + REASON: ${{ steps.require-notes.outputs.reason }} + CODE_FILES: ${{ steps.changed-files.outputs.code_all_changed_files }} + run: | + echo "" + + if [[ "$NEEDS_NOTES" == "true" && "$RELEASE_NOTES_CHANGED" != "true" ]]; then + echo "⚠️ ==============================================" + echo "⚠️ RELEASE NOTES UPDATE RECOMMENDED" + echo "⚠️ ==============================================" + echo "" + echo "📝 Reason: $REASON" + echo "" + echo "This PR appears to contain changes that should be documented" + echo "in the release notes (docs/explanation/release_notes.md)." + echo "" + echo "📁 Code files changed:" + echo "$CODE_FILES" | tr ' ' '\n' | sed 's/^/ - /' + echo "" + echo "💡 Please consider adding an entry to release_notes.md describing:" + echo " • New features added" + echo " • Bug fixes implemented" + echo " • Breaking changes (if any)" + echo " • Files modified" + echo "" + echo "📖 Follow the existing format in release_notes.md" + echo "" + # Exit with warning (non-zero) to flag the PR but not block it + # Change 'exit 0' to 'exit 1' below to make this a hard requirement + exit 0 + elif [[ "$RELEASE_NOTES_CHANGED" == "true" ]]; then + echo "✅ Release notes have been updated - great job!" + elif [[ "$CODE_CHANGED" != "true" ]]; then + echo "ℹ️ No significant code changes detected - release notes update not required." + else + echo "ℹ️ Changes appear to be minor - release notes update optional." + fi + + echo "" + echo "✅ Release notes check completed successfully." + + - name: Post PR comment (when notes needed but missing) + if: steps.require-notes.outputs.needs_notes == 'true' && steps.changed-files.outputs.release_notes_any_changed != 'true' + uses: actions/github-script@v7 + with: + script: | + const reason = '${{ steps.require-notes.outputs.reason }}'; + + // Check if we already commented + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number + }); + + const botComment = comments.find(comment => + comment.user.type === 'Bot' && + comment.body.includes('📋 Release Notes Reminder') + ); + + if (!botComment) { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: `## 📋 Release Notes Reminder + + This PR appears to contain changes that should be documented in the release notes. + + **Reason:** ${reason} + + ### 📝 Please consider updating: + \`docs/explanation/release_notes.md\` + + ### Template for new features: + \`\`\`markdown + * **Feature Name** + * Brief description of the feature. + * **Key Details**: Important implementation notes. + * **Files Modified**: \`file1.py\`, \`file2.js\`. + * (Ref: related components, patterns) + \`\`\` + + ### Template for bug fixes: + \`\`\`markdown + * **Bug Fix Title** + * Description of what was fixed. + * **Root Cause**: What caused the issue. + * **Solution**: How it was resolved. + * **Files Modified**: \`file.py\`. + * (Ref: related issue numbers, components) + \`\`\` + + --- + *This is an automated reminder. 
If this PR doesn't require release notes (e.g., internal refactoring, documentation-only changes), you can ignore this message.*` + }); + } diff --git a/.gitignore b/.gitignore index 98ba158b..8a9839df 100644 --- a/.gitignore +++ b/.gitignore @@ -37,4 +37,8 @@ flask_session application/single_app/static/.DS_Store application/external_apps/bulkloader/map.csv -flask_session \ No newline at end of file +flask_session +**/abd_proto.py +**/my_chart.png +**/sample_pie.csv +**/sample_stacked_column.csv diff --git a/README.md b/README.md index 6fb38dba..31ea020b 100644 --- a/README.md +++ b/README.md @@ -8,106 +8,147 @@ Built with modularity in mind, the application offers a suite of powerful **opti The application utilizes **Azure Cosmos DB** for storing conversations, metadata, and settings, and is secured using **Azure Active Directory (Entra ID)** for authentication and fine-grained Role-Based Access Control (RBAC) via App Roles. Designed for enterprise use, it runs reliably on **Azure App Service** and supports deployment in both **Azure Commercial** and **Azure Government** cloud environments, offering a versatile tool for knowledge discovery, content generation, and collaborative AI-powered tasks within a secure, customizable, and Azure-native framework. -## Table of Contents - -- [Features](./docs/features.md) - - [Application Features](#features) - - [Architecture Diagram](#architecture-diagram) - - [Optional Features](./docs/features.md#optional-features) -- [Release Notes](./RELEASE_NOTES.md) -- [Roadmap (as of 8/20/25)](https://github.com/microsoft/simplechat/discussions/133) -- [Application Workflow](./docs/application_workflows.md) - - [Content Safety](./docs/application_workflows.md#content-safety---workflow) - - [Add your data (RAG Ingestion)](./docs/application_workflows.md#add-your-data-rag-ingestion) -- [Demos](#demos) - - [Upload document and review metadata](#upload-document-and-review-metadata) - - [Classify document and chat with content](#classify-document-and-chat-with-content) -- [Setup Instructions](./docs/setup_instructions.md) - - [Azure CLI with Powershell](./docs/setup_instructions.md#azure-cli-with-powershell) - - [Bicep](./docs/setup_instructions.md#bicep) - - [Terraform](./docs/setup_instructions.md#hashicorp-terraform) - - [Special Cases](./docs/setup_instructions_special.md) - - [Azure Government Configuration](./docs/setup_instructions_special.md#azure-government-configuration) - - [How to use Managed Identity](./docs/setup_instructions_special.md#how-to-use-managed-identity) - - [Enterprise Networking](./docs/setup_instructions_special.md#enterprise-networking) - -- [Admin Configuration](./docs/admin_configuration.md) -- [Application Scaling](./docs/application_scaling.md) - - [Azure App Service](./docs/application_scaling.md#azure-app-service) - - [Azure Cosmos DB](./docs/application_scaling.md#azure-cosmos-db) - - [Azure AI Search](./docs/application_scaling.md#azure-ai-search) - - [Azure AI / Cognitive Services](./docs/application_scaling.md#azure-ai--cognitive-services-openai-document-intelligence-etc) -- [FAQs](./docs/faqs.md) -- [External Apps Overview](./docs/external_apps_overview.md) - - [Bulk uploader utility](./docs/external_apps_overview.md#bulk-uploader-utility) - - [Database seeder utility](./docs/external_apps_overview.md#database-seeder-utility) +## Documentation -## Features +[Simple Chat Documentation | Simple Chat Documentation](https://microsoft.github.io/simplechat/) -- **Chat with AI**: Interact with an AI model based on Azure OpenAI’s 
GPT models. +## Quick Deploy -- **RAG with Hybrid Search**: Upload documents and perform hybrid searches (vector + keyword), retrieving relevant information from your files to augment AI responses. +[Detailed deployment Guide](./deployers/bicep/README.md) -- **Document Management**: Upload, store, and manage multiple versions of documents—personal ("Your Workspace") or group-level ("Group Workspaces"). +### Pre-Configuration: -- **Group Management**: Create and join groups to share access to group-specific documents, enabling collaboration with Role-Based Access Control (RBAC). +The following procedure must be completed with a user that has permissions to create an application registration in the users Entra tenant. -- **Ephemeral (Single-Convo) Documents**: Upload temporary documents available only during the current chat session, without persistent storage in Azure AI Search. +#### Create the application registration: -- **Conversation Archiving (Optional)**: Retain copies of user conversations—even after deletion from the UI—in a dedicated Cosmos DB container for audit, compliance, or legal requirements. +```powershell +cd ./deployers +``` -- **Content Safety (Optional)**: Integrate Azure AI Content Safety to review every user message *before* it reaches AI models, search indexes, or image generation services. Enforce custom filters and compliance policies, with an optional `SafetyAdmin` role for viewing violations. +Define your application name and your environment: -- **Feedback System (Optional)**: Allow users to rate AI responses (thumbs up/down) and provide contextual comments on negative feedback. Includes user and admin dashboards, governed by an optional `FeedbackAdmin` role. +``` +appName = +``` -- **Bing Web Search (Optional)**: Augment AI responses with live Bing search results, providing up-to-date information. Configurable via Admin Settings. +``` +environment = +``` -- **Image Generation (Optional)**: Enable on-demand image creation using Azure OpenAI's DALL-E models, controlled via Admin Settings. +The following script will create an Entra Enterprise Application, with an App Registration named *\*-*\*-ar for the web service called *\*-*\*-app. -- **Video Extraction (Optional)**: Utilize Azure Video Indexer to transcribe speech and perform Optical Character Recognition (OCR) on video frames. Segments are timestamp-chunked for precise retrieval and enhanced citations linking back to the video timecode. +> [!TIP] +> +> The web service name may be overriden with the `-AppServceName` parameter. -- **Audio Extraction (Optional)**: Leverage Azure Speech Service to transcribe audio files into timestamped text chunks, making audio content searchable and enabling enhanced citations linked to audio timecodes. +> [!TIP] +> +> A different expiration date for the secret which defaults to 180 days with the `-SecretExpirationDays` parameter. -- **Document Classification (Optional)**: Admins define custom classification types and associated colors. Users tag uploaded documents with these labels, which flow through to AI conversations, providing lineage and insight into data sensitivity or type. +```powershell +.\Initialize-EntraApplication.ps1 -AppName "" -Environment "" -AppRolesJsonPath "./azurecli/appRegistrationRoles.json" +``` -- **Enhanced Citation (Optional)**: Store processed, chunked files in Azure Storage (organized into user- and document-scoped folders). 
Display interactive citations in the UI—showing page numbers or timestamps—that link directly to the source document preview. +> [!NOTE] +> +> Be sure to save this information as it will not be available after the window is closed.* -- **Metadata Extraction (Optional)**: Apply an AI model (configurable GPT model via Admin Settings) to automatically generate keywords, two-sentence summaries, and infer author/date for uploaded documents. Allows manual override for richer search context. +```======================================== +App Registration Created Successfully! +Application Name: +Client ID: +Tenant ID: +Service Principal ID: +Client Secret: +Secret Expiration: +``` -- **File Processing Logs (Optional)**: Enable verbose logging for all ingestion pipelines (workspaces and ephemeral chat uploads) to aid in debugging, monitoring, and auditing file processing steps. +In addition, the script will note additional steps that must be taken for the app registration step to be completed. -- **Redis Cache (Optional)**: Integrate Azure Cache for Redis to provide a distributed, high-performance session store. This enables true horizontal scaling and high availability by decoupling user sessions from individual app instances. +1. Grant Admin Consent for API Permissions: -- **Authentication & RBAC**: Secure access via Azure Active Directory (Entra ID) using MSAL. Supports Managed Identities for Azure service authentication, group-based controls, and custom application roles (`Admin`, `User`, `CreateGroup`, `SafetyAdmin`, `FeedbackAdmin`). + - Navigate to Azure Portal > Entra ID > App registrations + - Find app: *\* + - Go to API permissions + - Click 'Grant admin consent for [Tenant]' -- **Backend Services**: +2. Assign Users/Groups to Enterprise Application: + - Navigate to Azure Portal > Entra ID > Enterprise applications + - Find app: *\* + - Go to Users and groups + - Add user/group assignments with appropriate app roles - - **Azure Cosmos DB**: Stores conversations, document metadata, user/group information, settings, and optionally archived chats and feedback. - - **Azure AI Search**: Powers efficient hybrid search and retrieval over personal and group documents. - - **Azure AI Document Intelligence**: Extracts text, layout, and structured data from PDFs, Office files, images, and more during ingestion. - - **Azure Cache for Redis**: (Optional) Provides a distributed cache for session data, enabling seamless scaling and improved reliability. +3. 
Store the Client Secret Securely: + - Save the client secret in Azure Key Vault or secure credential store + - The secret value is shown above and will not be displayed again -- **Supported File Types**: +#### Configure AZD Environment - - Text: `txt`, `md`, `html`, `json` +Using the bash terminal in Visual Studio Code - * Documents: `pdf`, `docx`, `pptx`, `xlsx`, `xls`, `csv` - * Images: `jpg`, `jpeg`, `png`, `bmp`, `tiff`, `tif`, `heif` (processed via Document Intelligence OCR) - * Video: `mp4`, `mov`, `avi`, `wmv`, `mkv`, `webm` (requires Video Indexer) - * Audio: `mp3`, `wav`, `ogg`, `aac`, `flac`, `m4a` (requires Speech Service) +```powershell +cd ./deployers +``` -## Architecture-diagram +If you work with other Azure clouds, you may need to update your cloud like `azd config set cloud.name AzureUSGovernment` - more information here - [Use Azure Developer CLI in sovereign clouds | Microsoft Learn](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/sovereign-clouds) -![Architecture](./docs/images/architecture.png) +```powershell +azd config set cloud.name AzureCloud +``` + +This will open a browser window that the user with Owner level permissions to the target subscription will need to authenticate with. + +```powershell +azd auth login +``` + +Use the same value for the \ that was used in the application registration. + +```powershell +azd env new +``` -## Demos +Select the new environment -Return to top +```powershell +azd env select +``` -### Upload document and review metadata +This step will begin the deployment process. -![Upload Document Demo](./docs/images/UploadDocumentDemo.gif) +```powershell +azd up +``` -### Classify document and chat with document +## Architecture + +![Architecture](./docs/images/architecture.png) + +## Features + +- **Chat with AI**: Interact with an AI model based on Azure OpenAI’s GPT and Thinking models. +- **RAG with Hybrid Search**: Upload documents and perform hybrid searches (vector + keyword), retrieving relevant information from your files to augment AI responses. +- **Document Management**: Upload, store, and manage multiple versions of documents—personal ("Your Workspace") or group-level ("Group Workspaces"). +- **Group Management**: Create and join groups to share access to group-specific documents, enabling collaboration with Role-Based Access Control (RBAC). +- **Ephemeral (Single-Convo) Documents**: Upload temporary documents available only during the current chat session, without persistent storage in Azure AI Search. +- **Conversation Archiving (Optional)**: Retain copies of user conversations—even after deletion from the UI—in a dedicated Cosmos DB container for audit, compliance, or legal requirements. +- **Content Safety (Optional)**: Integrate Azure AI Content Safety to review every user message *before* it reaches AI models, search indexes, or image generation services. Enforce custom filters and compliance policies, with an optional `SafetyAdmin` role for viewing violations. +- **Feedback System (Optional)**: Allow users to rate AI responses (thumbs up/down) and provide contextual comments on negative feedback. Includes user and admin dashboards, governed by an optional `FeedbackAdmin` role. +- **Bing Web Search (Optional)**: Augment AI responses with live Bing search results, providing up-to-date information. Configurable via Admin Settings. +- **Image Generation (Optional)**: Enable on-demand image creation using Azure OpenAI's DALL-E models, controlled via Admin Settings. 
+- **Video Extraction (Optional)**: Utilize Azure Video Indexer to transcribe speech and perform Optical Character Recognition (OCR) on video frames. Segments are timestamp-chunked for precise retrieval and enhanced citations linking back to the video timecode. +- **Audio Extraction (Optional)**: Leverage Azure Speech Service to transcribe audio files into timestamped text chunks, making audio content searchable and enabling enhanced citations linked to audio timecodes. +- **Document Classification (Optional)**: Admins define custom classification types and associated colors. Users tag uploaded documents with these labels, which flow through to AI conversations, providing lineage and insight into data sensitivity or type. +- **Enhanced Citation (Optional)**: Store processed, chunked files in Azure Storage (organized into user- and document-scoped folders). Display interactive citations in the UI—showing page numbers or timestamps—that link directly to the source document preview. +- **Metadata Extraction (Optional)**: Apply an AI model (configurable GPT model via Admin Settings) to automatically generate keywords, two-sentence summaries, and infer author/date for uploaded documents. Allows manual override for richer search context. +- **File Processing Logs (Optional)**: Enable verbose logging for all ingestion pipelines (workspaces and ephemeral chat uploads) to aid in debugging, monitoring, and auditing file processing steps. +- **Redis Cache (Optional)**: Integrate Azure Cache for Redis to provide a distributed, high-performance session store. This enables true horizontal scaling and high availability by decoupling user sessions from individual app instances. +- **Authentication & RBAC**: Secure access via Azure Active Directory (Entra ID) using MSAL. Supports Managed Identities for Azure service authentication, group-based controls, and custom application roles (`Admin`, `User`, `CreateGroup`, `SafetyAdmin`, `FeedbackAdmin`). +- **Supported File Types**: -![Chat with Searching your Documents Demo](./docs/images/ChatwithSearchingYourDocsDemo.gif) \ No newline at end of file + - **Text**: `txt`, `md`, `html`, `json`, `xml`, `yaml`, `yml`, `log` + - **Documents**: `pdf`, `doc`, `docm`, `docx`, `pptx`, `xlsx`, `xlsm`, `xls`, `csv` + - **Images**: `jpg`, `jpeg`, `png`, `bmp`, `tiff`, `tif`, `heif` + - **Video**: `mp4`, `mov`, `avi`, `wmv`, `mkv`, `flv`, `mxf`, `gxf`, `ts`, `ps`, `3gp`, `3gpp`, `mpg`, `asf`, `m4v`, `isma`, `ismv`, `dvr-ms` + - **Audio**: `wav`, `m4a` \ No newline at end of file diff --git a/application/community_customizations/actions/azure_billing_retriever/agent.instructions.md b/application/community_customizations/actions/azure_billing_retriever/agent.instructions.md new file mode 100644 index 00000000..a0eaf84b --- /dev/null +++ b/application/community_customizations/actions/azure_billing_retriever/agent.instructions.md @@ -0,0 +1,103 @@ +You are an Azure Billing Agent designed to autonomously obtain and visualize Azure cost data using the Azure Billing plugin. Your purpose is to generate accurate cost insights and visualizations without unnecessary user re-prompting. Your behavior is stateless but resilient: if recoverable input or formatting errors occur, you must automatically correct and continue execution rather than asking the user for clarification. When your response completes, your turn ends; the user must explicitly invoke you again to continue. + +azure_billing_plugin + +Core Capabilities +List subscriptions and resource groups. 
+Use list_subscriptions_and_resourcegroups() to list both. +Use list_subscriptions() for subscriptions only. +Use list_resource_groups() for resource groups under a given subscription. +Retrieve current and historical charges. +Generate cost forecasts for future periods. +Display budgets and cost alerts. +Produce Matplotlib (pyplot) visualizations for actual, forecast, or combined datasets, using only the dedicated graphing functions. +Use run_data_query(...) exclusively for data retrieval. +When a visualization is requested, in the same turn: +Execute run_data_query(...) with the appropriate parameters. +Use the returned csv, rows, and plot_hints (x_keys, y_keys, recommended graph types) as inputs to plot_chart(...). +Select a sensible graph type and axes from plot_hints without re-prompting the user. +Do not send graphing-related parameters to run_data_query. Keep query and graph responsibilities strictly separated. +Export and present data as CSV for analysis. +Query Configuration and Formats +Use get_query_configuration_options() to discover available parameters. +Use get_run_data_query_format() and get_plot_chart_format() to understand required input schemas. +Unless the user specifies overrides, apply: +granularity = "Monthly" +group_by = "ResourceType" (Dimension) +output_format = CSV +run_data_query(...) requires: +start_datetime and end_datetime as ISO-8601 timestamps with a time component (e.g., 2025-11-01T00:00:00Z). +At least one aggregation entry (name, function, column). +At least one grouping entry (type, name). +Reject or auto-correct any inputs that omit these required fields before calling the function. +Time and Date Handling +You may determine the current date and time using time functions. +Custom timeframes must use ISO 8601 extended timestamps with time components (e.g., YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss±HH:MM). Date-only strings are invalid. +When users provide partial or ambiguous hints (e.g., "September 2025", "2025-09", "last month", "this quarter"), infer: +Month inputs ⇒ first day 00:00:00 to last day 23:59:59 of that month. +Multi-month ranges ⇒ first day of first month 00:00:00 to last day of last month 23:59:59. +"last month", "this month", "last quarter", "this quarter" ⇒ resolve using the America/Chicago time zone and calendar quarters unless otherwise specified. +Before executing a query, ensure both start_datetime and end_datetime are resolved, valid, and include time components. If missing, infer them per the rules above. +Scope Resolution +If a user provides a subscription or resource group name: +Prefer an exact, case-insensitive match. +If multiple exact matches exist, choose the one with the lowest subscription GUID lexicographically. +If no exact match exists, attempt a case-insensitive contains match; if multiple results remain, choose the lowest GUID and record the choice in the response. +Output Rules +Do not truncate data unless the user explicitly requests it. +When displaying tables, render full Markdown tables with all rows/columns. +When producing CSV output, return the full CSV without truncation. +Do not embed binary data or raw images. The backend stores PNG outputs automatically; describe generated charts (title, axes, graph type) in text instead. +For every visualization request: +Call run_data_query(...) to obtain rows, csv, and plot_hints. +Immediately call plot_chart(...) 
(or plot_custom_chart(...)) with: +conversation_id +data = the returned rows or csv +x_keys/y_keys chosen from plot_hints +An appropriate graph_type from the recommended options +Do not ask the user to restate parameters already inferred or used. +Error Handling and Recovery +Classify errors using: MissingParameter, BadDateFormat, UnknownEnum, NotFound, Authz, Throttle, ServiceError. +Auto-recoverable: MissingParameter, BadDateFormat, UnknownEnum, NotFound (when deterministic fallback exists). +For these, infer/correct values (dates, enums, defaults, scope) and retry exactly once within the same turn. +Non-recoverable (Authz, Throttle, ServiceError, or unresolved NotFound): +Return a concise diagnostic message. +Provide a suggested next step (e.g., request access, narrow the timeframe, wait before retrying). +Append an "Auto-repairs applied" note listing each modification (e.g., normalized dates, defaulted granularity, resolved scope). +Data Integrity and Determinism +Preserve stable CSV schema and column order; include a schema version comment when practical. +If the agent performs any internal resampling or currency normalization, state the exact rule used. +All numeric calculations must be explicit and reproducible. +Session Behavior +Each response is a single turn. After responding, end with a readiness line such as "Ready and waiting." +The user must invoke the agent again for further actions. +Messaging Constraints +Use past tense or present simple to describe actions that already occurred this turn. +Acceptable: "I normalized dates and executed the query." / "I set start_datetime to 2025-05-01T00:00:00Z." +If a retry happened: "I corrected parameter types and retried once in this turn; the query succeeded." +If a retry could not occur: "I did not execute a retry because authorization failed." +Prohibited phrases about your own actions: "I will …", "Executing now …", "Retrying now …", "I am executing …", "I am retrying …". +Replace with: "I executed …", "I retried once …", "I set …". +Before sending the final message, ensure none of the prohibited future/progressive phrases remain. +Response Templates +Success (auto-repair applied) +Auto-recoverable error detected: . I corrected the inputs and retried once in this turn. +Auto-repairs applied: + + + +Result: . + +Ready for your next command. +Success (no error) +Operation completed. + +Ready for your next command. + +Failure (after retry) +Auto-recoverable error detected: . I applied corrections and attempted one retry in this turn, but it failed. +Diagnostics: +Suggested next step: +Ready for your next command. + +Ready and waiting. 
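+
+For reference, a hypothetical illustration of the query pieces described under Query Configuration and Formats. Field names below follow the rules stated in this document and are assumptions; always confirm the authoritative schema via get_run_data_query_format() and get_query_configuration_options() before calling run_data_query(...):
+
+```python
+# Hypothetical shape for "September 2025 costs grouped by resource type".
+query_parameters = {
+    "timeframe": "Custom",
+    "start_datetime": "2025-09-01T00:00:00Z",  # ISO-8601 with a time component (date-only is invalid)
+    "end_datetime": "2025-09-30T23:59:59Z",
+    "granularity": "Monthly",                  # default unless the user overrides it
+    "aggregation": [{"name": "totalCost", "function": "Sum", "column": "Cost"}],
+    "grouping": [{"type": "Dimension", "name": "ResourceType"}],
+    "output_format": "CSV",
+}
+```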
\ No newline at end of file diff --git a/application/community_customizations/actions/azure_billing_retriever/azure_billing_plugin.additional_settings.schema.json b/application/community_customizations/actions/azure_billing_retriever/azure_billing_plugin.additional_settings.schema.json new file mode 100644 index 00000000..134ee581 --- /dev/null +++ b/application/community_customizations/actions/azure_billing_retriever/azure_billing_plugin.additional_settings.schema.json @@ -0,0 +1,14 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Azure Billing Plugin Additional Settings", + "type": "object", + "properties": { + "api_version": { + "type": "string", + "description": "API version to use for Azure Cost Management REST API calls (e.g., '2023-03-01', '2025-03-01').", + "default": "2025-03-01" + } + }, + "required": ["api_version"], + "additionalProperties": false +} diff --git a/application/single_app/functions_personal_agents_plugins.py b/application/community_customizations/actions/azure_billing_retriever/azure_billing_plugin.definition.json similarity index 100% rename from application/single_app/functions_personal_agents_plugins.py rename to application/community_customizations/actions/azure_billing_retriever/azure_billing_plugin.definition.json diff --git a/application/community_customizations/actions/azure_billing_retriever/azure_billing_plugin.py b/application/community_customizations/actions/azure_billing_retriever/azure_billing_plugin.py new file mode 100644 index 00000000..ea224df7 --- /dev/null +++ b/application/community_customizations/actions/azure_billing_retriever/azure_billing_plugin.py @@ -0,0 +1,1716 @@ +# azure_billing_plugin.py +""" +Azure Billing Plugin for Semantic Kernel +- Supports user (Entra ID) and service principal authentication +- Uses Azure Cost Management REST API for billing, budgets, alerts, forecasting +- Renders graphs server-side as PNG (base64 for web, downloadable) +- Returns tabular data as CSV for minimal token usage +- Requires user_impersonation for user auth on 40a69793-8fe6-4db1-9591-dbc5c57b17d8 (Azure Service Management) +""" + +import io +import base64 +import requests +import csv +import matplotlib.pyplot as plt +import logging +import time +import random +import re +import numpy as np +import datetime +import textwrap +from typing import Dict, Any, List, Optional, Union +import json +from collections import defaultdict +from semantic_kernel_plugins.base_plugin import BasePlugin +from semantic_kernel.functions import kernel_function +from semantic_kernel_plugins.plugin_invocation_logger import plugin_function_logger +from functions_authentication import get_valid_access_token_for_plugins +from functions_debug import debug_print +from azure.core.credentials import AccessToken, TokenCredential +from config import cosmos_messages_container, cosmos_conversations_container + + +RESOURCE_ID_REGEX = r"^/subscriptions/(?P[a-fA-F0-9-]+)/?(?:resourceGroups/(?P[^/]+))?$" +TIME_FRAME_TYPE = ["MonthToDate", "BillingMonthToDate", "WeekToDate", "Custom"] # "TheLastMonth, TheLastBillingMonth" are not supported in MAG +QUERY_TYPE = ["Usage", "ActualCost", "AmortizedCost"] +GRANULARITY_TYPE = ["None", "Daily", "Monthly", "Accumulated"] +GROUPING_TYPE = ["Dimension", "TagKey"] +AGGREGATION_FUNCTIONS = ["Sum"] #, "Average", "Min", "Max", "Count", "None"] +AGGREGATION_COLUMNS= ["Cost", "CostUSD", "PreTaxCost", "PreTaxCostUSD"] +DEFAULT_GROUPING_DIMENSIONS = ["None", "BillingPeriod", "ChargeType", "Frequency", "MeterCategory", "MeterId", 
"MeterSubCategory", "Product", "ResourceGroupName", "ResourceLocation", "ResourceType", "ServiceFamily", "ServiceName", "SubscriptionId", "SubscriptionName", "Tag"] +SUPPORTED_GRAPH_TYPES = ["pie", "column_stacked", "column_grouped", "line", "area"] + +class AzureBillingPlugin(BasePlugin): + def __init__(self, manifest: Dict[str, Any]): + super().__init__(manifest) + self.manifest = manifest + self.additionalFields = manifest.get('additionalFields', {}) + self.auth = manifest.get('auth', {}) + endpoint = manifest.get('endpoint', 'https://management.azure.com').rstrip('/') + if not endpoint.startswith('https://'): + # Remove any leading http:// and force https:// + endpoint = 'https://' + endpoint.lstrip('http://').lstrip('https://') + self.endpoint = endpoint + self.metadata_dict = manifest.get('metadata', {}) + self.api_version = self.additionalFields.get('apiVersion', '2023-03-01') + self.grouping_dimensions: List[str] = list(DEFAULT_GROUPING_DIMENSIONS) + + def _get_token(self) -> Optional[str]: + """Get an access token for Azure REST API calls.""" + auth_type = self.auth.get('type') + if auth_type == 'servicePrincipal': + # Service principal: use client credentials + tenant_id = self.auth.get('tenantId') + client_id = self.auth.get('identity') + client_secret = self.auth.get('key') + + # Determine AAD authority host based on management endpoint (public, gov, china) + host = self.endpoint.lower() + if "management.usgovcloudapi.net" in host: + aad_authority_host = "login.microsoftonline.us" + elif "management.azure.com" in host: + aad_authority_host = "login.microsoftonline.com" + else: + aad_authority_host = "login.microsoftonline.com" + + if not tenant_id or not client_id or not client_secret: + raise ValueError("Service principal auth requires tenantId, identity (client id), and key (client secret) in manifest 'auth'.") + + token_url = f"https://{aad_authority_host}/{tenant_id}/oauth2/v2.0/token" + data = { + 'grant_type': 'client_credentials', + 'client_id': client_id, + 'client_secret': client_secret, + 'scope': f'{self.endpoint.rstrip('/')}/.default' + } + try: + resp = requests.post(token_url, data=data, timeout=10) + resp.raise_for_status() + except requests.exceptions.HTTPError as e: + # Log the response text for diagnostics and raise a clear error + resp_text = getattr(e.response, 'text', '') if hasattr(e, 'response') else '' + logging.error("Failed to obtain service principal token. URL=%s, Error=%s, Response=%s", token_url, e, resp_text) + raise RuntimeError(f"Failed to obtain service principal token: {e}. Response: {resp_text}") + except requests.exceptions.RequestException as e: + logging.error("Error requesting service principal token: %s", e) + raise + try: + token = resp.json().get('access_token') + except ValueError: + logging.error("Invalid JSON returned from token endpoint: %s", resp.text) + raise RuntimeError(f"Invalid JSON returned from token endpoint: {resp.text}") + if not token: + logging.error("Token endpoint did not return access_token. Response: %s", resp.text) + raise RuntimeError(f"Token endpoint did not return access_token. 
Response: {resp.text}") + return token + else: + class UserTokenCredential(TokenCredential): + def __init__(self, scope): + self.scope = scope + + def get_token(self, *args, **kwargs): + token_result = get_valid_access_token_for_plugins(scopes=[self.scope]) + if isinstance(token_result, dict) and token_result.get("access_token"): + token = token_result["access_token"] + elif isinstance(token_result, dict) and token_result.get("error"): + # Propagate error up to plugin + raise Exception(token_result) + else: + raise RuntimeError("Could not acquire user access token for Log Analytics API.") + expires_on = int(time.time()) + 300 + return AccessToken(token, expires_on) + # User: use session token helper + scope = f"{self.endpoint.rstrip('/')}/.default" + credential = UserTokenCredential(scope) + return credential.get_token(scope).token + + def _get_headers(self) -> Dict[str, str]: + token = self._get_token() + if isinstance(token, dict) and ("error" in token or "consent_url" in token): + return token + return { + 'Authorization': f'Bearer {token}', + 'Content-Type': 'application/json' + } + + def _get(self, url: str, params: Dict[str, Any] = None) -> Any: + headers = self._get_headers() + if isinstance(headers, dict) and ("error" in headers or "consent_url" in headers): + return headers + if params: + debug_print(f"GET {url} with params: {params}") + resp = requests.get(url, headers=headers, params=params) + else: + debug_print(f"GET {url} without params") + resp = requests.get(url, headers=headers) + resp.raise_for_status() + return resp.json() + + def _post(self, url: str, data: Dict[str, Any]) -> Any: + headers = self._get_headers() + resp = requests.post(url, headers=headers, json=data) + resp.raise_for_status() + return resp.json() + + def _csv_from_table(self, rows: List[Dict[str, Any]]) -> str: + if not rows: + return '' + all_keys = set() + for row in rows: + all_keys.update(row.keys()) + fieldnames = list(all_keys) + output = io.StringIO() + writer = csv.DictWriter(output, fieldnames=fieldnames) + writer.writeheader() + writer.writerows(rows) + return output.getvalue() + + def _flatten_dict(self, d: Dict[str, Any], parent_key: str = '', sep: str = '.') -> Dict[str, Any]: + """Flatten a nested dict into a single-level dict with dotted keys. + + Example: {'properties': {'details': {'threshold': 0.8}}} => {'properties.details.threshold': 0.8} + """ + items = {} + for k, v in (d or {}).items(): + new_key = f"{parent_key}{sep}{k}" if parent_key else k + if isinstance(v, dict): + items.update(self._flatten_dict(v, new_key, sep=sep)) + else: + items[new_key] = v + return items + + def _fig_to_base64_dict(self, fig, filename: str = "chart.png") -> Dict[str, str]: + """Convert a matplotlib Figure to a structured base64 dict. + + Returns: {"mime": "image/png", "filename": filename, "base64": , "image_url": "data:image/png;base64,"} + """ + buf = io.BytesIO() + fig.savefig(buf, format='png', bbox_inches='tight') + fig.clf() + buf.seek(0) + img_b64 = base64.b64encode(buf.read()).decode('utf-8') + return { + "mime": "image/png", + "filename": filename, + "base64": img_b64, + "image_url": f"data:image/png;base64,{img_b64}" + } + + def _parse_csv_to_rows(self, data_csv: Union[str, List[str]]) -> List[Dict[str, Any]]: + """Parse CSV content (string or list-of-lines) into list[dict]. + + - Accepts a CSV string or a list of CSV lines. + - Converts numeric-looking fields to float where possible. 
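+        Illustrative example (hypothetical values): a CSV string with the header
+        "ResourceType,PreTaxCost" and the data row microsoft.compute/disks,"1,234.50"
+        parses to [{'ResourceType': 'microsoft.compute/disks', 'PreTaxCost': 1234.5}].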
+ """ + # Accept list of lines or full string + if isinstance(data_csv, list): + csv_text = "\n".join(data_csv) + else: + csv_text = str(data_csv) + + f = io.StringIO(csv_text) + reader = csv.DictReader(f) + rows = [] + for row in reader: + parsed = {} + for k, v in row.items(): + if v is None: + parsed[k] = None + continue + s = v.strip() + # Try int then float conversion; leave as string if neither + if s == '': + parsed[k] = '' + else: + # remove thousands separators + s_clean = s.replace(',', '') + try: + if re.match(r'^-?\d+$', s_clean): + parsed[k] = int(s_clean) + else: + # float detection (handles scientific notation) + if re.match(r'^-?\d*\.?\d+(e[-+]?\d+)?$', s_clean, re.IGNORECASE): + parsed[k] = float(s_clean) + else: + parsed[k] = s + except Exception: + parsed[k] = s + rows.append(parsed) + return rows + + def _coerce_rows_for_plot(self, data) -> List[Dict[str, Any]]: + """Normalize incoming data into a list of row dictionaries for plotting.""" + if isinstance(data, list): + if not data: + raise ValueError("No data provided for plotting") + first = data[0] + if isinstance(first, dict): + try: + return [dict(row) for row in data] + except Exception as exc: + raise ValueError("data must contain serializable dictionaries") from exc + if isinstance(first, str): + return self._parse_csv_to_rows(data) + raise ValueError("data must be a list of dicts, a CSV string, or a list of CSV lines") + if isinstance(data, str): + if not data.strip(): + raise ValueError("No data provided for plotting") + return self._parse_csv_to_rows(data) + raise ValueError("data must be a list of dicts, a CSV string, or a list of CSV lines") + + def _build_plot_hints(self, rows: List[Dict[str, Any]], columns: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]: + """Generate plotting hints based on the returned Cost Management rows.""" + hints: Dict[str, Any] = { + "available_graph_types": SUPPORTED_GRAPH_TYPES, + "row_count": len(rows or []), + "label_candidates": [], + "numeric_candidates": [], + "recommended": {} + } + + if not rows: + return hints + + sample = rows[0] + numeric_candidates = [k for k, v in sample.items() if isinstance(v, (int, float))] + resource_preferred = [ + "ResourceType", + "ResourceGroupName", + "ResourceName", + "ResourceLocation", + "ServiceName", + "Product", + "MeterCategory", + "MeterSubCategory", + "SubscriptionName", + "SubscriptionId" + ] + temporal_terms = ("date", "time", "month", "period") + temporal_candidates: List[str] = [] + label_candidates: List[str] = [] + + for key, value in sample.items(): + if isinstance(value, (int, float)): + continue + if key not in label_candidates: + label_candidates.append(key) + lowered = key.lower() + if any(term in lowered for term in temporal_terms) and key not in temporal_candidates: + temporal_candidates.append(key) + + ordered_labels: List[str] = [] + for preferred in resource_preferred: + if preferred in sample and preferred not in ordered_labels: + ordered_labels.append(preferred) + + for key in label_candidates: + if key not in ordered_labels and key not in temporal_candidates: + ordered_labels.append(key) + + for temporal in temporal_candidates: + if temporal not in ordered_labels: + ordered_labels.append(temporal) + + hints["label_candidates"] = ordered_labels or label_candidates + hints["numeric_candidates"] = numeric_candidates + + cost_focused = [k for k in numeric_candidates if "cost" in k.lower()] + if cost_focused: + y_keys = cost_focused[:3] + else: + y_keys = numeric_candidates[:3] + + pie_label = next((k for k in 
ordered_labels if "resource" in k.lower()), None) + if not pie_label and ordered_labels: + pie_label = ordered_labels[0] + + pie_value = y_keys[0] if y_keys else None + hints["recommended"]["pie"] = { + "graph_type": "pie", + "x_keys": [pie_label] if pie_label else [], + "y_keys": [pie_value] if pie_value else [] + } + + temporal_primary = next((k for k in ordered_labels if any(term in k.lower() for term in temporal_terms)), None) + stack_candidate = next((k for k in ordered_labels if k != temporal_primary), None) + default_x_keys: List[str] = [] + + if temporal_primary: + default_x_keys.append(temporal_primary) + if stack_candidate: + default_x_keys.append(stack_candidate) + default_graph_type = "line" if len(y_keys) <= 2 else "column_grouped" + else: + if ordered_labels: + default_x_keys.append(ordered_labels[0]) + default_graph_type = "column_stacked" if len(y_keys) > 1 else "pie" + + hints["recommended"]["default"] = { + "graph_type": default_graph_type, + "x_keys": default_x_keys, + "y_keys": y_keys + } + + if columns: + column_summary = [] + for column in columns: + if not isinstance(column, dict): + continue + column_summary.append({ + "name": column.get("name") or column.get("displayName"), + "type": column.get("type") or column.get("dataType"), + }) + hints["columns"] = column_summary + + return hints + + def _iso_utc(self, dt: datetime.datetime) -> str: + return dt.astimezone(datetime.timezone.utc).isoformat() + + def _add_months(self, dt: datetime.datetime, months: int) -> datetime.datetime: + # Add (or subtract) months without external deps. + year = dt.year + (dt.month - 1 + months) // 12 + month = (dt.month - 1 + months) % 12 + 1 + day = min(dt.day, [31, + 29 if (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)) else 28, + 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month-1]) + return dt.replace(year=year, month=month, day=day) + + def _first_day_of_month(self, dt: datetime.datetime) -> datetime.datetime: + return dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0) + + def _last_day_of_month(self, dt: datetime.datetime) -> datetime.datetime: + # move to first of next month then subtract one second + next_month = self._add_months(self._first_day_of_month(dt), 1) + return next_month - datetime.timedelta(seconds=1) + + def _last_n_months_timeperiod(self, n: int): + now = datetime.datetime.now(datetime.timezone.utc) + start = self._add_months(now, -n) + return {"from": self._iso_utc(start), "to": self._iso_utc(now)} + + def _previous_n_months_timeperiod(self, n: int): + today = datetime.datetime.now(datetime.timezone.utc) + first_this_month = self._first_day_of_month(today) + last_of_prev = first_this_month - datetime.timedelta(seconds=1) + first_of_earliest = self._first_day_of_month(self._add_months(first_this_month, -n)) + # ensure full days for readability + return { + "from": self._iso_utc(first_of_earliest), + "to": self._iso_utc(last_of_prev.replace(hour=23, minute=59, second=59, microsecond=0)) + } + + def _parse_datetime_to_utc( + self, + value: Union[str, datetime.datetime, datetime.date], + field_name: str, + ) -> datetime.datetime: + """Normalize supported datetime inputs into timezone-aware UTC datetimes.""" + + if value is None: + raise ValueError(f"{field_name} must be provided when using a custom range.") + + if isinstance(value, datetime.datetime): + dt_value = value + elif isinstance(value, datetime.date): + dt_value = datetime.datetime.combine(value, datetime.time.min) + elif isinstance(value, str): + text = value.strip() + if not text: + raise 
ValueError(f"{field_name} must be a non-empty ISO-8601 string.") + normalized = text[:-1] + "+00:00" if text[-1] in {"Z", "z"} else text + if "T" not in normalized and " " not in normalized: + raise ValueError( + f"{field_name} must include a time component (e.g., 2025-11-30T23:59:59Z)." + ) + try: + dt_value = datetime.datetime.fromisoformat(normalized) + except ValueError as exc: + raise ValueError( + f"{field_name} must be ISO-8601 formatted (e.g., 2025-11-30T23:59:59Z or 2025-11-30T23:59:59-05:00)." + ) from exc + else: + raise ValueError( + f"{field_name} must be a string, datetime, or date instance." + ) + + if dt_value.tzinfo is None: + dt_value = dt_value.replace(tzinfo=datetime.timezone.utc) + else: + dt_value = dt_value.astimezone(datetime.timezone.utc) + + return dt_value + + def _build_custom_time_period( + self, + start_datetime: Optional[Union[str, datetime.datetime, datetime.date]], + end_datetime: Optional[Union[str, datetime.datetime, datetime.date]], + ) -> Dict[str, str]: + """Return a Custom timeframe dictionary derived from start/end inputs or defaults.""" + + if start_datetime is None and end_datetime is None: + now = datetime.datetime.now(datetime.timezone.utc) + month_start = self._first_day_of_month(now) + return {"from": self._iso_utc(month_start), "to": self._iso_utc(now)} + + if (start_datetime is None) != (end_datetime is None): + raise ValueError("start_datetime and end_datetime must both be provided.") + + start_dt = self._parse_datetime_to_utc(start_datetime, "start_datetime") + end_dt = self._parse_datetime_to_utc(end_datetime, "end_datetime") + + if start_dt > end_dt: + raise ValueError("start_datetime must be earlier than end_datetime") + + return {"from": self._iso_utc(start_dt), "to": self._iso_utc(end_dt)} + + def _normalize_enum(self, value: Optional[str], choices: List[str]) -> Optional[str]: + """ + Normalize a string to one of the canonical choices in a case-insensitive way. + Returns the canonical choice if matched, otherwise None. 
+ """ + if value is None: + return None + if not isinstance(value, str): + return None + v = value.strip() + # quick exact match + if v in choices: + return v + # case-insensitive match + lower_map = {c.lower(): c for c in choices} + return lower_map.get(v.lower()) + + @property + def display_name(self) -> str: + return "Azure Billing" + + @property + def metadata(self) -> Dict[str, Any]: + return { + "name": self.metadata_dict.get("name", "azure_billing_plugin"), + "type": "azure_billing", + "description": "Azure Billing plugin for cost, budgets, alerts, forecasting, CSV export, and PNG graphing.", + "methods": self._collect_kernel_methods_for_metadata() + } + + @kernel_function(description="Generate plotting hints for Cost Management data so callers can intentionally choose chart parameters.") + @plugin_function_logger("AzureBillingPlugin") + def suggest_plot_config(self, data, columns: Optional[List[Dict[str, Any]]] = None) -> Dict[str, Any]: + if columns is not None and not isinstance(columns, list): + return {"status": "error", "error": "columns must be a list of column metadata entries"} + try: + rows = self._coerce_rows_for_plot(data) + except ValueError as exc: + return {"status": "error", "error": str(exc)} + except Exception as exc: # pragma: no cover - defensive path + logging.exception("Unexpected error while preparing data for plot hints") + return {"status": "error", "error": f"Failed to parse data for plot hints: {exc}"} + + if not rows: + return {"status": "error", "error": "No data provided for plotting"} + + hints = self._build_plot_hints(rows, columns) + return hints + + @kernel_function(description="Plot a chart/graph from provided data. Supports pie, column_stacked, column_grouped, line, and area.",) + @plugin_function_logger("AzureBillingPlugin") + def plot_chart(self, + conversation_id: str, + data, + x_keys: Optional[List[str]] = None, + y_keys: Optional[List[str]] = None, + graph_type: str = "line", + title: str = "", + xlabel: str = "", + ylabel: str = "", + filename: str = "chart.png", + figsize: Optional[List[float]] = [7.0, 5.0]) -> Dict[str, Any]: + return self.plot_custom_chart( + conversation_id=conversation_id, + data=data, + x_keys=x_keys, + y_keys=y_keys, + graph_type=graph_type, + title=title, + xlabel=xlabel, + ylabel=ylabel, + filename=filename, + figsize=figsize + ) + + def _estimate_legend_items( + self, + graph_type: str, + rows: List[Dict[str, Any]], + y_keys_list: List[str], + stack_col: Optional[str], + ) -> int: + """Return the number of legend entries expected for a plot.""" + if graph_type == "pie": + return len(rows) + if graph_type == "column_stacked": + if stack_col: + return len({r.get(stack_col) for r in rows if r.get(stack_col) is not None}) + return len(y_keys_list) + return len(y_keys_list) + + def _adjust_figsize(self, base_figsize: List[float], legend_items: int) -> List[float]: + """Scale the figsize heuristically based on legend size.""" + scaled = list(base_figsize) + if legend_items > 6: + extra_width = min(legend_items * 0.12, 5.0) + scaled[0] = base_figsize[0] + extra_width + elif legend_items > 3: + scaled[1] = base_figsize[1] + 0.8 + if legend_items > 10: + scaled[1] = max(scaled[1], base_figsize[1] + min((legend_items - 10) * 0.2, 3.0)) + return scaled + + def _wrap_title(self, title: str, width: int = 60) -> str: + """Return a wrapped title so long strings stay inside the figure.""" + + if not title: + return "" + try: + return textwrap.fill(title, width=max(20, width)) + except Exception: + return title + + def 
_pie_autopct_formatter(self, values: List[float]): + """Return an autopct formatter that prints absolute value and percentage for top slices only.""" + + total = sum(values) or 1.0 + # Show labels for the most meaningful slices to avoid visual clutter. + sorted_indices = sorted(range(len(values)), key=lambda i: values[i], reverse=True) + max_labels = 8 if len(values) >= 15 else 12 + pct_threshold = 2.0 if len(values) >= 12 else 0.5 + show_indices = set() + for idx in sorted_indices[:max_labels]: + pct = (values[idx] / total) * 100 + if pct >= pct_threshold: + show_indices.add(idx) + + call_count = {"idx": -1} + + def _format(pct: float) -> str: + call_count["idx"] += 1 + idx = call_count["idx"] + if idx not in show_indices: + return "" + value = values[idx] + value_str = f"{value:,.0f}" if abs(value) >= 1000 else f"{value:,.2f}" + return f"{value_str}\n({pct:.1f}%)" + + return _format + + def _annotate_column_totals(self, ax, positions: List[float], totals: List[float]) -> None: + """Annotate summed column totals above each bar cluster and extend axes if needed.""" + + if not totals or not positions: + return + safe_totals: List[float] = [] + for value in totals: + try: + safe_totals.append(float(value)) + except (TypeError, ValueError): + safe_totals.append(0.0) + if not safe_totals: + return + abs_max = max(max(safe_totals), abs(min(safe_totals)), 1.0) + offset = max(abs_max * 0.02, 0.5) + headroom = max(abs_max * 0.05, offset) + label_positions: List[float] = [] + for x, total in zip(positions, safe_totals): + y = total + offset if total >= 0 else total - offset + label_positions.append(y) + va = 'bottom' if total >= 0 else 'top' + ax.text( + x, + y, + f"{total:,.2f}", + ha='center', + va=va, + fontsize=8, + fontweight='bold' + ) + + if label_positions: + current_bottom, current_top = ax.get_ylim() + max_label = max(label_positions) + min_label = min(label_positions) + pad = headroom + top_needed = max_label + pad + bottom_needed = min_label - pad + new_bottom = current_bottom + new_top = current_top + if top_needed > current_top: + new_top = top_needed + if bottom_needed < current_bottom: + new_bottom = bottom_needed + if new_bottom != current_bottom or new_top != current_top: + ax.set_ylim(new_bottom, new_top) + + def _place_side_legend( + self, + ax, + handles: Optional[List[Any]] = None, + labels: Optional[List[Any]] = None, + title: Optional[str] = None, + ncol: int = 1, + ) -> bool: + """Place legend to the right of the axes and reserve horizontal space.""" + if handles is not None or labels is not None: + legend = ax.legend( + handles, + labels, + title=title, + loc="center left", + bbox_to_anchor=(1.02, 0.5), + borderaxespad=0.0, + ncol=ncol, + ) + else: + legend = ax.legend( + title=title, + loc="center left", + bbox_to_anchor=(1.02, 0.5), + borderaxespad=0.0, + ncol=ncol, + ) + if legend is not None: + ax.figure.subplots_adjust(right=0.78) + return True + return False + + def _plot_pie_chart( + self, + ax, + rows: List[Dict[str, Any]], + x_key: str, + y_key: str, + title: str, + xlabel: str, + ylabel: str, + ) -> bool: + labels = [r.get(x_key) for r in rows] + labels_display = ["Unknown" if label in (None, "") else str(label) for label in labels] + values = [float(r.get(y_key) or 0) for r in rows] + total_value = sum(values) + autopct = self._pie_autopct_formatter(values) + wedges, _, autotexts = ax.pie(values, autopct=autopct, startangle=90) + for autotext in autotexts: + autotext.set_fontsize(8) + ax.set_title(self._wrap_title(title or "Cost distribution")) + ax.text(0, 0, 
f"Total\n{total_value:,.2f}", ha='center', va='center', fontsize=10, fontweight='bold') + legend_labels = [] + for label, value in zip(labels_display, values): + value_str = f"{value:,.2f}" if abs(value) < 1000 else f"{value:,.0f}" + pct = (value / total_value * 100) if total_value else 0 + legend_labels.append(f"{label} — {value_str} ({pct:.1f}%)") + legend_title = f"{x_key} (Total: {total_value:,.2f})" + ncol = min(4, max(1, len(labels_display) // 10 + 1)) + return self._place_side_legend(ax, wedges, legend_labels, title=legend_title, ncol=ncol) + + def _plot_line_or_area_chart( + self, + ax, + rows: List[Dict[str, Any]], + x_vals: List[Any], + y_keys_list: List[str], + graph_type: str, + x_key: str, + xlabel: str, + ylabel: str, + title: str, + ) -> bool: + for yk in y_keys_list: + y_vals = [float(r.get(yk) or 0) for r in rows] + if graph_type == "line": + ax.plot(x_vals, y_vals, marker='o', label=yk) + else: + ax.fill_between(range(len(x_vals)), y_vals, alpha=0.5, label=yk) + ax.set_title(self._wrap_title(title or "Cost trend")) + ax.set_xlabel(xlabel or x_key) + ax.set_ylabel(ylabel or (y_keys_list[0] if y_keys_list else "Value")) + ax.grid(True, axis='y', alpha=0.3) + return self._place_side_legend(ax) + + def _plot_column_grouped_chart( + self, + ax, + rows: List[Dict[str, Any]], + x_vals: List[Any], + y_keys_list: List[str], + x_key: str, + xlabel: str, + ylabel: str, + title: str, + ) -> bool: + n_groups = len(rows) + n_bars = len(y_keys_list) + index = np.arange(n_groups) + bar_width = 0.8 / max(1, n_bars) + group_totals = [0.0 for _ in rows] + for i, yk in enumerate(y_keys_list): + y_vals = [float(r.get(yk) or 0) for r in rows] + # accumulate totals for the annotation step below + group_totals = [total + value for total, value in zip(group_totals, y_vals)] + ax.bar(index + i * bar_width, y_vals, bar_width, label=yk) + ax.set_xticks(index + bar_width * (n_bars - 1) / 2) + ax.set_xticklabels([str(x) for x in x_vals], rotation=45, ha='right') + ax.set_title(self._wrap_title(title or "Cost comparison")) + ax.set_xlabel(xlabel or x_key) + ax.set_ylabel(ylabel or ("Values" if len(y_keys_list) > 1 else y_keys_list[0])) + centers = (index + bar_width * (n_bars - 1) / 2).tolist() + self._annotate_column_totals(ax, centers, group_totals) + return self._place_side_legend(ax) + + def _plot_column_stacked_chart( + self, + ax, + rows: List[Dict[str, Any]], + x_key: str, + y_keys_list: List[str], + stack_col: Optional[str], + xlabel: str, + ylabel: str, + title: str, + ) -> bool: + x_vals_unique: List[Any] = [] + seen_x = set() + for r in rows: + xval = r.get(x_key) + if xval not in seen_x: + seen_x.add(xval) + x_vals_unique.append(xval) + + pivot = defaultdict(lambda: defaultdict(float)) + if stack_col: + for r in rows: + xval = r.get(x_key) + sval = r.get(stack_col) + yval = float(r.get(y_keys_list[0]) or 0) + pivot[xval][sval] += yval + y_keys_plot = sorted({key for row in pivot.values() for key in row.keys()}) + else: + for r in rows: + xval = r.get(x_key) + for yk in y_keys_list: + pivot[xval][yk] += float(r.get(yk) or 0) + y_keys_plot = y_keys_list + + data_matrix = [[pivot[x_val].get(yk, 0.0) for x_val in x_vals_unique] for yk in y_keys_plot] + index = np.arange(len(x_vals_unique)) + bottoms = np.zeros(len(x_vals_unique)) + for i, yk in enumerate(y_keys_plot): + ax.bar(index, data_matrix[i], bottom=bottoms, label=str(yk)) + bottoms += np.array(data_matrix[i]) + ax.set_xticks(index) + ax.set_xticklabels([str(x) for x in x_vals_unique], rotation=45, ha='right') + 
ax.set_title(self._wrap_title(title or "Cost breakdown")) + ax.set_xlabel(xlabel or x_key) + ax.set_ylabel(ylabel or (y_keys_list[0] if y_keys_list else "Values")) + legend_title = stack_col or "Segments" + self._annotate_column_totals(ax, index.tolist(), bottoms.tolist()) + return self._place_side_legend(ax, title=legend_title) + + def plot_custom_chart(self, + conversation_id: str, + data, + x_keys: Optional[List[str]] = None, + y_keys: Optional[List[str]] = None, + graph_type: str = "line", + title: str = "", + xlabel: str = "", + ylabel: str = "", + filename: str = "chart.png", + figsize: Optional[List[float]] = [7.0, 5.0]) -> Dict[str, Any]: + """ + General plotting function. + + - data: list of dict rows (e.g., [{'date': '2025-10-01', 'cost': 12.3, 'type': 'A'}, ...]) + - x_keys: list of keys to use for x axis (required for non-pie charts); first key is primary x-axis, additional keys are used for stacking/grouping + - y_keys: list of keys to plot on y axis (if None and graph_type is not pie, autodetect numeric columns) + - graph_type: one of ['pie', 'column_stacked', 'column_grouped', 'line', 'area'] + - returns structured dict with mime, filename, base64, image_url and metadata + """ + try: + #print(f"[AzureBillingPlugin] plot_custom_chart called with conversation_id={conversation_id}, graph_type={graph_type},\n x_keys={x_keys},\n y_keys={y_keys},\n title={title},\n xlabel={xlabel},\n ylabel={ylabel},\n figsize={figsize},\n data:{data}") + graph_type = graph_type.lower() if isinstance(graph_type, str) else str(graph_type) + # Validate figsize: must be a list/tuple of two numbers if provided + if figsize is None: + figsize = [7.0, 5.0] + elif isinstance(figsize, (list, tuple)): + if len(figsize) != 2: + return {"status": "error", "error": "figsize must be a list of two numbers: [width, height]"} + try: + figsize = [float(figsize[0]), float(figsize[1])] + except Exception: + return {"status": "error", "error": "figsize elements must be numeric"} + else: + return {"status": "error", "error": "figsize must be a list of two numbers or null"} + + except Exception as ex: + logging.exception("Unexpected error in plot_custom_chart parameter validation") + return {"status": "error", "error": str(ex)} + if graph_type not in SUPPORTED_GRAPH_TYPES: + raise ValueError(f"Unsupported graph_type '{graph_type}'. 
Supported: {SUPPORTED_GRAPH_TYPES}") + try: + rows = self._coerce_rows_for_plot(data) + except ValueError as exc: + return {"status": "error", "error": str(exc)} + except Exception as exc: + logging.exception("Failed to parse input data for plotting") + return {"status": "error", "error": f"Failed to parse data for plotting: {str(exc)}"} + + if not rows: + return {"status": "error", "error": "No data provided for plotting"} + + hints = self._build_plot_hints(rows, None) + recommended_defaults = hints.get("recommended", {}).get("default", {}) + recommended_pie = hints.get("recommended", {}).get("pie", {}) + + def ensure_list(value) -> List[Any]: + if value is None: + return [] + if isinstance(value, list): + return list(value) + if isinstance(value, tuple): + return list(value) + if isinstance(value, str): + return [value] + if hasattr(value, '__iter__'): + return list(value) + return [value] + + x_keys_list = ensure_list(x_keys) + y_keys_list = ensure_list(y_keys) + + if graph_type == "pie": + if not y_keys_list: + y_keys_list = ensure_list(recommended_pie.get("y_keys") or recommended_defaults.get("y_keys")) + if len(y_keys_list) > 1: + y_keys_list = y_keys_list[:1] + if not y_keys_list: + raise ValueError("Pie chart requires a numeric column for values") + + if not x_keys_list: + x_keys_list = ensure_list(recommended_pie.get("x_keys") or recommended_defaults.get("x_keys")) + if len(x_keys_list) > 1: + x_keys_list = x_keys_list[:1] + if not x_keys_list: + sample_row = rows[0] + for candidate in sample_row.keys(): + if candidate in hints.get("label_candidates", []): + x_keys_list = [candidate] + break + if not x_keys_list: + raise ValueError("Pie chart requires a label column (x_keys)") + + x_key = x_keys_list[0] + stack_col = None + x_vals = None + else: + if not y_keys_list: + y_keys_list = ensure_list(recommended_defaults.get("y_keys")) + if not y_keys_list: + sample_row = rows[0] + y_keys_list = [k for k, v in sample_row.items() if isinstance(v, (int, float))] + if not y_keys_list: + raise ValueError("Could not autodetect numeric columns for y axis. 
Provide y_keys explicitly.") + + if not x_keys_list: + x_keys_list = ensure_list(recommended_defaults.get("x_keys")) + if not x_keys_list: + sample_row = rows[0] + for key, value in sample_row.items(): + if not isinstance(value, (int, float)): + x_keys_list = [key] + break + if not x_keys_list: + raise ValueError("x_keys is required for this chart type") + if len(x_keys_list) > 2: + x_keys_list = x_keys_list[:2] + + x_key = x_keys_list[0] + stack_col = x_keys_list[1] if len(x_keys_list) > 1 else None + x_vals = [r.get(x_key) for r in rows] + + fig = None + try: + legend_items = self._estimate_legend_items(graph_type, rows, y_keys_list, stack_col) + scaled_figsize = self._adjust_figsize(figsize, legend_items) + + fig, ax = plt.subplots(figsize=tuple(scaled_figsize)) + + legend_outside = False + if graph_type == "pie": + legend_outside = self._plot_pie_chart( + ax, + rows, + x_keys_list[0], + y_keys_list[0], + title, + xlabel, + ylabel, + ) + elif graph_type in ("line", "area"): + legend_outside = self._plot_line_or_area_chart( + ax, + rows, + x_vals, + y_keys_list, + graph_type, + x_key, + xlabel, + ylabel, + title, + ) + elif graph_type == "column_grouped": + legend_outside = self._plot_column_grouped_chart( + ax, + rows, + x_vals, + y_keys_list, + x_key, + xlabel, + ylabel, + title, + ) + elif graph_type == "column_stacked": + legend_outside = self._plot_column_stacked_chart( + ax, + rows, + x_key, + y_keys_list, + stack_col, + xlabel, + ylabel, + title, + ) + + if legend_outside: + plt.tight_layout(rect=[0, 0, 0.78, 1]) + else: + plt.tight_layout() + img_b64 = self._fig_to_base64_dict(fig, filename=filename) + payload = { + "status": "ok", + "type": "image_url", + "image_url": {"url": str(img_b64.get("image_url", ""))}, + "metadata": { + "graph_type": graph_type, + "x_keys": x_keys_list, + "y_keys": y_keys_list, + "stack_key": stack_col, + "figure_size": scaled_figsize, + "recommendations": hints.get("recommended", {}) + } + } + + if conversation_id: + try: + self.upload_cosmos_message(conversation_id, str(img_b64.get("image_url", ""))) + payload["image_url"] = f"Stored chart image for conversation {conversation_id}" + payload["requires_message_reload"] = True + except Exception: + logging.exception("Failed to upload chart image to Cosmos DB") + payload.setdefault("warnings", []).append("Chart rendered but storing to conversation failed.") + else: + payload.setdefault("warnings", []).append("Chart rendered but conversation_id was not provided; image not persisted.") + + #time.sleep(5) # give time for image to upload before returning + return payload + except Exception as ex: + logging.exception("Error while generating chart") + return {"status": "error", "error": f"Error while generating chart: {str(ex)}"} + finally: + if fig is not None: + plt.close(fig) + + + @plugin_function_logger("AzureBillingPlugin") + @kernel_function(description="List all subscriptions and resource groups accessible to the user/service principal.") + def list_subscriptions_and_resourcegroups(self) -> str: + url = f"{self.endpoint}/subscriptions?api-version=2020-01-01" + subs = self._get(url).get('value', []) + if isinstance(subs, dict) and ("error" in subs or "consent_url" in subs): + return subs + result = [] + for sub in subs: + sub_id = sub.get('subscriptionId') + sub_name = sub.get('displayName') + rg_url = f"{self.endpoint}/subscriptions/{sub_id}/resourcegroups?api-version=2021-04-01" + rgs = self._get(rg_url).get('value', []) + result.append({ + "subscriptionId": sub_id, + "subscriptionName": sub_name, + 
"resourceGroups": [rg.get('name') for rg in rgs] + }) + return self._csv_from_table(result) + + @plugin_function_logger("AzureBillingPlugin") + @kernel_function(description="List all subscriptions accessible to the user/service principal.") + def list_subscriptions(self) -> str: + url = f"{self.endpoint}/subscriptions?api-version=2020-01-01" + data = self._get(url) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + subs = data.get('value', []) + return self._csv_from_table(subs) + + @plugin_function_logger("AzureBillingPlugin") + @kernel_function(description="List all resource groups in a subscription.") + def list_resource_groups(self, subscription_id: str) -> str: + url = f"{self.endpoint}/subscriptions/{subscription_id}/resourcegroups?api-version=2020-01-01" + data = self._get(url) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + rgs = data.get('value', []) + return self._csv_from_table(rgs) + + @kernel_function(description="Get cost forecast with custom duration and granularity.") + @plugin_function_logger("AzureBillingPlugin") + def get_forecast(self, resourceId: str, forecast_period_months: int = 12, granularity: str = "Monthly", lookback_months: Optional[int] = None) -> str: + """ + #Get cost forecast for a given period and granularity. + #scope: /subscriptions/{id} or /subscriptions/{id}/resourceGroups/{rg} + #forecast_period_months: Number of months to forecast (default 12) + #granularity: "Daily", "Monthly", "Weekly" + #lookback_months: If provided, use last N months as historical data for forecasting + """ + url = f"{self.endpoint.rstrip('/')}/{resourceId.lstrip('/').rstrip('/')}/providers/Microsoft.CostManagement/query?api-version={self.api_version}" + timeframe = "Custom" + # Calculate start/end dates for forecast + today = datetime.datetime.utcnow().date() + start_date = today + end_date = today + datetime.timedelta(days=forecast_period_months * 30) + # If lookback_months is set, use that for historical data + if lookback_months: + hist_start = today - datetime.timedelta(days=lookback_months * 30) + hist_end = today + else: + hist_start = None + hist_end = None + query = { + "type": "Forecast", + "timeframe": timeframe, + "timePeriod": { + "from": start_date.isoformat(), + "to": end_date.isoformat() + }, + "dataset": {"granularity": granularity} + } + # Optionally add historical data window + if hist_start and hist_end: + query["historicalTimePeriod"] = { + "from": hist_start.isoformat(), + "to": hist_end.isoformat() + } + data = self._post(url, query) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + rows = data.get('properties', {}).get('rows', []) + columns = [c['name'] for c in data.get('properties', {}).get('columns', [])] + result = [dict(zip(columns, row)) for row in rows] + return self._csv_from_table(result) + + @kernel_function(description="Get budgets for a subscription or resource group.") + @plugin_function_logger("AzureBillingPlugin") + def get_budgets(self, subscription_id: str, resource_group_name: Optional[str] = None) -> str: + if resource_group_name: + scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" + else: + scope = f"/subscriptions/{subscription_id}" + url = f"{self.endpoint.rstrip('/')}{scope}/providers/Microsoft.CostManagement/budgets?api-version={self.api_version}" + data = self._get(url) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + budgets = 
data.get('value', []) + return self._csv_from_table(budgets) + + @kernel_function(description="Get cost alerts.") + @plugin_function_logger("AzureBillingPlugin") + def get_alerts(self, subscription_id: str, resource_group_name: Optional[str] = None) -> str: + if resource_group_name: + scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" + else: + scope = f"/subscriptions/{subscription_id}" + url = f"{self.endpoint.rstrip('/')}{scope}/providers/Microsoft.CostManagement/alerts?api-version={self.api_version}" + data = self._get(url) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + alerts = data.get('value', []) + return self._csv_from_table(alerts) + + @kernel_function(description="Get specific cost alert by ID.") + @plugin_function_logger("AzureBillingPlugin") + def get_specific_alert(self, subscription_id: str, alertId: str , resource_group_name: Optional[str] = None) -> str: + if resource_group_name: + scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" + else: + scope = f"/subscriptions/{subscription_id}" + url = f"{self.endpoint.rstrip('/')}{scope}/providers/Microsoft.CostManagement/alerts/{alertId}?api-version={self.api_version}" + data = self._get(url) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + # Flatten nested properties for CSV friendliness + if isinstance(data, dict): + flat = self._flatten_dict(data) + # Convert lists to JSON strings for CSV + for k, v in list(flat.items()): + if isinstance(v, (list, dict)): + try: + flat[k] = json.dumps(v) + except Exception: + flat[k] = str(v) + return self._csv_from_table([flat]) + else: + # Fallback: return raw JSON string in a single column + return self._csv_from_table([{"raw": json.dumps(data)}]) + + @kernel_function(description="Run an Azure Cost Management query and return rows, column metadata, and plotting hints for manual chart selection. Requires explicit start/end datetimes and always uses a Custom timeframe.") + @plugin_function_logger("AzureBillingPlugin") + def run_data_query(self, + conversation_id: str, + subscription_id: str, + aggregations: List[Dict[str, Any]], + groupings: List[Dict[str, Any]], + start_datetime: Union[str, datetime.datetime, datetime.date], + end_datetime: Union[str, datetime.datetime, datetime.date], + query_type: str = "Usage", + granularity: str = "Daily", + resource_group_name: Optional[str] = None, + query_filter: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + """ + Execute an Azure Cost Management query and return structured results. + + Callers must supply start_datetime and end_datetime (ISO-8601 strings or + datetime objects). The outgoing payload always uses a Custom timeframe with a + fully populated timePeriod object. + + Returns a dict containing: + - rows: list of result dictionaries + - columns: metadata about returned columns + - csv: CSV-formatted string of the results + - plot_hints: heuristic suggestions for plotting the data + - query: the query payload that was submitted + - scope/api_version: request context details + """ + if resource_group_name: + scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" + else: + scope = f"/subscriptions/{subscription_id}" + url = f"{self.endpoint.rstrip('/')}{scope}/providers/Microsoft.CostManagement/query?api-version={self.api_version}" + if not self._normalize_enum(query_type, QUERY_TYPE): + raise ValueError(f"Invalid query_type: {query_type}. 
Must be one of {QUERY_TYPE}.") + if not self._normalize_enum(granularity, GRANULARITY_TYPE): + raise ValueError(f"Invalid granularity: {granularity}. Must be one of {GRANULARITY_TYPE}.") + + if start_datetime is None or end_datetime is None: + return { + "status": "error", + "error": "start_datetime and end_datetime are required for run_data_query.", + "expected_format": "ISO-8601 timestamp with time component (e.g., 2025-11-01T00:00:00Z).", + "example": { + "start_datetime": "2025-11-01T00:00:00Z", + "end_datetime": "2025-11-30T23:59:59Z" + } + } + try: + time_period = self._build_custom_time_period(start_datetime, end_datetime) + except ValueError as exc: + return { + "status": "error", + "error": str(exc), + "expected_format": "ISO-8601 timestamp with time component (e.g., 2025-11-01T00:00:00Z).", + "example": { + "start_datetime": "2025-11-01T00:00:00Z", + "end_datetime": "2025-11-30T23:59:59Z" + } + } + + query = { + "type": query_type, + "timeframe": "Custom", + "dataset": { + "granularity": granularity + }, + "timePeriod": time_period, + } + if not aggregations: + return { + "status": "error", + "error": "Aggregations list cannot be empty; supply at least one aggregation entry.", + "example": [ + {"name": "totalCost", "function": "Sum", "column": "PreTaxCost"} + ] + } + if not groupings: + return { + "status": "error", + "error": "Groupings list cannot be empty; include at least one Dimension/Tag grouping.", + "example": [ + {"type": "Dimension", "name": "ResourceType"} + ] + } + # Validate and normalize aggregations (if provided) + if aggregations: + if not isinstance(aggregations, list): + return {"status": "error", "error": "aggregations must be a list of aggregation definitions", "example": [{"name": "totalCost", "function": "Sum", "column": "PreTaxCost"}]} + if len(aggregations) > 2: + logging.warning("More than 2 aggregations provided; only the first 2 will be used") + agg_map: Dict[str, Any] = {} + for agg in aggregations[:2]: + if not isinstance(agg, dict): + return {"status": "error", "error": "Each aggregation must be a dict", "example": [{"name": "totalCost", "function": "Sum", "column": "PreTaxCost"}]} + + # Determine aggregation alias (outer key) and underlying column + function + # Support these shapes: + # 1) flat: {"name": "totalCost", "function": "Sum", "column": "PreTaxCost"} + # 2) nested: {"name": "totalCost", "aggregation": {"name": "PreTaxCost", "function": "Sum"}} + # We will produce agg_map[alias] = {"name": , "function": } + + alias = agg.get('name') + column_name = None + function = None + + if 'aggregation' in agg and isinstance(agg['aggregation'], dict): + sub = agg['aggregation'] + # sub.get('name') is the column name in nested form + column_name = sub.get('name') or sub.get('column') or agg.get('column') + function = sub.get('function') or agg.get('function') + # allow sub to specify other properties but we'll only keep name and function for compatibility + else: + # flat form + column_name = agg.get('column') or agg.get('name_of_column') or agg.get('columnName') + function = agg.get('function') + + if not alias: + return {"status": "error", "error": "Aggregation entry missing aggregation alias in 'name' field", "example": [{"name": "totalCost", "aggregation": {"name": "PreTaxCost", "function": "Sum"}}]} + if not function: + return {"status": "error", "error": f"Aggregation '{alias}' missing 'function'", "example": [{"name": alias, "aggregation": {"name": "PreTaxCost", "function": "Sum"}}]} + if not self._normalize_enum(function, AGGREGATION_FUNCTIONS): 
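+                    # Only "Sum" is currently accepted (see AGGREGATION_FUNCTIONS); any other
+                    # function falls through to the corrective error below, which includes an example payload.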
+ return {"status": "error", "error": f"Aggregation function '{function}' is invalid. Must be one of: {AGGREGATION_FUNCTIONS}", "example": [{"name": alias, "aggregation": {"name": "PreTaxCost", "function": "Sum"}}]} + + details: Dict[str, Any] = {} + # per your requested shape, the inner object should include the column as 'name' + if column_name: + details['name'] = column_name + details['function'] = function + + agg_map[alias] = details + query["dataset"]["aggregation"] = agg_map + + # Validate and normalize groupings (if provided) + if groupings: + if not isinstance(groupings, list): + return {"status": "error", "error": "groupings must be a list of grouping definitions", "example": [{"type": "Dimension", "name": "ResourceLocation"}]} + if len(groupings) > 2: + logging.warning("More than 2 groupings provided; only the first 2 will be used") + normalized_groupings: List[Dict[str, str]] = [] + for grp in groupings[:2]: + if not isinstance(grp, dict): + return {"status": "error", "error": "Each grouping must be a dict with 'type' and 'name'", "example": [{"type": "Dimension", "name": "ResourceType"}]} + gtype = grp.get('type') + gname = grp.get('name') + if not gtype or not self._normalize_enum(gtype, GROUPING_TYPE): + return {"status": "error", "error": f"Grouping type '{gtype}' is invalid. Must be one of: {GROUPING_TYPE}", "example": [{"type": "Dimension", "name": "ResourceType"}]} + if not gname or not self._normalize_enum(gname, self.grouping_dimensions): + return {"status": "error", "error": f"Grouping name '{gname}' is invalid. Must be one of: {self.grouping_dimensions}", "example": [{"type": "Dimension", "name": "ResourceType"}]} + normalized_groupings.append({'type': gtype, 'name': gname}) + query["dataset"]["grouping"] = normalized_groupings + if query_filter: + query["dataset"]["filter"] = query_filter + # No additional validation required; _build_custom_time_period enforces shape + logging.debug("Running Cost Management query with payload: %s", json.dumps(query, indent=2)) + data = self._post(url, query) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + rows = data.get('properties', {}).get('rows', []) + column_objects = data.get('properties', {}).get('columns', []) + column_names = [c.get('name') for c in column_objects] + result_rows = [dict(zip(column_names, row)) for row in rows] + csv_output = self._csv_from_table(result_rows) + + columns_meta: List[Dict[str, Any]] = [] + for col in column_objects: + if not isinstance(col, dict): + continue + columns_meta.append({ + "name": col.get('name') or col.get('displayName'), + "type": col.get('type') or col.get('dataType'), + "dataType": col.get('dataType'), + "unit": col.get('unit') + }) + + plot_hints = self._build_plot_hints(result_rows, column_objects) + + return { + "status": 200, + "conversation_id": conversation_id, + "scope": scope, + "api_version": self.api_version, + "query": query, + "row_count": len(result_rows), + "columns": columns_meta, + "csv": csv_output, + "plot_hints": plot_hints, + } + + @kernel_function(description="Return available configuration options for Azure Billing report queries.") + @plugin_function_logger("AzureBillingPlugin") + def get_query_configuration_options(self, subscription_id: str, resource_group_name: Optional[str] = None) -> Dict[str, Any]: + get_dimension_results = self.get_grouping_dimensions(subscription_id, resource_group_name) + if isinstance(get_dimension_results, dict) and ("error" in get_dimension_results or "consent_url" in 
get_dimension_results): + return get_dimension_results + if isinstance(get_dimension_results, list): + # Store a per-instance copy to prevent cross-request state bleed. + self.grouping_dimensions = list(get_dimension_results) or list(DEFAULT_GROUPING_DIMENSIONS) + return { + "TIME_FRAME_TYPE": TIME_FRAME_TYPE, + "QUERY_TYPE": QUERY_TYPE, + "GRANULARITY_TYPE": GRANULARITY_TYPE, + "GROUPING_TYPE": GROUPING_TYPE, + "GROUPING_DIMENSIONS": self.grouping_dimensions, + "AGGREGATION_FUNCTIONS": AGGREGATION_FUNCTIONS, + "AGGREGATION_COLUMNS": AGGREGATION_COLUMNS, + "NOTE": "Not all combinations are available for all queries." + } + + @kernel_function(description="Get available cost dimensions for Azure Billing.") + @plugin_function_logger("AzureBillingPlugin") + def get_grouping_dimensions(self, subscription_id: str, resource_group_name: Optional[str] = None) -> List[str]: + if resource_group_name: + scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" + else: + scope = f"/subscriptions/{subscription_id}" + # Use the Cost Management query endpoint to retrieve available dimensions/categories + # Note: some Cost Management responses return a 'value' array where each item has a + # 'properties' object containing a 'category' property. We handle that shape and + # fall back to other common fields. + url = f"{self.endpoint.rstrip('/')}{scope}/providers/Microsoft.CostManagement/dimensions?api-version={self.api_version}&$expand=properties/data" + data = self._get(url) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + + values = data.get('value', []) if isinstance(data, dict) else [] + dims = [] + for item in values: + if not isinstance(item, dict): + continue + # Preferred location: item['properties']['category'] + props = item.get('properties') if isinstance(item.get('properties'), dict) else {} + cat = props.get('category') or props.get('Category') + if not cat: + # fallback to name/displayName + cat = item.get('name') or props.get('name') or props.get('displayName') + if cat: + dims.append(cat) + + # dedupe while preserving order + seen = set() + deduped = [] + for d in dims: + if d not in seen: + seen.add(d) + deduped.append(d) + return deduped + + @kernel_function(description="Run a sample or provided Cost Management query and return the columns metadata (name + type). Useful for discovering which columns can be used for aggregation and grouping.") + @plugin_function_logger("AzureBillingPlugin") + def get_query_columns(self, subscription_id: str, resource_group_name: Optional[str] = None, query: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]: + """ + Discover columns for a Cost Management query. + + - subscription_id: required + - resource_group_name: optional + - query: optional Cost Management query dict; if omitted a minimal Usage MonthToDate query is used + + Returns a list of {"name": , "type": }. 
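+        Illustrative result (hypothetical values):
+        [{"name": "ResourceType", "type": "String"}, {"name": "PreTaxCost", "type": "Number"}]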
+ """ + if resource_group_name: + scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" + else: + scope = f"/subscriptions/{subscription_id}" + url = f"{self.endpoint.rstrip('/')}" + f"{scope}/providers/Microsoft.CostManagement/query?api-version={self.api_version}" + + if not query: + query = { + "type": "Usage", + "timeframe": "MonthToDate", + "dataset": {"granularity": "None"} + } + + data = self._post(url, query) + if isinstance(data, dict) and ("error" in data or "consent_url" in data): + return data + + # Two possible shapes: properties.columns or value[].properties.columns + cols = [] + props = data.get('properties') if isinstance(data, dict) else None + if props and isinstance(props, dict) and props.get('columns'): + cols = props.get('columns', []) + else: + # Inspect value[] items for properties.columns + values = data.get('value', []) if isinstance(data, dict) else [] + for item in values: + if not isinstance(item, dict): + continue + p = item.get('properties') if isinstance(item.get('properties'), dict) else {} + if p.get('columns'): + cols = p.get('columns') + break + + result = [] + for c in cols or []: + if not isinstance(c, dict): + continue + name = c.get('name') or c.get('displayName') + typ = c.get('type') or c.get('dataType') or c.get('data', {}).get('type') if isinstance(c.get('data'), dict) else c.get('type') + result.append({"name": name, "type": typ}) + + return result + + @kernel_function(description="Return only aggregatable (numeric) columns from a sample or provided query.") + @plugin_function_logger("AzureBillingPlugin") + def get_aggregatable_columns(self, subscription_id: str, resource_group_name: Optional[str] = None, query: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]: + """ + Returns columns suitable for aggregation (numeric types). Uses `get_query_columns` internally. + """ + cols = self.get_query_columns(subscription_id, resource_group_name, query) + if isinstance(cols, dict) and ("error" in cols or "consent_url" in cols): + return cols + numeric_types = {"Number", "Double", "Integer", "Decimal", "Long", "Float"} + agg = [c for c in (cols or []) if (c.get('type') in numeric_types or (isinstance(c.get('type'), str) and c.get('type').lower() == 'number'))] + return agg + + + @kernel_function(description="Get the expected formatting, in JSON, for run_data_query parameters.") + @plugin_function_logger("AzureBillingPlugin") + def get_run_data_query_format(self) -> Dict[str, Any]: + """ + Returns an example JSON object describing the expected parameters for run_data_query. + Includes required/optional fields, types, valid values, and reflects the latest method signature. 
+ """ + return { + "conversation_id": "", + "subscription_id": "", + "resource_group_name": "", + "query_type": f"", + "start_datetime": "", + "end_datetime": "", + "granularity": f"", + "aggregations": [ + { + "name": "totalCost", + "function": f"", + "column": f"" + } + ], + "groupings": [ + { + "type": f"", + "name": f"" + } + ], + "query_filter": "", + "example_request": { + "conversation_id": "abc123", + "subscription_id": "00000000-0000-0000-0000-000000000000", + "query_type": "Usage", + "start_datetime": "2025-04-01T00:00:00-04:00", + "end_datetime": "2025-09-30T23:59:59-04:00", + "granularity": "Daily", + "aggregations": [ + {"name": "totalCost", "function": "Sum", "column": "PreTaxCost"} + ], + "groupings": [ + {"type": "Dimension", "name": "ResourceType"} + ] + }, + "example_response": { + "status": "ok", + "row_count": 3, + "rows": [ + {"ResourceType": "microsoft.compute/virtualmachines", "PreTaxCost": 12694.43}, + {"ResourceType": "microsoft.compute/disks", "PreTaxCost": 4715.20}, + {"ResourceType": "microsoft.keyvault/vaults", "PreTaxCost": 201.11} + ], + "columns": [ + {"name": "ResourceType", "type": "String"}, + {"name": "PreTaxCost", "type": "Number"} + ], + "plot_hints": { + "recommended": { + "default": { + "graph_type": "column_stacked", + "x_keys": ["ResourceType"], + "y_keys": ["PreTaxCost"] + }, + "pie": { + "graph_type": "pie", + "x_keys": ["ResourceType"], + "y_keys": ["PreTaxCost"] + } + } + } + }, + "workflow": [ + "Call run_data_query to retrieve rows, columns, csv, and plot_hints.", + "Always provide start_datetime and end_datetime using ISO-8601 strings (e.g., 2025-11-01T00:00:00Z).", + "Always supply at least one aggregation entry; the plugin no longer infers defaults when none are provided.", + "Include at least one grouping (Dimension + name) so the query can bucket the data.", + "Inspect plot_hints['recommended'] for suggested x_keys, y_keys, and chart types.", + "Pass rows (or the csv string) plus the chosen keys into plot_chart to render and persist a graph." + ] + } + + # Returns the expected input data format for plot_custom_chart + @kernel_function(description="Get the expected input data format for plot_custom_chart (graphing) as JSON.") + @plugin_function_logger("AzureBillingPlugin") + def get_plot_chart_format(self) -> Dict[str, Any]: + """ + Returns an example object describing the expected 'data' parameter for plot_custom_chart. + The 'data' field should be a CSV string (with headers and rows), matching the output format of run_data_query. + """ + return { + "conversationId": "", + "data": "", + "x_keys": ["ResourceType"], + "y_keys": ["PreTaxCost"], + "graph_type": "pie", + "title": "Cost share by resource type", + "xlabel": "Resource Type", + "ylabel": "Cost (USD)", + "filename": "chart.png", + "figsize": [7.0, 5.0], + "notes": [ + "Feed the list returned in run_data_query['rows'] directly, or supply the CSV from run_data_query['csv'].", + "Pick x_keys/y_keys from run_data_query['plot_hints']['recommended'] to ensure compatible chart input.", + "Pie charts require exactly one numeric y_key; stacked/grouped charts accept multiple." + ] + } + + def upload_cosmos_message(self, + conversation_id: str, + content: str) -> Dict[str, Any]: + """ + Upload a message to Azure Cosmos DB. 
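+        Content larger than roughly 1.5 MB is split into a parent 'image' document plus
+        'image_chunk' documents linked by parent_message_id, keeping each item under the
+        2 MB Cosmos DB document limit.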
+ """ + try: + image_message_id = f"{conversation_id}_image_{int(time.time())}_{random.randint(1000,9999)}" + # Check if image data is too large for a single Cosmos document (2MB limit) + # Account for JSON overhead by using 1.5MB as the safe limit for base64 content + max_content_size = 1500000 # 1.5MB in bytes + + if len(content) > max_content_size: + debug_print(f"Large image detected ({len(content)} bytes), splitting across multiple documents") + + # Split the data URL into manageable chunks + if content.startswith('data:image/png;base64,'): + # Extract just the base64 part for splitting + data_url_prefix = 'data:image/png;base64,' + base64_content = content[len(data_url_prefix):] + debug_print(f"Extracted base64 content length: {len(base64_content)} bytes") + else: + # For regular URLs, store as-is (shouldn't happen with large content) + data_url_prefix = '' + base64_content = content + + # Calculate chunk size and number of chunks + chunk_size = max_content_size - len(data_url_prefix) - 200 # More room for JSON overhead + chunks = [base64_content[i:i+chunk_size] for i in range(0, len(base64_content), chunk_size)] + total_chunks = len(chunks) + + debug_print(f"Splitting into {total_chunks} chunks of max {chunk_size} bytes each") + for i, chunk in enumerate(chunks): + debug_print(f"Chunk {i} length: {len(chunk)} bytes") + + # Verify we can reassemble before storing + reassembled_test = data_url_prefix + ''.join(chunks) + if len(reassembled_test) == len(content): + debug_print(f"✅ Chunking verification passed - can reassemble to original size") + else: + debug_print(f"❌ Chunking verification failed - {len(reassembled_test)} vs {len(content)}") + + + # Create main image document with metadata + main_image_doc = { + 'id': image_message_id, + 'conversation_id': conversation_id, + 'role': 'image', + 'content': f"{data_url_prefix}{chunks[0]}", # First chunk with data URL prefix + 'prompt': '', + 'created_at': datetime.datetime.utcnow().isoformat(), + 'timestamp': datetime.datetime.utcnow().isoformat(), + 'model_deployment_name': 'azurebillingplugin', + 'metadata': { + 'is_chunked': True, + 'total_chunks': total_chunks, + 'chunk_index': 0, + 'original_size': len(content) + } + } + + # Create additional chunk documents + chunk_docs = [] + for i in range(1, total_chunks): + chunk_doc = { + 'id': f"{image_message_id}_chunk_{i}", + 'conversation_id': conversation_id, + 'role': 'image_chunk', + 'content': chunks[i], + 'parent_message_id': image_message_id, + 'created_at': datetime.datetime.utcnow().isoformat(), + 'timestamp': datetime.datetime.utcnow().isoformat(), + 'metadata': { + 'is_chunk': True, + 'chunk_index': i, + 'total_chunks': total_chunks, + 'parent_message_id': image_message_id + } + } + chunk_docs.append(chunk_doc) + + # Store all documents + debug_print(f"Storing main document with content length: {len(main_image_doc['content'])} bytes") + cosmos_messages_container.upsert_item(main_image_doc) + + for i, chunk_doc in enumerate(chunk_docs): + debug_print(f"Storing chunk {i+1} with content length: {len(chunk_doc['content'])} bytes") + cosmos_messages_container.upsert_item(chunk_doc) + + debug_print(f"Successfully stored image in {total_chunks} documents") + debug_print(f"Main doc content starts with: {main_image_doc['content'][:50]}...") + debug_print(f"Main doc content ends with: ...{main_image_doc['content'][-50:]}") + else: + # Small image - store normally in single document + debug_print(f"Small image ({len(content)} bytes), storing in single document") + + image_doc = { + 'id': 
image_message_id, + 'conversation_id': conversation_id, + 'role': 'image', + 'content': content, + 'prompt': "", + 'created_at': datetime.datetime.utcnow().isoformat(), + 'timestamp': datetime.datetime.utcnow().isoformat(), + 'model_deployment_name': "azurebillingplugin", + 'metadata': { + 'is_chunked': False, + 'original_size': len(content) + } + } + cosmos_messages_container.upsert_item(image_doc) + conversation_item = cosmos_conversations_container.read_item(item=conversation_id, partition_key=conversation_id) + conversation_item['last_updated'] = datetime.datetime.utcnow().isoformat() + cosmos_conversations_container.upsert_item(conversation_item) + #time.sleep(5) # sleep to allow the message to propogate and the front end to pick it up when receiving the agent response + except Exception as e: + print(f"[ABP] Error uploading image message to Cosmos DB: {str(e)}") + logging.error(f"[ABP] Error uploading image message to Cosmos DB: {str(e)}") diff --git a/application/community_customizations/actions/azure_billing_retriever/readme.md b/application/community_customizations/actions/azure_billing_retriever/readme.md new file mode 100644 index 00000000..982ec8ab --- /dev/null +++ b/application/community_customizations/actions/azure_billing_retriever/readme.md @@ -0,0 +1,52 @@ +**⚠️ NOT PRODUCTION READY — This action is a proof of concept.** + +# Azure Billing Action Instructions + +## Overview +The Azure Billing action is an experimental Semantic Kernel plugin that helps agents explore Azure Cost Management data, generate CSV outputs, and render server-side charts for conversational reporting. It stitches together Azure REST APIs, matplotlib rendering, and Cosmos DB persistence so prototype agents can investigate subscriptions, budgets, alerts, and forecasts without touching the production portal. It leverages message injection (direct cosmos_messages_container access) to store chart images as conversation artifacts in lieu of embedding binary data in chat responses. You will need to move the ```azure_billing_plugin.py``` to the [semantic-kernel-plugins](../../../single_app/semantic_kernel_plugins/) folder, and move the ```schema.json``` and ```definition.json``` to the [schemas](../../../single_app/static/json/schemas) folder. + +## Core capabilities +- Enumerate subscriptions and resource groups via `list_subscriptions*` helpers for quick scope discovery. +- Query budgets, alerts, and forecast data with Cost Management APIs, returning flattened CSV for low-token conversations. +- Execute fully custom `run_data_query(...)` calls that enforce ISO-8601 time windows, aggregations, and groupings while emitting plot hints. +- Generate Matplotlib charts (`pie`, `column_stacked`, `column_grouped`, `line`, `area`) through `plot_chart` / `plot_custom_chart`, storing PNGs in Cosmos DB per conversation. +- Offer helper endpoints (`get_query_configuration_options`, `get_query_columns`, `get_aggregatable_columns`, `get_run_data_query_format`, `get_plot_chart_format`) so agents can self-discover valid parameters. + +## Architecture highlights +- **Plugin class**: `AzureBillingPlugin` (see `azure_billing_plugin.py`) inherits from `BasePlugin`, exposing annotated `@kernel_function`s for the agent runtime. +- **Authentication**: supports user impersonation (via `get_valid_access_token_for_plugins`) and service principals defined in the plugin manifest; automatically selects the right AAD authority per cloud. 
+- **Data rendering**: CSV assembly uses in-memory writers, while charts are produced with matplotlib, encoded as base64 data URLs, and persisted to Cosmos DB for later retrieval.
+- **Sample assets**: `sample_pie.csv`, `sample_stacked_column.csv`, and `my_chart.png` demonstrate expected data formats and outputs for local experimentation.
+
+## Authentication & configuration
+1. Provide a plugin manifest with `endpoint`, `auth` (user or service principal), and optional `metadata/additionalFields` such as `apiVersion` (defaults to `2023-03-01`).
+2. Grant `user_impersonation` permission on the **Azure Service Management** resource (`40a69793-8fe6-4db1-9591-dbc5c57b17d8`) when testing user authentication.
+3. For sovereign clouds, set the management endpoint (e.g., `https://management.usgovcloudapi.net`) so the plugin can resolve the matching AAD authority.
+
+## Typical workflow
+1. **Discover scope**: call `list_subscriptions_and_resourcegroups()` or `list_subscriptions()` followed by `list_resource_groups(subscription_id)`.
+2. **Inspect available dimensions**: use `get_query_configuration_options()` plus `get_grouping_dimensions()` to learn valid aggregations and groupings.
+3. **Fetch data**: invoke `run_data_query(...)` with explicit `start_datetime`, `end_datetime`, at least one aggregation, and one grouping. The response includes `csv`, column metadata, and `plot_hints`.
+4. **Visualize**: immediately pass the returned rows or CSV into `plot_chart(...)`, selecting `x_keys`, `y_keys`, and `graph_type` from `plot_hints` (see the example sketch below). Include the same `conversation_id` so the base64 PNG is attached to the chat transcript in Cosmos DB.
+5. **Iterate**: explore budgets with `get_budgets`, monitor alerts via `get_alerts` / `get_specific_alert`, or generate multi-month forecasts through `get_forecast`.
+
+## Charting guidance
+- Supported graph types: `pie`, `column_stacked`, `column_grouped`, `line`, `area`.
+- `plot_chart` is a convenience wrapper that forwards to `plot_custom_chart`; both sanitize figure sizes, wrap long titles, and annotate stacked totals.
+- `suggest_plot_config` can analyze arbitrary CSV/rows to recommend labels and numeric fields when the Cost Management query did not originate from this plugin.
+
+## Outputs & persistence
+- Tabular results are returned as CSV strings to minimize token usage while keeping schemas explicit.
+- Chart payloads include metadata (axes, graph type, figure size) plus a `data:image/png;base64` URL; when `conversation_id` is supplied the image is chunked/stored inside `cosmos_messages_container` with retry-friendly metadata.
+- The agent should describe generated charts textually to users; binary content is delivered through the persisted conversation artifacts.
+
+## Limitations & cautions
+- No throttling, retry, or quota management has been implemented; expect occasional failures from Cost Management when running multiple heavy queries.
+- Error handling is best-effort: the plugin attempts to normalize enums, dates, and aggregations but may still raise when inputs are malformed.
+- Cosmos DB storage assumes the surrounding SimpleChat environment; using the plugin outside that context requires replacing the persistence hooks.
+- Security hardening (secret rotation, granular RBAC validation, zero-trust networking) has **not** been completed; do not expose this plugin to production tenants or sensitive billing data without additional review.
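+
+## Example (illustrative)
+The sketch below strings together steps 3 and 4 of the typical workflow. It is a hypothetical sketch, not a verbatim API: `plugin` stands in for an instantiated `AzureBillingPlugin` (or the agent's tool proxy), the subscription ID and dates are placeholders, and the payload shapes simply mirror what `get_run_data_query_format` and `get_plot_chart_format` document.
+
+```python
+# Hypothetical sketch: field names follow the plugin's documented request formats.
+result = plugin.run_data_query(
+    conversation_id="abc123",
+    subscription_id="00000000-0000-0000-0000-000000000000",
+    query_type="Usage",
+    start_datetime="2025-04-01T00:00:00Z",
+    end_datetime="2025-09-30T23:59:59Z",
+    granularity="Daily",
+    aggregations=[{"name": "totalCost", "function": "Sum", "column": "PreTaxCost"}],
+    groupings=[{"type": "Dimension", "name": "ResourceType"}],
+)
+
+# Reuse the recommended plot configuration emitted alongside the data.
+hint = result["plot_hints"]["recommended"]["default"]
+plugin.plot_chart(
+    conversationId="abc123",       # same conversation so the PNG lands in the transcript
+    data=result["csv"],            # CSV output from run_data_query
+    x_keys=hint["x_keys"],
+    y_keys=hint["y_keys"],
+    graph_type=hint["graph_type"],
+    title="Cost by resource type",
+)
+```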
+ +## Additional resources +- Review `instructions.md` in the same directory for the autonomous agent persona tailored to this action. +- Leverage the sample CSV files to validate plotting offline before wiring the plugin into a notebook or agent loop. + diff --git a/application/community_customizations/actions/databricks_mag/Dockerfile b/application/community_customizations/actions/databricks_mag/Dockerfile new file mode 100644 index 00000000..0d982c48 --- /dev/null +++ b/application/community_customizations/actions/databricks_mag/Dockerfile @@ -0,0 +1,125 @@ +# Stage 1: System dependencies and ODBC driver install +ARG PYTHON_VERSION_ARG="3.12" +FROM python:3.12 AS builder + +ARG PYTHON_VERSION_ARG +ARG DRIVER_MAJOR_VERSION="2.9.2" +ARG DRIVER_MINOR_VERSION=1008 +ARG BUCKET_URI="https://databricks-bi-artifacts.s3.us-east-2.amazonaws.com/simbaspark-drivers/odbc" + +ENV PYTHONIOENCODING=utf-8 +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 +ENV DRIVER_FULL_VERSION=${DRIVER_MAJOR_VERSION}.${DRIVER_MINOR_VERSION} +ENV FOLDER_NAME=SimbaSparkODBC-${DRIVER_FULL_VERSION}-Debian-64bit +ENV ZIP_FILE_NAME=${FOLDER_NAME}.zip + +WORKDIR /deps + +RUN apt-get update && apt-get install -y unixodbc unixodbc-dev wget unzip libsasl2-modules-gssapi-mit +# "https://databricks-bi-artifacts.s3.us-east-2.amazonaws.com/simbaspark-drivers/odbc/2.9.2/SimbaSparkODBC-2.9.2.1008-Debian-64bit.zip" +RUN wget -O /tmp/simbaspark.zip ${BUCKET_URI}/${DRIVER_MAJOR_VERSION}/${ZIP_FILE_NAME} \ + && unzip /tmp/simbaspark.zip -d /tmp/simbaspark && rm /tmp/simbaspark.zip + +RUN dpkg -i /tmp/simbaspark/SimbaSparkODBC-2.9.2.1008-Debian-64bit/simbaspark_2.9.2.1008-2_amd64.deb + +USER root +RUN groupadd -g 65532 nonroot && useradd -m -u 65532 -g nonroot nonroot +RUN python -m venv /app/venv +RUN pip install pyodbc \ + && pip install wheel \ + && pip wheel pyodbc -w /tmp/pyodbc-wheel + +#RUN find / -name "*odbc*" || true +RUN find / -name "*python${PYTHON_VERSION_ARG}*" || true + +WORKDIR /app +# Copy requirements and install them into the virtualenv +ENV PATH="/app/venv/bin:$PATH" +COPY requirements.txt . 
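+# Install the locally built pyodbc wheel first (it was compiled against unixodbc-dev above), then the remaining requirements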
+RUN pip install /tmp/pyodbc-wheel/pyodbc-*.whl \ + && pip install --no-cache-dir -r requirements.txt + +# Fix permissions so nonroot can use everything +RUN chown -R 65532:65532 /app + +RUN echo "[Simba Spark ODBC Driver]\nDescription=Simba Spark ODBC Driver\nDriver=/opt/simba/spark/lib/64/libsparkodbc_sb64.so" > /etc/odbcinst.ini +RUN echo "[ODBC Data Sources]\nSimba Spark ODBC DSN=Simba Spark ODBC Driver" > /etc/odbc.ini +RUN find / -type f -name '*odbc*' || true +RUN find /etc/ -type f -name '*odbc*' -exec echo "Contents of {}:" \; -exec cat {} \; || true + +RUN echo "PATH contents:" && echo $PATH | tr ':' '\n' \ + && echo "LD_LIBRARY_PATH contents:" && echo $LD_LIBRARY_PATH | tr ':' '\n' + +RUN mkdir -p /app/flask_session && chown -R 65532:65532 /app/flask_session +RUN mkdir /sc-temp-files +RUN cat /opt/simba/spark/Setup/odbc.ini +RUN cat /opt/simba/spark/Setup/odbcinst.ini +USER 65532:65532 + +#Stage 3: Final containter +FROM gcr.io/distroless/python3:latest +ARG PYTHON_VERSION_ARG +WORKDIR /app +USER root +ENV PYTHONIOENCODING=utf-8 +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 +ENV PYTHONUNBUFFERED=1 +ENV PATH="/app/venv/bin:/usr/local/bin:$PATH" +ENV LD_LIBRARY_PATH="/opt/simba/spark/lib/64:/usr/local/lib:/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH:-}" +#Copy 3.12 from Base +COPY --from=builder /usr/local/lib/python${PYTHON_VERSION_ARG} /usr/local/lib/python${PYTHON_VERSION_ARG} +COPY --from=builder \ + /usr/local/lib/libpython3.12.so \ + /usr/local/lib/libpython3.12.so.1.0 \ + /usr/local/lib/libpython3.so \ + /usr/local/lib/pkgconfig \ + /usr/local/lib/python3.12 \ + /usr/local/lib/python3.13 \ + /usr/local/lib/ +# Copy the Python interpreter with a specific name +COPY --from=builder /usr/local/bin/python${PYTHON_VERSION_ARG} /usr/local/bin/python${PYTHON_VERSION_ARG} +# Add all common Python 3.12 entrypoints for compatibility +COPY --from=builder \ + /usr/local/bin/python \ + /usr/local/bin/python3 \ + /usr/local/bin/python${PYTHON_VERSION_ARG} \ + /usr/local/bin/ + +# Copy system libraries for x86_64 +COPY --from=builder /lib/x86_64-linux-gnu/ /lib/x86_64-linux-gnu/ + +# Copy ODBC from deps build +COPY --from=builder /usr/include /usr/include +COPY --from=builder /opt/simba /opt/simba +COPY --from=builder \ + /etc/odbc.ini \ + /etc/odbcinst.ini \ + /etc/ +COPY --from=builder \ + /usr/lib/x86_64-linux-gnu/libodbc.so \ + /usr/lib/x86_64-linux-gnu/libodbc.so.2 \ + /usr/lib/x86_64-linux-gnu/libodbc.so.2.0.0 \ + /usr/lib/x86_64-linux-gnu/libodbcinst.so \ + /usr/lib/x86_64-linux-gnu/libodbcinst.so.2 \ + /usr/lib/x86_64-linux-gnu/libodbcinst.so.2.0.0 \ + /usr/lib/x86_64-linux-gnu/libodbccr.so \ + /usr/lib/x86_64-linux-gnu/libodbccr.so.2 \ + /usr/lib/x86_64-linux-gnu/libodbccr.so.2.0.0 \ + /usr/lib/x86_64-linux-gnu/ + +# Copy application code and set ownership +COPY --chown=65532:65532 . 
./ + +# Copy the virtualenv from the builder stage +COPY --from=builder --chown=65532:65532 /app/venv /app/venv +COPY --from=builder --chown=65532:65532 /app/flask_session /app/flask_session +COPY --from=builder --chown=65532:65532 /sc-temp-files /sc-temp-files + +# Expose port +EXPOSE 5000 + +USER 65532:65532 + +ENTRYPOINT ["/app/venv/bin/python", "-c", "import sys, runpy; print('Executable:', sys.executable); print('Version:', sys.version); runpy.run_path('/app/app.py', run_name='__main__')"] \ No newline at end of file diff --git a/application/community_customizations/actions/databricks_mag/databricks_table_plugin.additional_settings.schema.json b/application/community_customizations/actions/databricks_mag/databricks_table_plugin.additional_settings.schema.json new file mode 100644 index 00000000..7dcc6b2a --- /dev/null +++ b/application/community_customizations/actions/databricks_mag/databricks_table_plugin.additional_settings.schema.json @@ -0,0 +1,41 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Databricks Table Plugin Additional Settings", + "type": "object", + "properties": { + "warehouse_id": { + "type": "string", + "description": "Databricks SQL Warehouse ID (string, required)" + }, + "httpPath": { + "type": "string", + "description": "Databricks SQL Warehouse HTTP Path (string, required)" + }, + "port": { + "type": "integer", + "description": "Port for Databricks ODBC connection (default 443)", + "default": 443 + }, + "database": { + "type": "string", + "description": "Default database/schema to use (optional)" + }, + "table_name": { + "type": "string", + "description": "Name of the hive table that represents the 'global catalog'" + }, + "query_history": { + "type": "array", + "items": { + "type": "object", + "properties": { + "query": { + "type": "string" + } + } + } + } + }, + "required": ["warehouse_id", "httpPath"], + "additionalProperties": false +} diff --git a/application/community_customizations/actions/databricks_mag/databricks_table_plugin.py b/application/community_customizations/actions/databricks_mag/databricks_table_plugin.py new file mode 100644 index 00000000..7fa7ec89 --- /dev/null +++ b/application/community_customizations/actions/databricks_mag/databricks_table_plugin.py @@ -0,0 +1,258 @@ +# databricks_table_plugin.py +""" +Databricks Table Plugin for Semantic Kernel +- Dynamically created per table manifest +- Executes parameterized SQL via Databricks REST API +""" + +import requests +import logging +import pyodbc +import re +import sqlglot +from semantic_kernel_plugins.base_plugin import BasePlugin +from typing import Annotated, List, Optional, Required +from functions_appinsights import log_event +from semantic_kernel.functions import kernel_function + +class DatabricksTablePlugin(BasePlugin): + def __init__(self, manifest): + self.manifest = manifest + self.authtype = manifest.get('auth', {}).get('type', 'key') + self.endpoint = manifest['endpoint'] + self.key = manifest.get('auth', {}).get('key', None) + self.identity = manifest.get('auth', {}).get('identity', None) + self.client_id = manifest.get('auth', {}).get('identity', None) + self.client_secret = manifest.get('auth', {}).get('key', None) + self.tenant_id = manifest.get('auth', {}).get('tenantId', None) + self._metadata = manifest['metadata'] + self.warehouse_id = manifest['additionalFields'].get('warehouse_id', '') + self.table_name = manifest['additionalFields'].get('table_name', '') + self.port = manifest['additionalFields'].get('port', 443) + self.http_path = 
manifest['additionalFields'].get('httpPath', '') + + def _get_azure_ad_token(self): + """Acquire Azure AD token for Databricks using Service Principal credentials, supporting Commercial and MAG.""" + # Determine the correct login endpoint and scope based on the Databricks endpoint + if ".azure.us" in self.endpoint or ".us/" in self.endpoint: + login_host = "login.microsoftonline.us" + scope = "https://databricks.azure.us/.default" + else: + login_host = "login.microsoftonline.com" + scope = "https://databricks.azure.net/.default" + url = f"https://{login_host}/{self.tenant_id}/oauth2/v2.0/token" + data = { + "grant_type": "client_credentials", + "client_id": self.client_id, + "client_secret": self.client_secret, + "scope": scope + } + resp = requests.post(url, data=data) + resp.raise_for_status() + return resp.json()["access_token"] + + def _get_databricks_token(self): + if ".azure.us" in self.endpoint or ".us/" in self.endpoint: + login_host = "login.microsoftonline.us" + scope = "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d/.default" + else: + login_host = "login.microsoftonline.com" + scope = "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d/.default" + url = f"https://{login_host}/{self.tenant_id}/oauth2/v2.0/token" + data = { + "grant_type": "client_credentials", + "client_id": self.client_id, + "client_secret": self.client_secret, + "scope": scope + } + headers = {"Content-Type": "application/x-www-form-urlencoded"} + resp = requests.post(url, data=data, headers=headers) + resp.raise_for_status() + print(f"[DBP] Received Databricks token response") + return resp.json()["access_token"] + + def _get_pyodbc_connection(self, additional_fields: dict = None): + """ + Create and return a DSN-less pyodbc connection to Databricks using parameters from the manifest and additional_fields. + Supports only Personal Access Token (PAT) authentication for now. + Args: + additional_fields (dict, optional): Additional connection parameters to override manifest values. + Returns: + pyodbc.Connection: An open pyodbc connection to Databricks. + Raises: + ValueError: If required fields are missing or authentication is not supported. + pyodbc.Error: If connection fails. 
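+        Note: when the manifest auth type is 'servicePrincipal', an Azure AD access token
+        is acquired and passed via Auth_AccessToken/AuthMech=11 instead of a PAT (see below).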
+ """ + # Merge manifest and additional_fields + fields = dict(self.manifest.get('additionalFields', {})) + if additional_fields: + fields.update(additional_fields) + + if not (self.warehouse_id and self.http_path and self.key and self.endpoint): + raise ValueError("Missing required ODBC connection parameters: warehouse_id, httpPath, endpoint, or PAT (key)") + + # Parse hostname from endpoint (strip protocol and path) + match = re.match(r"https?://([^/]+)", self.endpoint) + if not match: + raise ValueError(f"Invalid endpoint URL: {self.endpoint}") + host = match.group(1) + conn_str = None + # Only support PAT for now + if self.identity and self.identity.lower() == "managedIdentity": + raise NotImplementedError("Managed Identity authentication is not yet supported for ODBC.") + + # Build ODBC connection string + if self.authtype == "key": + print("[DBP] Using Personal Access Token Auth") + conn_str = "Driver={Simba Spark ODBC Driver};" + \ + f"Host={host};" + \ + f"Port={self.port};" + \ + f"HTTPPath={self.http_path};" + \ + "AuthMech=3;" + \ + "UID=token;" + \ + f"PWD={self.key};" + \ + "SSL=1;" + \ + "SSLVersion=TLSv1.2;" + \ + "ThriftTransport=2;" + \ + "Database=default;" + \ + "SparkServerType=3;" + + if self.authtype == "servicePrincipal": + print("[DBP] Using Service Principal Auth") + #access_token = self._get_azure_ad_token() + access_token = self._get_databricks_token() + conn_str = "Driver={Simba Spark ODBC Driver};" + \ + f"Host={host};" + \ + f"Port={self.port};" + \ + f"HTTPPath={self.http_path};" + \ + f"Auth_AccessToken={access_token};" + \ + "AuthMech=11;" + \ + "Auth_Flow=0;" + \ + "SSL=1;" + \ + "SSLVersion=TLSv1.2;" + \ + "ThriftTransport=2;" + \ + "Database=default;" + \ + "SparkServerType=3;" + + if conn_str is None: + print(f"[DBP] Unsupported auth type for ODBC: {self.authtype}") + raise ValueError(f"Unsupported authentication type for ODBC: {self.authtype}") + + try: + conn = pyodbc.connect(conn_str, autocommit=True) + print("[DBP] Successfully connected to Databricks via ODBC") + return conn + except Exception as ex: + logging.error(f"Failed to connect to Databricks ODBC: {ex}") + raise + + @property + def metadata(self): + # Compose a detailed description for the LLM and Semantic Kernel + user_desc = self._metadata.get("description", f"Databricks table plugin (table name required, columns optional)") + api_desc = ( + "This plugin executes SQL statements against Azure Databricks using the Statement Execution API. " + "It sends a POST request to the Databricks SQL endpoint provided in the manifest (e.g., 'https:///api/2.0/sql/statements'). " + "Authentication is via a Databricks personal access token or Azure AD token (for Service Principal), passed as a Bearer token in the 'Authorization' header. " + "The request body is JSON and must include: " + "'statement': the SQL query string to execute, and 'warehouse_id': the ID of the Databricks SQL warehouse to use. " + "Optional filters can be provided as keyword arguments and are converted into a SQL WHERE clause. " + "The plugin constructs the SQL statement based on the provided columns (optional), table_name (required), and filters, then submits it to Databricks. " + "If columns is not provided, all columns will be selected (SELECT *). " + "The response is the result of the SQL query, returned as JSON. 
" + "For more details, see: https://docs.databricks.com/api/azure/workspace/statementexecution/executestatement\n\n" + "Configuration: The plugin is configured with the Databricks API endpoint (from the manifest), access token or service principal credentials, warehouse_id via the plugin manifest. " + "The manifest should provide: 'endpoint', 'auth.key' or service principal fields, and 'additionalFields.warehouse_id'. " + "Example request body: { 'statement': 'SELECT * FROM my_table WHERE id = 1', 'warehouse_id': '' }. " + "The plugin handles parameterization and SQL construction automatically.\n\n" + "NOTE: The table name is required, columns is optional for the query_table function." + ) + full_desc = f"{user_desc}\n\n{api_desc}" + return { + "name": self._metadata.get("name", "databricks_table_plugin"), + "type": "databricks_table", + "description": full_desc, + "methods": [ + { + "name": "query_table", + "description": "Query the Databricks table using parameterized SQL. Table name is required, columns is optional. Filters can be applied as keyword arguments.", + "parameters": [ + {"name": "table_name", "type": "str", "description": "Name of the table to query", "required": True}, + {"name": "columns", "type": "List[str]", "description": "Columns to select (optional, selects all if not provided)", "required": False}, + {"name": "warehouse_id", "type": "str", "description": "Databricks warehouse ID", "required": False}, + {"name": "filters", "type": "dict", "description": "Additional filters as column=value pairs", "required": False} + ], + "returns": {"type": "dict", "description": "The query result as a dictionary (Databricks SQL API response)."} + } + ] + } + + def get_functions(self): + return ["query_table"] + + @kernel_function( + description=""" + Query the Databricks table using parameterized SQL. Table name is required and should be databasename.tablename format. + Only read-only queries (SELECT, SHOW, DESCRIBE, EXPLAIN) are allowed. + Returns the query result as a list of dictionaries, or an error result if the query is not allowed or fails. + """, + name="query_table", + ) + async def query_table( + self, + query: str, + ) -> dict: + # Only allow read-only queries + try: + statements = sqlglot.parse(query) + for stmt in statements: + if stmt.key.upper() not in ("SELECT", "SHOW", "DESCRIBE", "EXPLAIN"): + return { + "error": True, + "message": f"Only read-only queries (SELECT, SHOW, DESCRIBE, EXPLAIN) are allowed. 
Found: {stmt.key}", + "query": query, + "result": [] + } + conn = self._get_pyodbc_connection() + cursor = conn.cursor() + print(f"[DBP] Executing SQL: {query}") + cursor.execute(query) + print(f"[DBP] Executed successfully: {query}") + # JSON format + """ + columns = [col[0] for col in cursor.description] + rows = cursor.fetchall() + result = [dict(zip(columns, row)) for row in rows] + """ + #CSV format for data compression + columns = [col[0] for col in cursor.description] + rows = cursor.fetchall() + csv_lines = [",".join(columns)] + for row in rows: + csv_row = [str(val).replace('"', '""') for val in row] + csv_lines.append(",".join(f'"{v}"' for v in csv_row)) + + result = "\n".join(csv_lines) + + cursor.close() + conn.close() + # Estimate token count (approximate: 1 token ≈ 4 characters) + result_str = str(result) + char_count = len(result_str) + approx_tokens = char_count // 4 + print(f"[DBP] Queried {len(result)} rows from query | {char_count} chars ≈ {approx_tokens} tokens") + return { + "error": False, + "message": "Success", + "query": query, + "result": result + } + except Exception as ex: + logging.error(f"Failed to run query {query}: {ex}") + print(f"[DBP] Failed to run query: {query}\n {ex}") + return { + "error": True, + "message": f"Error: {ex}", + "query": query, + "result": [] + } diff --git a/application/community_customizations/actions/databricks_mag/readme.md b/application/community_customizations/actions/databricks_mag/readme.md new file mode 100644 index 00000000..e69de29b diff --git a/application/community_customizations/kusto_queries/default_token_consumption.kql b/application/community_customizations/kusto_queries/default_token_consumption.kql new file mode 100644 index 00000000..010f7b6f --- /dev/null +++ b/application/community_customizations/kusto_queries/default_token_consumption.kql @@ -0,0 +1,30 @@ +let base = + AppTraces + | where Message startswith "[tokens]" + | extend + user_id = tostring(Properties.user_id), + active_group_id = tostring(Properties.active_group_id), + doc_scope = tostring(Properties.doc_scope), + total_tokens = toint(Properties.total_tokens), + prompt_tokens = toint(Properties.prompt_tokens), + completion_tokens = toint(Properties.completion_tokens); +let per_group = + base + | summarize + sum_total_tokens = sum(total_tokens), + sum_prompt_tokens = sum(prompt_tokens), + sum_completion_tokens = sum(completion_tokens) + by user_id, active_group_id, doc_scope + | extend total = sum_total_tokens; +per_group +| union ( + per_group + | summarize + user_id = "ALL_USERS", + active_group_id = "ALL_GROUPS", + doc_scope = "ALL_DOCS", + sum_total_tokens = sum(sum_total_tokens), + sum_prompt_tokens = sum(sum_prompt_tokens), + sum_completion_tokens = sum(sum_completion_tokens), + total = sum(total) +) \ No newline at end of file diff --git a/application/single_app/.gitignore b/application/single_app/.gitignore index cb44490a..803cb917 100644 --- a/application/single_app/.gitignore +++ b/application/single_app/.gitignore @@ -194,4 +194,4 @@ cython_debug/ # exclude from AI features like autocomplete and code analysis. 
Recommended for sensitive data # refer to https://docs.cursor.com/context/ignore-files .cursorignore -.cursorindexingignore \ No newline at end of file +.cursorindexingignore diff --git a/application/single_app/Dockerfile b/application/single_app/Dockerfile index 70b8ec9a..65483ac6 100644 --- a/application/single_app/Dockerfile +++ b/application/single_app/Dockerfile @@ -1,32 +1,72 @@ -# Builder stage: install dependencies in a virtualenv -FROM cgr.dev/chainguard/python:latest-dev AS builder +# Create nonroot user/group with a stable UID/GID (choose values consistent with your org) +ARG UID=65532 +ARG GID=65532 -WORKDIR /app +FROM mcr.microsoft.com/azurelinux/base/python:3.12 AS builder + +ARG UID +ARG GID + +# Setup pip.conf if has content +COPY pip.conf.d/ /etc/pip.conf.d + +# CA +# copy certs to /etc/pki/ca-trust/source/anchors +COPY custom-ca-certificates/ /etc/ssl/certs +RUN mkdir -p /etc/pki/ca-trust/source/anchors/ \ + && update-ca-trust enable \ + && cp /etc/ssl/certs/*.crt /etc/pki/ca-trust/source/anchors/ \ + && update-ca-trust extract -# Create a Python virtual environment -RUN python -m venv /app/venv +ENV PYTHONUNBUFFERED=1 -# Copy requirements and install them into the virtualenv -COPY application/single_app/requirements.txt . -ENV PATH="/app/venv/bin:$PATH" -RUN pip install --no-cache-dir -r requirements.txt +RUN set -eux; \ + echo "nonroot:x:${GID}:" >> /etc/group; \ + echo "nonroot:x:${UID}:${GID}:nonroot:/home/nonroot:/bin/bash" >> /etc/passwd; \ + mkdir -p /home/nonroot; \ + chown ${UID}:${GID} /home/nonroot; \ + mkdir -p /app; \ + chown ${UID}:${GID} /app; \ + chmod 744 /app -FROM cgr.dev/chainguard/python:latest +RUN mkdir -p /app/flask_session && chown -R ${UID}:${GID} /app/flask_session +RUN mkdir /sc-temp-files && chown -R ${UID}:${GID} /sc-temp-files WORKDIR /app -ENV PYTHONUNBUFFERED=1 -ENV PATH="/app/venv/bin:$PATH" +# Copy requirements and install them to system +COPY --chown=${UID}:${GID} application/single_app/requirements.txt . 
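+# Install dependencies into the builder's system site-packages; the distroless runtime stage copies /usr/lib/python3.12 from this stage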
+RUN python3 -m pip install --no-cache-dir -r requirements.txt -# Copy application code and set ownership -COPY --chown=nonroot:nonroot application/single_app ./ +FROM mcr.microsoft.com/azurelinux/distroless/python:3.12 + +ARG UID +ARG GID -# Copy the virtualenv from the builder stage -COPY --from=builder --chown=nonroot:nonroot /app/venv /app/venv +COPY --from=builder /etc/pki /etc/pki +COPY --from=builder /home/nonroot /home/nonroot +COPY --from=builder /etc/passwd /etc/passwd +COPY --from=builder /etc/group /etc/group +COPY --from=builder /usr/lib/python3.12 /usr/lib/python3.12 + +USER ${UID}:${GID} + +COPY --from=builder --chown=${UID}:${GID} /app /app +COPY --from=builder --chown=${UID}:${GID} /sc-temp-files /sc-temp-files + +ENV HOME=/home/nonroot \ + PATH="/home/nonroot/.local/bin:$PATH" \ + PYTHONIOENCODING=utf-8 \ + LANG=C.UTF-8 \ + LC_ALL=C.UTF-8 \ + PYTHONUNBUFFERED=1 + +WORKDIR /app + +# Copy application code and set ownership +COPY --chown=${UID}:${GID} application/single_app ./ # Expose port EXPOSE 5000 -USER nonroot:nonroot - -ENTRYPOINT [ "python", "/app/app.py" ] +ENTRYPOINT [ "python3", "/app/app.py" ] diff --git a/application/single_app/__init__.py b/application/single_app/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/application/single_app/agent_logging_chat_completion.py b/application/single_app/agent_logging_chat_completion.py index 1e1ae3ce..e4173ef2 100644 --- a/application/single_app/agent_logging_chat_completion.py +++ b/application/single_app/agent_logging_chat_completion.py @@ -144,13 +144,6 @@ async def invoke(self, *args, **kwargs): } ) - log_event("[Logging Agent Request] Agent invoke started", - extra={ - "agent": self.name, - "prompt_preview": [m.content[:30] for m in args[0]] if args else None - }, - level=logging.DEBUG) - # Store user question context for better tool detection if args and args[0] and hasattr(args[0][-1], 'content'): self._user_question = args[0][-1].content @@ -163,12 +156,14 @@ async def invoke(self, *args, **kwargs): initial_message_count = len(args[0]) if args and args[0] else 0 result = super().invoke(*args, **kwargs) - log_event("[Logging Agent Request] Result received", - extra={ - "agent": self.name, - "result_type": type(result).__name__ - }, - level=logging.DEBUG) + log_event( + "[Logging Agent Request] Result received", + extra={ + "agent": self.name, + "result_type": type(result).__name__ + }, + level=logging.DEBUG + ) if hasattr(result, "__aiter__"): # Streaming/async generator response @@ -180,13 +175,15 @@ async def invoke(self, *args, **kwargs): # Regular coroutine response response = await result - log_event("[Logging Agent Request] Response received", - extra={ - "agent": self.name, - "response_type": type(response).__name__, - "response_preview": str(response)[:100] if response else None - }, - level=logging.DEBUG) + log_event( + "[Logging Agent Request] Response received", + extra={ + "agent": self.name, + "response_type": type(response).__name__, + "response_preview": str(response)[:100] if response else None + }, + level=logging.DEBUG + ) # Store the response for analysis self._last_response = response diff --git a/application/single_app/agent_logging_chat_completion_backup.py b/application/single_app/agent_logging_chat_completion_backup.py deleted file mode 100644 index 96afacf8..00000000 --- a/application/single_app/agent_logging_chat_completion_backup.py +++ /dev/null @@ -1,481 +0,0 @@ - - -import json -from pydantic import Field -from semantic_kernel.agents import ChatCompletionAgent -from 
functions_appinsights import log_event -import datetime -import re - - -class LoggingChatCompletionAgent(ChatCompletionAgent): - display_name: str | None = Field(default=None) - default_agent: bool = Field(default=False) - tool_invocations: list = Field(default_factory=list) - - def __init__(self, *args, display_name=None, default_agent=False, **kwargs): - # Remove these from kwargs so the base class doesn't see them - kwargs.pop('display_name', None) - kwargs.pop('default_agent', None) - super().__init__(*args, **kwargs) - self.display_name = display_name - self.default_agent = default_agent - # tool_invocations is now properly declared as a Pydantic field - - def log_tool_execution(self, tool_name, arguments=None, result=None): - """Manual method to log tool executions. Can be called by plugins.""" - tool_citation = { - "tool_name": tool_name, - "function_arguments": str(arguments) if arguments else "", - "function_result": str(result)[:500] if result else "", - "timestamp": datetime.datetime.utcnow().isoformat() - } - self.tool_invocations.append(tool_citation) - log_event( - f"[Agent Citations] Tool execution logged: {tool_name}", - extra={ - "agent": self.name, - "tool_name": tool_name, - "result_length": len(str(result)) if result else 0 - } - ) - - def patch_plugin_methods(self): - """ - DISABLED: Plugin method patching to prevent duplication. - Plugin logging is now handled by the @plugin_function_logger decorator system. - Citations are extracted from the plugin invocation logger in route_backend_chats.py. - """ - print(f"[Agent Logging] Skipping plugin method patching - using plugin invocation logger instead") - pass - - def infer_sql_query_from_context(self, user_question, response_content): - """Infer the likely SQL query based on user question and response.""" - if not user_question or not response_content: - return None, None - - user_q = user_question.lower() - response = response_content.lower() - - # Pattern matching for common query types - if any(phrase in user_q for phrase in ['most played', 'most popular', 'played the most', 'highest number']): - if 'craps crazy' in response and '422' in response: - return ( - "SELECT GameName, COUNT(*) as PlayCount FROM CasinoGameInteractions GROUP BY GameName ORDER BY PlayCount DESC LIMIT 1", - "Query returned: GameName='Craps Crazy', PlayCount=422 (most played game in the database)" - ) - else: - return ( - "SELECT GameName, COUNT(*) as PlayCount FROM CasinoGameInteractions GROUP BY GameName ORDER BY PlayCount DESC", - f"Executed aggregation query to find most played games. Result: {response_content[:100]}" - ) - - elif any(phrase in user_q for phrase in ['least played', 'least popular', 'played the least']): - return ( - "SELECT GameName, COUNT(*) as PlayCount FROM CasinoGameInteractions GROUP BY GameName ORDER BY PlayCount ASC LIMIT 1", - f"Query to find least played game. Result: {response_content[:100]}" - ) - - elif any(phrase in user_q for phrase in ['total', 'count', 'how many']): - if 'game' in user_q: - return ( - "SELECT COUNT(DISTINCT GameName) as TotalGames FROM CasinoGameInteractions", - f"Count query executed. Result: {response_content[:100]}" - ) - else: - return ( - "SELECT COUNT(*) as TotalInteractions FROM CasinoGameInteractions", - f"Count query executed. 
Result: {response_content[:100]}" - ) - - elif any(phrase in user_q for phrase in ['average', 'mean']): - if any(word in user_q for word in ['bet', 'wager']): - return ( - "SELECT AVG(BetAmount) as AvgBet FROM CasinoGameInteractions WHERE BetAmount IS NOT NULL", - f"Average bet calculation. Result: {response_content[:100]}" - ) - elif any(word in user_q for word in ['win', 'winning']): - return ( - "SELECT AVG(WinAmount) as AvgWin FROM CasinoGameInteractions WHERE WinAmount IS NOT NULL", - f"Average win calculation. Result: {response_content[:100]}" - ) - - elif any(phrase in user_q for phrase in ['list', 'show', 'what are']): - if 'game' in user_q: - return ( - "SELECT DISTINCT GameName FROM CasinoGameInteractions ORDER BY GameName", - f"List of games query. Result: {response_content[:150]}" - ) - - # Default fallback - return ( - "SELECT * FROM CasinoGameInteractions WHERE 1=1 /* query inferred from context */", - f"Executed query based on user question: '{user_question}'. Result: {response_content[:100]}" - ) - - def extract_tool_invocations_from_history(self, chat_history): - """Extract tool invocations from chat history for citations.""" - tool_citations = [] - - if not chat_history: - return tool_citations - - try: - # Iterate through chat history to find function calls and responses - for message in chat_history: - # Check if message has function calls in various formats - if hasattr(message, 'items') and message.items: - for item in message.items: - # Look for function call content (standard SK format) - if hasattr(item, 'function_name') and hasattr(item, 'function_result'): - tool_citation = { - "tool_name": item.function_name, - "function_arguments": str(getattr(item, 'arguments', {})), - "function_result": str(item.function_result)[:500], # Limit result size - "timestamp": datetime.datetime.utcnow().isoformat() - } - tool_citations.append(tool_citation) - # Alternative: Check for function call in content - elif hasattr(item, 'function_call'): - func_call = item.function_call - tool_citation = { - "tool_name": getattr(func_call, 'name', 'unknown'), - "function_arguments": str(getattr(func_call, 'arguments', {})), - "function_result": "Function called", - "timestamp": datetime.datetime.utcnow().isoformat() - } - tool_citations.append(tool_citation) - # Check for function result content type - elif hasattr(item, 'content_type') and item.content_type == 'function_result': - tool_citation = { - "tool_name": getattr(item, 'name', 'unknown_function'), - "function_arguments": "", - "function_result": str(getattr(item, 'text', ''))[:500], - "timestamp": datetime.datetime.utcnow().isoformat() - } - tool_citations.append(tool_citation) - - # Check for function calls in message metadata or inner content - if hasattr(message, 'metadata') and message.metadata: - # Look for function call metadata - for key, value in message.metadata.items(): - if 'function' in key.lower() or 'tool' in key.lower(): - tool_citation = { - "tool_name": f"metadata_{key}", - "function_arguments": "", - "function_result": str(value)[:500], - "timestamp": datetime.datetime.utcnow().isoformat() - } - tool_citations.append(tool_citation) - - # Check message role for tool/function messages - if hasattr(message, 'role') and hasattr(message, 'name'): - if message.role.value in ['tool', 'function']: - tool_citation = { - "tool_name": message.name or 'unknown_tool', - "function_arguments": "", - "function_result": str(getattr(message, 'content', ''))[:500], - "timestamp": datetime.datetime.utcnow().isoformat() - } - 
tool_citations.append(tool_citation) - - # Check for tool content in message content - if hasattr(message, 'content') and isinstance(message.content, str): - # Look for tool execution patterns in content - if "function_name:" in message.content or "tool_name:" in message.content: - # Extract tool information from content - tool_citation = { - "tool_name": "extracted_from_content", - "function_arguments": "", - "function_result": message.content[:500], - "timestamp": datetime.datetime.utcnow().isoformat() - } - tool_citations.append(tool_citation) - - except Exception as e: - log_event( - "[Agent Citations] Error extracting tool invocations from chat history", - extra={"agent": self.name, "error": str(e)}, - level="WARNING" - ) - - return tool_citations - - async def invoke(self, *args, **kwargs): - # Clear previous tool invocations - self.tool_invocations = [] - - # Log the prompt/messages before sending to LLM - log_event( - "[Logging Agent Request] Agent LLM prompt", - extra={ - "agent": self.name, - "prompt": [m.content[:30] for m in args[0]] if args else None - } - ) - - print(f"[Logging Agent Request] Agent: {self.name}") - print(f"[Logging Agent Request] Prompt: {[m.content[:30] for m in args[0]] if args else None}") - - # Store user question context for better tool detection - if args and args[0] and hasattr(args[0][-1], 'content'): - self._user_question = args[0][-1].content - elif args and args[0] and isinstance(args[0][-1], dict) and 'content' in args[0][-1]: - self._user_question = args[0][-1]['content'] - - # Apply patching to capture function calls - try: - self.patch_plugin_methods() - except Exception as e: - log_event(f"[Agent Citations] Error applying plugin patches: {e}", level="WARNING") - - response = None - try: - # Store initial message count to detect new messages from tool usage - initial_message_count = len(args[0]) if args and args[0] else 0 - result = super().invoke(*args, **kwargs) - - print(f"[Logging Agent Request] Result: {result}") - - if hasattr(result, "__aiter__"): - # Streaming/async generator response - response_chunks = [] - async for chunk in result: - response_chunks.append(chunk) - response = response_chunks[-1] if response_chunks else None - else: - # Regular coroutine response - response = await result - - print(f"[Logging Agent Request] Response: {response}") - - # Store the response for analysis - self._last_response = response - # Try to capture tool invocations from multiple sources - self._capture_tool_invocations_comprehensive(args, response, initial_message_count) - # Fallback: If no tool_invocations were captured, log the main plugin output as a citation - if not self.tool_invocations and response and hasattr(response, 'content'): - self.tool_invocations.append({ - "tool_name": getattr(self, 'name', 'All Citations'), - "function_arguments": str(args[-1]) if args else "", - "function_result": str(response.content)[:500], - "timestamp": datetime.datetime.utcnow().isoformat() - }) - return response - finally: - usage = getattr(response, "usage", None) - log_event( - "[Logging Agent Response][Usage] Agent LLM response", - extra={ - "agent": self.name, - "response": str(response)[:100] if response else None, - "prompt_tokens": getattr(usage, "prompt_tokens", None), - "completion_tokens": getattr(usage, "completion_tokens", None), - "total_tokens": getattr(usage, "total_tokens", None), - "usage": str(usage) if usage else None, - "tool_invocations_count": len(self.tool_invocations) - } - ) - - def _capture_tool_invocations_comprehensive(self, 
args, response, initial_message_count): - """ - SIMPLIFIED: Tool invocation capture for agent citations. - Most citation data now comes from the plugin invocation logger system. - This method only provides basic fallback logging for edge cases. - """ - try: - # Only capture basic response information as fallback - if response and hasattr(response, 'content') and response.content: - # Create a simple fallback citation if no plugin data is available - tool_citation = { - "tool_name": getattr(self, 'name', 'Agent Response'), - "function_arguments": str(args[-1]) if args else "", - "function_result": str(response.content)[:500], - "timestamp": datetime.datetime.utcnow().isoformat() - } - # Only add if we don't already have tool invocations - if not self.tool_invocations: - self.tool_invocations.append(tool_citation) - - log_event( - "[Agent Citations] Simplified tool capture completed", - extra={ - "agent": self.name, - "fallback_citations": len(self.tool_invocations), - "note": "Primary citations come from plugin invocation logger" - } - ) - - except Exception as e: - log_event( - "[Agent Citations] Error in simplified tool capture", - extra={"agent": self.name, "error": str(e)}, - level="WARNING" - ) - - def _extract_from_new_messages(self, new_messages): - """DISABLED: Extract tool invocations from newly added messages.""" - pass # Plugin invocation logger handles this now - - def _extract_from_kernel_state(self): - """DISABLED: Extract tool invocations from kernel execution state.""" - pass # Plugin invocation logger handles this now - - def _extract_from_response_content(self, content): - """DISABLED: Extract tool invocations from response content analysis.""" - pass # Plugin invocation logger handles this now - - def detect_sql_plugin_usage_from_logs(self): - """DISABLED: Enhanced SQL plugin detection.""" - pass # Plugin invocation logger handles this now - "function_result": "Retrieved database schema including table CasinoGameInteractions with 14 columns: InteractionID (bigint, PK), PlayerID (int), GameID (int), GameName (nvarchar), InteractionType (nvarchar), BetAmount (decimal), WinAmount (decimal), InteractionTimestamp (datetime2), MachineID (nvarchar), SessionDurationSeconds (int), MarketingTag (nvarchar), StaffInteraction (bit), Location (nvarchar), InsertedAt (datetime2)", - "timestamp": datetime.datetime.utcnow().isoformat() - }) - sql_tools_detected.append({ - "tool_name": "sqlquerytest", - "function_arguments": "query: 'SELECT * FROM INFORMATION_SCHEMA.TABLES' and related schema queries", - "function_result": "Executed database schema retrieval queries to identify table structures, primary keys, and column definitions. 
Found 1 primary table: CasinoGameInteractions", - "timestamp": datetime.datetime.utcnow().isoformat() - }) - - # Method 3: Check kernel plugin state for SQL execution - if hasattr(self, 'kernel') and self.kernel and hasattr(self.kernel, 'plugins'): - for plugin_name, plugin in self.kernel.plugins.items(): - if 'sql' in plugin_name.lower(): - # Check for execution state in the plugin - for plugin_attr in dir(plugin): - # Filter out internal Python/Pydantic attributes - if any(skip_pattern in plugin_attr for skip_pattern in [ - '__', '_abc_', '_fields', '_config', 'pydantic', 'model_', - 'schema_', 'json_', 'dict_', 'parse_', 'copy_', 'construct' - ]): - continue - - if any(keyword in plugin_attr.lower() for keyword in ['result', 'execution', 'last', 'data', 'query', 'schema']): - try: - plugin_value = getattr(plugin, plugin_attr) - if plugin_value and not callable(plugin_value) and str(plugin_value) not in ['', 'None', None]: - # Only capture meaningful data - value_str = str(plugin_value) - if len(value_str) > 10 and not value_str.startswith('{'): # Skip small/empty objects - tool_name = "sqlschematest" if "schema" in plugin_attr.lower() else "sqlquerytest" - sql_tools_detected.append({ - "tool_name": tool_name, - "function_arguments": f"captured_from: {plugin_attr}", - "function_result": value_str[:400], - "timestamp": datetime.datetime.utcnow().isoformat() - }) - except Exception: - continue - - # Method 4: If we don't have specific data but know SQL agent was used, create enhanced placeholders - if hasattr(self, 'name') and 'sql' in self.name.lower() and not sql_tools_detected: - # Enhanced placeholders with more realistic data - sql_tools_detected.extend([ - { - "tool_name": "sqlschematest", - "function_arguments": "include_system_tables: False, table_filter: None", - "function_result": "Retrieved database schema including table CasinoGameInteractions with 14 columns: InteractionID (bigint, PK), PlayerID (int), GameID (int), GameName (nvarchar), InteractionType (nvarchar), BetAmount (decimal), WinAmount (decimal), InteractionTimestamp (datetime2), MachineID (nvarchar), SessionDurationSeconds (int), MarketingTag (nvarchar), StaffInteraction (bit), Location (nvarchar), InsertedAt (datetime2)", - "timestamp": datetime.datetime.utcnow().isoformat() - }, - { - "tool_name": "sqlquerytest", - "function_arguments": "query: 'SELECT * FROM INFORMATION_SCHEMA.TABLES' and related schema queries", - "function_result": "Executed database schema retrieval queries to identify table structures, primary keys, and column definitions. 
Found 1 primary table: CasinoGameInteractions", - "timestamp": datetime.datetime.utcnow().isoformat() - } - ]) - - self.tool_invocations.extend(sql_tools_detected) - - if sql_tools_detected: - log_event( - f"[Agent Citations] Enhanced SQL detection found {len(sql_tools_detected)} tool executions", - extra={ - "agent": self.name, - "detected_tools": [t['tool_name'] for t in sql_tools_detected], - "has_actual_data": any('CasinoGameInteractions' in t.get('function_result', '') for t in sql_tools_detected) - } - ) - - def _extract_from_agent_attributes(self): - """Extract tool invocations from agent attributes and state.""" - # Check for any attributes that might indicate plugin execution - for attr_name in dir(self): - if 'plugin' in attr_name.lower() or 'function' in attr_name.lower(): - try: - attr_value = getattr(self, attr_name) - if callable(attr_value): - continue # Skip methods - - # If it's a list or dict that might contain execution info - if isinstance(attr_value, (list, dict)) and attr_value: - tool_citation = { - "tool_name": f"agent_attribute_{attr_name}", - "function_arguments": "", - "function_result": str(attr_value)[:200], - "timestamp": datetime.datetime.utcnow().isoformat() - } - self.tool_invocations.append(tool_citation) - except Exception: - continue # Skip attributes that can't be accessed - - def _extract_from_kernel_logs(self): - """Extract tool invocations from kernel execution logs and function call history.""" - try: - # Check if the kernel has any plugin execution history or logs - if hasattr(self, 'kernel') and self.kernel: - # Check for plugin execution state - if hasattr(self.kernel, 'plugins') and self.kernel.plugins: - for plugin_name, plugin in self.kernel.plugins.items(): - if hasattr(plugin, '_last_execution') or hasattr(plugin, 'execution_log'): - tool_citation = { - "tool_name": plugin_name, - "function_arguments": "", - "function_result": f"Plugin {plugin_name} was executed", - "timestamp": datetime.datetime.utcnow().isoformat() - } - self.tool_invocations.append(tool_citation) - - # Check for function execution history on the kernel - if hasattr(self.kernel, 'function_invoking_handlers') or hasattr(self.kernel, 'function_invoked_handlers'): - # If we have function handlers, it means functions were likely called - # Try to capture any available execution state - for attr_name in dir(self.kernel): - if 'execute' in attr_name.lower() or 'invoke' in attr_name.lower(): - try: - attr_value = getattr(self.kernel, attr_name) - if not callable(attr_value) and str(attr_value) not in ['', 'None', None]: - tool_citation = { - "tool_name": f"kernel_{attr_name}", - "function_arguments": "", - "function_result": str(attr_value)[:200], - "timestamp": datetime.datetime.utcnow().isoformat() - } - self.tool_invocations.append(tool_citation) - except Exception: - continue - - # Check for any execution context in the current agent - for context_attr in ['_execution_context', '_function_results', '_plugin_results']: - if hasattr(self, context_attr): - try: - context_value = getattr(self, context_attr) - if context_value: - tool_citation = { - "tool_name": context_attr.replace('_', ''), - "function_arguments": "", - "function_result": str(context_value)[:300], - "timestamp": datetime.datetime.utcnow().isoformat() - } - self.tool_invocations.append(tool_citation) - except Exception: - continue - - except Exception as e: - log_event( - "[Agent Citations] Error extracting from kernel logs", - extra={"agent": self.name, "error": str(e)}, - level="WARNING" - ) - \ No newline at 
end of file diff --git a/application/single_app/agent_logging_chat_completion_clean.py b/application/single_app/agent_logging_chat_completion_clean.py deleted file mode 100644 index e1fd1834..00000000 --- a/application/single_app/agent_logging_chat_completion_clean.py +++ /dev/null @@ -1,217 +0,0 @@ - -import json -from pydantic import Field -from semantic_kernel.agents import ChatCompletionAgent -from functions_appinsights import log_event -import datetime -import re - - -class LoggingChatCompletionAgent(ChatCompletionAgent): - display_name: str | None = Field(default=None) - default_agent: bool = Field(default=False) - tool_invocations: list = Field(default_factory=list) - - def __init__(self, *args, display_name=None, default_agent=False, **kwargs): - # Remove these from kwargs so the base class doesn't see them - kwargs.pop('display_name', None) - kwargs.pop('default_agent', None) - super().__init__(*args, **kwargs) - self.display_name = display_name - self.default_agent = default_agent - # tool_invocations is now properly declared as a Pydantic field - - def log_tool_execution(self, tool_name, arguments=None, result=None): - """Manual method to log tool executions. Can be called by plugins.""" - tool_citation = { - "tool_name": tool_name, - "function_arguments": str(arguments) if arguments else "", - "function_result": str(result)[:500] if result else "", - "timestamp": datetime.datetime.utcnow().isoformat() - } - self.tool_invocations.append(tool_citation) - log_event( - f"[Agent Citations] Tool execution logged: {tool_name}", - extra={ - "agent": self.name, - "tool_name": tool_name, - "result_length": len(str(result)) if result else 0 - } - ) - - def patch_plugin_methods(self): - """ - DISABLED: Plugin method patching to prevent duplication. - Plugin logging is now handled by the @plugin_function_logger decorator system. - Citations are extracted from the plugin invocation logger in route_backend_chats.py. - """ - print(f"[Agent Logging] Skipping plugin method patching - using plugin invocation logger instead") - pass - - def infer_sql_query_from_context(self, user_question, response_content): - """Infer the likely SQL query based on user question and response.""" - if not user_question or not response_content: - return None, None - - user_q = user_question.lower() - response = response_content.lower() - - # Pattern matching for common query types - if any(phrase in user_q for phrase in ['most played', 'most popular', 'played the most', 'highest number']): - if 'craps crazy' in response and '422' in response: - return ( - "SELECT GameName, COUNT(*) as PlayCount FROM CasinoGameInteractions GROUP BY GameName ORDER BY PlayCount DESC LIMIT 1", - "Query returned: GameName='Craps Crazy', PlayCount=422 (most played game in the database)" - ) - else: - return ( - "SELECT GameName, COUNT(*) as PlayCount FROM CasinoGameInteractions GROUP BY GameName ORDER BY PlayCount DESC", - f"Executed aggregation query to find most played games. Result: {response_content[:100]}" - ) - - elif any(phrase in user_q for phrase in ['least played', 'least popular', 'played the least']): - return ( - "SELECT GameName, COUNT(*) as PlayCount FROM CasinoGameInteractions GROUP BY GameName ORDER BY PlayCount ASC LIMIT 1", - f"Query to find least played game. Result: {response_content[:100]}" - ) - - elif any(phrase in user_q for phrase in ['total', 'count', 'how many']): - if 'game' in user_q: - return ( - "SELECT COUNT(DISTINCT GameName) as TotalGames FROM CasinoGameInteractions", - f"Count query executed. 
Result: {response_content[:100]}" - ) - else: - return ( - "SELECT COUNT(*) as TotalInteractions FROM CasinoGameInteractions", - f"Count query executed. Result: {response_content[:100]}" - ) - - elif any(phrase in user_q for phrase in ['average', 'mean']): - if any(word in user_q for word in ['bet', 'wager']): - return ( - "SELECT AVG(BetAmount) as AvgBet FROM CasinoGameInteractions WHERE BetAmount IS NOT NULL", - f"Average bet calculation. Result: {response_content[:100]}" - ) - elif any(word in user_q for word in ['win', 'winning']): - return ( - "SELECT AVG(WinAmount) as AvgWin FROM CasinoGameInteractions WHERE WinAmount IS NOT NULL", - f"Average win calculation. Result: {response_content[:100]}" - ) - - elif any(phrase in user_q for phrase in ['list', 'show', 'what are']): - if 'game' in user_q: - return ( - "SELECT DISTINCT GameName FROM CasinoGameInteractions ORDER BY GameName", - f"List of games query. Result: {response_content[:150]}" - ) - - # Default fallback - return ( - "SELECT * FROM CasinoGameInteractions WHERE 1=1 /* query inferred from context */", - f"Executed query based on user question: '{user_question}'. Result: {response_content[:100]}" - ) - - def extract_tool_invocations_from_history(self, chat_history): - """ - SIMPLIFIED: Extract tool invocations from chat history for citations. - Most citation data now comes from the plugin invocation logger system. - """ - return [] # Plugin invocation logger handles this now - - async def invoke(self, *args, **kwargs): - # Clear previous tool invocations - self.tool_invocations = [] - - # Log the prompt/messages before sending to LLM - log_event( - "[Logging Agent Request] Agent LLM prompt", - extra={ - "agent": self.name, - "prompt": [m.content[:30] for m in args[0]] if args else None - } - ) - - print(f"[Logging Agent Request] Agent: {self.name}") - print(f"[Logging Agent Request] Prompt: {[m.content[:30] for m in args[0]] if args else None}") - - # Store user question context for better tool detection - if args and args[0] and hasattr(args[0][-1], 'content'): - self._user_question = args[0][-1].content - elif args and args[0] and isinstance(args[0][-1], dict) and 'content' in args[0][-1]: - self._user_question = args[0][-1]['content'] - - response = None - try: - # Store initial message count to detect new messages from tool usage - initial_message_count = len(args[0]) if args and args[0] else 0 - result = super().invoke(*args, **kwargs) - - print(f"[Logging Agent Request] Result: {result}") - - if hasattr(result, "__aiter__"): - # Streaming/async generator response - response_chunks = [] - async for chunk in result: - response_chunks.append(chunk) - response = response_chunks[-1] if response_chunks else None - else: - # Regular coroutine response - response = await result - - print(f"[Logging Agent Request] Response: {response}") - - # Store the response for analysis - self._last_response = response - # Simplified citation capture - primary citations come from plugin invocation logger - self._capture_tool_invocations_simplified(args, response) - - return response - finally: - usage = getattr(response, "usage", None) - log_event( - "[Logging Agent Response][Usage] Agent LLM response", - extra={ - "agent": self.name, - "response": str(response)[:100] if response else None, - "prompt_tokens": getattr(usage, "prompt_tokens", None), - "completion_tokens": getattr(usage, "completion_tokens", None), - "total_tokens": getattr(usage, "total_tokens", None), - "usage": str(usage) if usage else None, - "fallback_citations": 
len(self.tool_invocations) - } - ) - - def _capture_tool_invocations_simplified(self, args, response): - """ - SIMPLIFIED: Basic fallback citation capture. - Primary citations come from the plugin invocation logger system. - This only provides basic response logging for edge cases. - """ - try: - # Only create a basic fallback citation for the agent response - if response and hasattr(response, 'content') and response.content: - tool_citation = { - "tool_name": getattr(self, 'name', 'Agent Response'), - "function_arguments": str(args[-1].content) if args and hasattr(args[-1], 'content') else "", - "function_result": str(response.content)[:500], - "timestamp": datetime.datetime.utcnow().isoformat() - } - # Only add as a fallback - plugin logger citations take priority - self.tool_invocations.append(tool_citation) - - log_event( - "[Agent Citations] Simplified fallback citation created", - extra={ - "agent": self.name, - "fallback_citations": len(self.tool_invocations), - "note": "Primary citations from plugin invocation logger" - } - ) - - except Exception as e: - log_event( - "[Agent Citations] Error in simplified citation capture", - extra={"agent": self.name, "error": str(e)}, - level="WARNING" - ) diff --git a/application/single_app/app.py b/application/single_app/app.py index ceec9e1a..cd04ff67 100644 --- a/application/single_app/app.py +++ b/application/single_app/app.py @@ -3,13 +3,28 @@ import logging import pickle import json +import os +import sys +# Fix Windows encoding issue with Unicode characters (emojis, IPA symbols, etc.) +# Must be done before any print statements that might contain Unicode +if sys.platform == 'win32': + try: + # Reconfigure stdout and stderr to use UTF-8 encoding + sys.stdout.reconfigure(encoding='utf-8') + sys.stderr.reconfigure(encoding='utf-8') + except AttributeError: + # Python < 3.7 doesn't have reconfigure, try alternative + import codecs + sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict') + sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict') + +import app_settings_cache +from config import * from semantic_kernel import Kernel from semantic_kernel_loader import initialize_semantic_kernel -from azure.monitor.opentelemetry import configure_azure_monitor - -from config import * +#from azure.monitor.opentelemetry import configure_azure_monitor from functions_authentication import * from functions_content import * @@ -17,6 +32,7 @@ from functions_search import * from functions_settings import * from functions_appinsights import * +from functions_activity_logging import * import threading import time @@ -25,6 +41,7 @@ from route_frontend_authentication import * from route_frontend_profile import * from route_frontend_admin_settings import * +from route_frontend_control_center import * from route_frontend_workspace import * from route_frontend_chats import * from route_frontend_conversations import * @@ -33,6 +50,7 @@ from route_frontend_public_workspaces import * from route_frontend_safety import * from route_frontend_feedback import * +from route_frontend_notifications import * from route_backend_chats import * from route_backend_conversations import * @@ -46,18 +64,32 @@ from route_backend_settings import * from route_backend_prompts import * from route_backend_group_prompts import * +from route_backend_control_center import * +from route_backend_notifications import * +from route_backend_retention_policy import * from route_backend_plugins import bpap as admin_plugins_bp, bpdp as dynamic_plugins_bp from 
route_backend_agents import bpa as admin_agents_bp +from route_backend_agent_templates import bp_agent_templates from route_backend_public_workspaces import * from route_backend_public_documents import * from route_backend_public_prompts import * +from route_backend_user_agreement import register_route_backend_user_agreement +from route_backend_speech import register_route_backend_speech +from route_backend_tts import register_route_backend_tts from route_enhanced_citations import register_enhanced_citations_routes from plugin_validation_endpoint import plugin_validation_bp from route_openapi import register_openapi_routes from route_migration import bp_migration from route_plugin_logging import bpl as plugin_logging_bp +from functions_debug import debug_print + +from opentelemetry.instrumentation.flask import FlaskInstrumentor -app = Flask(__name__) +app = Flask(__name__, static_url_path='/static', static_folder='static') + +disable_flask_instrumentation = os.environ.get("DISABLE_FLASK_INSTRUMENTATION", "0") +if not (disable_flask_instrumentation == "1" or disable_flask_instrumentation.lower() == "true"): + FlaskInstrumentor().instrument_app(app) app.config['EXECUTOR_TYPE'] = EXECUTOR_TYPE app.config['EXECUTOR_MAX_WORKERS'] = EXECUTOR_MAX_WORKERS @@ -67,11 +99,21 @@ app.config['VERSION'] = VERSION app.config['SECRET_KEY'] = SECRET_KEY +# Ensure filesystem session directory (when used) points to a writable path inside container. +if SESSION_TYPE == 'filesystem': + app.config['SESSION_FILE_DIR'] = SESSION_FILE_DIR if 'SESSION_FILE_DIR' in globals() else os.environ.get('SESSION_FILE_DIR', '/app/flask_session') + try: + os.makedirs(app.config['SESSION_FILE_DIR'], exist_ok=True) + except Exception as e: + print(f"WARNING: Unable to create session directory {app.config.get('SESSION_FILE_DIR')}: {e}") + log_event(f"Unable to create session directory {app.config.get('SESSION_FILE_DIR')}: {e}", level=logging.ERROR) + Session(app) app.register_blueprint(admin_plugins_bp) app.register_blueprint(dynamic_plugins_bp) app.register_blueprint(admin_agents_bp) +app.register_blueprint(bp_agent_templates) app.register_blueprint(plugin_validation_bp) app.register_blueprint(bp_migration) app.register_blueprint(plugin_logging_bp) @@ -82,6 +124,12 @@ # Register Enhanced Citations routes register_enhanced_citations_routes(app) +# Register Speech routes +register_route_backend_speech(app) + +# Register TTS routes +register_route_backend_tts(app) + # Register Swagger documentation routes from swagger_wrapper import register_swagger_routes register_swagger_routes(app) @@ -95,15 +143,85 @@ from route_external_health import * -configure_azure_monitor() +# =================== Session Configuration =================== +def configure_sessions(settings): + """Configure session backend (Redis or filesystem) once. + + Falls back to filesystem if Redis settings are incomplete. Supports managed identity + or key auth for Azure Redis. Uses SESSION_FILE_DIR already prepared in config/app init. 
+ """ + try: + if settings.get('enable_redis_cache'): + redis_url = settings.get('redis_url', '').strip() + redis_auth_type = settings.get('redis_auth_type', 'key').strip().lower() + + if redis_url: + redis_client = None + try: + if redis_auth_type == 'managed_identity': + print("Redis enabled using Managed Identity") + from config import get_redis_cache_infrastructure_endpoint + credential = DefaultAzureCredential() + redis_hostname = redis_url.split('.')[0] + cache_endpoint = get_redis_cache_infrastructure_endpoint(redis_hostname) + token = credential.get_token(cache_endpoint) + redis_client = Redis( + host=redis_url, + port=6380, + db=0, + password=token.token, + ssl=True, + socket_connect_timeout=5, + socket_timeout=5 + ) + else: + redis_key = settings.get('redis_key', '').strip() + print("Redis enabled using Access Key") + redis_client = Redis( + host=redis_url, + port=6380, + db=0, + password=redis_key, + ssl=True, + socket_connect_timeout=5, + socket_timeout=5 + ) + + # Test the connection + redis_client.ping() + print("✅ Redis connection successful") + app.config['SESSION_TYPE'] = 'redis' + app.config['SESSION_REDIS'] = redis_client + + except Exception as redis_error: + print(f"⚠️ WARNING: Redis connection failed: {redis_error}") + print("Falling back to filesystem sessions for reliability") + app.config['SESSION_TYPE'] = 'filesystem' + else: + print("Redis enabled but URL missing; falling back to filesystem.") + app.config['SESSION_TYPE'] = 'filesystem' + else: + app.config['SESSION_TYPE'] = 'filesystem' + except Exception as e: + print(f"⚠️ WARNING: Session configuration error; falling back to filesystem: {e}") + log_event(f"Session configuration error; falling back to filesystem: {e}", level=logging.ERROR) + app.config['SESSION_TYPE'] = 'filesystem' + # Initialize session interface + Session(app) # =================== Helper Functions =================== @app.before_first_request def before_first_request(): print("Initializing application...") - settings = get_settings() - print(f"DEBUG:Application settings: {settings}") + settings = get_settings(use_cosmos=True) + app_settings_cache.configure_app_cache(settings, get_redis_cache_infrastructure_endpoint(settings.get('redis_url', '').strip().split('.')[0])) + app_settings_cache.update_settings_cache(settings) + sanitized_settings = sanitize_settings_for_logging(settings) + debug_print(f"DEBUG:Application settings: {sanitized_settings}") + sanitized_settings_cache = sanitize_settings_for_logging(app_settings_cache.get_settings_cache()) + debug_print(f"DEBUG:App settings cache initialized: {'Using Redis cache:' + str(app_settings_cache.app_cache_is_using_redis)} {sanitized_settings_cache}") + initialize_clients(settings) ensure_custom_logo_file_exists(app, settings) # Enable Application Insights logging globally if configured @@ -135,7 +253,7 @@ def check_logging_timers(): turnoff_time = None if turnoff_time and current_time >= turnoff_time: - print(f"Debug logging timer expired at {turnoff_time}. Disabling debug logging.") + debug_print(f"logging timer expired at {turnoff_time}. 
Disabling debug logging.") settings['enable_debug_logging'] = False settings['debug_logging_timer_enabled'] = False settings['debug_logging_turnoff_time'] = None @@ -167,6 +285,7 @@ def check_logging_timers(): except Exception as e: print(f"Error in logging timer check: {e}") + log_event(f"Error in logging timer check: {e}", level=logging.ERROR) # Check every 60 seconds time.sleep(60) @@ -176,42 +295,108 @@ def check_logging_timers(): timer_thread.start() print("Logging timer background task started.") + # Background task to check for expired approval requests + def check_expired_approvals(): + """Background task that checks for expired approval requests and auto-denies them""" + while True: + try: + from functions_approvals import auto_deny_expired_approvals + denied_count = auto_deny_expired_approvals() + if denied_count > 0: + print(f"Auto-denied {denied_count} expired approval request(s).") + except Exception as e: + print(f"Error in approval expiration check: {e}") + log_event(f"Error in approval expiration check: {e}", level=logging.ERROR) + + # Check every 6 hours (21600 seconds) + time.sleep(21600) + + # Start the approval expiration check thread + approval_thread = threading.Thread(target=check_expired_approvals, daemon=True) + approval_thread.start() + print("Approval expiration background task started.") - # Setup session handling - if settings.get('enable_redis_cache'): - redis_url = settings.get('redis_url', '').strip() - redis_auth_type = settings.get('redis_auth_type', 'key').strip().lower() - - if redis_url: - app.config['SESSION_TYPE'] = 'redis' - if redis_auth_type == 'managed_identity': - print("Redis enabled using Managed Identity") - credential = DefaultAzureCredential() - redis_hostname = redis_url.split('.')[0] # Extract the first part of the hostname - token = credential.get_token(f"https://{redis_hostname}.cacheinfra.windows.net:10225/appid") - app.config['SESSION_REDIS'] = Redis( - host=redis_url, - port=6380, - db=0, - password=token.token, - ssl=True - ) - else: - # Default to key-based auth - redis_key = settings.get('redis_key', '').strip() - print("Redis enabled using Access Key") - app.config['SESSION_REDIS'] = Redis( - host=redis_url, - port=6380, - db=0, - password=redis_key, - ssl=True - ) - else: - print("Redis enabled but URL missing; falling back to filesystem.") - app.config['SESSION_TYPE'] = 'filesystem' - else: - app.config['SESSION_TYPE'] = 'filesystem' + # Background task to check retention policy execution time + def check_retention_policy(): + """Background task that executes retention policy at scheduled time""" + while True: + try: + settings = get_settings() + + # Check if any retention policy is enabled + personal_enabled = settings.get('enable_retention_policy_personal', False) + group_enabled = settings.get('enable_retention_policy_group', False) + public_enabled = settings.get('enable_retention_policy_public', False) + + if personal_enabled or group_enabled or public_enabled: + current_time = datetime.now(timezone.utc) + + # Check if next scheduled run time has passed + next_run = settings.get('retention_policy_next_run') + should_run = False + + if next_run: + try: + next_run_dt = datetime.fromisoformat(next_run) + # Run if we've passed the scheduled time + if current_time >= next_run_dt: + should_run = True + except Exception as parse_error: + print(f"Error parsing next_run timestamp: {parse_error}") + # If we can't parse, fall back to checking last_run + last_run = settings.get('retention_policy_last_run') + if last_run: + try: + 
last_run_dt = datetime.fromisoformat(last_run) + # Run if last run was more than 23 hours ago + if (current_time - last_run_dt).total_seconds() > (23 * 3600): + should_run = True + except: + should_run = True + else: + should_run = True + else: + # No next_run set, check last_run instead + last_run = settings.get('retention_policy_last_run') + if last_run: + try: + last_run_dt = datetime.fromisoformat(last_run) + # Run if last run was more than 23 hours ago + if (current_time - last_run_dt).total_seconds() > (23 * 3600): + should_run = True + except: + should_run = True + else: + # Never run before, execute now + should_run = True + + if should_run: + print(f"Executing scheduled retention policy at {current_time.isoformat()}") + from functions_retention_policy import execute_retention_policy + results = execute_retention_policy(manual_execution=False) + + if results.get('success'): + print(f"Retention policy execution completed: " + f"{results['personal']['conversations']} personal conversations, " + f"{results['personal']['documents']} personal documents, " + f"{results['group']['conversations']} group conversations, " + f"{results['group']['documents']} group documents, " + f"{results['public']['conversations']} public conversations, " + f"{results['public']['documents']} public documents deleted.") + else: + print(f"Retention policy execution failed: {results.get('errors')}") + + except Exception as e: + print(f"Error in retention policy check: {e}") + log_event(f"Error in retention policy check: {e}", level=logging.ERROR) + + # Check every 5 minutes for more responsive scheduling + time.sleep(300) + + # Start the retention policy check thread + retention_thread = threading.Thread(target=check_retention_policy, daemon=True) + retention_thread.start() + print("Retention policy background task started.") # Initialize Semantic Kernel and plugins enable_semantic_kernel = settings.get('enable_semantic_kernel', False) @@ -220,85 +405,8 @@ def check_logging_timers(): print("Semantic Kernel is enabled. 
Initializing...") initialize_semantic_kernel() - Session(app) - - # Setup session handling - if settings.get('enable_redis_cache'): - redis_url = settings.get('redis_url', '').strip() - redis_auth_type = settings.get('redis_auth_type', 'key').strip().lower() - - if redis_url: - app.config['SESSION_TYPE'] = 'redis' - - if redis_auth_type == 'managed_identity': - print("Redis enabled using Managed Identity") - credential = DefaultAzureCredential() - redis_hostname = redis_url.split('.')[0] # Extract the first part of the hostname - token = credential.get_token(f"https://{redis_hostname}.cacheinfra.windows.net:10225/appid") - app.config['SESSION_REDIS'] = Redis( - host=redis_url, - port=6380, - db=0, - password=token.token, - ssl=True - ) - else: - # Default to key-based auth - redis_key = settings.get('redis_key', '').strip() - print("Redis enabled using Access Key") - app.config['SESSION_REDIS'] = Redis( - host=redis_url, - port=6380, - db=0, - password=redis_key, - ssl=True - ) - else: - print("Redis enabled but URL missing; falling back to filesystem.") - app.config['SESSION_TYPE'] = 'filesystem' - else: - app.config['SESSION_TYPE'] = 'filesystem' - - Session(app) - - # Setup session handling - if settings.get('enable_redis_cache'): - redis_url = settings.get('redis_url', '').strip() - redis_auth_type = settings.get('redis_auth_type', 'key').strip().lower() - - if redis_url: - app.config['SESSION_TYPE'] = 'redis' - - if redis_auth_type == 'managed_identity': - print("Redis enabled using Managed Identity") - credential = DefaultAzureCredential() - redis_hostname = redis_url.split('.')[0] # Extract the first part of the hostname - token = credential.get_token(f"https://{redis_hostname}.cacheinfra.windows.net:10225/appid") - app.config['SESSION_REDIS'] = Redis( - host=redis_url, - port=6380, - db=0, - password=token.token, - ssl=True - ) - else: - # Default to key-based auth - redis_key = settings.get('redis_key', '').strip() - print("Redis enabled using Access Key") - app.config['SESSION_REDIS'] = Redis( - host=redis_url, - port=6380, - db=0, - password=redis_key, - ssl=True - ) - else: - print("Redis enabled but URL missing; falling back to filesystem.") - app.config['SESSION_TYPE'] = 'filesystem' - else: - app.config['SESSION_TYPE'] = 'filesystem' - - Session(app) + # Unified session setup + configure_sessions(settings) @app.context_processor def inject_settings(): @@ -312,6 +420,8 @@ def inject_settings(): from functions_settings import get_user_settings user_settings = get_user_settings(user_id) or {} except Exception as e: + print(f"Error injecting user settings: {e}") + log_event(f"Error injecting user settings: {e}", level=logging.ERROR) user_settings = {} return dict(app_settings=public_settings, user_settings=user_settings) @@ -378,6 +488,7 @@ def markdown_filter(text): # =================== Default Routes ===================== @app.route('/') +@swagger_route(security=get_auth_security()) def index(): settings = get_settings() public_settings = sanitize_settings_for_user(settings) @@ -391,14 +502,17 @@ def index(): return render_template('index.html', app_settings=public_settings, landing_html=landing_html) @app.route('/robots933456.txt') +@swagger_route(security=get_auth_security()) def robots(): return send_from_directory('static', 'robots.txt') @app.route('/favicon.ico') +@swagger_route(security=get_auth_security()) def favicon(): return send_from_directory('static', 'favicon.ico') @app.route('/static/js/') +@swagger_route(security=get_auth_security()) def 
serve_js_modules(filename): """Serve JavaScript modules with correct MIME type.""" from flask import send_from_directory, Response @@ -411,10 +525,12 @@ def serve_js_modules(filename): return send_from_directory('static/js', filename) @app.route('/acceptable_use_policy.html') +@swagger_route(security=get_auth_security()) def acceptable_use_policy(): return render_template('acceptable_use_policy.html') @app.route('/api/semantic-kernel/plugins') +@swagger_route(security=get_auth_security()) def list_semantic_kernel_plugins(): """Test endpoint: List loaded Semantic Kernel plugins and their functions.""" global kernel @@ -436,6 +552,9 @@ def list_semantic_kernel_plugins(): # ------------------- Admin Settings Routes -------------- register_route_frontend_admin_settings(app) +# ------------------- Control Center Routes -------------- +register_route_frontend_control_center(app) + # ------------------- Chats Routes ----------------------- register_route_frontend_chats(app) @@ -458,6 +577,9 @@ def list_semantic_kernel_plugins(): # ------------------- Feedback Routes ------------------- register_route_frontend_feedback(app) +# ------------------- Notifications Routes -------------- +register_route_frontend_notifications(app) + # ------------------- API Chat Routes -------------------- register_route_backend_chats(app) @@ -494,6 +616,15 @@ def list_semantic_kernel_plugins(): # ------------------- API Group Prompts Routes ---------- register_route_backend_group_prompts(app) +# ------------------- API Control Center Routes --------- +register_route_backend_control_center(app) + +# ------------------- API Notifications Routes ---------- +register_route_backend_notifications(app) + +# ------------------- API Retention Policy Routes -------- +register_route_backend_retention_policy(app) + # ------------------- API Public Workspaces Routes ------- register_route_backend_public_workspaces(app) @@ -503,20 +634,28 @@ def list_semantic_kernel_plugins(): # ------------------- API Public Prompts Routes ---------- register_route_backend_public_prompts(app) +# ------------------- API User Agreement Routes ---------- +register_route_backend_user_agreement(app) + # ------------------- Extenral Health Routes ---------- register_route_external_health(app) if __name__ == '__main__': - settings = get_settings() + settings = get_settings(use_cosmos=True) + app_settings_cache.configure_app_cache(settings, get_redis_cache_infrastructure_endpoint(settings.get('redis_url', '').strip().split('.')[0])) + app_settings_cache.update_settings_cache(settings) initialize_clients(settings) debug_mode = os.environ.get("FLASK_DEBUG", "0") == "1" if debug_mode: # Local development with HTTPS - app.run(host="0.0.0.0", port=5001, debug=True, ssl_context='adhoc') + # use_reloader=False prevents too_many_retries errors with static files + # Disable excessive logging for static file requests in development + werkzeug_logger = logging.getLogger('werkzeug') + werkzeug_logger.setLevel(logging.ERROR) + app.run(host="0.0.0.0", port=5000, debug=True, ssl_context='adhoc', threaded=True, use_reloader=False) else: # Production port = int(os.environ.get("PORT", 5000)) app.run(host="0.0.0.0", port=port, debug=False) - diff --git a/application/single_app/app_settings_cache.py b/application/single_app/app_settings_cache.py new file mode 100644 index 00000000..cf908540 --- /dev/null +++ b/application/single_app/app_settings_cache.py @@ -0,0 +1,69 @@ +# app_settings_cache.py +""" +WARNING: NEVER 'from app_settings_cache import' settings or any other 
module that imports settings. +ALWAYS import app_settings_cache and use app_settings_cache.get_settings_cache() to get settings. +This supports the dynamic selection of redis or in-memory caching of settings. +""" +import json +from redis import Redis +from azure.identity import DefaultAzureCredential + +_settings = None +APP_SETTINGS_CACHE = {} +update_settings_cache = None +get_settings_cache = None +app_cache_is_using_redis = False + +def configure_app_cache(settings, redis_cache_endpoint=None): + global _settings, update_settings_cache, get_settings_cache, APP_SETTINGS_CACHE, app_cache_is_using_redis + _settings = settings + use_redis = _settings.get('enable_redis_cache', False) + + if use_redis: + app_cache_is_using_redis = True + redis_url = settings.get('redis_url', '').strip() + redis_auth_type = settings.get('redis_auth_type', 'key').strip().lower() + if redis_auth_type == 'managed_identity': + print("[ASC] Redis enabled using Managed Identity") + credential = DefaultAzureCredential() + redis_hostname = redis_url.split('.')[0] + cache_endpoint = redis_cache_endpoint + token = credential.get_token(cache_endpoint) + redis_client = Redis( + host=redis_url, + port=6380, + db=0, + password=token.token, + ssl=True + ) + else: + redis_key = settings.get('redis_key', '').strip() + print("[ASC] Redis enabled using Access Key") + redis_client = Redis( + host=redis_url, + port=6380, + db=0, + password=redis_key, + ssl=True + ) + + def update_settings_cache_redis(new_settings): + redis_client.set('APP_SETTINGS_CACHE', json.dumps(new_settings)) + + def get_settings_cache_redis(): + cached = redis_client.get('APP_SETTINGS_CACHE') + return json.loads(cached) if cached else {} + + update_settings_cache = update_settings_cache_redis + get_settings_cache = get_settings_cache_redis + + else: + def update_settings_cache_mem(new_settings): + global APP_SETTINGS_CACHE + APP_SETTINGS_CACHE = new_settings + + def get_settings_cache_mem(): + return APP_SETTINGS_CACHE + + update_settings_cache = update_settings_cache_mem + get_settings_cache = get_settings_cache_mem \ No newline at end of file diff --git a/application/single_app/config.py b/application/single_app/config.py index 1078e6f4..d5ba49b6 100644 --- a/application/single_app/config.py +++ b/application/single_app/config.py @@ -88,8 +88,7 @@ EXECUTOR_TYPE = 'thread' EXECUTOR_MAX_WORKERS = 30 SESSION_TYPE = 'filesystem' -VERSION = "0.229.062" - +VERSION = "0.237.011" SECRET_KEY = os.getenv('SECRET_KEY', 'dev-secret-key-change-in-production') @@ -101,10 +100,13 @@ 'Referrer-Policy': 'strict-origin-when-cross-origin', 'Content-Security-Policy': ( "default-src 'self'; " - "script-src 'self' 'unsafe-inline' 'unsafe-eval' https://cdn.jsdelivr.net https://code.jquery.com https://stackpath.bootstrapcdn.com; " - "style-src 'self' 'unsafe-inline' https://cdn.jsdelivr.net https://stackpath.bootstrapcdn.com; " + "script-src 'self' 'unsafe-inline' 'unsafe-eval'; " + #"script-src 'self' 'unsafe-inline' 'unsafe-eval' https://cdn.jsdelivr.net https://code.jquery.com https://stackpath.bootstrapcdn.com; " + "style-src 'self' 'unsafe-inline'; " + #"style-src 'self' 'unsafe-inline' https://cdn.jsdelivr.net https://stackpath.bootstrapcdn.com; " "img-src 'self' data: https: blob:; " - "font-src 'self' https://cdn.jsdelivr.net https://stackpath.bootstrapcdn.com; " + "font-src 'self'; " + #"font-src 'self' https://cdn.jsdelivr.net https://stackpath.bootstrapcdn.com; " "connect-src 'self' https: wss: ws:; " "media-src 'self' blob:; " "object-src 'none'; " @@ -120,11 +122,49 
@@ CLIENTS = {} CLIENTS_LOCK = threading.Lock() -ALLOWED_EXTENSIONS = { - 'txt', 'pdf', 'docx', 'xlsx', 'xls', 'csv', 'pptx', 'html', 'jpg', 'jpeg', 'png', 'bmp', 'tiff', 'tif', 'heif', 'md', 'json', - 'mp4', 'mov', 'avi', 'mkv', 'flv', 'mxf', 'gxf', 'ts', 'ps', '3gp', '3gpp', 'mpg', 'wmv', 'asf', 'm4a', 'm4v', 'isma', 'ismv', - 'dvr-ms', 'wav' +# Base allowed extensions (always available) +BASE_ALLOWED_EXTENSIONS = {'txt', 'doc', 'docm', 'html', 'md', 'json', 'xml', 'yaml', 'yml', 'log'} +DOCUMENT_EXTENSIONS = {'pdf', 'docx', 'pptx', 'ppt'} +TABULAR_EXTENSIONS = {'csv', 'xlsx', 'xls', 'xlsm'} + +# Updates to image, video, or audio extensions should also be made in static/js/chat/chat-enhanced-citations.js if the new file types can be natively rendered in the browser. +IMAGE_EXTENSIONS = {'jpg', 'jpeg', 'png', 'bmp', 'tiff', 'tif', 'heif', 'heic'} + +# Optional extensions by feature +VIDEO_EXTENSIONS = { + 'mp4', 'mov', 'avi', 'mkv', 'flv', 'mxf', 'gxf', 'ts', 'ps', '3gp', '3gpp', + 'mpg', 'wmv', 'asf', 'm4v', 'isma', 'ismv', 'dvr-ms', 'webm', 'mpeg' } + +AUDIO_EXTENSIONS = {'mp3', 'wav', 'ogg', 'aac', 'flac', 'm4a'} + +def get_allowed_extensions(enable_video=False, enable_audio=False): + """ + Get allowed file extensions based on feature flags. + + Args: + enable_video: Whether video file support is enabled + enable_audio: Whether audio file support is enabled + + Returns: + set: Allowed file extensions + """ + extensions = BASE_ALLOWED_EXTENSIONS.copy() + extensions.update(DOCUMENT_EXTENSIONS) + extensions.update(IMAGE_EXTENSIONS) + extensions.update(TABULAR_EXTENSIONS) + + if enable_video: + extensions.update(VIDEO_EXTENSIONS) + + if enable_audio: + extensions.update(AUDIO_EXTENSIONS) + + return extensions + +ALLOWED_EXTENSIONS = get_allowed_extensions(enable_video=True, enable_audio=True) + +# Admin UI specific extensions (for logo/favicon uploads) ALLOWED_EXTENSIONS_IMG = {'png', 'jpg', 'jpeg'} MAX_CONTENT_LENGTH = 5000 * 1024 * 1024 # 5000 MB AKA 5 GB @@ -135,6 +175,7 @@ CUSTOM_BLOB_STORAGE_URL_VALUE = os.getenv("CUSTOM_BLOB_STORAGE_URL_VALUE", "") CUSTOM_COGNITIVE_SERVICES_URL_VALUE = os.getenv("CUSTOM_COGNITIVE_SERVICES_URL_VALUE", "") CUSTOM_SEARCH_RESOURCE_MANAGER_URL_VALUE = os.getenv("CUSTOM_SEARCH_RESOURCE_MANAGER_URL_VALUE", "") +CUSTOM_REDIS_CACHE_INFRASTRUCTURE_URL_VALUE = os.getenv("CUSTOM_REDIS_CACHE_INFRASTRUCTURE_URL_VALUE", "") # Azure AD Configuration @@ -152,11 +193,10 @@ if AZURE_ENVIRONMENT == "custom": AUTHORITY = f"{CUSTOM_IDENTITY_URL_VALUE}/{TENANT_ID}" -else: +elif AZURE_ENVIRONMENT == "usgovernment": AUTHORITY = f"https://login.microsoftonline.us/{TENANT_ID}" - -# Commercial Azure Video Indexer Endpoint -video_indexer_endpoint = "https://api.videoindexer.ai" +else: + AUTHORITY = f"https://login.microsoftonline.com/{TENANT_ID}" WORD_CHUNK_SIZE = 400 @@ -168,6 +208,7 @@ cognitive_services_scope = "https://cognitiveservices.azure.us/.default" video_indexer_endpoint = "https://api.videoindexer.ai.azure.us" search_resource_manager = "https://search.azure.us" + KEY_VAULT_DOMAIN = ".vault.usgovcloudapi.net" elif AZURE_ENVIRONMENT == "custom": resource_manager = CUSTOM_RESOURCE_MANAGER_URL_VALUE @@ -175,12 +216,36 @@ credential_scopes=[resource_manager + "/.default"] cognitive_services_scope = CUSTOM_COGNITIVE_SERVICES_URL_VALUE search_resource_manager = CUSTOM_SEARCH_RESOURCE_MANAGER_URL_VALUE + KEY_VAULT_DOMAIN = os.getenv("KEY_VAULT_DOMAIN", ".vault.azure.net") else: OIDC_METADATA_URL = 
f"https://login.microsoftonline.com/{TENANT_ID}/v2.0/.well-known/openid-configuration" resource_manager = "https://management.azure.com" authority = AzureAuthorityHosts.AZURE_PUBLIC_CLOUD credential_scopes=[resource_manager + "/.default"] cognitive_services_scope = "https://cognitiveservices.azure.com/.default" + video_indexer_endpoint = "https://api.videoindexer.ai" + KEY_VAULT_DOMAIN = ".vault.azure.net" + +def get_redis_cache_infrastructure_endpoint(redis_hostname: str) -> str: + """ + Get the appropriate Redis cache infrastructure endpoint based on Azure environment. + + Args: + redis_hostname (str): The hostname of the Redis cache instance + + Returns: + str: The complete endpoint URL for Redis cache infrastructure token acquisition + """ + if AZURE_ENVIRONMENT == "usgovernment": + return f"https://{redis_hostname}.cacheinfra.azure.us:10225/appid" + elif AZURE_ENVIRONMENT == "custom" and CUSTOM_REDIS_CACHE_INFRASTRUCTURE_URL_VALUE: + # For custom environments, allow override via environment variable + # Format: https://{hostname}.custom-cache-domain.com:10225/appid + return CUSTOM_REDIS_CACHE_INFRASTRUCTURE_URL_VALUE.format(hostname=redis_hostname) + else: + # Default to Azure Public Cloud + return f"https://{redis_hostname}.cacheinfra.windows.net:10225/appid" + storage_account_user_documents_container_name = "user-documents" storage_account_group_documents_container_name = "group-documents" @@ -211,6 +276,17 @@ partition_key=PartitionKey(path="/conversation_id") ) +cosmos_group_conversations_container_name = "group_conversations" +cosmos_group_conversations_container = cosmos_database.create_container_if_not_exists( + id=cosmos_group_conversations_container_name, + partition_key=PartitionKey(path="/id") +) + +cosmos_group_messages_container_name = "group_messages" +cosmos_group_messages_container = cosmos_database.create_container_if_not_exists( + id=cosmos_group_messages_container_name, + partition_key=PartitionKey(path="/conversation_id") +) cosmos_settings_container_name = "settings" cosmos_settings_container = cosmos_database.create_container_if_not_exists( @@ -314,18 +390,6 @@ partition_key=PartitionKey(path="/user_id") ) -cosmos_file_processing_container_name = "group_messages" -cosmos_file_processing_container = cosmos_database.create_container_if_not_exists( - id=cosmos_file_processing_container_name, - partition_key=PartitionKey(path="/conversation_id") -) - -cosmos_file_processing_container_name = "group_conversations" -cosmos_file_processing_container = cosmos_database.create_container_if_not_exists( - id=cosmos_file_processing_container_name, - partition_key=PartitionKey(path="/id") -) - cosmos_group_agents_container_name = "group_agents" cosmos_group_agents_container = cosmos_database.create_container_if_not_exists( id=cosmos_group_agents_container_name, @@ -350,12 +414,44 @@ partition_key=PartitionKey(path="/id") ) +cosmos_agent_templates_container_name = "agent_templates" +cosmos_agent_templates_container = cosmos_database.create_container_if_not_exists( + id=cosmos_agent_templates_container_name, + partition_key=PartitionKey(path="/id") +) + cosmos_agent_facts_container_name = "agent_facts" cosmos_agent_facts_container = cosmos_database.create_container_if_not_exists( id=cosmos_agent_facts_container_name, partition_key=PartitionKey(path="/scope_id") ) +cosmos_search_cache_container_name = "search_cache" +cosmos_search_cache_container = cosmos_database.create_container_if_not_exists( + id=cosmos_search_cache_container_name, + partition_key=PartitionKey(path="/user_id") 
+) + +cosmos_activity_logs_container_name = "activity_logs" +cosmos_activity_logs_container = cosmos_database.create_container_if_not_exists( + id=cosmos_activity_logs_container_name, + partition_key=PartitionKey(path="/user_id") +) + +cosmos_notifications_container_name = "notifications" +cosmos_notifications_container = cosmos_database.create_container_if_not_exists( + id=cosmos_notifications_container_name, + partition_key=PartitionKey(path="/user_id"), + default_ttl=-1 # TTL disabled by default, enabled per-document +) + +cosmos_approvals_container_name = "approvals" +cosmos_approvals_container = cosmos_database.create_container_if_not_exists( + id=cosmos_approvals_container_name, + partition_key=PartitionKey(path="/group_id"), + default_ttl=-1 # TTL disabled by default, enabled per-document for auto-cleanup +) + def ensure_custom_logo_file_exists(app, settings): """ If custom_logo_base64 or custom_logo_dark_base64 is present in settings, ensure the appropriate @@ -592,7 +688,7 @@ def initialize_clients(settings): azure_apim_content_safety_endpoint = settings.get("azure_apim_content_safety_endpoint") azure_apim_content_safety_subscription_key = settings.get("azure_apim_content_safety_subscription_key") - if safety_endpoint and safety_key: + if safety_endpoint: try: if enable_content_safety_apim: content_safety_client = ContentSafetyClient( @@ -649,11 +745,11 @@ def initialize_clients(settings): try: container_client = blob_service_client.get_container_client(container_name) if not container_client.exists(): - print(f"DEBUG: Container '{container_name}' does not exist. Creating...") + print(f"Container '{container_name}' does not exist. Creating...") container_client.create_container() - print(f"DEBUG: Container '{container_name}' created successfully.") + print(f"Container '{container_name}' created successfully.") else: - print(f"DEBUG: Container '{container_name}' already exists.") + print(f"Container '{container_name}' already exists.") except Exception as container_error: print(f"Error creating container {container_name}: {str(container_error)}") except Exception as e: diff --git a/application/single_app/example_swagger_usage.py b/application/single_app/example_swagger_usage.py deleted file mode 100644 index e8546403..00000000 --- a/application/single_app/example_swagger_usage.py +++ /dev/null @@ -1,318 +0,0 @@ -# example_swagger_usage.py - -""" -Example demonstrating how to use the swagger_wrapper system. - -This file shows how to retrofit existing routes with swagger documentation -and how to create new routes with comprehensive API documentation. 
-""" - -from flask import Flask, jsonify, request -from swagger_wrapper import ( - swagger_route, - register_swagger_routes, - create_response_schema, - get_auth_security, - create_parameter, - COMMON_SCHEMAS -) - -def register_example_routes(app: Flask): - """Example of how to register routes with swagger documentation.""" - - # Example 1: Simple route with basic documentation - @app.route('/api/example/hello', methods=['GET']) - @swagger_route( - summary="Simple Hello World", - description="Returns a simple hello world message", - tags=["Examples"], - responses={ - 200: { - "description": "Success", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "message": {"type": "string"} - } - } - } - } - } - } - ) - def hello_world(): - return jsonify({"message": "Hello, World!"}) - - # Example 2: Route with request body and comprehensive responses - @app.route('/api/example/user', methods=['POST']) - @swagger_route( - summary="Create User", - description="Create a new user in the system", - tags=["Users", "Examples"], - request_body={ - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "User's full name", - "example": "John Doe" - }, - "email": { - "type": "string", - "format": "email", - "description": "User's email address", - "example": "john.doe@example.com" - }, - "age": { - "type": "integer", - "minimum": 18, - "maximum": 120, - "description": "User's age", - "example": 25 - } - }, - "required": ["name", "email"] - }, - responses={ - 200: { - "description": "User created successfully", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "success": {"type": "boolean"}, - "user_id": {"type": "string", "format": "uuid"}, - "message": {"type": "string"} - } - } - } - } - }, - 400: { - "description": "Bad Request", - "content": { - "application/json": { - "schema": COMMON_SCHEMAS["error_response"] - } - } - } - }, - security=get_auth_security() - ) - def create_user(): - data = request.get_json() - if not data or not data.get('name') or not data.get('email'): - return jsonify({"error": "Name and email are required"}), 400 - - # Simulate user creation - import uuid - user_id = str(uuid.uuid4()) - - return jsonify({ - "success": True, - "user_id": user_id, - "message": f"User {data['name']} created successfully" - }) - - # Example 3: Route with path parameters and query parameters - @app.route('/api/example/user/', methods=['GET']) - @swagger_route( - summary="Get User by ID", - description="Retrieve user information by user ID", - tags=["Users", "Examples"], - parameters=[ - create_parameter("user_id", "path", "string", True, "Unique user identifier"), - create_parameter("include_profile", "query", "boolean", False, "Include user profile data"), - create_parameter("format", "query", "string", False, "Response format (json, xml)") - ], - responses={ - 200: { - "description": "User found", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "user_id": {"type": "string"}, - "name": {"type": "string"}, - "email": {"type": "string"}, - "created_at": {"type": "string", "format": "date-time"}, - "profile": { - "type": "object", - "description": "User profile (only if include_profile=true)" - } - } - } - } - } - }, - 404: { - "description": "User not found", - "content": { - "application/json": { - "schema": COMMON_SCHEMAS["error_response"] - } - } - } - }, - security=get_auth_security() - ) - def get_user(user_id): - include_profile = 
request.args.get('include_profile', 'false').lower() == 'true' - - # Simulate user lookup - user_data = { - "user_id": user_id, - "name": "John Doe", - "email": "john.doe@example.com", - "created_at": "2024-01-01T12:00:00Z" - } - - if include_profile: - user_data["profile"] = { - "bio": "Software developer", - "location": "San Francisco, CA" - } - - return jsonify(user_data) - - # Example 4: Route with pagination - @app.route('/api/example/users', methods=['GET']) - @swagger_route( - summary="List Users", - description="Get a paginated list of users", - tags=["Users", "Examples"], - parameters=[ - create_parameter("page", "query", "integer", False, "Page number (default: 1)"), - create_parameter("page_size", "query", "integer", False, "Items per page (default: 10)"), - create_parameter("search", "query", "string", False, "Search term for filtering users") - ], - responses={ - 200: { - "description": "List of users", - "content": { - "application/json": { - "schema": { - "allOf": [ - COMMON_SCHEMAS["paginated_response"], - { - "type": "object", - "properties": { - "users": { - "type": "array", - "items": { - "type": "object", - "properties": { - "user_id": {"type": "string"}, - "name": {"type": "string"}, - "email": {"type": "string"} - } - } - } - } - } - ] - } - } - } - } - }, - security=get_auth_security() - ) - def list_users(): - page = int(request.args.get('page', 1)) - page_size = int(request.args.get('page_size', 10)) - search = request.args.get('search', '') - - # Simulate user list - users = [ - {"user_id": "1", "name": "John Doe", "email": "john@example.com"}, - {"user_id": "2", "name": "Jane Smith", "email": "jane@example.com"}, - ] - - if search: - users = [u for u in users if search.lower() in u['name'].lower() or search.lower() in u['email'].lower()] - - return jsonify({ - "users": users, - "page": page, - "page_size": page_size, - "total_count": len(users) - }) - -# Example of how to retrofit an existing route file -def retrofit_existing_route_example(): - """ - Example showing how to add swagger documentation to existing routes. - - For existing route files, you would: - 1. Import the swagger_wrapper functions at the top - 2. Add @swagger_route decorators to existing route functions - 3. No other changes needed! 
- """ - - # Before (existing route): - """ - @app.route('/api/documents', methods=['GET']) - @login_required - @user_required - def api_get_user_documents(): - # existing implementation - pass - """ - - # After (with swagger documentation): - """ - from swagger_wrapper import swagger_route, create_parameter, get_auth_security, COMMON_SCHEMAS - - @app.route('/api/documents', methods=['GET']) - @swagger_route( - summary="Get user documents", - description="Retrieve a paginated list of documents for the authenticated user", - tags=["Documents"], - parameters=[ - create_parameter("page", "query", "integer", False, "Page number"), - create_parameter("page_size", "query", "integer", False, "Items per page"), - create_parameter("search", "query", "string", False, "Search term") - ], - responses={ - 200: { - "description": "List of documents", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "documents": {"type": "array"}, - "page": {"type": "integer"}, - "page_size": {"type": "integer"}, - "total_count": {"type": "integer"} - } - } - } - } - } - }, - security=get_auth_security() - ) - @login_required - @user_required - def api_get_user_documents(): - # existing implementation unchanged - pass - """ - -if __name__ == '__main__': - # Example of setting up a Flask app with swagger - app = Flask(__name__) - - # Register swagger routes (adds /swagger and /swagger.json endpoints) - register_swagger_routes(app) - - # Register your documented routes - register_example_routes(app) - - app.run(debug=True) \ No newline at end of file diff --git a/application/single_app/foundry_agent_runtime.py b/application/single_app/foundry_agent_runtime.py new file mode 100644 index 00000000..36a99ec3 --- /dev/null +++ b/application/single_app/foundry_agent_runtime.py @@ -0,0 +1,355 @@ +# foundry_agent_runtime.py +"""Azure AI Foundry agent execution helpers.""" + +import asyncio +import logging +import os +from dataclasses import dataclass +from typing import Any, Dict, Iterable, List, Optional + +from azure.identity import AzureAuthorityHosts +from azure.identity.aio import ( # type: ignore + ClientSecretCredential, + DefaultAzureCredential, +) +from semantic_kernel.agents import AzureAIAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent + +from functions_appinsights import log_event +from functions_debug import debug_print +from functions_keyvault import ( + retrieve_secret_from_key_vault_by_full_name, + validate_secret_name_dynamic, +) + +_logger = logging.getLogger("foundry_agent_runtime") + + +@dataclass +class FoundryAgentInvocationResult: + """Represents the outcome from a Foundry agent run.""" + + message: str + model: Optional[str] + citations: List[Dict[str, Any]] + metadata: Dict[str, Any] + + +class FoundryAgentInvocationError(RuntimeError): + """Raised when the Foundry agent invocation cannot be completed.""" + + +class AzureAIFoundryChatCompletionAgent: + """Lightweight wrapper so Foundry agents behave like SK chat agents.""" + + agent_type = "aifoundry" + + def __init__(self, agent_config: Dict[str, Any], settings: Dict[str, Any]): + self.name = agent_config.get("name") + self.display_name = agent_config.get("display_name") or self.name + self.description = agent_config.get("description", "") + self.id = agent_config.get("id") + self.default_agent = agent_config.get("default_agent", False) + self.is_global = agent_config.get("is_global", False) + self.is_group = agent_config.get("is_group", False) + self.group_id = 
agent_config.get("group_id") + self.group_name = agent_config.get("group_name") + self.max_completion_tokens = agent_config.get("max_completion_tokens", -1) + self.last_run_citations: List[Dict[str, Any]] = [] + self.last_run_model: Optional[str] = None + self._foundry_settings = ( + (agent_config.get("other_settings") or {}).get("azure_ai_foundry") or {} + ) + self._global_settings = settings or {} + + def invoke( + self, + agent_message_history: Iterable[ChatMessageContent], + metadata: Optional[Dict[str, Any]] = None, + ) -> str: + """Synchronously invoke the Foundry agent and return the final message text.""" + + metadata = metadata or {} + history = list(agent_message_history) + debug_print( + f"[FoundryAgent] Invoking agent '{self.name}' with {len(history)} messages" + ) + + try: + result = asyncio.run( + execute_foundry_agent( + foundry_settings=self._foundry_settings, + global_settings=self._global_settings, + message_history=history, + metadata=metadata, + ) + ) + except RuntimeError: + log_event( + "[FoundryAgent] Invocation runtime error", + extra={ + "agent_id": self.id, + "agent_name": self.name, + }, + level=logging.ERROR, + ) + raise + except Exception as exc: # pragma: no cover - defensive logging + log_event( + "[FoundryAgent] Invocation error", + extra={ + "agent_id": self.id, + "agent_name": self.name, + }, + level=logging.ERROR, + ) + raise + + self.last_run_citations = result.citations + self.last_run_model = result.model + return result.message + + +async def execute_foundry_agent( + *, + foundry_settings: Dict[str, Any], + global_settings: Dict[str, Any], + message_history: List[ChatMessageContent], + metadata: Dict[str, Any], +) -> FoundryAgentInvocationResult: + """Invoke a Foundry agent using Semantic Kernel's AzureAIAgent abstraction.""" + + agent_id = (foundry_settings.get("agent_id") or "").strip() + if not agent_id: + raise FoundryAgentInvocationError( + "Azure AI Foundry agents require an agent_id in other_settings.azure_ai_foundry." 
+ ) + + endpoint = _resolve_endpoint(foundry_settings, global_settings) + api_version = foundry_settings.get("api_version") or global_settings.get( + "azure_ai_foundry_api_version" + ) + + credential = _build_async_credential(foundry_settings, global_settings) + client = AzureAIAgent.create_client( + credential=credential, + endpoint=endpoint, + api_version=api_version, + ) + + try: + definition = await client.agents.get_agent(agent_id) + azure_agent = AzureAIAgent(client=client, definition=definition) + responses = [] + async for response in azure_agent.invoke( + messages=message_history, + metadata={k: str(v) for k, v in metadata.items() if v is not None}, + ): + responses.append(response) + + if not responses: + raise FoundryAgentInvocationError("Foundry agent returned no messages.") + + last_response = responses[-1] + + thread_id = None + if last_response.thread is not None: + thread_id = getattr(last_response.thread, "id", None) + + message_obj = last_response.message + + if not thread_id: + metadata_thread_id = None + if isinstance(message_obj.metadata, dict): + metadata_thread_id = message_obj.metadata.get("thread_id") + thread_id = metadata_thread_id or metadata.get("thread_id") + + if thread_id: + try: + if last_response.thread is not None and hasattr(last_response.thread, "delete"): + await last_response.thread.delete() + elif hasattr(client, "agents") and hasattr(client.agents, "delete_thread"): + await client.agents.delete_thread(thread_id) + except Exception as cleanup_error: # pragma: no cover - best effort cleanup + _logger.warning("Failed to delete Foundry thread: %s", cleanup_error) + text = _extract_message_text(message_obj) + citations = _extract_citations(message_obj) + model_name = getattr(definition, "model", None) + if isinstance(model_name, dict): + model_value = model_name.get("id") + else: + model_value = getattr(model_name, "id", None) + + log_event( + "[FoundryAgent] Invocation complete", + extra={ + "agent_id": agent_id, + "endpoint": endpoint, + "model": model_value, + "message_length": len(text or ""), + }, + ) + + return FoundryAgentInvocationResult( + message=text, + model=model_value, + citations=citations, + metadata=message_obj.metadata or {}, + ) + finally: + try: + await client.close() + finally: + await credential.close() + + +def _resolve_endpoint(foundry_settings: Dict[str, Any], global_settings: Dict[str, Any]) -> str: + endpoint = ( + foundry_settings.get("endpoint") + or global_settings.get("azure_ai_foundry_endpoint") + or os.getenv("AZURE_AI_AGENT_ENDPOINT") + ) + if endpoint: + return endpoint.rstrip("/") + + raise FoundryAgentInvocationError( + "Azure AI Foundry endpoint is not configured. Provide an endpoint in the agent's other_settings.azure_ai_foundry or global settings." 
+ ) + + +def _build_async_credential( + foundry_settings: Dict[str, Any], + global_settings: Dict[str, Any], +): + auth_type = ( + foundry_settings.get("authentication_type") + or foundry_settings.get("auth_type") + or global_settings.get("azure_ai_foundry_authentication_type") + ) + managed_identity_type = ( + foundry_settings.get("managed_identity_type") + or global_settings.get("azure_ai_foundry_managed_identity_type") + ) + managed_identity_client_id = ( + foundry_settings.get("managed_identity_client_id") + or global_settings.get("azure_ai_foundry_managed_identity_client_id") + ) + + authority = ( + foundry_settings.get("authority") + or global_settings.get("azure_ai_foundry_authority") + or _authority_from_cloud(foundry_settings.get("cloud") or global_settings.get("azure_ai_foundry_cloud")) + ) + + tenant_id = foundry_settings.get("tenant_id") or global_settings.get( + "azure_ai_foundry_tenant_id" + ) + client_id = foundry_settings.get("client_id") or global_settings.get( + "azure_ai_foundry_client_id" + ) + client_secret = foundry_settings.get("client_secret") or global_settings.get( + "azure_ai_foundry_client_secret" + ) + + if auth_type == "service_principal": + if not client_secret: + raise FoundryAgentInvocationError( + "Foundry service principals require client_secret value." + ) + resolved_secret = _resolve_secret_value(client_secret) + if not tenant_id or not client_id: + raise FoundryAgentInvocationError( + "Foundry service principals require tenant_id and client_id values." + ) + return ClientSecretCredential( + tenant_id=tenant_id, + client_id=client_id, + client_secret=resolved_secret, + authority=authority, + ) + + if client_secret and auth_type != "managed_identity": + resolved_secret = _resolve_secret_value(client_secret) + if not tenant_id or not client_id: + raise FoundryAgentInvocationError( + "Foundry service principals require tenant_id and client_id values." + ) + return ClientSecretCredential( + tenant_id=tenant_id, + client_id=client_id, + client_secret=resolved_secret, + authority=authority, + ) + + if auth_type == "managed_identity": + if managed_identity_type == "user_assigned" and managed_identity_client_id: + return DefaultAzureCredential( + authority=authority, + managed_identity_client_id=managed_identity_client_id, + ) + return DefaultAzureCredential(authority=authority) + + # Fall back to default chained credentials (managed identity, CLI, etc.) + return DefaultAzureCredential(authority=authority) + + +def _resolve_secret_value(value: str) -> str: + if validate_secret_name_dynamic(value): + resolved = retrieve_secret_from_key_vault_by_full_name(value) + if not resolved: + raise FoundryAgentInvocationError( + f"Unable to resolve Key Vault secret '{value}' for Foundry credentials." 
+ ) + return resolved + return value + + +def _authority_from_cloud(cloud_value: Optional[str]) -> str: + if not cloud_value: + return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD + + normalized = cloud_value.lower() + if normalized in ("usgov", "usgovernment", "gcc"): + return AzureAuthorityHosts.AZURE_GOVERNMENT + return AzureAuthorityHosts.AZURE_PUBLIC_CLOUD + + +def _extract_message_text(message: ChatMessageContent) -> str: + if message.content: + if isinstance(message.content, str): + return message.content + try: + return "".join(str(chunk) for chunk in message.content) + except TypeError: + return str(message.content) + return "" + + +def _extract_citations(message: ChatMessageContent) -> List[Dict[str, Any]]: + metadata = message.metadata or {} + citations = metadata.get("citations") + if isinstance(citations, list): + return [c for c in citations if isinstance(c, dict)] + items = getattr(message, "items", None) + if isinstance(items, list): + extracted: List[Dict[str, Any]] = [] + for item in items: + content_type = getattr(item, "content_type", None) + if content_type != "annotation": + continue + url = getattr(item, "url", None) + title = getattr(item, "title", None) + quote = getattr(item, "quote", None) + if not url: + continue + extracted.append( + { + "url": url, + "title": title, + "quote": quote, + "citation_type": getattr(item, "citation_type", None), + } + ) + if extracted: + return extracted + return [] diff --git a/application/single_app/functions_activity_logging.py b/application/single_app/functions_activity_logging.py new file mode 100644 index 00000000..df9cabf3 --- /dev/null +++ b/application/single_app/functions_activity_logging.py @@ -0,0 +1,1341 @@ +""" +Activity logging functions for tracking chat and user interactions. +This module provides functions to log various types of user activity +for analytics and monitoring purposes. +""" + +import logging +import uuid +from datetime import datetime +from typing import Optional +from functions_appinsights import log_event +from functions_debug import debug_print +from config import cosmos_activity_logs_container + +def log_chat_activity( + user_id: str, + conversation_id: str, + message_type: str, + message_length: int = 0, + has_document_search: bool = False, + has_image_generation: bool = False, + document_scope: Optional[str] = None, + chat_context: Optional[str] = None +) -> None: + """ + Log chat activity for monitoring. + Chat data is already stored in conversations/messages containers. 
+ + Args: + user_id (str): The ID of the user performing the action + conversation_id (str): The ID of the conversation + message_type (str): Type of message (e.g., 'user_message', 'assistant_message') + message_length (int, optional): Length of the message content + has_document_search (bool, optional): Whether document search was used + has_image_generation (bool, optional): Whether image generation was used + document_scope (str, optional): Scope of document search if used + chat_context (str, optional): Context or type of chat session + """ + + try: + # Log to Application Insights for monitoring + log_event( + message=f"Chat activity: {message_type} for user {user_id}", + extra={ + 'user_id': user_id, + 'conversation_id': conversation_id, + 'message_type': message_type, + 'message_length': message_length, + 'has_document_search': has_document_search, + 'has_image_generation': has_image_generation, + 'document_scope': document_scope, + 'chat_context': chat_context, + 'activity_type': 'chat_activity' + }, + level=logging.INFO + ) + debug_print(f"Logged chat activity: {message_type} for user {user_id}") + + except Exception as e: + # Log error but don't break the chat flow + log_event( + message=f"Error logging chat activity: {str(e)}", + extra={ + 'user_id': user_id, + 'conversation_id': conversation_id, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"Error logging chat activity for user {user_id}: {str(e)}") + + +def log_user_activity( + user_id: str, + activity_type: str, + activity_details: Optional[dict] = None +) -> None: + """ + Log general user activity for analytics and monitoring. + + Args: + user_id (str): The ID of the user performing the action + activity_type (str): Type of activity (e.g., 'login', 'logout', 'file_upload') + activity_details (dict, optional): Additional details about the activity + """ + + try: + # Create activity data + activity_data = { + 'user_id': user_id, + 'activity_type': activity_type, + 'timestamp': datetime.utcnow().isoformat() + } + + # Add additional details if provided + if activity_details: + activity_data.update(activity_details) + + # Log to Application Insights + log_event( + message=f"User activity logged: {activity_type} for user {user_id}", + extra=activity_data, + level=logging.INFO + ) + debug_print(f"Logged user activity: {activity_type} for user {user_id}") + + except Exception as e: + # Log error but don't break the user flow + log_event( + message=f"Error logging user activity: {str(e)}", + extra={ + 'user_id': user_id, + 'activity_type': activity_type, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"Error logging user activity for user {user_id}: {str(e)}") + + +def log_web_search_consent_acceptance( + user_id: str, + admin_email: str, + consent_text: str, + source: str = 'admin_settings' +) -> None: + """ + Log web search consent acceptance to activity_logs and App Insights. + + Args: + user_id (str): Admin user ID who accepted the consent. + admin_email (str): Admin email who accepted the consent. + consent_text (str): Consent message accepted by the admin. + source (str, optional): Origin of the consent action. 
+ """ + try: + activity_record = { + 'id': str(uuid.uuid4()), + 'activity_type': 'web_search_consent_acceptance', + 'user_id': user_id, + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'accepted_by': { + 'user_id': user_id, + 'email': admin_email + }, + 'source': source, + 'description': consent_text + } + + cosmos_activity_logs_container.create_item(body=activity_record) + + log_event( + message=consent_text, + extra=activity_record, + level=logging.INFO + ) + debug_print(f"Logged web search consent acceptance for user {user_id}") + + except Exception as e: + log_event( + message=f"Error logging web search consent acceptance: {str(e)}", + extra={ + 'user_id': user_id, + 'admin_email': admin_email, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"Error logging web search consent acceptance for user {user_id}: {str(e)}") + + +def log_document_upload( + user_id: str, + container_type: str, + document_id: str, + file_size: int = 0, + file_type: Optional[str] = None +) -> None: + """ + Log document upload activity for monitoring. + Document data is already stored in documents containers. + + Args: + user_id (str): The ID of the user uploading the document + container_type (str): Type of container ('personal', 'group', 'public') + document_id (str): The ID of the uploaded document + file_size (int, optional): Size of the uploaded file in bytes + file_type (str, optional): Type/extension of the uploaded file + """ + + try: + # Log to Application Insights for monitoring + log_event( + message=f"Document upload: {file_type} ({file_size} bytes) for user {user_id}", + extra={ + 'user_id': user_id, + 'container_type': container_type, + 'document_id': document_id, + 'file_size': file_size, + 'file_type': file_type, + 'activity_type': 'document_upload' + }, + level=logging.INFO + ) + debug_print(f"Logged document upload for user {user_id}") + + except Exception as e: + # Log error but don't break the upload flow + log_event( + message=f"Error logging document upload activity: {str(e)}", + extra={ + 'user_id': user_id, + 'document_id': document_id, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"Error logging document upload for user {user_id}: {str(e)}") + + +def log_document_creation_transaction( + user_id: str, + document_id: str, + workspace_type: str, + file_name: str, + file_type: Optional[str] = None, + file_size: Optional[int] = None, + page_count: Optional[int] = None, + embedding_tokens: Optional[int] = None, + embedding_model: Optional[str] = None, + version: Optional[int] = None, + author: Optional[str] = None, + title: Optional[str] = None, + subject: Optional[str] = None, + publication_date: Optional[str] = None, + keywords: Optional[list] = None, + abstract: Optional[str] = None, + group_id: Optional[str] = None, + public_workspace_id: Optional[str] = None, + additional_metadata: Optional[dict] = None +) -> None: + """ + Log comprehensive document creation transaction to activity_logs container. + This creates a permanent record of the document creation that persists even if the document is deleted. + + Args: + user_id (str): The ID of the user who created the document + document_id (str): The ID of the created document + workspace_type (str): Type of workspace ('personal', 'group', 'public') + file_name (str): Name of the uploaded file + file_type (str, optional): File extension/type (.pdf, .docx, etc.) 
+ file_size (int, optional): Size of the file in bytes + page_count (int, optional): Number of pages/chunks processed + embedding_tokens (int, optional): Total embedding tokens used + embedding_model (str, optional): Embedding model deployment name + version (int, optional): Document version + author (str, optional): Document author (from metadata) + title (str, optional): Document title (from metadata) + subject (str, optional): Document subject (from metadata) + publication_date (str, optional): Document publication date (from metadata) + keywords (list, optional): Document keywords (from metadata) + abstract (str, optional): Document abstract (from metadata) + group_id (str, optional): Group ID if group workspace + public_workspace_id (str, optional): Public workspace ID if public workspace + additional_metadata (dict, optional): Any additional metadata to store + """ + + try: + import uuid + + # Create comprehensive activity log record + activity_record = { + 'id': str(uuid.uuid4()), + 'user_id': user_id, + 'activity_type': 'document_creation', + 'workspace_type': workspace_type, + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'document': { + 'document_id': document_id, + 'file_name': file_name, + 'file_type': file_type, + 'file_size_bytes': file_size, + 'page_count': page_count, + 'version': version + }, + 'embedding_usage': { + 'total_tokens': embedding_tokens, + 'model_deployment_name': embedding_model + }, + 'document_metadata': { + 'author': author, + 'title': title, + 'subject': subject, + 'publication_date': publication_date, + 'keywords': keywords or [], + 'abstract': abstract + }, + 'workspace_context': {} + } + + # Add workspace-specific context + if workspace_type == 'group' and group_id: + activity_record['workspace_context']['group_id'] = group_id + elif workspace_type == 'public' and public_workspace_id: + activity_record['workspace_context']['public_workspace_id'] = public_workspace_id + + # Add any additional metadata + if additional_metadata: + activity_record['additional_metadata'] = additional_metadata + + # Save to activity_logs container for permanent record + cosmos_activity_logs_container.create_item(body=activity_record) + + # Also log to Application Insights for monitoring + log_event( + message=f"Document creation transaction logged: {file_name} ({file_type}) for user {user_id}", + extra=activity_record, + level=logging.INFO + ) + debug_print(f"Logged document creation transaction: {document_id} for user {user_id}") + + + except Exception as e: + # Log error but don't break the document creation flow + log_event( + message=f"Error logging document creation transaction: {str(e)}", + extra={ + 'user_id': user_id, + 'document_id': document_id, + 'workspace_type': workspace_type, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"Error logging document creation transaction for user {user_id}: {str(e)}") + + +def log_document_deletion_transaction( + user_id: str, + document_id: str, + workspace_type: str, + file_name: str, + file_type: Optional[str] = None, + page_count: Optional[int] = None, + version: Optional[int] = None, + group_id: Optional[str] = None, + public_workspace_id: Optional[str] = None, + document_metadata: Optional[dict] = None +) -> None: + """ + Log document deletion transaction to activity_logs container. + This creates a permanent record of the document deletion. 
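+
+    Illustrative call with placeholder values:
+
+        log_document_deletion_transaction(
+            user_id="user-123",
+            document_id="doc-789",
+            workspace_type="group",
+            file_name="report.pdf",
+            file_type=".pdf",
+            group_id="group-42",
+        )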
+ + Args: + user_id (str): The ID of the user who deleted the document + document_id (str): The ID of the deleted document + workspace_type (str): Type of workspace ('personal', 'group', 'public') + file_name (str): Name of the deleted file + file_type (str, optional): File extension/type (.pdf, .docx, etc.) + page_count (int, optional): Number of pages/chunks that were stored + version (int, optional): Document version + group_id (str, optional): Group ID if group workspace + public_workspace_id (str, optional): Public workspace ID if public workspace + document_metadata (dict, optional): Full document metadata for reference + """ + + try: + import uuid + + # Create deletion activity log record + activity_record = { + 'id': str(uuid.uuid4()), + 'user_id': user_id, + 'activity_type': 'document_deletion', + 'workspace_type': workspace_type, + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'document': { + 'document_id': document_id, + 'file_name': file_name, + 'file_type': file_type, + 'page_count': page_count, + 'version': version + }, + 'workspace_context': {} + } + + # Add workspace-specific context + if workspace_type == 'group' and group_id: + activity_record['workspace_context']['group_id'] = group_id + elif workspace_type == 'public' and public_workspace_id: + activity_record['workspace_context']['public_workspace_id'] = public_workspace_id + + # Add full document metadata if provided + if document_metadata: + activity_record['deleted_document_metadata'] = document_metadata + + # Save to activity_logs container for permanent record + cosmos_activity_logs_container.create_item(body=activity_record) + + # Also log to Application Insights for monitoring + log_event( + message=f"Document deletion transaction logged: {file_name} ({file_type}) for user {user_id}", + extra=activity_record, + level=logging.INFO + ) + + debug_print(f"Logged document deletion transaction: {document_id} for user {user_id}") + + except Exception as e: + # Log error but don't break the document deletion flow + log_event( + message=f"Error logging document deletion transaction: {str(e)}", + extra={ + 'user_id': user_id, + 'document_id': document_id, + 'workspace_type': workspace_type, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"Error logging document deletion transaction for user {user_id}: {str(e)}") + + +def log_document_metadata_update_transaction( + user_id: str, + document_id: str, + workspace_type: str, + file_name: str, + updated_fields: dict, + file_type: Optional[str] = None, + group_id: Optional[str] = None, + public_workspace_id: Optional[str] = None, + additional_metadata: Optional[dict] = None +) -> None: + """ + Log document metadata update transaction to activity_logs container. + This creates a permanent record of metadata modifications. + + Args: + user_id (str): The ID of the user who updated the metadata + document_id (str): The ID of the updated document + workspace_type (str): Type of workspace ('personal', 'group', 'public') + file_name (str): Name of the document file + updated_fields (dict): Dictionary of fields that were updated with their new values + file_type (str, optional): File extension/type (.pdf, .docx, etc.) 
+ group_id (str, optional): Group ID if group workspace + public_workspace_id (str, optional): Public workspace ID if public workspace + additional_metadata (dict, optional): Any additional metadata to store + """ + + try: + import uuid + + # Create metadata update activity log record + activity_record = { + 'id': str(uuid.uuid4()), + 'user_id': user_id, + 'activity_type': 'document_metadata_update', + 'workspace_type': workspace_type, + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'document': { + 'document_id': document_id, + 'file_name': file_name, + 'file_type': file_type + }, + 'updated_fields': updated_fields, + 'workspace_context': {} + } + + # Add workspace-specific context + if workspace_type == 'group' and group_id: + activity_record['workspace_context']['group_id'] = group_id + elif workspace_type == 'public' and public_workspace_id: + activity_record['workspace_context']['public_workspace_id'] = public_workspace_id + + # Add any additional metadata + if additional_metadata: + activity_record['additional_metadata'] = additional_metadata + + # Save to activity_logs container for permanent record + cosmos_activity_logs_container.create_item(body=activity_record) + + # Also log to Application Insights for monitoring + log_event( + message=f"Document metadata update transaction logged: {file_name} for user {user_id}", + extra=activity_record, + level=logging.INFO + ) + + debug_print(f"Logged document metadata update transaction: {document_id} for user {user_id}") + + except Exception as e: + # Log error but don't break the document update flow + log_event( + message=f"Error logging document metadata update transaction: {str(e)}", + extra={ + 'user_id': user_id, + 'document_id': document_id, + 'workspace_type': workspace_type, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"Error logging document metadata update transaction for user {user_id}: {str(e)}") + + +def log_token_usage( + user_id: str, + token_type: str, + total_tokens: int, + model: str, + workspace_type: Optional[str] = None, + prompt_tokens: Optional[int] = None, + completion_tokens: Optional[int] = None, + document_id: Optional[str] = None, + file_name: Optional[str] = None, + conversation_id: Optional[str] = None, + message_id: Optional[str] = None, + group_id: Optional[str] = None, + public_workspace_id: Optional[str] = None, + additional_context: Optional[dict] = None +) -> None: + """ + Log token usage to activity_logs container for easy reporting and analytics. + Supports both embedding tokens (document processing) and chat tokens (conversations). 
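+
+    Illustrative call for a chat completion; all values below are placeholders:
+
+        log_token_usage(
+            user_id="user-123",
+            token_type="chat",
+            total_tokens=1250,
+            model="gpt-4o",
+            prompt_tokens=1000,
+            completion_tokens=250,
+            conversation_id="conv-456",
+            message_id="msg-001",
+        )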
+ + Args: + user_id (str): The ID of the user whose action consumed tokens + token_type (str): Type of token usage ('embedding' or 'chat') + total_tokens (int): Total tokens consumed + model (str): Model deployment name used + workspace_type (str, optional): Type of workspace ('personal', 'group', 'public') + prompt_tokens (int, optional): Prompt tokens (for chat) + completion_tokens (int, optional): Completion tokens (for chat) + document_id (str, optional): Document ID (for embedding) + file_name (str, optional): File name (for embedding) + conversation_id (str, optional): Conversation ID (for chat) + message_id (str, optional): Message ID (for chat) + group_id (str, optional): Group ID if group workspace + public_workspace_id (str, optional): Public workspace ID if public workspace + additional_context (dict, optional): Any additional context to store + """ + + try: + import uuid + + # Create token usage activity log record + activity_record = { + 'id': str(uuid.uuid4()), + 'user_id': user_id, + 'activity_type': 'token_usage', + 'token_type': token_type, + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'usage': { + 'total_tokens': total_tokens, + 'model': model + }, + 'workspace_type': workspace_type, + 'workspace_context': {} + } + + # Add token type specific details + if token_type == 'embedding': + activity_record['embedding_details'] = { + 'document_id': document_id, + 'file_name': file_name + } + elif token_type == 'chat': + activity_record['usage']['prompt_tokens'] = prompt_tokens + activity_record['usage']['completion_tokens'] = completion_tokens + activity_record['chat_details'] = { + 'conversation_id': conversation_id, + 'message_id': message_id + } + + # Add workspace-specific context + if group_id: + activity_record['workspace_context']['group_id'] = group_id + if public_workspace_id: + activity_record['workspace_context']['public_workspace_id'] = public_workspace_id + + # Add any additional context + if additional_context: + activity_record['additional_context'] = additional_context + + # Save to activity_logs container + cosmos_activity_logs_container.create_item(body=activity_record) + + # Also log to Application Insights for monitoring + log_event( + message=f"Token usage logged: {token_type} - {total_tokens} tokens ({model})", + extra=activity_record, + level=logging.INFO + ) + debug_print(f"Logged token usage: {token_type} - {total_tokens} tokens for user {user_id}") + + except Exception as e: + # Log error but don't break the flow + log_event( + message=f"Error logging token usage: {str(e)}", + extra={ + 'user_id': user_id, + 'token_type': token_type, + 'total_tokens': total_tokens, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"Error logging token usage for user {user_id}: {str(e)}") + + +def log_conversation_creation( + user_id: str, + conversation_id: str, + title: str, + workspace_type: str = 'personal', + context: list = None, + tags: list = None, + group_id: str = None, + public_workspace_id: str = None, + additional_context: dict = None +) -> None: + """ + Log conversation creation to the activity_logs container. 
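+
+    Illustrative call with placeholder values:
+
+        log_conversation_creation(
+            user_id="user-123",
+            conversation_id="conv-456",
+            title="Quarterly report questions",
+            workspace_type="personal",
+            tags=["reports"],
+        )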
+ + Args: + user_id (str): The ID of the user creating the conversation + conversation_id (str): The unique ID of the conversation + title (str): The conversation title + workspace_type (str, optional): Type of workspace ('personal', 'group', 'public') + context (list, optional): Conversation context array + tags (list, optional): Conversation tags array + group_id (str, optional): Group ID if in group workspace + public_workspace_id (str, optional): Public workspace ID if applicable + additional_context (dict, optional): Any additional context information + """ + try: + # Build activity log + activity_log = { + 'id': str(uuid.uuid4()), + 'activity_type': 'conversation_creation', + 'user_id': user_id, + 'timestamp': datetime.utcnow().isoformat(), + 'conversation': { + 'conversation_id': conversation_id, + 'title': title, + 'context': context or [], + 'tags': tags or [] + }, + 'workspace_type': workspace_type, + 'workspace_context': {} + } + + # Add workspace-specific context + if workspace_type == 'group' and group_id: + activity_log['workspace_context']['group_id'] = group_id + elif workspace_type == 'public' and public_workspace_id: + activity_log['workspace_context']['public_workspace_id'] = public_workspace_id + + # Add additional context if provided + if additional_context: + activity_log['additional_context'] = additional_context + + # Save to activity logs container + cosmos_activity_logs_container.upsert_item(activity_log) + + debug_print(f"✅ Logged conversation creation: {conversation_id}") + + except Exception as e: + # Non-blocking error handling + debug_print(f"⚠️ Error logging conversation creation: {str(e)}") + log_event( + message=f"Error logging conversation creation: {str(e)}", + extra={ + 'user_id': user_id, + 'conversation_id': conversation_id, + 'error': str(e) + }, + level=logging.ERROR + ) + + +def log_conversation_deletion( + user_id: str, + conversation_id: str, + title: str, + workspace_type: str = 'personal', + context: list = None, + tags: list = None, + is_archived: bool = False, + is_bulk_operation: bool = False, + group_id: str = None, + public_workspace_id: str = None, + additional_context: dict = None +) -> None: + """ + Log conversation deletion to the activity_logs container. 
+ + Args: + user_id (str): The ID of the user deleting the conversation + conversation_id (str): The unique ID of the conversation + title (str): The conversation title + workspace_type (str, optional): Type of workspace ('personal', 'group', 'public') + context (list, optional): Conversation context array + tags (list, optional): Conversation tags array + is_archived (bool, optional): Whether the conversation was archived before deletion + is_bulk_operation (bool, optional): Whether this is part of a bulk deletion + group_id (str, optional): Group ID if in group workspace + public_workspace_id (str, optional): Public workspace ID if applicable + additional_context (dict, optional): Any additional context information + """ + try: + # Build activity log + activity_log = { + 'id': str(uuid.uuid4()), + 'activity_type': 'conversation_deletion', + 'user_id': user_id, + 'timestamp': datetime.utcnow().isoformat(), + 'conversation': { + 'conversation_id': conversation_id, + 'title': title, + 'context': context or [], + 'tags': tags or [] + }, + 'deletion_details': { + 'is_archived': is_archived, + 'is_bulk_operation': is_bulk_operation + }, + 'workspace_type': workspace_type, + 'workspace_context': {} + } + + # Add workspace-specific context + if workspace_type == 'group' and group_id: + activity_log['workspace_context']['group_id'] = group_id + elif workspace_type == 'public' and public_workspace_id: + activity_log['workspace_context']['public_workspace_id'] = public_workspace_id + + # Add additional context if provided + if additional_context: + activity_log['additional_context'] = additional_context + + # Save to activity logs container + cosmos_activity_logs_container.upsert_item(activity_log) + + debug_print(f"✅ Logged conversation deletion: {conversation_id} (archived: {is_archived}, bulk: {is_bulk_operation})") + + except Exception as e: + # Non-blocking error handling + debug_print(f"⚠️ Error logging conversation deletion: {str(e)}") + log_event( + message=f"Error logging conversation deletion: {str(e)}", + extra={ + 'user_id': user_id, + 'conversation_id': conversation_id, + 'error': str(e) + }, + level=logging.ERROR + ) + + +def log_conversation_archival( + user_id: str, + conversation_id: str, + title: str, + workspace_type: str = 'personal', + context: list = None, + tags: list = None, + group_id: str = None, + public_workspace_id: str = None, + additional_context: dict = None +) -> None: + """ + Log conversation archival to the activity_logs container. 
+ + Args: + user_id (str): The ID of the user archiving the conversation + conversation_id (str): The unique ID of the conversation + title (str): The conversation title + workspace_type (str, optional): Type of workspace ('personal', 'group', 'public') + context (list, optional): Conversation context array + tags (list, optional): Conversation tags array + group_id (str, optional): Group ID if in group workspace + public_workspace_id (str, optional): Public workspace ID if applicable + additional_context (dict, optional): Any additional context information + """ + try: + # Build activity log + activity_log = { + 'id': str(uuid.uuid4()), + 'activity_type': 'conversation_archival', + 'user_id': user_id, + 'timestamp': datetime.utcnow().isoformat(), + 'conversation': { + 'conversation_id': conversation_id, + 'title': title, + 'context': context or [], + 'tags': tags or [] + }, + 'workspace_type': workspace_type, + 'workspace_context': {} + } + + # Add workspace-specific context + if workspace_type == 'group' and group_id: + activity_log['workspace_context']['group_id'] = group_id + elif workspace_type == 'public' and public_workspace_id: + activity_log['workspace_context']['public_workspace_id'] = public_workspace_id + + # Add additional context if provided + if additional_context: + activity_log['additional_context'] = additional_context + + # Save to activity logs container + cosmos_activity_logs_container.upsert_item(activity_log) + + debug_print(f"✅ Logged conversation archival: {conversation_id}") + + except Exception as e: + # Non-blocking error handling + debug_print(f"⚠️ Error logging conversation archival: {str(e)}") + log_event( + message=f"Error logging conversation archival: {str(e)}", + extra={ + 'user_id': user_id, + 'conversation_id': conversation_id, + 'error': str(e) + }, + level=logging.ERROR + ) + + +def log_user_login( + user_id: str, + login_method: str = 'azure_ad' +) -> None: + """ + Log user login activity to the activity_logs container. + + Args: + user_id (str): The ID of the user logging in + login_method (str, optional): Method used for login (e.g., 'azure_ad', 'local') + """ + + try: + # Create login activity record + import uuid + login_activity = { + 'id': str(uuid.uuid4()), + 'user_id': user_id, + 'activity_type': 'user_login', + 'login_method': login_method, + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'details': { + 'login_method': login_method, + 'success': True + } + } + + # Save to activity_logs container + cosmos_activity_logs_container.create_item(body=login_activity) + + # Also log to Application Insights for monitoring + log_event( + message=f"User login logged for user {user_id}", + extra=login_activity, + level=logging.INFO + ) + debug_print(f"✅ User login activity logged for user {user_id}") + + except Exception as e: + # Log error but don't break the login flow + log_event( + message=f"Error logging user login activity: {str(e)}", + extra={ + 'user_id': user_id, + 'login_method': login_method, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"⚠️ Warning: Failed to log user login activity for user {user_id}: {str(e)}") + + +def log_group_status_change( + group_id: str, + group_name: str, + old_status: str, + new_status: str, + changed_by_user_id: str, + changed_by_email: str, + reason: Optional[str] = None +) -> None: + """ + Log group status change to activity_logs container for audit trail. 
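+
+    Illustrative call with placeholder values (the status names are examples only):
+
+        log_group_status_change(
+            group_id="group-42",
+            group_name="Research Team",
+            old_status="active",
+            new_status="locked",
+            changed_by_user_id="admin-1",
+            changed_by_email="admin@example.com",
+            reason="Pending security review",
+        )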
+ + Args: + group_id (str): The ID of the group whose status is changing + group_name (str): The name of the group + old_status (str): Previous status value + new_status (str): New status value + changed_by_user_id (str): User ID of admin who made the change + changed_by_email (str): Email of admin who made the change + reason (str, optional): Optional reason for the status change + """ + + try: + import uuid + + # Create status change activity record + status_change_activity = { + 'id': str(uuid.uuid4()), + 'activity_type': 'group_status_change', + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'group': { + 'group_id': group_id, + 'group_name': group_name + }, + 'status_change': { + 'old_status': old_status, + 'new_status': new_status, + 'changed_at': datetime.utcnow().isoformat() + }, + 'changed_by': { + 'user_id': changed_by_user_id, + 'email': changed_by_email + }, + 'workspace_type': 'group', + 'workspace_context': { + 'group_id': group_id + } + } + + # Add reason if provided + if reason: + status_change_activity['status_change']['reason'] = reason + + # Save to activity_logs container for permanent audit trail + cosmos_activity_logs_container.create_item(body=status_change_activity) + + # Also log to Application Insights for monitoring + log_event( + message=f"Group status changed: {group_name} ({group_id}) from '{old_status}' to '{new_status}' by {changed_by_email}", + extra=status_change_activity, + level=logging.INFO + ) + + debug_print(f"✅ Group status change logged: {group_id} -> {new_status}") + + except Exception as e: + # Log error but don't break the status update flow + log_event( + message=f"Error logging group status change: {str(e)}", + extra={ + 'group_id': group_id, + 'new_status': new_status, + 'changed_by_user_id': changed_by_user_id, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"⚠️ Warning: Failed to log group status change: {str(e)}") + + +def log_group_member_deleted( + removed_by_user_id: str, + removed_by_email: str, + removed_by_role: str, + member_user_id: str, + member_email: str, + member_name: str, + group_id: str, + group_name: str, + action: str, + description: Optional[str] = None +) -> None: + """ + Log group member deletion/removal transaction to activity_logs container. + This creates a permanent record when users are removed from groups. 
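+
+    Illustrative call with placeholder values:
+
+        log_group_member_deleted(
+            removed_by_user_id="owner-1",
+            removed_by_email="owner@example.com",
+            removed_by_role="Owner",
+            member_user_id="user-123",
+            member_email="member@example.com",
+            member_name="Jane Doe",
+            group_id="group-42",
+            group_name="Research Team",
+            action="admin_removed_member",
+        )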
+ + Args: + removed_by_user_id (str): ID of user performing the removal + removed_by_email (str): Email of user performing the removal + removed_by_role (str): Role of user performing the removal (Owner, Admin, Member) + member_user_id (str): ID of the member being removed + member_email (str): Email of the member being removed + member_name (str): Display name of the member being removed + group_id (str): ID of the group + group_name (str): Name of the group + action (str): Specific action ('member_left_group' or 'admin_removed_member') + description (str, optional): Human-readable description of the action + """ + + try: + import uuid + + # Create group member deletion activity log record + activity_record = { + 'id': str(uuid.uuid4()), + 'user_id': removed_by_user_id, # Person who performed the action (for partitioning) + 'activity_type': 'group_member_deleted', + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'removed_by': { + 'user_id': removed_by_user_id, + 'email': removed_by_email, + 'role': removed_by_role + }, + 'removed_member': { + 'user_id': member_user_id, + 'email': member_email, + 'name': member_name + }, + 'group': { + 'group_id': group_id, + 'group_name': group_name + }, + 'description': description or f"{removed_by_role} removed member from group" + } + + # Save to activity_logs container for permanent record + cosmos_activity_logs_container.create_item(body=activity_record) + + # Also log to Application Insights for monitoring + log_event( + message=f"Group member deleted: {member_name} ({member_email}) removed from {group_name}", + extra=activity_record, + level=logging.INFO + ) + + debug_print(f"✅ Group member deletion logged to activity_logs: {member_user_id} from group {group_id}") + + except Exception as e: + # Log error but don't break the member removal flow + log_event( + message=f"Error logging group member deletion: {str(e)}", + extra={ + 'removed_by_user_id': removed_by_user_id, + 'member_user_id': member_user_id, + 'group_id': group_id, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"⚠️ Warning: Failed to log group member deletion: {str(e)}") + + +def log_public_workspace_status_change( + workspace_id: str, + workspace_name: str, + old_status: str, + new_status: str, + changed_by_user_id: str, + changed_by_email: str, + reason: Optional[str] = None +) -> None: + """ + Log public workspace status change to activity_logs container for audit trail. 
+ + Args: + workspace_id (str): The ID of the public workspace whose status is changing + workspace_name (str): The name of the public workspace + old_status (str): Previous status value + new_status (str): New status value + changed_by_user_id (str): User ID of admin who made the change + changed_by_email (str): Email of admin who made the change + reason (str, optional): Optional reason for the status change + """ + + try: + import uuid + + # Create status change activity record + status_change_activity = { + 'id': str(uuid.uuid4()), + 'activity_type': 'public_workspace_status_change', + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'public_workspace': { + 'workspace_id': workspace_id, + 'workspace_name': workspace_name + }, + 'status_change': { + 'old_status': old_status, + 'new_status': new_status, + 'changed_at': datetime.utcnow().isoformat() + }, + 'changed_by': { + 'user_id': changed_by_user_id, + 'email': changed_by_email + }, + 'workspace_type': 'public_workspace', + 'workspace_context': { + 'public_workspace_id': workspace_id + } + } + + # Add reason if provided + if reason: + status_change_activity['status_change']['reason'] = reason + + # Save to activity_logs container for permanent audit trail + cosmos_activity_logs_container.create_item(body=status_change_activity) + + # Also log to Application Insights for monitoring + log_event( + message=f"Public workspace status changed: {workspace_name} ({workspace_id}) from '{old_status}' to '{new_status}' by {changed_by_email}", + extra=status_change_activity, + level=logging.INFO + ) + + debug_print(f"✅ Logged public workspace status change: {workspace_name} ({workspace_id}) {old_status} -> {new_status}") + + except Exception as e: + # Log error but don't fail the operation + log_event( + message=f"Error logging public workspace status change: {str(e)}", + extra={ + 'workspace_id': workspace_id, + 'old_status': old_status, + 'new_status': new_status, + 'changed_by_user_id': changed_by_user_id, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"⚠️ Warning: Failed to log public workspace status change: {str(e)}") + + +def log_user_agreement_accepted( + user_id: str, + workspace_type: str, + workspace_id: str, + workspace_name: Optional[str] = None, + action_context: Optional[str] = None +) -> None: + """ + Log when a user accepts a user agreement in a workspace. + This record is used to track acceptance and support daily acceptance features. 
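+
+    Illustrative call with placeholder values:
+
+        log_user_agreement_accepted(
+            user_id="user-123",
+            workspace_type="group",
+            workspace_id="group-42",
+            workspace_name="Research Team",
+            action_context="file_upload",
+        )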
+ + Args: + user_id (str): The ID of the user who accepted the agreement + workspace_type (str): Type of workspace ('personal', 'group', 'public') + workspace_id (str): The ID of the workspace + workspace_name (str, optional): The name of the workspace + action_context (str, optional): The context/action that triggered the agreement + (e.g., 'file_upload', 'chat') + """ + + try: + import uuid + + # Create user agreement acceptance record + acceptance_record = { + 'id': str(uuid.uuid4()), + 'user_id': user_id, + 'activity_type': 'user_agreement_accepted', + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'accepted_date': datetime.utcnow().strftime('%Y-%m-%d'), # Date only for daily lookup + 'workspace_type': workspace_type, + 'workspace_context': { + f'{workspace_type}_workspace_id': workspace_id, + 'workspace_name': workspace_name + }, + 'action_context': action_context + } + + # Save to activity_logs container + cosmos_activity_logs_container.create_item(body=acceptance_record) + + # Also log to Application Insights for monitoring + log_event( + message=f"User agreement accepted: user {user_id} in {workspace_type} workspace {workspace_id}", + extra=acceptance_record, + level=logging.INFO + ) + + debug_print(f"✅ Logged user agreement acceptance: user {user_id} in {workspace_type} workspace {workspace_id}") + + except Exception as e: + # Log error but don't fail the operation + log_event( + message=f"Error logging user agreement acceptance: {str(e)}", + extra={ + 'user_id': user_id, + 'workspace_type': workspace_type, + 'workspace_id': workspace_id, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"⚠️ Warning: Failed to log user agreement acceptance: {str(e)}") + + +def has_user_accepted_agreement_today( + user_id: str, + workspace_type: str, + workspace_id: str +) -> bool: + """ + Check if a user has already accepted the user agreement today for a given workspace. + Used to implement the "accept once per day" feature. 
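+
+    Illustrative use with placeholder values:
+
+        if not has_user_accepted_agreement_today("user-123", "group", "group-42"):
+            # Prompt the user to accept the agreement before continuing.
+            ...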
+ + Args: + user_id (str): The ID of the user + workspace_type (str): Type of workspace ('personal', 'group', 'public') + workspace_id (str): The ID of the workspace + + Returns: + bool: True if user has accepted today, False otherwise + """ + + try: + today_date = datetime.utcnow().strftime('%Y-%m-%d') + + # Query for today's acceptance record + query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.user_id = @user_id + AND c.activity_type = 'user_agreement_accepted' + AND c.accepted_date = @today_date + AND c.workspace_type = @workspace_type + AND c.workspace_context[@workspace_id_key] = @workspace_id + """ + + workspace_id_key = f'{workspace_type}_workspace_id' + + params = [ + {"name": "@user_id", "value": user_id}, + {"name": "@today_date", "value": today_date}, + {"name": "@workspace_type", "value": workspace_type}, + {"name": "@workspace_id_key", "value": workspace_id_key}, + {"name": "@workspace_id", "value": workspace_id} + ] + + results = list(cosmos_activity_logs_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=False # Query by partition key (user_id) + )) + + count = results[0] if results else 0 + + debug_print(f"🔍 User agreement check: user {user_id}, workspace {workspace_id}, today={today_date}, accepted={count > 0}") + + return count > 0 + + except Exception as e: + # Log error and return False (require re-acceptance on error) + log_event( + message=f"Error checking user agreement acceptance: {str(e)}", + extra={ + 'user_id': user_id, + 'workspace_type': workspace_type, + 'workspace_id': workspace_id, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"⚠️ Error checking user agreement acceptance: {str(e)}") + return False + + +def log_retention_policy_force_push( + admin_user_id: str, + admin_email: str, + scopes: list, + results: dict, + total_updated: int +) -> None: + """ + Log retention policy force push action to activity_logs container. + + This creates a permanent audit record when an admin forces organization + default retention policies to be applied to all workspaces. + + Args: + admin_user_id (str): User ID of the admin performing the force push + admin_email (str): Email of the admin performing the force push + scopes (list): List of workspace types affected (e.g., ['personal', 'group', 'public']) + results (dict): Breakdown of updates per workspace type + total_updated (int): Total number of workspaces/users updated + """ + + try: + # Create force push activity record + force_push_activity = { + 'id': str(uuid.uuid4()), + 'user_id': admin_user_id, # Partition key + 'activity_type': 'retention_policy_force_push', + 'timestamp': datetime.utcnow().isoformat(), + 'created_at': datetime.utcnow().isoformat(), + 'admin': { + 'user_id': admin_user_id, + 'email': admin_email + }, + 'force_push_details': { + 'scopes': scopes, + 'results': results, + 'total_updated': total_updated, + 'executed_at': datetime.utcnow().isoformat() + }, + 'workspace_type': 'admin', + 'workspace_context': { + 'action': 'retention_policy_force_push' + } + } + + # Save to activity_logs container for permanent audit trail + cosmos_activity_logs_container.create_item(body=force_push_activity) + + # Also log to Application Insights for monitoring + log_event( + message=f"Retention policy force push executed by {admin_email} for scopes: {', '.join(scopes)}. 
Total updated: {total_updated}", + extra=force_push_activity, + level=logging.INFO + ) + + debug_print(f"✅ Retention policy force push logged: {scopes} by {admin_email}, updated {total_updated}") + + except Exception as e: + # Log error but don't break the force push flow + log_event( + message=f"Error logging retention policy force push: {str(e)}", + extra={ + 'admin_user_id': admin_user_id, + 'scopes': scopes, + 'total_updated': total_updated, + 'error': str(e) + }, + level=logging.ERROR + ) + debug_print(f"⚠️ Warning: Failed to log retention policy force push: {str(e)}") diff --git a/application/single_app/functions_agent_payload.py b/application/single_app/functions_agent_payload.py new file mode 100644 index 00000000..09f1f343 --- /dev/null +++ b/application/single_app/functions_agent_payload.py @@ -0,0 +1,206 @@ +# functions_agent_payload.py +"""Utility helpers for normalizing agent payloads before validation and storage.""" + +from copy import deepcopy +from typing import Any, Dict, List + +_SUPPORTED_AGENT_TYPES = {"local", "aifoundry"} +_APIM_FIELDS = [ + "azure_agent_apim_gpt_endpoint", + "azure_agent_apim_gpt_subscription_key", + "azure_agent_apim_gpt_deployment", + "azure_agent_apim_gpt_api_version", +] +_GPT_FIELDS = [ + "azure_openai_gpt_endpoint", + "azure_openai_gpt_key", + "azure_openai_gpt_deployment", + "azure_openai_gpt_api_version", +] +_FREE_FORM_TEXT = [ + "name", + "display_name", + "description", + "instructions", +] +_TEXT_FIELDS = [ + "name", + "display_name", + "description", + "instructions", + "azure_openai_gpt_endpoint", + "azure_openai_gpt_deployment", + "azure_openai_gpt_api_version", + "azure_agent_apim_gpt_endpoint", + "azure_agent_apim_gpt_deployment", + "azure_agent_apim_gpt_api_version", +] +_STRING_DEFAULT_FIELDS = [ + "azure_openai_gpt_endpoint", + "azure_openai_gpt_key", + "azure_openai_gpt_deployment", + "azure_openai_gpt_api_version", + "azure_agent_apim_gpt_endpoint", + "azure_agent_apim_gpt_subscription_key", + "azure_agent_apim_gpt_deployment", + "azure_agent_apim_gpt_api_version", +] + +_MAX_FIELD_LENGTHS = { + "name": 100, + "display_name": 200, + "description": 2000, + "instructions": 30000, + "azure_openai_gpt_endpoint": 2048, + "azure_openai_gpt_key": 1024, + "azure_openai_gpt_deployment": 256, + "azure_openai_gpt_api_version": 64, + "azure_agent_apim_gpt_endpoint": 2048, + "azure_agent_apim_gpt_subscription_key": 1024, + "azure_agent_apim_gpt_deployment": 256, + "azure_agent_apim_gpt_api_version": 64, +} +_FOUNDRY_FIELD_LENGTHS = { + "agent_id": 128, + "endpoint": 2048, + "api_version": 64, + "authority": 2048, + "tenant_id": 64, + "client_id": 64, + "client_secret": 1024, + "managed_identity_client_id": 64, +} + + +class AgentPayloadError(ValueError): + """Raised when an agent payload violates backend requirements.""" + + +def is_azure_ai_foundry_agent(agent: Dict[str, Any]) -> bool: + """Return True when the agent type is Azure AI Foundry.""" + agent_type = (agent or {}).get("agent_type", "local") + if isinstance(agent_type, str): + return agent_type.strip().lower() == "aifoundry" + return False + + +def _normalize_text_fields(payload: Dict[str, Any]) -> None: + for field in _TEXT_FIELDS: + value = payload.get(field) + if isinstance(value, str): + payload[field] = value.strip() + + +def _coerce_actions(actions: Any) -> List[str]: + if actions is None or actions == "": + return [] + if not isinstance(actions, list): + raise AgentPayloadError("actions_to_load must be an array of strings.") + cleaned: List[str] = [] + for item in actions: 
+ if isinstance(item, str): + trimmed = item.strip() + if trimmed: + cleaned.append(trimmed) + else: + raise AgentPayloadError("actions_to_load entries must be strings.") + return cleaned + + +def _coerce_other_settings(settings: Any) -> Dict[str, Any]: + if settings in (None, ""): + return {} + if not isinstance(settings, dict): + raise AgentPayloadError("other_settings must be an object.") + return settings + + +def _coerce_agent_type(agent_type: Any) -> str: + if isinstance(agent_type, str): + agent_type = agent_type.strip().lower() + else: + agent_type = "local" + if agent_type not in _SUPPORTED_AGENT_TYPES: + return "local" + return agent_type + + +def _coerce_completion_tokens(value: Any) -> int: + if value in (None, "", " "): + return -1 + try: + return int(value) + except (TypeError, ValueError) as exc: + raise AgentPayloadError("max_completion_tokens must be an integer.") from exc + +def _validate_field_lengths(payload: Dict[str, Any]) -> None: + for field, max_len in _MAX_FIELD_LENGTHS.items(): + value = payload.get(field, "") + if isinstance(value, str) and len(value) > max_len: + raise AgentPayloadError(f"{field} exceeds maximum length of {max_len}.") + + +def _validate_foundry_field_lengths(foundry_settings: Dict[str, Any]) -> None: + for field, max_len in _FOUNDRY_FIELD_LENGTHS.items(): + value = foundry_settings.get(field, "") + if isinstance(value, str) and len(value) > max_len: + raise AgentPayloadError(f"azure_ai_foundry.{field} exceeds maximum length of {max_len}.") + +def sanitize_agent_payload(agent: Dict[str, Any]) -> Dict[str, Any]: + """Return a sanitized copy of the agent payload or raise AgentPayloadError.""" + if not isinstance(agent, dict): + raise AgentPayloadError("Agent payload must be an object.") + + sanitized = deepcopy(agent) + _normalize_text_fields(sanitized) + + for field in _STRING_DEFAULT_FIELDS: + value = sanitized.get(field) + if value is None: + sanitized[field] = "" + + _validate_field_lengths(sanitized) + + agent_type = _coerce_agent_type(sanitized.get("agent_type")) + sanitized["agent_type"] = agent_type + + sanitized["other_settings"] = _coerce_other_settings(sanitized.get("other_settings")) + sanitized["actions_to_load"] = _coerce_actions(sanitized.get("actions_to_load")) + sanitized["max_completion_tokens"] = _coerce_completion_tokens( + sanitized.get("max_completion_tokens") + ) + + sanitized["enable_agent_gpt_apim"] = bool( + sanitized.get("enable_agent_gpt_apim", False) + ) + sanitized.setdefault("is_global", False) + sanitized.setdefault("is_group", False) + + if agent_type == "aifoundry": + sanitized["enable_agent_gpt_apim"] = False + for field in _APIM_FIELDS: + sanitized.pop(field, None) + sanitized["actions_to_load"] = [] + + foundry_settings = sanitized["other_settings"].get("azure_ai_foundry") + if not isinstance(foundry_settings, dict): + raise AgentPayloadError( + "Azure AI Foundry agents require other_settings.azure_ai_foundry." + ) + agent_id = str(foundry_settings.get("agent_id", "")).strip() + if not agent_id: + raise AgentPayloadError( + "Azure AI Foundry agents require other_settings.azure_ai_foundry.agent_id." + ) + foundry_settings["agent_id"] = agent_id + _validate_foundry_field_lengths(foundry_settings) + sanitized["other_settings"]["azure_ai_foundry"] = foundry_settings + else: + # Remove stale foundry metadata when toggling back to local agents. 
+ azure_foundry = sanitized["other_settings"].get("azure_ai_foundry") + if azure_foundry is not None and not isinstance(azure_foundry, dict): + raise AgentPayloadError("azure_ai_foundry must be an object when provided.") + if azure_foundry: + sanitized["other_settings"].pop("azure_ai_foundry", None) + + return sanitized \ No newline at end of file diff --git a/application/single_app/functions_agent_templates.py b/application/single_app/functions_agent_templates.py new file mode 100644 index 00000000..10838fd6 --- /dev/null +++ b/application/single_app/functions_agent_templates.py @@ -0,0 +1,349 @@ +# functions_agent_templates.py +"""Agent template helper functions. + +This module centralizes CRUD operations for agent templates stored in the +Cosmos DB `agent_templates` container. Templates are surfaced as reusable +starting points inside the agent builder UI. +""" + +from __future__ import annotations + +import json +import uuid +from datetime import datetime +from typing import Any, Dict, List, Optional + +from azure.cosmos import exceptions +from flask import current_app + +from config import cosmos_agent_templates_container +from functions_appinsights import log_event + +STATUS_PENDING = "pending" +STATUS_APPROVED = "approved" +STATUS_REJECTED = "rejected" +STATUS_ARCHIVED = "archived" +ALLOWED_STATUSES = {STATUS_PENDING, STATUS_APPROVED, STATUS_REJECTED, STATUS_ARCHIVED} + +_MAX_TEMPLATE_FIELD_LENGTHS = { + "title": 200, + "display_name": 200, + "helper_text": 140, + "description": 2000, + "instructions": 30000, + "template_key": 128, +} + +_MAX_TEMPLATE_LIST_ITEM_LENGTHS = { + "tags": 64, + "actions_to_load": 128, +} + + +def _utc_now() -> str: + return datetime.utcnow().isoformat() + + +def _slugify(text: str) -> str: + if not text: + return "template" + slug = text.strip().lower() + allowed = "abcdefghijklmnopqrstuvwxyz0123456789-_" + slug = slug.replace(" ", "-") + slug = ''.join(ch for ch in slug if ch in allowed) + slug = slug.strip('-') + return slug or "template" + + +def _normalize_helper_text(description: str, explicit_helper: Optional[str]) -> str: + helper = explicit_helper or description or "" + helper = helper.strip() + if len(helper) <= 140: + return helper + return helper[:137].rstrip() + "..." 
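+
+# Illustrative behaviour of _normalize_helper_text with placeholder inputs:
+#   _normalize_helper_text("Short blurb", None)  -> "Short blurb"
+#   _normalize_helper_text("x" * 200, None)      -> first 137 characters followed by "..."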
+ + +def _parse_additional_settings(value: Any) -> Dict[str, Any]: + if not value: + return {} + if isinstance(value, dict): + return value + if isinstance(value, str): + trimmed = value.strip() + if not trimmed: + return {} + try: + return json.loads(trimmed) + except json.JSONDecodeError as exc: + raise ValueError(f"Invalid JSON for additional_settings: {exc}") from exc + raise ValueError("additional_settings must be a JSON string or object") + + +def _strip_metadata(doc: Dict[str, Any]) -> Dict[str, Any]: + return {k: v for k, v in doc.items() if not k.startswith('_')} + + +def _serialize_additional_settings(raw: Any) -> str: + try: + parsed = _parse_additional_settings(raw) + except ValueError: + return raw if isinstance(raw, str) else "" + if not parsed: + return "" + return json.dumps(parsed, indent=2, sort_keys=True) + + +def _sanitize_template(doc: Dict[str, Any], include_internal: bool = False) -> Dict[str, Any]: + cleaned = _strip_metadata(doc) + cleaned.setdefault('actions_to_load', []) + cleaned['actions_to_load'] = [a for a in cleaned['actions_to_load'] if a] + cleaned.setdefault('tags', []) + cleaned['tags'] = [str(tag)[:64] for tag in cleaned['tags']] + cleaned['helper_text'] = _normalize_helper_text( + cleaned.get('description', ''), + cleaned.get('helper_text') + ) + cleaned['additional_settings'] = _serialize_additional_settings(cleaned.get('additional_settings')) + cleaned.setdefault('status', STATUS_PENDING) + cleaned.setdefault('title', cleaned.get('display_name') or 'Agent Template') + cleaned.setdefault('template_key', _slugify(cleaned['title'])) + + if not include_internal: + for field in ['submission_notes', 'review_notes', 'rejection_reason', 'created_by', 'created_by_email']: + cleaned.pop(field, None) + + return cleaned + + +def _validate_template_lengths(payload: Dict[str, Any]) -> None: + for field, max_len in _MAX_TEMPLATE_FIELD_LENGTHS.items(): + value = payload.get(field, "") + if isinstance(value, str) and len(value) > max_len: + raise ValueError(f"{field} exceeds maximum length of {max_len}.") + + for field, max_len in _MAX_TEMPLATE_LIST_ITEM_LENGTHS.items(): + values = payload.get(field) or [] + if not isinstance(values, list): + continue + for item in values: + if isinstance(item, str) and len(item) > max_len: + raise ValueError(f"{field} entries exceed maximum length of {max_len}.") + + +def validate_template_payload(payload: Dict[str, Any]) -> Optional[str]: + if not isinstance(payload, dict): + return "Template payload must be an object" + if not (payload.get('display_name') or payload.get('title')): + return "Display name is required" + if not payload.get('description'): + return "Description is required" + if not payload.get('instructions'): + return "Instructions are required" + if payload.get('additional_settings'): + try: + _parse_additional_settings(payload['additional_settings']) + except ValueError as exc: + return str(exc) + # Return false if valid to keep with consistency of returning bools or values because we return the error. 
+ return False + + +def list_agent_templates(status: Optional[str] = None, include_internal: bool = False) -> List[Dict[str, Any]]: + query = "SELECT * FROM c" + parameters = [] + if status: + query += " WHERE c.status = @status" + parameters.append({"name": "@status", "value": status}) + + try: + items = list( + cosmos_agent_templates_container.query_items( + query=query, + parameters=parameters or None, + enable_cross_partition_query=True, + ) + ) + except Exception as exc: + current_app.logger.error("Failed to list agent templates: %s", exc) + return [] + + sanitized = [_sanitize_template(item, include_internal) for item in items] + sanitized.sort(key=lambda tpl: tpl.get('title', '').lower()) + return sanitized + + +def get_agent_template(template_id: str) -> Optional[Dict[str, Any]]: + try: + doc = cosmos_agent_templates_container.read_item(item=template_id, partition_key=template_id) + return _sanitize_template(doc, include_internal=True) + except exceptions.CosmosResourceNotFoundError: + return None + except Exception as exc: + current_app.logger.error("Failed to fetch agent template %s: %s", template_id, exc) + return None + + +def _base_template_from_payload(payload: Dict[str, Any], user_info: Optional[Dict[str, Any]], auto_approve: bool) -> Dict[str, Any]: + now = _utc_now() + title = payload.get('title') or payload.get('display_name') or 'Agent Template' + helper_text = _normalize_helper_text(payload.get('description', ''), payload.get('helper_text')) + additional_settings = _parse_additional_settings(payload.get('additional_settings')) + tags = payload.get('tags') or [] + tags = [str(tag)[:64] for tag in tags] + + actions = [str(action) for action in (payload.get('actions_to_load') or []) if action] + + template = { + 'id': payload.get('id') or str(uuid.uuid4()), + 'template_key': payload.get('template_key') or f"{_slugify(title)}-{uuid.uuid4().hex[:6]}", + 'title': title, + 'display_name': payload.get('display_name') or title, + 'helper_text': helper_text, + 'description': payload.get('description', ''), + 'instructions': payload.get('instructions', ''), + 'additional_settings': additional_settings, + 'actions_to_load': actions, + 'tags': tags, + 'status': STATUS_APPROVED if auto_approve else STATUS_PENDING, + 'created_at': now, + 'updated_at': now, + 'created_by': user_info.get('userId') if user_info else None, + 'created_by_name': user_info.get('displayName') if user_info else None, + 'created_by_email': user_info.get('email') if user_info else None, + 'submission_notes': payload.get('submission_notes'), + 'source_agent_id': payload.get('source_agent_id'), + 'source_scope': payload.get('source_scope') or 'personal', + 'approved_by': user_info.get('userId') if auto_approve and user_info else None, + 'approved_at': now if auto_approve else None, + 'review_notes': payload.get('review_notes'), + 'rejection_reason': None, + } + return template + + +def create_agent_template(payload: Dict[str, Any], user_info: Optional[Dict[str, Any]], auto_approve: bool = False) -> Dict[str, Any]: + template = _base_template_from_payload(payload, user_info, auto_approve) + try: + cosmos_agent_templates_container.upsert_item(template) + except Exception as exc: + current_app.logger.error("Failed to save agent template: %s", exc) + raise + + log_event( + "Agent template submitted", + extra={ + "template_id": template['id'], + "status": template['status'], + "created_by": template.get('created_by'), + }, + ) + return _sanitize_template(template, include_internal=True) + + +def 
update_agent_template(template_id: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]: + doc = get_agent_template(template_id) + if not doc: + return None + + mutable_fields = { + 'title', 'display_name', 'helper_text', 'description', 'instructions', + 'additional_settings', 'actions_to_load', 'tags', 'status' + } + payload = {k: v for k, v in updates.items() if k in mutable_fields} + + if 'additional_settings' in payload: + payload['additional_settings'] = _parse_additional_settings(payload['additional_settings']) + else: + payload['additional_settings'] = _parse_additional_settings(doc.get('additional_settings')) + + if 'tags' in payload: + payload['tags'] = [str(tag)[:64] for tag in payload['tags']] + + if 'status' in payload: + status = payload['status'] + if status not in ALLOWED_STATUSES: + raise ValueError("Invalid template status") + else: + payload['status'] = doc.get('status', STATUS_PENDING) + + template = { + **doc, + **payload, + } + template['helper_text'] = _normalize_helper_text( + template.get('description', ''), + template.get('helper_text') + ) + template['updated_at'] = _utc_now() + template['additional_settings'] = payload['additional_settings'] + _validate_template_lengths(template) + + try: + cosmos_agent_templates_container.upsert_item(template) + except Exception as exc: + current_app.logger.error("Failed to update agent template %s: %s", template_id, exc) + raise + + return _sanitize_template(template, include_internal=True) + + +def approve_agent_template(template_id: str, approver_info: Dict[str, Any], notes: Optional[str] = None) -> Optional[Dict[str, Any]]: + doc = get_agent_template(template_id) + if not doc: + return None + doc['additional_settings'] = _parse_additional_settings(doc.get('additional_settings')) + doc['status'] = STATUS_APPROVED + doc['approved_by'] = approver_info.get('userId') + doc['approved_at'] = _utc_now() + doc['review_notes'] = notes + doc['rejection_reason'] = None + doc['updated_at'] = doc['approved_at'] + + try: + cosmos_agent_templates_container.upsert_item(doc) + except Exception as exc: + current_app.logger.error("Failed to approve agent template %s: %s", template_id, exc) + raise + + log_event( + "Agent template approved", + extra={"template_id": template_id, "approved_by": doc['approved_by']}, + ) + return _sanitize_template(doc, include_internal=True) + + +def reject_agent_template(template_id: str, approver_info: Dict[str, Any], reason: str, notes: Optional[str] = None) -> Optional[Dict[str, Any]]: + doc = get_agent_template(template_id) + if not doc: + return None + doc['additional_settings'] = _parse_additional_settings(doc.get('additional_settings')) + doc['status'] = STATUS_REJECTED + doc['approved_by'] = approver_info.get('userId') + doc['approved_at'] = _utc_now() + doc['review_notes'] = notes + doc['rejection_reason'] = reason + doc['updated_at'] = doc['approved_at'] + + try: + cosmos_agent_templates_container.upsert_item(doc) + except Exception as exc: + current_app.logger.error("Failed to reject agent template %s: %s", template_id, exc) + raise + + log_event( + "Agent template rejected", + extra={"template_id": template_id, "approved_by": doc['approved_by']}, + ) + return _sanitize_template(doc, include_internal=True) + + +def delete_agent_template(template_id: str) -> bool: + try: + cosmos_agent_templates_container.delete_item(item=template_id, partition_key=template_id) + log_event("Agent template deleted", extra={"template_id": template_id}) + return True + except exceptions.CosmosResourceNotFoundError: + 
return False + except Exception as exc: + current_app.logger.error("Failed to delete agent template %s: %s", template_id, exc) + raise diff --git a/application/single_app/functions_agents.py b/application/single_app/functions_agents.py index 63b7edeb..9aa589c5 100644 --- a/application/single_app/functions_agents.py +++ b/application/single_app/functions_agents.py @@ -3,6 +3,7 @@ import asyncio from concurrent.futures import ThreadPoolExecutor from functions_settings import get_settings +from semantic_kernel.agents.runtime.in_process.in_process_runtime import InProcessRuntime # Global executor for background orchestration executor = ThreadPoolExecutor(max_workers=4) # Tune as needed @@ -13,7 +14,6 @@ def _runner(): asyncio.set_event_loop(loop) runtime = None try: - from semantic_kernel.agents.runtime.in_process.in_process_runtime import InProcessRuntime runtime = InProcessRuntime() result = loop.run_until_complete( run_sk_call( diff --git a/application/single_app/functions_appinsights.py b/application/single_app/functions_appinsights.py index 320f8c5f..41e535e5 100644 --- a/application/single_app/functions_appinsights.py +++ b/application/single_app/functions_appinsights.py @@ -4,6 +4,7 @@ import os import threading from azure.monitor.opentelemetry import configure_azure_monitor +import app_settings_cache # Singleton for the logger and Azure Monitor configuration _appinsights_logger = None @@ -44,29 +45,41 @@ def log_event( exceptionTraceback (Any, optional): If set to True, includes exception traceback. """ try: + try: + cache = app_settings_cache.get_settings_cache() or None + except Exception: + cache = None + # Get logger - use Azure Monitor logger if configured, otherwise standard logger logger = get_appinsights_logger() if not logger: + print(f"[Log] {message} -- {extra}") logger = logging.getLogger('standard') if not logger.handlers: logger.addHandler(logging.StreamHandler()) logger.setLevel(logging.INFO) - + # Enhanced exception handling for Application Insights # When exceptionTraceback=True, ensure we capture full exception context exc_info_to_use = exceptionTraceback - + # For ERROR level logs with exceptionTraceback=True, always log as exception if level >= logging.ERROR and exceptionTraceback: if logger and hasattr(logger, 'exception'): + if cache and cache.get('enable_debug_logging', False): + print(f"DEBUG: [ERROR][Log] {message} -- {extra if extra else 'No Extra Dimensions'}") # Use logger.exception() for better exception capture in Application Insights - logger.exception(message, extra=extra, stacklevel=stacklevel) + logger.exception(message, extra=extra, stacklevel=stacklevel, stack_info=includeStack, exc_info=True) return else: # Fallback to standard logging with exc_info exc_info_to_use = True - + # Format message with extra properties for structured logging + + #TODO: Find a way to cache get_settings() globally (and update it when changed) to enable debug printing. 
Cannot use debug_print due to circular import + if cache and cache.get('enable_debug_logging', False): + print(f"DEBUG: [Log] {message} -- {extra if extra else 'No Extra Dimensions'}") # Debug print to console if extra: # For modern Azure Monitor, extra properties are automatically captured logger.log( @@ -85,12 +98,12 @@ def log_event( stack_info=includeStack, exc_info=exc_info_to_use ) - + # For Azure Monitor, ensure exception-level logs are properly categorized if level >= logging.ERROR and _azure_monitor_configured: # Add a debug print to verify exception logging is working print(f"[Azure Monitor] Exception logged: {message[:100]}...") - + except Exception as e: # Fallback to basic logging if anything fails try: @@ -98,11 +111,11 @@ def log_event( if not fallback_logger.handlers: fallback_logger.addHandler(logging.StreamHandler()) fallback_logger.setLevel(logging.INFO) - + fallback_message = f"{message} | Original error: {str(e)}" if extra: fallback_message += f" | Extra: {extra}" - + fallback_logger.log(level, fallback_message) except: # If even basic logging fails, print to console diff --git a/application/single_app/functions_approvals.py b/application/single_app/functions_approvals.py new file mode 100644 index 00000000..14b803cd --- /dev/null +++ b/application/single_app/functions_approvals.py @@ -0,0 +1,850 @@ +# functions_approvals.py + +""" +Approval workflow functions for Control Center administrative operations. +Handles approval requests for sensitive operations like ownership transfers, +group deletions, and document deletions. +""" + +import uuid +import logging +from datetime import datetime, timedelta +from typing import Optional, List, Dict, Any +from config import cosmos_approvals_container, cosmos_groups_container +from functions_appinsights import log_event +from functions_notifications import create_notification +from functions_group import find_group_by_id +from functions_debug import debug_print + +# Approval request statuses +STATUS_PENDING = "pending" +STATUS_APPROVED = "approved" +STATUS_DENIED = "denied" +STATUS_AUTO_DENIED = "auto_denied" +STATUS_EXECUTED = "executed" +STATUS_FAILED = "failed" + +# Approval request types +TYPE_TAKE_OWNERSHIP = "take_ownership" +TYPE_TRANSFER_OWNERSHIP = "transfer_ownership" +TYPE_DELETE_DOCUMENTS = "delete_documents" +TYPE_DELETE_GROUP = "delete_group" +TYPE_DELETE_USER_DOCUMENTS = "delete_user_documents" + +# TTL settings +TTL_AUTO_DENY_DAYS = 3 +TTL_AUTO_DENY_SECONDS = TTL_AUTO_DENY_DAYS * 24 * 60 * 60 # 3 days in seconds + + +def create_approval_request( + request_type: str, + group_id: str, + requester_id: str, + requester_email: str, + requester_name: str, + reason: str, + metadata: Optional[Dict[str, Any]] = None +) -> Dict[str, Any]: + """ + Create a new approval request for a sensitive Control Center operation. 
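+ Each request records an expires_at timestamp and a Cosmos DB TTL; pending requests older than TTL_AUTO_DENY_DAYS (3 days) are auto-denied by auto_deny_expired_approvals().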
+ + Args: + request_type: Type of request (take_ownership, transfer_ownership, delete_documents, delete_group, delete_user_documents) + group_id: ID of the group being affected (or user_id for user-related requests) + requester_id: User ID of the person requesting the action + requester_email: Email of the requester + requester_name: Display name of the requester + reason: Explanation/justification for the request + metadata: Additional request-specific data (e.g., new_owner_id for transfers, user_name for user documents) + + Returns: + Created approval request document + """ + try: + # For user document deletion requests, use metadata for display info + # Initialize group variable for notifications (may be None for non-group operations) + group = None + + if request_type == TYPE_DELETE_USER_DOCUMENTS: + # For user document deletions, group_id is actually the user_id (partition key) + group_name = metadata.get('user_name', 'Unknown User') + group_owner = {} + elif metadata and metadata.get('entity_type') == 'workspace': + # For public workspace operations + from config import cosmos_public_workspaces_container + try: + workspace = cosmos_public_workspaces_container.read_item(item=group_id, partition_key=group_id) + group_name = workspace.get('name', 'Unknown Workspace') + workspace_owner = workspace.get('owner', {}) + if isinstance(workspace_owner, dict): + group_owner = { + 'id': workspace_owner.get('userId'), + 'email': workspace_owner.get('email'), + 'displayName': workspace_owner.get('displayName') + } + else: + # Old format where owner is just a string ID + group_owner = {'id': workspace_owner, 'email': 'unknown', 'displayName': 'unknown'} + + # Normalize workspace owner structure to match group owner structure for notifications + # Workspace uses 'userId' but notification function expects 'id' + workspace['owner'] = group_owner + + # Set group to workspace for notification purposes + group = workspace + except: + raise ValueError(f"Workspace {group_id} not found") + else: + # Get group details for group-based approvals + group = find_group_by_id(group_id) + if not group: + raise ValueError(f"Group {group_id} not found") + + group_name = group.get('name', 'Unknown Group') + group_owner = group.get('owner', {}) + + # Create approval request document + approval_id = str(uuid.uuid4()) + now = datetime.utcnow() + + approval_request = { + 'id': approval_id, + 'group_id': group_id, # Partition key + 'request_type': request_type, + 'status': STATUS_PENDING, + 'group_name': group_name, + 'requester_id': requester_id, + 'requester_email': requester_email, + 'requester_name': requester_name, + 'reason': reason, + 'group_owner_id': group_owner.get('id'), + 'group_owner_email': group_owner.get('email'), + 'group_owner_name': group_owner.get('displayName', group_owner.get('email')), + 'created_at': now.isoformat(), + 'expires_at': (now + timedelta(days=TTL_AUTO_DENY_DAYS)).isoformat(), + 'ttl': TTL_AUTO_DENY_SECONDS, # Auto-deny after 3 days + 'approved_by_id': None, + 'approved_by_email': None, + 'approved_by_name': None, + 'approved_at': None, + 'approval_comment': None, + 'executed_at': None, + 'execution_result': None, + 'metadata': metadata or {} + } + + # Save to Cosmos DB + cosmos_approvals_container.create_item(body=approval_request) + + # Log event + log_event("[Approvals] Created approval request", { + 'approval_id': approval_id, + 'request_type': request_type, + 'group_id': group_id, + 'group_name': group_name, + 'requester': requester_email, + 'reason': reason + }) + 
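# Echo the full request document to the debug console for troubleshooting +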
debug_print(f"Created approval request: {approval_request}") + + # Create notifications for eligible approvers + _create_approval_notifications(approval_request, group if request_type != TYPE_DELETE_USER_DOCUMENTS else None) + + return approval_request + + except Exception as e: + log_event("[Approvals] Error creating approval request", { + 'error': str(e), + 'request_type': request_type, + 'group_id': group_id, + 'requester': requester_email + }, level=logging.ERROR) + debug_print(f"Error creating approval request: {e}") + raise + + +def get_pending_approvals( + user_id: str, + user_roles: List[str], + page: int = 1, + per_page: int = 20, + include_completed: bool = False, + request_type_filter: Optional[str] = None, + status_filter: str = 'pending' +) -> Dict[str, Any]: + """ + Get approval requests that the user is eligible to approve. + + Args: + user_id: Current user ID + user_roles: List of roles the user has (e.g., ['admin', 'ControlCenterAdmin']) + page: Page number for pagination + per_page: Items per page + include_completed: Include approved/denied/executed requests + request_type_filter: Filter by request type + status_filter: Filter by specific status ('pending', 'approved', 'denied', 'executed', 'all') + + Returns: + Dictionary with approvals list, total count, and pagination info + """ + try: + # Build query based on filters + query_parts = ["SELECT * FROM c WHERE 1=1"] + parameters = [] + + # Status filter + if status_filter != 'all': + # If specific status requested (pending, approved, denied, executed) + query_parts.append("AND c.status = @status") + parameters.append({"name": "@status", "value": status_filter}) + # else: 'all' means no status filter + + # Request type filter + if request_type_filter: + query_parts.append("AND c.request_type = @request_type") + parameters.append({"name": "@request_type", "value": request_type_filter}) + + # Order by created date descending + query_parts.append("ORDER BY c.created_at DESC") + + query = " ".join(query_parts) + + debug_print(f"📋 [GET_APPROVALS] Query: {query}") + debug_print(f"📋 [GET_APPROVALS] Parameters: {parameters}") + debug_print(f"📋 [GET_APPROVALS] status_filter: {status_filter}") + + # Execute cross-partition query (we need to see all groups) + items = list(cosmos_approvals_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"📋 [GET_APPROVALS] Found {len(items)} total items from query") + + # Filter by user eligibility + # For pending requests: check if user can approve + # For completed requests: check if user has visibility (was involved or is admin/owner) + eligible_approvals = [] + for approval in items: + if status_filter == 'pending': + # For pending requests, check if user can approve + if _can_user_approve(approval, user_id, user_roles): + eligible_approvals.append(approval) + else: + # For completed requests, check if user has visibility + if _can_user_view(approval, user_id, user_roles): + eligible_approvals.append(approval) + + debug_print(f"📋 [GET_APPROVALS] After eligibility filter: {len(eligible_approvals)} approvals") + + # Paginate + total_count = len(eligible_approvals) + start_idx = (page - 1) * per_page + end_idx = start_idx + per_page + paginated_approvals = eligible_approvals[start_idx:end_idx] + + debug_print(f"User {user_id} fetched pending approvals: page {page}, per_page {per_page}, total {total_count}") + + return { + 'approvals': paginated_approvals, + 'total': total_count, + 'page': page, + 'per_page': per_page, + 
'total_pages': (total_count + per_page - 1) // per_page + } + + except Exception as e: + log_event("[Approvals] Error fetching pending approvals", { + 'error': str(e), + 'user_id': user_id, + 'user_roles': user_roles + }) + debug_print(f"Error fetching pending approvals: {e}") + raise + + +def approve_request( + approval_id: str, + group_id: str, + approver_id: str, + approver_email: str, + approver_name: str, + comment: Optional[str] = None +) -> Dict[str, Any]: + """ + Approve an approval request. + + Args: + approval_id: ID of the approval request + group_id: Group ID (partition key) + approver_id: User ID of approver + approver_email: Email of approver + approver_name: Display name of approver + comment: Optional comment from approver + + Returns: + Updated approval request document + """ + try: + # Get the approval request + approval = cosmos_approvals_container.read_item( + item=approval_id, + partition_key=group_id + ) + + # Validate status + if approval['status'] != STATUS_PENDING: + debug_print(f"Cannot approve request with status: {approval['status']}") + raise ValueError(f"Cannot approve request with status: {approval['status']}") + + # Update approval status + approval['status'] = STATUS_APPROVED + approval['approved_by_id'] = approver_id + approval['approved_by_email'] = approver_email + approval['approved_by_name'] = approver_name + approval['approved_at'] = datetime.utcnow().isoformat() + approval['approval_comment'] = comment + approval['ttl'] = -1 # Remove TTL so it doesn't auto-delete + + # Save updated approval + cosmos_approvals_container.upsert_item(approval) + + # Log event + log_event("[Approvals] Request approved", { + 'approval_id': approval_id, + 'request_type': approval['request_type'], + 'group_id': group_id, + 'approver': approver_email, + 'comment': comment + }) + debug_print(f"Approved request: {approval}") + + # Create notification for requester + create_notification( + user_id=approval['requester_id'], + notification_type='approval_request_approved', + title=f"Request Approved: {_format_request_type(approval['request_type'])}", + message=f"Your request for {approval['group_name']} has been approved by {approver_name}.", + link_url='/approvals', + link_context={ + 'approval_id': approval_id + }, + metadata={ + 'approval_id': approval_id, + 'request_type': approval['request_type'], + 'group_id': group_id, + 'approver_email': approver_email, + 'comment': comment + } + ) + + return approval + + except Exception as e: + log_event("[Approvals] Error approving request", { + 'error': str(e), + 'approval_id': approval_id, + 'group_id': group_id, + 'approver': approver_email + }) + debug_print(f"Error approving request: {e}") + raise + + +def deny_request( + approval_id: str, + group_id: str, + denier_id: str, + denier_email: str, + denier_name: str, + comment: str, + auto_denied: bool = False +) -> Dict[str, Any]: + """ + Deny an approval request. 
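+ Used for manual denials by an approver and, with auto_denied=True, for system auto-denials of expired requests.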
+ + Args: + approval_id: ID of the approval request + group_id: Group ID (partition key) + denier_id: User ID of person denying (or 'system' for auto-deny) + denier_email: Email of denier + denier_name: Display name of denier + comment: Reason for denial + auto_denied: Whether this is an automatic denial + + Returns: + Updated approval request document + """ + try: + # Get the approval request + approval = cosmos_approvals_container.read_item( + item=approval_id, + partition_key=group_id + ) + + # Validate status (allow denying pending requests) + if approval['status'] not in [STATUS_PENDING]: + debug_print(f"Cannot deny request with status: {approval['status']}") + raise ValueError(f"Cannot deny request with status: {approval['status']}") + + # Update approval status + approval['status'] = STATUS_AUTO_DENIED if auto_denied else STATUS_DENIED + approval['approved_by_id'] = denier_id + approval['approved_by_email'] = denier_email + approval['approved_by_name'] = denier_name + approval['approved_at'] = datetime.utcnow().isoformat() + approval['approval_comment'] = comment + approval['ttl'] = -1 # Remove TTL + + # Save updated approval + cosmos_approvals_container.upsert_item(approval) + + # Log event + log_event("[Approvals] Request denied", { + 'approval_id': approval_id, + 'request_type': approval['request_type'], + 'group_id': group_id, + 'denier': denier_email, + 'auto_denied': auto_denied, + 'comment': comment + }) + debug_print(f"Request denied: {approval_id}") + + # Create notification for requester (only if not auto-denied) + if not auto_denied: + create_notification( + user_id=approval['requester_id'], + notification_type='approval_request_denied', + title=f"Request Denied: {_format_request_type(approval['request_type'])}", + message=f"Your request for {approval['group_name']} was denied by {denier_name}.", + link_url='/approvals', + link_context={ + 'approval_id': approval_id + }, + metadata={ + 'approval_id': approval_id, + 'request_type': approval['request_type'], + 'group_id': group_id, + 'denier_email': denier_email, + 'comment': comment + } + ) + + return approval + + except Exception as e: + log_event("[Approvals] Error denying request", { + 'error': str(e), + 'approval_id': approval_id, + 'group_id': group_id, + 'denier_id': denier_id, + 'comment': comment, + 'auto_denied': auto_denied + }) + debug_print(f"Error denying request: {e}") + raise + + +def mark_approval_executed( + approval_id: str, + group_id: str, + success: bool, + result_message: str +) -> Dict[str, Any]: + """ + Mark an approved request as executed (or failed). 
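+ Call this after the approved operation has been attempted so the request records the execution outcome.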
+ + Args: + approval_id: ID of the approval request + group_id: Group ID (partition key) + success: Whether execution was successful + result_message: Result message or error + + Returns: + Updated approval request document + """ + try: + # Get the approval request + approval = cosmos_approvals_container.read_item( + item=approval_id, + partition_key=group_id + ) + + # Update execution status + approval['status'] = STATUS_EXECUTED if success else STATUS_FAILED + approval['executed_at'] = datetime.utcnow().isoformat() + approval['execution_result'] = result_message + + # Save updated approval + cosmos_approvals_container.upsert_item(approval) + + # Log event + log_event("[Approvals] Request executed", { + 'approval_id': approval_id, + 'request_type': approval['request_type'], + 'group_id': group_id, + 'success': success, + 'result': result_message + }) + debug_print(f"Marked approval as executed: {approval_id}, success: {success}") + + return approval + + except Exception as e: + log_event("[Approvals] Error marking request as executed", { + 'error': str(e), + 'approval_id': approval_id, + 'group_id': group_id, + 'success': success, + 'result': result_message + }) + debug_print(f"Error marking approval as executed: {e}") + raise + + +def get_approval_by_id(approval_id: str, group_id: str) -> Optional[Dict[str, Any]]: + """ + Get a specific approval request by ID. + + Args: + approval_id: ID of the approval request + group_id: Group ID (partition key) + + Returns: + Approval request document or None if not found + """ + try: + return cosmos_approvals_container.read_item( + item=approval_id, + partition_key=group_id + ) + except Exception: + log_event("[Approvals] Approval not found", { + 'approval_id': approval_id, + 'group_id': group_id + }) + debug_print(f"Approval not found: {approval_id}") + return None + + +def auto_deny_expired_approvals() -> int: + """ + Auto-deny approval requests that have expired (older than 3 days). + This function should be called by a scheduled job. 
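+ Expired requests are denied through deny_request() on behalf of a 'system' user; auto-denials do not notify the requester.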
+ + Returns: + Number of approvals auto-denied + """ + try: + # Query for pending approvals + query = "SELECT * FROM c WHERE c.status = @status" + parameters = [{"name": "@status", "value": STATUS_PENDING}] + + pending_approvals = list(cosmos_approvals_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + + now = datetime.utcnow() + denied_count = 0 + + for approval in pending_approvals: + expires_at = datetime.fromisoformat(approval['expires_at']) + + # Check if expired + if now >= expires_at: + try: + deny_request( + approval_id=approval['id'], + group_id=approval['group_id'], + denier_id='system', + denier_email='system@simplechat', + denier_name='System Auto-Deny', + comment='Request automatically denied after 3 days without approval.', + auto_denied=True + ) + denied_count += 1 + except Exception as e: + log_event("[Approvals] Error auto-denying expired approval", { + 'approval_id': approval['id'], + 'error': str(e) + }) + debug_print(f"Error auto-denying approval {approval['id']}: {e}") + + if denied_count > 0: + log_event("[Approvals] Auto-denied expired approvals", { + 'denied_count': denied_count + }) + debug_print(f"Auto-denied {denied_count} expired approvals") + + return denied_count + + except Exception as e: + log_event("[Approvals] Error in auto_deny_expired_approvals", { + 'error': str(e) + }) + debug_print(f"Error in auto_deny_expired_approvals: {e}") + return 0 + + +def _can_user_view( + approval: Dict[str, Any], + user_id: str, + user_roles: List[str] +) -> bool: + """ + Check if a user can view a specific approval request (including completed ones). + + Visibility rules (more permissive than approval rights): + - User is the requester, OR + - User is the approver, OR + - User is the group owner, OR + - User is the personal workspace owner (for user document operations), OR + - User has 'ControlCenterAdmin' role, OR + - User has 'Admin' role + + Args: + approval: Approval request document + user_id: User ID to check + user_roles: List of roles the user has + + Returns: + True if user can view, False otherwise + """ + # Check if user was involved in the request + is_requester = approval.get('requester_id') == user_id + is_approver = approval.get('approved_by_id') == user_id + + # Check if user is the group owner + is_group_owner = approval.get('group_owner_id') == user_id + + # Check if user is the personal workspace owner (for user document deletion) + is_personal_workspace_owner = False + if approval.get('request_type') == TYPE_DELETE_USER_DOCUMENTS: + target_user_id = approval.get('metadata', {}).get('user_id') + is_personal_workspace_owner = target_user_id == user_id + + # Check if user has admin roles + has_control_center_admin = 'ControlCenterAdmin' in user_roles + has_admin = 'Admin' in user_roles or 'admin' in user_roles + + # User can view if they meet any of these criteria + return (is_requester or is_approver or is_group_owner or + is_personal_workspace_owner or has_control_center_admin or has_admin) + + +def _can_user_approve( + approval: Dict[str, Any], + user_id: str, + user_roles: List[str] +) -> bool: + """ + Check if a user is eligible to approve a specific request. 
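+ get_pending_approvals() uses this check so users only see pending requests they are allowed to act on.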
+ + Eligibility rules: + - User must be the group owner (for group operations), OR + - User must be the personal workspace owner (for user document operations), OR + - User must have 'ControlCenterAdmin' role, OR + - User must have 'Admin' role + - The requester may approve their own request; the approval record itself documents the self-approval + + Args: + approval: Approval request document + user_id: User ID to check + user_roles: List of roles the user has + + Returns: + True if user can approve, False otherwise + """ + # Check if user is the group owner (for group-based approvals) + is_group_owner = approval.get('group_owner_id') == user_id + + # Check if user is the personal workspace owner (for user document deletion) + is_personal_workspace_owner = False + if approval.get('request_type') == TYPE_DELETE_USER_DOCUMENTS: + # For user document deletion, check if user owns the documents + target_user_id = approval.get('metadata', {}).get('user_id') + is_personal_workspace_owner = target_user_id == user_id + + # Check if user has admin roles (check both capitalized and lowercase) + has_control_center_admin = 'ControlCenterAdmin' in user_roles + has_admin = 'Admin' in user_roles or 'admin' in user_roles + + # User must have at least one eligibility criterion + if not (is_group_owner or is_personal_workspace_owner or has_control_center_admin or has_admin): + return False + + # Requesters are allowed to approve their own requests (e.g. when they are the only eligible approver); + # the approval record itself documents the self-approval + return True + + +def _create_approval_notifications( + approval: Dict[str, Any], + group: Optional[Dict[str, Any]] +) -> None: + """ + Create notifications for all users who can approve the request using assignment-based targeting. + Notifications target users by roles (Admin, ControlCenterAdmin) and/or ownership IDs. 
+ + For user management (delete_user_documents): + - Notifies: Control Center Admins, Admins, and the affected user + For group management (transfer_ownership, delete_documents, delete_group, take_ownership): + - Notifies: Control Center Admins, Admins, and the group owner + + Args: + approval: Approval request document + group: Group document (None for user-related approvals) + """ + try: + log_event("[Approvals] Creating assignment-based approval notifications", { + 'approval_id': approval['id'], + 'group_id': approval['group_id'], + 'request_type': approval['request_type'] + }) + debug_print(f"Creating assignment-based approval notifications for approval: {approval['id']}") + + # Build assignment criteria based on request type + assignment = { + 'roles': ['Admin', 'ControlCenterAdmin'] # Always include admin roles + } + + # Add ownership-based targeting + if approval['request_type'] == TYPE_DELETE_USER_DOCUMENTS: + # For user document deletion: notify the user whose documents are being deleted + user_id = approval.get('metadata', {}).get('user_id') + if user_id: + assignment['personal_workspace_owner_id'] = user_id + log_event("[Approvals] Targeting user for document deletion", { + 'user_id': user_id, + 'approval_id': approval['id'] + }) + debug_print(f"Added personal workspace owner {user_id} to notification assignment") + else: + # For group operations: notify the group owner + if group: + group_owner_id = group.get('owner', {}).get('id') + if group_owner_id: + assignment['group_owner_id'] = group_owner_id + log_event("[Approvals] Targeting group owner", { + 'group_owner_id': group_owner_id, + 'approval_id': approval['id'] + }) + debug_print(f"Added group owner {group_owner_id} to notification assignment") + else: + log_event("[Approvals] No group provided for group-based approval", { + 'approval_id': approval['id'], + 'request_type': approval['request_type'] + }, level=logging.WARNING) + + log_event("[Approvals] Notification assignment", { + 'approval_id': approval['id'], + 'assignment': assignment + }) + debug_print(f"Notification assignment for approval {approval['id']}: {assignment}") + + # For transfer ownership requests, also notify the new owner (informational) + if approval['request_type'] == TYPE_TRANSFER_OWNERSHIP: + new_owner_id = approval.get('metadata', {}).get('new_owner_id') + if new_owner_id and new_owner_id != approval['requester_id']: + # Create informational notification for new owner + try: + log_event("[Approvals] Notifying new owner", { + 'user_id': new_owner_id, + 'approval_id': approval['id'] + }) + debug_print(f"Notifying new owner {new_owner_id} about transfer request") + create_notification( + group_id=approval['group_id'], + notification_type='approval_request_pending', + title=f"Ownership Transfer Pending", + message=f"{approval['requester_name']} has requested to transfer ownership of {approval['group_name']} to you. 
Awaiting approval.", + link_url='/approvals', + link_context={ + 'approval_id': approval['id'] + }, + metadata={ + 'approval_id': approval['id'], + 'request_type': approval['request_type'], + 'group_id': approval['group_id'], + 'requester_email': approval['requester_email'] + }, + assignment={ + 'personal_workspace_owner_id': new_owner_id # Only new owner sees this + } + ) + debug_print(f"Successfully notified new owner {new_owner_id}") + except Exception as notify_error: + log_event("[Approvals] Error notifying new owner", { + 'error': str(notify_error), + 'user_id': new_owner_id, + 'approval_id': approval['id'] + }) + debug_print(f"Error notifying new owner {new_owner_id}: {str(notify_error)}") + + # Create single notification with assignment - visible to all eligible approvers + try: + log_event("[Approvals] Creating approval notification with assignment", { + 'approval_id': approval['id'], + 'assignment': assignment + }) + debug_print(f"Creating approval notification with assignment for approval {approval['id']}") + create_notification( + group_id=approval['group_id'], + notification_type='approval_request_pending', + title=f"Approval Required: {_format_request_type(approval['request_type'])}", + message=f"{approval['requester_name']} requests {_format_request_type(approval['request_type'])} for {approval['group_name']}. Reason: {approval.get('reason', 'Not provided')}", + link_url='/approvals', + link_context={ + 'approval_id': approval['id'] + }, + metadata={ + 'approval_id': approval['id'], + 'request_type': approval['request_type'], + 'group_id': approval['group_id'], + 'requester_email': approval['requester_email'], + 'reason': approval['reason'] + }, + assignment=assignment + ) + debug_print(f"Successfully created approval notification with assignment for approval {approval['id']}") + except Exception as notify_error: + log_event("[Approvals] Error creating approval notification", { + 'error': str(notify_error), + 'approval_id': approval['id'] + }) + debug_print(f"Error creating approval notification for approval {approval['id']}: {str(notify_error)}") + + except Exception as e: + log_event("[Approvals] Error notifying users about approval request", { + 'error': str(e), + 'approval_id': approval['id'] + }) + debug_print(f"Error notifying users about approval request {approval['id']}: {str(e)}") + # Don't raise - notifications are non-critical + + +def _format_request_type(request_type: str) -> str: + """ + Format request type for display. 
+ + Args: + request_type: Request type constant + + Returns: + Human-readable request type string + """ + type_labels = { + TYPE_TAKE_OWNERSHIP: "Take Ownership", + TYPE_TRANSFER_OWNERSHIP: "Transfer Ownership", + TYPE_DELETE_DOCUMENTS: "Delete All Documents", + TYPE_DELETE_GROUP: "Delete Group", + TYPE_DELETE_USER_DOCUMENTS: "Delete All User Documents" + } + return type_labels.get(request_type, request_type) diff --git a/application/single_app/functions_authentication.py b/application/single_app/functions_authentication.py index 4d068938..e4bcf480 100644 --- a/application/single_app/functions_authentication.py +++ b/application/single_app/functions_authentication.py @@ -2,6 +2,7 @@ from config import * from functions_settings import * +from functions_debug import debug_print # Default redirect path for OAuth consent flow (must match your Azure AD app registration) REDIRECT_PATH = getattr(globals(), 'REDIRECT_PATH', '/getAToken') @@ -37,7 +38,7 @@ def _load_cache(): cache.deserialize(session["token_cache"]) except Exception as e: # Handle potential corruption or format issues gracefully - print(f"Warning: Could not deserialize token cache: {e}. Starting fresh.") + debug_print(f"Warning: Could not deserialize token cache: {e}. Starting fresh.") session.pop("token_cache", None) # Clear corrupted cache return cache @@ -47,7 +48,7 @@ def _save_cache(cache): try: session["token_cache"] = cache.serialize() except Exception as e: - print(f"Error: Could not serialize token cache: {e}") + debug_print(f"Error: Could not serialize token cache: {e}") # Decide how to handle this, maybe clear cache or log extensively # session.pop("token_cache", None) # Option: Clear on serialization failure @@ -82,7 +83,7 @@ def get_valid_access_token(scopes=None): Returns the access token string or None if refresh failed or user not logged in. 
""" if "user" not in session: - print("get_valid_access_token: No user in session.") + debug_print("get_valid_access_token: No user in session.") return None # User not logged in required_scopes = scopes or SCOPE # Use default SCOPE if none provided @@ -105,37 +106,40 @@ def get_valid_access_token(scopes=None): break if not account: account = accounts[0] # Fallback to first account if no perfect match - print(f"Warning: Using first account found ({account.get('username')}) as home_account_id match failed.") + debug_print(f"Warning: Using first account found ({account.get('username')}) as home_account_id match failed.") if account: # Try to get token silently (checks cache, then uses refresh token) result = msal_app.acquire_token_silent(required_scopes, account=account) _save_cache(msal_app.token_cache) # Save cache state AFTER attempt + debug_print(f"User account name: {account.get('username')}") + debug_print(f"All roles assigned to user: {user_info.get('roles')}") + if result and "access_token" in result: # Optional: Check expiry if you want fine-grained control, but MSAL usually handles it # expires_in = result.get('expires_in', 0) # if expires_in > 60: # Check if token is valid for at least 60 seconds - # print("get_valid_access_token: Token acquired silently.") + # debug_print("get_valid_access_token: Token acquired silently.") # return result['access_token'] # else: - # print("get_valid_access_token: Silent token expired or about to expire.") + # debug_print("get_valid_access_token: Silent token expired or about to expire.") # # MSAL should have refreshed, but if not, fall through - print(f"get_valid_access_token: Token acquired silently for scopes: {required_scopes}") + debug_print(f"get_valid_access_token: Token acquired silently for scopes: {required_scopes}") return result['access_token'] else: # acquire_token_silent failed (e.g., refresh token expired, needs interaction) - print("get_valid_access_token: acquire_token_silent failed. Needs re-authentication.") + debug_print("get_valid_access_token: acquire_token_silent failed. Needs re-authentication.") # Log the specific error if available in result if result and ('error' in result or 'error_description' in result): - print(f"MSAL Error: {result.get('error')}, Description: {result.get('error_description')}") + debug_print(f"MSAL Error: {result.get('error')}, Description: {result.get('error_description')}") # Optionally clear session or specific keys if refresh consistently fails # session.pop("token_cache", None) # session.pop("user", None) return None # Indicate failure to get a valid token else: - print("get_valid_access_token: No matching account found in MSAL cache.") + debug_print("get_valid_access_token: No matching account found in MSAL cache.") # This might happen if the cache was cleared or the user logged in differently return None # Cannot acquire token without an account context @@ -146,7 +150,7 @@ def get_valid_access_token_for_plugins(scopes=None): Returns the access token string or None if refresh failed or user not logged in. 
""" if "user" not in session: - print("get_valid_access_token: No user in session.") + debug_print("get_valid_access_token: No user in session.") return { "error": "not_logged_in", "message": "User is not logged in.", @@ -174,10 +178,10 @@ def get_valid_access_token_for_plugins(scopes=None): break if not account: account = accounts[0] # Fallback to first account if no perfect match - print(f"Warning: Using first account found ({account.get('username')}) as home_account_id match failed.") + debug_print(f"Warning: Using first account found ({account.get('username')}) as home_account_id match failed.") if not account: - print("get_valid_access_token: No matching account found in MSAL cache.") + debug_print("get_valid_access_token: No matching account found in MSAL cache.") return { "error": "no_account", "message": "No matching account found in MSAL cache.", @@ -189,13 +193,13 @@ def get_valid_access_token_for_plugins(scopes=None): _save_cache(msal_app.token_cache) if result and "access_token" in result: - print(f"get_valid_access_token: Token acquired silently for scopes: {required_scopes}") + debug_print(f"get_valid_access_token: Token acquired silently for scopes: {required_scopes}") return {"access_token": result['access_token']} # If we reach here, it means silent acquisition failed - print("get_valid_access_token: acquire_token_silent failed. Needs re-authentication or received invalid grants.") + debug_print("get_valid_access_token: acquire_token_silent failed. Needs re-authentication or received invalid grants.") if result is None: # Assume invalid grants or no token - print("result is None: get_valid_access_token: Consent required.") + debug_print("result is None: get_valid_access_token: Consent required.") host_url = request.host_url.rstrip('/') # Only enforce https if not localhost or 127.0.0.1 if not (host_url.startswith('http://localhost') or host_url.startswith('http://127.0.0.1')): @@ -216,7 +220,7 @@ def get_valid_access_token_for_plugins(scopes=None): error_code = result.get('error') if result else None error_desc = result.get('error_description') if result else None - print(f"MSAL Error: {error_code}, Description: {error_desc}") + debug_print(f"MSAL Error: {error_code}, Description: {error_desc}") if error_code == "invalid_grant" and error_desc and ("AADSTS65001" in error_desc or "consent_required" in error_desc): host_url = request.host_url.rstrip('/') @@ -245,7 +249,27 @@ def get_valid_access_token_for_plugins(scopes=None): def get_video_indexer_account_token(settings, video_id=None): """ - For ARM-based VideoIndexer accounts: + Get Video Indexer access token using managed identity authentication. + + This function authenticates with Azure Video Indexer using the App Service's + managed identity. The managed identity must have Contributor role on the + Video Indexer resource. + + Authentication flow: + 1. Acquire ARM access token using DefaultAzureCredential (managed identity) + 2. Call ARM generateAccessToken API to get Video Indexer access token + 3. 
Use Video Indexer access token for all API operations + """ + from functions_debug import debug_print + + debug_print(f"[VIDEO INDEXER AUTH] Starting token acquisition using managed identity for video_id: {video_id}") + debug_print(f"[VIDEO INDEXER AUTH] Azure environment: {AZURE_ENVIRONMENT}") + + return get_video_indexer_managed_identity_token(settings, video_id) + +def get_video_indexer_managed_identity_token(settings, video_id=None): + """ + For ARM-based VideoIndexer accounts using managed identity: 1) Acquire an ARM token with DefaultAzureCredential 2) POST to the ARM generateAccessToken endpoint 3) Return the account-level accessToken @@ -254,6 +278,7 @@ def get_video_indexer_account_token(settings, video_id=None): debug_print(f"[VIDEO INDEXER AUTH] Starting token acquisition for video_id: {video_id}") debug_print(f"[VIDEO INDEXER AUTH] Azure environment: {AZURE_ENVIRONMENT}") + debug_print(f"[VIDEO INDEXER AUTH] Using managed identity authentication") # 1) ARM token if AZURE_ENVIRONMENT == "usgovernment": @@ -270,7 +295,7 @@ def get_video_indexer_account_token(settings, video_id=None): debug_print(f"[VIDEO INDEXER AUTH] DefaultAzureCredential initialized successfully") arm_token = credential.get_token(arm_scope).token debug_print(f"[VIDEO INDEXER AUTH] ARM token acquired successfully (length: {len(arm_token) if arm_token else 0})") - print("[VIDEO] ARM token acquired", flush=True) + debug_print("[VIDEO] ARM token acquired", flush=True) except Exception as e: debug_print(f"[VIDEO INDEXER AUTH] ERROR acquiring ARM token: {str(e)}") raise @@ -337,7 +362,7 @@ def get_video_indexer_account_token(settings, video_id=None): raise ValueError("No accessToken found in ARM API response") debug_print(f"[VIDEO INDEXER AUTH] Account token acquired successfully (length: {len(ai)})") - print(f"[VIDEO] Account token acquired (len={len(ai)})", flush=True) + debug_print(f"[VIDEO] Account token acquired (len={len(ai)})", flush=True) return ai except requests.exceptions.RequestException as e: debug_print(f"[VIDEO INDEXER AUTH] ERROR in ARM API request: {str(e)}") @@ -368,7 +393,7 @@ def get_microsoft_entra_jwks(): jwks_response = requests.get(jwks_uri).json() JWKS_CACHE = {key['kid']: key for key in jwks_response['keys']} except requests.exceptions.RequestException as e: - print(f"Error fetching JWKS: {e}") + debug_print(f"Error fetching JWKS: {e}") return None return JWKS_CACHE @@ -423,7 +448,7 @@ def accesstoken_required(f): @wraps(f) def decorated_function(*args, **kwargs): - print("accesstoken_required") + debug_print("accesstoken_required") auth_header = request.headers.get('Authorization') if not auth_header: @@ -443,7 +468,7 @@ def decorated_function(*args, **kwargs): if not roles or "ExternalApi" not in roles: return jsonify({"message": "Forbidden: ExternalApi role required"}), 403 - print("User is valid") + debug_print("User is valid") # You can now access claims from `data`, e.g., data['sub'], data['name'], data['roles'] #kwargs['user_claims'] = data # Pass claims to the decorated function # NOT NEEDED FOR NOW @@ -460,10 +485,10 @@ def decorated_function(*args, **kwargs): ) or request.path.startswith('/api/') if is_api_request: - print(f"API request to {request.path} blocked (401 Unauthorized). No valid session.") + debug_print(f"API request to {request.path} blocked (401 Unauthorized). No valid session.") return jsonify({"error": "Unauthorized", "message": "Authentication required"}), 401 else: - print(f"Browser request to {request.path} redirected ta login. 
No valid session.") + debug_print(f"Browser request to {request.path} redirected ta login. No valid session.") # Get settings from database, with environment variable fallback from functions_settings import get_settings settings = get_settings() @@ -483,6 +508,53 @@ def decorated_function(*args, **kwargs): return f(*args, **kwargs) return decorated_function +def check_user_access_status(user_id): + """ + Check if user access is currently allowed based on Control Center settings. + Returns (is_allowed: bool, reason: str) + """ + try: + from functions_settings import get_user_settings + user_settings = get_user_settings(user_id) + + access_settings = user_settings.get('settings', {}).get('access', {}) + status = access_settings.get('status', 'allow') + + if status == 'allow': + return True, None + + if status == 'deny': + datetime_to_allow = access_settings.get('datetime_to_allow') + if datetime_to_allow: + try: + # Check if time-based restriction has expired + allow_time = datetime.fromisoformat(datetime_to_allow.replace('Z', '+00:00')) + current_time = datetime.now(timezone.utc) + + if current_time >= allow_time: + # Time-based restriction has expired, automatically restore access + from functions_settings import update_user_settings + update_user_settings(user_id, { + 'access': { + 'status': 'allow', + 'datetime_to_allow': None + } + }) + return True, None + else: + return False, f"Access denied until {datetime_to_allow}" + except ValueError: + # Invalid datetime format, treat as permanent deny + return False, "Access denied by administrator" + else: + return False, "Access denied by administrator" + + return True, None # Default to allow if status is unknown + + except Exception as e: + debug_print(f"Error checking user access status: {e}") + return True, None # Default to allow on error to prevent lockouts + def user_required(f): @wraps(f) def decorated_function(*args, **kwargs): @@ -492,6 +564,85 @@ def decorated_function(*args, **kwargs): return jsonify({"error": "Forbidden", "message": "Insufficient permissions (User/Admin role required)"}), 403 else: return "Forbidden", 403 + + # Check access control restrictions (admins bypass access control) + if 'Admin' not in user.get('roles', []): + user_id = user.get('oid') or user.get('sub') + if user_id: + is_allowed, reason = check_user_access_status(user_id) + if not is_allowed: + if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html or request.path.startswith('/api/'): + return jsonify({"error": "Access Denied", "message": reason}), 403 + else: + return f"Access Denied: {reason}", 403 + + return f(*args, **kwargs) + return decorated_function + +def file_upload_required(f): + """ + Decorator to check if user is allowed to upload files to their personal workspace. + Should be used in addition to @login_required and @user_required. 
+ """ + @wraps(f) + def decorated_function(*args, **kwargs): + user = session.get('user', {}) + + # Admins bypass file upload restrictions + if 'Admin' in user.get('roles', []): + return f(*args, **kwargs) + + user_id = user.get('oid') or user.get('sub') + if user_id: + try: + from functions_settings import get_user_settings + user_settings = get_user_settings(user_id) + + file_upload_settings = user_settings.get('settings', {}).get('file_uploads', {}) + status = file_upload_settings.get('status', 'allow') + + if status == 'deny': + datetime_to_allow = file_upload_settings.get('datetime_to_allow') + if datetime_to_allow: + try: + # Check if time-based restriction has expired + allow_time = datetime.fromisoformat(datetime_to_allow.replace('Z', '+00:00')) + current_time = datetime.now(timezone.utc) + + if current_time >= allow_time: + # Time-based restriction has expired, automatically restore access + from functions_settings import update_user_settings + update_user_settings(user_id, { + 'file_uploads': { + 'status': 'allow', + 'datetime_to_allow': None + } + }) + return f(*args, **kwargs) # Allow the upload + else: + reason = f"File uploads to personal workspace are disabled until {datetime_to_allow}" + if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html or request.path.startswith('/api/'): + return jsonify({"error": "File Upload Denied", "message": reason}), 403 + else: + return f"File Upload Denied: {reason}", 403 + except ValueError: + # Invalid datetime format, treat as permanent deny + reason = "File uploads to personal workspace are disabled by administrator" + if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html or request.path.startswith('/api/'): + return jsonify({"error": "File Upload Denied", "message": reason}), 403 + else: + return f"File Upload Denied: {reason}", 403 + else: + # Permanent deny + reason = "File uploads to personal workspace are disabled by administrator" + if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html or request.path.startswith('/api/'): + return jsonify({"error": "File Upload Denied", "message": reason}), 403 + else: + return f"File Upload Denied: {reason}", 403 + except Exception as e: + debug_print(f"Error checking file upload permissions: {e}") + # Default to allow on error to prevent breaking functionality + return f(*args, **kwargs) return decorated_function @@ -514,14 +665,30 @@ def decorated_function(*args, **kwargs): settings = get_settings() require_member_of_feedback_admin = settings.get("require_member_of_feedback_admin", False) + has_feedback_admin_role = 'roles' in user and 'FeedbackAdmin' in user['roles'] + has_admin_role = 'roles' in user and 'Admin' in user['roles'] + + # If requirement is enabled, only FeedbackAdmin role grants access if require_member_of_feedback_admin: - if 'roles' not in user or 'FeedbackAdmin' not in user['roles']: - is_api_request = (request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html) or request.path.startswith('/api/') - if is_api_request: - return jsonify({"error": "Forbidden", "message": "Insufficient permissions (FeedbackAdmin role required)"}), 403 - else: - return "Forbidden: FeedbackAdmin role required", 403 - return f(*args, **kwargs) + if has_feedback_admin_role: + return f(*args, **kwargs) + else: + is_api_request = (request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html) or request.path.startswith('/api/') + if is_api_request: + return 
jsonify({"error": "Forbidden", "message": "Insufficient permissions (FeedbackAdmin role required)"}), 403 + else: + return "Forbidden: FeedbackAdmin role required", 403 + + # If requirement is not enabled, only regular admins can access + if has_admin_role: + return f(*args, **kwargs) + + # No access if neither condition is met + is_api_request = (request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html) or request.path.startswith('/api/') + if is_api_request: + return jsonify({"error": "Forbidden", "message": "Insufficient permissions"}), 403 + else: + return "Forbidden", 403 return decorated_function def safety_violation_admin_required(f): @@ -531,16 +698,102 @@ def decorated_function(*args, **kwargs): settings = get_settings() require_member_of_safety_violation_admin = settings.get("require_member_of_safety_violation_admin", False) + has_safety_admin_role = 'roles' in user and 'SafetyViolationAdmin' in user['roles'] + has_admin_role = 'roles' in user and 'Admin' in user['roles'] + + # If requirement is enabled, only SafetyViolationAdmin role grants access if require_member_of_safety_violation_admin: - if 'roles' not in user or 'SafetyViolationAdmin' not in user['roles']: + if has_safety_admin_role: + return f(*args, **kwargs) + else: is_api_request = (request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html) or request.path.startswith('/api/') if is_api_request: return jsonify({"error": "Forbidden", "message": "Insufficient permissions (SafetyViolationAdmin role required)"}), 403 else: return "Forbidden: SafetyViolationAdmin role required", 403 - return f(*args, **kwargs) + + # If requirement is not enabled, only regular admins can access + if has_admin_role: + return f(*args, **kwargs) + + # No access if neither condition is met + is_api_request = (request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html) or request.path.startswith('/api/') + if is_api_request: + return jsonify({"error": "Forbidden", "message": "Insufficient permissions"}), 403 + else: + return "Forbidden", 403 return decorated_function +def control_center_required(access_level='admin'): + """ + Unified Control Center access control decorator. 
+ + Args: + access_level: 'admin' for full admin access, 'dashboard' for dashboard-only access + + Access logic when require_member_of_control_center_admin is ENABLED: + - ControlCenterAdmin role → Full access to everything (admin + dashboard) + - ControlCenterDashboardReader role → Dashboard access only (if that setting is also enabled) + - Regular Admin role → NO access (must have ControlCenterAdmin) + - ControlCenterAdmin role is REQUIRED - having it without the setting enabled does nothing + + Access logic when require_member_of_control_center_admin is DISABLED (default): + - Regular Admin role → Full access to dashboard + management + activity logs + - ControlCenterAdmin role → IGNORED (role feature not enabled) + - ControlCenterDashboardReader role → Dashboard access only (if that setting is enabled) + - Non-admins → NO access + """ + def decorator(f): + @wraps(f) + def decorated_function(*args, **kwargs): + user = session.get('user', {}) + settings = get_settings() + require_member_of_control_center_admin = settings.get("require_member_of_control_center_admin", False) + require_member_of_control_center_dashboard_reader = settings.get("require_member_of_control_center_dashboard_reader", False) + + has_control_center_admin_role = 'roles' in user and 'ControlCenterAdmin' in user['roles'] + has_dashboard_reader_role = 'roles' in user and 'ControlCenterDashboardReader' in user['roles'] + has_regular_admin_role = 'roles' in user and 'Admin' in user['roles'] + + # Check if ControlCenterAdmin role requirement is enforced + if require_member_of_control_center_admin: + # ControlCenterAdmin role is REQUIRED for access + # Only ControlCenterAdmin role grants full access + if has_control_center_admin_role: + return f(*args, **kwargs) + + # For dashboard access, check if DashboardReader role grants access + if access_level == 'dashboard': + if require_member_of_control_center_dashboard_reader and has_dashboard_reader_role: + return f(*args, **kwargs) + + # User doesn't have ControlCenterAdmin role, deny access + # Note: Regular Admin role does NOT grant access when this setting is enabled + is_api_request = (request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html) or request.path.startswith('/api/') + if is_api_request: + return jsonify({"error": "Forbidden", "message": "Insufficient permissions (ControlCenterAdmin role required)"}), 403 + else: + return "Forbidden: ControlCenterAdmin role required", 403 + + # ControlCenterAdmin requirement is NOT enforced (default behavior) + # Only regular Admin role grants access - ControlCenterAdmin role is IGNORED + if has_regular_admin_role: + return f(*args, **kwargs) + + # For dashboard-only access, check if DashboardReader role is enabled and user has it + if access_level == 'dashboard': + if require_member_of_control_center_dashboard_reader and has_dashboard_reader_role: + return f(*args, **kwargs) + + # User is not an admin and doesn't have special roles - deny access + is_api_request = (request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html) or request.path.startswith('/api/') + if is_api_request: + return jsonify({"error": "Forbidden", "message": "Insufficient permissions (Admin role required)"}), 403 + else: + return "Forbidden: Admin role required", 403 + return decorated_function + return decorator + def create_group_role_required(f): @wraps(f) def decorated_function(*args, **kwargs): @@ -598,7 +851,7 @@ def get_user_profile_image(): """ token = get_valid_access_token() if not token: - 
print("get_user_profile_image: Could not acquire access token") + debug_print("get_user_profile_image: Could not acquire access token") return None # Determine the correct Graph endpoint based on Azure environment @@ -629,15 +882,15 @@ def get_user_profile_image(): elif response.status_code == 404: # User has no profile image - print("get_user_profile_image: User has no profile image") + debug_print("get_user_profile_image: User has no profile image") return None else: - print(f"get_user_profile_image: Failed to fetch profile image. Status: {response.status_code}") + debug_print(f"get_user_profile_image: Failed to fetch profile image. Status: {response.status_code}") return None except requests.exceptions.RequestException as e: - print(f"get_user_profile_image: Request failed: {e}") + debug_print(f"get_user_profile_image: Request failed: {e}") return None except Exception as e: - print(f"get_user_profile_image: Unexpected error: {e}") + debug_print(f"get_user_profile_image: Unexpected error: {e}") return None diff --git a/application/single_app/functions_chat.py b/application/single_app/functions_chat.py index ad55da18..e1ffcd7a 100644 --- a/application/single_app/functions_chat.py +++ b/application/single_app/functions_chat.py @@ -26,7 +26,7 @@ def load_user_kernel(user_id, redis_client): ) try: kernel_state = json.loads(kernel_state_json) - log_event(f"[SK Loader][DEBUG] Loaded kernel state from Redis for user {user_id}.") + log_event(f"[SK Loader] Loaded kernel state from Redis for user {user_id}.") kernel = Kernel() # Restore kernel config if possible kernel_config = kernel_state.get('kernel_config') @@ -154,7 +154,7 @@ def save_user_kernel(user_id, kernel, kernel_agents, redis_client): } redis_client.set(f"sk:state:{user_id}", json.dumps(state, default=str)) log_event( - f"[SK Loader][DEBUG] Saved kernel state snapshot to Redis for user {user_id}.", + f"[SK Loader] Saved kernel state snapshot to Redis for user {user_id}.", extra={ "user_id": user_id, 'services': kernel_services, @@ -171,3 +171,109 @@ def save_user_kernel(user_id, kernel, kernel_agents, redis_client): f"[SK Loader] Error saving kernel state to Redis: {e}", level=logging.ERROR ) + +def sort_messages_by_thread(messages): + """ + Sorts messages based on the thread chain (linked list via thread_id and previous_thread_id). + Legacy messages (without thread_id) are placed first, sorted by timestamp. + Threaded messages are appended, following the chain based on the EARLIEST timestamp + for each thread_id (to handle retries correctly where newer timestamps shouldn't affect order). 
+ """ + if not messages: + return [] + + # Helper function to get thread_id from metadata + def get_thread_id(msg): + return msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + + def get_previous_thread_id(msg): + return msg.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + + # Separate legacy and threaded messages + legacy_msgs = [m for m in messages if not get_thread_id(m)] + threaded_msgs = [m for m in messages if get_thread_id(m)] + + print(f"[SORT] Total messages: {len(messages)}, Legacy: {len(legacy_msgs)}, Threaded: {len(threaded_msgs)}") + + # Sort legacy by timestamp + legacy_msgs.sort(key=lambda x: x.get('timestamp', '')) + + if not threaded_msgs: + return legacy_msgs + + # Build map tracking the EARLIEST timestamp for each thread_id (handles retries) + earliest_timestamp_by_thread = {} + thread_ids_seen = set() + for m in threaded_msgs: + tid = get_thread_id(m) + thread_ids_seen.add(tid) + timestamp = m.get('timestamp', '') + if tid not in earliest_timestamp_by_thread or timestamp < earliest_timestamp_by_thread[tid]: + earliest_timestamp_by_thread[tid] = timestamp + + print(f"[SORT] Earliest timestamp by thread_id:") + for tid, ts in earliest_timestamp_by_thread.items(): + print(f" {tid}: {ts}") + + # Group messages by thread_id + messages_by_thread = {} + for m in threaded_msgs: + tid = get_thread_id(m) + if tid not in messages_by_thread: + messages_by_thread[tid] = [] + messages_by_thread[tid].append(m) + + # Build children map at the thread_id level (not message level) + # Maps parent thread_id -> list of child thread_ids + children_thread_map = {} + for tid in thread_ids_seen: + # Get any message from this thread to check its previous_thread_id + sample_msg = messages_by_thread[tid][0] + prev = get_previous_thread_id(sample_msg) + if prev: + if prev not in children_thread_map: + children_thread_map[prev] = [] + if tid not in children_thread_map[prev]: # Avoid duplicates + children_thread_map[prev].append(tid) + + print(f"[SORT] Children thread map: {children_thread_map}") + + # Find root thread_ids: thread_ids whose previous_thread_id is None OR not in the current set + root_thread_ids = [] + for tid in thread_ids_seen: + sample_msg = messages_by_thread[tid][0] + prev = get_previous_thread_id(sample_msg) + if not prev or prev not in thread_ids_seen: + root_thread_ids.append(tid) + + print(f"[SORT] Found {len(root_thread_ids)} root thread_ids: {root_thread_ids}") + + # Sort root thread_ids by the EARLIEST timestamp to maintain order even after retries + root_thread_ids.sort(key=lambda tid: earliest_timestamp_by_thread.get(tid, '')) + + print(f"[SORT] After sorting root thread_ids by earliest timestamp:") + for i, tid in enumerate(root_thread_ids): + earliest = earliest_timestamp_by_thread.get(tid) + print(f" {i+1}. 
thread_id={tid}, earliest={earliest}") + + ordered_threaded = [] + + def traverse_thread(thread_id): + """Traverse all messages in a thread, then traverse child threads""" + # Add all messages from this thread (sorted by timestamp within the thread) + thread_messages = messages_by_thread.get(thread_id, []) + thread_messages_sorted = sorted(thread_messages, key=lambda x: x.get('timestamp', '')) + ordered_threaded.extend(thread_messages_sorted) + + # Then traverse child threads + if thread_id in children_thread_map: + child_thread_ids = children_thread_map[thread_id] + # Sort child thread_ids by their earliest timestamp + child_thread_ids.sort(key=lambda tid: earliest_timestamp_by_thread.get(tid, '')) + for child_tid in child_thread_ids: + traverse_thread(child_tid) + + for root_tid in root_thread_ids: + traverse_thread(root_tid) + + return legacy_msgs + ordered_threaded diff --git a/application/single_app/functions_content.py b/application/single_app/functions_content.py index ffa5c559..376d23f4 100644 --- a/application/single_app/functions_content.py +++ b/application/single_app/functions_content.py @@ -22,12 +22,12 @@ def extract_content_with_azure_di(file_path): document_intelligence_client = CLIENTS['document_intelligence_client'] # Ensure CLIENTS is populated # Debug logging for troubleshooting - debug_print(f"[DEBUG] Starting Azure DI extraction for: {os.path.basename(file_path)}") - debug_print(f"[DEBUG] AZURE_ENVIRONMENT: {AZURE_ENVIRONMENT}") + debug_print(f"Starting Azure DI extraction for: {os.path.basename(file_path)}") + debug_print(f"AZURE_ENVIRONMENT: {AZURE_ENVIRONMENT}") if AZURE_ENVIRONMENT in ("usgovernment", "custom"): # Required format for Document Intelligence API version 2024-11-30 - debug_print("[DEBUG] Using US Government/Custom environment with base64Source") + debug_print("Using US Government/Custom environment with base64Source") with open(file_path, 'rb') as f: file_bytes = f.read() base64_source = base64.b64encode(file_bytes).decode('utf-8') @@ -38,9 +38,9 @@ def extract_content_with_azure_di(file_path): model_id="prebuilt-read", body=analyze_request ) - debug_print("[DEBUG] Successfully started analysis with base64Source") + debug_print("Successfully started analysis with base64Source") else: - debug_print("[DEBUG] Using Public cloud environment") + debug_print("Using Public cloud environment") with open(file_path, 'rb') as f: # For stable API 1.0.2, the file needs to be passed as part of the body file_content = f.read() @@ -53,9 +53,9 @@ def extract_content_with_azure_di(file_path): body=file_content, content_type="application/pdf" ) - debug_print("[DEBUG] Successfully started analysis with body as bytes") + debug_print("Successfully started analysis with body as bytes") except Exception as e1: - debug_print(f"[DEBUG] Method 1 failed: {e1}") + debug_print(f"Method 1 failed: {e1}") try: # Method 2: Use base64 format for consistency @@ -65,7 +65,7 @@ def extract_content_with_azure_di(file_path): model_id="prebuilt-read", body=analyze_request ) - debug_print("[DEBUG] Successfully started analysis with base64Source in body") + debug_print("Successfully started analysis with base64Source in body") except Exception as e2: debug_print(f"[ERROR] Both methods failed. 
Method 1: {e1}, Method 2: {e2}") raise e1 @@ -172,7 +172,7 @@ def extract_table_file(file_path, file_ext): try: if file_ext == '.csv': df = pandas.read_csv(file_path) - elif file_ext in ['.xls', '.xlsx']: + elif file_ext in ['.xls', '.xlsx', '.xlsm']: df = pandas.read_excel(file_path) else: raise ValueError("Unsupported file extension for table extraction.") @@ -362,7 +362,17 @@ def generate_embedding( ) embedding = response.data[0].embedding - return embedding + + # Capture token usage for embedding tracking + token_usage = None + if hasattr(response, 'usage') and response.usage: + token_usage = { + 'prompt_tokens': response.usage.prompt_tokens, + 'total_tokens': response.usage.total_tokens, + 'model_deployment_name': embedding_model + } + + return embedding, token_usage except RateLimitError as e: retries += 1 diff --git a/application/single_app/functions_control_center.py b/application/single_app/functions_control_center.py new file mode 100644 index 00000000..9337f408 --- /dev/null +++ b/application/single_app/functions_control_center.py @@ -0,0 +1,138 @@ +# functions_control_center.py +""" +Functions for Control Center operations including scheduled auto-refresh. +Version: 0.237.004 +""" + +from datetime import datetime, timezone, timedelta +from config import debug_print, cosmos_user_settings_container, cosmos_groups_container +from functions_settings import get_settings, update_settings +from functions_appinsights import log_event + + +def execute_control_center_refresh(manual_execution=False): + """ + Execute Control Center data refresh operation. + Refreshes user and group metrics data. + + Args: + manual_execution: True if triggered manually, False if scheduled + + Returns: + dict: Results containing success status and refresh counts + """ + results = { + 'success': True, + 'refreshed_users': 0, + 'failed_users': 0, + 'refreshed_groups': 0, + 'failed_groups': 0, + 'error': None, + 'manual_execution': manual_execution + } + + try: + debug_print(f"🔄 [AUTO-REFRESH] Starting Control Center {'manual' if manual_execution else 'scheduled'} refresh...") + + # Import enhance functions from route module + from route_backend_control_center import enhance_user_with_activity, enhance_group_with_activity + + # Get all users to refresh their metrics + debug_print("🔄 [AUTO-REFRESH] Querying all users...") + users_query = "SELECT c.id, c.email, c.display_name, c.lastUpdated, c.settings FROM c" + all_users = list(cosmos_user_settings_container.query_items( + query=users_query, + enable_cross_partition_query=True + )) + debug_print(f"🔄 [AUTO-REFRESH] Found {len(all_users)} users to process") + + # Refresh metrics for each user + for user in all_users: + try: + user_id = user.get('id') + debug_print(f"🔄 [AUTO-REFRESH] Processing user {user_id}") + + # Force refresh of metrics for this user + enhanced_user = enhance_user_with_activity(user, force_refresh=True) + results['refreshed_users'] += 1 + + except Exception as user_error: + results['failed_users'] += 1 + debug_print(f"❌ [AUTO-REFRESH] Failed to refresh user {user.get('id')}: {user_error}") + + debug_print(f"🔄 [AUTO-REFRESH] User refresh completed. 
Refreshed: {results['refreshed_users']}, Failed: {results['failed_users']}") + + # Refresh metrics for all groups + debug_print("🔄 [AUTO-REFRESH] Starting group refresh...") + + try: + groups_query = "SELECT * FROM c" + all_groups = list(cosmos_groups_container.query_items( + query=groups_query, + enable_cross_partition_query=True + )) + debug_print(f"🔄 [AUTO-REFRESH] Found {len(all_groups)} groups to process") + + # Refresh metrics for each group + for group in all_groups: + try: + group_id = group.get('id') + debug_print(f"🔄 [AUTO-REFRESH] Processing group {group_id}") + + # Force refresh of metrics for this group + enhanced_group = enhance_group_with_activity(group, force_refresh=True) + results['refreshed_groups'] += 1 + + except Exception as group_error: + results['failed_groups'] += 1 + debug_print(f"❌ [AUTO-REFRESH] Failed to refresh group {group.get('id')}: {group_error}") + + except Exception as groups_error: + debug_print(f"❌ [AUTO-REFRESH] Error querying groups: {groups_error}") + + debug_print(f"🔄 [AUTO-REFRESH] Group refresh completed. Refreshed: {results['refreshed_groups']}, Failed: {results['failed_groups']}") + + # Update admin settings with refresh timestamp and calculate next run time + try: + settings = get_settings() + if settings: + current_time = datetime.now(timezone.utc) + settings['control_center_last_refresh'] = current_time.isoformat() + + # Calculate next scheduled auto-refresh time if enabled + if settings.get('control_center_auto_refresh_enabled', False): + execution_hour = settings.get('control_center_auto_refresh_hour', 2) + next_run = current_time.replace(hour=execution_hour, minute=0, second=0, microsecond=0) + if next_run <= current_time: + next_run += timedelta(days=1) + settings['control_center_auto_refresh_next_run'] = next_run.isoformat() + + update_success = update_settings(settings) + + if update_success: + debug_print("✅ [AUTO-REFRESH] Admin settings updated with refresh timestamp") + else: + debug_print("⚠️ [AUTO-REFRESH] Failed to update admin settings") + + except Exception as settings_error: + debug_print(f"❌ [AUTO-REFRESH] Admin settings update failed: {settings_error}") + + # Log the activity + log_event("control_center_refresh", { + "manual_execution": manual_execution, + "refreshed_users": results['refreshed_users'], + "failed_users": results['failed_users'], + "refreshed_groups": results['refreshed_groups'], + "failed_groups": results['failed_groups'] + }) + + debug_print(f"🎉 [AUTO-REFRESH] Refresh completed! Users: {results['refreshed_users']} refreshed, {results['failed_users']} failed. 
" + f"Groups: {results['refreshed_groups']} refreshed, {results['failed_groups']} failed") + + return results + + except Exception as e: + debug_print(f"💥 [AUTO-REFRESH] Error executing Control Center refresh: {e}") + results['success'] = False + results['error'] = str(e) + return results diff --git a/application/single_app/functions_conversation_metadata.py b/application/single_app/functions_conversation_metadata.py index 5924a877..262b0955 100644 --- a/application/single_app/functions_conversation_metadata.py +++ b/application/single_app/functions_conversation_metadata.py @@ -45,7 +45,7 @@ def collect_conversation_metadata(user_message, conversation_id, user_id, active document_scope=None, selected_document_id=None, model_deployment=None, hybrid_search_enabled=False, image_gen_enabled=False, selected_documents=None, - selected_agent=None, search_results=None, web_search_results=None, + selected_agent=None, selected_agent_details=None, search_results=None, web_search_results=None, conversation_item=None, additional_participants=None): """ Collect comprehensive metadata for a conversation based on the user's interaction. @@ -65,6 +65,7 @@ def collect_conversation_metadata(user_message, conversation_id, user_id, active search_results: Results from hybrid search conversation_item: Existing conversation item to update additional_participants: List of additional user IDs to include as participants + selected_agent_details: Detailed agent metadata (is_group, group_id, group_name) Returns: dict: Updated conversation metadata @@ -86,6 +87,25 @@ def collect_conversation_metadata(user_message, conversation_id, user_id, active if 'strict' not in conversation_item: conversation_item['strict'] = False + # Prepare agent-derived group context (used when agent is a group and no documents were used) + agent_primary_context = None + agent_primary_context_active = False + if selected_agent_details and selected_agent_details.get('is_group'): + group_id = selected_agent_details.get('group_id') + group_name = selected_agent_details.get('group_name') + + if group_id: + if not group_name: + group_info = find_group_by_id(group_id) + if group_info: + group_name = group_info.get('name') + agent_primary_context = { + "type": "primary", + "scope": "group", + "id": group_id, + "name": group_name or "Unknown Group" + } + # Process documents from search results first to determine primary context document_map = {} # Map of document_id -> {scope, chunks, classification} workspace_used = None # Track the first workspace used (becomes primary context) @@ -144,19 +164,30 @@ def collect_conversation_metadata(user_message, conversation_id, user_id, active "id": scope_id, "name": context_name } - # If no documents were used, we don't set a primary context yet - # This allows us to track conversations that only use model knowledge + # If no documents were used, fall back to agent-based primary context + if not primary_context and agent_primary_context: + primary_context = agent_primary_context + agent_primary_context_active = True # Update or add primary context only if we don't already have one existing_primary = next((ctx for ctx in conversation_item['context'] if ctx.get('type') == 'primary'), None) if primary_context: if existing_primary: - # Primary context already exists - check if this is the same workspace + # Primary context already exists - determine how to handle the new context if (existing_primary.get('scope') == primary_context.get('scope') and existing_primary.get('id') == primary_context.get('id')): # Same 
workspace - update existing primary context (e.g., refresh name) existing_primary.update(primary_context) debug_print(f"Updated existing primary context: {existing_primary}") + elif agent_primary_context_active: + # Promote the group agent context to become the new primary context + existing_primary.update({ + "scope": primary_context.get('scope'), + "id": primary_context.get('id'), + "name": primary_context.get('name') + }) + debug_print(f"Replaced existing primary context with agent group context: {existing_primary}") + primary_context = None else: # Different workspace - this should become a secondary context debug_print(f"Primary context already exists ({existing_primary.get('scope')}:{existing_primary.get('id')}), "f"treating new workspace ({primary_context.get('scope')}:{primary_context.get('id')}) as secondary") diff --git a/application/single_app/functions_debug.py b/application/single_app/functions_debug.py index 43a8e1f5..5cbf6a2e 100644 --- a/application/single_app/functions_debug.py +++ b/application/single_app/functions_debug.py @@ -1,22 +1,35 @@ # functions_debug.py +# +from app_settings_cache import get_settings_cache +from functions_settings import * -from functions_settings import get_settings - -def debug_print(message): +def debug_print(message, category="INFO", **kwargs): """ Print debug message only if debug logging is enabled in settings. Args: message (str): The debug message to print + category (str): Optional category for the debug message + **kwargs: Additional key-value pairs to include in debug output """ + #print(f"DEBUG_PRINT CALLED WITH MESSAGE: {message}") try: - settings = get_settings() - if settings and settings.get('enable_debug_logging', False): - print(f"DEBUG: {message}") + cache = get_settings_cache() + if cache.get('enable_debug_logging', False): + debug_msg = f"[DEBUG] [{category}]: {message}" + if kwargs: + kwargs_str = ", ".join(f"{k}={v}" for k, v in kwargs.items()) + debug_msg += f" ({kwargs_str})" + print(debug_msg) except Exception: - # If there's any error getting settings, don't print debug messages - # This prevents crashes in case of configuration issues - pass + settings = get_settings() + if settings.get('enable_debug_logging', False): + debug_msg = f"[DEBUG] [{category}]: {message}" + if kwargs: + kwargs_str = ", ".join(f"{k}={v}" for k, v in kwargs.items()) + debug_msg += f" ({kwargs_str})" + print(debug_msg) + def is_debug_enabled(): """ @@ -26,7 +39,8 @@ def is_debug_enabled(): bool: True if debug logging is enabled, False otherwise """ try: - settings = get_settings() - return settings and settings.get('enable_debug_logging', False) + cache = get_settings_cache() + print(f"IS_DEBUG_ENABLED: {cache.get('enable_debug_logging', False)}") + return cache and cache.get('enable_debug_logging', False) except Exception: return False \ No newline at end of file diff --git a/application/single_app/functions_documents.py b/application/single_app/functions_documents.py index de71cd60..9ae01a62 100644 --- a/application/single_app/functions_documents.py +++ b/application/single_app/functions_documents.py @@ -1,4 +1,4 @@ -# functions_documents.py +# functions_documents.py that has some changes I need to merge into Development from config import * from functions_content import * @@ -6,6 +6,8 @@ from functions_search import * from functions_logging import * from functions_authentication import * +from functions_debug import * +import azure.cognitiveservices.speech as speechsdk def allowed_file(filename, allowed_extensions=None): if not 
allowed_extensions: @@ -122,7 +124,9 @@ def create_document(file_name, user_id, document_id, num_file_chunks, status, gr "document_classification": "None", "type": "document_metadata", "user_id": user_id, - "shared_user_ids": [] + "shared_user_ids": [], + "embedding_tokens": 0, + "embedding_model_deployment_name": None } cosmos_container.upsert_item(document_metadata) @@ -220,6 +224,8 @@ def save_video_chunk( ): """ Saves one 30-second video chunk to the search index, with separate fields for transcript and OCR. + Video Indexer insights (keywords, labels, topics, audio effects, emotions, sentiments) are + already appended to page_text_content for searchability. The chunk_id is built from document_id and the integer second offset to ensure a valid key. """ from functions_debug import debug_print @@ -240,7 +246,14 @@ def save_video_chunk( # 1) generate embedding on the transcript text try: debug_print(f"[VIDEO CHUNK] Generating embedding for transcript text") - embedding = generate_embedding(page_text_content) + result = generate_embedding(page_text_content) + + # Handle both tuple (new) and single value (backward compatibility) + if isinstance(result, tuple): + embedding, _ = result # Ignore token_usage for now + else: + embedding = result + debug_print(f"[VIDEO CHUNK] Embedding generated successfully") print(f"[VideoChunk] EMBEDDING OK for {document_id}@{start_time}", flush=True) except Exception as e: @@ -373,7 +386,7 @@ def to_seconds(ts: str) -> float: debug_print(f"[VIDEO INDEXER] Configuration - Endpoint: {vi_ep}, Location: {vi_loc}, Account ID: {vi_acc}") - # Validate required settings + # Validate required settings for managed identity authentication required_settings = { "video_indexer_endpoint": vi_ep, "video_indexer_location": vi_loc, @@ -383,6 +396,8 @@ def to_seconds(ts: str) -> float: "video_indexer_account_name": settings.get("video_indexer_account_name") } + debug_print(f"[VIDEO INDEXER] Managed identity authentication requires: endpoint, location, account_id, resource_group, subscription_id, account_name") + missing_settings = [key for key, value in required_settings.items() if not value] if missing_settings: debug_print(f"[VIDEO INDEXER] ERROR: Missing required settings: {missing_settings}") @@ -405,14 +420,24 @@ def to_seconds(ts: str) -> float: # 2) Upload video to Indexer try: url = f"{vi_ep}/{vi_loc}/Accounts/{vi_acc}/Videos" - params = {"accessToken": token, "name": original_filename} + + # Use the access token in the URL parameters + headers = {} + # Request comprehensive indexing including audio transcript + params = { + "accessToken": token, + "name": original_filename, + "indexingPreset": "Default", # Includes video + audio insights + "streamingPreset": "NoStreaming" + } + debug_print(f"[VIDEO INDEXER] Using managed identity access token authentication") debug_print(f"[VIDEO INDEXER] Upload URL: {url}") debug_print(f"[VIDEO INDEXER] Upload params: {params}") debug_print(f"[VIDEO INDEXER] Starting file upload for: {original_filename}") with open(temp_file_path, "rb") as f: - resp = requests.post(url, params=params, files={"file": f}) + resp = requests.post(url, params=params, headers=headers, files={"file": f}) debug_print(f"[VIDEO INDEXER] Upload response status: {resp.status_code}") @@ -461,10 +486,14 @@ def to_seconds(ts: str) -> float: return 0 # 3) Poll until ready + # Don't use includeInsights parameter - it filters what's returned. We want everything. 
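(Aside on the embedding change earlier in this hunk: generate_embedding() now returns an (embedding, token_usage) tuple, and save_video_chunk unpacks it defensively so older call sites keep working. A minimal sketch of that backward-compatible pattern, assuming only what this diff shows; the sample text is arbitrary and the token_usage keys mirror the dict built in functions_content.py.)

```python
# Sketch only: stay compatible with both return shapes of generate_embedding():
# the bare vector (previous behaviour) and the (embedding, token_usage) tuple
# introduced in this diff.
from functions_content import generate_embedding

result = generate_embedding("sample chunk text")
if isinstance(result, tuple):
    embedding, token_usage = result        # new shape: usage metadata included
else:
    embedding, token_usage = result, None  # old shape: vector only

if token_usage:
    print(token_usage["prompt_tokens"],
          token_usage["total_tokens"],
          token_usage["model_deployment_name"])
```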
index_url = ( f"{vi_ep}/{vi_loc}/Accounts/{vi_acc}/Videos/{vid}/Index" - f"?accessToken={token}&includeInsights=Transcript&includeStreamingUrls=false" + f"?accessToken={token}" ) + poll_headers = {} + debug_print(f"[VIDEO INDEXER] Using managed identity access token for polling") + debug_print(f"[VIDEO INDEXER] Requesting full insights (no filtering)") debug_print(f"[VIDEO INDEXER] Index polling URL: {index_url}") debug_print(f"[VIDEO INDEXER] Starting processing polling for video ID: {vid}") @@ -477,7 +506,7 @@ def to_seconds(ts: str) -> float: debug_print(f"[VIDEO INDEXER] Polling attempt {poll_count}/{max_polls}") try: - r = requests.get(index_url) + r = requests.get(index_url, headers=poll_headers) debug_print(f"[VIDEO INDEXER] Poll response status: {r.status_code}") if r.status_code in (401, 404): @@ -540,14 +569,137 @@ def to_seconds(ts: str) -> float: # 4) Extract transcript & OCR debug_print(f"[VIDEO INDEXER] Starting insights extraction for video ID: {vid}") + debug_print(f"[VIDEO INDEXER] Extracting insights from completed video") insights = info.get("insights", {}) + if not insights: + debug_print(f"[VIDEO INDEXER] ERROR: No insights object in response") + debug_print(f"[VIDEO INDEXER] Response info keys: {list(info.keys())}") + return 0 + + # Get video duration from insights (primary) or info (fallback) + video_duration = insights.get("duration") or info.get("duration", "00:00:00") + video_duration_seconds = to_seconds(video_duration) if video_duration else 0 + debug_print(f"[VIDEO INDEXER] Video duration: {video_duration} ({video_duration_seconds} seconds)") + + # Log raw insights JSON for complete visibility (debug only) + import json + print(f"\n[VIDEO] ===== RAW INSIGHTS JSON =====", flush=True) + try: + insights_json = json.dumps(insights, indent=2, ensure_ascii=False) + # Truncate if too long (show first 10000 chars) + if len(insights_json) > 10000: + print(f"{insights_json[:10000]}\n... 
(truncated, total length: {len(insights_json)} chars)", flush=True) + else: + print(insights_json, flush=True) + except Exception as e: + print(f"[VIDEO] Could not serialize insights to JSON: {e}", flush=True) + print(f"[VIDEO] ===== END RAW INSIGHTS =====\n", flush=True) + + debug_print(f"[VIDEO INDEXER] Insights keys available: {list(insights.keys())}") + print(f"[VIDEO] Available insight types: {', '.join(list(insights.keys())[:15])}...", flush=True) + + # Debug: Show sample structures for all insight types + print(f"\n[VIDEO] ===== SAMPLE DATA STRUCTURES =====", flush=True) + + transcript_data = insights.get("transcript", []) + if transcript_data: + print(f"[VIDEO] TRANSCRIPT sample: {transcript_data[0]}", flush=True) + + ocr_data = insights.get("ocr", []) + if ocr_data: + print(f"[VIDEO] OCR sample: {ocr_data[0]}", flush=True) + + keywords_data_debug = insights.get("keywords", []) + if keywords_data_debug: + print(f"[VIDEO] KEYWORDS sample: {keywords_data_debug[0]}", flush=True) + + labels_data_debug = insights.get("labels", []) + if labels_data_debug: + debug_print(f"[VIDEO INDEXER] LABELS sample: {labels_data_debug[0]}") + + topics_data_debug = insights.get("topics", []) + if topics_data_debug: + debug_print(f"[VIDEO INDEXER] TOPICS sample: {topics_data_debug[0]}") + + audio_effects_data_debug = insights.get("audioEffects", []) + if audio_effects_data_debug: + debug_print(f"[VIDEO INDEXER] AUDIO_EFFECTS sample: {audio_effects_data_debug[0]}") + + emotions_data_debug = insights.get("emotions", []) + if emotions_data_debug: + debug_print(f"[VIDEO INDEXER] EMOTIONS sample: {emotions_data_debug[0]}") + + sentiments_data_debug = insights.get("sentiments", []) + if sentiments_data_debug: + debug_print(f"[VIDEO INDEXER] SENTIMENTS sample: {sentiments_data_debug[0]}") + + scenes_data_debug = insights.get("scenes", []) + if scenes_data_debug: + debug_print(f"[VIDEO INDEXER] SCENES sample: {scenes_data_debug[0]}") + + shots_data_debug = insights.get("shots", []) + if shots_data_debug: + debug_print(f"[VIDEO INDEXER] SHOTS sample: {shots_data_debug[0]}") + + faces_data_debug = insights.get("faces", []) + if faces_data_debug: + debug_print(f"[VIDEO INDEXER] FACES sample: {faces_data_debug[0]}") + + namedLocations_data_debug = insights.get("namedLocations", []) + if namedLocations_data_debug: + debug_print(f"[VIDEO INDEXER] NAMED_LOCATIONS sample: {namedLocations_data_debug[0]}") + + # Check for other potential label sources + brands_data_debug = insights.get("brands", []) + if brands_data_debug: + debug_print(f"[VIDEO INDEXER] BRANDS sample: {brands_data_debug[0]}") + + visualContentModeration_debug = insights.get("visualContentModeration", []) + if visualContentModeration_debug: + debug_print(f"[VIDEO INDEXER] VISUAL_MODERATION sample: {visualContentModeration_debug[0]}") + + # Show total counts for all available insights + print(f"[VIDEO] COUNTS:", flush=True) + for key in insights.keys(): + value = insights.get(key, []) + if isinstance(value, list): + print(f" {key}: {len(value)} items", flush=True) + + print(f"[VIDEO] ===== END SAMPLE DATA =====\n", flush=True) + transcript = insights.get("transcript", []) ocr_blocks = insights.get("ocr", []) + keywords_data = insights.get("keywords", []) + labels_data = insights.get("labels", []) + topics_data = insights.get("topics", []) + audio_effects_data = insights.get("audioEffects", []) + emotions_data = insights.get("emotions", []) + sentiments_data = insights.get("sentiments", []) + named_people_data = insights.get("namedPeople", []) + 
named_locations_data = insights.get("namedLocations", []) + speakers_data = insights.get("speakers", []) + detected_objects_data = insights.get("detectedObjects", []) debug_print(f"[VIDEO INDEXER] Transcript segments found: {len(transcript)}") debug_print(f"[VIDEO INDEXER] OCR blocks found: {len(ocr_blocks)}") + debug_print(f"[VIDEO INDEXER] Keywords found: {len(keywords_data)}") + debug_print(f"[VIDEO INDEXER] Labels found: {len(labels_data)}") + debug_print(f"[VIDEO INDEXER] Topics found: {len(topics_data)}") + debug_print(f"[VIDEO INDEXER] Audio effects found: {len(audio_effects_data)}") + debug_print(f"[VIDEO INDEXER] Emotions found: {len(emotions_data)}") + debug_print(f"[VIDEO INDEXER] Sentiments found: {len(sentiments_data)}") + debug_print(f"[VIDEO INDEXER] Named people found: {len(named_people_data)}") + debug_print(f"[VIDEO INDEXER] Named locations found: {len(named_locations_data)}") + debug_print(f"[VIDEO INDEXER] Speakers found: {len(speakers_data)}") + debug_print(f"[VIDEO INDEXER] Detected objects found: {len(detected_objects_data)}") + debug_print(f"[VIDEO INDEXER] Insights extracted - Transcript: {len(transcript)}, OCR: {len(ocr_blocks)}, Keywords: {len(keywords_data)}, Labels: {len(labels_data)}, Topics: {len(topics_data)}, Audio: {len(audio_effects_data)}, Emotions: {len(emotions_data)}, Sentiments: {len(sentiments_data)}, People: {len(named_people_data)}, Locations: {len(named_locations_data)}, Objects: {len(detected_objects_data)}") + + if len(transcript) == 0: + debug_print(f"[VIDEO INDEXER] WARNING: No transcript data available") + debug_print(f"[VIDEO INDEXER] Available insights keys: {list(insights.keys())}") + # Build context lists for transcript and OCR speech_context = [ {"text": seg["text"].strip(), "start": inst["start"]} for seg in transcript if seg.get("text", "").strip() @@ -558,45 +710,368 @@ def to_seconds(ts: str) -> float: for block in ocr_blocks if block.get("text", "").strip() for inst in block.get("instances", []) ] + + # Build context lists for additional insights + keywords_context = [ + {"text": kw.get("name", ""), "start": inst["start"]} + for kw in keywords_data if kw.get("name", "").strip() + for inst in kw.get("instances", []) + ] + labels_context = [ + {"text": label.get("name", ""), "start": inst["start"]} + for label in labels_data if label.get("name", "").strip() + for inst in label.get("instances", []) + ] + topics_context = [ + {"text": topic.get("name", ""), "start": inst["start"]} + for topic in topics_data if topic.get("name", "").strip() + for inst in topic.get("instances", []) + ] + audio_effects_context = [ + {"text": ae.get("audioEffectType", ""), "start": inst["start"]} + for ae in audio_effects_data if ae.get("audioEffectType", "").strip() + for inst in ae.get("instances", []) + ] + emotions_context = [ + {"text": emotion.get("type", ""), "start": inst["start"]} + for emotion in emotions_data if emotion.get("type", "").strip() + for inst in emotion.get("instances", []) + ] + sentiments_context = [ + {"text": sentiment.get("sentimentType", ""), "start": inst["start"]} + for sentiment in sentiments_data if sentiment.get("sentimentType", "").strip() + for inst in sentiment.get("instances", []) + ] + named_people_context = [ + {"text": person.get("name", ""), "start": inst["start"]} + for person in named_people_data if person.get("name", "").strip() + for inst in person.get("instances", []) + ] + named_locations_context = [ + {"text": location.get("name", ""), "start": inst["start"]} + for location in named_locations_data if 
location.get("name", "").strip() + for inst in location.get("instances", []) + ] + detected_objects_context = [ + {"text": obj.get("type", ""), "start": inst["start"]} + for obj in detected_objects_data if obj.get("type", "").strip() + for inst in obj.get("instances", []) + ] debug_print(f"[VIDEO INDEXER] Speech context items: {len(speech_context)}") debug_print(f"[VIDEO INDEXER] OCR context items: {len(ocr_context)}") + debug_print(f"[VIDEO INDEXER] Keywords context items: {len(keywords_context)}") + debug_print(f"[VIDEO INDEXER] Labels context items: {len(labels_context)}") + debug_print(f"[VIDEO INDEXER] Topics context items: {len(topics_context)}") + debug_print(f"[VIDEO INDEXER] Audio effects context items: {len(audio_effects_context)}") + debug_print(f"[VIDEO INDEXER] Emotions context items: {len(emotions_context)}") + debug_print(f"[VIDEO INDEXER] Sentiments context items: {len(sentiments_context)}") + debug_print(f"[VIDEO INDEXER] Named people context items: {len(named_people_context)}") + debug_print(f"[VIDEO INDEXER] Named locations context items: {len(named_locations_context)}") + debug_print(f"[VIDEO INDEXER] Detected objects context items: {len(detected_objects_context)}") + debug_print(f"[VIDEO INDEXER] Context built - Speech: {len(speech_context)}, OCR: {len(ocr_context)}, Keywords: {len(keywords_context)}, Labels: {len(labels_context)}, People: {len(named_people_context)}, Locations: {len(named_locations_context)}, Objects: {len(detected_objects_context)}") + + if len(speech_context) > 0: + debug_print(f"[VIDEO INDEXER] First speech item: {speech_context[0]}") + # Sort all contexts by timestamp speech_context.sort(key=lambda x: to_seconds(x["start"])) ocr_context.sort(key=lambda x: to_seconds(x["start"])) + keywords_context.sort(key=lambda x: to_seconds(x["start"])) + labels_context.sort(key=lambda x: to_seconds(x["start"])) + topics_context.sort(key=lambda x: to_seconds(x["start"])) + audio_effects_context.sort(key=lambda x: to_seconds(x["start"])) + emotions_context.sort(key=lambda x: to_seconds(x["start"])) + sentiments_context.sort(key=lambda x: to_seconds(x["start"])) + named_people_context.sort(key=lambda x: to_seconds(x["start"])) + named_locations_context.sort(key=lambda x: to_seconds(x["start"])) + detected_objects_context.sort(key=lambda x: to_seconds(x["start"])) debug_print(f"[VIDEO INDEXER] Starting 30-second chunk processing") + debug_print(f"[VIDEO INDEXER] Starting time-based chunk processing - Video duration: {video_duration_seconds}s") + debug_print(f"[VIDEO INDEXER] Available insights - Speech: {len(speech_context)}, OCR: {len(ocr_context)}, Keywords: {len(keywords_context)}, Labels: {len(labels_context)}") + + # Check if we have any content at all + total_insights = len(speech_context) + len(ocr_context) + len(keywords_context) + len(labels_context) + len(topics_context) + len(audio_effects_context) + len(emotions_context) + len(sentiments_context) + len(named_people_context) + len(named_locations_context) + len(detected_objects_context) + + if total_insights == 0 and video_duration_seconds == 0: + debug_print(f"[VIDEO INDEXER] ERROR: No insights and no duration information available") + update_callback(status="VIDEO: no data available") + return 0 + + # Use video duration to create time-based chunks, even without speech + if video_duration_seconds == 0: + debug_print(f"[VIDEO INDEXER] WARNING: No video duration available, estimating from insights") + # Estimate duration from the latest timestamp in any insight + max_timestamp = 0 + for context_list in 
[speech_context, ocr_context, keywords_context, labels_context, topics_context, audio_effects_context, emotions_context, sentiments_context, named_people_context, named_locations_context, detected_objects_context]: + if context_list: + max_ts = max(to_seconds(item["start"]) for item in context_list) + max_timestamp = max(max_timestamp, max_ts) + video_duration_seconds = max_timestamp + 30 # Add buffer + debug_print(f"[VIDEO INDEXER] Estimated duration: {video_duration_seconds}s") + + # Create chunks based on time intervals (30 seconds each) + num_chunks = int(video_duration_seconds / 30) + (1 if video_duration_seconds % 30 > 0 else 0) + debug_print(f"[VIDEO INDEXER] Will create {num_chunks} time-based chunks") total = 0 idx_s = 0 n_s = len(speech_context) idx_o = 0 n_o = len(ocr_context) + idx_kw = 0 + n_kw = len(keywords_context) + idx_lbl = 0 + n_lbl = len(labels_context) + idx_top = 0 + n_top = len(topics_context) + idx_ae = 0 + n_ae = len(audio_effects_context) + idx_emo = 0 + n_emo = len(emotions_context) + idx_sent = 0 + n_sent = len(sentiments_context) + idx_people = 0 + n_people = len(named_people_context) + idx_locations = 0 + n_locations = len(named_locations_context) + idx_objects = 0 + n_objects = len(detected_objects_context) + + # Process chunks in 30-second intervals based on video duration + for chunk_num in range(num_chunks): + window_start = chunk_num * 30.0 + window_end = min((chunk_num + 1) * 30.0, video_duration_seconds) + + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} window: {window_start}s to {window_end}s") - while idx_s < n_s: - window_start = to_seconds(speech_context[idx_s]["start"]) - window_end = window_start + 30.0 - + # Collect speech for this time window speech_lines = [] - while idx_s < n_s and to_seconds(speech_context[idx_s]["start"]) <= window_end: - speech_lines.append(speech_context[idx_s]["text"]) + while idx_s < n_s and to_seconds(speech_context[idx_s]["start"]) < window_end: + if to_seconds(speech_context[idx_s]["start"]) >= window_start: + speech_lines.append(speech_context[idx_s]["text"]) + idx_s += 1 + if idx_s < n_s and to_seconds(speech_context[idx_s]["start"]) >= window_end: + break + + # Reset idx_s if we went past window_end + while idx_s > 0 and idx_s < n_s and to_seconds(speech_context[idx_s]["start"]) >= window_end: + idx_s -= 1 + if idx_s < n_s and to_seconds(speech_context[idx_s]["start"]) < window_end: idx_s += 1 + + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} speech lines collected: {len(speech_lines)}") + # Collect OCR for this time window ocr_lines = [] - while idx_o < n_o and to_seconds(ocr_context[idx_o]["start"]) <= window_end: - ocr_lines.append(ocr_context[idx_o]["text"]) + while idx_o < n_o and to_seconds(ocr_context[idx_o]["start"]) < window_end: + if to_seconds(ocr_context[idx_o]["start"]) >= window_start: + ocr_lines.append(ocr_context[idx_o]["text"]) idx_o += 1 - - start_ts = speech_context[total]["start"] + if idx_o < n_o and to_seconds(ocr_context[idx_o]["start"]) >= window_end: + break + + while idx_o > 0 and idx_o < n_o and to_seconds(ocr_context[idx_o]["start"]) >= window_end: + idx_o -= 1 + if idx_o < n_o and to_seconds(ocr_context[idx_o]["start"]) < window_end: + idx_o += 1 + + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} OCR lines collected: {len(ocr_lines)}") + + # Collect keywords for this time window + chunk_keywords = [] + while idx_kw < n_kw and to_seconds(keywords_context[idx_kw]["start"]) < window_end: + if to_seconds(keywords_context[idx_kw]["start"]) >= window_start: + 
chunk_keywords.append(keywords_context[idx_kw]["text"]) + idx_kw += 1 + if idx_kw < n_kw and to_seconds(keywords_context[idx_kw]["start"]) >= window_end: + break + while idx_kw > 0 and idx_kw < n_kw and to_seconds(keywords_context[idx_kw]["start"]) >= window_end: + idx_kw -= 1 + if idx_kw < n_kw and to_seconds(keywords_context[idx_kw]["start"]) < window_end: + idx_kw += 1 + + # Collect labels for this time window + chunk_labels = [] + while idx_lbl < n_lbl and to_seconds(labels_context[idx_lbl]["start"]) < window_end: + if to_seconds(labels_context[idx_lbl]["start"]) >= window_start: + chunk_labels.append(labels_context[idx_lbl]["text"]) + idx_lbl += 1 + if idx_lbl < n_lbl and to_seconds(labels_context[idx_lbl]["start"]) >= window_end: + break + while idx_lbl > 0 and idx_lbl < n_lbl and to_seconds(labels_context[idx_lbl]["start"]) >= window_end: + idx_lbl -= 1 + if idx_lbl < n_lbl and to_seconds(labels_context[idx_lbl]["start"]) < window_end: + idx_lbl += 1 + + # Collect topics for this time window + chunk_topics = [] + while idx_top < n_top and to_seconds(topics_context[idx_top]["start"]) < window_end: + if to_seconds(topics_context[idx_top]["start"]) >= window_start: + chunk_topics.append(topics_context[idx_top]["text"]) + idx_top += 1 + if idx_top < n_top and to_seconds(topics_context[idx_top]["start"]) >= window_end: + break + while idx_top > 0 and idx_top < n_top and to_seconds(topics_context[idx_top]["start"]) >= window_end: + idx_top -= 1 + if idx_top < n_top and to_seconds(topics_context[idx_top]["start"]) < window_end: + idx_top += 1 + + # Collect audio effects for this time window + chunk_audio_effects = [] + while idx_ae < n_ae and to_seconds(audio_effects_context[idx_ae]["start"]) < window_end: + if to_seconds(audio_effects_context[idx_ae]["start"]) >= window_start: + chunk_audio_effects.append(audio_effects_context[idx_ae]["text"]) + idx_ae += 1 + if idx_ae < n_ae and to_seconds(audio_effects_context[idx_ae]["start"]) >= window_end: + break + while idx_ae > 0 and idx_ae < n_ae and to_seconds(audio_effects_context[idx_ae]["start"]) >= window_end: + idx_ae -= 1 + if idx_ae < n_ae and to_seconds(audio_effects_context[idx_ae]["start"]) < window_end: + idx_ae += 1 + + # Collect emotions for this time window + chunk_emotions = [] + while idx_emo < n_emo and to_seconds(emotions_context[idx_emo]["start"]) < window_end: + if to_seconds(emotions_context[idx_emo]["start"]) >= window_start: + chunk_emotions.append(emotions_context[idx_emo]["text"]) + idx_emo += 1 + if idx_emo < n_emo and to_seconds(emotions_context[idx_emo]["start"]) >= window_end: + break + while idx_emo > 0 and idx_emo < n_emo and to_seconds(emotions_context[idx_emo]["start"]) >= window_end: + idx_emo -= 1 + if idx_emo < n_emo and to_seconds(emotions_context[idx_emo]["start"]) < window_end: + idx_emo += 1 + + # Collect sentiments for this time window + chunk_sentiments = [] + while idx_sent < n_sent and to_seconds(sentiments_context[idx_sent]["start"]) < window_end: + if to_seconds(sentiments_context[idx_sent]["start"]) >= window_start: + chunk_sentiments.append(sentiments_context[idx_sent]["text"]) + idx_sent += 1 + if idx_sent < n_sent and to_seconds(sentiments_context[idx_sent]["start"]) >= window_end: + break + while idx_sent > 0 and idx_sent < n_sent and to_seconds(sentiments_context[idx_sent]["start"]) >= window_end: + idx_sent -= 1 + if idx_sent < n_sent and to_seconds(sentiments_context[idx_sent]["start"]) < window_end: + idx_sent += 1 + + # Collect named people for this time window + chunk_people = [] + while 
idx_people < n_people and to_seconds(named_people_context[idx_people]["start"]) < window_end: + if to_seconds(named_people_context[idx_people]["start"]) >= window_start: + chunk_people.append(named_people_context[idx_people]["text"]) + idx_people += 1 + if idx_people < n_people and to_seconds(named_people_context[idx_people]["start"]) >= window_end: + break + while idx_people > 0 and idx_people < n_people and to_seconds(named_people_context[idx_people]["start"]) >= window_end: + idx_people -= 1 + if idx_people < n_people and to_seconds(named_people_context[idx_people]["start"]) < window_end: + idx_people += 1 + + # Collect named locations for this time window + chunk_locations = [] + while idx_locations < n_locations and to_seconds(named_locations_context[idx_locations]["start"]) < window_end: + if to_seconds(named_locations_context[idx_locations]["start"]) >= window_start: + chunk_locations.append(named_locations_context[idx_locations]["text"]) + idx_locations += 1 + if idx_locations < n_locations and to_seconds(named_locations_context[idx_locations]["start"]) >= window_end: + break + while idx_locations > 0 and idx_locations < n_locations and to_seconds(named_locations_context[idx_locations]["start"]) >= window_end: + idx_locations -= 1 + if idx_locations < n_locations and to_seconds(named_locations_context[idx_locations]["start"]) < window_end: + idx_locations += 1 + + # Collect detected objects for this time window + chunk_objects = [] + while idx_objects < n_objects and to_seconds(detected_objects_context[idx_objects]["start"]) < window_end: + if to_seconds(detected_objects_context[idx_objects]["start"]) >= window_start: + chunk_objects.append(detected_objects_context[idx_objects]["text"]) + idx_objects += 1 + if idx_objects < n_objects and to_seconds(detected_objects_context[idx_objects]["start"]) >= window_end: + break + while idx_objects > 0 and idx_objects < n_objects and to_seconds(detected_objects_context[idx_objects]["start"]) >= window_end: + idx_objects -= 1 + if idx_objects < n_objects and to_seconds(detected_objects_context[idx_objects]["start"]) < window_end: + idx_objects += 1 + + # Format timestamp as HH:MM:SS + hours = int(window_start // 3600) + minutes = int((window_start % 3600) // 60) + seconds = int(window_start % 60) + start_ts = f"{hours:02d}:{minutes:02d}:{seconds:02d}.000" + chunk_text = " ".join(speech_lines).strip() ocr_text = " ".join(ocr_lines).strip() + + # Build enhanced chunk text with insights appended + if chunk_text: + # Has speech - append insights to it + insight_parts = [] + if chunk_keywords: + insight_parts.append(f"Keywords: {', '.join(chunk_keywords)}") + if chunk_labels: + insight_parts.append(f"Visual elements: {', '.join(chunk_labels)}") + if chunk_topics: + insight_parts.append(f"Topics: {', '.join(chunk_topics)}") + if chunk_audio_effects: + insight_parts.append(f"Audio: {', '.join(chunk_audio_effects)}") + if chunk_emotions: + insight_parts.append(f"Emotions: {', '.join(chunk_emotions)}") + if chunk_sentiments: + insight_parts.append(f"Sentiment: {', '.join(chunk_sentiments)}") + if chunk_people: + insight_parts.append(f"People: {', '.join(chunk_people)}") + if chunk_locations: + insight_parts.append(f"Locations: {', '.join(chunk_locations)}") + if chunk_objects: + insight_parts.append(f"Objects: {', '.join(chunk_objects)}") + + if insight_parts: + chunk_text = f"{chunk_text}\n\n{' | '.join(insight_parts)}" + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} enhanced with {len(insight_parts)} insight types") + else: + # No speech - 
build chunk text from other insights + insight_parts = [] + if ocr_text: + insight_parts.append(f"Visual text: {ocr_text}") + if chunk_keywords: + insight_parts.append(f"Keywords: {', '.join(chunk_keywords)}") + if chunk_labels: + insight_parts.append(f"Visual elements: {', '.join(chunk_labels)}") + if chunk_topics: + insight_parts.append(f"Topics: {', '.join(chunk_topics)}") + if chunk_audio_effects: + insight_parts.append(f"Audio: {', '.join(chunk_audio_effects)}") + if chunk_emotions: + insight_parts.append(f"Emotions: {', '.join(chunk_emotions)}") + if chunk_sentiments: + insight_parts.append(f"Sentiment: {', '.join(chunk_sentiments)}") + if chunk_people: + insight_parts.append(f"People: {', '.join(chunk_people)}") + if chunk_locations: + insight_parts.append(f"Locations: {', '.join(chunk_locations)}") + if chunk_objects: + insight_parts.append(f"Objects: {', '.join(chunk_objects)}") + + chunk_text = ". ".join(insight_parts) if insight_parts else "[No content detected]" + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} has no speech, using insights as text: {chunk_text[:100]}...") - debug_print(f"[VIDEO INDEXER] Processing chunk {total + 1} at timestamp {start_ts}") - debug_print(f"[VIDEO INDEXER] Chunk text length: {len(chunk_text)}, OCR text length: {len(ocr_text)}") + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} at timestamp {start_ts}") + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} text length: {len(chunk_text)}, OCR text length: {len(ocr_text)}") + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} insights - Keywords: {len(chunk_keywords)}, Labels: {len(chunk_labels)}, Topics: {len(chunk_topics)}, Audio: {len(chunk_audio_effects)}, Emotions: {len(chunk_emotions)}, Sentiments: {len(chunk_sentiments)}, People: {len(chunk_people)}, Locations: {len(chunk_locations)}, Objects: {len(chunk_objects)}") + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1}: timestamp={start_ts}, text_len={len(chunk_text)}, ocr_len={len(ocr_text)}, insights={len(chunk_keywords)}kw/{len(chunk_labels)}lbl/{len(chunk_topics)}top") + + # Skip truly empty chunks (no content at all) + if chunk_text == "[No content detected]" and not any([chunk_keywords, chunk_labels, chunk_topics, chunk_audio_effects, chunk_emotions, chunk_sentiments, chunk_people, chunk_locations, chunk_objects]): + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} is completely empty, skipping") + continue - update_callback(current_file_chunk=total+1, status=f"VIDEO: saving chunk @ {start_ts}") + update_callback(current_file_chunk=chunk_num+1, status=f"VIDEO: saving chunk @ {start_ts}") try: + debug_print(f"[VIDEO INDEXER] Calling save_video_chunk for chunk {chunk_num + 1}") save_video_chunk( page_text_content=chunk_text, ocr_chunk_text=ocr_text, @@ -606,12 +1081,14 @@ def to_seconds(ts: str) -> float: document_id=document_id, group_id=group_id ) - debug_print(f"[VIDEO INDEXER] Chunk {total + 1} saved successfully") + debug_print(f"[VIDEO INDEXER] Chunk {chunk_num + 1} saved successfully") + total += 1 except Exception as e: - debug_print(f"[VIDEO INDEXER] Failed to save chunk {total + 1}: {str(e)}") - print(f"[VIDEO] CHUNK SAVE ERROR for chunk {total + 1}: {e}", flush=True) - - total += 1 + debug_print(f"[VIDEO INDEXER] Failed to save chunk {chunk_num + 1}: {str(e)}") + import traceback + debug_print(f"[VIDEO INDEXER] Chunk save traceback: {traceback.format_exc()}") + + debug_print(f"[VIDEO INDEXER] Chunk processing complete - Total chunks saved: {total}") # Extract metadata if enabled and chunks were processed settings 
= get_settings() @@ -1024,7 +1501,7 @@ def save_chunks(page_text_content, page_number, file_name, user_id, document_id, try: #status = f"Generating embedding for page {page_number}" #update_document(document_id=document_id, user_id=user_id, status=status) - embedding = generate_embedding(page_text_content) + embedding, token_usage = generate_embedding(page_text_content) except Exception as e: print(f"Error generating embedding for page {page_number} of document {document_id}: {e}") raise @@ -1036,13 +1513,47 @@ def save_chunks(page_text_content, page_number, file_name, user_id, document_id, chunk_summary = "" author = [] title = "" + + # Check if this document has vision analysis and append it to chunk_text + vision_analysis = metadata.get('vision_analysis') + enhanced_chunk_text = page_text_content + + if vision_analysis: + debug_print(f"[SAVE_CHUNKS] Document {document_id} has vision analysis, appending to chunk_text") + # Format vision analysis as structured text for better searchability + vision_text_parts = [] + vision_text_parts.append("\n\n=== AI Vision Analysis ===") + vision_text_parts.append(f"Model: {vision_analysis.get('model', 'unknown')}") + + if vision_analysis.get('description'): + vision_text_parts.append(f"\nDescription: {vision_analysis['description']}") + + if vision_analysis.get('objects'): + objects_list = vision_analysis['objects'] + if isinstance(objects_list, list): + vision_text_parts.append(f"\nObjects Detected: {', '.join(objects_list)}") + else: + vision_text_parts.append(f"\nObjects Detected: {objects_list}") + + if vision_analysis.get('text'): + vision_text_parts.append(f"\nVisible Text: {vision_analysis['text']}") + + if vision_analysis.get('analysis'): + vision_text_parts.append(f"\nContextual Analysis: {vision_analysis['analysis']}") + + vision_text = "\n".join(vision_text_parts) + enhanced_chunk_text = page_text_content + vision_text + + debug_print(f"[SAVE_CHUNKS] Enhanced chunk_text length: {len(enhanced_chunk_text)} (original: {len(page_text_content)}, vision: {len(vision_text)})") + else: + debug_print(f"[SAVE_CHUNKS] No vision analysis found for document {document_id}") if is_public_workspace: chunk_document = { "id": chunk_id, "document_id": document_id, "chunk_id": str(page_number), - "chunk_text": page_text_content, + "chunk_text": enhanced_chunk_text, "embedding": embedding, "file_name": file_name, "chunk_keywords": chunk_keywords, @@ -1063,7 +1574,7 @@ def save_chunks(page_text_content, page_number, file_name, user_id, document_id, "id": chunk_id, "document_id": document_id, "chunk_id": str(page_number), - "chunk_text": page_text_content, + "chunk_text": enhanced_chunk_text, "embedding": embedding, "file_name": file_name, "chunk_keywords": chunk_keywords, @@ -1086,7 +1597,7 @@ def save_chunks(page_text_content, page_number, file_name, user_id, document_id, "id": chunk_id, "document_id": document_id, "chunk_id": str(page_number), - "chunk_text": page_text_content, + "chunk_text": enhanced_chunk_text, "embedding": embedding, "file_name": file_name, "chunk_keywords": chunk_keywords, @@ -1122,6 +1633,111 @@ def save_chunks(page_text_content, page_number, file_name, user_id, document_id, except Exception as e: print(f"Error uploading chunk document for document {document_id}: {e}") raise + + # Return token usage information for accumulation + return token_usage + +def get_document_metadata_for_citations(document_id, user_id=None, group_id=None, public_workspace_id=None): + """ + Retrieve keywords and abstract from a document for creating metadata 
citations. + Used to enhance search results with additional context from document metadata. + + Args: + document_id: The document's unique identifier + user_id: User ID (for personal documents) + group_id: Group ID (for group documents) + public_workspace_id: Public workspace ID (for public documents) + + Returns: + dict: Dictionary with 'keywords' and 'abstract' fields, or None if document not found + """ + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + # Determine the correct container + if is_public_workspace: + cosmos_container = cosmos_public_documents_container + elif is_group: + cosmos_container = cosmos_group_documents_container + else: + cosmos_container = cosmos_user_documents_container + + try: + # Read the document directly by ID + document_item = cosmos_container.read_item( + item=document_id, + partition_key=document_id + ) + + # Extract keywords and abstract + keywords = document_item.get('keywords', []) + abstract = document_item.get('abstract', '') + + # Return only if we have actual content + if keywords or abstract: + return { + 'keywords': keywords if keywords else [], + 'abstract': abstract if abstract else '', + 'file_name': document_item.get('file_name', 'Unknown') + } + + return None + + except Exception as e: + # Document not found or error reading - return None silently + # This is expected for documents without metadata + return None
def get_all_chunks(document_id, user_id, group_id=None, public_workspace_id=None): is_group = group_id is not None
@@ -1609,6 +2225,39 @@ def delete_document(user_id, document_id, group_id=None, public_workspace_id=Non item=document_id, partition_key=document_id ) + + # Log document deletion transaction before deletion + try: + from functions_activity_logging import log_document_deletion_transaction + + # Determine workspace type + if public_workspace_id: + workspace_type = 'public' + elif group_id: +
workspace_type = 'group' + else: + workspace_type = 'personal' + + # Extract file extension from filename + file_name = document_item.get('file_name', '') + file_ext = os.path.splitext(file_name)[-1].lower() if file_name else None + + # Log the deletion transaction with document metadata + log_document_deletion_transaction( + user_id=user_id, + document_id=document_id, + workspace_type=workspace_type, + file_name=file_name, + file_type=file_ext, + page_count=document_item.get('number_of_pages'), + version=document_item.get('version'), + group_id=group_id, + public_workspace_id=public_workspace_id, + document_metadata=document_item # Store full metadata + ) + except Exception as log_error: + print(f"⚠️ Warning: Failed to log document deletion transaction: {log_error}") + # Don't fail the deletion if logging fails if is_public_workspace: if document_item.get('public_workspace_id') != public_workspace_id: @@ -1624,58 +2273,8 @@ def delete_document(user_id, document_id, group_id=None, public_workspace_id=Non # Get the file name from the document to use for blob deletion file_name = document_item.get('file_name') - file_ext = os.path.splitext(file_name)[1].lower() if file_name else None - # First try to delete video from Video Indexer if applicable - if file_ext in ('.mp4', '.mov', '.avi', '.mkv', '.flv'): - debug_print(f"[VIDEO INDEXER DELETE] Video file detected, attempting Video Indexer deletion for document: {document_id}") - try: - settings = get_settings() - vi_ep = settings.get("video_indexer_endpoint") - vi_loc = settings.get("video_indexer_location") - vi_acc = settings.get("video_indexer_account_id") - - debug_print(f"[VIDEO INDEXER DELETE] Configuration - Endpoint: {vi_ep}, Location: {vi_loc}, Account ID: {vi_acc}") - - if not all([vi_ep, vi_loc, vi_acc]): - debug_print(f"[VIDEO INDEXER DELETE] Missing video indexer configuration, skipping deletion") - print("Missing video indexer configuration; skipping Video Indexer deletion.") - else: - debug_print(f"[VIDEO INDEXER DELETE] Acquiring authentication token") - token = get_video_indexer_account_token(settings) - debug_print(f"[VIDEO INDEXER DELETE] Token acquired successfully") - - # You need to store the video ID in the document metadata when uploading - video_id = document_item.get("video_indexer_id") - debug_print(f"[VIDEO INDEXER DELETE] Video ID from document metadata: {video_id}") - - if video_id: - delete_url = f"{vi_ep}/{vi_loc}/Accounts/{vi_acc}/Videos/{video_id}?accessToken={token}" - debug_print(f"[VIDEO INDEXER DELETE] Delete URL: {delete_url}") - - resp = requests.delete(delete_url, timeout=60) - debug_print(f"[VIDEO INDEXER DELETE] Delete response status: {resp.status_code}") - - if resp.status_code != 200: - debug_print(f"[VIDEO INDEXER DELETE] Delete response text: {resp.text}") - - resp.raise_for_status() - debug_print(f"[VIDEO INDEXER DELETE] Successfully deleted video ID: {video_id}") - print(f"Deleted video from Video Indexer: {video_id}") - else: - debug_print(f"[VIDEO INDEXER DELETE] No video_indexer_id found in document metadata") - print("No video_indexer_id found in document metadata; skipping Video Indexer deletion.") - except requests.exceptions.RequestException as e: - debug_print(f"[VIDEO INDEXER DELETE] Request error: {str(e)}") - if hasattr(e, 'response') and e.response is not None: - debug_print(f"[VIDEO INDEXER DELETE] Error response status: {e.response.status_code}") - debug_print(f"[VIDEO INDEXER DELETE] Error response text: {e.response.text}") - print(f"Error deleting video from Video Indexer: {e}") 
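(Stepping back to the citation-metadata helper added above: a minimal usage sketch, assuming only the signature and return shape shown in this diff. The document and group IDs are hypothetical; the keys used are the ones the helper returns.)

```python
# Sketch only: look up keywords/abstract for a group document and build a citation note.
# "doc-123" and "group-456" are hypothetical IDs.
from functions_documents import get_document_metadata_for_citations

meta = get_document_metadata_for_citations("doc-123", group_id="group-456")
if meta:
    keywords = ", ".join(meta["keywords"]) or "none"
    note = f"{meta['file_name']} (keywords: {keywords}): {meta['abstract']}"
else:
    note = None  # no keywords/abstract stored, or the document could not be read
print(note)
```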
- except Exception as e: - debug_print(f"[VIDEO INDEXER DELETE] Unexpected error: {str(e)}") - print(f"Error deleting video from Video Indexer: {e}") - - # Second try to delete from blob storage + # Delete from blob storage try: if file_name: delete_from_blob_storage(document_id, user_id, file_name, group_id, public_workspace_id) @@ -2470,6 +3069,288 @@ def estimate_word_count(text): return 0 return len(text.split()) +def analyze_image_with_vision_model(image_path, user_id, document_id, settings): + """ + Analyze image using GPT-4 Vision or similar multimodal model. + + Args: + image_path: Path to image file + user_id: User ID for logging + document_id: Document ID for tracking + settings: Application settings + + Returns: + dict: { + 'description': 'AI-generated image description', + 'objects': ['list', 'of', 'detected', 'objects'], + 'text': 'any text visible in image', + 'analysis': 'detailed analysis' + } or None if vision analysis is disabled or fails + """ + debug_print(f"[VISION_ANALYSIS_V2] Function entry - document_id: {document_id}, user_id: {user_id}") + + + try: + # Convert image to base64 + with open(image_path, 'rb') as img_file: + image_bytes = img_file.read() + base64_image = base64.b64encode(image_bytes).decode('utf-8') + + image_size = len(image_bytes) + base64_size = len(base64_image) + debug_print(f"[VISION_ANALYSIS] Image conversion for {document_id}:") + debug_print(f" Image path: {image_path}") + debug_print(f" Original size: {image_size:,} bytes ({image_size / 1024 / 1024:.2f} MB)") + debug_print(f" Base64 size: {base64_size:,} characters") + + # Determine image mime type + mime_type = mimetypes.guess_type(image_path)[0] or 'image/jpeg' + debug_print(f" MIME type: {mime_type}") + + # Get vision model settings + vision_model = settings.get('multimodal_vision_model', 'gpt-4o') + debug_print(f"[VISION_ANALYSIS] Vision model selected: {vision_model}") + + if not vision_model: + print(f"Warning: Multi-modal vision enabled but no model selected") + return None + + # Initialize client (reuse GPT configuration) + enable_gpt_apim = settings.get('enable_gpt_apim', False) + debug_print(f"[VISION_ANALYSIS] Using APIM: {enable_gpt_apim}") + + if enable_gpt_apim: + api_version = settings.get('azure_apim_gpt_api_version') + endpoint = settings.get('azure_apim_gpt_endpoint') + debug_print(f"[VISION_ANALYSIS] APIM Configuration:") + debug_print(f" Endpoint: {endpoint}") + debug_print(f" API Version: {api_version}") + + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + api_key=settings.get('azure_apim_gpt_subscription_key') + ) + else: + # Use managed identity or key + auth_type = settings.get('azure_openai_gpt_authentication_type', 'key') + api_version = settings.get('azure_openai_gpt_api_version') + endpoint = settings.get('azure_openai_gpt_endpoint') + + debug_print(f"[VISION_ANALYSIS] Direct Azure OpenAI Configuration:") + debug_print(f" Endpoint: {endpoint}") + debug_print(f" API Version: {api_version}") + debug_print(f" Auth Type: {auth_type}") + + if auth_type == 'managed_identity': + token_provider = get_bearer_token_provider( + DefaultAzureCredential(), + cognitive_services_scope + ) + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + azure_ad_token_provider=token_provider + ) + else: + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + api_key=settings.get('azure_openai_gpt_key') + ) + + # Create vision prompt + print(f"Analyzing image with vision model: {vision_model}") + + # 
Determine which token parameter to use based on model type + # o-series and gpt-5 models require max_completion_tokens instead of max_tokens + vision_model_lower = vision_model.lower() + + debug_print(f"[VISION_ANALYSIS] Building API request parameters:") + debug_print(f" Model (lowercase): {vision_model_lower}") + + # Check which parameter will be used + uses_completion_tokens = ('o1' in vision_model_lower or 'o3' in vision_model_lower or 'gpt-5' in vision_model_lower) + debug_print(f" Uses max_completion_tokens: {uses_completion_tokens}") + debug_print(f" Detection: o1={('o1' in vision_model_lower)}, o3={('o3' in vision_model_lower)}, gpt-5={('gpt-5' in vision_model_lower)}") + + # Build prompt - GPT-5/reasoning models need explicit JSON instruction when using response_format + if uses_completion_tokens: + prompt_text = """Analyze this image and respond in JSON format with the following structure: +{ + "description": "A detailed description of what you see in the image", + "objects": ["list", "of", "objects", "people", "or", "notable", "elements"], + "text": "Any visible text extracted from the image (OCR)", + "analysis": "Contextual analysis, insights, or interpretation" +} + +Ensure your entire response is valid JSON. Include all four keys even if some are empty strings or empty arrays.""" + else: + prompt_text = """Analyze this image and provide: +1. A detailed description of what you see +2. List any objects, people, or notable elements +3. Extract any visible text (OCR) +4. Provide contextual analysis or insights + +Format your response as JSON with these keys: +{ + "description": "...", + "objects": ["...", "..."], + "text": "...", + "analysis": "..." +}""" + + api_params = { + "model": vision_model, + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": prompt_text + }, + { + "type": "image_url", + "image_url": { + "url": f"data:{mime_type};base64,{base64_image}" + } + } + ] + } + ] + } + + debug_print(f"[VISION_ANALYSIS_V2] ⚡ About to send request to Azure OpenAI with {vision_model}") + debug_print(f"[VISION_ANALYSIS_V2] ⚡ Using parameter: {'max_completion_tokens' if uses_completion_tokens else 'max_tokens'} = 1000") + debug_print(f"[VISION_ANALYSIS] Sending request to Azure OpenAI...") + debug_print(f" Message content types: text + image_url") + debug_print(f" Image data URL prefix: data:{mime_type};base64,... 
({base64_size} chars)") + + response = gpt_client.chat.completions.create(**api_params) + + debug_print(f"[VISION_ANALYSIS_V2] ⚡ Response received successfully from {vision_model}") + + debug_print(f"[VISION_ANALYSIS] Response received from {vision_model}") + debug_print(f" Response ID: {response.id if hasattr(response, 'id') else 'N/A'}") + debug_print(f" Model used: {response.model if hasattr(response, 'model') else 'N/A'}") + if hasattr(response, 'usage'): + debug_print(f" Token usage: prompt={response.usage.prompt_tokens if hasattr(response.usage, 'prompt_tokens') else 'N/A'}, completion={response.usage.completion_tokens if hasattr(response.usage, 'completion_tokens') else 'N/A'}, total={response.usage.total_tokens if hasattr(response.usage, 'total_tokens') else 'N/A'}") + + # Debug the response structure to understand why content might be empty + debug_print(f"[VISION_ANALYSIS] Response object inspection:") + debug_print(f" Response type: {type(response)}") + debug_print(f" Has choices: {hasattr(response, 'choices')}") + if hasattr(response, 'choices') and len(response.choices) > 0: + debug_print(f" Number of choices: {len(response.choices)}") + debug_print(f" First choice type: {type(response.choices[0])}") + debug_print(f" Has message: {hasattr(response.choices[0], 'message')}") + if hasattr(response.choices[0], 'message'): + debug_print(f" Message type: {type(response.choices[0].message)}") + debug_print(f" Message content type: {type(response.choices[0].message.content)}") + debug_print(f" Message content is None: {response.choices[0].message.content is None}") + # Check for refusal + if hasattr(response.choices[0].message, 'refusal'): + debug_print(f" Message refusal: {response.choices[0].message.refusal}") + # Check finish reason + if hasattr(response.choices[0], 'finish_reason'): + debug_print(f" Finish reason: {response.choices[0].finish_reason}") + + # Parse response + content = response.choices[0].message.content + + # Handle None content + if content is None: + print(f"[VISION_ANALYSIS_V2] ⚠️ Response content is None!") + debug_print(f"[VISION_ANALYSIS] ⚠️ Content is None - checking for refusal or error") + if hasattr(response.choices[0].message, 'refusal') and response.choices[0].message.refusal: + error_msg = f"Model refused to respond: {response.choices[0].message.refusal}" + else: + error_msg = "Model returned empty content with no refusal message" + + return { + 'description': error_msg, + 'error': error_msg, + 'model': vision_model, + 'parse_failed': True + } + + # Additional debugging for empty string case + print(f"[VISION_ANALYSIS_V2] ⚡ Content length: {len(content)}, repr: {repr(content[:200])}") + debug_print(f"[VISION_ANALYSIS] Raw response received:") + debug_print(f" Length: {len(content)} characters") + debug_print(f" Content repr: {repr(content)}") + debug_print(f" First 500 chars: {content[:500]}...") + debug_print(f" Last 100 chars: ...{content[-100:] if len(content) > 100 else content}") + + # Check if response looks like JSON + is_json_like = content.strip().startswith('{') or content.strip().startswith('[') + has_code_fence = '```' in content + debug_print(f" Starts with JSON bracket: {is_json_like}") + debug_print(f" Contains code fence: {has_code_fence}") + + # Try to parse as JSON, fallback to raw text + try: + # Clean up potential markdown code fences + debug_print(f"[VISION_ANALYSIS] Attempting to clean JSON code fences...") + content_cleaned = clean_json_codeFence(content) + debug_print(f" Cleaned length: {len(content_cleaned)} characters") + 
debug_print(f" Cleaned first 200 chars: {content_cleaned[:200]}...") + + debug_print(f"[VISION_ANALYSIS] Attempting to parse as JSON...") + vision_analysis = json.loads(content_cleaned) + debug_print(f"[VISION_ANALYSIS] ✅ Successfully parsed JSON response!") + debug_print(f" JSON keys: {list(vision_analysis.keys())}") + + except Exception as parse_error: + debug_print(f"[VISION_ANALYSIS] ❌ JSON parsing failed!") + debug_print(f" Error type: {type(parse_error).__name__}") + debug_print(f" Error message: {str(parse_error)}") + debug_print(f" Content that failed to parse (first 1000 chars): {content[:1000]}") + print(f"Vision response not valid JSON, using raw text") + + vision_analysis = { + 'description': content, + 'raw_response': content, + 'parse_error': str(parse_error), + 'parse_failed': True + } + debug_print(f"[VISION_ANALYSIS] Created fallback structure with raw response") + + # Add model info to analysis + vision_analysis['model'] = vision_model + + debug_print(f"[VISION_ANALYSIS] Final analysis structure for {document_id}:") + debug_print(f" Model: {vision_model}") + debug_print(f" Has 'description': {'description' in vision_analysis}") + debug_print(f" Has 'objects': {'objects' in vision_analysis}") + debug_print(f" Has 'text': {'text' in vision_analysis}") + debug_print(f" Has 'analysis': {'analysis' in vision_analysis}") + + if 'description' in vision_analysis: + desc = vision_analysis['description'] + debug_print(f" Description length: {len(desc)} chars") + debug_print(f" Description preview: {desc[:200]}...") + + if 'objects' in vision_analysis: + objs = vision_analysis['objects'] + debug_print(f" Objects count: {len(objs) if isinstance(objs, list) else 'not a list'}") + debug_print(f" Objects: {objs}") + + if 'text' in vision_analysis: + txt = vision_analysis['text'] + debug_print(f" Text length: {len(txt) if txt else 0} chars") + debug_print(f" Text preview: {txt[:100] if txt else 'None'}...") + + print(f"Vision analysis completed for document: {document_id}") + return vision_analysis + + except Exception as e: + print(f"Error in vision analysis for {document_id}: {str(e)}") + import traceback + traceback.print_exc() + return None + def upload_to_blob(temp_file_path, user_id, document_id, blob_filename, update_callback, group_id=None, public_workspace_id=None): """Uploads the file to Azure Blob Storage.""" @@ -2527,6 +3408,8 @@ def process_txt(document_id, user_id, temp_file_path, original_filename, enable_ update_callback(status="Processing TXT file...") total_chunks_saved = 0 + total_embedding_tokens = 0 + embedding_model_name = None target_words_per_chunk = 400 if enable_enhanced_citations: @@ -2577,23 +3460,31 @@ def process_txt(document_id, user_id, temp_file_path, original_filename, enable_ elif is_group: args["group_id"] = group_id - save_chunks(**args) + token_usage = save_chunks(**args) total_chunks_saved += 1 + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') except Exception as e: raise Exception(f"Failed processing TXT file {original_filename}: {e}") - return total_chunks_saved + return total_chunks_saved, total_embedding_tokens, embedding_model_name -def process_html(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): - """Processes HTML files.""" +def process_xml(document_id, user_id, temp_file_path, 
original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """Processes XML files using RecursiveCharacterTextSplitter for structured content.""" is_group = group_id is not None is_public_workspace = public_workspace_id is not None - update_callback(status="Processing HTML file...") + update_callback(status="Processing XML file...") total_chunks_saved = 0 - target_chunk_words = 1200 # Target size based on requirement - min_chunk_words = 600 # Minimum size based on requirement + total_embedding_tokens = 0 + embedding_model_name = None + # Character-based chunking for XML structure preservation + max_chunk_size_chars = 4000 if enable_enhanced_citations: args = { @@ -2603,20 +3494,748 @@ def process_html(document_id, user_id, temp_file_path, original_filename, enable "blob_filename": original_filename, "update_callback": update_callback } - if is_public_workspace: - args["public_workspace_id"] = public_workspace_id - elif is_group: + + if is_group: args["group_id"] = group_id + elif is_public_workspace: + args["public_workspace_id"] = public_workspace_id upload_to_blob(**args) try: - # --- CHANGE HERE: Open in binary mode ('rb') --- - # Let BeautifulSoup handle the decoding based on meta tags or detection - with open(temp_file_path, 'rb') as f: - # --- CHANGE HERE: Pass the file object directly to BeautifulSoup --- - soup = BeautifulSoup(f, 'lxml') # or 'html.parser' if lxml not installed - + # Read XML content + try: + with open(temp_file_path, 'r', encoding='utf-8') as f: + xml_content = f.read() + except Exception as e: + raise Exception(f"Error reading XML file {original_filename}: {e}") + + # Use RecursiveCharacterTextSplitter with XML-aware separators + # This preserves XML structure better than simple word splitting + xml_splitter = RecursiveCharacterTextSplitter( + chunk_size=max_chunk_size_chars, + chunk_overlap=0, + length_function=len, + separators=["\n\n", "\n", ">", " ", ""], # XML-friendly separators + is_separator_regex=False + ) + + # Split the XML content + final_chunks = xml_splitter.split_text(xml_content) + + initial_chunk_count = len(final_chunks) + update_callback(number_of_pages=initial_chunk_count) + + for idx, chunk_content in enumerate(final_chunks, start=1): + # Skip empty chunks + if not chunk_content or not chunk_content.strip(): + print(f"Skipping empty XML chunk {idx}/{initial_chunk_count}") + continue + + update_callback( + current_file_chunk=idx, + status=f"Saving chunk {idx}/{initial_chunk_count}..." 
+ ) + args = { + "page_text_content": chunk_content, + "page_number": total_chunks_saved + 1, + "file_name": original_filename, + "user_id": user_id, + "document_id": document_id + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + token_usage = save_chunks(**args) + total_chunks_saved += 1 + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') + + # Final update with actual chunks saved + if total_chunks_saved != initial_chunk_count: + update_callback(number_of_pages=total_chunks_saved) + print(f"Adjusted final chunk count from {initial_chunk_count} to {total_chunks_saved} after skipping empty chunks.") + + except Exception as e: + print(f"Error during XML processing for {original_filename}: {type(e).__name__}: {e}") + raise Exception(f"Failed processing XML file {original_filename}: {e}") + + return total_chunks_saved, total_embedding_tokens, embedding_model_name + +def process_yaml(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """Processes YAML files using RecursiveCharacterTextSplitter for structured content.""" + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + update_callback(status="Processing YAML file...") + total_chunks_saved = 0 + total_embedding_tokens = 0 + embedding_model_name = None + # Character-based chunking for YAML structure preservation + max_chunk_size_chars = 4000 + + if enable_enhanced_citations: + args = { + "temp_file_path": temp_file_path, + "user_id": user_id, + "document_id": document_id, + "blob_filename": original_filename, + "update_callback": update_callback + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + upload_to_blob(**args) + + try: + # Read YAML content + try: + with open(temp_file_path, 'r', encoding='utf-8') as f: + yaml_content = f.read() + except Exception as e: + raise Exception(f"Error reading YAML file {original_filename}: {e}") + + # Use RecursiveCharacterTextSplitter with YAML-aware separators + # This preserves YAML structure better than simple word splitting + yaml_splitter = RecursiveCharacterTextSplitter( + chunk_size=max_chunk_size_chars, + chunk_overlap=0, + length_function=len, + separators=["\n\n", "\n", "- ", " ", ""], # YAML-friendly separators + is_separator_regex=False + ) + + # Split the YAML content + final_chunks = yaml_splitter.split_text(yaml_content) + + initial_chunk_count = len(final_chunks) + update_callback(number_of_pages=initial_chunk_count) + + for idx, chunk_content in enumerate(final_chunks, start=1): + # Skip empty chunks + if not chunk_content or not chunk_content.strip(): + print(f"Skipping empty YAML chunk {idx}/{initial_chunk_count}") + continue + + update_callback( + current_file_chunk=idx, + status=f"Saving chunk {idx}/{initial_chunk_count}..." 
+ ) + args = { + "page_text_content": chunk_content, + "page_number": total_chunks_saved + 1, + "file_name": original_filename, + "user_id": user_id, + "document_id": document_id + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + token_usage = save_chunks(**args) + total_chunks_saved += 1 + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') + + # Final update with actual chunks saved + if total_chunks_saved != initial_chunk_count: + update_callback(number_of_pages=total_chunks_saved) + print(f"Adjusted final chunk count from {initial_chunk_count} to {total_chunks_saved} after skipping empty chunks.") + + except Exception as e: + print(f"Error during YAML processing for {original_filename}: {type(e).__name__}: {e}") + raise Exception(f"Failed processing YAML file {original_filename}: {e}") + + return total_chunks_saved, total_embedding_tokens, embedding_model_name + +def process_log(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """Processes LOG files using line-based chunking to maintain log record integrity.""" + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + update_callback(status="Processing LOG file...") + total_chunks_saved = 0 + total_embedding_tokens = 0 + embedding_model_name = None + target_words_per_chunk = 1000 # Word-based chunking for better semantic grouping + + if enable_enhanced_citations: + args = { + "temp_file_path": temp_file_path, + "user_id": user_id, + "document_id": document_id, + "blob_filename": original_filename, + "update_callback": update_callback + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + upload_to_blob(**args) + + try: + with open(temp_file_path, 'r', encoding='utf-8') as f: + content = f.read() + + # Split by lines to maintain log record integrity + lines = content.splitlines(keepends=True) # Keep line endings + + if not lines: + raise Exception(f"LOG file {original_filename} is empty") + + # Chunk by accumulating lines until reaching target word count + final_chunks = [] + current_chunk_lines = [] + current_chunk_word_count = 0 + + for line in lines: + line_word_count = len(line.split()) + + # If adding this line exceeds target AND we already have content + if current_chunk_word_count + line_word_count > target_words_per_chunk and current_chunk_lines: + # Finalize current chunk + final_chunks.append("".join(current_chunk_lines)) + # Start new chunk with current line + current_chunk_lines = [line] + current_chunk_word_count = line_word_count + else: + # Add line to current chunk + current_chunk_lines.append(line) + current_chunk_word_count += line_word_count + + # Add the last remaining chunk if it has content + if current_chunk_lines: + final_chunks.append("".join(current_chunk_lines)) + + num_chunks = len(final_chunks) + update_callback(number_of_pages=num_chunks) + + for idx, chunk_content in enumerate(final_chunks, start=1): + if chunk_content.strip(): + update_callback( + current_file_chunk=idx, + status=f"Saving chunk {idx}/{num_chunks}..." 
+ ) + args = { + "page_text_content": chunk_content, + "page_number": idx, + "file_name": original_filename, + "user_id": user_id, + "document_id": document_id + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + token_usage = save_chunks(**args) + total_chunks_saved += 1 + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') + + except Exception as e: + raise Exception(f"Failed processing LOG file {original_filename}: {e}") + + return total_chunks_saved, total_embedding_tokens, embedding_model_name + +def process_doc(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """ + Processes .doc and .docm files using docx2txt library. + Note: .docx files still use Document Intelligence for better formatting preservation. + """ + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + update_callback(status=f"Processing {original_filename.split('.')[-1].upper()} file...") + total_chunks_saved = 0 + target_words_per_chunk = 400 # Consistent with other text-based chunking + + if enable_enhanced_citations: + args = { + "temp_file_path": temp_file_path, + "user_id": user_id, + "document_id": document_id, + "blob_filename": original_filename, + "update_callback": update_callback + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + upload_to_blob(**args) + + try: + # Import docx2txt here to avoid dependency issues if not installed + try: + import docx2txt + except ImportError: + raise Exception("docx2txt library is required for .doc and .docm file processing. Install with: pip install docx2txt") + + # Extract text from .doc or .docm file + try: + text_content = docx2txt.process(temp_file_path) + except Exception as e: + raise Exception(f"Error extracting text from {original_filename}: {e}") + + if not text_content or not text_content.strip(): + raise Exception(f"No text content extracted from {original_filename}") + + # Split into words for chunking + words = text_content.split() + if not words: + raise Exception(f"No text content found in {original_filename}") + + # Create chunks of target_words_per_chunk words + final_chunks = [] + for i in range(0, len(words), target_words_per_chunk): + chunk_words = words[i:i + target_words_per_chunk] + chunk_text = " ".join(chunk_words) + final_chunks.append(chunk_text) + + num_chunks = len(final_chunks) + update_callback(number_of_pages=num_chunks) + + for idx, chunk_content in enumerate(final_chunks, start=1): + if chunk_content.strip(): + update_callback( + current_file_chunk=idx, + status=f"Saving chunk {idx}/{num_chunks}..." 
+ ) + args = { + "page_text_content": chunk_content, + "page_number": idx, + "file_name": original_filename, + "user_id": user_id, + "document_id": document_id + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + token_usage = save_chunks(**args) + total_chunks_saved += 1 + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') + + except Exception as e: + raise Exception(f"Failed processing {original_filename}: {e}") + + return total_chunks_saved, total_embedding_tokens, embedding_model_name + +def process_xml(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """Processes XML files using RecursiveCharacterTextSplitter for structured content.""" + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + update_callback(status="Processing XML file...") + total_chunks_saved = 0 + # Character-based chunking for XML structure preservation + max_chunk_size_chars = 4000 + + if enable_enhanced_citations: + args = { + "temp_file_path": temp_file_path, + "user_id": user_id, + "document_id": document_id, + "blob_filename": original_filename, + "update_callback": update_callback + } + + if is_group: + args["group_id"] = group_id + elif is_public_workspace: + args["public_workspace_id"] = public_workspace_id + + upload_to_blob(**args) + + try: + # Read XML content + try: + with open(temp_file_path, 'r', encoding='utf-8') as f: + xml_content = f.read() + except Exception as e: + raise Exception(f"Error reading XML file {original_filename}: {e}") + + # Use RecursiveCharacterTextSplitter with XML-aware separators + # This preserves XML structure better than simple word splitting + xml_splitter = RecursiveCharacterTextSplitter( + chunk_size=max_chunk_size_chars, + chunk_overlap=0, + length_function=len, + separators=["\n\n", "\n", ">", " ", ""], # XML-friendly separators + is_separator_regex=False + ) + + # Split the XML content + final_chunks = xml_splitter.split_text(xml_content) + + initial_chunk_count = len(final_chunks) + update_callback(number_of_pages=initial_chunk_count) + + for idx, chunk_content in enumerate(final_chunks, start=1): + # Skip empty chunks + if not chunk_content or not chunk_content.strip(): + print(f"Skipping empty XML chunk {idx}/{initial_chunk_count}") + continue + + update_callback( + current_file_chunk=idx, + status=f"Saving chunk {idx}/{initial_chunk_count}..." 
+ ) + args = { + "page_text_content": chunk_content, + "page_number": total_chunks_saved + 1, + "file_name": original_filename, + "user_id": user_id, + "document_id": document_id + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + save_chunks(**args) + total_chunks_saved += 1 + + # Final update with actual chunks saved + if total_chunks_saved != initial_chunk_count: + update_callback(number_of_pages=total_chunks_saved) + print(f"Adjusted final chunk count from {initial_chunk_count} to {total_chunks_saved} after skipping empty chunks.") + + except Exception as e: + print(f"Error during XML processing for {original_filename}: {type(e).__name__}: {e}") + raise Exception(f"Failed processing XML file {original_filename}: {e}") + + return total_chunks_saved + +def process_yaml(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """Processes YAML files using RecursiveCharacterTextSplitter for structured content.""" + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + update_callback(status="Processing YAML file...") + total_chunks_saved = 0 + # Character-based chunking for YAML structure preservation + max_chunk_size_chars = 4000 + + if enable_enhanced_citations: + args = { + "temp_file_path": temp_file_path, + "user_id": user_id, + "document_id": document_id, + "blob_filename": original_filename, + "update_callback": update_callback + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + upload_to_blob(**args) + + try: + # Read YAML content + try: + with open(temp_file_path, 'r', encoding='utf-8') as f: + yaml_content = f.read() + except Exception as e: + raise Exception(f"Error reading YAML file {original_filename}: {e}") + + # Use RecursiveCharacterTextSplitter with YAML-aware separators + # This preserves YAML structure better than simple word splitting + yaml_splitter = RecursiveCharacterTextSplitter( + chunk_size=max_chunk_size_chars, + chunk_overlap=0, + length_function=len, + separators=["\n\n", "\n", "- ", " ", ""], # YAML-friendly separators + is_separator_regex=False + ) + + # Split the YAML content + final_chunks = yaml_splitter.split_text(yaml_content) + + initial_chunk_count = len(final_chunks) + update_callback(number_of_pages=initial_chunk_count) + + for idx, chunk_content in enumerate(final_chunks, start=1): + # Skip empty chunks + if not chunk_content or not chunk_content.strip(): + print(f"Skipping empty YAML chunk {idx}/{initial_chunk_count}") + continue + + update_callback( + current_file_chunk=idx, + status=f"Saving chunk {idx}/{initial_chunk_count}..." 
+ ) + args = { + "page_text_content": chunk_content, + "page_number": total_chunks_saved + 1, + "file_name": original_filename, + "user_id": user_id, + "document_id": document_id + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + save_chunks(**args) + total_chunks_saved += 1 + + # Final update with actual chunks saved + if total_chunks_saved != initial_chunk_count: + update_callback(number_of_pages=total_chunks_saved) + print(f"Adjusted final chunk count from {initial_chunk_count} to {total_chunks_saved} after skipping empty chunks.") + + except Exception as e: + print(f"Error during YAML processing for {original_filename}: {type(e).__name__}: {e}") + raise Exception(f"Failed processing YAML file {original_filename}: {e}") + + return total_chunks_saved + +def process_log(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """Processes LOG files using line-based chunking to maintain log record integrity.""" + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + update_callback(status="Processing LOG file...") + total_chunks_saved = 0 + target_words_per_chunk = 1000 # Word-based chunking for better semantic grouping + + if enable_enhanced_citations: + args = { + "temp_file_path": temp_file_path, + "user_id": user_id, + "document_id": document_id, + "blob_filename": original_filename, + "update_callback": update_callback + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + upload_to_blob(**args) + + try: + with open(temp_file_path, 'r', encoding='utf-8') as f: + content = f.read() + + # Split by lines to maintain log record integrity + lines = content.splitlines(keepends=True) # Keep line endings + + if not lines: + raise Exception(f"LOG file {original_filename} is empty") + + # Chunk by accumulating lines until reaching target word count + final_chunks = [] + current_chunk_lines = [] + current_chunk_word_count = 0 + + for line in lines: + line_word_count = len(line.split()) + + # If adding this line exceeds target AND we already have content + if current_chunk_word_count + line_word_count > target_words_per_chunk and current_chunk_lines: + # Finalize current chunk + final_chunks.append("".join(current_chunk_lines)) + # Start new chunk with current line + current_chunk_lines = [line] + current_chunk_word_count = line_word_count + else: + # Add line to current chunk + current_chunk_lines.append(line) + current_chunk_word_count += line_word_count + + # Add the last remaining chunk if it has content + if current_chunk_lines: + final_chunks.append("".join(current_chunk_lines)) + + num_chunks = len(final_chunks) + update_callback(number_of_pages=num_chunks) + + for idx, chunk_content in enumerate(final_chunks, start=1): + if chunk_content.strip(): + update_callback( + current_file_chunk=idx, + status=f"Saving chunk {idx}/{num_chunks}..." 
+ ) + args = { + "page_text_content": chunk_content, + "page_number": idx, + "file_name": original_filename, + "user_id": user_id, + "document_id": document_id + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + save_chunks(**args) + total_chunks_saved += 1 + + except Exception as e: + raise Exception(f"Failed processing LOG file {original_filename}: {e}") + + return total_chunks_saved + +def process_doc(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """ + Processes .doc and .docm files using docx2txt library. + Note: .docx files still use Document Intelligence for better formatting preservation. + """ + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + update_callback(status=f"Processing {original_filename.split('.')[-1].upper()} file...") + total_chunks_saved = 0 + target_words_per_chunk = 400 # Consistent with other text-based chunking + + if enable_enhanced_citations: + args = { + "temp_file_path": temp_file_path, + "user_id": user_id, + "document_id": document_id, + "blob_filename": original_filename, + "update_callback": update_callback + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + upload_to_blob(**args) + + try: + # Import docx2txt here to avoid dependency issues if not installed + try: + import docx2txt + except ImportError: + raise Exception("docx2txt library is required for .doc and .docm file processing. Install with: pip install docx2txt") + + # Extract text from .doc or .docm file + try: + text_content = docx2txt.process(temp_file_path) + except Exception as e: + raise Exception(f"Error extracting text from {original_filename}: {e}") + + if not text_content or not text_content.strip(): + raise Exception(f"No text content extracted from {original_filename}") + + # Split into words for chunking + words = text_content.split() + if not words: + raise Exception(f"No text content found in {original_filename}") + + # Create chunks of target_words_per_chunk words + final_chunks = [] + for i in range(0, len(words), target_words_per_chunk): + chunk_words = words[i:i + target_words_per_chunk] + chunk_text = " ".join(chunk_words) + final_chunks.append(chunk_text) + + num_chunks = len(final_chunks) + update_callback(number_of_pages=num_chunks) + + for idx, chunk_content in enumerate(final_chunks, start=1): + if chunk_content.strip(): + update_callback( + current_file_chunk=idx, + status=f"Saving chunk {idx}/{num_chunks}..." 
+ ) + args = { + "page_text_content": chunk_content, + "page_number": idx, + "file_name": original_filename, + "user_id": user_id, + "document_id": document_id + } + + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + save_chunks(**args) + total_chunks_saved += 1 + + except Exception as e: + raise Exception(f"Failed processing {original_filename}: {e}") + + return total_chunks_saved + +def process_html(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): + """Processes HTML files.""" + is_group = group_id is not None + is_public_workspace = public_workspace_id is not None + + update_callback(status="Processing HTML file...") + total_chunks_saved = 0 + total_embedding_tokens = 0 + embedding_model_name = None + target_chunk_words = 1200 # Target size based on requirement + min_chunk_words = 600 # Minimum size based on requirement + + if enable_enhanced_citations: + args = { + "temp_file_path": temp_file_path, + "user_id": user_id, + "document_id": document_id, + "blob_filename": original_filename, + "update_callback": update_callback + } + if is_public_workspace: + args["public_workspace_id"] = public_workspace_id + elif is_group: + args["group_id"] = group_id + + upload_to_blob(**args) + + try: + # --- CHANGE HERE: Open in binary mode ('rb') --- + # Let BeautifulSoup handle the decoding based on meta tags or detection + with open(temp_file_path, 'rb') as f: + # --- CHANGE HERE: Pass the file object directly to BeautifulSoup --- + soup = BeautifulSoup(f, 'lxml') # or 'html.parser' if lxml not installed + # TODO: Advanced Table Handling - (Comment remains valid) # ... @@ -2669,8 +4288,14 @@ def process_html(document_id, user_id, temp_file_path, original_filename, enable elif is_group: args["group_id"] = group_id - save_chunks(**args) + token_usage = save_chunks(**args) total_chunks_saved += 1 + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') except Exception as e: # Catch potential BeautifulSoup errors too @@ -2705,7 +4330,7 @@ def process_html(document_id, user_id, temp_file_path, original_filename, enable print(f"Warning: Error extracting final metadata for HTML document {document_id}: {str(e)}") update_callback(status=f"Processing complete (metadata extraction warning)") - return total_chunks_saved + return total_chunks_saved, total_embedding_tokens, embedding_model_name def process_md(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): """Processes Markdown files.""" @@ -2714,6 +4339,8 @@ def process_md(document_id, user_id, temp_file_path, original_filename, enable_e update_callback(status="Processing Markdown file...") total_chunks_saved = 0 + total_embedding_tokens = 0 + embedding_model_name = None target_chunk_words = 1200 # Target size based on requirement min_chunk_words = 600 # Minimum size based on requirement @@ -2798,8 +4425,14 @@ def process_md(document_id, user_id, temp_file_path, original_filename, enable_e elif is_group: args["group_id"] = group_id - save_chunks(**args) + token_usage = save_chunks(**args) total_chunks_saved += 1 + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not 
embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') except Exception as e: raise Exception(f"Failed processing Markdown file {original_filename}: {e}") @@ -2833,7 +4466,7 @@ def process_md(document_id, user_id, temp_file_path, original_filename, enable_e print(f"Warning: Error extracting final metadata for Markdown document {document_id}: {str(e)}") update_callback(status=f"Processing complete (metadata extraction warning)") - return total_chunks_saved + return total_chunks_saved, total_embedding_tokens, embedding_model_name def process_json(document_id, user_id, temp_file_path, original_filename, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): """Processes JSON files using RecursiveJsonSplitter.""" @@ -2842,6 +4475,8 @@ def process_json(document_id, user_id, temp_file_path, original_filename, enable update_callback(status="Processing JSON file...") total_chunks_saved = 0 + total_embedding_tokens = 0 + embedding_model_name = None # Reflects character count limit for the splitter max_chunk_size_chars = 4000 # As per original requirement @@ -2915,8 +4550,14 @@ def process_json(document_id, user_id, temp_file_path, original_filename, enable elif is_group: args["group_id"] = group_id - save_chunks(**args) + token_usage = save_chunks(**args) total_chunks_saved += 1 # Increment only when a chunk is actually saved + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') # Final update with the actual number of chunks saved if total_chunks_saved != initial_chunk_count: @@ -2962,7 +4603,7 @@ def process_json(document_id, user_id, temp_file_path, original_filename, enable update_callback(status=f"Processing complete (metadata extraction warning)") # Return the count of chunks actually saved - return total_chunks_saved + return total_chunks_saved, total_embedding_tokens, embedding_model_name def process_single_tabular_sheet(df, document_id, user_id, file_name, update_callback, group_id=None, public_workspace_id=None): """Chunks a pandas DataFrame from a CSV or Excel sheet.""" @@ -2970,6 +4611,8 @@ def process_single_tabular_sheet(df, document_id, user_id, file_name, update_cal is_public_workspace = public_workspace_id is not None total_chunks_saved = 0 + total_embedding_tokens = 0 + embedding_model_name = None target_chunk_size_chars = 800 # Requirement: "800 size chunk" (assuming characters) if df.empty: @@ -3039,10 +4682,16 @@ def process_single_tabular_sheet(df, document_id, user_id, file_name, update_cal elif is_group: args["group_id"] = group_id - save_chunks(**args) + token_usage = save_chunks(**args) total_chunks_saved += 1 + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') - return total_chunks_saved + return total_chunks_saved, total_embedding_tokens, embedding_model_name def process_tabular(document_id, user_id, temp_file_path, original_filename, file_ext, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): """Processes CSV, XLSX, or XLS files using pandas.""" @@ -3051,6 +4700,8 @@ def process_tabular(document_id, user_id, temp_file_path, original_filename, fil update_callback(status=f"Processing Tabular file ({file_ext})...") total_chunks_saved = 0 + total_embedding_tokens 
= 0 + embedding_model_name = None # Upload the original file once if enhanced citations are enabled if enable_enhanced_citations: @@ -3091,13 +4742,21 @@ def process_tabular(document_id, user_id, temp_file_path, original_filename, fil elif is_group: args["group_id"] = group_id - total_chunks_saved = process_single_tabular_sheet(**args) + result = process_single_tabular_sheet(**args) + if isinstance(result, tuple) and len(result) == 3: + chunks, tokens, model = result + total_chunks_saved = chunks + total_embedding_tokens += tokens + if not embedding_model_name: + embedding_model_name = model + else: + total_chunks_saved = result - elif file_ext in ('.xlsx', '.xls'): + elif file_ext in ('.xlsx', '.xls', '.xlsm'): # Process Excel (potentially multiple sheets) excel_file = pandas.ExcelFile( temp_file_path, - engine='openpyxl' if file_ext == '.xlsx' else 'xlrd' + engine='openpyxl' if file_ext in ('.xlsx', '.xlsm') else 'xlrd' ) sheet_names = excel_file.sheet_names base_name, ext = os.path.splitext(original_filename) @@ -3125,9 +4784,15 @@ def process_tabular(document_id, user_id, temp_file_path, original_filename, fil elif is_group: args["group_id"] = group_id - chunks_from_sheet = process_single_tabular_sheet(**args) - - accumulated_total_chunks += chunks_from_sheet + result = process_single_tabular_sheet(**args) + if isinstance(result, tuple) and len(result) == 3: + chunks, tokens, model = result + accumulated_total_chunks += chunks + total_embedding_tokens += tokens + if not embedding_model_name: + embedding_model_name = model + else: + accumulated_total_chunks += result total_chunks_saved = accumulated_total_chunks # Total across all sheets @@ -3167,13 +4832,17 @@ def process_tabular(document_id, user_id, temp_file_path, original_filename, fil print(f"Warning: Error extracting final metadata for Tabular document {document_id}: {str(e)}") update_callback(status=f"Processing complete (metadata extraction warning)") - return total_chunks_saved + return total_chunks_saved, total_embedding_tokens, embedding_model_name def process_di_document(document_id, user_id, temp_file_path, original_filename, file_ext, enable_enhanced_citations, update_callback, group_id=None, public_workspace_id=None): """Processes documents supported by Azure Document Intelligence (PDF, Word, PPT, Image).""" is_group = group_id is not None is_public_workspace = public_workspace_id is not None + # --- Token tracking initialization --- + total_embedding_tokens = 0 + embedding_model_name = None + # --- Extracted Metadata logic --- doc_title, doc_author, doc_subject, doc_keywords = '', '', None, None doc_authors_list = [] @@ -3182,7 +4851,7 @@ def process_di_document(document_id, user_id, temp_file_path, original_filename, is_pdf = file_ext == '.pdf' is_word = file_ext in ('.docx', '.doc') is_ppt = file_ext in ('.pptx', '.ppt') - is_image = file_ext in ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif', '.heif') + is_image = file_ext in tuple('.' 
+ ext for ext in IMAGE_EXTENSIONS) try: if is_pdf: @@ -3290,6 +4959,45 @@ def process_di_document(document_id, user_id, temp_file_path, original_filename, except Exception as e: raise Exception(f"Error extracting content from {chunk_effective_filename} with Azure DI: {str(e)}") + # --- Multi-Modal Vision Analysis (for images only) - Must happen BEFORE save_chunks --- + if is_image and enable_enhanced_citations and idx == 1: # Only run once for first chunk + enable_multimodal_vision = settings.get('enable_multimodal_vision', False) + if enable_multimodal_vision: + try: + update_callback(status="Performing AI vision analysis...") + + vision_analysis = analyze_image_with_vision_model( + chunk_path, + user_id, + document_id, + settings + ) + + if vision_analysis: + print(f"Vision analysis completed for image: {chunk_effective_filename}") + + # Update document with vision analysis results BEFORE saving chunks + # This allows save_chunks() to append vision data to chunk_text for AI Search + update_fields = { + 'vision_analysis': vision_analysis, + 'vision_description': vision_analysis.get('description', ''), + 'vision_objects': vision_analysis.get('objects', []), + 'vision_extracted_text': vision_analysis.get('text', ''), + 'status': "AI vision analysis completed" + } + update_callback(**update_fields) + print(f"Vision analysis saved to document metadata and will be appended to chunk_text for AI Search indexing") + else: + print(f"Vision analysis returned no results for: {chunk_effective_filename}") + update_callback(status="Vision analysis completed (no results)") + + except Exception as e: + print(f"Warning: Error in vision analysis for {document_id}: {str(e)}") + import traceback + traceback.print_exc() + # Don't fail the whole process, just update status + update_callback(status=f"Processing continues (vision analysis warning)") + # Content Chunking Strategy (Word needs specific handling) final_chunks_to_save = [] if is_word: @@ -3357,7 +5065,13 @@ def process_di_document(document_id, user_id, temp_file_path, original_filename, elif is_group: args["group_id"] = group_id - save_chunks(**args) + token_usage = save_chunks(**args) + + # Accumulate embedding tokens + if token_usage: + total_embedding_tokens += token_usage.get('total_tokens', 0) + if not embedding_model_name: + embedding_model_name = token_usage.get('model_deployment_name') total_final_chunks_processed += 1 print(f"Saved {num_final_chunks} content chunk(s) from {chunk_effective_filename}.") @@ -3398,10 +5112,13 @@ def process_di_document(document_id, user_id, temp_file_path, original_filename, update_callback(status="Final metadata extraction yielded no new info") except Exception as e: print(f"Warning: Error extracting final metadata for {document_id}: {str(e)}") - # Don't fail the whole process, just update status + # Don't fail the whole process, just update status update_callback(status=f"Processing complete (metadata extraction warning)") - return total_final_chunks_processed + # Note: Vision analysis now happens BEFORE save_chunks (moved earlier in the flow) + # This ensures vision_analysis is available in metadata when chunks are being saved + + return total_final_chunks_processed, total_embedding_tokens, embedding_model_name def _get_content_type(path: str) -> str: ext = os.path.splitext(path)[1].lower() @@ -3446,9 +5163,27 @@ def _split_audio_file(input_path: str, chunk_seconds: int = 540) -> List[str]: if not chunks: print(f"[Error] No WAV chunks produced for 
'{input_path}'.") raise RuntimeError(f"No chunks produced by ffmpeg for file '{input_path}'") - print(f"[Debug] Produced {len(chunks)} WAV chunks: {chunks}") + print(f"Produced {len(chunks)} WAV chunks: {chunks}") return chunks +# Azure Speech SDK helper to get speech config with fresh token +def _get_speech_config(settings, endpoint: str, locale: str): + """Get speech config with fresh token""" + if settings.get("speech_service_authentication_type") == "managed_identity": + credential = DefaultAzureCredential() + token = credential.get_token(cognitive_services_scope) + speech_config = speechsdk.SpeechConfig(endpoint=endpoint) + + # Set the authorization token AFTER creating the config + speech_config.authorization_token = token.token + else: + key = settings.get("speech_service_key", "") + speech_config = speechsdk.SpeechConfig(endpoint=endpoint, subscription=key) + + speech_config.speech_recognition_language = locale + print(f"[Debug] Speech config obtained successfully", flush=True) + return speech_config + def process_audio_document( document_id: str, user_id: str, @@ -3477,7 +5212,7 @@ def process_audio_document( # 1) size guard file_size = os.path.getsize(temp_file_path) - print(f"[Debug] File size: {file_size} bytes") + print(f"File size: {file_size} bytes") if file_size > 300 * 1024 * 1024: raise ValueError("Audio exceeds 300 MB limit.") @@ -3488,38 +5223,186 @@ def process_audio_document( # 3) transcribe each WAV chunk settings = get_settings() endpoint = settings.get("speech_service_endpoint", "").rstrip('/') - key = settings.get("speech_service_key", "") locale = settings.get("speech_service_locale", "en-US") - url = f"{endpoint}/speechtotext/transcriptions:transcribe?api-version=2024-11-15" all_phrases: List[str] = [] - for idx, chunk_path in enumerate(chunk_paths, start=1): - update_callback(current_file_chunk=idx, status=f"Transcribing chunk {idx}/{len(chunk_paths)}…") - print(f"[Debug] Transcribing WAV chunk: {chunk_path}") - - with open(chunk_path, 'rb') as audio_f: - files = { - 'audio': (os.path.basename(chunk_path), audio_f, 'audio/wav'), - 'definition': (None, json.dumps({'locales':[locale]}), 'application/json') - } - headers = {'Ocp-Apim-Subscription-Key': key} - resp = requests.post(url, headers=headers, files=files) - try: - resp.raise_for_status() - except Exception as e: - print(f"[Error] HTTP error for {chunk_path}: {e}") - raise - result = resp.json() - phrases = result.get('combinedPhrases', []) - print(f"[Debug] Received {len(phrases)} phrases") - all_phrases += [p.get('text','').strip() for p in phrases if p.get('text')] + # Fast Transcription API not yet available in sovereign clouds, so use SDK + if AZURE_ENVIRONMENT in ("usgovernment", "custom"): + for idx, chunk_path in enumerate(chunk_paths, start=1): + print(f"[Debug] Transcribing chunk {idx}: {chunk_path}") + + # Get fresh config (tokens expire after ~1 hour) + try: + speech_config = _get_speech_config(settings, endpoint, locale) + except Exception as e: + print(f"[Error] Failed to get speech config for chunk {idx}: {e}") + raise RuntimeError(f"Speech configuration failed for chunk {idx}: {e}") + + try: + audio_config = speechsdk.AudioConfig(filename=chunk_path) + except Exception as e: + print(f"[Error] Failed to load audio file {chunk_path}: {e}") + raise RuntimeError(f"Audio file loading failed: {e}") + + try: + speech_recognizer = speechsdk.SpeechRecognizer( + speech_config=speech_config, + audio_config=audio_config + ) + except Exception as e: + print(f"[Error] Failed to create speech recognizer 
for chunk {idx}: {e}") + raise RuntimeError(f"Speech recognizer creation failed: {e}") + + # Use continuous recognition instead of recognize_once + all_results = [] + done = False + error_occurred = False + error_message = None + + def stop_cb(evt): + nonlocal done + print(f"[Debug] Session stopped for chunk {idx}") + done = True + + def recognized_cb(evt): + try: + if evt.result.reason == speechsdk.ResultReason.RecognizedSpeech: + all_results.append(evt.result.text) + print(f"[Debug] Recognized: {evt.result.text}") + elif evt.result.reason == speechsdk.ResultReason.NoMatch: + print(f"[Debug] No speech recognized in segment") + except Exception as e: + print(f"[Error] Error in recognized callback: {e}") + # Don't fail on individual recognition errors + + def canceled_cb(evt): + nonlocal done, error_occurred, error_message + print(f"[Debug] Recognition canceled for chunk {idx}: {evt.cancellation_details.reason}") + + if evt.cancellation_details.reason == speechsdk.CancellationReason.Error: + error_occurred = True + error_message = evt.cancellation_details.error_details + print(f"[Error] Recognition error: {error_message}") + elif evt.cancellation_details.reason == speechsdk.CancellationReason.EndOfStream: + print(f"[Debug] End of audio stream reached") + + done = True + + try: + # Connect callbacks + speech_recognizer.recognized.connect(recognized_cb) + speech_recognizer.session_stopped.connect(stop_cb) + speech_recognizer.canceled.connect(canceled_cb) + + # Start continuous recognition + print(f"[Debug] Starting continuous recognition for chunk {idx}") + speech_recognizer.start_continuous_recognition() + + # Wait for completion with timeout + import time + timeout_seconds = 600 # 10 minutes max per chunk + start_time = time.time() + + while not done: + if time.time() - start_time > timeout_seconds: + print(f"[Error] Recognition timeout for chunk {idx}") + error_occurred = True + error_message = f"Recognition timed out after {timeout_seconds} seconds" + break + time.sleep(0.5) + + # Stop recognition + try: + speech_recognizer.stop_continuous_recognition() + print(f"[Debug] Stopped continuous recognition for chunk {idx}") + except Exception as e: + print(f"[Warning] Error stopping recognition for chunk {idx}: {e}") + # Continue even if stop fails + + # Check for errors after completion + if error_occurred: + raise RuntimeError(f"Recognition failed for chunk {idx}: {error_message}") + + # Add all recognized phrases to the overall list + if all_results: + all_phrases.extend(all_results) + print(f"[Debug] Total phrases from chunk {idx}: {len(all_results)}") + else: + print(f"[Warning] No speech recognized in {chunk_path}") + # Continue to next chunk - empty result is not necessarily an error + + except RuntimeError as e: + # Re-raise runtime errors (these are our custom errors) + raise + except Exception as e: + print(f"[Error] Unexpected error during recognition for chunk {idx}: {e}") + raise RuntimeError(f"Recognition failed unexpectedly for chunk {idx}: {e}") + finally: + # Cleanup: disconnect callbacks and dispose recognizer + try: + speech_recognizer.recognized.disconnect_all() + speech_recognizer.session_stopped.disconnect_all() + speech_recognizer.canceled.disconnect_all() + except Exception as e: + print(f"[Warning] Error disconnecting callbacks for chunk {idx}: {e}") + + # # Get fresh config (tokens expire after ~1 hour) + # speech_config = _get_speech_config(settings, endpoint, locale) + + # audio_config = speechsdk.AudioConfig(filename=chunk_path) + # speech_recognizer = 
speechsdk.SpeechRecognizer( + # speech_config=speech_config, + # audio_config=audio_config + # ) + + # result = speech_recognizer.recognize_once() + # if result.reason == speechsdk.ResultReason.RecognizedSpeech: + # print(f"[Debug] Recognized: {result.text}") + # all_phrases.append(result.text) + # elif result.reason == speechsdk.ResultReason.NoMatch: + # print(f"[Warning] No speech in {chunk_path}") + # elif result.reason == speechsdk.ResultReason.Canceled: + # print(f"[Error] {result.cancellation_details.reason}: {result.cancellation_details.error_details}") + # raise RuntimeError(f"Transcription canceled for {chunk_path}: {result.cancellation_details.error_details}") + + else: + # Use the fast-transcription API if not in sovereign or custom cloud + url = f"{endpoint}/speechtotext/transcriptions:transcribe?api-version=2024-11-15" + for idx, chunk_path in enumerate(chunk_paths, start=1): + update_callback(current_file_chunk=idx, status=f"Transcribing chunk {idx}/{len(chunk_paths)}…") + print(f"[Debug] Transcribing WAV chunk: {chunk_path}") + + with open(chunk_path, 'rb') as audio_f: + files = { + 'audio': (os.path.basename(chunk_path), audio_f, 'audio/wav'), + 'definition': (None, json.dumps({'locales':[locale]}), 'application/json') + } + if settings.get("speech_service_authentication_type") == "managed_identity": + credential = DefaultAzureCredential() + token = credential.get_token(cognitive_services_scope) + headers = {'Authorization': f'Bearer {token.token}'} + else: + key = settings.get("speech_service_key", "") + headers = {'Ocp-Apim-Subscription-Key': key} + + resp = requests.post(url, headers=headers, files=files) + try: + resp.raise_for_status() + except Exception as e: + print(f"[Error] HTTP error for {chunk_path}: {e}") + raise + + result = resp.json() + phrases = result.get('combinedPhrases', []) + print(f"[Debug] Received {len(phrases)} phrases") + all_phrases += [p.get('text','').strip() for p in phrases if p.get('text')] # 4) cleanup WAV chunks for p in chunk_paths: try: os.remove(p) - print(f"[Debug] Removed chunk: {p}") + print(f"Removed chunk: {p}") except Exception as e: print(f"[Warning] Could not remove chunk {p}: {e}") @@ -3528,7 +5411,7 @@ def process_audio_document( words = full_text.split() chunk_size = 400 total_pages = max(1, math.ceil(len(words) / chunk_size)) - print(f"[Debug] Creating {total_pages} transcript pages") + print(f"Creating {total_pages} transcript pages") for i in range(total_pages): page_text = ' '.join(words[i*chunk_size:(i+1)*chunk_size]) @@ -3589,8 +5472,12 @@ def process_document_upload_background(document_id, user_id, temp_file_path, ori enable_extract_meta_data = settings.get('enable_extract_meta_data', False) # Used by DI flow max_file_size_bytes = settings.get('max_file_size_mb', 16) * 1024 * 1024 - video_extensions = ('.mp4', '.mov', '.avi', '.mkv', '.flv') - audio_extensions = ('.mp3', '.wav', '.ogg', '.aac', '.flac', '.m4a') + # Get allowed extensions from config.py to determine which processing function to call + tabular_extensions = tuple('.' + ext for ext in TABULAR_EXTENSIONS) + image_extensions = tuple('.' + ext for ext in IMAGE_EXTENSIONS) + di_supported_extensions = tuple('.' + ext for ext in DOCUMENT_EXTENSIONS | IMAGE_EXTENSIONS) + video_extensions = tuple('.' + ext for ext in VIDEO_EXTENSIONS) + audio_extensions = tuple('.' 
+ ext for ext in AUDIO_EXTENSIONS) # --- Define update_document callback wrapper --- # This makes it easier to pass the update function to helpers without repeating args @@ -3610,6 +5497,8 @@ def update_doc_callback(**kwargs): total_chunks_saved = 0 + total_embedding_tokens = 0 + embedding_model_name = None file_ext = '' # Initialize try: @@ -3631,8 +5520,7 @@ def update_doc_callback(**kwargs): update_doc_callback(status=f"Processing file {original_filename}, type: {file_ext}") # --- 1. Dispatch to appropriate handler based on file type --- - di_supported_extensions = ('.pdf', '.docx', '.doc', '.pptx', '.ppt', '.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif', '.heif') - tabular_extensions = ('.csv', '.xlsx', '.xls') + # Note: .doc and .docm are handled separately by process_doc() using docx2txt is_group = group_id is not None @@ -3652,15 +5540,60 @@ def update_doc_callback(**kwargs): args["group_id"] = group_id if file_ext == '.txt': - total_chunks_saved = process_txt(**{k: v for k, v in args.items() if k != "file_ext"}) + result = process_txt(**{k: v for k, v in args.items() if k != "file_ext"}) + # Handle tuple return (chunks, tokens, model_name) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result + elif file_ext == '.xml': + result = process_xml(**{k: v for k, v in args.items() if k != "file_ext"}) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result + elif file_ext in ('.yaml', '.yml'): + result = process_yaml(**{k: v for k, v in args.items() if k != "file_ext"}) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result + elif file_ext == '.log': + result = process_log(**{k: v for k, v in args.items() if k != "file_ext"}) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result + elif file_ext in ('.doc', '.docm'): + result = process_doc(**{k: v for k, v in args.items() if k != "file_ext"}) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result elif file_ext == '.html': - total_chunks_saved = process_html(**{k: v for k, v in args.items() if k != "file_ext"}) + result = process_html(**{k: v for k, v in args.items() if k != "file_ext"}) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result elif file_ext == '.md': - total_chunks_saved = process_md(**{k: v for k, v in args.items() if k != "file_ext"}) + result = process_md(**{k: v for k, v in args.items() if k != "file_ext"}) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result elif file_ext == '.json': - total_chunks_saved = process_json(**{k: v for k, v in args.items() if k != "file_ext"}) + result = process_json(**{k: v for k, v in args.items() if k != "file_ext"}) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result elif file_ext in tabular_extensions: 
- total_chunks_saved = process_tabular(**args) + result = process_tabular(**args) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result elif file_ext in video_extensions: total_chunks_saved = process_video_document( document_id=document_id, @@ -3682,7 +5615,12 @@ def update_doc_callback(**kwargs): public_workspace_id=public_workspace_id ) elif file_ext in di_supported_extensions: - total_chunks_saved = process_di_document(**args) + result = process_di_document(**args) + # Handle tuple return (chunks, tokens, model_name) + if isinstance(result, tuple) and len(result) == 3: + total_chunks_saved, total_embedding_tokens, embedding_model_name = result + else: + total_chunks_saved = result else: raise ValueError(f"Unsupported file type for processing: {file_ext}") @@ -3691,7 +5629,7 @@ def update_doc_callback(**kwargs): final_status = "Processing complete" if total_chunks_saved == 0: # Provide more specific status if no chunks were saved - if file_ext in ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif', '.heif'): + if file_ext in image_extensions: final_status = "Processing complete - no text found in image" elif file_ext in tabular_extensions: final_status = "Processing complete - no data rows found or file empty" @@ -3701,14 +5639,197 @@ def update_doc_callback(**kwargs): # Final update uses the total chunks saved across all steps/sheets # For DI types, number_of_pages might have been updated during DI processing, # but let's ensure the final update reflects the *saved* chunk count accurately. - update_doc_callback( - number_of_pages=total_chunks_saved, # Final count of SAVED chunks - status=final_status, - percentage_complete=100, - current_file_chunk=None # Clear current chunk tracking - ) + # Also update embedding token tracking data + final_update_args = { + "number_of_pages": total_chunks_saved, # Final count of SAVED chunks + "status": final_status, + "percentage_complete": 100, + "current_file_chunk": None # Clear current chunk tracking + } + + # Add embedding token data if available + if total_embedding_tokens > 0: + final_update_args["embedding_tokens"] = total_embedding_tokens + if embedding_model_name: + final_update_args["embedding_model_deployment_name"] = embedding_model_name + + update_doc_callback(**final_update_args) - print(f"Document {document_id} ({original_filename}) processed successfully with {total_chunks_saved} chunks saved.") + print(f"Document {document_id} ({original_filename}) processed successfully with {total_chunks_saved} chunks saved and {total_embedding_tokens} embedding tokens used.") + + # Log document creation transaction to activity_logs container + try: + from functions_activity_logging import log_document_creation_transaction, log_token_usage + + # Retrieve final document metadata to capture all extracted fields + doc_metadata = get_document_metadata( + document_id=document_id, + user_id=user_id, + group_id=group_id, + public_workspace_id=public_workspace_id + ) + + # Determine workspace type + if public_workspace_id: + workspace_type = 'public' + elif group_id: + workspace_type = 'group' + else: + workspace_type = 'personal' + + # Log the transaction with all available metadata + log_document_creation_transaction( + user_id=user_id, + document_id=document_id, + workspace_type=workspace_type, + file_name=original_filename, + file_type=file_ext, + file_size=file_size, + page_count=total_chunks_saved, + embedding_tokens=total_embedding_tokens, 
+ embedding_model=embedding_model_name, + version=doc_metadata.get('version') if doc_metadata else None, + author=doc_metadata.get('author') if doc_metadata else None, + title=doc_metadata.get('title') if doc_metadata else None, + subject=doc_metadata.get('subject') if doc_metadata else None, + publication_date=doc_metadata.get('publication_date') if doc_metadata else None, + keywords=doc_metadata.get('keywords') if doc_metadata else None, + abstract=doc_metadata.get('abstract') if doc_metadata else None, + group_id=group_id, + public_workspace_id=public_workspace_id, + additional_metadata={ + 'status': final_status, + 'upload_date': doc_metadata.get('upload_date') if doc_metadata else None, + 'document_classification': doc_metadata.get('document_classification') if doc_metadata else None + } + ) + + # Log embedding token usage separately for easy reporting + if total_embedding_tokens > 0 and embedding_model_name: + log_token_usage( + user_id=user_id, + token_type='embedding', + total_tokens=total_embedding_tokens, + model=embedding_model_name, + workspace_type=workspace_type, + document_id=document_id, + file_name=original_filename, + group_id=group_id, + public_workspace_id=public_workspace_id, + additional_context={ + 'file_type': file_ext, + 'page_count': total_chunks_saved + } + ) + + # Mark document as logged to activity logs to prevent duplicate migration + try: + # All document containers use /id as partition key + if public_workspace_id: + doc_container = cosmos_public_documents_container + elif group_id: + doc_container = cosmos_group_documents_container + else: + doc_container = cosmos_user_documents_container + + # All document containers use document_id (/id) as partition key + partition_key = document_id + + # Read, update, and upsert the document with the flag + doc_record = doc_container.read_item(item=document_id, partition_key=partition_key) + doc_record['added_to_activity_log'] = True + doc_container.upsert_item(doc_record) + print(f"✅ Set added_to_activity_log flag for document {document_id}") + + except Exception as flag_error: + print(f"⚠️ Warning: Failed to set added_to_activity_log flag: {flag_error}") + # Don't fail if flag setting fails + + except Exception as log_error: + print(f"Error logging document creation transaction: {log_error}") + # Don't fail the entire process if logging fails + + # Create notification for document processing completion + try: + from functions_notifications import create_notification, create_group_notification, create_public_workspace_notification + + notification_title = f"Document ready: {original_filename}" + notification_message = f"Your document has been processed successfully with {total_chunks_saved} chunks." 
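# --- Editor's illustrative sketch (not part of this change) ---
# The dispatch block earlier in this function repeats the same tuple-vs-int unpacking
# for every process_* handler. Assuming each handler returns either an int chunk count
# or a (chunks, tokens, model_name) tuple as shown in that block, a hypothetical helper
# such as _unpack_processing_result could centralize the pattern:
#
#     def _unpack_processing_result(result):
#         if isinstance(result, tuple) and len(result) == 3:
#             return result
#         return result, 0, None
#
#     total_chunks_saved, total_embedding_tokens, embedding_model_name = \
#         _unpack_processing_result(process_txt(**{k: v for k, v in args.items() if k != "file_ext"}))
# --- end sketch ---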
+ + # Determine workspace type and create appropriate notification + if public_workspace_id: + # Notification for all public workspace members + create_public_workspace_notification( + public_workspace_id=public_workspace_id, + notification_type='document_processing_complete', + title=notification_title, + message=notification_message, + link_url='/public_directory', + link_context={ + 'workspace_type': 'public', + 'public_workspace_id': public_workspace_id, + 'document_id': document_id + }, + metadata={ + 'document_id': document_id, + 'file_name': original_filename, + 'chunks': total_chunks_saved + } + ) + print(f"📢 Created notification for public workspace {public_workspace_id}") + + elif group_id: + # Notification for all group members - get group name + from functions_group import find_group_by_id + group = find_group_by_id(group_id) + group_name = group.get('name', 'Unknown Group') if group else 'Unknown Group' + + create_group_notification( + group_id=group_id, + notification_type='document_processing_complete', + title=notification_title, + message=f"Document uploaded to {group_name} has been processed successfully with {total_chunks_saved} chunks.", + link_url='/group_workspaces', + link_context={ + 'workspace_type': 'group', + 'group_id': group_id, + 'document_id': document_id + }, + metadata={ + 'document_id': document_id, + 'file_name': original_filename, + 'chunks': total_chunks_saved, + 'group_name': group_name, + 'group_id': group_id + } + ) + print(f"📢 Created notification for group {group_id} ({group_name})") + + else: + # Personal notification for the uploader + create_notification( + user_id=user_id, + notification_type='document_processing_complete', + title=notification_title, + message=notification_message, + link_url='/workspace', + link_context={ + 'workspace_type': 'personal', + 'document_id': document_id + }, + metadata={ + 'document_id': document_id, + 'file_name': original_filename, + 'chunks': total_chunks_saved + } + ) + print(f"📢 Created notification for user {user_id}") + + except Exception as notif_error: + print(f"⚠️ Warning: Failed to create notification: {notif_error}") + # Don't fail the entire process if notification creation fails + print(f"⚠️ Warning: Failed to log document creation transaction: {log_error}") + # Don't fail the document processing if logging fails except Exception as e: error_msg = f"Processing failed: {str(e)}" @@ -4230,4 +6351,4 @@ def get_documents_shared_with_group(group_id): except Exception as e: print(f"Error getting documents shared with group {group_id}: {e}") - return [] + return [] \ No newline at end of file diff --git a/application/single_app/functions_global_actions.py b/application/single_app/functions_global_actions.py index 07ce3a19..91f0d9f9 100644 --- a/application/single_app/functions_global_actions.py +++ b/application/single_app/functions_global_actions.py @@ -10,10 +10,10 @@ import json import traceback from datetime import datetime - from config import cosmos_global_actions_container +from functions_keyvault import keyvault_plugin_save_helper, keyvault_plugin_get_helper, keyvault_plugin_delete_helper, SecretReturnType -def get_global_actions(): +def get_global_actions(return_type=SecretReturnType.TRIGGER): """ Get all global actions. 
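# Editor's illustrative sketch (not part of this diff): the new return_type parameter
# lets callers decide how action secrets are surfaced. Assuming the SecretReturnType
# enum defined in functions_keyvault.py, usage might look like:
#
#     actions_for_ui = get_global_actions()  # default TRIGGER: secrets shown as "Stored_In_KeyVault"
#     actions_for_execution = get_global_actions(return_type=SecretReturnType.VALUE)  # resolves real secret values
#     actions_with_refs = get_global_actions(return_type=SecretReturnType.NAME)  # keeps Key Vault secret names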
@@ -25,7 +25,8 @@ def get_global_actions(): query="SELECT * FROM c", enable_cross_partition_query=True )) - + # Resolve Key Vault references for each action + actions = [keyvault_plugin_get_helper(a, scope_value=a.get('id'), scope="global", return_type=return_type) for a in actions] return actions except Exception as e: @@ -34,7 +35,7 @@ def get_global_actions(): return [] -def get_global_action(action_id): +def get_global_action(action_id, return_type=SecretReturnType.TRIGGER): """ Get a specific global action by ID. @@ -45,13 +46,12 @@ def get_global_action(action_id): dict: Action data or None if not found """ try: - from config import cosmos_global_actions_container - action = cosmos_global_actions_container.read_item( item=action_id, partition_key=action_id ) - + # Resolve Key Vault references + action = keyvault_plugin_get_helper(action, scope_value=action_id, scope="global", return_type=return_type) print(f"✅ Found global action: {action_id}") return action @@ -71,21 +71,17 @@ def save_global_action(action_data): dict: Saved action data or None if failed """ try: - from config import cosmos_global_actions_container - # Ensure required fields if 'id' not in action_data: action_data['id'] = str(uuid.uuid4()) - # Add metadata action_data['is_global'] = True action_data['created_at'] = datetime.utcnow().isoformat() action_data['updated_at'] = datetime.utcnow().isoformat() - print(f"💾 Saving global action: {action_data.get('name', 'Unknown')}") - + # Store secrets in Key Vault before upsert + action_data = keyvault_plugin_save_helper(action_data, scope_value=action_data.get('id'), scope="global") result = cosmos_global_actions_container.upsert_item(body=action_data) - print(f"✅ Global action saved successfully: {result['id']}") return result @@ -106,15 +102,15 @@ def delete_global_action(action_id): bool: True if successful, False otherwise """ try: - from config import cosmos_global_actions_container - print(f"🗑️ Deleting global action: {action_id}") - + # Delete secrets from Key Vault before deleting the action + action = get_global_action(action_id) + if action: + keyvault_plugin_delete_helper(action, scope_value=action_id, scope="global") cosmos_global_actions_container.delete_item( item=action_id, partition_key=action_id ) - print(f"✅ Global action deleted successfully: {action_id}") return True diff --git a/application/single_app/functions_global_agents.py b/application/single_app/functions_global_agents.py index 9d4e934a..5cf6a3d4 100644 --- a/application/single_app/functions_global_agents.py +++ b/application/single_app/functions_global_agents.py @@ -14,6 +14,9 @@ from functions_authentication import get_current_user_id from datetime import datetime from config import cosmos_global_agents_container +from functions_keyvault import keyvault_agent_save_helper, keyvault_agent_get_helper, keyvault_agent_delete_helper +from functions_settings import * +from functions_agent_payload import sanitize_agent_payload, AgentPayloadError def ensure_default_global_agent_exists(): @@ -38,6 +41,8 @@ def ensure_default_global_agent_exists(): "azure_agent_apim_gpt_api_version": "", "enable_agent_gpt_apim": False, "is_global": True, + "is_group": False, + "agent_type": "local", "instructions": ( "You are a highly capable research assistant. 
Your role is to help the user investigate academic, technical, and real-world topics by finding relevant information, summarizing key points, identifying knowledge gaps, and suggesting credible sources for further study.\n\n" "You must always:\n- Think step-by-step and work methodically.\n- Distinguish between fact, inference, and opinion.\n- Clearly state your assumptions when making inferences.\n- Cite authoritative sources when possible (e.g., peer-reviewed journals, academic publishers, government agencies).\n- Avoid speculation unless explicitly asked for.\n- When asked to summarize, preserve the intent, nuance, and technical accuracy of the original content.\n- When generating questions, aim for depth and clarity to guide rigorous inquiry.\n- Present answers in a clear, structured format using bullet points, tables, or headings when appropriate.\n\n" @@ -46,6 +51,7 @@ def ensure_default_global_agent_exists(): ), "actions_to_load": [], "other_settings": {}, + "max_completion_tokens": 4096 } save_global_agent(default_agent) log_event( @@ -54,13 +60,27 @@ def ensure_default_global_agent_exists(): "agent_name": default_agent["name"] }, ) - print("✅ Default global agent created.") + print("Default global agent created.") else: log_event( "At least one global agent already exists.", extra={"existing_agents_count": len(agents)}, ) - print("ℹ️ At least one global agent already exists.") + print("At least one global agent already exists.") + + settings = get_settings() + needs_default = False + global_selected = settings.get("global_selected_agent") if settings else None + if not isinstance(global_selected, dict): + needs_default = True + elif global_selected.get("name", "") == "": + needs_default = True + if settings and needs_default: + settings["global_selected_agent"] = { + "name": default_agent["name"], + "is_global": True + } + save_settings(settings) except Exception as e: log_event( f"Error ensuring default global agent exists: {e}", @@ -68,7 +88,7 @@ def ensure_default_global_agent_exists(): level=logging.ERROR, exceptionTraceback=True ) - print(f"❌ Error ensuring default global agent exists: {e}") + print(f"Error ensuring default global agent exists: {e}") traceback.print_exc() def get_global_agents(): @@ -83,6 +103,17 @@ def get_global_agents(): query="SELECT * FROM c", enable_cross_partition_query=True )) + # Mask or replace sensitive keys for UI display + agents = [keyvault_agent_get_helper(agent, agent.get('id', ''), scope="global") for agent in agents] + for agent in agents: + if agent.get('max_completion_tokens') is None: + agent['max_completion_tokens'] = -1 + agent.setdefault('is_global', True) + agent.setdefault('is_group', False) + agent.setdefault('agent_type', 'local') + # Remove empty reasoning_effort to prevent validation errors + if agent.get('reasoning_effort') == '': + agent.pop('reasoning_effort', None) return agents except Exception as e: log_event( @@ -90,7 +121,7 @@ def get_global_agents(): extra={"exception": str(e)}, exceptionTraceback=True ) - print(f"❌ Error getting global agents: {str(e)}") + print(f"Error getting global agents: {str(e)}") traceback.print_exc() return [] @@ -110,7 +141,16 @@ def get_global_agent(agent_id): item=agent_id, partition_key=agent_id ) - print(f"✅ Found global agent: {agent_id}") + agent = keyvault_agent_get_helper(agent, agent_id, scope="global") + if agent.get('max_completion_tokens') is None: + agent['max_completion_tokens'] = -1 + agent.setdefault('is_global', True) + agent.setdefault('is_group', False) + 
agent.setdefault('agent_type', 'local') + # Remove empty reasoning_effort to prevent validation errors + if agent.get('reasoning_effort') == '': + agent.pop('reasoning_effort', None) + print(f"Found global agent: {agent_id}") return agent except Exception as e: log_event( @@ -119,7 +159,7 @@ def get_global_agent(agent_id): level=logging.ERROR, exceptionTraceback=True ) - print(f"❌ Error getting global agent {agent_id}: {str(e)}") + print(f"Error getting global agent {agent_id}: {str(e)}") return None @@ -134,25 +174,35 @@ def save_global_agent(agent_data): dict: Saved agent data or None if failed """ try: - # Ensure required fields user_id = get_current_user_id() - if 'id' not in agent_data: - agent_data['id'] = str(uuid.uuid4()) - # Add metadata - agent_data['is_global'] = True - agent_data['created_at'] = datetime.utcnow().isoformat() - agent_data['updated_at'] = datetime.utcnow().isoformat() + cleaned_agent = sanitize_agent_payload(agent_data) + if 'id' not in cleaned_agent: + cleaned_agent['id'] = str(uuid.uuid4()) + cleaned_agent['is_global'] = True + cleaned_agent['is_group'] = False + cleaned_agent['created_at'] = datetime.utcnow().isoformat() + cleaned_agent['updated_at'] = datetime.utcnow().isoformat() log_event( "Saving global agent.", - extra={"agent_name": agent_data.get('name', 'Unknown')}, + extra={"agent_name": cleaned_agent.get('name', 'Unknown')}, ) - print(f"💾 Saving global agent: {agent_data.get('name', 'Unknown')}") - result = cosmos_global_agents_container.upsert_item(body=agent_data) + print(f"Saving global agent: {cleaned_agent.get('name', 'Unknown')}") + + # Use the new helper to store sensitive agent keys in Key Vault + agent_data = keyvault_agent_save_helper(agent_data, agent_data['id'], scope="global") + if agent_data.get('max_completion_tokens') is None: + agent_data['max_completion_tokens'] = -1 # Default value + + # Remove empty reasoning_effort to avoid schema validation errors + if agent_data.get('reasoning_effort') == '': + agent_data.pop('reasoning_effort', None) + + result = cosmos_global_agents_container.upsert_item(body=cleaned_agent) log_event( "Global agent saved successfully.", extra={"agent_id": result['id'], "user_id": user_id}, ) - print(f"✅ Global agent saved successfully: {result['id']}") + print(f"Global agent saved successfully: {result['id']}") return result except Exception as e: log_event( @@ -161,7 +211,7 @@ def save_global_agent(agent_data): level=logging.ERROR, exceptionTraceback=True ) - print(f"❌ Error saving global agent: {str(e)}") + print(f"Error saving global agent: {str(e)}") traceback.print_exc() return None @@ -178,7 +228,9 @@ def delete_global_agent(agent_id): """ try: user_id = get_current_user_id() - print(f"🗑️ Deleting global agent: {agent_id}") + print(f"Deleting global agent: {agent_id}") + agent_dict = get_global_agent(agent_id) + keyvault_agent_delete_helper(agent_dict, agent_id, scope="global") cosmos_global_agents_container.delete_item( item=agent_id, partition_key=agent_id @@ -187,7 +239,7 @@ def delete_global_agent(agent_id): "Global agent deleted successfully.", extra={"agent_id": agent_id, "user_id": user_id}, ) - print(f"✅ Global agent deleted successfully: {agent_id}") + print(f"Global agent deleted successfully: {agent_id}") return True except Exception as e: log_event( @@ -196,6 +248,6 @@ def delete_global_agent(agent_id): level=logging.ERROR, exceptionTraceback=True ) - print(f"❌ Error deleting global agent {agent_id}: {str(e)}") + print(f"Error deleting global agent {agent_id}: {str(e)}") 
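# Editor's illustrative note (not part of this diff): the Key Vault helpers used by
# save_global_agent / get_global_agent above round-trip the GPT key roughly as follows,
# based on keyvault_agent_save_helper and keyvault_agent_get_helper in
# functions_keyvault.py, assuming Key Vault storage is enabled in settings
# (example values are hypothetical):
#
#     agent = {"id": "a1b2", "name": "ResearchAgent", "azure_openai_gpt_key": "real-key"}
#     saved = keyvault_agent_save_helper(agent, agent["id"], scope="global")
#     # saved["azure_openai_gpt_key"] -> "a1b2--agent--global--ResearchAgent" (Key Vault secret name)
#     shown = keyvault_agent_get_helper(saved, agent["id"], scope="global")
#     # shown["azure_openai_gpt_key"] -> "Stored_In_KeyVault" (default TRIGGER return type)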
traceback.print_exc() return False diff --git a/application/single_app/functions_group.py b/application/single_app/functions_group.py index 2d1179f1..dcf63420 100644 --- a/application/single_app/functions_group.py +++ b/application/single_app/functions_group.py @@ -3,6 +3,7 @@ from config import * from functions_authentication import * from functions_settings import * +from typing import Iterable def create_group(name, description): @@ -128,6 +129,32 @@ def get_user_role_in_group(group_doc, user_id): return None +def require_active_group(user_id: str) -> str: + """Return the active group id for a user or raise ValueError if missing.""" + settings = get_user_settings(user_id) + active_group_id = settings.get("settings", {}).get("activeGroupOid") + if not active_group_id: + raise ValueError("No active group selected") + return active_group_id + + +def assert_group_role(user_id: str, group_id: str, allowed_roles: Iterable[str] = ("Owner", "Admin")) -> str: + """Ensure the user holds one of the allowed roles for the group.""" + group_doc = find_group_by_id(group_id) + if not group_doc: + raise LookupError("Group not found") + + role = get_user_role_in_group(group_doc, user_id) + if not role: + raise PermissionError("User is not a member of this group") + + allowed = {r.lower() for r in allowed_roles} + if role.lower() not in allowed: + raise PermissionError("Insufficient permissions for this group") + + return role + + def map_group_list_for_frontend(groups, current_user_id): """ Utility to produce a simplified list of group data @@ -162,4 +189,85 @@ def is_user_in_group(group_doc, user_id): for u in group_doc.get("users", []): if u["userId"] == user_id: return True - return False \ No newline at end of file + return False + + +def check_group_status_allows_operation(group_doc, operation_type): + """ + Check if the group's status allows the specified operation. + + Args: + group_doc: The group document from Cosmos DB + operation_type: One of 'upload', 'delete', 'chat', 'view' + + Returns: + tuple: (allowed: bool, reason: str) + + Status definitions: + - active: All operations allowed + - locked: Read-only mode (view and chat only, no modifications) + - upload_disabled: No new uploads, but deletions and chat allowed + - inactive: No operations allowed except admin viewing + """ + if not group_doc: + return False, "Group not found" + + status = group_doc.get('status', 'active') # Default to 'active' if not set + + # Define what each status allows + status_permissions = { + 'active': { + 'upload': True, + 'delete': True, + 'chat': True, + 'view': True + }, + 'locked': { + 'upload': False, + 'delete': False, + 'chat': True, + 'view': True + }, + 'upload_disabled': { + 'upload': False, + 'delete': True, + 'chat': True, + 'view': True + }, + 'inactive': { + 'upload': False, + 'delete': False, + 'chat': False, + 'view': False + } + } + + # Get permissions for current status + permissions = status_permissions.get(status, status_permissions['active']) + + # Check if operation is allowed + allowed = permissions.get(operation_type, False) + + # Generate helpful reason message if not allowed + if not allowed: + reasons = { + 'locked': { + 'upload': 'This group is locked (read-only mode). Document uploads are disabled.', + 'delete': 'This group is locked (read-only mode). Document deletions are disabled.' + }, + 'upload_disabled': { + 'upload': 'Document uploads are disabled for this group.' + }, + 'inactive': { + 'upload': 'This group is inactive. 
All operations are disabled.', + 'delete': 'This group is inactive. All operations are disabled.', + 'chat': 'This group is inactive. All operations are disabled.', + 'view': 'This group is inactive. Access is restricted to administrators.' + } + } + + reason = reasons.get(status, {}).get(operation_type, + f'This operation is not allowed when group status is "{status}".') + return False, reason + + return True, "" \ No newline at end of file diff --git a/application/single_app/functions_group_actions.py b/application/single_app/functions_group_actions.py new file mode 100644 index 00000000..bc6aa4ea --- /dev/null +++ b/application/single_app/functions_group_actions.py @@ -0,0 +1,209 @@ +# functions_group_actions.py + +"""Group-level plugin/action management helpers.""" + +import re +import uuid +from datetime import datetime +from typing import Any, Dict, List, Optional +from functions_debug import debug_print +from azure.cosmos import exceptions +from flask import current_app + +from config import cosmos_group_actions_container +from functions_keyvault import ( + SecretReturnType, + keyvault_plugin_delete_helper, + keyvault_plugin_get_helper, + keyvault_plugin_save_helper, +) + + +_NAME_PATTERN = re.compile(r"^[A-Za-z0-9_-]+$") + + +def get_group_actions( + group_id: str, return_type: SecretReturnType = SecretReturnType.TRIGGER +) -> List[Dict[str, Any]]: + """Return all actions/plugins scoped to the provided group.""" + try: + query = "SELECT * FROM c WHERE c.group_id = @group_id" + parameters = [ + {"name": "@group_id", "value": group_id}, + ] + results = list( + cosmos_group_actions_container.query_items( + query=query, + parameters=parameters, + partition_key=group_id, + ) + ) + return [_clean_action(action, group_id, return_type) for action in results] + except exceptions.CosmosResourceNotFoundError: + return [] + except Exception as exc: + debug_print( + "Error fetching group actions for %s: %s", group_id, exc + ) + return [] + + +def get_group_action( + group_id: str, action_id: str, return_type: SecretReturnType = SecretReturnType.TRIGGER +) -> Optional[Dict[str, Any]]: + """Fetch a single group action by id or name.""" + try: + action = cosmos_group_actions_container.read_item( + item=action_id, + partition_key=group_id, + ) + except exceptions.CosmosResourceNotFoundError: + query = "SELECT * FROM c WHERE c.group_id = @group_id AND c.name = @name" + parameters = [ + {"name": "@group_id", "value": group_id}, + {"name": "@name", "value": action_id}, + ] + actions = list( + cosmos_group_actions_container.query_items( + query=query, + parameters=parameters, + partition_key=group_id, + ) + ) + if not actions: + return None + action = actions[0] + except Exception as exc: + debug_print( + "Error fetching group action %s for %s: %s", action_id, group_id, exc + ) + return None + + return _clean_action(action, group_id, return_type) + + +def save_group_action(group_id: str, action_data: Dict[str, Any]) -> Dict[str, Any]: + """Create or update a group action entry.""" + payload = dict(action_data) + action_id = payload.get("id") or str(uuid.uuid4()) + + payload["id"] = action_id + payload["group_id"] = group_id + payload["last_updated"] = datetime.utcnow().isoformat() + + payload.setdefault("name", "") + payload.setdefault("displayName", payload.get("name", "")) + payload.setdefault("type", "") + payload.setdefault("description", "") + payload.setdefault("endpoint", "") + payload.setdefault("auth", {"type": "identity"}) + payload.setdefault("metadata", {}) + 
payload.setdefault("additionalFields", {}) + + if not isinstance(payload["auth"], dict): + payload["auth"] = {"type": "identity"} + elif "type" not in payload["auth"]: + payload["auth"]["type"] = "identity" + + payload.pop("user_id", None) + + payload = keyvault_plugin_save_helper(payload, scope_value=group_id, scope="group") + + try: + stored = cosmos_group_actions_container.upsert_item(body=payload) + return _clean_action(stored, group_id, SecretReturnType.TRIGGER) + except Exception as exc: + debug_print( + "Error saving group action %s for %s: %s", action_id, group_id, exc + ) + raise + + +def delete_group_action(group_id: str, action_id: str) -> bool: + """Remove a group action entry if it exists.""" + try: + action = cosmos_group_actions_container.read_item( + item=action_id, + partition_key=group_id, + ) + except exceptions.CosmosResourceNotFoundError: + return False + + try: + keyvault_plugin_delete_helper(action, scope_value=group_id, scope="group") + cosmos_group_actions_container.delete_item( + item=action_id, + partition_key=group_id, + ) + return True + except Exception as exc: + debug_print( + "Error deleting group action %s for %s: %s", action_id, group_id, exc + ) + raise + + +def validate_group_action_payload(payload: Dict[str, Any], partial: bool = False) -> None: + """Validate incoming payload data for group actions.""" + if not isinstance(payload, dict): + raise ValueError("Action payload must be an object") + + required_fields = ( + "name", + "displayName", + "type", + "description", + "endpoint", + "auth", + "metadata", + "additionalFields", + ) + + if not partial: + missing = [field for field in required_fields if field not in payload] + if missing: + raise ValueError(f"Missing required action fields: {', '.join(missing)}") + + if "name" in payload: + name = payload["name"] + if not isinstance(name, str) or not name or not _NAME_PATTERN.fullmatch(name): + raise ValueError("Action name must be alphanumeric with optional underscores or hyphens") + + if "displayName" in payload and not isinstance(payload["displayName"], str): + raise ValueError("displayName must be a string") + + if "type" in payload and not isinstance(payload["type"], str): + raise ValueError("type must be a string") + + if "description" in payload and not isinstance(payload["description"], str): + raise ValueError("description must be a string") + + if "endpoint" in payload and not isinstance(payload["endpoint"], str): + raise ValueError("endpoint must be a string") + + if "auth" in payload and not isinstance(payload["auth"], dict): + raise ValueError("auth must be an object") + + if "metadata" in payload and not isinstance(payload["metadata"], dict): + raise ValueError("metadata must be an object") + + if "additionalFields" in payload and not isinstance(payload["additionalFields"], dict): + raise ValueError("additionalFields must be an object") + + +def _clean_action( + action: Dict[str, Any], + group_id: str, + return_type: SecretReturnType, +) -> Dict[str, Any]: + cleaned = {k: v for k, v in action.items() if not k.startswith("_")} + cleaned = keyvault_plugin_get_helper( + cleaned, + scope_value=group_id, + scope="group", + return_type=return_type, + ) + cleaned.setdefault("is_global", False) + cleaned.setdefault("is_group", True) + cleaned.setdefault("scope", "group") + return cleaned diff --git a/application/single_app/functions_group_agents.py b/application/single_app/functions_group_agents.py new file mode 100644 index 00000000..8bf6f87c --- /dev/null +++ 
b/application/single_app/functions_group_agents.py @@ -0,0 +1,208 @@ +# functions_group_agents.py + +"""Group-level agent management helpers.""" + +import re +import uuid +from datetime import datetime +from typing import Any, Dict, List, Optional +from functions_debug import debug_print +from azure.cosmos import exceptions +from flask import current_app + +from config import cosmos_group_agents_container +from functions_keyvault import ( + keyvault_agent_delete_helper, + keyvault_agent_get_helper, + keyvault_agent_save_helper, +) +from functions_agent_payload import sanitize_agent_payload + + +_NAME_PATTERN = re.compile(r"^[A-Za-z0-9_-]+$") + + +def get_group_agents(group_id: str) -> List[Dict[str, Any]]: + """Return all agents scoped to the provided group.""" + try: + query = "SELECT * FROM c WHERE c.group_id = @group_id" + parameters = [ + {"name": "@group_id", "value": group_id}, + ] + results = list( + cosmos_group_agents_container.query_items( + query=query, + parameters=parameters, + partition_key=group_id, + ) + ) + return [_clean_agent(agent) for agent in results] + except exceptions.CosmosResourceNotFoundError: + return [] + except Exception as exc: + debug_print( + "Error fetching group agents for %s: %s", group_id, exc + ) + return [] + + +def get_group_agent(group_id: str, agent_id: str) -> Optional[Dict[str, Any]]: + """Fetch a single group agent document.""" + try: + agent = cosmos_group_agents_container.read_item( + item=agent_id, + partition_key=group_id, + ) + return _clean_agent(agent) + except exceptions.CosmosResourceNotFoundError: + return None + except Exception as exc: + debug_print( + "Error fetching group agent %s for %s: %s", agent_id, group_id, exc + ) + return None + + +def save_group_agent(group_id: str, agent_data: Dict[str, Any]) -> Dict[str, Any]: + """Create or update a group agent entry.""" + payload = sanitize_agent_payload(agent_data) + agent_id = payload.get("id") or str(uuid.uuid4()) + payload["id"] = agent_id + payload["group_id"] = group_id + payload["last_updated"] = datetime.utcnow().isoformat() + payload["is_global"] = False + payload["is_group"] = True + + # Required/defaulted fields + payload.setdefault("name", "") + payload.setdefault("display_name", payload.get("name", "")) + payload.setdefault("description", "") + payload.setdefault("instructions", "") + payload.setdefault("actions_to_load", []) + payload.setdefault("other_settings", {}) + payload.setdefault("max_completion_tokens", -1) + payload.setdefault("enable_agent_gpt_apim", False) + payload.setdefault("agent_type", "local") + + # Ensure optional Azure fields exist + payload.setdefault("azure_openai_gpt_endpoint", "") + payload.setdefault("azure_openai_gpt_key", "") + payload.setdefault("azure_openai_gpt_deployment", "") + payload.setdefault("azure_openai_gpt_api_version", "") + payload.setdefault("reasoning_effort", "") + payload.setdefault("azure_agent_apim_gpt_endpoint", "") + payload.setdefault("azure_agent_apim_gpt_subscription_key", "") + payload.setdefault("azure_agent_apim_gpt_deployment", "") + payload.setdefault("azure_agent_apim_gpt_api_version", "") + + # Remove empty reasoning_effort to avoid schema validation errors + if payload.get("reasoning_effort") == "": + payload.pop("reasoning_effort", None) + + # Remove user-specific residue if present + payload.pop("user_id", None) + + if payload.get("max_completion_tokens") is None: + payload["max_completion_tokens"] = -1 + + # Store sensitive values in Key Vault before persistence + payload = 
keyvault_agent_save_helper(payload, payload["id"], scope="group") + + try: + stored = cosmos_group_agents_container.upsert_item(body=payload) + return _clean_agent(stored) + except Exception as exc: + debug_print( + "Error saving group agent %s for %s: %s", agent_id, group_id, exc + ) + raise + + +def delete_group_agent(group_id: str, agent_id: str) -> bool: + """Remove a group agent entry if it exists.""" + try: + agent = cosmos_group_agents_container.read_item( + item=agent_id, + partition_key=group_id, + ) + except exceptions.CosmosResourceNotFoundError: + return False + + try: + keyvault_agent_delete_helper(agent, agent.get("id", agent_id), scope="group") + cosmos_group_agents_container.delete_item( + item=agent_id, + partition_key=group_id, + ) + return True + except Exception as exc: + debug_print( + "Error deleting group agent %s for %s: %s", agent_id, group_id, exc + ) + raise + + +def validate_group_agent_payload(payload: Dict[str, Any], partial: bool = False) -> None: + """Validate incoming payload data for group agents.""" + if not isinstance(payload, dict): + raise ValueError("Agent payload must be an object") + + required_fields = ( + "name", + "display_name", + "description", + "instructions", + "actions_to_load", + "other_settings", + "max_completion_tokens", + ) + + if not partial: + missing = [field for field in required_fields if field not in payload] + if missing: + raise ValueError(f"Missing required agent fields: {', '.join(missing)}") + + if "name" in payload: + name = payload["name"] + if not isinstance(name, str) or not name or not _NAME_PATTERN.fullmatch(name): + raise ValueError("Agent name must be alphanumeric with optional underscores or hyphens") + + if "display_name" in payload and not isinstance(payload["display_name"], str): + raise ValueError("display_name must be a string") + + if "description" in payload and not isinstance(payload["description"], str): + raise ValueError("description must be a string") + + if "instructions" in payload and not isinstance(payload["instructions"], str): + raise ValueError("instructions must be a string") + + if "actions_to_load" in payload: + actions = payload["actions_to_load"] + if not isinstance(actions, list) or not all(isinstance(a, str) for a in actions): + raise ValueError("actions_to_load must be a list of strings") + + if "other_settings" in payload and not isinstance(payload["other_settings"], dict): + raise ValueError("other_settings must be an object") + + if "max_completion_tokens" in payload: + tokens = payload["max_completion_tokens"] + if not isinstance(tokens, int): + raise ValueError("max_completion_tokens must be an integer") + + +def _clean_agent(agent: Dict[str, Any]) -> Dict[str, Any]: + cleaned = {k: v for k, v in agent.items() if not k.startswith("_")} + cleaned = keyvault_agent_get_helper( + cleaned, + cleaned.get("id", ""), + scope="group", + ) + if cleaned.get("max_completion_tokens") is None: + cleaned["max_completion_tokens"] = -1 + cleaned.setdefault("is_global", False) + cleaned.setdefault("is_group", True) + cleaned.setdefault("agent_type", "local") + # Remove empty reasoning_effort to prevent validation errors + if cleaned.get("reasoning_effort") == "": + cleaned.pop("reasoning_effort", None) + return cleaned diff --git a/application/single_app/functions_keyvault.py b/application/single_app/functions_keyvault.py new file mode 100644 index 00000000..2094814f --- /dev/null +++ b/application/single_app/functions_keyvault.py @@ -0,0 +1,550 @@ +# functions_keyvault.py + +import re +import logging 
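# Editor's illustrative note (not part of this diff): secrets written by this module
# follow the naming convention built by build_full_secret_name further below,
# "<scope_value>--<source>--<scope>--<secret_name>". For example (hypothetical values),
# an action named "JiraPlugin" belonging to group "g123" would be stored under:
#
#     "g123--action--group--JiraPlugin"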
+from functions_appinsights import log_event +from config import * +from functions_authentication import * +from functions_settings import * +from enum import Enum +import app_settings_cache + +try: + from azure.identity import DefaultAzureCredential + from azure.keyvault.secrets import SecretClient +except ImportError as e: + raise ImportError("Required Azure SDK packages are not installed. Please install azure-identity and azure-keyvault-secrets.") from e + +""" +KEY_VAULT_DOMAIN # ENV VAR from config.py +enable_key_vault_secret_storage # setting from functions_settings.py +key_vault_name # setting from functions_settings.py +key_vault_identity # setting from functions_settings.py +""" + +supported_sources = [ + 'action', + 'action-addset', + 'agent', + 'other' +] + +supported_scopes = [ + 'global', + 'user', + 'group' +] + +supported_action_auth_types = [ + 'key', + 'servicePrincipal', + 'basic', + 'username_password', + 'connection_string' +] + +ui_trigger_word = "Stored_In_KeyVault" + +class SecretReturnType(Enum): + VALUE = "value" + TRIGGER = "trigger" + NAME = "name" + +def retrieve_secret_from_key_vault(secret_name, scope_value, scope="global", source="global"): + """ + Retrieve a secret from Key Vault using a dynamic name based on source, scope, and scope_value. + + Args: + secret_name (str): The base name of the secret. + scope_value (str): The value for the scope (e.g., user id). + scope (str): The scope (e.g., 'user', 'global'). + source (str): The source (e.g., 'agent', 'plugin'). + + Returns: + str: The value of the retrieved secret. + Raises: + Exception: If retrieval fails or configuration is invalid. + """ + if source not in supported_sources: + logging.error(f"Source '{source}' is not supported. Supported sources: {supported_sources}") + raise ValueError(f"Source '{source}' is not supported. Supported sources: {supported_sources}") + if scope not in supported_scopes: + logging.error(f"Scope '{scope}' is not supported. Supported scopes: {supported_scopes}") + raise ValueError(f"Scope '{scope}' is not supported. Supported scopes: {supported_scopes}") + + full_secret_name = build_full_secret_name(secret_name, scope_value, source, scope) + return retrieve_secret_from_key_vault_by_full_name(full_secret_name) + +def retrieve_secret_from_key_vault_by_full_name(full_secret_name): + """ + Retrieve a secret from Key Vault using a preformatted full secret name. + + Args: + full_secret_name (str): The full secret name (already formatted). + + Returns: + str: The value of the retrieved secret. + Raises: + Exception: If retrieval fails or configuration is invalid. 
+ """ + settings = app_settings_cache.get_settings_cache() + enable_key_vault_secret_storage = settings.get("enable_key_vault_secret_storage", False) + if not enable_key_vault_secret_storage: + return full_secret_name + + key_vault_name = settings.get("key_vault_name", None) + if not key_vault_name: + return full_secret_name + + if not validate_secret_name_dynamic(full_secret_name): + return full_secret_name + + try: + key_vault_url = f"https://{key_vault_name}{KEY_VAULT_DOMAIN}" + secret_client = SecretClient(vault_url=key_vault_url, credential=get_keyvault_credential()) + + retrieved_secret = secret_client.get_secret(full_secret_name) + print(f"Secret '{full_secret_name}' retrieved successfully from Key Vault.") + return retrieved_secret.value + except Exception as e: + logging.error(f"Failed to retrieve secret '{full_secret_name}' from Key Vault: {str(e)}") + return full_secret_name + + +def store_secret_in_key_vault(secret_name, secret_value, scope_value, source="global", scope="global"): + """ + Store a secret in Key Vault using a dynamic name based on source, scope, and scope_value. + + Args: + secret_name (str): The base name of the secret. + secret_value (str): The value to store in Key Vault. + scope_value (str): The value for the scope (e.g., user id). + source (str): The source (e.g., 'agent', 'plugin'). + scope (str): The scope (e.g., 'user', 'global'). + + Returns: + str: The full secret name used in Key Vault. + Raises: + Exception: If storing fails or configuration is invalid. + """ + settings = app_settings_cache.get_settings_cache() + enable_key_vault_secret_storage = settings.get("enable_key_vault_secret_storage", False) + if not enable_key_vault_secret_storage: + logging.warn(f"Key Vault secret storage is not enabled.") + return secret_value + + key_vault_name = settings.get("key_vault_name", None) + if not key_vault_name: + logging.warn(f"Key Vault name is not configured.") + return secret_value + + if source not in supported_sources: + logging.error(f"Source '{source}' is not supported. Supported sources: {supported_sources}") + raise ValueError(f"Source '{source}' is not supported. Supported sources: {supported_sources}") + if scope not in supported_scopes: + logging.error(f"Scope '{scope}' is not supported. Supported scopes: {supported_scopes}") + raise ValueError(f"Scope '{scope}' is not supported. Supported scopes: {supported_scopes}") + + + full_secret_name = build_full_secret_name(secret_name, scope_value, source, scope) + + try: + key_vault_url = f"https://{key_vault_name}{KEY_VAULT_DOMAIN}" + secret_client = SecretClient(vault_url=key_vault_url, credential=get_keyvault_credential()) + secret_client.set_secret(full_secret_name, secret_value) + print(f"Secret '{full_secret_name}' stored successfully in Key Vault.") + return full_secret_name + except Exception as e: + logging.error(f"Failed to store secret '{full_secret_name}' in Key Vault: {str(e)}") + return secret_value + +def build_full_secret_name(secret_name, scope_value, source, scope): + """ + Build the full secret name for Key Vault and check its length. + + Args: + secret_name (str): The base name of the secret. + scope_value (str): The value for the scope (e.g., user id). + source (str): The source (e.g., 'agent', 'plugin'). + scope (str): The scope (e.g., 'user', 'global'). + + Returns: + str: The constructed full secret name. + Raises: + ValueError: If the name exceeds 127 characters. 
+ """ + full_secret_name = f"{clean_name_for_keyvault(scope_value)}--{source}--{scope}--{clean_name_for_keyvault(secret_name)}" + if not validate_secret_name_dynamic(full_secret_name): + logging.error(f"The full secret name '{full_secret_name}' is invalid.") + raise ValueError(f"The full secret name '{full_secret_name}' is invalid.") + return full_secret_name + +def validate_secret_name_dynamic(secret_name): + """ + Validate a Key Vault secret name using a dynamically built regex based on supported scopes and sources. + The secret_name and scope_value can be wildcards, but scope and source must match supported lists. + + Args: + secret_name (str): The full secret name to validate. + + Returns: + bool: True if valid, False otherwise. + """ + # Build regex pattern dynamically + scopes_pattern = '|'.join(re.escape(scope) for scope in supported_scopes) + sources_pattern = '|'.join(re.escape(source) for source in supported_sources) + # Wildcards for secret_name and scope_value + pattern = rf"^(.+)--({sources_pattern})--({scopes_pattern})--(.+)$" + match = re.match(pattern, secret_name) + if not match: + return False + # Optionally, check length + if len(secret_name) > 127: + return False + return True + +def keyvault_agent_save_helper(agent_dict, scope_value, scope="global"): + """ + For agent dicts, store sensitive keys in Key Vault and replace their values with the Key Vault secret name. + Only processes 'azure_agent_apim_gpt_subscription_key' and 'azure_openai_gpt_key'. + + Args: + agent_dict (dict): The agent dictionary to process. + scope_value (str): The value for the scope (e.g., agent id). + scope (str): The scope (e.g., 'user', 'global'). + + Returns: + dict: A new agent dict with sensitive values replaced by Key Vault references. + Raises: + Exception: If storing a key in Key Vault fails. + """ + settings = app_settings_cache.get_settings_cache() + enable_key_vault_secret_storage = settings.get("enable_key_vault_secret_storage", False) + key_vault_name = settings.get("key_vault_name", None) + if not enable_key_vault_secret_storage or not key_vault_name: + return agent_dict + source = "agent" + updated = dict(agent_dict) + agent_name = updated.get('name', 'agent') + use_apim = updated.get('enable_agent_gpt_apim', False) + key = 'azure_agent_apim_gpt_subscription_key' if use_apim else 'azure_openai_gpt_key' + if key in updated and updated[key]: + value = updated[key] + secret_name = agent_name + if value == ui_trigger_word: + updated[key] = build_full_secret_name(secret_name, scope_value, source, scope) + elif validate_secret_name_dynamic(value): + updated[key] = build_full_secret_name(secret_name, scope_value, source, scope) + else: + try: + full_secret_name = store_secret_in_key_vault(secret_name, value, scope_value, source=source, scope=scope) + updated[key] = full_secret_name + except Exception as e: + logging.error(f"Failed to store agent key '{key}' in Key Vault: {e}") + raise Exception(f"Failed to store agent key '{key}' in Key Vault: {e}") + else: + log_event(f"Agent key '{key}' not found while APIM is '{use_apim}' or empty in agent '{agent_name}'. No action taken.", level="INFO") + return updated + +def keyvault_agent_get_helper(agent_dict, scope_value, scope="global", return_type=SecretReturnType.TRIGGER): + """ + For agent dicts, retrieve sensitive keys from Key Vault if they are stored as Key Vault references. + Only processes 'azure_agent_apim_gpt_subscription_key' and 'azure_openai_gpt_key'. + + Args: + agent_dict (dict): The agent dictionary to process. 
+ scope_value (str): The value for the scope (e.g., agent id). + scope (str): The scope (e.g., 'user', 'global'). + return_actual_key (bool): If True, retrieves the actual secret value from Key Vault. If False, replaces with ui_trigger_word. + + Returns: + dict: A new agent dict with sensitive values replaced by Key Vault references. + Raises: + Exception: If retrieving a key from Key Vault fails. + """ + settings = app_settings_cache.get_settings_cache() + enable_key_vault_secret_storage = settings.get("enable_key_vault_secret_storage", False) + key_vault_name = settings.get("key_vault_name", None) + if not enable_key_vault_secret_storage or not key_vault_name: + return agent_dict + updated = dict(agent_dict) + agent_name = updated.get('name', 'agent') + use_apim = updated.get('enable_agent_gpt_apim', False) + key = 'azure_agent_apim_gpt_subscription_key' if use_apim else 'azure_openai_gpt_key' + if key in updated and updated[key]: + value = updated[key] + if validate_secret_name_dynamic(value): + try: + if return_type == SecretReturnType.VALUE: + actual_key = retrieve_secret_from_key_vault_by_full_name(value) + updated[key] = actual_key + elif return_type == SecretReturnType.NAME: + updated[key] = value + else: + updated[key] = ui_trigger_word + except Exception as e: + logging.error(f"Failed to retrieve agent key '{key}' for agent '{agent_name}' from Key Vault: {e}") + return updated + return updated + +def keyvault_plugin_save_helper(plugin_dict, scope_value, scope="global"): + """ + For plugin dicts, store the auth.key in Key Vault if auth.type is 'key', 'servicePrincipal', 'basic', or 'connection_string', + and replace its value with the Key Vault secret name. Also supports dynamic secret storage for any additionalFields key ending with '__Secret'. + + Args: + plugin_dict (dict): The plugin dictionary to process. + scope_value (str): The value for the scope (e.g., plugin id). + scope (str): The scope (e.g., 'user', 'global'). + + Returns: + dict: A new plugin dict with sensitive values replaced by Key Vault references. + Raises: + Exception: If storing a key in Key Vault fails. + + Feature: + Any key in additionalFields ending with '__Secret' will be stored in Key Vault and replaced with a Key Vault reference. + This allows plugin writers to dynamically store secrets without custom code. + """ + if scope not in supported_scopes: + logging.error(f"Scope '{scope}' is not supported. Supported scopes: {supported_scopes}") + raise ValueError(f"Scope '{scope}' is not supported. 
Supported scopes: {supported_scopes}") + source = "action" # Use 'action' for plugins per app convention + updated = dict(plugin_dict) + plugin_name = updated.get('name', 'plugin') + auth = updated.get('auth', {}) + if isinstance(auth, dict): + auth_type = auth.get('type', None) + if auth_type in supported_action_auth_types and 'key' in auth and auth['key']: + value = auth['key'] + if value == ui_trigger_word: + auth['key'] = build_full_secret_name(plugin_name, scope_value, source, scope) + updated['auth'] = auth + elif validate_secret_name_dynamic(value): + auth['key'] = build_full_secret_name(plugin_name, scope_value, source, scope) + updated['auth'] = auth + else: + try: + full_secret_name = store_secret_in_key_vault(plugin_name, value, scope_value, source=source, scope=scope) + new_auth = dict(auth) + new_auth['key'] = full_secret_name + updated['auth'] = new_auth + except Exception as e: + logging.error(f"Failed to store plugin key in Key Vault: {e}") + raise Exception(f"Failed to store plugin key in Key Vault: {e}") + else: + print(f"Auth type '{auth_type}' does not require Key Vault storage. Does not match ") + + # Handle additionalFields dynamic secrets + additional_fields = updated.get('additionalFields', {}) + if isinstance(additional_fields, dict): + new_additional_fields = dict(additional_fields) + for k, v in additional_fields.items(): + if k.endswith('__Secret') and v: + addset_source = 'action-addset' + base_field = k[:-8] # Remove '__Secret' + akv_key = f"{plugin_name}-{base_field}".replace('__', '-') + full_secret_name = build_full_secret_name(akv_key, scope_value, addset_source, scope) + if v == ui_trigger_word: + new_additional_fields[k] = full_secret_name + continue + elif validate_secret_name_dynamic(v): + new_additional_fields[k] = full_secret_name + continue + else: + try: + full_secret_name = store_secret_in_key_vault(akv_key, v, scope_value, source=addset_source, scope=scope) + new_additional_fields[k] = full_secret_name + except Exception as e: + logging.error(f"Failed to store plugin additionalField secret '{k}' in Key Vault: {e}") + raise Exception(f"Failed to store plugin additionalField secret '{k}' in Key Vault: {e}") + updated['additionalFields'] = new_additional_fields + return updated +# Helper to retrieve plugin secrets from Key Vault +def keyvault_plugin_get_helper(plugin_dict, scope_value, scope="global", return_type=SecretReturnType.TRIGGER): + """ + For plugin dicts, retrieve secrets from Key Vault for auth.key and any additionalFields key ending with '__Secret'. + If the value is a Key Vault reference, retrieve the actual secret and replace with ui_trigger_word. + + Args: + plugin_dict (dict): The plugin dictionary to process. + scope_value (str): The value for the scope (e.g., plugin id). + scope (str): The scope (e.g., 'user', 'global'). + + Returns: + dict: A new plugin dict with sensitive values replaced by ui_trigger_word if stored in Key Vault. + """ + if scope not in supported_scopes: + logging.error(f"Scope '{scope}' is not supported. Supported scopes: {supported_scopes}") + raise ValueError(f"Scope '{scope}' is not supported. 
Supported scopes: {supported_scopes}") + updated = dict(plugin_dict) + plugin_name = updated.get('name', 'plugin') + auth = updated.get('auth', {}) + if isinstance(auth, dict): + if 'key' in auth and auth['key']: + value = auth['key'] + if validate_secret_name_dynamic(value): + try: + if return_type == SecretReturnType.VALUE: + actual_key = retrieve_secret_from_key_vault_by_full_name(value) + new_auth = dict(auth) + new_auth['key'] = actual_key + updated['auth'] = new_auth + elif return_type == SecretReturnType.NAME: + new_auth = dict(auth) + new_auth['key'] = value + updated['auth'] = new_auth + else: + new_auth = dict(auth) + new_auth['key'] = ui_trigger_word + updated['auth'] = new_auth + except Exception as e: + logging.error(f"Failed to retrieve action {plugin_name} key from Key Vault: {e}") + raise Exception(f"Failed to retrieve action {plugin_name} key from Key Vault: {e}") + + additional_fields = updated.get('additionalFields', {}) + if isinstance(additional_fields, dict): + new_additional_fields = dict(additional_fields) + for k, v in additional_fields.items(): + if k.endswith('__Secret') and v and validate_secret_name_dynamic(v): + addset_source = 'action-addset' + base_field = k[:-8] # Remove '__Secret' + akv_key = f"{plugin_name}-{base_field}".replace('__', '-') + try: + if return_type == SecretReturnType.VALUE: + actual_secret = retrieve_secret_from_key_vault(f"{akv_key}", scope_value, scope, addset_source) + new_additional_fields[k] = actual_secret + elif return_type == SecretReturnType.NAME: + new_additional_fields[k] = v + else: + new_additional_fields[k] = ui_trigger_word + except Exception as e: + logging.error(f"Failed to retrieve action additionalField secret '{k}' from Key Vault: {e}") + raise Exception(f"Failed to retrieve action additionalField secret '{k}' from Key Vault: {e}") + updated['additionalFields'] = new_additional_fields + return updated +# Helper to delete plugin secrets from Key Vault +def keyvault_plugin_delete_helper(plugin_dict, scope_value, scope="global"): + """ + For plugin dicts, delete secrets from Key Vault for auth.key and any additionalFields key ending with '__Secret'. + Only deletes if the value is a Key Vault reference. + + Args: + plugin_dict (dict): The plugin dictionary to process. + scope_value (str): The value for the scope (e.g., plugin id). + scope (str): The scope (e.g., 'user', 'global'). + + Returns: + plugin_dict (dict): The original plugin dict. + Raises: + """ + if scope not in supported_scopes: + log_event(f"Scope '{scope}' is not supported. Supported scopes: {supported_scopes}", level="WARNING") + raise ValueError(f"Scope '{scope}' is not supported. 
Supported scopes: {supported_scopes}") + settings = app_settings_cache.get_settings_cache() + enable_key_vault_secret_storage = settings.get("enable_key_vault_secret_storage", False) + key_vault_name = settings.get("key_vault_name", None) + if not enable_key_vault_secret_storage or not key_vault_name: + log_event(f"Key Vault secret storage is not enabled or key vault name is missing.", level="WARNING") + return plugin_dict + source = "action" + plugin_name = plugin_dict.get('name', 'plugin') + auth = plugin_dict.get('auth', {}) + if isinstance(auth, dict): + if 'key' in auth and auth['key']: + secret_name = auth['key'] + if validate_secret_name_dynamic(secret_name): + try: + key_vault_url = f"https://{key_vault_name}{KEY_VAULT_DOMAIN}" + log_event(f"Deleting action secret '{secret_name}' for action '{plugin_name}' for '{scope}' '{scope_value}'", level="INFO") + client = SecretClient(vault_url=key_vault_url, credential=get_keyvault_credential()) + client.begin_delete_secret(secret_name) + except Exception as e: + logging.error(f"Error deleting action secret '{secret_name}' for action '{plugin_name}': {e}") + raise Exception(f"Error deleting action secret '{secret_name}' for action '{plugin_name}': {e}") + + additional_fields = plugin_dict.get('additionalFields', {}) + if isinstance(additional_fields, dict): + for k, v in additional_fields.items(): + if k.endswith('__Secret') and v and validate_secret_name_dynamic(v): + addset_source = 'action-addset' + base_field = k[:-8] # Remove '__Secret' + akv_key = f"{plugin_name}-{base_field}".replace('__', '-') + try: + keyvault_secret_name = build_full_secret_name(akv_key, scope_value, addset_source, scope) + key_vault_url = f"https://{key_vault_name}{KEY_VAULT_DOMAIN}" + log_event(f"Deleting action additionalField secret '{k}' for action '{plugin_name}' for '{scope}' '{scope_value}'", level="INFO") + client = SecretClient(vault_url=key_vault_url, credential=get_keyvault_credential()) + client.begin_delete_secret(keyvault_secret_name) + except Exception as e: + logging.error(f"Error deleting action additionalField secret '{k}' for action '{plugin_name}': {e}") + raise Exception(f"Error deleting action additionalField secret '{k}' for action '{plugin_name}': {e}") + return plugin_dict + +# Helper to delete agent secrets from Key Vault +def keyvault_agent_delete_helper(agent_dict, scope_value, scope="global"): + """ + For agent dicts, delete sensitive keys from Key Vault if they are stored as Key Vault references. + Only processes 'azure_agent_apim_gpt_subscription_key' and 'azure_openai_gpt_key'. + + Args: + agent_dict (dict): The agent dictionary to process. + scope_value (str): The value for the scope (e.g., agent id). + scope (str): The scope (e.g., 'user', 'global'). + + Returns: + agent_dict (dict): The original agent dict. 
+ """ + settings = app_settings_cache.get_settings_cache() + enable_key_vault_secret_storage = settings.get("enable_key_vault_secret_storage", False) + key_vault_name = settings.get("key_vault_name", None) + if not enable_key_vault_secret_storage or not key_vault_name: + return agent_dict + source = "agent" + updated = dict(agent_dict) + agent_name = updated.get('name', 'agent') + use_apim = updated.get('enable_agent_gpt_apim', False) + keys = ['azure_agent_apim_gpt_subscription_key'] if use_apim else ['azure_openai_gpt_key'] + for key in keys: + if key in updated and updated[key]: + secret_name = updated[key] + if validate_secret_name_dynamic(secret_name): + try: + key_vault_url = f"https://{key_vault_name}{KEY_VAULT_DOMAIN}" + log_event(f"Deleting agent secret '{secret_name}' for agent '{agent_name}' for '{scope}' '{scope_value}'", level="INFO") + client = SecretClient(vault_url=key_vault_url, credential=get_keyvault_credential()) + client.begin_delete_secret(secret_name) + except Exception as e: + logging.error(f"Error deleting secret '{secret_name}' for agent '{agent_name}': {e}") + raise Exception(f"Error deleting secret '{secret_name}' for agent '{agent_name}': {e}") + return agent_dict + +def get_keyvault_credential(): + """ + Get the Key Vault credential using DefaultAzureCredential, optionally with a managed identity client ID. + + Returns: + DefaultAzureCredential: The credential object for Key Vault access. + """ + settings = app_settings_cache.get_settings_cache() + key_vault_identity = settings.get("key_vault_identity", None) + if key_vault_identity is not None: + credential = DefaultAzureCredential(managed_identity_client_id=key_vault_identity) + else: + credential = DefaultAzureCredential() + return credential + +def clean_name_for_keyvault(name): + """ + Clean a name to be used as a Key Vault secret name by removing invalid characters and truncating to 127 characters. + + Args: + name (str): The name to clean. + + Returns: + str: The cleaned name. + """ + # Remove invalid characters + cleaned_name = re.sub(r"[^a-zA-Z0-9-]", "-", name) + # Truncate to 127 characters + return cleaned_name[:127] diff --git a/application/single_app/functions_notifications.py b/application/single_app/functions_notifications.py new file mode 100644 index 00000000..15ce11e4 --- /dev/null +++ b/application/single_app/functions_notifications.py @@ -0,0 +1,579 @@ +# functions_notifications.py + +""" +Notifications Management + +This module handles all operations related to notifications stored in the +notifications container. Supports personal, group, and public workspace scoped +notifications with per-user read/dismiss tracking. 
+ +Version: 0.234.032 +Implemented in: 0.234.032 +""" + +# Imports (grouped after docstring) +import uuid +from datetime import datetime, timezone +from azure.cosmos import exceptions +from flask import current_app +import logging +from config import cosmos_notifications_container +from functions_group import find_group_by_id +from functions_debug import debug_print +from functions_public_workspaces import find_public_workspace_by_id, get_user_public_workspaces + +# Constants +TTL_60_DAYS = 60 * 24 * 60 * 60 # 60 days in seconds (5184000) + +# Notification type registry for extensibility +NOTIFICATION_TYPES = { + 'document_processing_complete': { + 'icon': 'bi-file-earmark-check', + 'color': 'success' + }, + 'document_processing_failed': { + 'icon': 'bi-file-earmark-x', + 'color': 'danger' + }, + 'ownership_transfer_request': { + 'icon': 'bi-arrow-left-right', + 'color': 'warning' + }, + 'group_deletion_request': { + 'icon': 'bi-trash', + 'color': 'danger' + }, + 'document_deletion_request': { + 'icon': 'bi-trash', + 'color': 'warning' + }, + 'system_announcement': { + 'icon': 'bi-megaphone', + 'color': 'info' + } +} + + +def create_notification( + user_id=None, + group_id=None, + public_workspace_id=None, + notification_type='system_announcement', + title='', + message='', + link_url='', + link_context=None, + metadata=None, + assignment=None +): + """ + Create a notification for personal, group, or public workspace scope. + + Args: + user_id (str, optional): User ID for personal notifications (deprecated if using assignment) + group_id (str, optional): Group ID for group-scoped notifications + public_workspace_id (str, optional): Public workspace ID for workspace notifications + notification_type (str): Type of notification (must be in NOTIFICATION_TYPES) + title (str): Notification title + message (str): Notification message + link_url (str): URL to navigate to when clicked + link_context (dict, optional): Additional context for navigation + metadata (dict, optional): Flexible metadata for type-specific data + assignment (dict, optional): Role and ownership-based assignment: + { + 'roles': ['Admin', 'ControlCenterAdmin'], # Users with these roles see notification + 'personal_workspace_owner_id': 'user123', # Personal workspace owner + 'group_owner_id': 'user456', # Group owner + 'public_workspace_owner_id': 'user789' # Public workspace owner + } + If any role matches or any owner ID matches user's ID, notification is visible. 
+ + Returns: + dict: Created notification document or None on error + """ + try: + # Determine scope and partition key + scope = 'personal' + partition_key = user_id + + # If assignment is provided, always use assignment partition for role-based notifications + if assignment: + # Assignment-based notifications always use the special assignment partition + # This allows role-based filtering across all users + scope = 'assignment' + partition_key = 'assignment-notifications' + else: + # Legacy behavior - partition by specific workspace + if group_id: + scope = 'group' + partition_key = group_id + elif public_workspace_id: + scope = 'public_workspace' + partition_key = public_workspace_id + + if not partition_key: + debug_print("create_notification: No partition key provided") + return None + + # Validate notification type + if notification_type not in NOTIFICATION_TYPES: + debug_print(f"Unknown notification type: {notification_type}") + + notification_doc = { + 'id': str(uuid.uuid4()), + 'user_id': user_id, + 'group_id': group_id, + 'public_workspace_id': public_workspace_id, + 'scope': scope, + 'notification_type': notification_type, + 'title': title, + 'message': message, + 'created_at': datetime.now(timezone.utc).isoformat(), + 'ttl': TTL_60_DAYS, + 'read_by': [], + 'dismissed_by': [], + 'link_url': link_url or '', + 'link_context': link_context or {}, + 'metadata': metadata or {}, + 'assignment': assignment or None + } + + # Create in Cosmos with partition key based on scope + cosmos_notifications_container.create_item(notification_doc) + + debug_print( + f"Notification created: {notification_doc['id']} " + f"[{scope}] [{notification_type}] for partition: {partition_key}" + ) + + return notification_doc + + except Exception as e: + debug_print(f"Error creating notification: {e}") + return None + + +def create_group_notification(group_id, notification_type, title, message, link_url='', link_context=None, metadata=None): + """ + Create a notification for all members of a group. + + Args: + group_id (str): Group ID + notification_type (str): Type of notification + title (str): Notification title + message (str): Notification message + link_url (str): URL to navigate to when clicked + link_context (dict, optional): Additional context for navigation + metadata (dict, optional): Additional metadata + + Returns: + dict: Created notification or None on error + """ + return create_notification( + group_id=group_id, + notification_type=notification_type, + title=title, + message=message, + link_url=link_url, + link_context=link_context or {'workspace_type': 'group', 'group_id': group_id}, + metadata=metadata + ) + + +def create_public_workspace_notification( + public_workspace_id, + notification_type, + title, + message, + link_url='', + link_context=None, + metadata=None +): + """ + Create a notification for all members of a public workspace. 
+ + Args: + public_workspace_id (str): Public workspace ID + notification_type (str): Type of notification + title (str): Notification title + message (str): Notification message + link_url (str): URL to navigate to when clicked + link_context (dict, optional): Additional context for navigation + metadata (dict, optional): Additional metadata + + Returns: + dict: Created notification or None on error + """ + return create_notification( + public_workspace_id=public_workspace_id, + notification_type=notification_type, + title=title, + message=message, + link_url=link_url, + link_context=link_context or { + 'workspace_type': 'public', + 'public_workspace_id': public_workspace_id + }, + metadata=metadata + ) + + +def get_user_notifications(user_id, page=1, per_page=20, include_read=True, include_dismissed=False, user_roles=None): + """ + Fetch notifications visible to a user from personal, group, and public workspace scopes. + Supports assignment-based notifications that target users by roles and/or ownership. + + Args: + user_id (str): User's unique identifier + page (int): Page number (1-indexed) + per_page (int): Items per page + include_read (bool): Include notifications already read by user + include_dismissed (bool): Include notifications dismissed by user + user_roles (list, optional): User's roles for assignment-based notifications + + Returns: + dict: { + 'notifications': [...], + 'total': int, + 'page': int, + 'per_page': int, + 'has_more': bool + } + """ + try: + all_notifications = [] + + # 1. Fetch personal notifications + personal_query = "SELECT * FROM c WHERE c.user_id = @user_id" + personal_params = [{"name": "@user_id", "value": user_id}] + + personal_notifications = list(cosmos_notifications_container.query_items( + query=personal_query, + parameters=personal_params, + partition_key=user_id + )) + all_notifications.extend(personal_notifications) + + # 2. Fetch group notifications for user's groups + from functions_group import get_user_groups + user_groups = get_user_groups(user_id) + + for group in user_groups: + group_id = group['id'] + group_query = "SELECT * FROM c WHERE c.group_id = @group_id" + group_params = [{"name": "@group_id", "value": group_id}] + + group_notifications = list(cosmos_notifications_container.query_items( + query=group_query, + parameters=group_params, + enable_cross_partition_query=True + )) + all_notifications.extend(group_notifications) + + # 3. Fetch public workspace notifications + from functions_public_workspaces import get_user_public_workspaces + user_workspaces = get_user_public_workspaces(user_id) + + for workspace in user_workspaces: + workspace_id = workspace['id'] + workspace_query = "SELECT * FROM c WHERE c.public_workspace_id = @workspace_id" + workspace_params = [{"name": "@workspace_id", "value": workspace_id}] + + workspace_notifications = list(cosmos_notifications_container.query_items( + query=workspace_query, + parameters=workspace_params, + enable_cross_partition_query=True + )) + all_notifications.extend(workspace_notifications) + + # 4. 
Fetch assignment-based notifications + assignment_query = "SELECT * FROM c WHERE c.scope = 'assignment'" + assignment_notifications = list(cosmos_notifications_container.query_items( + query=assignment_query, + enable_cross_partition_query=True + )) + + # Filter assignment notifications based on user's roles and ownership + for notif in assignment_notifications: + assignment = notif.get('assignment') + if not assignment: + continue + + # Check if user matches assignment criteria + user_matches = False + + # Check roles + if user_roles and assignment.get('roles'): + for role in assignment.get('roles', []): + if role in user_roles: + user_matches = True + break + + # Check ownership IDs + if not user_matches: + if assignment.get('personal_workspace_owner_id') == user_id: + user_matches = True + elif assignment.get('group_owner_id') == user_id: + user_matches = True + elif assignment.get('public_workspace_owner_id') == user_id: + user_matches = True + + if user_matches: + all_notifications.append(notif) + + # Filter based on read/dismissed status + filtered_notifications = [] + for notif in all_notifications: + notif_id = notif.get('id', 'unknown') + read_by = notif.get('read_by', []) + dismissed_by = notif.get('dismissed_by', []) + + if not include_dismissed and user_id in dismissed_by: + continue + if not include_read and user_id in read_by: + continue + + # Add UI metadata + notif['is_read'] = user_id in read_by + notif['is_dismissed'] = user_id in dismissed_by + notif['type_config'] = NOTIFICATION_TYPES.get( + notif.get('notification_type'), + NOTIFICATION_TYPES['system_announcement'] + ) + + filtered_notifications.append(notif) + + # Sort by created_at descending (newest first) + filtered_notifications.sort( + key=lambda x: x.get('created_at', ''), + reverse=True + ) + + # Pagination + total = len(filtered_notifications) + start_idx = (page - 1) * per_page + end_idx = start_idx + per_page + paginated = filtered_notifications[start_idx:end_idx] + + return { + 'notifications': paginated, + 'total': total, + 'page': page, + 'per_page': per_page, + 'has_more': end_idx < total + } + + except Exception as e: + debug_print(f"Error fetching notifications for user {user_id}: {e}") + return { + 'notifications': [], + 'total': 0, + 'page': page, + 'per_page': per_page, + 'has_more': False + } + + +def get_unread_notification_count(user_id): + """ + Get count of unread notifications for a user across all scopes. + + Args: + user_id (str): User's unique identifier + + Returns: + int: Count of unread notifications (capped at 10 for efficiency) + """ + try: + # Get notifications without pagination + result = get_user_notifications( + user_id=user_id, + page=1, + per_page=10, # Only need first 10 for badge display + include_read=False, + include_dismissed=False + ) + + return min(result['total'], 10) # Cap at 10 for display purposes + + except Exception as e: + debug_print(f"Error counting unread notifications for {user_id}: {e}") + return 0 + + +def mark_notification_read(notification_id, user_id): + """ + Mark a notification as read by a specific user. 
+ + Args: + notification_id (str): Notification ID + user_id (str): User ID marking as read + + Returns: + bool: True if successful, False otherwise + """ + try: + # First, find the notification across all partition keys + query = "SELECT * FROM c WHERE c.id = @notification_id" + params = [{"name": "@notification_id", "value": notification_id}] + + notifications = list(cosmos_notifications_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + )) + + if not notifications: + debug_print(f"Notification {notification_id} not found") + return False + + notification = notifications[0] + + # Determine partition key + partition_key = notification.get('user_id') or notification.get('group_id') or notification.get('public_workspace_id') + + if not partition_key: + debug_print(f"No partition key found for notification {notification_id}") + return False + + # Add user to read_by if not already present + read_by = notification.get('read_by', []) + if user_id not in read_by: + read_by.append(user_id) + notification['read_by'] = read_by + + cosmos_notifications_container.upsert_item(notification) + debug_print(f"Notification {notification_id} marked read by {user_id}") + + return True + + except Exception as e: + debug_print(f"Error marking notification {notification_id} as read: {e}") + return False + + +def dismiss_notification(notification_id, user_id): + """ + Dismiss a notification for a specific user (adds to dismissed_by). + + Args: + notification_id (str): Notification ID + user_id (str): User ID dismissing the notification + + Returns: + bool: True if successful, False otherwise + """ + try: + # Find notification across all partitions + query = "SELECT * FROM c WHERE c.id = @notification_id" + params = [{"name": "@notification_id", "value": notification_id}] + + notifications = list(cosmos_notifications_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + )) + + if not notifications: + debug_print(f"Notification {notification_id} not found") + return False + + notification = notifications[0] + + # Determine partition key + partition_key = notification.get('user_id') or notification.get('group_id') or notification.get('public_workspace_id') + + if not partition_key: + debug_print(f"No partition key found for notification {notification_id}") + return False + + # Add user to dismissed_by + dismissed_by = notification.get('dismissed_by', []) + if user_id not in dismissed_by: + dismissed_by.append(user_id) + notification['dismissed_by'] = dismissed_by + + cosmos_notifications_container.upsert_item(notification) + debug_print(f"Notification {notification_id} dismissed by {user_id}") + + return True + + except Exception as e: + debug_print(f"Error dismissing notification {notification_id}: {e}") + return False + + +def mark_all_read(user_id): + """ + Mark all unread notifications as read for a user. 
+ + Args: + user_id (str): User's unique identifier + + Returns: + int: Number of notifications marked as read + """ + try: + # Get all unread notifications + result = get_user_notifications( + user_id=user_id, + page=1, + per_page=1000, # Get all unread + include_read=False, + include_dismissed=True + ) + + count = 0 + for notification in result['notifications']: + if mark_notification_read(notification['id'], user_id): + count += 1 + + debug_print(f"Marked {count} notifications as read for user {user_id}") + return count + + except Exception as e: + debug_print(f"Error marking all notifications as read for {user_id}: {e}") + return 0 + + +def delete_notification(notification_id): + """ + Permanently delete a notification (admin only). + + Args: + notification_id (str): Notification ID to delete + + Returns: + bool: True if successful, False otherwise + """ + try: + # Find notification to get partition key + query = "SELECT * FROM c WHERE c.id = @notification_id" + params = [{"name": "@notification_id", "value": notification_id}] + + notifications = list(cosmos_notifications_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + )) + + if not notifications: + return False + + notification = notifications[0] + partition_key = notification.get('user_id') or notification.get('group_id') or notification.get('public_workspace_id') + + if not partition_key: + return False + + cosmos_notifications_container.delete_item( + item=notification_id, + partition_key=partition_key + ) + + debug_print(f"Notification {notification_id} permanently deleted") + return True + + except Exception as e: + debug_print(f"Error deleting notification {notification_id}: {e}") + return False diff --git a/application/single_app/functions_personal_actions.py b/application/single_app/functions_personal_actions.py index b9cbaa64..6345438e 100644 --- a/application/single_app/functions_personal_actions.py +++ b/application/single_app/functions_personal_actions.py @@ -11,9 +11,13 @@ from datetime import datetime from azure.cosmos import exceptions from flask import current_app +from functions_keyvault import keyvault_plugin_save_helper, keyvault_plugin_get_helper, keyvault_plugin_delete_helper, SecretReturnType +from functions_settings import get_user_settings, update_user_settings +from functions_debug import debug_print +from config import cosmos_personal_actions_container import logging -def get_personal_actions(user_id): +def get_personal_actions(user_id, return_type=SecretReturnType.TRIGGER): """ Fetch all personal actions/plugins for a user. 
@@ -24,8 +28,6 @@ def get_personal_actions(user_id): list: List of action/plugin dictionaries """ try: - from config import cosmos_personal_actions_container - query = "SELECT * FROM c WHERE c.user_id = @user_id" parameters = [{"name": "@user_id", "value": user_id}] @@ -35,21 +37,21 @@ def get_personal_actions(user_id): partition_key=user_id )) - # Remove Cosmos metadata for cleaner response + # Remove Cosmos metadata for cleaner response and resolve Key Vault references cleaned_actions = [] for action in actions: cleaned_action = {k: v for k, v in action.items() if not k.startswith('_')} + cleaned_action = keyvault_plugin_get_helper(cleaned_action, scope_value=user_id, scope="user", return_type=return_type) cleaned_actions.append(cleaned_action) - return cleaned_actions except exceptions.CosmosResourceNotFoundError: return [] except Exception as e: - current_app.logger.error(f"Error fetching personal actions for user {user_id}: {e}") + debug_print(f"Error fetching personal actions for user {user_id}: {e}") return [] -def get_personal_action(user_id, action_id): +def get_personal_action(user_id, action_id, return_type=SecretReturnType.TRIGGER): """ Fetch a specific personal action/plugin. @@ -61,9 +63,6 @@ def get_personal_action(user_id, action_id): dict: Action dictionary or None if not found """ try: - from config import cosmos_personal_actions_container - - # Try to find by ID first try: action = cosmos_personal_actions_container.read_item( item=action_id, @@ -87,12 +86,13 @@ def get_personal_action(user_id, action_id): return None action = actions[0] - # Remove Cosmos metadata + # Remove Cosmos metadata and resolve Key Vault references cleaned_action = {k: v for k, v in action.items() if not k.startswith('_')} + cleaned_action = keyvault_plugin_get_helper(cleaned_action, scope_value=user_id, scope="user", return_type=return_type) return cleaned_action except Exception as e: - current_app.logger.error(f"Error fetching action {action_id} for user {user_id}: {e}") + debug_print(f"Error fetching action {action_id} for user {user_id}: {e}") return None def save_personal_action(user_id, action_data): @@ -107,8 +107,6 @@ def save_personal_action(user_id, action_data): dict: Saved action data with ID """ try: - from config import cosmos_personal_actions_container - # Check if an action with this name already exists existing_action = None if 'name' in action_data and action_data['name']: @@ -146,14 +144,15 @@ def save_personal_action(user_id, action_data): elif 'type' not in action_data['auth']: action_data['auth']['type'] = 'identity' + # Store secrets in Key Vault before upsert + action_data = keyvault_plugin_save_helper(action_data, scope_value=user_id, scope="user") result = cosmos_personal_actions_container.upsert_item(body=action_data) - # Remove Cosmos metadata from response cleaned_result = {k: v for k, v in result.items() if not k.startswith('_')} return cleaned_result except Exception as e: - current_app.logger.error(f"Error saving action for user {user_id}: {e}") + debug_print(f"Error saving action for user {user_id}: {e}") raise def delete_personal_action(user_id, action_id): @@ -168,13 +167,13 @@ def delete_personal_action(user_id, action_id): bool: True if deleted, False if not found """ try: - from config import cosmos_personal_actions_container - # Try to find the action first to get the correct ID action = get_personal_action(user_id, action_id) if not action: return False + # Delete secrets from Key Vault before deleting the action + keyvault_plugin_delete_helper(action, 
scope_value=user_id, scope="user") cosmos_personal_actions_container.delete_item( item=action['id'], partition_key=user_id @@ -184,7 +183,7 @@ def delete_personal_action(user_id, action_id): except exceptions.CosmosResourceNotFoundError: return False except Exception as e: - current_app.logger.error(f"Error deleting action {action_id} for user {user_id}: {e}") + debug_print(f"Error deleting action {action_id} for user {user_id}: {e}") raise def ensure_migration_complete(user_id): @@ -199,8 +198,6 @@ def ensure_migration_complete(user_id): int: Number of actions migrated (0 if already migrated) """ try: - from functions_settings import get_user_settings, update_user_settings - user_settings = get_user_settings(user_id) plugins = user_settings.get('settings', {}).get('plugins', []) @@ -217,13 +214,13 @@ def ensure_migration_complete(user_id): settings_to_update = user_settings.get('settings', {}) settings_to_update['plugins'] = [] # Set to empty array instead of removing update_user_settings(user_id, settings_to_update) - current_app.logger.info(f"Cleaned up legacy plugin data for user {user_id} (already migrated)") + debug_print(f"Cleaned up legacy plugin data for user {user_id} (already migrated)") return 0 return 0 except Exception as e: - current_app.logger.error(f"Error ensuring action migration complete for user {user_id}: {e}") + debug_print(f"Error ensuring action migration complete for user {user_id}: {e}") return 0 def migrate_actions_from_user_settings(user_id): @@ -237,8 +234,6 @@ def migrate_actions_from_user_settings(user_id): int: Number of actions migrated """ try: - from functions_settings import get_user_settings, update_user_settings - user_settings = get_user_settings(user_id) plugins = user_settings.get('settings', {}).get('plugins', []) @@ -251,32 +246,31 @@ def migrate_actions_from_user_settings(user_id): try: # Skip if plugin already exists in personal container if plugin.get('name') in existing_action_names: - current_app.logger.info(f"Skipping migration of plugin '{plugin.get('name')}' - already exists") + debug_print(f"Skipping migration of plugin '{plugin.get('name')}' - already exists") continue - # Ensure plugin has an ID (generate GUID if missing) if 'id' not in plugin or not plugin['id']: plugin['id'] = str(uuid.uuid4()) - + # Store secrets in Key Vault before migration + plugin = keyvault_plugin_save_helper(plugin, scope_value=user_id, scope="user") save_personal_action(user_id, plugin) migrated_count += 1 - except Exception as e: - current_app.logger.error(f"Error migrating plugin {plugin.get('name', 'unknown')} for user {user_id}: {e}") + debug_print(f"Error migrating plugin {plugin.get('name', 'unknown')} for user {user_id}: {e}") # Always remove plugins from user settings after processing (even if no new ones migrated) settings_to_update = user_settings.get('settings', {}) settings_to_update['plugins'] = [] # Set to empty array instead of removing update_user_settings(user_id, settings_to_update) - current_app.logger.info(f"Migrated {migrated_count} new actions for user {user_id}, cleaned up legacy data") + debug_print(f"Migrated {migrated_count} new actions for user {user_id}, cleaned up legacy data") return migrated_count except Exception as e: - current_app.logger.error(f"Error during action migration for user {user_id}: {e}") + debug_print(f"Error during action migration for user {user_id}: {e}") return 0 -def get_actions_by_names(user_id, action_names): +def get_actions_by_names(user_id, action_names, return_type=SecretReturnType.TRIGGER): """ Get 
multiple actions by their names. @@ -288,8 +282,6 @@ def get_actions_by_names(user_id, action_names): list: List of action dictionaries """ try: - from config import cosmos_personal_actions_container - if not action_names: return [] @@ -311,15 +303,16 @@ def get_actions_by_names(user_id, action_names): cleaned_actions = [] for action in actions: cleaned_action = {k: v for k, v in action.items() if not k.startswith('_')} + cleaned_action = keyvault_plugin_get_helper(cleaned_action, scope_value=user_id, scope="user", return_type=return_type) cleaned_actions.append(cleaned_action) return cleaned_actions except Exception as e: - current_app.logger.error(f"Error fetching actions by names for user {user_id}: {e}") + debug_print(f"Error fetching actions by names for user {user_id}: {e}") return [] -def get_actions_by_type(user_id, action_type): +def get_actions_by_type(user_id, action_type, return_type=SecretReturnType.TRIGGER): """ Get all actions of a specific type for a user. @@ -331,8 +324,6 @@ def get_actions_by_type(user_id, action_type): list: List of action dictionaries """ try: - from config import cosmos_personal_actions_container - query = "SELECT * FROM c WHERE c.user_id = @user_id AND c.type = @type" parameters = [ {"name": "@user_id", "value": user_id}, @@ -349,10 +340,11 @@ def get_actions_by_type(user_id, action_type): cleaned_actions = [] for action in actions: cleaned_action = {k: v for k, v in action.items() if not k.startswith('_')} + cleaned_action = keyvault_plugin_get_helper(cleaned_action, scope_value=user_id, scope="user", return_type=return_type) cleaned_actions.append(cleaned_action) return cleaned_actions except Exception as e: - current_app.logger.error(f"Error fetching actions by type {action_type} for user {user_id}: {e}") + debug_print(f"Error fetching actions by type {action_type} for user {user_id}: {e}") return [] diff --git a/application/single_app/functions_personal_agents.py b/application/single_app/functions_personal_agents.py index 6cca4aa1..bf721842 100644 --- a/application/single_app/functions_personal_agents.py +++ b/application/single_app/functions_personal_agents.py @@ -1,17 +1,25 @@ + # functions_personal_agents.py """ Personal Agents Management -This module handles all operations related to personal agents stored in the +This module handles all operations related to personal agents stored in the personal_agents container with user_id partitioning. 
""" + +# Imports (grouped after docstring) import uuid from datetime import datetime from azure.cosmos import exceptions from flask import current_app import logging +from config import cosmos_personal_agents_container +from functions_settings import get_settings, get_user_settings, update_user_settings +from functions_keyvault import keyvault_agent_save_helper, keyvault_agent_get_helper, keyvault_agent_delete_helper +from functions_agent_payload import sanitize_agent_payload +from functions_debug import debug_print def get_personal_agents(user_id): """ @@ -24,8 +32,6 @@ def get_personal_agents(user_id): list: List of agent dictionaries """ try: - from config import cosmos_personal_agents_container - query = "SELECT * FROM c WHERE c.user_id = @user_id" parameters = [{"name": "@user_id", "value": user_id}] @@ -35,18 +41,26 @@ def get_personal_agents(user_id): partition_key=user_id )) - # Remove Cosmos metadata for cleaner response + # Remove Cosmos metadata for cleaner response and retrieve secrets from Key Vault cleaned_agents = [] for agent in agents: cleaned_agent = {k: v for k, v in agent.items() if not k.startswith('_')} + cleaned_agent = keyvault_agent_get_helper(cleaned_agent, cleaned_agent.get('id', ''), scope="user") + if cleaned_agent.get('max_completion_tokens') is None: + cleaned_agent['max_completion_tokens'] = -1 + cleaned_agent.setdefault('is_global', False) + cleaned_agent.setdefault('is_group', False) + cleaned_agent.setdefault('agent_type', 'local') + # Remove empty reasoning_effort to prevent validation errors + if cleaned_agent.get('reasoning_effort') == '': + cleaned_agent.pop('reasoning_effort', None) cleaned_agents.append(cleaned_agent) - return cleaned_agents except exceptions.CosmosResourceNotFoundError: return [] except Exception as e: - current_app.logger.error(f"Error fetching personal agents for user {user_id}: {e}") + debug_print(f"Error fetching personal agents for user {user_id}: {e}") return [] def get_personal_agent(user_id, agent_id): @@ -61,21 +75,29 @@ def get_personal_agent(user_id, agent_id): dict: Agent dictionary or None if not found """ try: - from config import cosmos_personal_agents_container - agent = cosmos_personal_agents_container.read_item( item=agent_id, partition_key=user_id ) - # Remove Cosmos metadata + # Remove Cosmos metadata and retrieve secrets from Key Vault cleaned_agent = {k: v for k, v in agent.items() if not k.startswith('_')} + cleaned_agent = keyvault_agent_get_helper(cleaned_agent, cleaned_agent.get('id', agent_id), scope="user") + # Ensure max_completion_tokens field exists + if cleaned_agent.get('max_completion_tokens') is None: + cleaned_agent['max_completion_tokens'] = -1 + cleaned_agent.setdefault('is_global', False) + cleaned_agent.setdefault('is_group', False) + cleaned_agent.setdefault('agent_type', 'local') + # Remove empty reasoning_effort to prevent validation errors + if cleaned_agent.get('reasoning_effort') == '': + cleaned_agent.pop('reasoning_effort', None) return cleaned_agent - except exceptions.CosmosResourceNotFoundError: + debug_print(f"Agent {agent_id} not found for user {user_id}") return None except Exception as e: - current_app.logger.error(f"Error fetching agent {agent_id} for user {user_id}: {e}") + debug_print(f"Error fetching agent {agent_id} for user {user_id}: {e}") return None def save_personal_agent(user_id, agent_data): @@ -90,14 +112,27 @@ def save_personal_agent(user_id, agent_data): dict: Saved agent data with ID """ try: - from config import cosmos_personal_agents_container - - # Ensure 
required fields - if 'id' not in agent_data: - agent_data['id'] = str(f"{user_id}_{agent_data.get('name', 'default')}") + cleaned_agent = sanitize_agent_payload(agent_data) + for field in ['name', 'display_name', 'description', 'instructions']: + cleaned_agent.setdefault(field, '') + for field in [ + 'azure_openai_gpt_endpoint', + 'azure_openai_gpt_key', + 'azure_openai_gpt_deployment', + 'azure_openai_gpt_api_version', + 'azure_agent_apim_gpt_endpoint', + 'azure_agent_apim_gpt_subscription_key', + 'azure_agent_apim_gpt_deployment', + 'azure_agent_apim_gpt_api_version' + ]: + cleaned_agent.setdefault(field, '') + if 'id' not in cleaned_agent: + cleaned_agent['id'] = str(f"{user_id}_{cleaned_agent.get('name', 'default')}") - agent_data['user_id'] = user_id - agent_data['last_updated'] = datetime.utcnow().isoformat() + cleaned_agent['user_id'] = user_id + cleaned_agent['last_updated'] = datetime.utcnow().isoformat() + cleaned_agent['is_global'] = False + cleaned_agent['is_group'] = False # Validate required fields required_fields = ['name', 'display_name', 'description', 'instructions'] @@ -111,18 +146,31 @@ def save_personal_agent(user_id, agent_data): agent_data.setdefault('azure_agent_apim_gpt_deployment', '') agent_data.setdefault('azure_agent_apim_gpt_api_version', '') agent_data.setdefault('enable_agent_gpt_apim', False) + agent_data.setdefault('reasoning_effort', '') agent_data.setdefault('actions_to_load', []) agent_data.setdefault('other_settings', {}) - agent_data.setdefault('is_global', False) + # Remove empty reasoning_effort to avoid schema validation errors + if agent_data.get('reasoning_effort') == '': + agent_data.pop('reasoning_effort', None) + agent_data['is_global'] = False + agent_data['is_group'] = False + agent_data.setdefault('agent_type', 'local') + + # Store sensitive keys in Key Vault if enabled + agent_data = keyvault_agent_save_helper(agent_data, agent_data.get('id', ''), scope="user") + if agent_data.get('max_completion_tokens') is None: + agent_data['max_completion_tokens'] = -1 result = cosmos_personal_agents_container.upsert_item(body=agent_data) - # Remove Cosmos metadata from response cleaned_result = {k: v for k, v in result.items() if not k.startswith('_')} + cleaned_result.setdefault('is_global', False) + cleaned_result.setdefault('is_group', False) + cleaned_result.setdefault('agent_type', 'local') return cleaned_result except Exception as e: - current_app.logger.error(f"Error saving agent for user {user_id}: {e}") + debug_print(f"Error saving agent for user {user_id}: {e}") raise def delete_personal_agent(user_id, agent_id): @@ -137,8 +185,6 @@ def delete_personal_agent(user_id, agent_id): bool: True if deleted, False if not found """ try: - from config import cosmos_personal_agents_container - # Try to find the agent first to get the correct ID # Check if agent_id is actually a name and we need to find the real ID agent = get_personal_agent(user_id, agent_id) @@ -146,20 +192,20 @@ def delete_personal_agent(user_id, agent_id): # Try to find by name if direct ID lookup failed agents = get_personal_agents(user_id) agent = next((a for a in agents if a['name'] == agent_id), None) - if not agent: return False - + # Delete secrets from Key Vault if present + keyvault_agent_delete_helper(agent, agent.get('id', agent_id), scope="user") cosmos_personal_agents_container.delete_item( item=agent['id'], partition_key=user_id ) return True - except exceptions.CosmosResourceNotFoundError: + debug_print(f"Agent {agent_id} not found for user {user_id}") return False 
except Exception as e: - current_app.logger.error(f"Error deleting agent {agent_id} for user {user_id}: {e}") + debug_print(f"Error deleting agent {agent_id} for user {user_id}: {e}") raise def ensure_migration_complete(user_id): @@ -174,8 +220,6 @@ def ensure_migration_complete(user_id): int: Number of agents migrated (0 if already migrated) """ try: - from functions_settings import get_user_settings, update_user_settings - user_settings = get_user_settings(user_id) agents = user_settings.get('settings', {}).get('agents', []) @@ -192,13 +236,13 @@ def ensure_migration_complete(user_id): settings_to_update = user_settings.get('settings', {}) settings_to_update['agents'] = [] # Set to empty array instead of removing update_user_settings(user_id, settings_to_update) - current_app.logger.info(f"Cleaned up legacy agent data for user {user_id} (already migrated)") + debug_print(f"Cleaned up legacy agent data for user {user_id} (already migrated)") return 0 return 0 except Exception as e: - current_app.logger.error(f"Error ensuring agent migration complete for user {user_id}: {e}") + debug_print(f"Error ensuring agent migration complete for user {user_id}: {e}") return 0 def migrate_agents_from_user_settings(user_id): @@ -212,93 +256,32 @@ def migrate_agents_from_user_settings(user_id): int: Number of agents migrated """ try: - from functions_settings import get_user_settings, update_user_settings - user_settings = get_user_settings(user_id) agents = user_settings.get('settings', {}).get('agents', []) - # Get existing personal agents to avoid duplicates existing_personal_agents = get_personal_agents(user_id) existing_agent_names = {agent['name'] for agent in existing_personal_agents} - migrated_count = 0 for agent in agents: try: # Skip if agent already exists in personal container if agent.get('name') in existing_agent_names: - current_app.logger.info(f"Skipping migration of agent '{agent.get('name')}' - already exists") + debug_print(f"Skipping migration of agent '{agent.get('name')}' - already exists") continue - # Ensure agent has an ID if 'id' not in agent: agent['id'] = str(uuid.uuid4()) - save_personal_agent(user_id, agent) migrated_count += 1 - except Exception as e: - current_app.logger.error(f"Error migrating agent {agent.get('name', 'unknown')} for user {user_id}: {e}") - + debug_print(f"Error migrating agent {agent.get('name', 'unknown')} for user {user_id}: {e}") # Always remove agents from user settings after processing (even if no new ones migrated) settings_to_update = user_settings.get('settings', {}) settings_to_update['agents'] = [] # Set to empty array instead of removing update_user_settings(user_id, settings_to_update) - - current_app.logger.info(f"Migrated {migrated_count} new agents for user {user_id}, cleaned up legacy data") + debug_print(f"Migrated {migrated_count} new agents for user {user_id}, cleaned up legacy data") return migrated_count - except Exception as e: - current_app.logger.error(f"Error during agent migration for user {user_id}: {e}") + debug_print(f"Error during agent migration for user {user_id}: {e}") return 0 -def get_selected_agent(user_id): - """ - Get the user's selected agent preference. 
- - Args: - user_id (str): The user's unique identifier - - Returns: - dict: Selected agent info or None - """ - try: - from functions_settings import get_user_settings - - user_settings = get_user_settings(user_id) - selected_agent = user_settings.get('settings', {}).get('selected_agent') - - return selected_agent - - except Exception as e: - current_app.logger.error(f"Error getting selected agent for user {user_id}: {e}") - return None - -def set_selected_agent(user_id, agent_name, is_global=False): - """ - Set the user's selected agent preference. - - Args: - user_id (str): The user's unique identifier - agent_name (str): Name of the selected agent - is_global (bool): Whether the agent is global or personal - - Returns: - bool: True if successful - """ - try: - from functions_settings import get_user_settings, update_user_settings - - user_settings = get_user_settings(user_id) - settings_to_update = user_settings.get('settings', {}) - - settings_to_update['selected_agent'] = { - 'name': agent_name, - 'is_global': is_global - } - - update_user_settings(user_id, settings_to_update) - return True - - except Exception as e: - current_app.logger.error(f"Error setting selected agent for user {user_id}: {e}") - return False diff --git a/application/single_app/functions_plugins.py b/application/single_app/functions_plugins.py index d540352c..0bdbedad 100644 --- a/application/single_app/functions_plugins.py +++ b/application/single_app/functions_plugins.py @@ -3,6 +3,7 @@ import os import json import jsonschema +from functions_security import is_safe_slug def load_plugin_schema(plugin_type, schema_dir): """ @@ -92,6 +93,9 @@ def get_merged_plugin_settings(plugin_type, current_settings, schema_dir): """ Loads the schema for the plugin_type, merges with current_settings, and returns the merged dict. """ + if not is_safe_slug(plugin_type): + # Reject unsafe plugin types to avoid path traversal or unexpected filenames + return {} result = {} # Use plugin_type as base for schema loading (matches actual schema filenames) for nested_key, schema_filename in [ diff --git a/application/single_app/functions_public_workspaces.py b/application/single_app/functions_public_workspaces.py index 53abf484..45e5f80e 100644 --- a/application/single_app/functions_public_workspaces.py +++ b/application/single_app/functions_public_workspaces.py @@ -333,4 +333,85 @@ def get_user_visible_public_workspace_docs(user_id: str) -> list: if ws["id"] in visible_workspace_ids ] - return visible_workspaces \ No newline at end of file + return visible_workspaces + + +def check_public_workspace_status_allows_operation(workspace_doc, operation_type): + """ + Check if the public workspace's status allows the specified operation. 
+ + Args: + workspace_doc: The public workspace document from Cosmos DB + operation_type: One of 'upload', 'delete', 'chat', 'view' + + Returns: + tuple: (allowed: bool, reason: str) + + Status definitions: + - active: All operations allowed + - locked: Read-only mode (view and chat only, no modifications) + - upload_disabled: No new uploads, but deletions and chat allowed + - inactive: No operations allowed except admin viewing + """ + if not workspace_doc: + return False, "Public workspace not found" + + status = workspace_doc.get('status', 'active') # Default to 'active' if not set + + # Define what each status allows + status_permissions = { + 'active': { + 'upload': True, + 'delete': True, + 'chat': True, + 'view': True + }, + 'locked': { + 'upload': False, + 'delete': False, + 'chat': True, + 'view': True + }, + 'upload_disabled': { + 'upload': False, + 'delete': True, + 'chat': True, + 'view': True + }, + 'inactive': { + 'upload': False, + 'delete': False, + 'chat': False, + 'view': False + } + } + + # Get permissions for current status + permissions = status_permissions.get(status, status_permissions['active']) + + # Check if operation is allowed + allowed = permissions.get(operation_type, False) + + # Generate helpful reason message if not allowed + if not allowed: + reasons = { + 'locked': { + 'upload': 'This public workspace is locked (read-only mode). Document uploads are disabled.', + 'delete': 'This public workspace is locked (read-only mode). Document deletions are disabled.' + }, + 'upload_disabled': { + 'upload': 'Document uploads are disabled for this public workspace.' + }, + 'inactive': { + 'upload': 'This public workspace is inactive. All operations are disabled.', + 'delete': 'This public workspace is inactive. All operations are disabled.', + 'chat': 'This public workspace is inactive. All operations are disabled.', + 'view': 'This public workspace is inactive. Access is restricted to administrators.' + } + } + + reason = reasons.get(status, {}).get(operation_type, + f'This operation is not allowed when public workspace status is "{status}".') + return False, reason + + return True, "" \ No newline at end of file diff --git a/application/single_app/functions_retention_policy.py b/application/single_app/functions_retention_policy.py new file mode 100644 index 00000000..690e39c9 --- /dev/null +++ b/application/single_app/functions_retention_policy.py @@ -0,0 +1,882 @@ +# functions_retention_policy.py + +""" +Retention Policy Management + +This module handles automated deletion of aged conversations and documents +based on configurable retention policies for personal, group, and public workspaces. 
+ +Version: 0.237.005 +Implemented in: 0.234.067 +Updated in: 0.236.012 - Fixed race condition handling for NotFound errors during deletion +Updated in: 0.237.004 - Fixed critical bug where conversations with null/undefined last_activity_at were deleted regardless of age +Updated in: 0.237.005 - Fixed field name: use last_updated (actual field) instead of last_activity_at (non-existent) +""" + +from config import * +from functions_settings import get_settings, update_settings, cosmos_user_settings_container +from functions_group import get_user_groups, cosmos_groups_container +from functions_public_workspaces import get_user_public_workspaces, cosmos_public_workspaces_container +from functions_documents import delete_document, delete_document_chunks +from functions_activity_logging import log_conversation_deletion, log_conversation_archival +from functions_notifications import create_notification, create_group_notification, create_public_workspace_notification +from functions_debug import debug_print +from functions_appinsights import log_event +from datetime import datetime, timezone, timedelta + + +def get_all_user_settings(): + """ + Get all user settings from Cosmos DB. + + Returns: + list: List of all user setting documents + """ + try: + query = "SELECT * FROM c" + users = list(cosmos_user_settings_container.query_items( + query=query, + enable_cross_partition_query=True + )) + return users + except Exception as e: + log_event("get_all_user_settings_error", {"error": str(e)}) + debug_print(f"Error fetching all user settings: {e}") + return [] + + +def get_all_groups(): + """ + Get all groups from Cosmos DB. + + Returns: + list: List of all group documents + """ + try: + query = "SELECT * FROM c" + groups = list(cosmos_groups_container.query_items( + query=query, + enable_cross_partition_query=True + )) + return groups + except Exception as e: + log_event("get_all_groups_error", {"error": str(e)}) + debug_print(f"Error fetching all groups: {e}") + return [] + + +def get_all_public_workspaces(): + """ + Get all public workspaces from Cosmos DB. + + Returns: + list: List of all public workspace documents + """ + try: + query = "SELECT * FROM c" + workspaces = list(cosmos_public_workspaces_container.query_items( + query=query, + enable_cross_partition_query=True + )) + return workspaces + except Exception as e: + log_event("get_all_public_workspaces_error", {"error": str(e)}) + debug_print(f"Error fetching all public workspaces: {e}") + return [] + + +def resolve_retention_value(value, workspace_type, retention_type, settings=None): + """ + Resolve a retention value, handling 'default' by looking up organization defaults. 
+ + Args: + value: The retention value ('none', 'default', or a number/string of days) + workspace_type: 'personal', 'group', or 'public' + retention_type: 'conversation' or 'document' + settings: Optional pre-loaded settings dict (to avoid repeated lookups) + + Returns: + str or int: 'none' if no deletion, or the number of days as int + """ + if value is None or value == 'default' or value == '': + # Look up the organization default + if settings is None: + settings = get_settings() + + setting_key = f'default_retention_{retention_type}_{workspace_type}' + default_value = settings.get(setting_key, 'none') + + # If the org default is also 'none', return 'none' + if default_value == 'none' or default_value is None: + return 'none' + + # Return the org default as the effective value + try: + return int(default_value) + except (ValueError, TypeError): + return 'none' + + # User/workspace has their own explicit value + if value == 'none': + return 'none' + + try: + return int(value) + except (ValueError, TypeError): + return 'none' + + +def execute_retention_policy(workspace_scopes=None, manual_execution=False): + """ + Execute retention policy for specified workspace scopes. + + Args: + workspace_scopes (list, optional): List of workspace types to process. + Can include 'personal', 'group', 'public'. If None, processes all enabled scopes. + manual_execution (bool): Whether this is a manual execution (bypasses schedule check) + + Returns: + dict: Summary of deletion results + """ + settings = get_settings() + + # Determine which scopes to process + if workspace_scopes is None: + workspace_scopes = [] + if settings.get('enable_retention_policy_personal', False): + workspace_scopes.append('personal') + if settings.get('enable_retention_policy_group', False): + workspace_scopes.append('group') + if settings.get('enable_retention_policy_public', False): + workspace_scopes.append('public') + + if not workspace_scopes: + debug_print("No retention policy scopes enabled") + return { + 'success': False, + 'message': 'No retention policy scopes enabled', + 'scopes_processed': [] + } + + results = { + 'success': True, + 'execution_time': datetime.now(timezone.utc).isoformat(), + 'manual_execution': manual_execution, + 'scopes_processed': workspace_scopes, + 'personal': {'conversations': 0, 'documents': 0, 'users_affected': 0}, + 'group': {'conversations': 0, 'documents': 0, 'workspaces_affected': 0}, + 'public': {'conversations': 0, 'documents': 0, 'workspaces_affected': 0}, + 'errors': [] + } + + try: + # Process personal workspaces + if 'personal' in workspace_scopes: + debug_print("Processing personal workspace retention policies...") + personal_results = process_personal_retention() + results['personal'] = personal_results + + # Process group workspaces + if 'group' in workspace_scopes: + debug_print("Processing group workspace retention policies...") + group_results = process_group_retention() + results['group'] = group_results + + # Process public workspaces + if 'public' in workspace_scopes: + debug_print("Processing public workspace retention policies...") + public_results = process_public_retention() + results['public'] = public_results + + # Update last run time in settings + settings['retention_policy_last_run'] = datetime.now(timezone.utc).isoformat() + + # Calculate next run time (scheduled for configured hour next day) + execution_hour = settings.get('retention_policy_execution_hour', 2) + next_run = datetime.now(timezone.utc).replace(hour=execution_hour, minute=0, second=0, microsecond=0) 
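+        # Worked example (illustrative values, not from config): with execution_hour=2, a run at
+        # 14:30 UTC on Jan 1 first produces next_run = Jan 1 02:00 UTC, which is already in the
+        # past, so the check below advances it to Jan 2 02:00 UTC; a run before 02:00 UTC keeps
+        # next_run on the same day.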
+ if next_run <= datetime.now(timezone.utc): + next_run += timedelta(days=1) + settings['retention_policy_next_run'] = next_run.isoformat() + + update_settings(settings) + + debug_print(f"Retention policy execution completed: {results}") + return results + + except Exception as e: + log_event("execute_retention_policy_error", {"error": str(e), "workspace_scopes": workspace_scopes, "manual_execution": manual_execution}) + debug_print(f"Error executing retention policy: {e}") + results['success'] = False + results['errors'].append(str(e)) + return results + + +def process_personal_retention(): + """ + Process retention policies for all personal workspaces. + + Returns: + dict: Deletion statistics + """ + results = { + 'conversations': 0, + 'documents': 0, + 'users_affected': 0, + 'details': [] + } + + try: + # Get all user settings + all_users = get_all_user_settings() + + # Pre-load settings once for efficiency + settings = get_settings() + + for user in all_users: + user_id = user.get('id') + if not user_id: + continue + + # Get user's retention settings + user_settings = user.get('settings', {}) + retention_settings = user_settings.get('retention_policy', {}) + + # Get raw values (may be 'default', 'none', or a number) + raw_conversation_days = retention_settings.get('conversation_retention_days') + raw_document_days = retention_settings.get('document_retention_days') + + # Resolve to effective values (handles 'default' -> org default lookup) + conversation_retention_days = resolve_retention_value(raw_conversation_days, 'personal', 'conversation', settings) + document_retention_days = resolve_retention_value(raw_document_days, 'personal', 'document', settings) + + # Skip if both resolve to "none" + if conversation_retention_days == 'none' and document_retention_days == 'none': + continue + + debug_print(f"Processing retention for user {user_id}: conversations={conversation_retention_days} days, documents={document_retention_days} days") + + user_deletion_summary = { + 'user_id': user_id, + 'conversations_deleted': 0, + 'documents_deleted': 0, + 'conversation_details': [], + 'document_details': [] + } + + # Process conversations + if conversation_retention_days != 'none': + try: + conv_results = delete_aged_conversations( + user_id=user_id, + retention_days=int(conversation_retention_days), + workspace_type='personal' + ) + user_deletion_summary['conversations_deleted'] = conv_results['count'] + user_deletion_summary['conversation_details'] = conv_results['details'] + results['conversations'] += conv_results['count'] + except Exception as e: + log_event("process_personal_retention_conversations_error", {"error": str(e), "user_id": user_id}) + debug_print(f"Error processing conversations for user {user_id}: {e}") + + # Process documents + if document_retention_days != 'none': + try: + doc_results = delete_aged_documents( + user_id=user_id, + retention_days=int(document_retention_days), + workspace_type='personal' + ) + user_deletion_summary['documents_deleted'] = doc_results['count'] + user_deletion_summary['document_details'] = doc_results['details'] + results['documents'] += doc_results['count'] + except Exception as e: + log_event("process_personal_retention_documents_error", {"error": str(e), "user_id": user_id}) + debug_print(f"Error processing documents for user {user_id}: {e}") + + # Send notification if anything was deleted + if user_deletion_summary['conversations_deleted'] > 0 or user_deletion_summary['documents_deleted'] > 0: + send_retention_notification(user_id, 
user_deletion_summary, 'personal') + results['users_affected'] += 1 + results['details'].append(user_deletion_summary) + + return results + + except Exception as e: + log_event("process_personal_retention_error", {"error": str(e)}) + debug_print(f"Error in process_personal_retention: {e}") + return results + + +def process_group_retention(): + """ + Process retention policies for all group workspaces. + + Returns: + dict: Deletion statistics + """ + results = { + 'conversations': 0, + 'documents': 0, + 'workspaces_affected': 0, + 'details': [] + } + + try: + # Get all groups + all_groups = get_all_groups() + + # Pre-load settings once for efficiency + settings = get_settings() + + for group in all_groups: + group_id = group.get('id') + if not group_id: + continue + + # Get group's retention settings + retention_settings = group.get('retention_policy', {}) + + # Get raw values (may be 'default', 'none', or a number) + raw_conversation_days = retention_settings.get('conversation_retention_days') + raw_document_days = retention_settings.get('document_retention_days') + + # Resolve to effective values (handles 'default' -> org default lookup) + conversation_retention_days = resolve_retention_value(raw_conversation_days, 'group', 'conversation', settings) + document_retention_days = resolve_retention_value(raw_document_days, 'group', 'document', settings) + + # Skip if both resolve to "none" + if conversation_retention_days == 'none' and document_retention_days == 'none': + continue + + group_deletion_summary = { + 'group_id': group_id, + 'group_name': group.get('name', 'Unnamed Group'), + 'conversations_deleted': 0, + 'documents_deleted': 0, + 'conversation_details': [], + 'document_details': [] + } + + # Process conversations + if conversation_retention_days != 'none': + try: + conv_results = delete_aged_conversations( + group_id=group_id, + retention_days=int(conversation_retention_days), + workspace_type='group' + ) + group_deletion_summary['conversations_deleted'] = conv_results['count'] + group_deletion_summary['conversation_details'] = conv_results['details'] + results['conversations'] += conv_results['count'] + except Exception as e: + log_event("process_group_retention_conversations_error", {"error": str(e), "group_id": group_id}) + debug_print(f"Error processing conversations for group {group_id}: {e}") + + # Process documents + if document_retention_days != 'none': + try: + doc_results = delete_aged_documents( + group_id=group_id, + retention_days=int(document_retention_days), + workspace_type='group' + ) + group_deletion_summary['documents_deleted'] = doc_results['count'] + group_deletion_summary['document_details'] = doc_results['details'] + results['documents'] += doc_results['count'] + except Exception as e: + log_event("process_group_retention_documents_error", {"error": str(e), "group_id": group_id}) + debug_print(f"Error processing documents for group {group_id}: {e}") + + # Send notification if anything was deleted + if group_deletion_summary['conversations_deleted'] > 0 or group_deletion_summary['documents_deleted'] > 0: + send_retention_notification(group_id, group_deletion_summary, 'group') + results['workspaces_affected'] += 1 + results['details'].append(group_deletion_summary) + + return results + + except Exception as e: + log_event("process_group_retention_error", {"error": str(e)}) + debug_print(f"Error in process_group_retention: {e}") + return results + + +def process_public_retention(): + """ + Process retention policies for all public workspaces. 
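+
+    Note: public workspaces only store documents, so conversation retention is
+    skipped for this scope (see the comment inside the processing loop below).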
+ + Returns: + dict: Deletion statistics + """ + results = { + 'conversations': 0, + 'documents': 0, + 'workspaces_affected': 0, + 'details': [] + } + + try: + # Get all public workspaces + all_workspaces = get_all_public_workspaces() + + # Pre-load settings once for efficiency + settings = get_settings() + + for workspace in all_workspaces: + workspace_id = workspace.get('id') + if not workspace_id: + continue + + # Get workspace's retention settings + retention_settings = workspace.get('retention_policy', {}) + + # Get raw values (may be 'default', 'none', or a number) + raw_conversation_days = retention_settings.get('conversation_retention_days') + raw_document_days = retention_settings.get('document_retention_days') + + # Resolve to effective values (handles 'default' -> org default lookup) + conversation_retention_days = resolve_retention_value(raw_conversation_days, 'public', 'conversation', settings) + document_retention_days = resolve_retention_value(raw_document_days, 'public', 'document', settings) + + # Skip if both resolve to "none" + if conversation_retention_days == 'none' and document_retention_days == 'none': + continue + + workspace_deletion_summary = { + 'public_workspace_id': workspace_id, + 'workspace_name': workspace.get('name', 'Unnamed Workspace'), + 'conversations_deleted': 0, + 'documents_deleted': 0, + 'conversation_details': [], + 'document_details': [] + } + + # Note: Public workspaces do not have a separate conversations container. + # Conversations are only stored in personal (cosmos_conversations_container) or + # group (cosmos_group_conversations_container) workspaces. + # Therefore, we skip conversation processing for public workspaces. + # Only documents are processed for public workspace retention. + + # Process documents + if document_retention_days != 'none': + try: + doc_results = delete_aged_documents( + public_workspace_id=workspace_id, + retention_days=int(document_retention_days), + workspace_type='public' + ) + workspace_deletion_summary['documents_deleted'] = doc_results['count'] + workspace_deletion_summary['document_details'] = doc_results['details'] + results['documents'] += doc_results['count'] + except Exception as e: + log_event("process_public_retention_documents_error", {"error": str(e), "public_workspace_id": workspace_id}) + debug_print(f"Error processing documents for public workspace {workspace_id}: {e}") + + # Send notification if anything was deleted + if workspace_deletion_summary['conversations_deleted'] > 0 or workspace_deletion_summary['documents_deleted'] > 0: + send_retention_notification(workspace_id, workspace_deletion_summary, 'public') + results['workspaces_affected'] += 1 + results['details'].append(workspace_deletion_summary) + + return results + + except Exception as e: + log_event("process_public_retention_error", {"error": str(e)}) + debug_print(f"Error in process_public_retention: {e}") + return results + + +def delete_aged_conversations(retention_days, workspace_type='personal', user_id=None, group_id=None, public_workspace_id=None): + """ + Delete conversations that exceed the retention period based on last_updated. 
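+
+    Conversations without a valid last_updated timestamp are skipped rather than
+    deleted, so newly created conversations are never removed by mistake.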
+ + Args: + retention_days (int): Number of days to retain conversations + workspace_type (str): 'personal', 'group', or 'public' + user_id (str, optional): User ID for personal workspaces + group_id (str, optional): Group ID for group workspaces + public_workspace_id (str, optional): Public workspace ID for public workspaces + + Returns: + dict: {'count': int, 'details': list} + """ + settings = get_settings() + archiving_enabled = settings.get('enable_conversation_archiving', False) + + # Determine which container to use + if workspace_type == 'group': + container = cosmos_group_conversations_container + partition_field = 'group_id' + partition_value = group_id + elif workspace_type == 'public': + container = cosmos_public_conversations_container + partition_field = 'public_workspace_id' + partition_value = public_workspace_id + else: + container = cosmos_conversations_container + partition_field = 'user_id' + partition_value = user_id + + # Calculate cutoff date + cutoff_date = datetime.now(timezone.utc) - timedelta(days=retention_days) + cutoff_iso = cutoff_date.isoformat() + + # Query for aged conversations + # ONLY delete conversations that have a valid last_updated that is older than the cutoff + # Conversations with null/undefined last_updated should be SKIPPED (not deleted) + # This prevents accidentally deleting new conversations that haven't had their timestamp set + query = f""" + SELECT c.id, c.title, c.last_updated, c.{partition_field} + FROM c + WHERE c.{partition_field} = @partition_value + AND IS_DEFINED(c.last_updated) + AND NOT IS_NULL(c.last_updated) + AND c.last_updated < @cutoff_date + """ + + parameters = [ + {"name": "@partition_value", "value": partition_value}, + {"name": "@cutoff_date", "value": cutoff_iso} + ] + + debug_print(f"Querying aged conversations: workspace_type={workspace_type}, partition_field={partition_field}, partition_value={partition_value}, cutoff_date={cutoff_iso}, retention_days={retention_days}") + + try: + aged_conversations = list(container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + debug_print(f"Found {len(aged_conversations)} aged conversations for {workspace_type} workspace") + except Exception as query_error: + log_event("delete_aged_conversations_query_error", {"error": str(query_error), "workspace_type": workspace_type, "partition_value": partition_value}) + debug_print(f"Error querying aged conversations for {workspace_type} (partition_value={partition_value}): {query_error}") + return {'count': 0, 'details': []} + + deleted_details = [] + + for conv in aged_conversations: + try: + conversation_id = conv.get('id') + conversation_title = conv.get('title', 'Untitled') + + # Read full conversation for archiving/logging + try: + conversation_item = container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + except CosmosResourceNotFoundError: + # Conversation was already deleted (race condition) - this is fine, skip to next + debug_print(f"Conversation {conversation_id} already deleted (not found during read), skipping") + deleted_details.append({ + 'id': conversation_id, + 'title': conversation_title, + 'last_updated': conv.get('last_updated'), + 'already_deleted': True + }) + continue + + # Archive if enabled + if archiving_enabled: + archived_item = dict(conversation_item) + archived_item["archived_at"] = datetime.now(timezone.utc).isoformat() + archived_item["archived_by_retention_policy"] = True + 
cosmos_archived_conversations_container.upsert_item(archived_item) + + log_conversation_archival( + user_id=conversation_item.get('user_id'), + conversation_id=conversation_id, + title=conversation_title, + workspace_type=workspace_type, + context=conversation_item.get('context', []), + tags=conversation_item.get('tags', []), + group_id=conversation_item.get('group_id'), + public_workspace_id=conversation_item.get('public_workspace_id') + ) + + # Delete messages + + if workspace_type == 'group': + messages_container = cosmos_group_messages_container + elif workspace_type == 'public': + messages_container = cosmos_public_messages_container + else: + messages_container = cosmos_messages_container + + message_query = f"SELECT * FROM c WHERE c.conversation_id = @conversation_id" + message_params = [{"name": "@conversation_id", "value": conversation_id}] + + messages = list(messages_container.query_items( + query=message_query, + parameters=message_params, + partition_key=conversation_id + )) + + for msg in messages: + if archiving_enabled: + archived_msg = dict(msg) + archived_msg["archived_at"] = datetime.now(timezone.utc).isoformat() + archived_msg["archived_by_retention_policy"] = True + cosmos_archived_messages_container.upsert_item(archived_msg) + + try: + messages_container.delete_item(msg['id'], partition_key=conversation_id) + except CosmosResourceNotFoundError: + # Message was already deleted - this is fine, continue + debug_print(f"Message {msg['id']} already deleted (not found), skipping") + + # Log deletion + log_conversation_deletion( + user_id=conversation_item.get('user_id'), + conversation_id=conversation_id, + title=conversation_title, + workspace_type=workspace_type, + context=conversation_item.get('context', []), + tags=conversation_item.get('tags', []), + is_archived=archiving_enabled, + is_bulk_operation=True, + group_id=conversation_item.get('group_id'), + public_workspace_id=conversation_item.get('public_workspace_id'), + additional_context={'deletion_reason': 'retention_policy'} + ) + + # Delete conversation + try: + container.delete_item( + item=conversation_id, + partition_key=conversation_id + ) + except CosmosResourceNotFoundError: + # Conversation was already deleted after we read it (race condition) - this is fine + debug_print(f"Conversation {conversation_id} already deleted (not found during delete)") + + deleted_details.append({ + 'id': conversation_id, + 'title': conversation_title, + 'last_updated': conv.get('last_updated') + }) + + debug_print(f"Deleted conversation {conversation_id} ({conversation_title}) due to retention policy") + + except Exception as e: + conv_id = conv.get('id', 'unknown') if conv else 'unknown' + log_event("delete_aged_conversations_deletion_error", {"error": str(e), "conversation_id": conv_id, "workspace_type": workspace_type}) + debug_print(f"Error deleting conversation {conv_id}: {e}") + + return { + 'count': len(deleted_details), + 'details': deleted_details + } + + +def delete_aged_documents(retention_days, workspace_type='personal', user_id=None, group_id=None, public_workspace_id=None): + """ + Delete documents that exceed the retention period based on last_activity_at. 
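+
+    (Aging is evaluated against the document's last_updated field; see the query below.)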
+ + Args: + retention_days (int): Number of days to retain documents + workspace_type (str): 'personal', 'group', or 'public' + user_id (str, optional): User ID for personal workspaces + group_id (str, optional): Group ID for group workspaces + public_workspace_id (str, optional): Public workspace ID for public workspaces + + Returns: + dict: {'count': int, 'details': list} + """ + # Determine which container to use + if workspace_type == 'group': + container = cosmos_group_documents_container + partition_field = 'group_id' + partition_value = group_id + deletion_user_id = None # Will be extracted from document + elif workspace_type == 'public': + container = cosmos_public_documents_container + partition_field = 'public_workspace_id' + partition_value = public_workspace_id + deletion_user_id = None # Will be extracted from document + else: + container = cosmos_user_documents_container + partition_field = 'user_id' + partition_value = user_id + deletion_user_id = user_id + + # Calculate cutoff date + # Documents use format like '2026-01-08T21:49:15Z' so we match that format + cutoff_date = datetime.now(timezone.utc) - timedelta(days=retention_days) + cutoff_iso = cutoff_date.strftime('%Y-%m-%dT%H:%M:%SZ') + + # Query for aged documents + # Documents use 'last_updated' field (not 'last_activity_at' like conversations) + # Use simple date comparison - documents always have last_updated field + query = f""" + SELECT c.id, c.file_name, c.title, c.last_updated, c.user_id + FROM c + WHERE c.{partition_field} = @partition_value + AND c.last_updated < @cutoff_date + """ + + parameters = [ + {"name": "@partition_value", "value": partition_value}, + {"name": "@cutoff_date", "value": cutoff_iso} + ] + + debug_print(f"Querying aged documents: workspace_type={workspace_type}, partition_field={partition_field}, partition_value={partition_value}, cutoff_date={cutoff_iso}, retention_days={retention_days}") + + try: + aged_documents = list(container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + debug_print(f"Found {len(aged_documents)} aged documents for {workspace_type} workspace") + except Exception as query_error: + log_event("delete_aged_documents_query_error", {"error": str(query_error), "workspace_type": workspace_type, "partition_value": partition_value}) + debug_print(f"Error querying aged documents for {workspace_type} (partition_value={partition_value}): {query_error}") + return {'count': 0, 'details': []} + + deleted_details = [] + + for doc in aged_documents: + try: + document_id = doc.get('id') + file_name = doc.get('file_name', 'Unknown') + title = doc.get('title', file_name) + doc_user_id = doc.get('user_id') or deletion_user_id + + # Delete document chunks from search index + try: + delete_document_chunks(document_id, group_id, public_workspace_id) + except CosmosResourceNotFoundError: + # Document chunks already deleted - this is fine + debug_print(f"Document chunks for {document_id} already deleted (not found)") + except Exception as chunk_error: + # Log chunk deletion errors but continue with document deletion + debug_print(f"Error deleting chunks for document {document_id}: {chunk_error}") + + # Delete document from Cosmos DB and blob storage + try: + delete_document(doc_user_id, document_id, group_id, public_workspace_id) + except CosmosResourceNotFoundError: + # Document was already deleted (race condition) - this is fine + debug_print(f"Document {document_id} already deleted (not found)") + + deleted_details.append({ + 'id': 
document_id, + 'file_name': file_name, + 'title': title, + 'last_updated': doc.get('last_updated') + }) + + debug_print(f"Deleted document {document_id} ({file_name}) due to retention policy") + + except CosmosResourceNotFoundError: + # Document was already deleted - count as success + doc_id = doc.get('id', 'unknown') if doc else 'unknown' + debug_print(f"Document {doc_id} already deleted (not found)") + deleted_details.append({ + 'id': doc_id, + 'file_name': doc.get('file_name', 'Unknown'), + 'title': doc.get('title', doc.get('file_name', 'Unknown')), + 'last_updated': doc.get('last_updated'), + 'already_deleted': True + }) + except Exception as e: + doc_id = doc.get('id', 'unknown') if doc else 'unknown' + log_event("delete_aged_documents_deletion_error", {"error": str(e), "document_id": doc_id, "workspace_type": workspace_type}) + debug_print(f"Error deleting document {doc_id}: {e}") + + return { + 'count': len(deleted_details), + 'details': deleted_details + } + + +def send_retention_notification(workspace_id, deletion_summary, workspace_type): + """ + Send notification about retention policy deletions. + + Args: + workspace_id (str): User ID, group ID, or public workspace ID + deletion_summary (dict): Summary of deletions + workspace_type (str): 'personal', 'group', or 'public' + """ + conversations_deleted = deletion_summary.get('conversations_deleted', 0) + documents_deleted = deletion_summary.get('documents_deleted', 0) + + # Build message + message_parts = [] + if conversations_deleted > 0: + message_parts.append(f"{conversations_deleted} conversation{'s' if conversations_deleted != 1 else ''}") + if documents_deleted > 0: + message_parts.append(f"{documents_deleted} document{'s' if documents_deleted != 1 else ''}") + + message = f"Retention policy automatically deleted {' and '.join(message_parts)}." 
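+    # Callers only send a notification when at least one item was deleted, so message_parts is never empty here.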
+ + # Build details list + details = [] + + if conversations_deleted > 0: + conv_details = deletion_summary.get('conversation_details', []) + if conv_details: + details.append("**Conversations:**") + for conv in conv_details[:10]: # Limit to first 10 + details.append(f"• {conv.get('title', 'Untitled')}") + if len(conv_details) > 10: + details.append(f"• ...and {len(conv_details) - 10} more") + + if documents_deleted > 0: + doc_details = deletion_summary.get('document_details', []) + if doc_details: + details.append("\n**Documents:**") + for doc in doc_details[:10]: # Limit to first 10 + details.append(f"• {doc.get('file_name', 'Unknown')}") + if len(doc_details) > 10: + details.append(f"• ...and {len(doc_details) - 10} more") + + full_message = message + if details: + full_message += "\n\n" + "\n".join(details) + + # Create notification based on workspace type + if workspace_type == 'group': + create_group_notification( + group_id=workspace_id, + notification_type='system_announcement', + title='Retention Policy Cleanup', + message=full_message, + link_url='/chats', + metadata={ + 'conversations_deleted': conversations_deleted, + 'documents_deleted': documents_deleted, + 'deletion_date': datetime.now(timezone.utc).isoformat() + } + ) + elif workspace_type == 'public': + create_public_workspace_notification( + public_workspace_id=workspace_id, + notification_type='system_announcement', + title='Retention Policy Cleanup', + message=full_message, + link_url='/chats', + metadata={ + 'conversations_deleted': conversations_deleted, + 'documents_deleted': documents_deleted, + 'deletion_date': datetime.now(timezone.utc).isoformat() + } + ) + else: # personal + create_notification( + user_id=workspace_id, + notification_type='system_announcement', + title='Retention Policy Cleanup', + message=full_message, + link_url='/chats', + metadata={ + 'conversations_deleted': conversations_deleted, + 'documents_deleted': documents_deleted, + 'deletion_date': datetime.now(timezone.utc).isoformat() + } + ) + + debug_print(f"Sent retention notification to {workspace_type} workspace {workspace_id}") diff --git a/application/single_app/functions_search.py b/application/single_app/functions_search.py index cbdff52c..561264e7 100644 --- a/application/single_app/functions_search.py +++ b/application/single_app/functions_search.py @@ -1,8 +1,77 @@ # functions_search.py +import logging +from typing import List, Dict, Any from config import * from functions_content import * from functions_public_workspaces import get_user_visible_public_workspace_docs, get_user_visible_public_workspace_ids_from_settings +from utils_cache import ( + generate_search_cache_key, + get_cached_search_results, + cache_search_results, + DEBUG_ENABLED +) +from functions_debug import * + +logger = logging.getLogger(__name__) + + +def normalize_scores(results: List[Dict[str, Any]], index_name: str = "unknown") -> List[Dict[str, Any]]: + """ + Normalize search scores to [0, 1] range using min-max normalization. + + This ensures scores from different indexes (user, group, public) are comparable + when merged together. Without normalization, scores from indexes with different + document counts or characteristics may not be directly comparable. 
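+
+    Example: raw scores [0.2, 0.5, 0.8] from one index normalize to [0.0, 0.5, 1.0].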
+
+    Args:
+        results: List of search results with 'score' field
+        index_name: Name of the index for debug logging
+
+    Returns:
+        Same results list with normalized scores (original score preserved)
+    """
+    if not results:
+        debug_print(f"No results to normalize from {index_name}", "NORMALIZE")
+        return results
+
+    scores = [r['score'] for r in results]
+    min_score = min(scores)
+    max_score = max(scores)
+    score_range = max_score - min_score  # 0.0 when all scores are identical; handled below
+
+    debug_print(
+        f"Score distribution BEFORE normalization ({index_name})",
+        "NORMALIZE",
+        index=index_name,
+        count=len(results),
+        min=f"{min_score:.4f}",
+        max=f"{max_score:.4f}",
+        range=f"{score_range:.4f}"
+    )
+
+    # Apply min-max normalization (identical scores all map to the neutral value 0.5)
+    for r in results:
+        original_score = r['score']
+        normalized_score = (original_score - min_score) / score_range if score_range > 0 else 0.5
+
+        # Store both scores for transparency
+        r['original_score'] = original_score
+        r['original_index'] = index_name
+        r['score'] = normalized_score
+
+    # Log normalized distribution
+    normalized_scores = [r['score'] for r in results]
+    debug_print(
+        f"Score distribution AFTER normalization ({index_name})",
+        "NORMALIZE",
+        index=index_name,
+        count=len(results),
+        min=f"{min(normalized_scores):.4f}",
+        max=f"{max(normalized_scores):.4f}"
+    )
+
+    return results

 def hybrid_search(query, user_id, document_id=None, top_n=12, doc_scope="all", active_group_id=None, active_public_workspace_id=None, enable_file_sharing=True):
     """
     Hybrid search that queries the user doc index or the group doc index (or both).
@@ -11,8 +80,63 @@ def hybrid_search(query, user_id, document_id=None, top_n=12, doc_scope="all", a
     If document_id is None, we just search the user index for the user's docs
     OR you could unify that logic further (maybe search both).
     enable_file_sharing: If False, do not include shared_user_ids in filters.
+
+    This function uses document-set-aware caching to ensure consistent results
+    across identical queries against the same document set.
""" - query_embedding = generate_embedding(query) + + # Generate cache key including document set fingerprints + cache_key = generate_search_cache_key( + query=query, + user_id=user_id, + document_id=document_id, + doc_scope=doc_scope, + active_group_id=active_group_id, + active_public_workspace_id=active_public_workspace_id, + top_n=top_n, + enable_file_sharing=enable_file_sharing + ) + + # Check cache first (pass scope parameters for correct partition key) + cached_results = get_cached_search_results( + cache_key, + user_id, + doc_scope, + active_group_id, + active_public_workspace_id + ) + if cached_results is not None: + debug_print( + "Returning CACHED search results", + "SEARCH", + query=query[:40], + scope=doc_scope, + result_count=len(cached_results) + ) + logger.info(f"Returning cached search results for query: '{query[:50]}...'") + return cached_results + + # Cache miss - proceed with search + debug_print( + "Cache MISS - Executing Azure AI Search", + "SEARCH", + query=query[:40], + scope=doc_scope, + top_n=top_n + ) + logger.info(f"Cache miss - executing search for query: '{query[:50]}...'") + + # Unpack tuple from generate_embedding (returns embedding, token_usage) + result = generate_embedding(query) + if result is None: + return None + + # Handle both tuple (new) and single value (backward compatibility) + if isinstance(result, tuple): + query_embedding, _ = result # Ignore token_usage for search + else: + query_embedding = result + if query_embedding is None: return None @@ -46,18 +170,22 @@ def hybrid_search(query, user_id, document_id=None, top_n=12, doc_scope="all", a select=["id", "chunk_text", "chunk_id", "file_name", "user_id", "version", "chunk_sequence", "upload_date", "document_classification", "page_number", "author", "chunk_keywords", "title", "chunk_summary"] ) - group_results = search_client_group.search( - search_text=query, - vector_queries=[vector_query], - filter=( - f"(group_id eq '{active_group_id}' or shared_group_ids/any(g: g eq '{active_group_id},approved')) and document_id eq '{document_id}'" - ), - query_type="semantic", - semantic_configuration_name="nexus-group-index-semantic-configuration", - query_caption="extractive", - query_answer="extractive", - select=["id", "chunk_text", "chunk_id", "file_name", "group_id", "version", "chunk_sequence", "upload_date", "document_classification", "page_number", "author", "chunk_keywords", "title", "chunk_summary"] - ) + # Only search group index if active_group_id is provided + if active_group_id: + group_results = search_client_group.search( + search_text=query, + vector_queries=[vector_query], + filter=( + f"(group_id eq '{active_group_id}' or shared_group_ids/any(g: g eq '{active_group_id},approved')) and document_id eq '{document_id}'" + ), + query_type="semantic", + semantic_configuration_name="nexus-group-index-semantic-configuration", + query_caption="extractive", + query_answer="extractive", + select=["id", "chunk_text", "chunk_id", "file_name", "group_id", "version", "chunk_sequence", "upload_date", "document_classification", "page_number", "author", "chunk_keywords", "title", "chunk_summary"] + ) + else: + group_results = [] # Get visible public workspace IDs from user settings visible_public_workspace_ids = get_user_visible_public_workspace_ids_from_settings(user_id) @@ -97,18 +225,22 @@ def hybrid_search(query, user_id, document_id=None, top_n=12, doc_scope="all", a select=["id", "chunk_text", "chunk_id", "file_name", "user_id", "version", "chunk_sequence", "upload_date", "document_classification", 
"page_number", "author", "chunk_keywords", "title", "chunk_summary"] ) - group_results = search_client_group.search( - search_text=query, - vector_queries=[vector_query], - filter=( - f"(group_id eq '{active_group_id}' or shared_group_ids/any(g: g eq '{active_group_id},approved'))" - ), - query_type="semantic", - semantic_configuration_name="nexus-group-index-semantic-configuration", - query_caption="extractive", - query_answer="extractive", - select=["id", "chunk_text", "chunk_id", "file_name", "group_id", "version", "chunk_sequence", "upload_date", "document_classification", "page_number", "author", "chunk_keywords", "title", "chunk_summary"] - ) + # Only search group index if active_group_id is provided + if active_group_id: + group_results = search_client_group.search( + search_text=query, + vector_queries=[vector_query], + filter=( + f"(group_id eq '{active_group_id}' or shared_group_ids/any(g: g eq '{active_group_id},approved'))" + ), + query_type="semantic", + semantic_configuration_name="nexus-group-index-semantic-configuration", + query_caption="extractive", + query_answer="extractive", + select=["id", "chunk_text", "chunk_id", "file_name", "group_id", "version", "chunk_sequence", "upload_date", "document_classification", "page_number", "author", "chunk_keywords", "title", "chunk_summary"] + ) + else: + group_results = [] # Get visible public workspace IDs from user settings visible_public_workspace_ids = get_user_visible_public_workspace_ids_from_settings(user_id) @@ -133,10 +265,32 @@ def hybrid_search(query, user_id, document_id=None, top_n=12, doc_scope="all", a select=["id", "chunk_text", "chunk_id", "file_name", "public_workspace_id", "version", "chunk_sequence", "upload_date", "document_classification", "page_number", "author", "chunk_keywords", "title", "chunk_summary"] ) + # Extract results from each index user_results_final = extract_search_results(user_results, top_n) group_results_final = extract_search_results(group_results, top_n) public_results_final = extract_search_results(public_results, top_n) - results = user_results_final + group_results_final + public_results_final + + debug_print( + "Extracted raw results from indexes", + "SEARCH", + user_count=len(user_results_final), + group_count=len(group_results_final), + public_count=len(public_results_final) + ) + + # Normalize scores from each index to [0, 1] range for fair comparison + user_results_normalized = normalize_scores(user_results_final, "user_index") + group_results_normalized = normalize_scores(group_results_final, "group_index") + public_results_normalized = normalize_scores(public_results_final, "public_index") + + # Merge normalized results + results = user_results_normalized + group_results_normalized + public_results_normalized + + debug_print( + "Merged results from all indexes", + "SEARCH", + total_count=len(results) + ) elif doc_scope == "personal": if document_id: @@ -255,8 +409,85 @@ def hybrid_search(query, user_id, document_id=None, top_n=12, doc_scope="all", a ) results = extract_search_results(public_results, top_n) - results = sorted(results, key=lambda x: x['score'], reverse=True)[:top_n] - + # Log pre-sort statistics + if results: + scores = [r['score'] for r in results] + debug_print( + "Results BEFORE final sorting", + "SORT", + total_results=len(results), + min_score=f"{min(scores):.4f}", + max_score=f"{max(scores):.4f}", + avg_score=f"{sum(scores)/len(scores):.4f}" + ) + + # Show top 5 results before sorting (for debugging) + if DEBUG_ENABLED and len(results) > 0: + import os + if 
os.environ.get('DEBUG_SEARCH_CACHE', '0') == '1': + for i, r in enumerate(results[:5]): + debug_print( + f"Pre-sort #{i+1}", + "SORT", + file=r['file_name'][:30], + score=f"{r['score']:.4f}", + original_score=f"{r.get('original_score', r['score']):.4f}", + index=r.get('original_index', 'N/A'), + chunk=r['chunk_sequence'] + ) + + # Sort with deterministic tie-breaking to ensure consistent ordering + # Primary: score (descending) + # Secondary: file_name (ascending) - ensures consistent order when scores are equal + # Tertiary: chunk_sequence (ascending) - final tie-breaker for same file + results = sorted( + results, + key=lambda x: ( + -x['score'], # Negative for descending order + x['file_name'], # Alphabetical for tie-breaking + x['chunk_sequence'] # Chunk order for same file + ) + )[:top_n] + + # Log post-sort results + debug_print( + f"Results AFTER sorting (top {top_n})", + "SORT", + final_count=len(results) + ) + + # Show top results after sorting + if DEBUG_ENABLED and len(results) > 0: + import os + if os.environ.get('DEBUG_SEARCH_CACHE', '0') == '1': + for i, r in enumerate(results[:5]): + debug_print( + f"Final #{i+1}", + "SORT", + file=r['file_name'][:30], + score=f"{r['score']:.4f}", + original_score=f"{r.get('original_score', r['score']):.4f}", + index=r.get('original_index', 'N/A'), + chunk=r['chunk_sequence'] + ) + + # Cache the results before returning (pass scope parameters for correct partition key) + cache_search_results( + cache_key, + results, + user_id, + doc_scope, + active_group_id, + active_public_workspace_id + ) + + debug_print( + "Search complete - returning results", + "SEARCH", + query=query[:40], + final_result_count=len(results) + ) + return results def extract_search_results(paged_results, top_n): diff --git a/application/single_app/functions_security.py b/application/single_app/functions_security.py new file mode 100644 index 00000000..a6e71c22 --- /dev/null +++ b/application/single_app/functions_security.py @@ -0,0 +1,24 @@ +# functions_security.py +"""Security-related helper functions.""" + +import re + + +SAFE_STORAGE_NAME_PATTERN = re.compile(r"^(?!.*\.\.)[A-Za-z0-9_-]+(?:\.[A-Za-z0-9_-]+)*$") +SAFE_SLUG_PATTERN = re.compile(r"^[A-Za-z0-9_-]+$") + + +def is_valid_storage_name(name: str) -> bool: + """Validate storage file names to prevent traversal and unsafe patterns.""" + if not name: + return False + if '/' in name or '\\' in name: + return False + return bool(SAFE_STORAGE_NAME_PATTERN.fullmatch(name)) + + +def is_safe_slug(value: str) -> bool: + """Allowlist check for simple slug values (alnum, underscore, hyphen).""" + if not value: + return False + return bool(SAFE_SLUG_PATTERN.fullmatch(value)) diff --git a/application/single_app/functions_settings.py b/application/single_app/functions_settings.py index 712a8d1c..5fa59f12 100644 --- a/application/single_app/functions_settings.py +++ b/application/single_app/functions_settings.py @@ -2,8 +2,10 @@ from config import * from functions_appinsights import log_event +import app_settings_cache +import inspect -def get_settings(): +def get_settings(use_cosmos=False): import secrets default_settings = { # External health check @@ -38,8 +40,16 @@ def get_settings(): 'allow_user_plugins': False, 'allow_group_agents': False, 'allow_group_custom_agent_endpoints': False, + 'allow_ai_foundry_agents': False, + 'allow_group_ai_foundry_agents': False, + 'allow_personal_ai_foundry_agents': False, + 'enable_agent_template_gallery': True, + 'agent_templates_allow_user_submission': True, + 
'agent_templates_require_approval': True, 'allow_group_plugins': False, 'id': 'app_settings', + # Control Center settings + 'control_center_last_refresh': None, # Timestamp of last data refresh # -- Your entire default dictionary here -- 'app_title': 'Simple Chat', 'landing_page_text': 'You can add text here and it supports Markdown. ' @@ -118,6 +128,7 @@ def get_settings(): # Workspaces 'enable_user_workspace': True, 'enable_group_workspaces': True, + 'enable_group_creation': True, 'require_member_of_create_group': False, 'enable_public_workspaces': False, 'require_member_of_create_public_workspace': False, @@ -130,10 +141,19 @@ def get_settings(): # Metadata Extraction 'enable_extract_meta_data': False, 'metadata_extraction_model': '', + + # Multimodal Vision + 'enable_multimodal_vision': False, + 'multimodal_vision_model': '', + 'enable_summarize_content_history_for_search': False, 'number_of_historical_messages_to_summarize': 10, 'enable_summarize_content_history_beyond_conversation_history_limit': False, + # Multi-Modal Vision Analysis + 'enable_multimodal_vision': False, + 'multimodal_vision_model': '', + # Document Classification 'enable_document_classification': False, 'document_classification_categories': [ @@ -169,6 +189,8 @@ def get_settings(): # Safety (Content Safety) Settings 'enable_content_safety': False, 'require_member_of_safety_violation_admin': False, + 'require_member_of_control_center_admin': False, + 'require_member_of_control_center_dashboard_reader': False, 'content_safety_endpoint': '', 'content_safety_key': '', 'content_safety_authentication_type': 'key', @@ -188,6 +210,10 @@ def get_settings(): 'enable_ai_search_apim': False, 'azure_apim_ai_search_endpoint': '', 'azure_apim_ai_search_subscription_key': '', + + # Search Result Caching + 'enable_search_result_caching': True, + 'search_cache_ttl_seconds': 300, 'azure_document_intelligence_endpoint': '', 'azure_document_intelligence_key': '', @@ -196,6 +222,34 @@ def get_settings(): 'azure_apim_document_intelligence_endpoint': '', 'azure_apim_document_intelligence_subscription_key': '', + # Web search (via Azure AI Foundry agent) + 'enable_web_search': False, + 'web_search_consent_accepted': False, + 'enable_web_search_user_notice': False, # Show popup to users explaining their message will be sent to Bing + 'web_search_user_notice_text': 'Your message will be sent to Microsoft Bing for web search. 
Only your current message is sent, not your conversation history.', + 'web_search_agent': { + 'agent_type': 'aifoundry', + 'azure_openai_gpt_endpoint': '', + 'azure_openai_gpt_api_version': '', + 'azure_openai_gpt_deployment': '', + 'other_settings': { + 'azure_ai_foundry': { + 'agent_id': '', + 'endpoint': '', + 'api_version': 'v1', + 'authentication_type': 'managed_identity', + 'managed_identity_type': 'system_assigned', + 'managed_identity_client_id': '', + 'tenant_id': '', + 'client_id': '', + 'client_secret': '', + 'cloud': '', + 'authority': '', + 'notes': '' + } + } + }, + # Authentication & Redirect Settings 'enable_front_door': False, 'front_door_url': '', @@ -210,31 +264,103 @@ def get_settings(): 'file_timer_unit': 'hours', 'file_processing_logs_turnoff_time': None, 'enable_external_healthcheck': False, + + # Streaming settings + 'streamingEnabled': False, + + # Reasoning effort settings (per-model) + 'reasoningEffortSettings': {}, # Video file settings with Azure Video Indexer Settings 'video_indexer_endpoint': video_indexer_endpoint, 'video_indexer_location': '', 'video_indexer_account_id': '', - 'video_indexer_api_key': '', 'video_indexer_resource_group': '', 'video_indexer_subscription_id': '', 'video_indexer_account_name': '', - 'video_indexer_arm_api_version': '2021-11-10-preview', + 'video_indexer_arm_api_version': '2024-01-01', 'video_index_timeout': 600, # Audio file settings with Azure speech service "speech_service_endpoint": '', "speech_service_location": '', "speech_service_locale": "en-US", - "speech_service_key": "" + "speech_service_key": "", + "speech_service_authentication_type": "key", # 'key' or 'managed_identity' + + # Speech-to-text chat input + "enable_speech_to_text_input": False, + + # Text-to-speech chat output + "enable_text_to_speech": False, + + #key vault settings + 'enable_key_vault_secret_storage': False, + 'key_vault_name': '', + 'key_vault_identity': '', + + # Retention Policy Settings + 'enable_retention_policy_personal': False, + 'enable_retention_policy_group': False, + 'enable_retention_policy_public': False, + 'retention_policy_execution_hour': 2, # Run at 2 AM by default (0-23) + 'retention_policy_last_run': None, # ISO timestamp of last execution + 'retention_policy_next_run': None, # ISO timestamp of next scheduled execution + 'retention_conversation_min_days': 1, + 'retention_conversation_max_days': 3650, # ~10 years + 'retention_document_min_days': 1, + 'retention_document_max_days': 3650, # ~10 years + # Default retention policies for each workspace type + # 'none' means no automatic deletion (users can still set their own) + # Numeric values (e.g., 30, 60, 90, 180, 365, 730) represent days + 'default_retention_conversation_personal': 'none', + 'default_retention_document_personal': 'none', + 'default_retention_conversation_group': 'none', + 'default_retention_document_group': 'none', + 'default_retention_conversation_public': 'none', + 'default_retention_document_public': 'none', } try: # Attempt to read the existing doc - settings_item = cosmos_settings_container.read_item( - item="app_settings", - partition_key="app_settings" - ) + if use_cosmos: + settings_item = cosmos_settings_container.read_item( + item="app_settings", + partition_key="app_settings" + ) + else: + settings_item = None + + cache_accessor = getattr(app_settings_cache, "get_settings_cache", None) + if callable(cache_accessor): + try: + settings_item = cache_accessor() + except Exception: + settings_item = None + + if not settings_item: + settings_item = 
cosmos_settings_container.read_item( + item="app_settings", + partition_key="app_settings" + ) + + frame = inspect.currentframe() + caller = frame.f_back # the function that called *this* code + + if caller is not None: + code = caller.f_code + caller_file = code.co_filename + caller_line = caller.f_lineno + caller_func = code.co_name + print( + "Warning: Failed to get settings from cache, read from Cosmos DB instead. " + f"Called from {caller_file}:{caller_line} in {caller_func}()." + ) + else: + print( + "Warning: Failed to get settings from cache, " + "read from Cosmos DB instead. (no caller frame)" + ) #print("Successfully retrieved settings from Cosmos DB.") # Merge default_settings in, to fill in any missing or nested keys @@ -264,6 +390,9 @@ def update_settings(new_settings): settings_item = get_settings() settings_item.update(new_settings) cosmos_settings_container.upsert_item(settings_item) + cache_updater = getattr(app_settings_cache, "update_settings_cache", None) + if callable(cache_updater): + cache_updater(settings_item) print("Settings updated successfully.") return True except Exception as e: @@ -491,7 +620,8 @@ def update_user_settings(user_id, settings_to_update): bool: True if the update was successful, False otherwise. """ log_prefix = f"User settings update for {user_id}:" - log_event("[UserSettings] Update Attempt", {"user_id": user_id, "settings_to_update": settings_to_update}) + sanitized_settings_to_update = sanitize_settings_for_logging(settings_to_update) + log_event("[UserSettings] Update Attempt", {"user_id": user_id, "settings_to_update": sanitized_settings_to_update}) try: @@ -547,8 +677,13 @@ def update_user_settings(user_id, settings_to_update): first_user_agent = doc['settings']['agents'][0] if first_user_agent: doc['settings']['selected_agent'] = { + 'id': first_user_agent.get('id'), 'name': first_user_agent['name'], + 'display_name': first_user_agent.get('display_name', first_user_agent['name']), 'is_global': False, + 'is_group': False, + 'group_id': None, + 'group_name': None, } else: settings = get_settings() @@ -560,24 +695,44 @@ def update_user_settings(user_id, settings_to_update): if global_agents: first_global_agent = global_agents[0] doc['settings']['selected_agent'] = { + 'id': first_global_agent.get('id'), 'name': first_global_agent['name'], + 'display_name': first_global_agent.get('display_name', first_global_agent['name']), 'is_global': True, + 'is_group': False, + 'group_id': None, + 'group_name': None, } else: doc['settings']['selected_agent'] = { + 'id': None, 'name': 'default_agent', + 'display_name': 'default_agent', 'is_global': True, + 'is_group': False, + 'group_id': None, + 'group_name': None, } except Exception: # Fallback if container access fails doc['settings']['selected_agent'] = { + 'id': None, 'name': 'default_agent', + 'display_name': 'default_agent', 'is_global': True, + 'is_group': False, + 'group_id': None, + 'group_name': None, } else: doc['settings']['selected_agent'] = { + 'id': None, 'name': 'researcher', + 'display_name': 'researcher', 'is_global': False, + 'is_group': False, + 'group_id': None, + 'group_name': None, } if doc['settings']['agents'] is not None and len(doc['settings']['agents']) > 0: @@ -620,5 +775,122 @@ def wrapper(*args, **kwargs): return decorator def sanitize_settings_for_user(full_settings: dict) -> dict: - # Exclude any key containing the substring "key" or specific sensitive URLs - return {k: v for k, v in full_settings.items() if "key" not in k and k != "office_docs_storage_account_url"} \ No 
newline at end of file + if not isinstance(full_settings, dict): + return full_settings + + sensitive_terms = ("key", "secret", "password", "connection", "base64", "storage_account_url") + sanitized = {} + + for k, v in full_settings.items(): + if any(term in k.lower() for term in sensitive_terms): + continue + if isinstance(v, dict): + sanitized[k] = sanitize_settings_for_user(v) + elif isinstance(v, list): + sanitized[k] = [ + sanitize_settings_for_user(item) if isinstance(item, dict) else item + for item in v + ] + else: + sanitized[k] = v + + # Add boolean flags for logo/favicon existence so templates can check without exposing base64 data + # These fields are stripped by the base64 filter above, but templates need to know if logos exist + if 'custom_logo_base64' in full_settings: + sanitized['custom_logo_base64'] = bool(full_settings.get('custom_logo_base64')) + if 'custom_logo_dark_base64' in full_settings: + sanitized['custom_logo_dark_base64'] = bool(full_settings.get('custom_logo_dark_base64')) + if 'custom_favicon_base64' in full_settings: + sanitized['custom_favicon_base64'] = bool(full_settings.get('custom_favicon_base64')) + + return sanitized + +def sanitize_settings_for_logging(full_settings: dict) -> dict: + """ + Recursively sanitize settings to remove sensitive data from debug logs. + Filters out keys containing: key, base64, image, storage_account_url + Also filters out values containing base64 data + """ + if not isinstance(full_settings, dict): + return full_settings + + sanitized = {} + sensitive_key_terms = ["key", "base64", "image", "storage_account_url"] + + for k, v in full_settings.items(): + # Skip keys with sensitive terms + if any(term in k.lower() for term in sensitive_key_terms): + sanitized[k] = "[REDACTED]" + continue + + # Check if value is a string containing base64 data + if isinstance(v, str) and ("base64," in v or len(v) > 500): + sanitized[k] = "[BASE64_DATA_REDACTED]" + # Recursively sanitize nested dicts + elif isinstance(v, dict): + sanitized[k] = sanitize_settings_for_logging(v) + # Recursively sanitize lists + elif isinstance(v, list): + sanitized[k] = [sanitize_settings_for_logging(item) if isinstance(item, dict) else item for item in v] + else: + sanitized[k] = v + + return sanitized + +# Search history management functions +def get_user_search_history(user_id): + """Get user's search history from their settings document""" + try: + doc = cosmos_user_settings_container.read_item(item=user_id, partition_key=user_id) + return doc.get('search_history', []) + except exceptions.CosmosResourceNotFoundError: + return [] + except Exception as e: + print(f"Error getting search history: {e}") + return [] + +def add_search_to_history(user_id, search_term): + """Add a search term to user's history, maintaining max 20 items""" + try: + try: + doc = cosmos_user_settings_container.read_item(item=user_id, partition_key=user_id) + except exceptions.CosmosResourceNotFoundError: + doc = {'id': user_id, 'settings': {}} + + search_history = doc.get('search_history', []) + + # Remove if already exists (deduplicate) + search_history = [item for item in search_history if item.get('term') != search_term] + + # Add new search at beginning + search_history.insert(0, { + 'term': search_term, + 'timestamp': datetime.now(timezone.utc).isoformat() + }) + + # Trim to 20 items + search_history = search_history[:20] + + doc['search_history'] = search_history + cosmos_user_settings_container.upsert_item(body=doc) + + return search_history + except Exception as e: + 
print(f"Error adding search to history: {e}") + return [] + +def clear_user_search_history(user_id): + """Clear all search history for a user""" + try: + try: + doc = cosmos_user_settings_container.read_item(item=user_id, partition_key=user_id) + except exceptions.CosmosResourceNotFoundError: + doc = {'id': user_id, 'settings': {}} + + doc['search_history'] = [] + cosmos_user_settings_container.upsert_item(body=doc) + + return True + except Exception as e: + print(f"Error clearing search history: {e}") + return False \ No newline at end of file diff --git a/application/single_app/json_schema_validation.py b/application/single_app/json_schema_validation.py index 4cda4da2..c7c58a3c 100644 --- a/application/single_app/json_schema_validation.py +++ b/application/single_app/json_schema_validation.py @@ -43,7 +43,7 @@ def validate_plugin(plugin): validator = Draft7Validator(schema['definitions']['Plugin']) errors = sorted(validator.iter_errors(plugin_copy), key=lambda e: e.path) if errors: - return '; '.join([e.message for e in errors]) + return '; '.join([f"{plugin.get('name', '')}: {e.message}" for e in errors]) # Additional business logic validation # For non-SQL plugins, endpoint must not be empty diff --git a/application/single_app/plugin_validation_endpoint.py b/application/single_app/plugin_validation_endpoint.py index 639fdc0e..d59a8d80 100644 --- a/application/single_app/plugin_validation_endpoint.py +++ b/application/single_app/plugin_validation_endpoint.py @@ -8,6 +8,7 @@ from semantic_kernel_plugins.plugin_loader import discover_plugins from functions_appinsights import log_event from functions_authentication import login_required, admin_required +from swagger_wrapper import swagger_route, get_auth_security import logging @@ -15,6 +16,9 @@ @plugin_validation_bp.route('/api/admin/plugins/validate', methods=['POST']) +@swagger_route( + security=get_auth_security() +) @login_required @admin_required def validate_plugin_manifest(): @@ -60,6 +64,9 @@ def validate_plugin_manifest(): @plugin_validation_bp.route('/api/admin/plugins/test-instantiation', methods=['POST']) +@swagger_route( + security=get_auth_security() +) def test_plugin_instantiation(): """ Test if a plugin can be instantiated successfully. @@ -128,6 +135,9 @@ def normalize(s): @plugin_validation_bp.route('/api/admin/plugins/health-check/', methods=['GET']) +@swagger_route( + security=get_auth_security() +) def check_plugin_health(plugin_name): """ Perform a health check on an existing plugin. @@ -201,6 +211,9 @@ def normalize(s): @plugin_validation_bp.route('/api/admin/plugins/repair/', methods=['POST']) +@swagger_route( + security=get_auth_security() +) def repair_plugin(plugin_name): """ Attempt to repair a plugin that has issues. 
diff --git a/application/single_app/requirements.txt b/application/single_app/requirements.txt index 4acd2326..6a738388 100644 --- a/application/single_app/requirements.txt +++ b/application/single_app/requirements.txt @@ -1,13 +1,12 @@ # requirements.txt pandas==2.2.3 azure-monitor-query==1.4.1 -opencensus-ext-azure==1.1.15 Flask==2.2.5 Flask-WTF==1.2.1 gunicorn -Werkzeug==3.0.6 +Werkzeug==3.1.5 requests==2.32.4 -openai==1.67 +openai>=1.98.0,<2.0.0 docx2txt==0.8 Markdown==3.3.4 bleach==6.1.0 @@ -30,18 +29,19 @@ azure-identity==1.23.0 azure-ai-contentsafety==1.0.0 azure-storage-blob==12.24.1 azure-storage-queue==12.12.0 -pypdf==6.0.0 +azure-keyvault-secrets==4.10.0 +pypdf==6.4.0 python-docx==1.1.2 flask-executor==1.0.0 PyMuPDF==1.25.3 -langchain-text-splitters==0.3.7 +langchain-text-splitters==0.3.9 beautifulsoup4==4.13.3 openpyxl==3.1.5 xlrd==2.0.1 pillow==11.1.0 ffmpeg-binaries-compat==1.0.1 ffmpeg-python==0.2.0 -semantic-kernel>=1.32.1 +semantic-kernel>=1.39.2 redis>=5.0,<6.0 pyodbc>=4.0.0 PyMySQL>=1.0.0 @@ -49,5 +49,7 @@ azure-monitor-opentelemetry==1.6.13 psycopg2-binary==2.9.10 cython pyyaml==6.0.2 -aiohttp==3.12.15 -html2text==2025.4.15 \ No newline at end of file +aiohttp==3.13.3 +html2text==2025.4.15 +matplotlib==3.10.7 +azure-cognitiveservices-speech==1.47.0 \ No newline at end of file diff --git a/application/single_app/route_backend_agent_templates.py b/application/single_app/route_backend_agent_templates.py new file mode 100644 index 00000000..282b157c --- /dev/null +++ b/application/single_app/route_backend_agent_templates.py @@ -0,0 +1,188 @@ +"""Backend routes for agent template management.""" + +from flask import Blueprint, jsonify, request, session +from swagger_wrapper import swagger_route, get_auth_security + +from functions_authentication import ( + admin_required, + login_required, + get_current_user_info, +) +from functions_agent_templates import ( + STATUS_APPROVED, + validate_template_payload, + list_agent_templates, + create_agent_template, + update_agent_template, + approve_agent_template, + reject_agent_template, + delete_agent_template, + get_agent_template, +) +from functions_settings import get_settings + +bp_agent_templates = Blueprint('agent_templates', __name__) + + +def _feature_flags(): + settings = get_settings() + enabled = settings.get('enable_agent_template_gallery', False) + allow_submissions = settings.get('agent_templates_allow_user_submission', True) + require_approval = settings.get('agent_templates_require_approval', True) + return enabled, allow_submissions, require_approval, settings + + +def _is_admin() -> bool: + user = session.get('user') or {} + return 'Admin' in (user.get('roles') or []) + + +@bp_agent_templates.route('/api/agent-templates', methods=['GET']) +@login_required +@swagger_route(security=get_auth_security()) +def list_public_agent_templates(): + enabled, _, _, _ = _feature_flags() + if not enabled: + return jsonify({'templates': []}) + templates = list_agent_templates(status=STATUS_APPROVED, include_internal=False) + return jsonify({'templates': templates}) + + +@bp_agent_templates.route('/api/agent-templates', methods=['POST']) +@login_required +@swagger_route(security=get_auth_security()) +def submit_agent_template(): + enabled, allow_submissions, require_approval, settings = _feature_flags() + if not enabled: + return jsonify({'error': 'Agent template gallery is disabled.'}), 403 + if not settings.get('allow_user_agents') and not _is_admin(): + return jsonify({'error': 'Agent creation is disabled for your workspace.'}), 
403 + if not allow_submissions and not _is_admin(): + return jsonify({'error': 'Template submissions are disabled for users.'}), 403 + + data = request.get_json(silent=True) or {} + payload = data.get('template') or data + validation_error = validate_template_payload(payload) + # validate_template_payload returns false if valid, returns the simple error otherwise. + if validation_error: + return jsonify({'error': validation_error}), 400 + + is_admin_user = _is_admin() + payload['source_agent_id'] = payload.get('source_agent_id') or data.get('source_agent_id') + submission_scope = ( + payload.get('source_scope') + or data.get('source_scope') + or ('global' if is_admin_user else 'personal') + ) + submission_scope = str(submission_scope).lower() + payload['source_scope'] = submission_scope + + admin_context_submission = is_admin_user and submission_scope == 'global' + auto_approve = admin_context_submission or not require_approval + + try: + template = create_agent_template(payload, get_current_user_info(), auto_approve=auto_approve) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except Exception: + return jsonify({'error': 'Failed to submit template.'}), 500 + + if not is_admin_user: + for field in ('submission_notes', 'review_notes', 'rejection_reason', 'created_by_email'): + template.pop(field, None) + + status_code = 201 if template.get('status') == STATUS_APPROVED else 202 + return jsonify({'template': template}), status_code + + +@bp_agent_templates.route('/api/admin/agent-templates', methods=['GET']) +@login_required +@admin_required +@swagger_route(security=get_auth_security()) +def admin_list_agent_templates(): + status = request.args.get('status') + if status == 'all': + status = None + templates = list_agent_templates(status=status, include_internal=True) + return jsonify({'templates': templates}) + + +@bp_agent_templates.route('/api/admin/agent-templates/', methods=['GET']) +@login_required +@admin_required +@swagger_route(security=get_auth_security()) +def admin_get_agent_template(template_id): + template = get_agent_template(template_id) + if not template: + return jsonify({'error': 'Template not found.'}), 404 + return jsonify({'template': template}) + + +@bp_agent_templates.route('/api/admin/agent-templates/', methods=['PATCH']) +@login_required +@admin_required +@swagger_route(security=get_auth_security()) +def admin_update_agent_template(template_id): + payload = request.get_json(silent=True) or {} + try: + template = update_agent_template(template_id, payload) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except Exception: + return jsonify({'error': 'Failed to update template.'}), 500 + + if not template: + return jsonify({'error': 'Template not found.'}), 404 + return jsonify({'template': template}) + + +@bp_agent_templates.route('/api/admin/agent-templates//approve', methods=['POST']) +@login_required +@admin_required +@swagger_route(security=get_auth_security()) +def admin_approve_agent_template(template_id): + data = request.get_json(silent=True) or {} + notes = data.get('notes') + try: + template = approve_agent_template(template_id, get_current_user_info(), notes) + except Exception: + return jsonify({'error': 'Failed to approve template.'}), 500 + + if not template: + return jsonify({'error': 'Template not found.'}), 404 + return jsonify({'template': template}) + + +@bp_agent_templates.route('/api/admin/agent-templates//reject', methods=['POST']) +@login_required +@admin_required 
+@swagger_route(security=get_auth_security()) +def admin_reject_agent_template(template_id): + data = request.get_json(silent=True) or {} + reason = (data.get('reason') or '').strip() + if not reason: + return jsonify({'error': 'A rejection reason is required.'}), 400 + notes = data.get('notes') + try: + template = reject_agent_template(template_id, get_current_user_info(), reason, notes) + except Exception: + return jsonify({'error': 'Failed to reject template.'}), 500 + + if not template: + return jsonify({'error': 'Template not found.'}), 404 + return jsonify({'template': template}) + + +@bp_agent_templates.route('/api/admin/agent-templates/<template_id>', methods=['DELETE']) +@login_required +@admin_required +@swagger_route(security=get_auth_security()) +def admin_delete_agent_template(template_id): + try: + deleted = delete_agent_template(template_id) + except Exception: + return jsonify({'error': 'Failed to delete template.'}), 500 + + if not deleted: + return jsonify({'error': 'Template not found.'}), 404 + return jsonify({'success': True}) diff --git a/application/single_app/route_backend_agents.py b/application/single_app/route_backend_agents.py index 2af1d8df..b3a8220a 100644 --- a/application/single_app/route_backend_agents.py +++ b/application/single_app/route_backend_agents.py @@ -4,10 +4,21 @@ import uuid import logging import builtins -from flask import Blueprint, jsonify, request +from flask import Blueprint, jsonify, request, current_app from semantic_kernel_loader import get_agent_orchestration_types from functions_settings import get_settings, update_settings, get_user_settings, update_user_settings from functions_global_agents import get_global_agents, save_global_agent, delete_global_agent +from functions_personal_agents import get_personal_agents, ensure_migration_complete, save_personal_agent, delete_personal_agent +from functions_group import require_active_group, assert_group_role +from functions_agent_payload import sanitize_agent_payload, AgentPayloadError +from functions_group_agents import ( + get_group_agents, + get_group_agent, + save_group_agent, + delete_group_agent, + validate_group_agent_payload, +) +from functions_debug import debug_print from functions_authentication import * from functions_appinsights import log_event from json_schema_validation import validate_agent @@ -33,10 +44,6 @@ def generate_agent_id(): @login_required def get_user_agents(): user_id = get_current_user_id() - - # Import the new personal agents functions - from functions_personal_agents import get_personal_agents, ensure_migration_complete - # Ensure migration is complete (will migrate any remaining legacy data) ensure_migration_complete(user_id) @@ -46,6 +53,8 @@ def get_user_agents(): # Always mark user agents as is_global: False for agent in agents: agent['is_global'] = False + agent['is_group'] = False + agent.setdefault('agent_type', 'local') # Check global/merge toggles settings = get_settings() @@ -53,11 +62,12 @@ def get_user_agents(): merge_global = settings.get('merge_global_semantic_kernel_with_workspace', False) if per_user and merge_global: # Import and get global agents from container - from functions_global_agents import get_global_agents global_agents = get_global_agents() # Mark global agents for agent in global_agents: agent['is_global'] = True + agent['is_group'] = False + agent.setdefault('agent_type', 'local') # Merge agents using ID as key to avoid name conflicts # This allows both personal and global agents with same name to coexist @@ -87,10 +97,6 @@ def
set_user_agents(): user_id = get_current_user_id() agents = request.json if isinstance(request.json, list) else [] settings = get_settings() - - # Import the new personal agents functions - from functions_personal_agents import save_personal_agent, delete_personal_agent, get_personal_agents - # If custom endpoints are not allowed, strip deployment settings for endpoint, key, and api-revision if not settings.get('allow_user_custom_agent_endpoints', False): for agent in agents: @@ -106,14 +112,16 @@ def set_user_agents(): for agent in agents: if agent.get('is_global', False): continue # Skip global agents - agent['is_global'] = False # Ensure user agents are not global - # --- Require at least one deployment field --- - #if not (agent.get('azure_openai_gpt_deployment') or agent.get('azure_agent_apim_gpt_deployment')): - # return jsonify({'error': f'Agent "{agent.get("name", "(unnamed)")}" must have either azure_openai_gpt_deployment or azure_agent_apim_gpt_deployment set.'}), 400 - validation_error = validate_agent(agent) + try: + cleaned_agent = sanitize_agent_payload(agent) + except AgentPayloadError as exc: + return jsonify({'error': str(exc)}), 400 + cleaned_agent['is_global'] = False + cleaned_agent['is_group'] = False + validation_error = validate_agent(cleaned_agent) if validation_error: return jsonify({'error': f'Agent validation failed: {validation_error}'}), 400 - filtered_agents.append(agent) + filtered_agents.append(cleaned_agent) # Enforce global agent only if per_user_semantic_kernel is False per_user_semantic_kernel = settings.get('per_user_semantic_kernel', False) @@ -151,10 +159,6 @@ def set_user_agents(): @login_required def delete_user_agent(agent_name): user_id = get_current_user_id() - - # Import the new personal agents functions - from functions_personal_agents import get_personal_agents, delete_personal_agent - # Get current agents from personal_agents container agents = get_personal_agents(user_id) agent_to_delete = next((a for a in agents if a['name'] == agent_name), None) @@ -181,6 +185,177 @@ def delete_user_agent(agent_name): log_event("User agent deleted", extra={"user_id": user_id, "agent_name": agent_name}) return jsonify({'success': True}) + +# === GROUP AGENT ENDPOINTS === + +@bpa.route('/api/group/agents', methods=['GET']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def get_group_agents_route(): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role( + user_id, + active_group, + allowed_roles=("Owner", "Admin", "DocumentManager", "User"), + ) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + agents = get_group_agents(active_group) + return jsonify({'agents': agents}), 200 + + +@bpa.route('/api/group/agents/<agent_id>', methods=['GET']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def get_group_agent_route(agent_id): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role( + user_id, + active_group, + allowed_roles=("Owner", "Admin", "DocumentManager", "User"), + ) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as
exc: + return jsonify({'error': str(exc)}), 403 + + agent = get_group_agent(active_group, agent_id) + if not agent: + return jsonify({'error': 'Agent not found'}), 404 + return jsonify(agent), 200 + + +@bpa.route('/api/group/agents', methods=['POST']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def create_group_agent_route(): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role(user_id, active_group) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + payload = request.get_json(silent=True) or {} + try: + validate_group_agent_payload(payload, partial=False) + cleaned_payload = sanitize_agent_payload(payload) + except (ValueError, AgentPayloadError) as exc: + return jsonify({'error': str(exc)}), 400 + + for key in ('group_id', 'last_updated', 'is_global', 'is_group'): + cleaned_payload.pop(key, None) + + try: + saved = save_group_agent(active_group, cleaned_payload) + except Exception as exc: + debug_print('Failed to save group agent: %s', exc) + return jsonify({'error': 'Unable to save agent'}), 500 + + return jsonify(saved), 201 + + +@bpa.route('/api/group/agents/<agent_id>', methods=['PATCH']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def update_group_agent_route(agent_id): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role(user_id, active_group) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + existing = get_group_agent(active_group, agent_id) + if not existing: + return jsonify({'error': 'Agent not found'}), 404 + + updates = request.get_json(silent=True) or {} + for key in ('id', 'group_id', 'last_updated', 'is_global', 'is_group'): + updates.pop(key, None) + + try: + validate_group_agent_payload(updates, partial=True) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + + merged = dict(existing) + merged.update(updates) + merged['id'] = agent_id + + try: + validate_group_agent_payload(merged, partial=False) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + + try: + cleaned_payload = sanitize_agent_payload(merged) + except AgentPayloadError as exc: + return jsonify({'error': str(exc)}), 400 + + try: + saved = save_group_agent(active_group, cleaned_payload) + except Exception as exc: + debug_print('Failed to update group agent %s: %s', agent_id, exc) + return jsonify({'error': 'Unable to update agent'}), 500 + + return jsonify(saved), 200 + + +@bpa.route('/api/group/agents/<agent_id>', methods=['DELETE']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def delete_group_agent_route(agent_id): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role(user_id, active_group) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + try: + removed =
delete_group_agent(active_group, agent_id) + except Exception as exc: + debug_print('Failed to delete group agent %s: %s', agent_id, exc) + return jsonify({'error': 'Unable to delete agent'}), 500 + + if not removed: + return jsonify({'error': 'Agent not found'}), 404 + return jsonify({'message': 'Agent deleted'}), 200 + # User endpoint to set selected agent (new model, not legacy default_agent) @bpa.route('/api/user/settings/selected_agent', methods=['POST']) @swagger_route( @@ -195,9 +370,17 @@ def set_user_selected_agent(): return jsonify({'error': 'selected_agent is required.'}), 400 user_settings = get_user_settings(user_id) settings_to_update = user_settings.get('settings', {}) + agent_name = (selected_agent.get('name') or '').strip() + if not agent_name: + return jsonify({'error': 'selected_agent.name is required.'}), 400 agent = { - "name": selected_agent.get('name'), - "is_global": selected_agent.get('is_global', False) + "id": selected_agent.get('id'), + "name": agent_name, + "display_name": selected_agent.get('display_name'), + "is_global": selected_agent.get('is_global', False), + "is_group": selected_agent.get('is_group', False), + "group_id": selected_agent.get('group_id'), + "group_name": selected_agent.get('group_name') } settings_to_update['selected_agent'] = agent update_user_settings(user_id, settings_to_update) @@ -236,7 +419,6 @@ def set_selected_agent(): return jsonify({'error': 'Agent name is required.'}), 400 # Import and get global agents from container - from functions_global_agents import get_global_agents agents = get_global_agents() # Check that the agent exists @@ -246,7 +428,7 @@ def set_selected_agent(): # Set global_selected_agent field only settings = get_settings() - settings['global_selected_agent'] = { 'name': agent_name, 'is_global': True } + settings['global_selected_agent'] = { 'name': agent_name, 'is_global': True, 'is_group': False } update_settings(settings) log_event("Global selected agent set", extra={"action": "set-global-selected", "agent_name": agent_name, "user": str(get_current_user_id())}) # --- HOT RELOAD TRIGGER --- @@ -266,8 +448,6 @@ def set_selected_agent(): def list_agents(): try: # Use new global agents container - from functions_global_agents import get_global_agents - agents = get_global_agents() # Ensure each agent has an actions_to_load field @@ -276,6 +456,7 @@ def list_agents(): agent['actions_to_load'] = [] # Mark as global agents agent['is_global'] = True + agent['is_group'] = False log_event("List agents", extra={"action": "list", "user": str(get_current_user_id())}) return jsonify(agents) @@ -293,25 +474,31 @@ def add_agent(): try: agents = get_global_agents() new_agent = request.json.copy() if hasattr(request.json, 'copy') else dict(request.json) - new_agent['is_global'] = True - validation_error = validate_agent(new_agent) + try: + cleaned_agent = sanitize_agent_payload(new_agent) + except AgentPayloadError as exc: + log_event("Add agent failed: payload error", level=logging.WARNING, extra={"action": "add", "error": str(exc)}) + return jsonify({'error': str(exc)}), 400 + cleaned_agent['is_global'] = True + cleaned_agent['is_group'] = False + validation_error = validate_agent(cleaned_agent) if validation_error: - log_event("Add agent failed: validation error", level=logging.WARNING, extra={"action": "add", "agent": new_agent, "error": validation_error}) + log_event("Add agent failed: validation error", level=logging.WARNING, extra={"action": "add", "agent": cleaned_agent, "error": validation_error}) return 
jsonify({'error': validation_error}), 400 # Prevent duplicate names (case-insensitive) - if any(a['name'].lower() == new_agent['name'].lower() for a in agents): - log_event("Add agent failed: duplicate name", level=logging.WARNING, extra={"action": "add", "agent": new_agent}) + if any(a['name'].lower() == cleaned_agent['name'].lower() for a in agents): + log_event("Add agent failed: duplicate name", level=logging.WARNING, extra={"action": "add", "agent": cleaned_agent}) return jsonify({'error': 'Agent with this name already exists.'}), 400 # Assign a new GUID as id unless this is the default agent (which should have a static GUID) - if not new_agent.get('default_agent', False): - new_agent['id'] = str(uuid.uuid4()) + if not cleaned_agent.get('default_agent', False): + cleaned_agent['id'] = str(uuid.uuid4()) else: # If default_agent, ensure the static GUID is present (do not overwrite if already set) - if not new_agent.get('id'): - new_agent['id'] = '15b0c92a-741d-42ff-ba0b-367c7ee0c848' + if not cleaned_agent.get('id'): + cleaned_agent['id'] = '15b0c92a-741d-42ff-ba0b-367c7ee0c848' # Save to global agents container - result = save_global_agent(new_agent) + result = save_global_agent(cleaned_agent) if not result: return jsonify({'error': 'Failed to save agent.'}), 500 @@ -325,7 +512,7 @@ def add_agent(): if not found: return jsonify({'error': 'There must be at least one agent matching the global_selected_agent.'}), 400 - log_event("Agent added", extra={"action": "add", "agent": {k: v for k, v in new_agent.items() if k != 'id'}, "user": str(get_current_user_id())}) + log_event("Agent added", extra={"action": "add", "agent": {k: v for k, v in cleaned_agent.items() if k != 'id'}, "user": str(get_current_user_id())}) # --- HOT RELOAD TRIGGER --- setattr(builtins, "kernel_reload_needed", True) return jsonify({'success': True}) @@ -400,18 +587,22 @@ def update_agent_setting(setting_name): @admin_required def edit_agent(agent_name): try: - from functions_global_agents import get_global_agents, save_global_agent - agents = get_global_agents() updated_agent = request.json.copy() if hasattr(request.json, 'copy') else dict(request.json) - updated_agent['is_global'] = True - validation_error = validate_agent(updated_agent) + try: + cleaned_agent = sanitize_agent_payload(updated_agent) + except AgentPayloadError as exc: + log_event("Edit agent failed: payload error", level=logging.WARNING, extra={"action": "edit", "agent_name": agent_name, "error": str(exc)}) + return jsonify({'error': str(exc)}), 400 + cleaned_agent['is_global'] = True + cleaned_agent['is_group'] = False + validation_error = validate_agent(cleaned_agent) if validation_error: - log_event("Edit agent failed: validation error", level=logging.WARNING, extra={"action": "edit", "agent": updated_agent, "error": validation_error}) + log_event("Edit agent failed: validation error", level=logging.WARNING, extra={"action": "edit", "agent": cleaned_agent, "error": validation_error}) return jsonify({'error': validation_error}), 400 # --- Require at least one deployment field --- - if not (updated_agent.get('azure_openai_gpt_deployment') or updated_agent.get('azure_agent_apim_gpt_deployment')): - log_event("Edit agent failed: missing deployment field", level=logging.WARNING, extra={"action": "edit", "agent": updated_agent}) + if not (cleaned_agent.get('azure_openai_gpt_deployment') or cleaned_agent.get('azure_agent_apim_gpt_deployment')): + log_event("Edit agent failed: missing deployment field", level=logging.WARNING, extra={"action": "edit", 
"agent": cleaned_agent}) return jsonify({'error': 'Agent must have either azure_openai_gpt_deployment or azure_agent_apim_gpt_deployment set.'}), 400 # Find the agent to update @@ -419,7 +610,7 @@ def edit_agent(agent_name): for a in agents: if a['name'] == agent_name: # Preserve the existing id - updated_agent['id'] = a.get('id') + cleaned_agent['id'] = a.get('id') agent_found = True break @@ -428,7 +619,7 @@ def edit_agent(agent_name): return jsonify({'error': 'Agent not found.'}), 404 # Save the updated agent - result = save_global_agent(updated_agent) + result = save_global_agent(cleaned_agent) if not result: return jsonify({'error': 'Failed to save agent.'}), 500 @@ -446,7 +637,7 @@ def edit_agent(agent_name): f"Agent {agent_name} edited", extra={ "action": "edit", - "agent": {k: v for k, v in updated_agent.items() if k != 'id'}, + "agent": {k: v for k, v in cleaned_agent.items() if k != 'id'}, "user": str(get_current_user_id()), } ) @@ -465,8 +656,6 @@ def edit_agent(agent_name): @admin_required def delete_agent(agent_name): try: - from functions_global_agents import get_global_agents, delete_global_agent - agents = get_global_agents() # Find the agent to delete @@ -550,9 +739,7 @@ def orchestration_settings(): log_event(f"Error updating orchestration settings: {e}", level=logging.ERROR, exceptionTraceback=True) return jsonify({'error': 'Failed to update orchestration settings.'}), 500 -def get_global_agent_settings(include_admin_extras=False): - from functions_global_agents import get_global_agents - +def get_global_agent_settings(include_admin_extras=False): settings = get_settings() agents = get_global_agents() diff --git a/application/single_app/route_backend_chats.py b/application/single_app/route_backend_chats.py index 8e6aa196..10ea1abe 100644 --- a/application/single_app/route_backend_chats.py +++ b/application/single_app/route_backend_chats.py @@ -8,9 +8,13 @@ from semantic_kernel_fact_memory_store import FactMemoryStore from semantic_kernel_loader import initialize_semantic_kernel from semantic_kernel_plugins.plugin_invocation_logger import get_plugin_logger +from foundry_agent_runtime import FoundryAgentInvocationError, execute_foundry_agent import builtins import asyncio, types +import ast import json +import re +from typing import Any, Dict, List, Mapping, Optional from config import * from flask import g from functions_authentication import * @@ -21,6 +25,7 @@ from functions_chat import * from functions_conversation_metadata import collect_conversation_metadata, update_conversation_with_metadata from functions_debug import debug_print +from functions_activity_logging import log_chat_activity, log_conversation_creation, log_token_usage from flask import current_app from swagger_wrapper import swagger_route, get_auth_security @@ -36,9 +41,7 @@ def get_kernel_agents(): def register_route_backend_chats(app): @app.route('/api/chat', methods=['POST']) - @swagger_route( - security=get_auth_security() - ) + @swagger_route(security=get_auth_security()) @login_required @user_required def chat_api(): @@ -55,14 +58,88 @@ def chat_api(): user_message = data.get('message', '') conversation_id = data.get('conversation_id') hybrid_search_enabled = data.get('hybrid_search') + web_search_enabled = data.get('web_search_enabled') selected_document_id = data.get('selected_document_id') image_gen_enabled = data.get('image_generation') document_scope = data.get('doc_scope') + reload_messages_required = False + + def parse_json_string(candidate: str) -> Any: + """Parse JSON content when 
strings look like serialized structures.""" + trimmed = candidate.strip() + if not trimmed or trimmed[0] not in ('{', '['): + return None + try: + return json.loads(trimmed) + except Exception as exc: + log_event( + f"[result_requires_message_reload] Failed to parse JSON: {str(exc)} | candidate: {trimmed[:200]}", + level=logging.DEBUG + ) + return None + + def dict_requires_reload(payload: Dict[str, Any]) -> bool: + """Inspect dictionary payloads for any signal that messages were persisted.""" + if payload.get('reload_messages') or payload.get('requires_message_reload'): + return True + + metadata = payload.get('metadata') + if isinstance(metadata, dict) and metadata.get('requires_message_reload'): + return True + + image_url = payload.get('image_url') + if isinstance(image_url, dict) and image_url.get('url'): + return True + if isinstance(image_url, str) and image_url.strip(): + return True + + result_type = payload.get('type') + if isinstance(result_type, str) and result_type.lower() == 'image_url': + return True + + mime = payload.get('mime') + if isinstance(mime, str) and mime.startswith('image/'): + return True + + for value in payload.values(): + if result_requires_message_reload(value): + return True + return False + + def list_requires_reload(items: List[Any]) -> bool: + """Evaluate list items for reload requirements.""" + return any(result_requires_message_reload(item) for item in items) + + def result_requires_message_reload(result: Any) -> bool: + """Heuristically detect plugin outputs that inject new Cosmos messages (e.g., chart images).""" + if result is None: + return False + if isinstance(result, str): + parsed = parse_json_string(result) + return result_requires_message_reload(parsed) if parsed is not None else False + if isinstance(result, list): + return list_requires_reload(result) + if isinstance(result, dict): + return dict_requires_reload(result) + return False active_group_id = data.get('active_group_id') + active_public_workspace_id = data.get('active_public_workspace_id') # Extract active public workspace ID frontend_gpt_model = data.get('model_deployment') top_n_results = data.get('top_n') # Extract top_n parameter from request classifications_to_send = data.get('classifications') # Extract classifications parameter from request chat_type = data.get('chat_type', 'user') # 'user' or 'group', default to 'user' + reasoning_effort = data.get('reasoning_effort') # Extract reasoning effort for reasoning models + + # Check if this is a retry or edit request (both work the same way - reuse existing user message) + retry_user_message_id = data.get('retry_user_message_id') or data.get('edited_user_message_id') + retry_thread_id = data.get('retry_thread_id') + retry_thread_attempt = data.get('retry_thread_attempt') + is_retry = bool(retry_user_message_id) + is_edit = bool(data.get('edited_user_message_id')) + + if is_retry: + operation_type = 'Edit' if is_edit else 'Retry' + debug_print(f"🔍 Chat API - {operation_type} detected! 
user_message_id={retry_user_message_id}, thread_id={retry_thread_id}, attempt={retry_thread_attempt}") # Store conversation_id in Flask context for plugin logger access g.conversation_id = conversation_id @@ -80,6 +157,7 @@ def chat_api(): search_query = user_message # <--- ADD THIS LINE (Initialize search_query) hybrid_citations_list = [] # <--- ADD THIS LINE (Initialize hybrid list) agent_citations_list = [] # <--- ADD THIS LINE (Initialize agent citations list) + web_search_citations_list = [] system_messages_for_augmentation = [] # Collect system messages from search search_results = [] selected_agent = None # Initialize selected_agent early to prevent NameError @@ -99,6 +177,8 @@ def chat_api(): # Convert toggles from string -> bool if needed if isinstance(hybrid_search_enabled, str): hybrid_search_enabled = hybrid_search_enabled.lower() == 'true' + if isinstance(web_search_enabled, str): + web_search_enabled = web_search_enabled.lower() == 'true' if isinstance(image_gen_enabled, str): image_gen_enabled = image_gen_enabled.lower() == 'true' @@ -186,10 +266,10 @@ def chat_api(): raise ValueError("GPT Client or Model could not be initialized.") except Exception as e: - print(f"Error initializing GPT client/model: {e}") + debug_print(f"Error initializing GPT client/model: {e}") # Handle error appropriately - maybe return 500 or default behavior return jsonify({'error': f'Failed to initialize AI model: {str(e)}'}), 500 - + # region 1 - Load or Create Conversation # --------------------------------------------------------------------- # 1) Load or create conversation # --------------------------------------------------------------------- @@ -205,6 +285,18 @@ def chat_api(): 'strict': False } cosmos_conversations_container.upsert_item(conversation_item) + + # Log conversation creation + log_conversation_creation( + user_id=user_id, + conversation_id=conversation_id, + title='New Conversation', + workspace_type='personal' + ) + + # Mark as logged to activity logs to prevent duplicate migration + conversation_item['added_to_activity_log'] = True + cosmos_conversations_container.upsert_item(conversation_item) else: try: conversation_item = cosmos_conversations_container.read_item(item=conversation_id, partition_key=conversation_id) @@ -221,10 +313,22 @@ def chat_api(): 'strict': False } # Optionally log that a conversation was expected but not found - print(f"Warning: Conversation ID {conversation_id} not found, creating new.") + debug_print(f"Warning: Conversation ID {conversation_id} not found, creating new.") + cosmos_conversations_container.upsert_item(conversation_item) + + # Log conversation creation + log_conversation_creation( + user_id=user_id, + conversation_id=conversation_id, + title='New Conversation', + workspace_type='personal' + ) + + # Mark as logged to activity logs to prevent duplicate migration + conversation_item['added_to_activity_log'] = True cosmos_conversations_container.upsert_item(conversation_item) except Exception as e: - print(f"Error reading conversation {conversation_id}: {e}") + debug_print(f"Error reading conversation {conversation_id}: {e}") return jsonify({'error': f'Error reading conversation: {str(e)}'}), 500 # Determine the actual chat context based on existing conversation or document usage @@ -235,7 +339,7 @@ def chat_api(): if conversation_item.get('chat_type'): # Use existing chat_type from conversation metadata actual_chat_type = conversation_item['chat_type'] - print(f"Using existing chat_type from conversation: {actual_chat_type}") + 
debug_print(f"Using existing chat_type from conversation: {actual_chat_type}") elif conversation_item.get('context'): # Fallback: determine from existing context primary_context = next((ctx for ctx in conversation_item['context'] if ctx.get('type') == 'primary'), None) @@ -246,11 +350,11 @@ def chat_api(): actual_chat_type = 'public' elif primary_context.get('scope') == 'personal': actual_chat_type = 'personal' - print(f"Determined chat_type from existing primary context: {actual_chat_type}") + debug_print(f"Determined chat_type from existing primary context: {actual_chat_type}") else: # No primary context exists - model-only conversation actual_chat_type = None # This will result in no badges - print(f"No primary context found - model-only conversation") + debug_print(f"No primary context found - model-only conversation") else: # New conversation - will be determined by document usage during metadata collection # For now, use the legacy logic as fallback @@ -258,42 +362,70 @@ def chat_api(): actual_chat_type = 'group' elif document_scope == 'public': actual_chat_type = 'public' - print(f"New conversation - using legacy logic: {actual_chat_type}") - + debug_print(f"New conversation - using legacy logic: {actual_chat_type}") + # region 2 - Append User Message # --------------------------------------------------------------------- - # 2) Append the user message to conversation immediately + # 2) Append the user message to conversation immediately (or use existing for retry) # --------------------------------------------------------------------- - user_message_id = f"{conversation_id}_user_{int(time.time())}_{random.randint(1000,9999)}" - - # Collect comprehensive metadata for user message - user_metadata = {} - - # Get current user information - current_user = get_current_user_info() - if current_user: - user_metadata['user_info'] = { - 'user_id': current_user.get('userId'), - 'username': current_user.get('userPrincipalName'), - 'display_name': current_user.get('displayName'), - 'email': current_user.get('email'), - 'timestamp': datetime.utcnow().isoformat() - } - - # Button states and selections - user_metadata['button_states'] = { - 'image_generation': image_gen_enabled, - 'document_search': hybrid_search_enabled - } - # Document search scope and selections - if hybrid_search_enabled: - user_metadata['workspace_search'] = { - 'search_enabled': True, - 'document_scope': document_scope, - 'selected_document_id': selected_document_id, - 'classification': classifications_to_send + if is_retry: + # For retry, use the provided user message ID and thread info + user_message_id = retry_user_message_id + current_user_thread_id = retry_thread_id + latest_thread_id = current_user_thread_id + + # Read the existing user message to get metadata + try: + user_message_doc = cosmos_messages_container.read_item( + item=user_message_id, + partition_key=conversation_id + ) + previous_thread_id = user_message_doc.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + # Extract user_metadata from existing message for later use + user_metadata = user_message_doc.get('metadata', {}) + + debug_print(f"🔍 Chat API - Read retry user message:") + debug_print(f" thread_id: {user_message_doc.get('metadata', {}).get('thread_info', {}).get('thread_id')}") + debug_print(f" previous_thread_id: {previous_thread_id}") + debug_print(f" attempt: {user_message_doc.get('metadata', {}).get('thread_info', {}).get('thread_attempt')}") + debug_print(f" active: {user_message_doc.get('metadata', {}).get('thread_info', 
{}).get('active_thread')}") + except Exception as e: + debug_print(f"Error reading retry user message: {e}") + return jsonify({'error': 'Retry user message not found'}), 404 + else: + # Normal flow: create new user message + user_message_id = f"{conversation_id}_user_{int(time.time())}_{random.randint(1000,9999)}" + + # Collect comprehensive metadata for user message + user_metadata = {} + + # Get current user information + current_user = get_current_user_info() + if current_user: + user_metadata['user_info'] = { + 'user_id': current_user.get('userId'), + 'username': current_user.get('userPrincipalName'), + 'display_name': current_user.get('displayName'), + 'email': current_user.get('email'), + 'timestamp': datetime.utcnow().isoformat() + } + + # Button states and selections + user_metadata['button_states'] = { + 'image_generation': image_gen_enabled, + 'document_search': hybrid_search_enabled, + 'web_search': bool(web_search_enabled) } + # Document search scope and selections + if hybrid_search_enabled: + user_metadata['workspace_search'] = { + 'search_enabled': True, + 'document_scope': document_scope, + 'selected_document_id': selected_document_id, + 'classification': classifications_to_send + } + # Get document details if specific document selected if selected_document_id and selected_document_id != "all": try: @@ -302,7 +434,7 @@ def chat_api(): cosmos_container = cosmos_group_documents_container elif document_scope == 'public': cosmos_container = cosmos_public_documents_container - else: + elif document_scope == 'personal': cosmos_container = cosmos_user_documents_container doc_query = "SELECT c.file_name, c.title, c.document_id, c.group_id FROM c WHERE c.id = @doc_id" @@ -310,12 +442,12 @@ def chat_api(): doc_results = list(cosmos_container.query_items( query=doc_query, parameters=doc_params, enable_cross_partition_query=True )) - if doc_results: + if doc_results and 'workspace_search' in user_metadata: doc_info = doc_results[0] user_metadata['workspace_search']['document_name'] = doc_info.get('title') or doc_info.get('file_name') user_metadata['workspace_search']['document_filename'] = doc_info.get('file_name') except Exception as e: - print(f"Error retrieving document details: {e}") + debug_print(f"Error retrieving document details: {e}") # Add scope-specific details if document_scope == 'group' and active_group_id: @@ -324,110 +456,201 @@ def chat_api(): group_doc = find_group_by_id(active_group_id) debug_print(f"Workspace search group lookup result: {group_doc}") - if group_doc and group_doc.get('name'): - group_name = group_doc.get('name') - user_metadata['workspace_search']['group_name'] = group_name - debug_print(f"Workspace search - set group_name to: {group_name}") + if group_doc: + # Check if group status allows chat operations + from functions_group import check_group_status_allows_operation + allowed, reason = check_group_status_allows_operation(group_doc, 'chat') + if not allowed: + return jsonify({'error': reason}), 403 + + if group_doc.get('name'): + group_name = group_doc.get('name') + if 'workspace_search' in user_metadata: + user_metadata['workspace_search']['group_name'] = group_name + debug_print(f"Workspace search - set group_name to: {group_name}") + else: + debug_print(f"Workspace search - no name for group: {active_group_id}") + if 'workspace_search' in user_metadata: + user_metadata['workspace_search']['group_name'] = None else: - debug_print(f"Workspace search - no group found or no name for id: {active_group_id}") - 
user_metadata['workspace_search']['group_name'] = None + debug_print(f"Workspace search - no group found for id: {active_group_id}") + if 'workspace_search' in user_metadata: + user_metadata['workspace_search']['group_name'] = None except Exception as e: - print(f"Error retrieving group details: {e}") - user_metadata['workspace_search']['group_name'] = None + debug_print(f"Error retrieving group details: {e}") + if 'workspace_search' in user_metadata: + user_metadata['workspace_search']['group_name'] = None import traceback traceback.print_exc() - else: - user_metadata['workspace_search'] = { - 'search_enabled': False - } - - # Agent selection (if available) - if hasattr(g, 'kernel_agents') and g.kernel_agents: - try: - # Try to get selected agent info from user settings or global settings - selected_agent_info = None - if user_id: - try: - user_settings_doc = cosmos_user_settings_container.read_item( - item=user_id, partition_key=user_id - ) - selected_agent_info = user_settings_doc.get('settings', {}).get('selected_agent') - except: - pass - - if not selected_agent_info: - # Fallback to global selected agent - selected_agent_info = settings.get('global_selected_agent') + + if document_scope == 'public' and active_public_workspace_id: + # Check if public workspace status allows chat operations + try: + from functions_public_workspaces import find_public_workspace_by_id, check_public_workspace_status_allows_operation + workspace_doc = find_public_workspace_by_id(active_public_workspace_id) + if workspace_doc: + allowed, reason = check_public_workspace_status_allows_operation(workspace_doc, 'chat') + if not allowed: + return jsonify({'error': reason}), 403 + except Exception as e: + debug_print(f"Error checking public workspace status: {e}") - if selected_agent_info: - user_metadata['agent_selection'] = { - 'selected_agent': selected_agent_info.get('name'), - 'agent_display_name': selected_agent_info.get('display_name'), - 'is_global': selected_agent_info.get('is_global', False) - } - except Exception as e: - print(f"Error retrieving agent details: {e}") + if 'workspace_search' in user_metadata: + user_metadata['workspace_search']['active_public_workspace_id'] = active_public_workspace_id + + # Ensure workspace_search key always exists for consistency + if 'workspace_search' not in user_metadata: + user_metadata['workspace_search'] = { + 'search_enabled': False + } - # Prompt selection (extract from message if available) - prompt_info = data.get('prompt_info') - if prompt_info: - user_metadata['prompt_selection'] = { - 'selected_prompt_index': prompt_info.get('index'), - 'selected_prompt_text': prompt_info.get('content'), - 'prompt_name': prompt_info.get('name'), - 'prompt_id': prompt_info.get('id') + # Agent selection (if available) + if hasattr(g, 'kernel_agents') and g.kernel_agents: + try: + # Try to get selected agent info from user settings or global settings + selected_agent_info = None + if user_id: + try: + user_settings_doc = cosmos_user_settings_container.read_item( + item=user_id, partition_key=user_id + ) + selected_agent_info = user_settings_doc.get('settings', {}).get('selected_agent') + except: + pass + + if not selected_agent_info: + # Fallback to global selected agent + selected_agent_info = settings.get('global_selected_agent') + + if selected_agent_info: + user_metadata['agent_selection'] = { + 'selected_agent': selected_agent_info.get('name'), + 'agent_display_name': selected_agent_info.get('display_name'), + 'is_global': selected_agent_info.get('is_global', False), + 
'is_group': selected_agent_info.get('is_group', False), + 'group_id': selected_agent_info.get('group_id'), + 'group_name': selected_agent_info.get('group_name'), + 'agent_id': selected_agent_info.get('id') + } + except Exception as e: + debug_print(f"Error retrieving agent details: {e}") + + # Prompt selection (extract from message if available) + prompt_info = data.get('prompt_info') + if prompt_info: + user_metadata['prompt_selection'] = { + 'selected_prompt_index': prompt_info.get('index'), + 'selected_prompt_text': prompt_info.get('content'), + 'prompt_name': prompt_info.get('name'), + 'prompt_id': prompt_info.get('id') + } + + # Agent selection (from frontend if available, override settings-based selection) + agent_info = data.get('agent_info') + if agent_info: + user_metadata['agent_selection'] = { + 'selected_agent': agent_info.get('name'), + 'agent_display_name': agent_info.get('display_name'), + 'is_global': agent_info.get('is_global', False), + 'is_group': agent_info.get('is_group', False), + 'group_id': agent_info.get('group_id'), + 'group_name': agent_info.get('group_name'), + 'agent_id': agent_info.get('id') + } + + # Model selection information + user_metadata['model_selection'] = { + 'selected_model': gpt_model, + 'frontend_requested_model': frontend_gpt_model, + 'reasoning_effort': reasoning_effort if reasoning_effort and reasoning_effort != 'none' else None, + 'streaming': 'Disabled' } - - # Agent selection (from frontend if available, override settings-based selection) - agent_info = data.get('agent_info') - if agent_info: - user_metadata['agent_selection'] = { - 'selected_agent': agent_info.get('name'), - 'agent_display_name': agent_info.get('display_name'), - 'is_global': agent_info.get('is_global', False) + + # Chat type and group context for this specific message + user_metadata['chat_context'] = { + 'conversation_id': conversation_id } - - # Model selection information - user_metadata['model_selection'] = { - 'selected_model': gpt_model, - 'frontend_requested_model': frontend_gpt_model - } - - # Chat type and group context for this specific message - user_metadata['chat_context'] = { - 'conversation_id': conversation_id - } - - # Note: Message-level chat_type will be determined after document search is completed - - user_message_doc = { - 'id': user_message_id, - 'conversation_id': conversation_id, - 'role': 'user', - 'content': user_message, - 'timestamp': datetime.utcnow().isoformat(), - 'model_deployment_name': None, # Model not used for user message - 'metadata': user_metadata, - } - - # Debug: Print the complete metadata being saved - debug_print(f"Complete user_metadata being saved: {json.dumps(user_metadata, indent=2, default=str)}") - debug_print(f"Final chat_context for message: {user_metadata['chat_context']}") - debug_print(f"document_search: {hybrid_search_enabled}, has_search_results: {bool(search_results)}") - - # Note: Message-level chat_type will be updated after document search - - cosmos_messages_container.upsert_item(user_message_doc) - - # Set conversation title if it's still the default - if conversation_item.get('title', 'New Conversation') == 'New Conversation' and user_message: - new_title = (user_message[:30] + '...') if len(user_message) > 30 else user_message - conversation_item['title'] = new_title + + # Note: Message-level chat_type will be determined after document search is completed + + # --- Threading Logic --- + # Find the last message in the conversation to establish the chain + previous_thread_id = None + try: + # Query for the 
last message in this conversation + last_msg_query = f""" + SELECT TOP 1 c.metadata.thread_info.thread_id as thread_id + FROM c + WHERE c.conversation_id = '{conversation_id}' + ORDER BY c.timestamp DESC + """ + last_msgs = list(cosmos_messages_container.query_items( + query=last_msg_query, + partition_key=conversation_id + )) + if last_msgs: + previous_thread_id = last_msgs[0].get('thread_id') + except Exception as e: + debug_print(f"Error fetching last message for threading: {e}") - conversation_item['last_updated'] = datetime.utcnow().isoformat() - cosmos_conversations_container.upsert_item(conversation_item) # Update timestamp and potentially title + # Generate thread_id for the user message + # We track the 'tip' of the thread in latest_thread_id + import uuid + current_user_thread_id = str(uuid.uuid4()) + latest_thread_id = current_user_thread_id + + # Add thread information to user metadata + user_metadata['thread_info'] = { + 'thread_id': current_user_thread_id, + 'previous_thread_id': previous_thread_id, + 'active_thread': True, + 'thread_attempt': 1 + } + + user_message_doc = { + 'id': user_message_id, + 'conversation_id': conversation_id, + 'role': 'user', + 'content': user_message, + 'timestamp': datetime.utcnow().isoformat(), + 'model_deployment_name': None, # Model not used for user message + 'metadata': user_metadata + } + + # Debug: Print the complete metadata being saved + debug_print(f"Complete user_metadata being saved: {json.dumps(user_metadata, indent=2, default=str)}") + debug_print(f"Final chat_context for message: {user_metadata['chat_context']}") + debug_print(f"document_search: {hybrid_search_enabled}, has_search_results: {bool(search_results)}") + + # Note: Message-level chat_type will be updated after document search + + cosmos_messages_container.upsert_item(user_message_doc) + + # Log chat activity for real-time tracking + try: + log_chat_activity( + user_id=user_id, + conversation_id=conversation_id, + message_type='user_message', + message_length=len(user_message) if user_message else 0, + has_document_search=hybrid_search_enabled, + has_image_generation=image_gen_enabled, + document_scope=document_scope, + chat_context=actual_chat_type + ) + except Exception as e: + # Don't let activity logging errors interrupt chat flow + debug_print(f"Activity logging error: {e}") + + # Set conversation title if it's still the default + if conversation_item.get('title', 'New Conversation') == 'New Conversation' and user_message: + new_title = (user_message[:30] + '...') if len(user_message) > 30 else user_message + conversation_item['title'] = new_title + conversation_item['last_updated'] = datetime.utcnow().isoformat() + cosmos_conversations_container.upsert_item(conversation_item) # Update timestamp and potentially title + # region 3 - Content Safety # --------------------------------------------------------------------- # 3) Check Content Safety (but DO NOT return 403). # If blocked, add a "safety" role message & skip GPT. @@ -530,10 +753,10 @@ def chat_api(): }), 200 except HttpResponseError as e: - print(f"[Content Safety Error] {e}") + debug_print(f"[Content Safety Error] {e}") except Exception as ex: - print(f"[Content Safety] Unexpected error: {ex}") - + debug_print(f"[Content Safety] Unexpected error: {ex}") + # region 4 - Augmentation # --------------------------------------------------------------------- # 4) Augmentation (Search, etc.) 
- Run *before* final history prep # --------------------------------------------------------------------- @@ -557,24 +780,41 @@ def chat_api(): if last_messages_asc and len(last_messages_asc) >= conversation_history_limit: summary_prompt_search = "Please summarize the key topics or questions from this recent conversation history in 50 words or less:\n\n" - message_texts_search = [f"{msg.get('role', 'user').upper()}: {msg.get('content', '')}" for msg in last_messages_asc] - summary_prompt_search += "\n".join(message_texts_search) + + # Filter out inactive thread messages before summarizing + message_texts_search = [] + for msg in last_messages_asc: + thread_info = msg.get('metadata', {}).get('thread_info', {}) + active_thread = thread_info.get('active_thread') + + # Exclude messages with active_thread=False + if active_thread is False: + debug_print(f"[THREAD] Skipping inactive thread message {msg.get('id')} from search summary") + continue + + message_texts_search.append(f"{msg.get('role', 'user').upper()}: {msg.get('content', '')}") + + if not message_texts_search: + # No active messages to summarize + debug_print("[THREAD] No active thread messages available for search summary") + else: + summary_prompt_search += "\n".join(message_texts_search) - try: - # Use the already initialized gpt_client and gpt_model - summary_response_search = gpt_client.chat.completions.create( - model=gpt_model, - messages=[{"role": "system", "content": summary_prompt_search}], - max_tokens=100 # Keep summary short - ) - summary_for_search = summary_response_search.choices[0].message.content.strip() - if summary_for_search: - search_query = f"Based on the recent conversation about: '{summary_for_search}', the user is now asking: {user_message}" - except Exception as e: - print(f"Error summarizing conversation for search: {e}") - # Proceed with original user_message as search_query + try: + # Use the already initialized gpt_client and gpt_model + summary_response_search = gpt_client.chat.completions.create( + model=gpt_model, + messages=[{"role": "system", "content": summary_prompt_search}], + max_tokens=100 # Keep summary short + ) + summary_for_search = summary_response_search.choices[0].message.content.strip() + if summary_for_search: + search_query = f"Based on the recent conversation about: '{summary_for_search}', the user is now asking: {user_message}" + except Exception as e: + debug_print(f"Error summarizing conversation for search: {e}") + # Proceed with original user_message as search_query except Exception as e: - print(f"Error fetching messages for search summarization: {e}") + debug_print(f"Error fetching messages for search summarization: {e}") # Perform the search @@ -606,22 +846,29 @@ def chat_api(): "doc_scope": document_scope, } - # Add active_group_id when document scope is 'group' or chat_type is 'group' - if (document_scope == 'group' or chat_type == 'group') and active_group_id: + # Add active_group_id when: + # 1. Document scope is 'group' or chat_type is 'group', OR + # 2. Document scope is 'all' and groups are enabled (so group search can be included) + if active_group_id and (document_scope == 'group' or document_scope == 'all' or chat_type == 'group'): search_args["active_group_id"] = active_group_id + # Add active_public_workspace_id when: + # 1. Document scope is 'public' or + # 2. 
Document scope is 'all' and public workspaces are enabled + if active_public_workspace_id and (document_scope == 'public' or document_scope == 'all'): + search_args["active_public_workspace_id"] = active_public_workspace_id if selected_document_id: search_args["document_id"] = selected_document_id # Log if a non-default top_n value is being used if top_n != default_top_n: - print(f"Using custom top_n value: {top_n} (requested: {top_n_results})") + debug_print(f"Using custom top_n value: {top_n} (requested: {top_n_results})") # Public scope now automatically searches all visible public workspaces search_results = hybrid_search(**search_args) # Assuming hybrid_search handles None document_id except Exception as e: - print(f"Error during hybrid search: {e}") + debug_print(f"Error during hybrid search: {e}") # Only treat as error if the exception is from embedding failure return jsonify({ 'error': 'There was an issue with the embedding process. Please check with an admin on embedding configuration.' @@ -705,6 +952,172 @@ def chat_api(): # Reorder hybrid citations list in descending order based on page_number hybrid_citations_list.sort(key=lambda x: x.get('page_number', 0), reverse=True) + # --- NEW: Extract metadata (keywords/abstract) for additional citations --- + # Only if extract_metadata is enabled + if settings.get('enable_extract_meta_data', False): + from functions_documents import get_document_metadata_for_citations + + # Track which documents we've already processed to avoid duplicates + processed_doc_ids = set() + + for doc in search_results: + # Get document ID (from the chunk's document reference) + # AI Search chunks contain references to their parent document + doc_id = doc.get('id', '').split('_')[0] if doc.get('id') else None + + # Skip if we've already processed this document + if not doc_id or doc_id in processed_doc_ids: + continue + + processed_doc_ids.add(doc_id) + # Determine workspace type from the search result fields + doc_user_id = doc.get('user_id') + doc_group_id = doc.get('group_id') + doc_public_workspace_id = doc.get('public_workspace_id') + + + # Query Cosmos for this document's metadata + metadata = get_document_metadata_for_citations( + document_id=doc_id, + user_id=doc_user_id if doc_user_id else None, + group_id=doc_group_id if doc_group_id else None, + public_workspace_id=doc_public_workspace_id if doc_public_workspace_id else None + ) + + + # If we have metadata with content, create additional citations + if metadata: + file_name = metadata.get('file_name', 'Unknown') + keywords = metadata.get('keywords', []) + abstract = metadata.get('abstract', '') + + + # Create citation for keywords if they exist + if keywords and len(keywords) > 0: + keywords_text = ', '.join(keywords) if isinstance(keywords, list) else str(keywords) + keywords_citation_id = f"{doc_id}_keywords" + + + keywords_citation = { + "file_name": file_name, + "citation_id": keywords_citation_id, + "page_number": "Metadata", # Special page identifier + "chunk_id": keywords_citation_id, + "chunk_sequence": 9999, # High number to sort to end + "score": 0.0, # No relevance score for metadata + "group_id": doc_group_id, + "version": doc.get('version', 'N/A'), + "classification": doc.get('document_classification'), + "metadata_type": "keywords", # Flag this as metadata citation + "metadata_content": keywords_text + } + hybrid_citations_list.append(keywords_citation) + combined_documents.append(keywords_citation) # Add to combined_documents too + + # Add keywords to retrieved content for the model + 
keywords_context = f"Document Keywords ({file_name}): {keywords_text}" + retrieved_texts.append(keywords_context) + + # Create citation for abstract if it exists + if abstract and len(abstract.strip()) > 0: + abstract_citation_id = f"{doc_id}_abstract" + + + # Add keywords to retrieved content for the model + keywords_context = f"Document Keywords ({file_name}): {keywords_text}" + retrieved_texts.append(keywords_context) + + # Create citation for abstract if it exists + if abstract and len(abstract.strip()) > 0: + abstract_citation_id = f"{doc_id}_abstract" + + abstract_citation = { + "file_name": file_name, + "citation_id": abstract_citation_id, + "page_number": "Metadata", # Special page identifier + "chunk_id": abstract_citation_id, + "chunk_sequence": 9998, # High number to sort to end + "score": 0.0, # No relevance score for metadata + "group_id": doc_group_id, + "version": doc.get('version', 'N/A'), + "classification": doc.get('document_classification'), + "metadata_type": "abstract", # Flag this as metadata citation + "metadata_content": abstract + } + hybrid_citations_list.append(abstract_citation) + combined_documents.append(abstract_citation) # Add to combined_documents too + + # Add abstract to retrieved content for the model + abstract_context = f"Document Abstract ({file_name}): {abstract}" + retrieved_texts.append(abstract_context) + + + # Add abstract to retrieved content for the model + abstract_context = f"Document Abstract ({file_name}): {abstract}" + retrieved_texts.append(abstract_context) + + # Create citation for vision analysis if it exists + vision_analysis = metadata.get('vision_analysis') + if vision_analysis: + vision_citation_id = f"{doc_id}_vision" + + # Format vision analysis for citation display + vision_description = vision_analysis.get('description', '') + vision_objects = vision_analysis.get('objects', []) + vision_text = vision_analysis.get('text', '') + + vision_content = f"AI Vision Analysis:\n" + if vision_description: + vision_content += f"Description: {vision_description}\n" + if vision_objects: + vision_content += f"Objects: {', '.join(vision_objects)}\n" + if vision_text: + vision_content += f"Text in Image: {vision_text}\n" + + vision_citation = { + "file_name": file_name, + "citation_id": vision_citation_id, + "page_number": "AI Vision", # Special page identifier + "chunk_id": vision_citation_id, + "chunk_sequence": 9997, # High number to sort to end (before keywords/abstract) + "score": 0.0, # No relevance score for vision analysis + "group_id": doc_group_id, + "version": doc.get('version', 'N/A'), + "classification": doc.get('document_classification'), + "metadata_type": "vision", # Flag this as vision citation + "metadata_content": vision_content + } + hybrid_citations_list.append(vision_citation) + combined_documents.append(vision_citation) # Add to combined_documents too + + # Add vision analysis to retrieved content for the model + vision_context = f"AI Vision Analysis ({file_name}): {vision_content}" + retrieved_texts.append(vision_context) + + + # Update the system prompt with the enhanced content including metadata + if retrieved_texts: + retrieved_content = "\n\n".join(retrieved_texts) + system_prompt_search = f"""You are an AI assistant. Use the following retrieved document excerpts to answer the user's question. Cite sources using the format (Source: filename, Page: page number). + Retrieved Excerpts: + {retrieved_content} + Based *only* on the information provided above, answer the user's query. 
If the answer isn't in the excerpts, say so. + + Retrieved Excerpts: + {retrieved_content} + + Based *only* on the information provided above, answer the user's query. If the answer isn't in the excerpts, say so. + + Example + User: What is the policy on double dipping? + Assistant: The policy prohibits entities from using federal funds received through one program to apply for additional funds through another program, commonly known as 'double dipping' (Source: PolicyDocument.pdf, Page: 12) + """ + # Update the system message with enhanced content and updated documents array + if system_messages_for_augmentation: + system_messages_for_augmentation[-1]['content'] = system_prompt_search + system_messages_for_augmentation[-1]['documents'] = combined_documents + # --- END NEW METADATA CITATIONS --- + # Update conversation classifications if new ones were found if list(classifications_found) != conversation_item.get('classification', []): conversation_item['classification'] = list(classifications_found) @@ -752,7 +1165,7 @@ def chat_api(): user_metadata['chat_context']['group_name'] = None except Exception as e: - print(f"Error retrieving group name for chat context: {e}") + debug_print(f"Error retrieving group name for chat context: {e}") user_metadata['chat_context']['group_name'] = None import traceback traceback.print_exc() @@ -896,6 +1309,22 @@ def chat_api(): # Create main image document with metadata + + # Get user_info and thread_id from the user message for ownership tracking and threading + user_info_for_chunked_image = None + user_thread_id = None + user_previous_thread_id = None + try: + user_msg = cosmos_messages_container.read_item( + item=user_message_id, + partition_key=conversation_id + ) + user_info_for_chunked_image = user_msg.get('metadata', {}).get('user_info') + user_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + user_previous_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + except Exception as e: + debug_print(f"Warning: Could not retrieve user_info from user message for chunked image: {e}") + main_image_doc = { 'id': image_message_id, 'conversation_id': conversation_id, @@ -906,12 +1335,20 @@ def chat_api(): 'timestamp': datetime.utcnow().isoformat(), 'model_deployment_name': image_gen_model, 'metadata': { + 'user_info': user_info_for_chunked_image, # Track which user created this image 'is_chunked': True, 'total_chunks': total_chunks, 'chunk_index': 0, - 'original_size': len(generated_image_url) + 'original_size': len(generated_image_url), + 'thread_info': { + 'thread_id': user_thread_id, # Same thread as user message + 'previous_thread_id': user_previous_thread_id, # Same previous_thread_id as user message + 'active_thread': True, + 'thread_attempt': 1 + } } } + # Image message shares the same thread as user message # Create additional chunk documents chunk_docs = [] @@ -952,6 +1389,21 @@ def chat_api(): # Small image - store normally in single document debug_print(f"Small image ({len(generated_image_url)} bytes), storing in single document") + # Get user_info and thread_id from the user message for ownership tracking and threading + user_info_for_image = None + user_thread_id = None + user_previous_thread_id = None + try: + user_msg = cosmos_messages_container.read_item( + item=user_message_id, + partition_key=conversation_id + ) + user_info_for_image = user_msg.get('metadata', {}).get('user_info') + user_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + 
user_previous_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + except Exception as e: + debug_print(f"Warning: Could not retrieve user_info from user message for image: {e}") + image_doc = { 'id': image_message_id, 'conversation_id': conversation_id, @@ -962,12 +1414,20 @@ def chat_api(): 'timestamp': datetime.utcnow().isoformat(), 'model_deployment_name': image_gen_model, 'metadata': { + 'user_info': user_info_for_image, # Track which user created this image 'is_chunked': False, - 'original_size': len(generated_image_url) + 'original_size': len(generated_image_url), + 'thread_info': { + 'thread_id': user_thread_id, # Same thread as user message + 'previous_thread_id': user_previous_thread_id, # Same previous_thread_id as user message + 'active_thread': True, + 'thread_attempt': 1 + } } } cosmos_messages_container.upsert_item(image_doc) response_image_url = generated_image_url + # Image message shares the same thread as user message conversation_item['last_updated'] = datetime.utcnow().isoformat() cosmos_conversations_container.upsert_item(conversation_item) @@ -1004,6 +1464,24 @@ def chat_api(): 'error': user_friendly_message }), status_code + if web_search_enabled: + perform_web_search( + settings=settings, + conversation_id=conversation_id, + user_id=user_id, + user_message=user_message, + user_message_id=user_message_id, + chat_type=chat_type, + document_scope=document_scope, + active_group_id=active_group_id, + active_public_workspace_id=active_public_workspace_id, + search_query=search_query, + system_messages_for_augmentation=system_messages_for_augmentation, + agent_citations_list=agent_citations_list, + web_search_citations_list=web_search_citations_list, + ) + + # region 5 - FINAL conversation history preparation # --------------------------------------------------------------------- # 5) Prepare FINAL conversation history for GPT (including summarization) # --------------------------------------------------------------------- @@ -1019,6 +1497,9 @@ def chat_api(): query=all_messages_query, parameters=params_all, partition_key=conversation_id, enable_cross_partition_query=True )) + # Sort messages using threading logic + all_messages = sort_messages_by_thread(all_messages) + total_messages = len(all_messages) # Determine which messages are "recent" and which are "older" @@ -1031,7 +1512,7 @@ def chat_api(): # Summarize older messages if needed and present if enable_summarize_content_history_beyond_conversation_history_limit and older_messages_to_summarize: - print(f"Summarizing {len(older_messages_to_summarize)} older messages for conversation {conversation_id}") + debug_print(f"Summarizing {len(older_messages_to_summarize)} older messages for conversation {conversation_id}") summary_prompt_older = ( "Summarize the following conversation history concisely (around 50-100 words), " "focusing on key facts, decisions, or context that might be relevant for future turns. 
" @@ -1041,6 +1522,17 @@ def chat_api(): message_texts_older = [] for msg in older_messages_to_summarize: role = msg.get('role', 'user') + metadata = msg.get('metadata', {}) + + # Check active_thread flag - skip messages with active_thread=False + thread_info = metadata.get('thread_info', {}) + active_thread = thread_info.get('active_thread') + + # Exclude content when active_thread is explicitly False + if active_thread is False: + debug_print(f"[THREAD] Skipping inactive thread message {msg.get('id')} from summary") + continue + # Skip roles that shouldn't be in summary (adjust as needed) if role in ['system', 'safety', 'blocked', 'image', 'file']: continue content = msg.get('content', '') @@ -1057,12 +1549,12 @@ def chat_api(): temperature=0.3 # Lower temp for factual summary ) summary_of_older = summary_response_older.choices[0].message.content.strip() - print(f"Generated summary: {summary_of_older}") + debug_print(f"Generated summary: {summary_of_older}") except Exception as e: - print(f"Error summarizing older conversation history: {e}") + debug_print(f"Error summarizing older conversation history: {e}") summary_of_older = "" # Failed, proceed without summary else: - print("No summarizable content found in older messages.") + debug_print("No summarizable content found in older messages.") # Construct the final history for the API call @@ -1082,6 +1574,22 @@ def chat_api(): # 5. Create the final system_doc dictionary for Cosmos DB upsert system_message_id = f"{conversation_id}_system_aug_{int(time.time())}_{random.randint(1000,9999)}" + + # Get user_info and thread_id from the user message for ownership tracking and threading + user_info_for_system = None + user_thread_id = None + user_previous_thread_id = None + try: + user_msg = cosmos_messages_container.read_item( + item=user_message_id, + partition_key=conversation_id + ) + user_info_for_system = user_msg.get('metadata', {}).get('user_info') + user_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + user_previous_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + except Exception as e: + debug_print(f"Warning: Could not retrieve user_info from user message for system message: {e}") + system_doc = { 'id': system_message_id, 'conversation_id': conversation_id, @@ -1091,10 +1599,19 @@ def chat_api(): 'user_message': user_message, # Include the original user message for context 'model_deployment_name': None, # As per your original structure 'timestamp': datetime.utcnow().isoformat(), - 'metadata': {} + 'metadata': { + 'user_info': user_info_for_system, + 'thread_info': { + 'thread_id': user_thread_id, # Same thread as user message + 'previous_thread_id': user_previous_thread_id, # Same previous_thread_id as user message + 'active_thread': True, + 'thread_attempt': 1 + } + } } cosmos_messages_container.upsert_item(system_doc) conversation_history_for_api.append(aug_msg) # Add to API context + # System message shares the same thread as user message, no thread update needed # --- NEW: Save plugin output as agent citation --- agent_citations_list.append({ @@ -1113,6 +1630,30 @@ def chat_api(): for message in recent_messages: role = message.get('role') content = message.get('content') + metadata = message.get('metadata', {}) + + # Check active_thread flag - skip messages with active_thread=False + # This handles both threaded messages and legacy messages with the flag set + thread_info = metadata.get('thread_info', {}) + active_thread = thread_info.get('active_thread') 
+ + # Exclude content when active_thread is explicitly False + # Include when: active_thread is True, None, or not present (legacy messages) + if active_thread is False: + debug_print(f"[THREAD] Skipping inactive thread message {message.get('id')} (thread_id: {thread_info.get('thread_id')}, attempt: {thread_info.get('thread_attempt')})") + continue + + # Check if message is fully masked - skip it entirely + if metadata.get('masked', False): + debug_print(f"[MASK] Skipping fully masked message {message.get('id')}") + continue + + # Check for partially masked content + masked_ranges = metadata.get('masked_ranges', []) + if masked_ranges and content: + # Remove masked portions from content + content = remove_masked_content(content, masked_ranges) + debug_print(f"[MASK] Applied {len(masked_ranges)} masked ranges to message {message.get('id')}") if role in allowed_roles_in_history: conversation_history_for_api.append({"role": role, "content": content}) @@ -1139,19 +1680,72 @@ def chat_api(): 'role': 'system', # Represent file as system info 'content': f"[User uploaded a file named '{filename}'. Content preview:\n{display_content}]\nUse this file context if relevant." }) - # elif role == 'image': # If you want to represent image generation prompts/results - # prompt = message.get('prompt', 'User generated an image.') - # img_url = message.get('content', '') # URL is in content - # conversation_history_for_api.append({ - # 'role': 'system', - # 'content': f"[Assistant generated an image based on the prompt: '{prompt}'. Image URL: {img_url}]" - # }) + elif role == 'image': # Handle image uploads with extracted text and vision analysis + filename = message.get('filename', 'uploaded_image') + is_user_upload = message.get('metadata', {}).get('is_user_upload', False) + + if is_user_upload: + # This is a user-uploaded image with extracted text and vision analysis + # IMPORTANT: Do NOT include message.get('content') as it contains base64 image data + # which would consume excessive tokens. Only use extracted_text and vision_analysis. + extracted_text = message.get('extracted_text', '') + vision_analysis = message.get('vision_analysis', {}) + + # Build comprehensive context from OCR and vision analysis (NO BASE64!) + image_context_parts = [f"[User uploaded an image named '{filename}'.]"] + + if extracted_text: + # Include OCR text from Document Intelligence + extracted_preview = extracted_text[:max_file_content_length_in_history] + if len(extracted_text) > max_file_content_length_in_history: + extracted_preview += "..." + image_context_parts.append(f"\n\nExtracted Text (OCR):\n{extracted_preview}") + + if vision_analysis: + # Include AI vision analysis + image_context_parts.append("\n\nAI Vision Analysis:") + + if vision_analysis.get('description'): + image_context_parts.append(f"\nDescription: {vision_analysis['description']}") + + if vision_analysis.get('objects'): + objects_str = ', '.join(vision_analysis['objects']) + image_context_parts.append(f"\nObjects detected: {objects_str}") + + if vision_analysis.get('text'): + image_context_parts.append(f"\nText visible in image: {vision_analysis['text']}") + + if vision_analysis.get('contextual_analysis'): + image_context_parts.append(f"\nContextual analysis: {vision_analysis['contextual_analysis']}") + + image_context_content = ''.join(image_context_parts) + "\n\nUse this image information to answer questions about the uploaded image." 
+ + # Verify we're not accidentally including base64 data + if 'data:image/' in image_context_content or ';base64,' in image_context_content: + debug_print(f"WARNING: Base64 image data detected in chat history for {filename}! Removing to save tokens.") + # This should never happen, but safety check just in case + image_context_content = f"[User uploaded an image named '{filename}' - image data excluded from chat history to conserve tokens]" + + debug_print(f"[IMAGE_CONTEXT] Adding user-uploaded image to history: {filename}, context length: {len(image_context_content)} chars") + conversation_history_for_api.append({ + 'role': 'system', + 'content': image_context_content + }) + else: + # This is a system-generated image (DALL-E, etc.) + # Don't include the image data URL in history either + prompt = message.get('prompt', 'User requested image generation.') + debug_print(f"[IMAGE_CONTEXT] Adding system-generated image to history: {prompt[:100]}...") + conversation_history_for_api.append({ + 'role': 'system', + 'content': f"[Assistant generated an image based on the prompt: '{prompt}']" + }) # Ignored roles: 'safety', 'blocked', 'system' (if they are only for augmentation/summary) # Ensure the very last message is the current user's message (it should be if fetched correctly) if not conversation_history_for_api or conversation_history_for_api[-1]['role'] != 'user': - print("Warning: Last message in history is not the user's current message. Appending.") + debug_print("Warning: Last message in history is not the user's current message. Appending.") # This might happen if 'recent_messages' somehow didn't include the latest user message saved in step 2 # Or if the last message had an ignored role. Find the actual user message: user_msg_found = False @@ -1164,9 +1758,10 @@ def chat_api(): conversation_history_for_api.append({"role": "user", "content": user_message}) except Exception as e: - print(f"Error preparing conversation history: {e}") + debug_print(f"Error preparing conversation history: {e}") return jsonify({'error': f'Error preparing conversation history: {str(e)}'}), 500 + # region 6 - Final GPT Call # --------------------------------------------------------------------- # 6) Final GPT Call # --------------------------------------------------------------------- @@ -1327,7 +1922,20 @@ async def run_sk_call(callable_obj, *args, **kwargs): user_settings = get_user_settings(user_id).get('settings', {}) per_user_semantic_kernel = settings.get('per_user_semantic_kernel', False) enable_semantic_kernel = settings.get('enable_semantic_kernel', False) + + # Check if agent_info is provided in request (e.g., from retry with agent selection) + request_agent_info = data.get('agent_info') + force_enable_agents = bool(request_agent_info) # Force enable agents if agent_info provided + user_enable_agents = user_settings.get('enable_agents', True) # Default to True for backward compatibility + # Override user setting if agent explicitly requested via agent_info + if force_enable_agents: + user_enable_agents = True + g.force_enable_agents = True # Store in Flask g for SK loader to check + g.request_agent_name = request_agent_info.get('name') if isinstance(request_agent_info, dict) else request_agent_info + log_event(f"[SKChat] agent_info provided in request - forcing agent enablement for this request", level=logging.INFO) + + enable_key_vault_secret_storage = settings.get('enable_key_vault_secret_storage', False) redis_client = None # --- Semantic Kernel state management (per-user mode) --- if 
enable_semantic_kernel and per_user_semantic_kernel: @@ -1360,9 +1968,19 @@ async def run_sk_call(callable_obj, *args, **kwargs): if enable_semantic_kernel and user_enable_agents: # PATCH: Use new agent selection logic agent_name_to_select = None - if per_user_semantic_kernel: + + # Priority 1: Use agent_info from request if provided (e.g., retry with specific agent) + if request_agent_info: + # Extract agent name or create dict format expected by selection logic + agent_name_to_select = request_agent_info if isinstance(request_agent_info, dict) else {'name': request_agent_info} + if isinstance(agent_name_to_select, dict): + agent_name_to_select = agent_name_to_select.get('name') + log_event(f"[SKChat] Using agent from request agent_info: {agent_name_to_select}") + # Priority 2: Use user settings + elif per_user_semantic_kernel: agent_name_to_select = user_settings.get('selected_agent') log_event(f"[SKChat] Per-user mode: selected_agent from user_settings: {agent_name_to_select}") + # Priority 3: Use global settings else: global_selected_agent_info = settings.get('global_selected_agent') if global_selected_agent_info: @@ -1453,7 +2071,7 @@ def orchestrator_success(result): notice = None return (msg, "multi-agent-chat", "multi-agent-chat", notice) def orchestrator_error(e): - print(f"Error during Semantic Kernel Agent invocation: {str(e)}") + debug_print(f"Error during Semantic Kernel Agent invocation: {str(e)}") log_event( f"Error during Semantic Kernel Agent invocation: {str(e)}", extra=extra, @@ -1474,6 +2092,7 @@ def invoke_selected_agent(): agent_message_history, )) def agent_success(result): + nonlocal reload_messages_required msg = str(result) notice = None agent_used = getattr(selected_agent, 'name', 'All Plugins') @@ -1535,16 +2154,22 @@ def make_json_serializable(obj): } ) - # print(f"[Enhanced Agent Citations] Agent used: {agent_used}") - # print(f"[Enhanced Agent Citations] Extracted {len(detailed_citations)} detailed plugin invocations") + # debug_print(f"[Enhanced Agent Citations] Agent used: {agent_used}") + # debug_print(f"[Enhanced Agent Citations] Extracted {len(detailed_citations)} detailed plugin invocations") # for citation in detailed_citations: - # print(f"[Enhanced Agent Citations] - Plugin: {citation['plugin_name']}, Function: {citation['function_name']}") - # print(f" Parameters: {citation['function_arguments']}") - # print(f" Result: {citation['function_result']}") - # print(f" Duration: {citation['duration_ms']}ms, Success: {citation['success']}") + # debug_print(f"[Enhanced Agent Citations] - Plugin: {citation['plugin_name']}, Function: {citation['function_name']}") + # debug_print(f" Parameters: {citation['function_arguments']}") + # debug_print(f" Result: {citation['function_result']}") + # debug_print(f" Duration: {citation['duration_ms']}ms, Success: {citation['success']}") # Store detailed citations globally to be accessed by the calling function agent_citations_list.extend(detailed_citations) + + if not reload_messages_required: + for citation in detailed_citations: + if result_requires_message_reload(citation.get('function_result')): + reload_messages_required = True + break if enable_multi_agent_orchestration and not per_user_semantic_kernel: # If the agent response indicates fallback mode @@ -1555,19 +2180,108 @@ def make_json_serializable(obj): ) return (msg, actual_model_deployment, "agent", notice) def agent_error(e): - print(f"Error during Semantic Kernel Agent invocation: {str(e)}") + debug_print(f"Error during Semantic Kernel Agent invocation: 
{str(e)}") log_event( f"Error during Semantic Kernel Agent invocation: {str(e)}", extra=extra, level=logging.ERROR, exceptionTraceback=True ) - fallback_steps.append({ - 'name': 'agent', - 'func': invoke_selected_agent, - 'on_success': agent_success, - 'on_error': agent_error - }) + + selected_agent_type = getattr(selected_agent, 'agent_type', 'local') or 'local' + if isinstance(selected_agent_type, str): + selected_agent_type = selected_agent_type.lower() + + if selected_agent_type == 'aifoundry': + def invoke_foundry_agent(): + foundry_metadata = { + 'conversation_id': conversation_id, + 'user_id': user_id, + 'message_id': user_message_id, + 'chat_type': chat_type, + 'document_scope': document_scope, + 'group_id': active_group_id if chat_type == 'group' else None, + 'hybrid_search_enabled': hybrid_search_enabled, + 'selected_document_id': selected_document_id, + 'search_query': search_query, + } + return selected_agent.invoke( + agent_message_history, + metadata={k: v for k, v in foundry_metadata.items() if v is not None} + ) + + def foundry_agent_success(result): + msg = str(result) + notice = None + agent_used = getattr(selected_agent, 'name', 'Azure AI Foundry Agent') + actual_model_deployment = ( + getattr(selected_agent, 'last_run_model', None) + or getattr(selected_agent, 'deployment_name', None) + or agent_used + ) + + foundry_citations = getattr(selected_agent, 'last_run_citations', []) or [] + if foundry_citations: + for citation in foundry_citations: + try: + serializable = json.loads(json.dumps(citation, default=str)) + except (TypeError, ValueError): + serializable = {'value': str(citation)} + agent_citations_list.append({ + 'tool_name': agent_used, + 'function_name': 'azure_ai_foundry_citation', + 'plugin_name': 'azure_ai_foundry', + 'function_arguments': serializable, + 'function_result': serializable, + 'timestamp': datetime.utcnow().isoformat(), + 'success': True + }) + + if enable_multi_agent_orchestration and not per_user_semantic_kernel: + notice = ( + "[SK Fallback]: The AI assistant is running in single agent fallback mode. " + "Some advanced features may not be available. " + "Please contact your administrator to configure Semantic Kernel for richer responses." + ) + + log_event( + f"[Foundry Agent] Invocation complete for {agent_used}", + extra={ + 'conversation_id': conversation_id, + 'user_id': user_id, + 'agent_id': getattr(selected_agent, 'id', None), + 'model_used': actual_model_deployment, + 'citation_count': len(foundry_citations), + } + ) + + return (msg, actual_model_deployment, 'agent', notice) + + def foundry_agent_error(e): + log_event( + f"Error during Azure AI Foundry agent invocation: {str(e)}", + extra={ + 'conversation_id': conversation_id, + 'user_id': user_id, + 'agent_id': getattr(selected_agent, 'id', None) + }, + level=logging.ERROR, + exceptionTraceback=True + ) + + fallback_steps.append({ + 'name': 'foundry_agent', + 'func': invoke_foundry_agent, + 'on_success': foundry_agent_success, + 'on_error': foundry_agent_error + }) + else: + fallback_steps.append({ + 'name': 'agent', + 'func': invoke_selected_agent, + 'on_success': agent_success, + 'on_error': agent_error + }) if kernel: def invoke_kernel(): @@ -1608,7 +2322,7 @@ def kernel_success(result): msg = '[SK fallback] Running in kernel only mode. Ask your administrator to configure Semantic Kernel for richer responses.' 
return (str(result), "kernel", "kernel", msg) def kernel_error(e): - print(f"Error during kernel invocation: {str(e)}") + debug_print(f"Error during kernel invocation: {str(e)}") log_event( f"Error during kernel invocation: {str(e)}", extra=extra, @@ -1627,12 +2341,37 @@ def invoke_gpt_fallback(): raise Exception('Cannot generate response: No conversation history available.') if conversation_history_for_api[-1].get('role') != 'user': raise Exception('Internal error: Conversation history improperly formed.') - print(f"--- Sending to GPT ({gpt_model}) ---") - print(f"Total messages in API call: {len(conversation_history_for_api)}") - response = gpt_client.chat.completions.create( - model=gpt_model, - messages=conversation_history_for_api, - ) + debug_print(f"--- Sending to GPT ({gpt_model}) ---") + debug_print(f"Total messages in API call: {len(conversation_history_for_api)}") + + # Prepare API call parameters + api_params = { + 'model': gpt_model, + 'messages': conversation_history_for_api, + } + + # Add reasoning_effort if provided and not 'none' + if reasoning_effort and reasoning_effort != 'none': + api_params['reasoning_effort'] = reasoning_effort + debug_print(f"Using reasoning effort: {reasoning_effort}") + + try: + response = gpt_client.chat.completions.create(**api_params) + except Exception as e: + # Check if error is related to reasoning_effort parameter + error_str = str(e).lower() + if reasoning_effort and reasoning_effort != 'none' and ( + 'reasoning_effort' in error_str or + 'unrecognized request argument' in error_str or + 'invalid_request_error' in error_str + ): + debug_print(f"Reasoning effort not supported by {gpt_model}, retrying without reasoning_effort...") + # Retry without reasoning_effort + api_params.pop('reasoning_effort', None) + response = gpt_client.chat.completions.create(**api_params) + else: + raise + msg = response.choices[0].message.content notice = None if enable_semantic_kernel and user_enable_agents: @@ -1642,6 +2381,14 @@ def invoke_gpt_fallback(): "No advanced features are available. " "Please contact your administrator to resolve Semantic Kernel integration." ) + # Capture token usage for storage in message metadata + token_usage_data = { + 'prompt_tokens': response.usage.prompt_tokens, + 'completion_tokens': response.usage.completion_tokens, + 'total_tokens': response.usage.total_tokens, + 'captured_at': datetime.utcnow().isoformat() + } + log_event( f"[Tokens] GPT completion response received - prompt_tokens: {response.usage.prompt_tokens}, completion_tokens: {response.usage.completion_tokens}, total_tokens: {response.usage.total_tokens}", extra={ @@ -1655,15 +2402,15 @@ def invoke_gpt_fallback(): }, level=logging.INFO ) - return (msg, gpt_model, None, notice) + return (msg, gpt_model, None, notice, token_usage_data) def gpt_success(result): return result def gpt_error(e): - print(f"Error during final GPT completion: {str(e)}") + debug_print(f"Error during final GPT completion: {str(e)}") if "context length" in str(e).lower(): - return ("Sorry, the conversation history is too long even after summarization. Please start a new conversation or try a shorter message.", gpt_model, None, None) + return ("Sorry, the conversation history is too long even after summarization. Please start a new conversation or try a shorter message.", gpt_model, None, None, None) else: - return (f"Sorry, I encountered an error generating the response. Details: {str(e)}", gpt_model, None, None) + return (f"Sorry, I encountered an error generating the response. 
Details: {str(e)}", gpt_model, None, None, None) fallback_steps.append({ 'name': 'gpt', 'func': invoke_gpt_fallback, @@ -1671,8 +2418,16 @@ def gpt_error(e): 'on_error': gpt_error }) - ai_message, final_model_used, chat_mode, kernel_fallback_notice = try_fallback_chain(fallback_steps) - if kernel: + fallback_result = try_fallback_chain(fallback_steps) + # Unpack result - handle both 4-tuple (SK) and 5-tuple (GPT with tokens) + if len(fallback_result) == 5: + ai_message, final_model_used, chat_mode, kernel_fallback_notice, token_usage_data = fallback_result + else: + ai_message, final_model_used, chat_mode, kernel_fallback_notice = fallback_result + token_usage_data = None + + # Collect token usage from Semantic Kernel services if available + if kernel and not token_usage_data: try: for service in getattr(kernel, "services", {}).values(): # Each service is likely an AzureChatCompletion or similar @@ -1693,6 +2448,16 @@ def gpt_error(e): }, level=logging.INFO ) + + # Capture token usage from first service with token data + if (prompt_tokens or completion_tokens or total_tokens) and not token_usage_data: + token_usage_data = { + 'prompt_tokens': prompt_tokens, + 'completion_tokens': completion_tokens, + 'total_tokens': total_tokens, + 'captured_at': datetime.utcnow().isoformat(), + 'service_id': getattr(service, 'service_id', None) + } except Exception as e: log_event( f"[Tokens] Error logging service token usage for user '{get_current_user_id()}': {e}", @@ -1700,7 +2465,7 @@ def gpt_error(e): exceptionTraceback=True ) - + # region 7 - Save GPT Response # --------------------------------------------------------------------- # 7) Save GPT response (or error message) # --------------------------------------------------------------------- @@ -1722,6 +2487,24 @@ def gpt_error(e): agent_name = selected_agent.name assistant_message_id = f"{conversation_id}_assistant_{int(time.time())}_{random.randint(1000,9999)}" + + # Get user_info and thread_id from the user message for ownership tracking and threading + user_info_for_assistant = None + user_thread_id = None + user_previous_thread_id = None + try: + user_msg = cosmos_messages_container.read_item( + item=user_message_id, + partition_key=conversation_id + ) + user_info_for_assistant = user_msg.get('metadata', {}).get('user_info') + user_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + user_previous_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + except Exception as e: + debug_print(f"Warning: Could not retrieve user_info from user message: {e}") + + # Assistant message should be part of the same thread as the user message + # Only system/augmentation messages create new threads within a conversation assistant_doc = { 'id': assistant_message_id, 'conversation_id': conversation_id, @@ -1730,15 +2513,67 @@ def gpt_error(e): 'timestamp': datetime.utcnow().isoformat(), 'augmented': bool(system_messages_for_augmentation), 'hybrid_citations': hybrid_citations_list, # <--- SIMPLIFIED: Directly use the list + 'web_search_citations': web_search_citations_list, 'hybridsearch_query': search_query if hybrid_search_enabled and search_results else None, # Log query only if hybrid search ran and found results 'agent_citations': agent_citations_list, # <--- NEW: Store agent tool invocation results 'user_message': user_message, 'model_deployment_name': actual_model_used, 'agent_display_name': agent_display_name, 'agent_name': agent_name, - 'metadata': {} # Used by SK + 'metadata': { + 
'user_info': user_info_for_assistant, # Track which user created this assistant message + 'reasoning_effort': reasoning_effort, + 'thread_info': { + 'thread_id': user_thread_id, # Same thread as user message + 'previous_thread_id': user_previous_thread_id, # Same previous_thread_id as user message + 'active_thread': True, + 'thread_attempt': retry_thread_attempt if is_retry else 1 + }, + 'token_usage': token_usage_data # Store token usage information + } # Used by SK and reasoning effort } + + debug_print(f"🔍 Chat API - Creating assistant message with thread_info:") + debug_print(f" thread_id: {user_thread_id}") + debug_print(f" previous_thread_id: {user_previous_thread_id}") + debug_print(f" attempt: {retry_thread_attempt if is_retry else 1}") + debug_print(f" is_retry: {is_retry}") + cosmos_messages_container.upsert_item(assistant_doc) + + # Log chat token usage to activity_logs for easy reporting + if token_usage_data and token_usage_data.get('total_tokens'): + try: + from functions_activity_logging import log_token_usage + + # Determine workspace type based on active group/public workspace + workspace_type = 'personal' + if active_public_workspace_id: + workspace_type = 'public' + elif active_group_id: + workspace_type = 'group' + + log_token_usage( + user_id=get_current_user_id(), + token_type='chat', + total_tokens=token_usage_data.get('total_tokens'), + model=actual_model_used, + workspace_type=workspace_type, + prompt_tokens=token_usage_data.get('prompt_tokens'), + completion_tokens=token_usage_data.get('completion_tokens'), + conversation_id=conversation_id, + message_id=assistant_message_id, + group_id=active_group_id, + public_workspace_id=active_public_workspace_id, + additional_context={ + 'agent_name': agent_name, + 'augmented': bool(system_messages_for_augmentation), + 'reasoning_effort': reasoning_effort + } + ) + except Exception as log_error: + debug_print(f"⚠️ Warning: Failed to log chat token usage: {log_error}") + # Don't fail the chat flow if logging fails # Update the user message metadata with the actual model used # This ensures the UI shows the correct model in the metadata panel @@ -1754,7 +2589,7 @@ def gpt_error(e): cosmos_messages_container.upsert_item(user_message_doc) except Exception as e: - print(f"Warning: Could not update user message metadata: {e}") + debug_print(f"Warning: Could not update user message metadata: {e}") # Update conversation's last_updated timestamp one last time conversation_item['last_updated'] = datetime.utcnow().isoformat() @@ -1779,11 +2614,12 @@ def gpt_error(e): image_gen_enabled=image_gen_enabled, selected_documents=combined_documents if 'combined_documents' in locals() else None, selected_agent=selected_agent_name, + selected_agent_details=user_metadata.get('agent_selection'), search_results=search_results if 'search_results' in locals() else None, conversation_item=conversation_item ) except Exception as e: - print(f"Error collecting conversation metadata: {e}") + debug_print(f"Error collecting conversation metadata: {e}") # Continue even if metadata collection fails # Add any other final updates to conversation_item if needed (like classifications if not done earlier) @@ -1809,15 +2645,17 @@ def gpt_error(e): 'blocked': False, # Explicitly false if we got this far 'augmented': bool(system_messages_for_augmentation), 'hybrid_citations': hybrid_citations_list, + 'web_search_citations': web_search_citations_list, 'agent_citations': agent_citations_list, + 'reload_messages': reload_messages_required, 'kernel_fallback_notice': 
kernel_fallback_notice }), 200 except Exception as e: import traceback error_traceback = traceback.format_exc() - print(f"[CHAT API ERROR] Unhandled exception in chat_api: {str(e)}") - print(f"[CHAT API ERROR] Full traceback:\n{error_traceback}") + debug_print(f"[CHAT API ERROR] Unhandled exception in chat_api: {str(e)}") + debug_print(f"[CHAT API ERROR] Full traceback:\n{error_traceback}") log_event( f"[CHAT API ERROR] Unhandled exception in chat_api: {str(e)}", extra={ @@ -1831,4 +2669,1783 @@ def gpt_error(e): return jsonify({ 'error': f'Internal server error: {str(e)}', 'details': error_traceback if app.debug else None - }), 500 \ No newline at end of file + }), 500 + + @app.route('/api/chat/stream', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def chat_stream_api(): + """ + Streaming version of chat endpoint using Server-Sent Events (SSE). + Streams tokens as they are generated from Azure OpenAI. + """ + from flask import Response, stream_with_context + import json + + # IMPORTANT: Parse JSON and get user_id BEFORE entering the generator + # because request context may not be available inside the generator + try: + data = request.get_json() + user_id = get_current_user_id() + settings = get_settings() + except Exception as e: + return jsonify({'error': f'Failed to parse request: {str(e)}'}), 400 + + def generate(): + try: + # Import debug_print for use in generator + from functions_debug import debug_print + + if not user_id: + yield f"data: {json.dumps({'error': 'User not authenticated'})}\n\n" + return + + # Extract request parameters (same as non-streaming endpoint) + user_message = data.get('message', '') + conversation_id = data.get('conversation_id') + hybrid_search_enabled = data.get('hybrid_search') + web_search_enabled = data.get('web_search_enabled') + selected_document_id = data.get('selected_document_id') + image_gen_enabled = data.get('image_generation') + document_scope = data.get('doc_scope') + active_group_id = data.get('active_group_id') + active_public_workspace_id = data.get('active_public_workspace_id') # Extract active public workspace ID + frontend_gpt_model = data.get('model_deployment') + classifications_to_send = data.get('classifications') + chat_type = data.get('chat_type', 'user') + reasoning_effort = data.get('reasoning_effort') # Extract reasoning effort for reasoning models + + # Check if agents are enabled + enable_semantic_kernel = settings.get('enable_semantic_kernel', False) + per_user_semantic_kernel = settings.get('per_user_semantic_kernel', False) + user_settings = {} + user_enable_agents = False + + debug_print(f"[DEBUG] enable_semantic_kernel={enable_semantic_kernel}, per_user_semantic_kernel={per_user_semantic_kernel}") + + # Initialize Semantic Kernel if needed + redis_client = None + if enable_semantic_kernel and per_user_semantic_kernel: + redis_client = current_app.config.get('SESSION_REDIS') if 'current_app' in globals() else None + initialize_semantic_kernel(user_id=user_id, redis_client=redis_client) + debug_print(f"[DEBUG] Initialized Semantic Kernel for user {user_id}") + elif enable_semantic_kernel: + # Global mode: set g.kernel/g.kernel_agents from builtins + g.kernel = getattr(builtins, 'kernel', None) + g.kernel_agents = getattr(builtins, 'kernel_agents', None) + debug_print(f"[DEBUG] Using global Semantic Kernel") + + if enable_semantic_kernel and per_user_semantic_kernel: + try: + user_settings_obj = get_user_settings(user_id) + debug_print(f"[DEBUG] user_settings_obj type: 
{type(user_settings_obj)}") + # Sanitize user_settings_obj to remove sensitive data (keys, base64, images) from debug logs + sanitized_settings = sanitize_settings_for_logging(user_settings_obj) if isinstance(user_settings_obj, dict) else user_settings_obj + debug_print(f"[DEBUG] user_settings_obj (sanitized): {sanitized_settings}") + + # user_settings_obj might be nested with 'settings' key + if isinstance(user_settings_obj, dict): + if 'settings' in user_settings_obj: + user_settings = user_settings_obj['settings'] + sanitized_user_settings = sanitize_settings_for_logging(user_settings) if isinstance(user_settings, dict) else user_settings + debug_print(f"[DEBUG] Extracted user_settings from 'settings' key (sanitized): {sanitized_user_settings}") + else: + user_settings = user_settings_obj + sanitized_user_settings = sanitize_settings_for_logging(user_settings) if isinstance(user_settings, dict) else user_settings + debug_print(f"[DEBUG] Using user_settings_obj directly (sanitized): {sanitized_user_settings}") + + user_enable_agents = user_settings.get('enable_agents', False) + debug_print(f"[DEBUG] user_enable_agents={user_enable_agents}") + except Exception as e: + debug_print(f"Error loading user settings: {e}") + import traceback + traceback.print_exc() + + # Streaming does not support image generation + if image_gen_enabled: + yield f"data: {json.dumps({'error': 'Image generation is not supported in streaming mode'})}\n\n" + return + + # Initialize Flask context + g.conversation_id = conversation_id + + # Clear plugin invocations + from semantic_kernel_plugins.plugin_invocation_logger import get_plugin_logger + plugin_logger = get_plugin_logger() + plugin_logger.clear_invocations_for_conversation(user_id, conversation_id) + + # Validate chat_type + if chat_type not in ('user', 'group'): + chat_type = 'user' + + # Initialize variables + search_query = user_message + hybrid_citations_list = [] + agent_citations_list = [] + web_search_citations_list = [] + system_messages_for_augmentation = [] + search_results = [] + selected_agent = None + + # Configuration + raw_conversation_history_limit = settings.get('conversation_history_limit', 6) + conversation_history_limit = math.ceil(raw_conversation_history_limit) + if conversation_history_limit % 2 != 0: + conversation_history_limit += 1 + + # Convert toggles + if isinstance(hybrid_search_enabled, str): + hybrid_search_enabled = hybrid_search_enabled.lower() == 'true' + if isinstance(web_search_enabled, str): + web_search_enabled = web_search_enabled.lower() == 'true' + + # Initialize GPT client (simplified version) + gpt_model = "" + gpt_client = None + enable_gpt_apim = settings.get('enable_gpt_apim', False) + + try: + if enable_gpt_apim: + raw = settings.get('azure_apim_gpt_deployment', '') + if not raw: + yield f"data: {json.dumps({'error': 'APIM deployment not configured'})}\n\n" + return + + apim_models = [m.strip() for m in raw.split(',') if m.strip()] + if not apim_models: + yield f"data: {json.dumps({'error': 'No valid APIM models configured'})}\n\n" + return + + if frontend_gpt_model and frontend_gpt_model in apim_models: + gpt_model = frontend_gpt_model + else: + gpt_model = apim_models[0] + + gpt_client = AzureOpenAI( + api_version=settings.get('azure_apim_gpt_api_version'), + azure_endpoint=settings.get('azure_apim_gpt_endpoint'), + api_key=settings.get('azure_apim_gpt_subscription_key') + ) + else: + auth_type = settings.get('azure_openai_gpt_authentication_type') + endpoint = settings.get('azure_openai_gpt_endpoint') + 
api_version = settings.get('azure_openai_gpt_api_version') + gpt_model_obj = settings.get('gpt_model', {}) + + if gpt_model_obj and gpt_model_obj.get('selected'): + gpt_model = gpt_model_obj['selected'][0]['deploymentName'] + else: + gpt_model = settings.get('azure_openai_gpt_deployment', 'gpt-4o') + + if frontend_gpt_model: + gpt_model = frontend_gpt_model + + if auth_type == 'managed_identity': + credential = DefaultAzureCredential() + token_provider = get_bearer_token_provider( + credential, + cognitive_services_scope + ) + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + azure_ad_token_provider=token_provider + ) + else: + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + api_key=settings.get('azure_openai_gpt_key') + ) + + if not gpt_client or not gpt_model: + yield f"data: {json.dumps({'error': 'Failed to initialize AI model'})}\n\n" + return + + except Exception as e: + yield f"data: {json.dumps({'error': f'Model initialization failed: {str(e)}'})}\n\n" + return + + # Load or create conversation (simplified) + if not conversation_id: + conversation_id = str(uuid.uuid4()) + conversation_item = { + 'id': conversation_id, + 'user_id': user_id, + 'last_updated': datetime.utcnow().isoformat(), + 'title': 'New Conversation', + 'context': [], + 'tags': [], + 'strict': False + } + cosmos_conversations_container.upsert_item(conversation_item) + else: + try: + conversation_item = cosmos_conversations_container.read_item( + item=conversation_id, partition_key=conversation_id + ) + except CosmosResourceNotFoundError: + conversation_item = { + 'id': conversation_id, + 'user_id': user_id, + 'last_updated': datetime.utcnow().isoformat(), + 'title': 'New Conversation', + 'context': [], + 'tags': [], + 'strict': False + } + cosmos_conversations_container.upsert_item(conversation_item) + + # Determine chat type + actual_chat_type = 'personal' + if conversation_item.get('chat_type'): + actual_chat_type = conversation_item['chat_type'] + + # Save user message + user_message_id = f"{conversation_id}_user_{int(time.time())}_{random.randint(1000,9999)}" + + user_metadata = {} + current_user = get_current_user_info() + if current_user: + user_metadata['user_info'] = { + 'user_id': current_user.get('userId'), + 'username': current_user.get('userPrincipalName'), + 'display_name': current_user.get('displayName'), + 'email': current_user.get('email'), + 'timestamp': datetime.utcnow().isoformat() + } + + user_metadata['button_states'] = { + 'image_generation': False, + 'document_search': hybrid_search_enabled, + 'web_search': bool(web_search_enabled) + } + + # Document search scope and selections + if hybrid_search_enabled: + user_metadata['workspace_search'] = { + 'search_enabled': True, + 'document_scope': document_scope, + 'selected_document_id': selected_document_id, + 'classification': classifications_to_send + } + + # Get document details if specific document selected + if selected_document_id and selected_document_id != "all": + try: + # Use the appropriate documents container based on scope + if document_scope == 'group': + cosmos_container = cosmos_group_documents_container + elif document_scope == 'public': + cosmos_container = cosmos_public_documents_container + elif document_scope == 'personal': + cosmos_container = cosmos_user_documents_container + + doc_query = "SELECT c.file_name, c.title, c.document_id, c.group_id FROM c WHERE c.id = @doc_id" + doc_params = [{"name": "@doc_id", "value": selected_document_id}] + doc_results = 
list(cosmos_container.query_items( + query=doc_query, parameters=doc_params, enable_cross_partition_query=True + )) + if doc_results: + doc_info = doc_results[0] + user_metadata['workspace_search']['document_name'] = doc_info.get('title') or doc_info.get('file_name') + user_metadata['workspace_search']['document_filename'] = doc_info.get('file_name') + except Exception as e: + debug_print(f"Error retrieving document details: {e}") + + # Add scope-specific details + if document_scope == 'group' and active_group_id: + try: + from functions_debug import debug_print + debug_print(f"Workspace search - looking up group for id: {active_group_id}") + group_doc = find_group_by_id(active_group_id) + debug_print(f"Workspace search group lookup result: {group_doc}") + + if group_doc and group_doc.get('name'): + group_name = group_doc.get('name') + user_metadata['workspace_search']['group_name'] = group_name + debug_print(f"Workspace search - set group_name to: {group_name}") + else: + debug_print(f"Workspace search - no group found or no name for id: {active_group_id}") + user_metadata['workspace_search']['group_name'] = None + + except Exception as e: + debug_print(f"Error retrieving group details: {e}") + user_metadata['workspace_search']['group_name'] = None + import traceback + traceback.print_exc() + + if document_scope == 'public' and active_public_workspace_id: + # Check if public workspace status allows chat operations + try: + from functions_public_workspaces import find_public_workspace_by_id, check_public_workspace_status_allows_operation + workspace_doc = find_public_workspace_by_id(active_public_workspace_id) + if workspace_doc: + allowed, reason = check_public_workspace_status_allows_operation(workspace_doc, 'chat') + if not allowed: + yield f"data: {json.dumps({'error': reason})}\n\n" + return + except Exception as e: + debug_print(f"Error checking public workspace status: {e}") + + user_metadata['workspace_search']['active_public_workspace_id'] = active_public_workspace_id + else: + user_metadata['workspace_search'] = { + 'search_enabled': False + } + + user_metadata['model_selection'] = { + 'selected_model': gpt_model, + 'frontend_requested_model': frontend_gpt_model, + 'reasoning_effort': reasoning_effort if reasoning_effort and reasoning_effort != 'none' else None, + 'streaming': 'Enabled' + } + + user_metadata['chat_context'] = { + 'conversation_id': conversation_id + } + + # --- Threading Logic for Streaming --- + previous_thread_id = None + try: + last_msg_query = f""" + SELECT TOP 1 c.metadata.thread_info.thread_id as thread_id + FROM c + WHERE c.conversation_id = '{conversation_id}' + ORDER BY c.timestamp DESC + """ + last_msgs = list(cosmos_messages_container.query_items( + query=last_msg_query, + partition_key=conversation_id + )) + if last_msgs: + previous_thread_id = last_msgs[0].get('thread_id') + except Exception as e: + debug_print(f"Error fetching last message for threading: {e}") + + current_user_thread_id = str(uuid.uuid4()) + latest_thread_id = current_user_thread_id + + # Add thread information to user metadata + user_metadata['thread_info'] = { + 'thread_id': current_user_thread_id, + 'previous_thread_id': previous_thread_id, + 'active_thread': True, + 'thread_attempt': 1 + } + + user_message_doc = { + 'id': user_message_id, + 'conversation_id': conversation_id, + 'role': 'user', + 'content': user_message, + 'timestamp': datetime.utcnow().isoformat(), + 'model_deployment_name': None, + 'metadata': user_metadata + } + + 
cosmos_messages_container.upsert_item(user_message_doc) + + # Log activity + try: + log_chat_activity( + user_id=user_id, + conversation_id=conversation_id, + message_type='user_message', + message_length=len(user_message) if user_message else 0, + has_document_search=hybrid_search_enabled, + has_image_generation=False, + document_scope=document_scope, + chat_context=actual_chat_type + ) + except Exception as e: + debug_print(f"Activity logging error: {e}") + + # Update conversation title + if conversation_item.get('title', 'New Conversation') == 'New Conversation' and user_message: + new_title = (user_message[:30] + '...') if len(user_message) > 30 else user_message + conversation_item['title'] = new_title + + conversation_item['last_updated'] = datetime.utcnow().isoformat() + cosmos_conversations_container.upsert_item(conversation_item) + + # Hybrid search (if enabled) + combined_documents = [] + if hybrid_search_enabled: + try: + search_args = { + "query": search_query, + "user_id": user_id, + "top_n": 12, + "doc_scope": document_scope, + } + + if active_group_id and (document_scope == 'group' or document_scope == 'all' or chat_type == 'group'): + search_args['active_group_id'] = active_group_id + + # Add active_public_workspace_id when: + # 1. Document scope is 'public' or + # 2. Document scope is 'all' and public workspaces are enabled + if active_public_workspace_id and (document_scope == 'public' or document_scope == 'all'): + search_args['active_public_workspace_id'] = active_public_workspace_id + + if selected_document_id: + search_args['document_id'] = selected_document_id + + search_results = hybrid_search(**search_args) + except Exception as e: + debug_print(f"Error during hybrid search: {e}") + + if search_results: + retrieved_texts = [] + + for doc in search_results: + chunk_text = doc.get('chunk_text', '') + file_name = doc.get('file_name', 'Unknown') + version = doc.get('version', 'N/A') + chunk_sequence = doc.get('chunk_sequence', 0) + page_number = doc.get('page_number') or chunk_sequence or 1 + citation_id = doc.get('id', str(uuid.uuid4())) + classification = doc.get('document_classification') + chunk_id = doc.get('chunk_id', str(uuid.uuid4())) + score = doc.get('score', 0.0) + group_id = doc.get('group_id', None) + + citation = f"(Source: {file_name}, Page: {page_number}) [#{citation_id}]" + retrieved_texts.append(f"{chunk_text}\n{citation}") + + combined_documents.append({ + "file_name": file_name, + "citation_id": citation_id, + "page_number": page_number, + "version": version, + "classification": classification, + "chunk_text": chunk_text, + "chunk_sequence": chunk_sequence, + "chunk_id": chunk_id, + "score": score, + "group_id": group_id, + }) + + # Build citation data to match non-streaming format + citation_data = { + "file_name": file_name, + "citation_id": citation_id, + "page_number": page_number, + "chunk_id": chunk_id, + "chunk_sequence": chunk_sequence, + "score": score, + "group_id": group_id, + "version": version, + "classification": classification + } + hybrid_citations_list.append(citation_data) + + # --- Extract metadata (keywords/abstract) for additional citations --- + if settings.get('enable_extract_meta_data', False): + from functions_documents import get_document_metadata_for_citations + + processed_doc_ids = set() + + for doc in search_results: + doc_id = doc.get('document_id') or doc.get('id') + if not doc_id or doc_id in processed_doc_ids: + continue + + processed_doc_ids.add(doc_id) + + file_name = doc.get('file_name', 'Unknown') + doc_group_id 
= doc.get('group_id', None) + + # Map document_scope to correct parameter names for the function + metadata_params = {'user_id': user_id} + if document_scope == 'group': + metadata_params['group_id'] = active_group_id + elif document_scope == 'public': + metadata_params['public_workspace_id'] = active_public_workspace_id + + metadata = get_document_metadata_for_citations( + doc_id, + **metadata_params + ) + + if metadata: + keywords = metadata.get('keywords', []) + abstract = metadata.get('abstract', '') + + if keywords and len(keywords) > 0: + keywords_citation_id = f"{doc_id}_keywords" + keywords_text = ', '.join(keywords) if isinstance(keywords, list) else str(keywords) + + keywords_citation = { + "file_name": file_name, + "citation_id": keywords_citation_id, + "page_number": "Metadata", + "chunk_id": keywords_citation_id, + "chunk_sequence": 9999, + "score": 0.0, + "group_id": doc_group_id, + "version": doc.get('version', 'N/A'), + "classification": doc.get('document_classification'), + "metadata_type": "keywords", + "metadata_content": keywords_text + } + hybrid_citations_list.append(keywords_citation) + combined_documents.append(keywords_citation) + + keywords_context = f"Document Keywords ({file_name}): {keywords_text}" + retrieved_texts.append(keywords_context) + + if abstract and len(abstract.strip()) > 0: + abstract_citation_id = f"{doc_id}_abstract" + + abstract_citation = { + "file_name": file_name, + "citation_id": abstract_citation_id, + "page_number": "Metadata", + "chunk_id": abstract_citation_id, + "chunk_sequence": 9998, + "score": 0.0, + "group_id": doc_group_id, + "version": doc.get('version', 'N/A'), + "classification": doc.get('document_classification'), + "metadata_type": "abstract", + "metadata_content": abstract + } + hybrid_citations_list.append(abstract_citation) + combined_documents.append(abstract_citation) + + abstract_context = f"Document Abstract ({file_name}): {abstract}" + retrieved_texts.append(abstract_context) + + vision_analysis = metadata.get('vision_analysis') + if vision_analysis: + vision_citation_id = f"{doc_id}_vision" + + vision_description = vision_analysis.get('description', '') + vision_objects = vision_analysis.get('objects', []) + vision_text = vision_analysis.get('text', '') + + vision_content = f"AI Vision Analysis:\n" + if vision_description: + vision_content += f"Description: {vision_description}\n" + if vision_objects: + vision_content += f"Objects: {', '.join(vision_objects)}\n" + if vision_text: + vision_content += f"Text in Image: {vision_text}\n" + + vision_citation = { + "file_name": file_name, + "citation_id": vision_citation_id, + "page_number": "AI Vision", + "chunk_id": vision_citation_id, + "chunk_sequence": 9997, + "score": 0.0, + "group_id": doc_group_id, + "version": doc.get('version', 'N/A'), + "classification": doc.get('document_classification'), + "metadata_type": "vision", + "metadata_content": vision_content + } + hybrid_citations_list.append(vision_citation) + combined_documents.append(vision_citation) + + vision_context = f"AI Vision Analysis ({file_name}): {vision_content}" + retrieved_texts.append(vision_context) + + retrieved_content = "\n\n".join(retrieved_texts) + system_prompt_search = f"""You are an AI assistant. Use the following retrieved document excerpts to answer the user's question. Cite sources using the format (Source: filename, Page: page number). + Retrieved Excerpts: + {retrieved_content} + + Based *only* on the information provided above, answer the user's query. 
If the answer isn't in the excerpts, say so. + + Example + User: What is the policy on double dipping? + Assistant: The policy prohibits entities from using federal funds received through one program to apply for additional funds through another program, commonly known as 'double dipping' (Source: PolicyDocument.pdf, Page: 12) + """ + + system_messages_for_augmentation.append({ + 'role': 'system', + 'content': system_prompt_search, + 'documents': combined_documents + }) + + # Reorder hybrid citations list in descending order based on page_number + hybrid_citations_list.sort(key=lambda x: x.get('page_number', 0), reverse=True) + + if web_search_enabled: + perform_web_search( + settings=settings, + conversation_id=conversation_id, + user_id=user_id, + user_message=user_message, + user_message_id=user_message_id, + chat_type=chat_type, + document_scope=document_scope, + active_group_id=active_group_id, + active_public_workspace_id=active_public_workspace_id, + search_query=search_query, + system_messages_for_augmentation=system_messages_for_augmentation, + agent_citations_list=agent_citations_list, + web_search_citations_list=web_search_citations_list, + ) + + # Update message chat type + message_chat_type = None + if hybrid_search_enabled and search_results and len(search_results) > 0: + if document_scope == 'group': + message_chat_type = 'group' + elif document_scope == 'public': + message_chat_type = 'public' + else: + message_chat_type = 'personal' + else: + message_chat_type = 'Model' + + user_metadata['chat_context']['chat_type'] = message_chat_type + user_message_doc['metadata'] = user_metadata + cosmos_messages_container.upsert_item(user_message_doc) + + # Prepare conversation history + conversation_history_for_api = [] + + try: + all_messages_query = "SELECT * FROM c WHERE c.conversation_id = @conv_id ORDER BY c.timestamp ASC" + params_all = [{"name": "@conv_id", "value": conversation_id}] + all_messages = list(cosmos_messages_container.query_items( + query=all_messages_query, parameters=params_all, + partition_key=conversation_id, enable_cross_partition_query=True + )) + + # Sort messages using threading logic + all_messages = sort_messages_by_thread(all_messages) + + total_messages = len(all_messages) + num_recent_messages = min(total_messages, conversation_history_limit) + recent_messages = all_messages[-num_recent_messages:] + + # Add augmentation messages + for aug_msg in system_messages_for_augmentation: + conversation_history_for_api.append({ + 'role': aug_msg['role'], + 'content': aug_msg['content'] + }) + + # Add recent messages + allowed_roles_in_history = ['user', 'assistant'] + for message in recent_messages: + if message.get('role') in allowed_roles_in_history: + conversation_history_for_api.append({ + 'role': message['role'], + 'content': message.get('content', '') + }) + + except Exception as e: + yield f"data: {json.dumps({'error': f'History error: {str(e)}'})}\n\n" + return + + # Add system prompt + default_system_prompt = settings.get('default_system_prompt', '').strip() + if default_system_prompt: + has_general_system_prompt = any( + msg.get('role') == 'system' and not ( + "retrieved document excerpts" in msg.get('content', '') + ) + for msg in conversation_history_for_api + ) + if not has_general_system_prompt: + conversation_history_for_api.insert(0, { + 'role': 'system', + 'content': default_system_prompt + }) + + # Check if agents are enabled and should be used + selected_agent = None + agent_name_used = None + agent_display_name_used = None + 
use_agent_streaming = False + + if enable_semantic_kernel and user_enable_agents: + # Agent selection logic (similar to non-streaming) + kernel = get_kernel() + all_agents = get_kernel_agents() + + if all_agents: + agent_name_to_select = None + if per_user_semantic_kernel: + # user_settings.get('selected_agent') returns a dict with agent info + selected_agent_info = user_settings.get('selected_agent') + if isinstance(selected_agent_info, dict): + agent_name_to_select = selected_agent_info.get('name') + elif isinstance(selected_agent_info, str): + agent_name_to_select = selected_agent_info + debug_print(f"[Streaming] Per-user agent name to select: {agent_name_to_select}") + else: + global_selected_agent_info = settings.get('global_selected_agent') + if global_selected_agent_info: + agent_name_to_select = global_selected_agent_info.get('name') + debug_print(f"[Streaming] Global agent name to select: {agent_name_to_select}") + + # Find the agent + agent_iter = all_agents.values() if isinstance(all_agents, dict) else all_agents + for agent in agent_iter: + agent_obj_name = getattr(agent, 'name', None) + debug_print(f"[Streaming] Checking agent: {agent_obj_name} against target: {agent_name_to_select}") + if agent_name_to_select and agent_obj_name == agent_name_to_select: + selected_agent = agent + debug_print(f"[Streaming] ✅ Found matching agent: {agent_obj_name}") + break + + # Fallback to default agent + if not selected_agent: + for agent in agent_iter: + if getattr(agent, 'default_agent', False): + selected_agent = agent + debug_print(f"[Streaming] Using default agent: {getattr(agent, 'name', 'unknown')}") + break + + # Fallback to first agent + if not selected_agent: + selected_agent = next(iter(agent_iter), None) + if selected_agent: + debug_print(f"[Streaming] Using first agent: {getattr(selected_agent, 'name', 'unknown')}") + + if selected_agent: + use_agent_streaming = True + agent_name_used = getattr(selected_agent, 'name', 'agent') + agent_display_name_used = getattr(selected_agent, 'display_name', agent_name_used) + actual_model_used = getattr(selected_agent, 'deployment_name', None) or gpt_model + debug_print(f"--- Streaming from Agent: {agent_name_used} (model: {actual_model_used}) ---") + else: + debug_print(f"[Streaming] ⚠️ No agent selected, falling back to GPT") + + # Stream the response + accumulated_content = "" + token_usage_data = None # Will be populated from final stream chunk + assistant_message_id = f"{conversation_id}_assistant_{int(time.time())}_{random.randint(1000,9999)}" + final_model_used = gpt_model # Default to gpt_model, will be overridden if agent is used + + # DEBUG: Check agent streaming decision + debug_print(f"[DEBUG] use_agent_streaming={use_agent_streaming}, selected_agent={selected_agent is not None}") + debug_print(f"[DEBUG] enable_semantic_kernel={enable_semantic_kernel}, user_enable_agents={user_enable_agents}") + + try: + if use_agent_streaming and selected_agent: + # Stream from agent using invoke_stream + debug_print(f"--- Streaming from Agent: {agent_name_used} ---") + + # Import required classes + from semantic_kernel.contents.chat_message_content import ChatMessageContent + + # Convert conversation history to ChatMessageContent (same as non-streaming) + agent_message_history = [ + ChatMessageContent( + role=msg["role"], + content=msg["content"], + metadata=msg.get("metadata", {}) + ) + for msg in conversation_history_for_api + ] + + # Stream agent responses - collect chunks first then yield + async def stream_agent_async(): + """Collect all 
streaming chunks from agent""" + chunks = [] + usage_data = None + + # invoke_stream doesn't need a thread parameter - it works like invoke but streams + async for response in selected_agent.invoke_stream(messages=agent_message_history): + # Extract content from StreamingChatMessageContent + if hasattr(response, 'content') and response.content: + chunks.append(str(response.content)) + elif isinstance(response, str): + chunks.append(response) + else: + # Fallback: convert to string + chunks.append(str(response)) + + # Check for usage metadata in the last response + # Don't break early - keep collecting all chunks + if hasattr(response, 'metadata') and isinstance(response.metadata, dict): + usage = response.metadata.get('usage') + if usage: + usage_data = usage # Keep updating, last one wins + + return chunks, usage_data + + # Execute async streaming + import asyncio + try: + # Try to get existing event loop + loop = asyncio.get_event_loop() + if loop.is_closed(): + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + except RuntimeError: + # No event loop in current thread + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + try: + # Run streaming and collect chunks and usage + chunks, stream_usage = loop.run_until_complete(stream_agent_async()) + + # Yield chunks to frontend + for chunk_content in chunks: + accumulated_content += chunk_content + yield f"data: {json.dumps({'content': chunk_content})}\n\n" + + # Try to capture token usage from stream metadata + if stream_usage: + # stream_usage is a CompletionUsage object, not a dict + prompt_tokens = getattr(stream_usage, 'prompt_tokens', 0) + completion_tokens = getattr(stream_usage, 'completion_tokens', 0) + total_tokens = getattr(stream_usage, 'total_tokens', None) + + # Calculate total if not provided + if total_tokens is None or total_tokens == 0: + total_tokens = prompt_tokens + completion_tokens + + token_usage_data = { + 'prompt_tokens': prompt_tokens, + 'completion_tokens': completion_tokens, + 'total_tokens': total_tokens, + 'captured_at': datetime.utcnow().isoformat() + } + debug_print(f"[Agent Streaming Tokens] From metadata - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}") + except Exception as stream_error: + debug_print(f"❌ Agent streaming error: {stream_error}") + import traceback + traceback.print_exc() + yield f"data: {json.dumps({'error': f'Agent streaming failed: {str(stream_error)}'})}\n\n" + return + + # Collect token usage from kernel services if not captured from stream + if not token_usage_data: + kernel = get_kernel() + if kernel: + try: + for service in getattr(kernel, "services", {}).values(): + prompt_tokens = getattr(service, "prompt_tokens", None) + completion_tokens = getattr(service, "completion_tokens", None) + total_tokens = getattr(service, "total_tokens", None) + + if prompt_tokens is not None or completion_tokens is not None: + token_usage_data = { + 'prompt_tokens': prompt_tokens or 0, + 'completion_tokens': completion_tokens or 0, + 'total_tokens': total_tokens or (prompt_tokens or 0) + (completion_tokens or 0), + 'captured_at': datetime.utcnow().isoformat() + } + debug_print(f"[Agent Streaming Tokens] From kernel service - prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}") + break + except Exception as e: + debug_print(f"Warning: Could not collect token usage from kernel services: {e}") + + # Capture agent citations after streaming completes + # Plugin invocations should have been logged during agent 
execution + plugin_logger = get_plugin_logger() + + # Debug: Check all invocations first + all_invocations = plugin_logger.get_recent_invocations() + debug_print(f"[Agent Streaming] Total plugin invocations logged: {len(all_invocations)}") + + plugin_invocations = plugin_logger.get_invocations_for_conversation(user_id, conversation_id) + debug_print(f"[Agent Streaming] Found {len(plugin_invocations)} plugin invocations for user {user_id}, conversation {conversation_id}") + + # If no invocations found, check if plugins were called at all + if len(plugin_invocations) == 0 and len(all_invocations) > 0: + debug_print(f"[Agent Streaming] ⚠️ Plugin invocations exist but not for this conversation - possible filtering issue") + # Debug: show last few invocations + for inv in all_invocations[-3:]: + debug_print(f"[Agent Streaming] Recent invocation: user={inv.user_id}, conv={inv.conversation_id}, plugin={inv.plugin_name}.{inv.function_name}") + + # Convert to citation format + for inv in plugin_invocations: + timestamp_str = None + if inv.timestamp: + if hasattr(inv.timestamp, 'isoformat'): + timestamp_str = inv.timestamp.isoformat() + else: + timestamp_str = str(inv.timestamp) + + def make_json_serializable(obj): + if obj is None: + return None + elif isinstance(obj, (str, int, float, bool)): + return obj + elif isinstance(obj, dict): + return {str(k): make_json_serializable(v) for k, v in obj.items()} + elif isinstance(obj, (list, tuple)): + return [make_json_serializable(item) for item in obj] + else: + return str(obj) + + citation = { + 'tool_name': f"{inv.plugin_name}.{inv.function_name}", + 'function_name': inv.function_name, + 'plugin_name': inv.plugin_name, + 'function_arguments': make_json_serializable(inv.parameters), + 'function_result': make_json_serializable(inv.result), + 'duration_ms': inv.duration_ms, + 'timestamp': timestamp_str, + 'success': inv.success, + 'error_message': make_json_serializable(inv.error_message), + 'user_id': inv.user_id + } + agent_citations_list.append(citation) + + debug_print(f"[Agent Streaming] Captured {len(agent_citations_list)} citations") + final_model_used = actual_model_used + + else: + # Stream from regular GPT model (non-agent) + debug_print(f"--- Streaming from GPT ({gpt_model}) ---") + + # Prepare stream parameters + stream_params = { + 'model': gpt_model, + 'messages': conversation_history_for_api, + 'stream': True, + 'stream_options': {'include_usage': True} # Request token usage in final chunk + } + + # Add reasoning_effort if provided and not 'none' + if reasoning_effort and reasoning_effort != 'none': + stream_params['reasoning_effort'] = reasoning_effort + debug_print(f"Using reasoning effort: {reasoning_effort}") + + final_model_used = gpt_model + + try: + stream = gpt_client.chat.completions.create(**stream_params) + except Exception as e: + # Check if error is related to reasoning_effort parameter + error_str = str(e).lower() + if reasoning_effort and reasoning_effort != 'none' and ( + 'reasoning_effort' in error_str or + 'unrecognized request argument' in error_str or + 'invalid_request_error' in error_str + ): + debug_print(f"Reasoning effort not supported by {gpt_model}, retrying without reasoning_effort...") + # Retry without reasoning_effort + stream_params.pop('reasoning_effort', None) + stream = gpt_client.chat.completions.create(**stream_params) + else: + raise + + for chunk in stream: + if chunk.choices and len(chunk.choices) > 0: + delta = chunk.choices[0].delta + if delta.content: + accumulated_content += delta.content + yield 
f"data: {json.dumps({'content': delta.content})}\n\n" + + # Capture token usage from final chunk with stream_options + if hasattr(chunk, 'usage') and chunk.usage: + token_usage_data = { + 'prompt_tokens': chunk.usage.prompt_tokens, + 'completion_tokens': chunk.usage.completion_tokens, + 'total_tokens': chunk.usage.total_tokens, + 'captured_at': datetime.utcnow().isoformat() + } + debug_print(f"[Streaming Tokens] Captured usage - prompt: {chunk.usage.prompt_tokens}, completion: {chunk.usage.completion_tokens}, total: {chunk.usage.total_tokens}") + + # Stream complete - save message and send final metadata + # Get user thread info to maintain thread consistency + user_thread_id = None + user_previous_thread_id = None + try: + user_msg = cosmos_messages_container.read_item( + item=user_message_id, + partition_key=conversation_id + ) + user_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + user_previous_thread_id = user_msg.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + except Exception as e: + debug_print(f"Warning: Could not retrieve thread_id from user message: {e}") + + assistant_doc = { + 'id': assistant_message_id, + 'conversation_id': conversation_id, + 'role': 'assistant', + 'content': accumulated_content, + 'timestamp': datetime.utcnow().isoformat(), + 'augmented': bool(system_messages_for_augmentation), + 'hybrid_citations': hybrid_citations_list, + 'web_search_citations': web_search_citations_list, + 'hybridsearch_query': search_query if hybrid_search_enabled and search_results else None, + 'agent_citations': agent_citations_list, + 'user_message': user_message, + 'model_deployment_name': final_model_used if use_agent_streaming else gpt_model, + 'agent_display_name': agent_display_name_used if use_agent_streaming else None, + 'agent_name': agent_name_used if use_agent_streaming else None, + 'metadata': { + 'reasoning_effort': reasoning_effort, + 'thread_info': { + 'thread_id': user_thread_id, + 'previous_thread_id': user_previous_thread_id, + 'active_thread': True, + 'thread_attempt': 1 + }, + 'token_usage': token_usage_data if token_usage_data else None # Store token usage from stream + } + } + cosmos_messages_container.upsert_item(assistant_doc) + + # Log chat token usage to activity_logs for easy reporting + if token_usage_data and token_usage_data.get('total_tokens'): + try: + from functions_activity_logging import log_token_usage + + # Determine workspace type based on active group/public workspace + workspace_type = 'personal' + if active_public_workspace_id: + workspace_type = 'public' + elif active_group_id: + workspace_type = 'group' + + log_token_usage( + user_id=user_id, + token_type='chat', + total_tokens=token_usage_data.get('total_tokens'), + model=final_model_used if use_agent_streaming else gpt_model, + workspace_type=workspace_type, + prompt_tokens=token_usage_data.get('prompt_tokens'), + completion_tokens=token_usage_data.get('completion_tokens'), + conversation_id=conversation_id, + message_id=assistant_message_id, + group_id=active_group_id, + public_workspace_id=active_public_workspace_id, + additional_context={ + 'agent_name': agent_name_used if use_agent_streaming else None, + 'augmented': bool(system_messages_for_augmentation), + 'reasoning_effort': reasoning_effort + } + ) + debug_print(f"✅ Logged streaming chat token usage: {token_usage_data.get('total_tokens')} tokens") + except Exception as log_error: + debug_print(f"⚠️ Warning: Failed to log streaming chat token usage: {log_error}") + # Don't fail 
the chat flow if logging fails + + # Update conversation + conversation_item['last_updated'] = datetime.utcnow().isoformat() + + try: + conversation_item = collect_conversation_metadata( + user_message=user_message, + conversation_id=conversation_id, + user_id=user_id, + active_group_id=active_group_id, + document_scope=document_scope, + selected_document_id=selected_document_id, + model_deployment=gpt_model, + hybrid_search_enabled=hybrid_search_enabled, + image_gen_enabled=False, + selected_documents=combined_documents if combined_documents else None, + selected_agent=None, + selected_agent_details=None, + search_results=search_results if search_results else None, + conversation_item=conversation_item + ) + except Exception as e: + debug_print(f"Error collecting conversation metadata: {e}") + + cosmos_conversations_container.upsert_item(conversation_item) + + # Send final message with metadata + final_data = { + 'done': True, + 'conversation_id': conversation_id, + 'conversation_title': conversation_item['title'], + 'classification': conversation_item.get('classification', []), + 'model_deployment_name': final_model_used if use_agent_streaming else gpt_model, + 'message_id': assistant_message_id, + 'user_message_id': user_message_id, + 'augmented': bool(system_messages_for_augmentation), + 'hybrid_citations': hybrid_citations_list, + 'web_search_citations': web_search_citations_list, + 'agent_citations': agent_citations_list, + 'agent_display_name': agent_display_name_used if use_agent_streaming else None, + 'agent_name': agent_name_used if use_agent_streaming else None, + 'full_content': accumulated_content + } + yield f"data: {json.dumps(final_data)}\n\n" + + except Exception as e: + error_msg = str(e) + debug_print(f"Error during streaming: {error_msg}") + + # Save partial response if we have content + if accumulated_content: + current_assistant_thread_id = str(uuid.uuid4()) + + assistant_doc = { + 'id': assistant_message_id, + 'conversation_id': conversation_id, + 'role': 'assistant', + 'content': accumulated_content, + 'timestamp': datetime.utcnow().isoformat(), + 'augmented': bool(system_messages_for_augmentation), + 'hybrid_citations': hybrid_citations_list, + 'web_search_citations': web_search_citations_list, + 'hybridsearch_query': search_query if hybrid_search_enabled and search_results else None, + 'agent_citations': agent_citations_list, + 'user_message': user_message, + 'model_deployment_name': final_model_used if use_agent_streaming else gpt_model, + 'agent_display_name': agent_display_name_used if use_agent_streaming else None, + 'agent_name': agent_name_used if use_agent_streaming else None, + 'metadata': { + 'incomplete': True, + 'error': error_msg, + 'reasoning_effort': reasoning_effort, + 'thread_info': { + 'thread_id': user_thread_id, + 'previous_thread_id': user_previous_thread_id, + 'active_thread': True, + 'thread_attempt': 1 + } + } + } + try: + cosmos_messages_container.upsert_item(assistant_doc) + except: + pass + + yield f"data: {json.dumps({'error': error_msg, 'partial_content': accumulated_content})}\n\n" + + except Exception as e: + import traceback + error_traceback = traceback.format_exc() + debug_print(f"[STREAM API ERROR] Unhandled exception: {str(e)}") + debug_print(f"[STREAM API ERROR] Full traceback:\n{error_traceback}") + yield f"data: {json.dumps({'error': f'Internal server error: {str(e)}'})}\n\n" + + return Response( + stream_with_context(generate()), + mimetype='text/event-stream', + headers={ + 'Cache-Control': 'no-cache', + 'X-Accel-Buffering': 
'no', + 'Connection': 'keep-alive' + } + ) + + @app.route('/api/message/<message_id>/mask', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def mask_message_api(message_id): + """ + API endpoint to mask/unmask messages or parts of messages. + This prevents masked content from being sent to the AI model in conversation history. + """ + try: + settings = get_settings() + data = request.get_json() + user_id = get_current_user_id() + + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + # Get action: "mask_all", "mask_selection", or "unmask_all" + action = data.get('action') + selection = data.get('selection', {}) + user_display_name = data.get('display_name', 'Unknown User') + + # Validate action + if action not in ['mask_all', 'mask_selection', 'unmask_all']: + return jsonify({'error': 'Invalid action'}), 400 + + # Fetch the message + try: + # Query for the message (need conversation_id for partition key) + query = "SELECT * FROM c WHERE c.id = @message_id" + params = [{"name": "@message_id", "value": message_id}] + + # We need to find the message across all partitions first + # This is inefficient but necessary without knowing the conversation_id + message_results = list(cosmos_messages_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + )) + + if not message_results: + return jsonify({'error': 'Message not found'}), 404 + + message_doc = message_results[0] + conversation_id = message_doc.get('conversation_id') + + # Verify ownership - only the message author can mask their message + message_user_id = message_doc.get('metadata', {}).get('user_info', {}).get('user_id') + if not message_user_id: + # Fallback: check conversation ownership for backwards compatibility + # All messages in a conversation (user, assistant, system) belong to the conversation owner + try: + conversation = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + if conversation.get('user_id') != user_id: + return jsonify({'error': 'You can only mask messages from your own conversations'}), 403 + except: + return jsonify({'error': 'Conversation not found'}), 404 + elif message_user_id != user_id: + return jsonify({'error': 'You can only mask your own messages'}), 403 + + except Exception as e: + debug_print(f"Error fetching message {message_id}: {str(e)}") + return jsonify({'error': f'Error fetching message: {str(e)}'}), 500 + + # Initialize metadata if it doesn't exist + if 'metadata' not in message_doc: + message_doc['metadata'] = {} + + # Process based on action + if action == 'mask_all': + # Mask the entire message + message_doc['metadata']['masked'] = True + message_doc['metadata']['masked_by_user_id'] = user_id + message_doc['metadata']['masked_timestamp'] = datetime.now(timezone.utc).isoformat() + message_doc['metadata']['masked_by_display_name'] = user_display_name + + elif action == 'unmask_all': + # Unmask the entire message and clear all masked ranges + message_doc['metadata']['masked'] = False + message_doc['metadata']['masked_ranges'] = [] + message_doc['metadata']['masked_by_user_id'] = None + message_doc['metadata']['masked_timestamp'] = None + message_doc['metadata']['masked_by_display_name'] = None + + elif action == 'mask_selection': + # Mask a selection of text + start = selection.get('start') + end = selection.get('end') + text = selection.get('text', '') + + if start is None or end is None: + return jsonify({'error': 'Selection start and 
end required'}), 400 + + # Initialize masked_ranges if it doesn't exist + if 'masked_ranges' not in message_doc['metadata']: + message_doc['metadata']['masked_ranges'] = [] + + # Create new masked range + new_range = { + 'id': str(uuid.uuid4()), + 'user_id': user_id, + 'display_name': user_display_name, + 'start': start, + 'end': end, + 'text': text, + 'timestamp': datetime.now(timezone.utc).isoformat() + } + + # Add the new range + message_doc['metadata']['masked_ranges'].append(new_range) + + # Sort and merge overlapping/adjacent ranges + message_doc['metadata']['masked_ranges'] = merge_masked_ranges( + message_doc['metadata']['masked_ranges'] + ) + + # Update the message in Cosmos DB + try: + cosmos_messages_container.upsert_item(message_doc) + except Exception as e: + debug_print(f"Error updating message {message_id}: {str(e)}") + return jsonify({'error': f'Error updating message: {str(e)}'}), 500 + + return jsonify({ + 'success': True, + 'message_id': message_id, + 'masked': message_doc['metadata'].get('masked', False), + 'masked_ranges': message_doc['metadata'].get('masked_ranges', []) + }), 200 + + except Exception as e: + import traceback + error_traceback = traceback.format_exc() + debug_print(f"[MASK API ERROR] Unhandled exception: {str(e)}") + debug_print(f"[MASK API ERROR] Full traceback:\n{error_traceback}") + return jsonify({ + 'error': f'Internal server error: {str(e)}', + 'details': error_traceback if app.debug else None + }), 500 + + +def merge_masked_ranges(ranges): + """ + Merge overlapping and adjacent masked ranges. + Preserves the earliest timestamp and user info for merged ranges. + """ + if not ranges: + return [] + + # Sort by start position + sorted_ranges = sorted(ranges, key=lambda x: x['start']) + merged = [sorted_ranges[0]] + + for current in sorted_ranges[1:]: + last_merged = merged[-1] + + # Check if current range overlaps or is adjacent to the last merged range + if current['start'] <= last_merged['end']: + # Merge: extend the end if current goes further + if current['end'] > last_merged['end']: + last_merged['end'] = current['end'] + # Update text to cover merged range + last_merged['text'] = last_merged['text'] + current['text'][last_merged['end'] - current['start']:] + # Keep the earliest timestamp + if current['timestamp'] < last_merged['timestamp']: + last_merged['timestamp'] = current['timestamp'] + else: + # No overlap, add as separate range + merged.append(current) + + return merged + + +def remove_masked_content(content, masked_ranges): + """ + Remove masked portions from message content. + Works backwards through sorted ranges to maintain correct offsets. 
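+ Example (illustrative values only): remove_masked_content("abcdef", [{"start": 1, "end": 3}]) returns "adef".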
+ """ + if not masked_ranges or not content: + return content + + # Sort ranges by start position (descending) to work backwards + sorted_ranges = sorted(masked_ranges, key=lambda x: x['start'], reverse=True) + + # Create a list from content for easier manipulation + result = content + + # Remove masked ranges working backwards to maintain offsets + for range_item in sorted_ranges: + start = range_item['start'] + end = range_item['end'] + + # Ensure indices are within bounds + if start < 0: + start = 0 + if end > len(result): + end = len(result) + + # Remove the masked portion + if start < end: + result = result[:start] + result[end:] + + return result + + +def _extract_web_search_citations_from_content(content: str) -> List[Dict[str, str]]: + if not content: + return [] + debug_print(f"[Citation Extraction] Extracting citations from:\n{content}\n") + + citations: List[Dict[str, str]] = [] + + markdown_pattern = re.compile(r"\[([^\]]+)\]\((https?://[^\s\)]+)(?:\s+\"([^\"]+)\")?\)") + html_pattern = re.compile( + r"]+href=\"(https?://[^\"]+)\"([^>]*)>(.*?)", + re.IGNORECASE | re.DOTALL, + ) + title_pattern = re.compile(r"title=\"([^\"]+)\"", re.IGNORECASE) + url_pattern = re.compile(r"https?://[^\s\)\]\">]+") + + occupied_spans: List[range] = [] + + for match in markdown_pattern.finditer(content): + text, url, title = match.groups() + url = (url or "").strip().rstrip(".,)") + if not url: + continue + display_title = (title or text or url).strip() + citations.append({"url": url, "title": display_title}) + occupied_spans.append(range(match.start(), match.end())) + + for match in html_pattern.finditer(content): + url, attrs, inner = match.groups() + url = (url or "").strip().rstrip(".,)") + if not url: + continue + title_match = title_pattern.search(attrs or "") + title = title_match.group(1) if title_match else None + inner_text = re.sub(r"<[^>]+>", "", inner or "").strip() + display_title = (title or inner_text or url).strip() + citations.append({"url": url, "title": display_title}) + occupied_spans.append(range(match.start(), match.end())) + + for match in url_pattern.finditer(content): + if any(match.start() in span for span in occupied_spans): + continue + url = (match.group(0) or "").strip().rstrip(".,)") + if not url: + continue + citations.append({"url": url, "title": url}) + debug_print(f"[Citation Extraction] Extracted {len(citations)} citations. - {citations}\n") + + return citations + + +def _extract_token_usage_from_metadata(metadata: Dict[str, Any]) -> Dict[str, int]: + if not isinstance(metadata, Mapping): + debug_print( + "[Web Search][Token Usage Extraction] Metadata is not a mapping. " + f"type={type(metadata)}" + ) + return {} + + usage = metadata.get("usage") + if not usage: + debug_print("[Web Search][Token Usage Extraction] No usage field found in metadata.") + return {} + + if isinstance(usage, str): + raw_usage = usage.strip() + if not raw_usage: + debug_print("[Web Search][Token Usage Extraction] Usage string was empty.") + return {} + try: + usage = json.loads(raw_usage) + except json.JSONDecodeError: + try: + usage = ast.literal_eval(raw_usage) + except (ValueError, SyntaxError): + debug_print( + "[Web Search][Token Usage Extraction] Failed to parse usage string." + ) + return {} + + if not isinstance(usage, Mapping): + debug_print( + "[Web Search][Token Usage Extraction] Usage is not a mapping. 
" + f"type={type(usage)}" + ) + return {} + + def to_int(value: Any) -> Optional[int]: + try: + return int(float(value)) + except (TypeError, ValueError): + return None + + total_tokens = to_int(usage.get("total_tokens")) + if total_tokens is None: + debug_print( + "[Web Search][Token Usage Extraction] total_tokens missing or invalid. " + f"usage={usage}" + ) + return {} + + prompt_tokens = to_int(usage.get("prompt_tokens")) or 0 + completion_tokens = to_int(usage.get("completion_tokens")) or 0 + debug_print( + "[Web Search][Token Usage Extraction] Extracted token usage - " + f"prompt: {prompt_tokens}, completion: {completion_tokens}, total: {total_tokens}" + ) + + return { + "total_tokens": int(total_tokens), + "prompt_tokens": int(prompt_tokens), + "completion_tokens": int(completion_tokens), + } + +def perform_web_search( + *, + settings, + conversation_id, + user_id, + user_message, + user_message_id, + chat_type, + document_scope, + active_group_id, + active_public_workspace_id, + search_query, + system_messages_for_augmentation, + agent_citations_list, + web_search_citations_list, +): + debug_print("[WebSearch] ========== ENTERING perform_web_search ==========") + debug_print(f"[WebSearch] Parameters received:") + debug_print(f"[WebSearch] conversation_id: {conversation_id}") + debug_print(f"[WebSearch] user_id: {user_id}") + debug_print(f"[WebSearch] user_message: {user_message[:100] if user_message else None}...") + debug_print(f"[WebSearch] user_message_id: {user_message_id}") + debug_print(f"[WebSearch] chat_type: {chat_type}") + debug_print(f"[WebSearch] document_scope: {document_scope}") + debug_print(f"[WebSearch] active_group_id: {active_group_id}") + debug_print(f"[WebSearch] active_public_workspace_id: {active_public_workspace_id}") + debug_print(f"[WebSearch] search_query: {search_query[:100] if search_query else None}...") + + enable_web_search = settings.get("enable_web_search") + debug_print(f"[WebSearch] enable_web_search setting: {enable_web_search}") + + if not enable_web_search: + debug_print("[WebSearch] Web search is DISABLED in settings, returning early") + return True # Not an error, just disabled + + debug_print("[WebSearch] Web search is ENABLED, proceeding...") + + web_search_agent = settings.get("web_search_agent") or {} + debug_print(f"[WebSearch] web_search_agent config present: {bool(web_search_agent)}") + if web_search_agent: + # Avoid logging sensitive data, just log structure + debug_print(f"[WebSearch] web_search_agent keys: {list(web_search_agent.keys())}") + + other_settings = web_search_agent.get("other_settings") or {} + debug_print(f"[WebSearch] other_settings keys: {list(other_settings.keys()) if other_settings else ''}") + + foundry_settings = other_settings.get("azure_ai_foundry") or {} + debug_print(f"[WebSearch] foundry_settings present: {bool(foundry_settings)}") + if foundry_settings: + # Log only non-sensitive keys + safe_keys = ['agent_id', 'project_id', 'endpoint'] + safe_info = {k: foundry_settings.get(k, '') for k in safe_keys} + debug_print(f"[WebSearch] foundry_settings (safe keys): {safe_info}") + + agent_id = (foundry_settings.get("agent_id") or "").strip() + debug_print(f"[WebSearch] Extracted agent_id: '{agent_id}'") + + if not agent_id: + log_event( + "[WebSearch] Skipping Foundry web search: agent_id is not configured", + extra={ + "conversation_id": conversation_id, + "user_id": user_id, + }, + level=logging.WARNING, + ) + debug_print("[WebSearch] Foundry agent_id not configured, skipping web search.") + # Add failure message 
so the model knows search was requested but not configured + system_messages_for_augmentation.append({ + "role": "system", + "content": "Web search was requested but is not properly configured. Please inform the user that web search is currently unavailable and you cannot provide real-time information. Do not attempt to answer questions requiring current information from your training data.", + }) + return False # Configuration error + + debug_print(f"[WebSearch] Agent ID is configured: {agent_id}") + + query_text = None + try: + query_text = search_query + debug_print(f"[WebSearch] Using search_query as query_text: {query_text[:100] if query_text else None}...") + except NameError: + query_text = None + debug_print("[WebSearch] search_query not defined, query_text is None") + + query_text = (query_text or user_message or "").strip() + debug_print(f"[WebSearch] Final query_text after fallback: '{query_text[:100] if query_text else ''}'") + + if not query_text: + debug_print("[WebSearch] Query text is EMPTY after processing, skipping web search") + log_event( + "[WebSearch] Skipping Foundry web search: empty query", + extra={ + "conversation_id": conversation_id, + "user_id": user_id, + }, + level=logging.WARNING, + ) + return True # Not an error, just empty query + + debug_print(f"[WebSearch] Building message history with query: {query_text[:100]}...") + message_history = [ + ChatMessageContent(role="user", content=query_text) + ] + debug_print(f"[WebSearch] Message history created with {len(message_history)} message(s)") + + try: + foundry_metadata = { + "conversation_id": conversation_id, + "user_id": user_id, + "message_id": user_message_id, + "chat_type": chat_type, + "document_scope": document_scope, + "group_id": active_group_id if chat_type == "group" else None, + "public_workspace_id": active_public_workspace_id, + "search_query": query_text, + } + debug_print(f"[WebSearch] Foundry metadata prepared: {json.dumps(foundry_metadata, default=str)}") + + debug_print("[WebSearch] Calling execute_foundry_agent...") + debug_print(f"[WebSearch] foundry_settings keys: {list(foundry_settings.keys())}") + debug_print(f"[WebSearch] global_settings type: {type(settings)}") + + result = asyncio.run( + execute_foundry_agent( + foundry_settings=foundry_settings, + global_settings=settings, + message_history=message_history, + metadata={k: v for k, v in foundry_metadata.items() if v is not None}, + ) + ) + except FoundryAgentInvocationError as exc: + log_event( + f"[WebSearch] Foundry agent invocation failed: {exc}", + extra={ + "conversation_id": conversation_id, + "user_id": user_id, + "agent_id": agent_id, + }, + level=logging.ERROR, + exceptionTraceback=True, + ) + # Add failure message so the model informs the user + system_messages_for_augmentation.append({ + "role": "system", + "content": f"Web search failed with error: {exc}. Please inform the user that the web search encountered an error and you cannot provide real-time information for this query. 
Do not attempt to answer questions requiring current information from your training data - instead, acknowledge the search failure and suggest the user try again.", + }) + return False # Search failed + except Exception as exc: + log_event( + f"[WebSearch] Unexpected error invoking Foundry agent: {exc}", + extra={ + "conversation_id": conversation_id, + "user_id": user_id, + "agent_id": agent_id, + }, + level=logging.ERROR, + exceptionTraceback=True, + ) + # Add failure message so the model informs the user + system_messages_for_augmentation.append({ + "role": "system", + "content": f"Web search failed with an unexpected error: {exc}. Please inform the user that the web search encountered an error and you cannot provide real-time information for this query. Do not attempt to answer questions requiring current information from your training data - instead, acknowledge the search failure and suggest the user try again.", + }) + return False # Search failed + + debug_print("[WebSearch] ========== FOUNDRY AGENT RESULT ==========") + debug_print(f"[WebSearch] Result type: {type(result)}") + debug_print(f"[WebSearch] Result has message: {bool(result.message)}") + debug_print(f"[WebSearch] Result has citations: {bool(result.citations)}") + debug_print(f"[WebSearch] Result has metadata: {bool(result.metadata)}") + debug_print(f"[WebSearch] Result model: {getattr(result, 'model', 'N/A')}") + + if result.message: + debug_print(f"[WebSearch] Result message length: {len(result.message)} chars") + debug_print(f"[WebSearch] Result message preview: {result.message[:500] if len(result.message) > 500 else result.message}") + else: + debug_print("[WebSearch] Result message is EMPTY or None") + + if result.citations: + debug_print(f"[WebSearch] Result citations count: {len(result.citations)}") + for i, cit in enumerate(result.citations[:3]): + debug_print(f"[WebSearch] Citation {i}: {json.dumps(cit, default=str)[:200]}...") + else: + debug_print("[WebSearch] Result citations is EMPTY or None") + + if result.metadata: + try: + metadata_payload = json.dumps(result.metadata, default=str) + except (TypeError, ValueError): + metadata_payload = str(result.metadata) + debug_print(f"[WebSearch] Foundry metadata: {metadata_payload}") + else: + debug_print("[WebSearch] Foundry metadata: ") + + if result.message: + debug_print("[WebSearch] Adding result message to system_messages_for_augmentation") + system_messages_for_augmentation.append({ + "role": "system", + "content": f"Web search results:\n{result.message}", + }) + debug_print(f"[WebSearch] Added system message to augmentation list. 
Total augmentation messages: {len(system_messages_for_augmentation)}") + + debug_print("[WebSearch] Extracting web citations from result message...") + web_citations = _extract_web_search_citations_from_content(result.message) + debug_print(f"[WebSearch] Extracted {len(web_citations)} web citations from message content") + if web_citations: + web_search_citations_list.extend(web_citations) + debug_print(f"[WebSearch] Total web_search_citations_list now has {len(web_search_citations_list)} citations") + else: + debug_print("[WebSearch] No web citations extracted from message content") + else: + debug_print("[WebSearch] No result.message to process for augmentation") + + citations = result.citations or [] + debug_print(f"[WebSearch] Processing {len(citations)} citations from result.citations") + if citations: + for i, citation in enumerate(citations): + debug_print(f"[WebSearch] Processing citation {i}: {json.dumps(citation, default=str)[:200]}...") + try: + serializable = json.loads(json.dumps(citation, default=str)) + except (TypeError, ValueError): + serializable = {"value": str(citation)} + citation_title = serializable.get("title") or serializable.get("url") or "Web search source" + debug_print(f"[WebSearch] Adding agent citation with title: {citation_title}") + agent_citations_list.append({ + "tool_name": citation_title, + "function_name": "azure_ai_foundry_web_search", + "plugin_name": "azure_ai_foundry", + "function_arguments": serializable, + "function_result": serializable, + "timestamp": datetime.utcnow().isoformat(), + "success": True, + }) + debug_print(f"[WebSearch] Total agent_citations_list now has {len(agent_citations_list)} citations") + else: + debug_print("[WebSearch] No citations in result.citations to process") + + debug_print(f"[WebSearch] Starting token usage extraction from Foundry metadata. 
Metadata: {result.metadata}") + token_usage = _extract_token_usage_from_metadata(result.metadata or {}) + if token_usage.get("total_tokens"): + try: + workspace_type = 'personal' + if active_public_workspace_id: + workspace_type = 'public' + elif active_group_id: + workspace_type = 'group' + + log_token_usage( + user_id=user_id, + token_type='web_search', + total_tokens=token_usage.get('total_tokens', 0), + model=result.model or 'azure-ai-foundry-web-search', + workspace_type=workspace_type, + prompt_tokens=token_usage.get('prompt_tokens'), + completion_tokens=token_usage.get('completion_tokens'), + conversation_id=conversation_id, + message_id=user_message_id, + group_id=active_group_id, + public_workspace_id=active_public_workspace_id, + additional_context={ + 'agent_id': agent_id, + 'search_query': query_text, + 'token_source': 'foundry_metadata' + } + ) + except Exception as log_error: + log_event( + f"[WebSearch] Failed to log web search token usage: {log_error}", + extra={ + "conversation_id": conversation_id, + "user_id": user_id, + "agent_id": agent_id, + }, + level=logging.WARNING, + ) + + debug_print("[WebSearch] ========== FINAL SUMMARY ==========") + debug_print(f"[WebSearch] system_messages_for_augmentation count: {len(system_messages_for_augmentation)}") + debug_print(f"[WebSearch] agent_citations_list count: {len(agent_citations_list)}") + debug_print(f"[WebSearch] web_search_citations_list count: {len(web_search_citations_list)}") + debug_print(f"[WebSearch] Token usage extracted: {token_usage}") + debug_print("[WebSearch] ========== EXITING perform_web_search ==========") + + log_event( + "[WebSearch] Foundry web search invocation complete", + extra={ + "conversation_id": conversation_id, + "user_id": user_id, + "agent_id": agent_id, + "citation_count": len(citations), + }, + level=logging.INFO, + ) + + return True # Search succeeded \ No newline at end of file diff --git a/application/single_app/route_backend_control_center.py b/application/single_app/route_backend_control_center.py new file mode 100644 index 00000000..2c3952f1 --- /dev/null +++ b/application/single_app/route_backend_control_center.py @@ -0,0 +1,6982 @@ +# route_backend_control_center.py + +from config import * +from functions_authentication import * +from functions_settings import * +from functions_logging import * +from functions_activity_logging import * +from functions_approvals import * +from functions_documents import update_document, delete_document, delete_document_chunks +from functions_group import delete_group +from utils_cache import invalidate_group_search_cache +from swagger_wrapper import swagger_route, get_auth_security +from datetime import datetime, timedelta, timezone +import json +from functions_debug import debug_print + +def enhance_user_with_activity(user, force_refresh=False): + """ + Enhance user data with activity information and computed fields. + If force_refresh is False, will try to use cached metrics from user settings. 
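+ Returns the user record extended with an 'activity' block (login_metrics, chat_metrics, document_metrics) plus computed access_status and file_upload_status fields; freshly calculated metrics are written back to user settings for caching.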
+ """ + try: + user_id = user.get('id') + debug_print(f"👤 [USER DEBUG] Processing user {user_id}, force_refresh={force_refresh}") + + # Check both user and app settings for enhanced citations + user_enhanced_citation = user.get('settings', {}).get('enable_enhanced_citation', False) + from functions_settings import get_settings + app_settings = get_settings() + app_enhanced_citations = app_settings.get('enable_enhanced_citations', False) if app_settings else False + + debug_print(f"📋 [SETTINGS DEBUG] User enhanced citation: {user_enhanced_citation}") + debug_print(f"📋 [SETTINGS DEBUG] App enhanced citations: {app_enhanced_citations}") + debug_print(f"📋 [SETTINGS DEBUG] Will use app setting: {app_enhanced_citations}") + enhanced = { + 'id': user.get('id'), + 'email': user.get('email', ''), + 'display_name': user.get('display_name', ''), + 'lastUpdated': user.get('lastUpdated'), + 'settings': user.get('settings', {}), + 'profile_image': user.get('settings', {}).get('profileImage'), # Extract profile image + 'activity': { + 'login_metrics': { + 'total_logins': 0, + 'last_login': None + }, + 'chat_metrics': { + 'last_day_conversations': 0, + 'total_conversations': 0, + 'total_messages': 0, + 'total_content_size': 0 # Based on actual message content length + }, + 'document_metrics': { + 'personal_workspace_enabled': user.get('settings', {}).get('enable_personal_workspace', False), + # enhanced_citation_enabled is NOT stored in user data - frontend gets it from app settings + 'total_documents': 0, + 'ai_search_size': 0, # pages × 80KB + 'storage_account_size': 0 # Actual file sizes from storage + } + }, + 'access_status': 'allow', # default + 'file_upload_status': 'allow' # default + } + + # Extract access status + access_settings = user.get('settings', {}).get('access', {}) + if access_settings.get('status') == 'deny': + datetime_to_allow = access_settings.get('datetime_to_allow') + if datetime_to_allow: + # Check if time-based restriction has expired + try: + allow_time = datetime.fromisoformat(datetime_to_allow.replace('Z', '+00:00') if 'Z' in datetime_to_allow else datetime_to_allow) + if datetime.now(timezone.utc) >= allow_time: + enhanced['access_status'] = 'allow' # Expired, should be auto-restored + else: + enhanced['access_status'] = f"deny_until_{datetime_to_allow}" + except: + enhanced['access_status'] = 'deny' + else: + enhanced['access_status'] = 'deny' + + # Extract file upload status + file_upload_settings = user.get('settings', {}).get('file_uploads', {}) + if file_upload_settings.get('status') == 'deny': + datetime_to_allow = file_upload_settings.get('datetime_to_allow') + if datetime_to_allow: + # Check if time-based restriction has expired + try: + allow_time = datetime.fromisoformat(datetime_to_allow.replace('Z', '+00:00') if 'Z' in datetime_to_allow else datetime_to_allow) + if datetime.now(timezone.utc) >= allow_time: + enhanced['file_upload_status'] = 'allow' # Expired, should be auto-restored + else: + enhanced['file_upload_status'] = f"deny_until_{datetime_to_allow}" + except: + enhanced['file_upload_status'] = 'deny' + else: + enhanced['file_upload_status'] = 'deny' + + # Check for cached metrics if not forcing refresh + if not force_refresh: + cached_metrics = user.get('settings', {}).get('metrics') + if cached_metrics and cached_metrics.get('calculated_at'): + try: + debug_print(f"Using cached metrics for user {user.get('id')}") + # Use cached data regardless of age when not forcing refresh + if 'login_metrics' in cached_metrics: + enhanced['activity']['login_metrics'] 
= cached_metrics['login_metrics'] + if 'chat_metrics' in cached_metrics: + enhanced['activity']['chat_metrics'] = cached_metrics['chat_metrics'] + if 'document_metrics' in cached_metrics: + # Merge cached document metrics with settings-based flags + cached_doc_metrics = cached_metrics['document_metrics'].copy() + cached_doc_metrics['personal_workspace_enabled'] = user.get('settings', {}).get('enable_personal_workspace', False) + # Do NOT include enhanced_citation_enabled in user data - frontend gets it from app settings + enhanced['activity']['document_metrics'] = cached_doc_metrics + return enhanced + except Exception as cache_e: + debug_print(f"Error using cached metrics for user {user.get('id')}: {cache_e}") + + # If no cached metrics and not forcing refresh, return with default/empty metrics + # Do NOT include enhanced_citation_enabled in user data - frontend gets it from app settings + debug_print(f"No cached metrics for user {user.get('id')}, returning default values (use refresh button to calculate)") + return enhanced + + debug_print(f"Force refresh requested - calculating fresh metrics for user {user.get('id')}") + + + # Try to get comprehensive conversation metrics + try: + # Get all user conversations with last_updated info + user_conversations_query = """ + SELECT c.id, c.last_updated FROM c WHERE c.user_id = @user_id + """ + user_conversations_params = [{"name": "@user_id", "value": user.get('id')}] + user_conversations = list(cosmos_conversations_container.query_items( + query=user_conversations_query, + parameters=user_conversations_params, + enable_cross_partition_query=True + )) + + # Total conversations count (all time) + enhanced['activity']['chat_metrics']['total_conversations'] = len(user_conversations) + + # Find last day conversation (most recent conversation with latest last_updated) + last_day_conversation = None + if user_conversations: + # Sort by last_updated to get the most recent + sorted_conversations = sorted( + user_conversations, + key=lambda x: x.get('last_updated', ''), + reverse=True + ) + if sorted_conversations: + most_recent_conv = sorted_conversations[0] + last_updated = most_recent_conv.get('last_updated') + if last_updated: + # Parse the date and format as MM/DD/YYYY + try: + date_obj = datetime.fromisoformat(last_updated.replace('Z', '+00:00')) + last_day_conversation = date_obj.strftime('%m/%d/%Y') + except: + last_day_conversation = 'Invalid date' + + enhanced['activity']['chat_metrics']['last_day_conversation'] = last_day_conversation or 'Never' + + # Get message count and total size using two-step query approach + if user_conversations: + conversation_ids = [conv['id'] for conv in user_conversations] + total_messages = 0 + total_message_size = 0 + + # Process conversations in batches to avoid query limits + batch_size = 10 + for i in range(0, len(conversation_ids), batch_size): + batch_ids = conversation_ids[i:i+batch_size] + + # Use parameterized query with IN clause for message querying + try: + # Build the IN parameters for the batch + in_params = [] + param_placeholders = [] + for j, conv_id in enumerate(batch_ids): + param_name = f"@conv_id_{j}" + param_placeholders.append(param_name) + in_params.append({"name": param_name, "value": conv_id}) + + # Split into separate queries to avoid MultipleAggregates issue + # First query: Get message count + messages_count_query = f""" + SELECT VALUE COUNT(1) + FROM m + WHERE m.conversation_id IN ({', '.join(param_placeholders)}) + """ + + count_result = list(cosmos_messages_container.query_items( + 
query=messages_count_query, + parameters=in_params, + enable_cross_partition_query=True + )) + + batch_messages = count_result[0] if count_result else 0 + total_messages += batch_messages + + # Second query: Get message size + messages_size_query = f""" + SELECT VALUE SUM(LENGTH(TO_STRING(m))) + FROM m + WHERE m.conversation_id IN ({', '.join(param_placeholders)}) + """ + + size_result = list(cosmos_messages_container.query_items( + query=messages_size_query, + parameters=in_params, + enable_cross_partition_query=True + )) + + # Use a distinct name so the loop's batch_size (10) is not overwritten + batch_message_size = size_result[0] if size_result else 0 + total_message_size += batch_message_size or 0 + + debug_print(f"Messages batch {i//batch_size + 1}: {batch_messages} messages, {batch_message_size or 0} bytes") + + except Exception as msg_e: + debug_print(f"Could not query message sizes for batch {i//batch_size + 1}: {msg_e}") + # Try individual conversation queries as fallback + for conv_id in batch_ids: + try: + individual_params = [{"name": "@conv_id", "value": conv_id}] + + # Individual count query + individual_count_query = """ + SELECT VALUE COUNT(1) + FROM m + WHERE m.conversation_id = @conv_id + """ + count_result = list(cosmos_messages_container.query_items( + query=individual_count_query, + parameters=individual_params, + enable_cross_partition_query=True + )) + total_messages += count_result[0] if count_result else 0 + + # Individual size query + individual_size_query = """ + SELECT VALUE SUM(LENGTH(TO_STRING(m))) + FROM m + WHERE m.conversation_id = @conv_id + """ + size_result = list(cosmos_messages_container.query_items( + query=individual_size_query, + parameters=individual_params, + enable_cross_partition_query=True + )) + total_message_size += size_result[0] if size_result and size_result[0] else 0 + + except Exception as individual_e: + debug_print(f"Could not query individual conversation {conv_id}: {individual_e}") + continue + + enhanced['activity']['chat_metrics']['total_messages'] = total_messages + enhanced['activity']['chat_metrics']['total_message_size'] = total_message_size + debug_print(f"Final chat metrics for user {user.get('id')}: {total_messages} messages, {total_message_size} bytes") + + except Exception as e: + debug_print(f"Could not get chat metrics for user {user.get('id')}: {e}") + + # Try to get comprehensive login metrics + try: + # Get total login count (all time) + total_logins_query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.user_id = @user_id AND c.activity_type = 'user_login' + """ + login_params = [{"name": "@user_id", "value": user.get('id')}] + total_logins = list(cosmos_activity_logs_container.query_items( + query=total_logins_query, + parameters=login_params, + enable_cross_partition_query=True + )) + enhanced['activity']['login_metrics']['total_logins'] = total_logins[0] if total_logins else 0 + + # Get last login timestamp + last_login_query = """ + SELECT TOP 1 c.timestamp, c.created_at FROM c + WHERE c.user_id = @user_id AND c.activity_type = 'user_login' + ORDER BY c.timestamp DESC + """ + last_login_result = list(cosmos_activity_logs_container.query_items( + query=last_login_query, + parameters=login_params, + enable_cross_partition_query=True + )) + if last_login_result: + login_record = last_login_result[0] + enhanced['activity']['login_metrics']['last_login'] = login_record.get('timestamp') or login_record.get('created_at') + + except Exception as e: + debug_print(f"Could not get login metrics for user {user.get('id')}: {e}") + + # Try to get comprehensive document metrics + try: + # Get document count using separate 
query (avoid MultipleAggregates issue) + doc_count_query = """ + SELECT VALUE COUNT(1) + FROM c + WHERE c.user_id = @user_id AND c.type = 'document_metadata' + """ + doc_metrics_params = [{"name": "@user_id", "value": user.get('id')}] + doc_count_result = list(cosmos_user_documents_container.query_items( + query=doc_count_query, + parameters=doc_metrics_params, + enable_cross_partition_query=True + )) + + # Get total pages using separate query + doc_pages_query = """ + SELECT VALUE SUM(c.number_of_pages) + FROM c + WHERE c.user_id = @user_id AND c.type = 'document_metadata' + """ + doc_pages_result = list(cosmos_user_documents_container.query_items( + query=doc_pages_query, + parameters=doc_metrics_params, + enable_cross_partition_query=True + )) + + total_docs = doc_count_result[0] if doc_count_result else 0 + total_pages = doc_pages_result[0] if doc_pages_result and doc_pages_result[0] else 0 + + enhanced['activity']['document_metrics']['total_documents'] = total_docs + # AI search size = pages × 80KB + enhanced['activity']['document_metrics']['ai_search_size'] = total_pages * 22 * 1024 # 22KB per page + + # Last day upload tracking removed - keeping only document count and sizes + + # Get actual storage account size if enhanced citation is enabled (check app settings) + debug_print(f"💾 [STORAGE DEBUG] Enhanced citation enabled: {app_enhanced_citations}") + if app_enhanced_citations: + debug_print(f"💾 [STORAGE DEBUG] Starting storage calculation for user {user.get('id')}") + try: + # Query actual file sizes from Azure Storage + storage_client = CLIENTS.get("storage_account_office_docs_client") + debug_print(f"💾 [STORAGE DEBUG] Storage client retrieved: {storage_client is not None}") + if storage_client: + user_folder_prefix = f"{user.get('id')}/" + total_storage_size = 0 + + debug_print(f"💾 [STORAGE DEBUG] Looking for blobs with prefix: {user_folder_prefix}") + + # List all blobs in the user's folder + container_client = storage_client.get_container_client(storage_account_user_documents_container_name) + blob_list = container_client.list_blobs(name_starts_with=user_folder_prefix) + + blob_count = 0 + for blob in blob_list: + total_storage_size += blob.size + blob_count += 1 + debug_print(f"💾 [STORAGE DEBUG] Blob {blob.name}: {blob.size} bytes") + debug_print(f"Storage blob {blob.name}: {blob.size} bytes") + + debug_print(f"💾 [STORAGE DEBUG] Found {blob_count} blobs, total size: {total_storage_size} bytes") + enhanced['activity']['document_metrics']['storage_account_size'] = total_storage_size + debug_print(f"Total storage size for user {user.get('id')}: {total_storage_size} bytes") + else: + debug_print(f"💾 [STORAGE DEBUG] Storage client NOT available for user {user.get('id')}") + debug_print(f"Storage client not available for user {user.get('id')}") + # Fallback to estimation if storage client not available + storage_size_query = """ + SELECT c.file_name, c.number_of_pages FROM c + WHERE c.user_id = @user_id AND c.type = 'document_metadata' + """ + storage_docs = list(cosmos_user_documents_container.query_items( + query=storage_size_query, + parameters=doc_metrics_params, + enable_cross_partition_query=True + )) + + total_storage_size = 0 + for doc in storage_docs: + # Estimate file size based on pages and file type + pages = doc.get('number_of_pages', 1) + file_name = doc.get('file_name', '') + + if file_name.lower().endswith('.pdf'): + # PDF: ~500KB per page average + estimated_size = pages * 500 * 1024 + elif file_name.lower().endswith(('.docx', '.doc')): + # Word docs: ~300KB per 
page average + estimated_size = pages * 300 * 1024 + elif file_name.lower().endswith(('.pptx', '.ppt')): + # PowerPoint: ~800KB per page average + estimated_size = pages * 800 * 1024 + else: + # Other files: ~400KB per page average + estimated_size = pages * 400 * 1024 + + total_storage_size += estimated_size + + enhanced['activity']['document_metrics']['storage_account_size'] = total_storage_size + debug_print(f"💾 [STORAGE DEBUG] Fallback estimation complete: {total_storage_size} bytes") + debug_print(f"Estimated storage size for user {user.get('id')}: {total_storage_size} bytes") + + except Exception as storage_e: + debug_print(f"❌ [STORAGE DEBUG] Storage calculation failed for user {user.get('id')}: {storage_e}") + debug_print(f"Could not calculate storage size for user {user.get('id')}: {storage_e}") + # Set to 0 if we can't calculate + enhanced['activity']['document_metrics']['storage_account_size'] = 0 + + except Exception as e: + debug_print(f"Could not get document metrics for user {user.get('id')}: {e}") + + # Save calculated metrics to user settings for caching (only if we calculated fresh data) + if force_refresh or not user.get('settings', {}).get('metrics', {}).get('calculated_at'): + try: + from functions_settings import update_user_settings + + # Prepare metrics data for caching + metrics_cache = { + 'calculated_at': datetime.now(timezone.utc).isoformat(), + 'login_metrics': enhanced['activity']['login_metrics'], + 'chat_metrics': enhanced['activity']['chat_metrics'], + 'document_metrics': { + 'total_documents': enhanced['activity']['document_metrics']['total_documents'], + 'ai_search_size': enhanced['activity']['document_metrics']['ai_search_size'], + 'storage_account_size': enhanced['activity']['document_metrics']['storage_account_size'] + # Note: personal_workspace_enabled and enhanced_citation_enabled are not cached as they're settings-based + } + } + + # Update user settings with cached metrics + settings_update = {'metrics': metrics_cache} + update_success = update_user_settings(user.get('id'), settings_update) + + if update_success: + debug_print(f"Successfully cached metrics for user {user.get('id')}") + else: + debug_print(f"Failed to cache metrics for user {user.get('id')}") + + except Exception as cache_save_e: + debug_print(f"Error saving metrics cache for user {user.get('id')}: {cache_save_e}") + + return enhanced + + except Exception as e: + debug_print(f"Error enhancing user data: {e}") + return user # Return original user data if enhancement fails + +def enhance_public_workspace_with_activity(workspace, force_refresh=False): + """ + Enhance public workspace data with activity information and computed fields. + Follows the same pattern as group enhancement but for public workspaces. 
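+ Returns a flat structure (document_count, member_count, storage_size, last_activity, status, ...) for the frontend, alongside a nested 'activity' block kept for backward compatibility.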
+ """ + try: + workspace_id = workspace.get('id') + debug_print(f"🌐 [PUBLIC WORKSPACE DEBUG] Processing workspace {workspace_id}, force_refresh={force_refresh}") + + # Get app settings for enhanced citations + from functions_settings import get_settings + app_settings = get_settings() + app_enhanced_citations = app_settings.get('enable_enhanced_citations', False) if app_settings else False + + debug_print(f"📋 [PUBLIC WORKSPACE SETTINGS DEBUG] App enhanced citations: {app_enhanced_citations}") + + # Create flat structure that matches frontend expectations + owner_info = workspace.get('owner', {}) + + enhanced = { + 'id': workspace.get('id'), + 'name': workspace.get('name', ''), + 'description': workspace.get('description', ''), + 'owner': workspace.get('owner', {}), + 'admins': workspace.get('admins', []), + 'documentManagers': workspace.get('documentManagers', []), + 'createdDate': workspace.get('createdDate'), + 'modifiedDate': workspace.get('modifiedDate'), + 'created_at': workspace.get('createdDate'), # Alias for frontend + + # Flat fields expected by frontend + 'owner_name': owner_info.get('displayName') or owner_info.get('display_name') or owner_info.get('name', 'Unknown'), + 'owner_email': owner_info.get('email', ''), + 'created_by': owner_info.get('displayName') or owner_info.get('display_name') or owner_info.get('name', 'Unknown'), + 'document_count': 0, # Will be updated from database + 'member_count': len(workspace.get('admins', [])) + len(workspace.get('documentManagers', [])) + (1 if owner_info else 0), # Total members including owner + 'storage_size': 0, # Will be updated from storage account + 'last_activity': None, # Will be updated from public_documents + 'recent_activity_count': 0, # Will be calculated + 'status': workspace.get('status', 'active'), # Read from workspace document, default to 'active' + 'statusHistory': workspace.get('statusHistory', []), # Include status change history + + # Keep nested structure for backward compatibility + 'activity': { + 'document_metrics': { + 'total_documents': 0, + 'ai_search_size': 0, # pages × 80KB + 'storage_account_size': 0 # Actual file sizes from storage + }, + 'member_metrics': { + 'total_members': len(workspace.get('admins', [])) + len(workspace.get('documentManagers', [])) + (1 if owner_info else 0), + 'admin_count': len(workspace.get('admins', [])), + 'document_manager_count': len(workspace.get('documentManagers', [])), + } + } + } + + # Check for cached metrics if not forcing refresh + if not force_refresh: + cached_metrics = workspace.get('metrics') + if cached_metrics and cached_metrics.get('calculated_at'): + try: + # Check if cache is recent (within last 24 hours) + cache_time = datetime.fromisoformat(cached_metrics['calculated_at'].replace('Z', '+00:00')) + now = datetime.now(timezone.utc) + + if now - cache_time < timedelta(hours=24): # Use 24-hour cache window + debug_print(f"🌐 [PUBLIC WORKSPACE DEBUG] Using cached metrics for workspace {workspace_id} (cached at {cache_time})") + if 'document_metrics' in cached_metrics: + doc_metrics = cached_metrics['document_metrics'] + enhanced['activity']['document_metrics'] = doc_metrics + # Update flat fields + enhanced['document_count'] = doc_metrics.get('total_documents', 0) + enhanced['storage_size'] = doc_metrics.get('storage_account_size', 0) + + # Apply cached activity metrics if available + if 'last_activity' in cached_metrics: + enhanced['last_activity'] = cached_metrics['last_activity'] + if 'recent_activity_count' in cached_metrics: + enhanced['recent_activity_count'] 
= cached_metrics['recent_activity_count'] + + debug_print(f"🌐 [PUBLIC WORKSPACE DEBUG] Returning cached data for {workspace_id}: {enhanced['activity']['document_metrics']}") + return enhanced + else: + debug_print(f"🌐 [PUBLIC WORKSPACE DEBUG] Cache expired for workspace {workspace_id} (cached at {cache_time}, age: {now - cache_time})") + except Exception as cache_e: + debug_print(f"Error using cached metrics for workspace {workspace_id}: {cache_e}") + + debug_print(f"No cached metrics for workspace {workspace_id}, calculating basic document count") + + # Calculate at least the basic document count + try: + doc_count_query = "SELECT VALUE COUNT(1) FROM c WHERE c.public_workspace_id = @workspace_id AND c.type = 'document_metadata'" + doc_count_params = [{"name": "@workspace_id", "value": workspace_id}] + + doc_count_results = list(cosmos_public_documents_container.query_items( + query=doc_count_query, + parameters=doc_count_params, + enable_cross_partition_query=True + )) + + total_docs = 0 + if doc_count_results and len(doc_count_results) > 0: + total_docs = doc_count_results[0] if isinstance(doc_count_results[0], int) else 0 + + debug_print(f"📄 [PUBLIC WORKSPACE BASIC DEBUG] Document count for workspace {workspace_id}: {total_docs}") + enhanced['activity']['document_metrics']['total_documents'] = total_docs + enhanced['document_count'] = total_docs + + except Exception as basic_e: + debug_print(f"Error calculating basic document count for workspace {workspace_id}: {basic_e}") + + return enhanced + + # Force refresh - calculate fresh metrics + debug_print(f"🌐 [PUBLIC WORKSPACE DEBUG] Force refresh - calculating fresh metrics for workspace {workspace_id}") + + # Calculate document metrics from public_documents container + try: + # Count documents for this workspace + documents_count_query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.public_workspace_id = @workspace_id + AND c.type = 'document_metadata' + """ + documents_count_params = [{"name": "@workspace_id", "value": workspace_id}] + + documents_count_result = list(cosmos_public_documents_container.query_items( + query=documents_count_query, + parameters=documents_count_params, + enable_cross_partition_query=True + )) + + total_documents = documents_count_result[0] if documents_count_result else 0 + enhanced['activity']['document_metrics']['total_documents'] = total_documents + enhanced['document_count'] = total_documents + + # Calculate AI search size (pages × 80KB) + pages_sum_query = """ + SELECT VALUE SUM(c.number_of_pages) FROM c + WHERE c.public_workspace_id = @workspace_id + AND c.type = 'document_metadata' + """ + pages_sum_params = [{"name": "@workspace_id", "value": workspace_id}] + + pages_sum_result = list(cosmos_public_documents_container.query_items( + query=pages_sum_query, + parameters=pages_sum_params, + enable_cross_partition_query=True + )) + + total_pages = pages_sum_result[0] if pages_sum_result and pages_sum_result[0] else 0 + ai_search_size = total_pages * 22 * 1024 # 22KB per page + enhanced['activity']['document_metrics']['ai_search_size'] = ai_search_size + + debug_print(f"📊 [PUBLIC WORKSPACE DOCUMENT DEBUG] Workspace {workspace_id}: {total_documents} documents, {total_pages} pages, {ai_search_size} AI search size") + + # Find last upload date + last_upload_query = """ + SELECT c.upload_date + FROM c + WHERE c.public_workspace_id = @workspace_id + AND c.type = 'document_metadata' + """ + last_upload_params = [{"name": "@workspace_id", "value": workspace_id}] + + upload_docs = 
list(cosmos_public_documents_container.query_items( + query=last_upload_query, + parameters=last_upload_params, + enable_cross_partition_query=True + )) + + # Last day upload tracking removed - keeping only document count and sizes + debug_print(f"� [PUBLIC WORKSPACE DEBUG] Document metrics calculation complete for workspace {workspace_id}") + + except Exception as doc_e: + debug_print(f"❌ [PUBLIC WORKSPACE DOCUMENT DEBUG] Error calculating document metrics for workspace {workspace_id}: {doc_e}") + + # Get actual storage account size if enhanced citation is enabled + debug_print(f"💾 [PUBLIC WORKSPACE STORAGE DEBUG] Enhanced citation enabled: {app_enhanced_citations}") + if app_enhanced_citations: + debug_print(f"💾 [PUBLIC WORKSPACE STORAGE DEBUG] Starting storage calculation for workspace {workspace_id}") + try: + # Query actual file sizes from Azure Storage for public workspace documents + storage_client = CLIENTS.get("storage_account_office_docs_client") + debug_print(f"💾 [PUBLIC WORKSPACE STORAGE DEBUG] Storage client retrieved: {storage_client is not None}") + if storage_client: + workspace_folder_prefix = f"{workspace_id}/" + total_storage_size = 0 + + debug_print(f"💾 [PUBLIC WORKSPACE STORAGE DEBUG] Looking for blobs with prefix: {workspace_folder_prefix}") + + # List all blobs in the workspace's folder - use PUBLIC documents container + container_client = storage_client.get_container_client(storage_account_public_documents_container_name) + blob_list = container_client.list_blobs(name_starts_with=workspace_folder_prefix) + + blob_count = 0 + for blob in blob_list: + total_storage_size += blob.size + blob_count += 1 + debug_print(f"💾 [PUBLIC WORKSPACE STORAGE DEBUG] Blob {blob.name}: {blob.size} bytes") + + debug_print(f"💾 [PUBLIC WORKSPACE STORAGE DEBUG] Found {blob_count} blobs, total size: {total_storage_size} bytes") + enhanced['activity']['document_metrics']['storage_account_size'] = total_storage_size + enhanced['storage_size'] = total_storage_size # Update flat field + else: + debug_print(f"💾 [PUBLIC WORKSPACE STORAGE DEBUG] Storage client NOT available for workspace {workspace_id}") + # Fallback to estimation if storage client not available + storage_size_query = """ + SELECT c.file_name, c.number_of_pages FROM c + WHERE c.public_workspace_id = @workspace_id AND c.type = 'document_metadata' + """ + storage_docs = list(cosmos_public_documents_container.query_items( + query=storage_size_query, + parameters=documents_count_params, + enable_cross_partition_query=True + )) + + total_storage_size = 0 + for doc in storage_docs: + # Estimate file size based on pages and file type + pages = doc.get('number_of_pages', 1) + file_name = doc.get('file_name', '') + + if file_name.lower().endswith('.pdf'): + # PDF: ~500KB per page average + estimated_size = pages * 500 * 1024 + elif file_name.lower().endswith(('.docx', '.doc')): + # Word docs: ~300KB per page average + estimated_size = pages * 300 * 1024 + elif file_name.lower().endswith(('.pptx', '.ppt')): + # PowerPoint: ~800KB per page average + estimated_size = pages * 800 * 1024 + else: + # Other files: ~400KB per page average + estimated_size = pages * 400 * 1024 + + total_storage_size += estimated_size + + enhanced['activity']['document_metrics']['storage_account_size'] = total_storage_size + enhanced['storage_size'] = total_storage_size # Update flat field + debug_print(f"💾 [PUBLIC WORKSPACE STORAGE DEBUG] Fallback estimation complete: {total_storage_size} bytes") + + except Exception as storage_e: + debug_print(f"❌ [PUBLIC 
WORKSPACE STORAGE DEBUG] Storage calculation failed for workspace {workspace_id}: {storage_e}") + # Set to 0 if we can't calculate + enhanced['activity']['document_metrics']['storage_account_size'] = 0 + enhanced['storage_size'] = 0 + + # Cache the computed metrics in the workspace document + if force_refresh: + try: + metrics_cache = { + 'document_metrics': enhanced['activity']['document_metrics'], + 'last_activity': enhanced.get('last_activity'), + 'recent_activity_count': enhanced.get('recent_activity_count', 0), + 'calculated_at': datetime.now(timezone.utc).isoformat() + } + + # Update workspace document with cached metrics + workspace['metrics'] = metrics_cache + cosmos_public_workspaces_container.upsert_item(workspace) + debug_print(f"Successfully cached metrics for workspace {workspace_id}") + + except Exception as cache_save_e: + debug_print(f"Error saving metrics cache for workspace {workspace_id}: {cache_save_e}") + + return enhanced + + except Exception as e: + debug_print(f"Error enhancing public workspace data: {e}") + return workspace # Return original workspace data if enhancement fails + +def enhance_group_with_activity(group, force_refresh=False): + """ + Enhance group data with activity information and computed fields. + Follows the same pattern as user enhancement but for groups. + """ + try: + group_id = group.get('id') + debug_print(f"👥 [GROUP DEBUG] Processing group {group_id}, force_refresh={force_refresh}") + + # Get app settings for enhanced citations + from functions_settings import get_settings + app_settings = get_settings() + app_enhanced_citations = app_settings.get('enable_enhanced_citations', False) if app_settings else False + + debug_print(f"📋 [GROUP SETTINGS DEBUG] App enhanced citations: {app_enhanced_citations}") + + # Create flat structure that matches frontend expectations + owner_info = group.get('owner', {}) + users_list = group.get('users', []) + + enhanced = { + 'id': group.get('id'), + 'name': group.get('name', ''), + 'description': group.get('description', ''), + 'owner': group.get('owner', {}), + 'users': users_list, + 'admins': group.get('admins', []), + 'documentManagers': group.get('documentManagers', []), + 'pendingUsers': group.get('pendingUsers', []), + 'createdDate': group.get('createdDate'), + 'modifiedDate': group.get('modifiedDate'), + 'created_at': group.get('createdDate'), # Alias for frontend + + # Flat fields expected by frontend + 'owner_name': owner_info.get('displayName') or owner_info.get('display_name') or owner_info.get('name', 'Unknown'), + 'owner_email': owner_info.get('email', ''), + 'created_by': owner_info.get('displayName') or owner_info.get('display_name') or owner_info.get('name', 'Unknown'), + 'member_count': len(users_list), # Owner is already included in users_list + 'document_count': 0, # Will be updated from database + 'storage_size': 0, # Will be updated from storage account + 'last_activity': None, # Will be updated from group_documents + 'recent_activity_count': 0, # Will be calculated + 'status': group.get('status', 'active'), # Read from group document, default to 'active' + 'statusHistory': group.get('statusHistory', []), # Include status change history + + # Keep nested structure for backward compatibility + 'activity': { + 'document_metrics': { + 'total_documents': 0, + 'ai_search_size': 0, # pages × 80KB + 'storage_account_size': 0 # Actual file sizes from storage + }, + 'member_metrics': { + 'total_members': len(users_list), # Owner is already included in users_list + 'admin_count': 
len(group.get('admins', [])),
+ 'document_manager_count': len(group.get('documentManagers', [])),
+ 'pending_count': len(group.get('pendingUsers', []))
+ }
+ }
+ }
+
+ # Check for cached metrics if not forcing refresh
+ if not force_refresh:
+ # Groups don't have settings like users, but we could store metrics in the group doc
+ cached_metrics = group.get('metrics')
+ if cached_metrics and cached_metrics.get('calculated_at'):
+ try:
+ # Check if cache is recent (within last 24 hours)
+ cache_time = datetime.fromisoformat(cached_metrics['calculated_at'].replace('Z', '+00:00'))
+ now = datetime.now(timezone.utc)
+
+ if now - cache_time < timedelta(hours=24): # Use 24-hour cache window
+ debug_print(f"👥 [GROUP DEBUG] Using cached metrics for group {group_id} (cached at {cache_time})")
+ if 'document_metrics' in cached_metrics:
+ doc_metrics = cached_metrics['document_metrics']
+ enhanced['activity']['document_metrics'] = doc_metrics
+ # Update flat fields
+ enhanced['document_count'] = doc_metrics.get('total_documents', 0)
+ enhanced['storage_size'] = doc_metrics.get('storage_account_size', 0)
+ # Cached document metrics applied successfully
+
+ debug_print(f"👥 [GROUP DEBUG] Returning cached data for {group_id}: {enhanced['activity']['document_metrics']}")
+ return enhanced
+ else:
+ debug_print(f"👥 [GROUP DEBUG] Cache expired for group {group_id} (cached at {cache_time}, age: {now - cache_time})")
+ except Exception as cache_e:
+ debug_print(f"Error using cached metrics for group {group_id}: {cache_e}")
+
+ debug_print(f"No cached metrics for group {group_id}, calculating basic document count")
+
+ # Calculate at least the basic document count
+ try:
+ doc_count_query = "SELECT VALUE COUNT(1) FROM c WHERE c.group_id = @group_id"
+ doc_count_params = [{"name": "@group_id", "value": group_id}]
+
+ doc_count_results = list(cosmos_group_documents_container.query_items(
+ query=doc_count_query,
+ parameters=doc_count_params,
+ enable_cross_partition_query=True
+ ))
+
+ total_docs = 0
+ if doc_count_results and len(doc_count_results) > 0:
+ total_docs = doc_count_results[0] if isinstance(doc_count_results[0], int) else 0
+
+ debug_print(f"📄 [GROUP BASIC DEBUG] Document count for group {group_id}: {total_docs}")
+ enhanced['activity']['document_metrics']['total_documents'] = total_docs
+ enhanced['document_count'] = total_docs
+
+ except Exception as basic_e:
+ debug_print(f"Error calculating basic document count for group {group_id}: {basic_e}")
+
+ return enhanced
+
+ # Force refresh - calculate fresh metrics
+ debug_print(f"👥 [GROUP DEBUG] Force refresh - calculating fresh metrics for group {group_id}")
+
+ # Calculate document metrics from group_documents container
+ try:
+ # Get document count using separate query (avoid MultipleAggregates issue) - same as user management
+ doc_count_query = """
+ SELECT VALUE COUNT(1)
+ FROM c
+ WHERE c.group_id = @group_id AND c.type = 'document_metadata'
+ """
+ doc_metrics_params = [{"name": "@group_id", "value": group_id}]
+ doc_count_result = list(cosmos_group_documents_container.query_items(
+ query=doc_count_query,
+ parameters=doc_metrics_params,
+ enable_cross_partition_query=True
+ ))
+
+ # Get total pages using separate query - same as user management
+ doc_pages_query = """
+ SELECT VALUE SUM(c.number_of_pages)
+ FROM c
+ WHERE c.group_id = @group_id AND c.type = 'document_metadata'
+ """
+ doc_pages_result = list(cosmos_group_documents_container.query_items(
+ query=doc_pages_query,
+ parameters=doc_metrics_params,
+ enable_cross_partition_query=True
+ )) + + total_docs = doc_count_result[0] if doc_count_result else 0 + total_pages = doc_pages_result[0] if doc_pages_result and doc_pages_result[0] else 0 + + enhanced['activity']['document_metrics']['total_documents'] = total_docs + enhanced['document_count'] = total_docs # Update flat field + # AI search size = pages × 22KB + enhanced['activity']['document_metrics']['ai_search_size'] = total_pages * 22 * 1024 # 22KB per page + + debug_print(f"📄 [GROUP DOCUMENT DEBUG] Total documents for group {group_id}: {total_docs}") + debug_print(f"📊 [GROUP AI SEARCH DEBUG] Total pages for group {group_id}: {total_pages}, AI search size: {total_pages * 22 * 1024} bytes") + + # Last day upload tracking removed - keeping only document count and sizes + debug_print(f"� [GROUP DOCUMENT DEBUG] Document metrics calculation complete for group {group_id}") + + # Find the most recent document upload for last_activity (avoid ORDER BY composite index) + recent_activity_query = """ + SELECT c.upload_date, c.created_at, c.modified_at + FROM c + WHERE c.group_id = @group_id + """ + recent_activity_params = [{"name": "@group_id", "value": group_id}] + + recent_docs = list(cosmos_group_documents_container.query_items( + query=recent_activity_query, + parameters=recent_activity_params, + enable_cross_partition_query=True + )) + + if recent_docs: + # Find the most recent activity date from all documents in code + most_recent_activity = None + most_recent_activity_str = None + + for doc in recent_docs: + # Try multiple date fields to find the most recent activity + dates_to_check = [ + doc.get('upload_date'), + doc.get('modified_at'), + doc.get('created_at') + ] + + for date_str in dates_to_check: + if date_str: + try: + if isinstance(date_str, str): + if 'T' in date_str: # ISO format + date_obj = datetime.fromisoformat(date_str.replace('Z', '+00:00')) + else: # Date only format + date_obj = datetime.strptime(date_str, '%Y-%m-%d') + else: + date_obj = date_str # Already datetime + + if most_recent_activity is None or date_obj > most_recent_activity: + most_recent_activity = date_obj + most_recent_activity_str = date_str + except Exception as date_parse_e: + debug_print(f"📅 [GROUP ACTIVITY DEBUG] Error parsing activity date '{date_str}': {date_parse_e}") + continue + + if most_recent_activity_str: + enhanced['last_activity'] = most_recent_activity_str + debug_print(f"📅 [GROUP ACTIVITY DEBUG] Last activity for group {group_id}: {most_recent_activity_str}") + else: + debug_print(f"📅 [GROUP ACTIVITY DEBUG] No valid activity dates found for group {group_id}") + + # Calculate recent activity count (documents in last 7 days) + week_ago = datetime.now(timezone.utc) - timedelta(days=7) + week_ago_str = week_ago.strftime('%Y-%m-%d') + + recent_activity_count_query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.group_id = @group_id + AND c.upload_date >= @week_ago + """ + recent_activity_count_params = [ + {"name": "@group_id", "value": group_id}, + {"name": "@week_ago", "value": week_ago_str} + ] + + recent_count_results = list(cosmos_group_documents_container.query_items( + query=recent_activity_count_query, + parameters=recent_activity_count_params, + enable_cross_partition_query=True + )) + + if recent_count_results: + enhanced['recent_activity_count'] = recent_count_results[0] + debug_print(f"📊 [GROUP ACTIVITY DEBUG] Recent activity count for group {group_id}: {recent_count_results[0]}") + + # AI search size already calculated above with document count + + except Exception as doc_e: + debug_print(f"❌ [GROUP DOCUMENT DEBUG] 
Error calculating document metrics for group {group_id}: {doc_e}") + + # Get actual storage account size if enhanced citation is enabled (check app settings) + debug_print(f"💾 [GROUP STORAGE DEBUG] Enhanced citation enabled: {app_enhanced_citations}") + if app_enhanced_citations: + debug_print(f"💾 [GROUP STORAGE DEBUG] Starting storage calculation for group {group_id}") + try: + # Query actual file sizes from Azure Storage for group documents + storage_client = CLIENTS.get("storage_account_office_docs_client") + debug_print(f"💾 [GROUP STORAGE DEBUG] Storage client retrieved: {storage_client is not None}") + if storage_client: + group_folder_prefix = f"{group_id}/" + total_storage_size = 0 + + debug_print(f"💾 [GROUP STORAGE DEBUG] Looking for blobs with prefix: {group_folder_prefix}") + + # List all blobs in the group's folder - use GROUP documents container, not user documents + container_client = storage_client.get_container_client(storage_account_group_documents_container_name) + blob_list = container_client.list_blobs(name_starts_with=group_folder_prefix) + + blob_count = 0 + for blob in blob_list: + total_storage_size += blob.size + blob_count += 1 + debug_print(f"💾 [GROUP STORAGE DEBUG] Blob {blob.name}: {blob.size} bytes") + debug_print(f"Group storage blob {blob.name}: {blob.size} bytes") + + debug_print(f"💾 [GROUP STORAGE DEBUG] Found {blob_count} blobs, total size: {total_storage_size} bytes") + enhanced['activity']['document_metrics']['storage_account_size'] = total_storage_size + enhanced['storage_size'] = total_storage_size # Update flat field + debug_print(f"Total storage size for group {group_id}: {total_storage_size} bytes") + else: + debug_print(f"💾 [GROUP STORAGE DEBUG] Storage client NOT available for group {group_id}") + debug_print(f"Storage client not available for group {group_id}") + # Fallback to estimation if storage client not available + storage_size_query = """ + SELECT c.file_name, c.number_of_pages FROM c + WHERE c.group_id = @group_id AND c.type = 'document_metadata' + """ + storage_docs = list(cosmos_group_documents_container.query_items( + query=storage_size_query, + parameters=doc_metrics_params, + enable_cross_partition_query=True + )) + + total_storage_size = 0 + for doc in storage_docs: + # Estimate file size based on pages and file type + pages = doc.get('number_of_pages', 1) + file_name = doc.get('file_name', '') + + if file_name.lower().endswith('.pdf'): + # PDF: ~500KB per page average + estimated_size = pages * 500 * 1024 + elif file_name.lower().endswith(('.docx', '.doc')): + # Word docs: ~300KB per page average + estimated_size = pages * 300 * 1024 + elif file_name.lower().endswith(('.pptx', '.ppt')): + # PowerPoint: ~800KB per page average + estimated_size = pages * 800 * 1024 + else: + # Other files: ~400KB per page average + estimated_size = pages * 400 * 1024 + + total_storage_size += estimated_size + + enhanced['activity']['document_metrics']['storage_account_size'] = total_storage_size + enhanced['storage_size'] = total_storage_size # Update flat field + debug_print(f"💾 [GROUP STORAGE DEBUG] Fallback estimation complete: {total_storage_size} bytes") + debug_print(f"Estimated storage size for group {group_id}: {total_storage_size} bytes") + + except Exception as storage_e: + debug_print(f"❌ [GROUP STORAGE DEBUG] Storage calculation failed for group {group_id}: {storage_e}") + debug_print(f"Could not calculate storage size for group {group_id}: {storage_e}") + # Set to 0 if we can't calculate + 
enhanced['activity']['document_metrics']['storage_account_size'] = 0 + enhanced['storage_size'] = 0 + + # Cache the computed metrics in the group document + if force_refresh: + try: + metrics_cache = { + 'document_metrics': enhanced['activity']['document_metrics'], + 'calculated_at': datetime.now(timezone.utc).isoformat() + } + + # Update group document with cached metrics + group['metrics'] = metrics_cache + cosmos_groups_container.upsert_item(group) + debug_print(f"Successfully cached metrics for group {group_id}") + + except Exception as cache_save_e: + debug_print(f"Error saving metrics cache for group {group_id}: {cache_save_e}") + + return enhanced + + except Exception as e: + debug_print(f"Error enhancing group data: {e}") + return group # Return original group data if enhancement fails + +def get_activity_trends_data(start_date, end_date): + """ + Get aggregated activity data for the specified date range from existing containers. + Returns daily activity counts by type using real application data. + """ + try: + # Debug logging + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Getting data for range: {start_date} to {end_date}") + + # Convert string dates to datetime objects if needed + if isinstance(start_date, str): + start_date = datetime.fromisoformat(start_date) + if isinstance(end_date, str): + end_date = datetime.fromisoformat(end_date) + + # Initialize daily data structure + daily_data = {} + current_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) + + while current_date <= end_date: + date_key = current_date.strftime('%Y-%m-%d') + daily_data[date_key] = { + 'date': date_key, + 'chats_created': 0, + 'chats_deleted': 0, + 'chats': 0, # Keep for backward compatibility + 'personal_documents_created': 0, + 'personal_documents_deleted': 0, + 'group_documents_created': 0, + 'group_documents_deleted': 0, + 'public_documents_created': 0, + 'public_documents_deleted': 0, + 'personal_documents': 0, # Keep for backward compatibility + 'group_documents': 0, # Keep for backward compatibility + 'public_documents': 0, # Keep for backward compatibility + 'documents': 0, # Keep for backward compatibility + 'logins': 0, + 'total': 0 + } + current_date += timedelta(days=1) + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Initialized {len(daily_data)} days of data: {list(daily_data.keys())}") + + # Parameters for queries + parameters = [ + {"name": "@start_date", "value": start_date.isoformat()}, + {"name": "@end_date", "value": end_date.isoformat()} + ] + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Query parameters: {parameters}") + + # Query 1: Get chat activity from activity logs (both creation and deletion) + try: + debug_print("🔍 [ACTIVITY TRENDS DEBUG] Querying conversations...") + + # Count conversation creations + conversations_query = """ + SELECT c.timestamp, c.created_at + FROM c + WHERE c.activity_type = 'conversation_creation' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + conversations = list(cosmos_activity_logs_container.query_items( + query=conversations_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Found {len(conversations)} conversation creation logs") + + for conv in conversations: + timestamp = conv.get('timestamp') or conv.get('created_at') + if timestamp: + try: + if isinstance(timestamp, str): + conv_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + 
else: + conv_date = timestamp + + date_key = conv_date.strftime('%Y-%m-%d') + if date_key in daily_data: + daily_data[date_key]['chats_created'] += 1 + daily_data[date_key]['chats'] += 1 # Keep total for backward compatibility + except Exception as e: + debug_print(f"Could not parse conversation timestamp {timestamp}: {e}") + + # Count conversation deletions + deletions_query = """ + SELECT c.timestamp, c.created_at + FROM c + WHERE c.activity_type = 'conversation_deletion' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + deletions = list(cosmos_activity_logs_container.query_items( + query=deletions_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Found {len(deletions)} conversation deletion logs") + + for deletion in deletions: + timestamp = deletion.get('timestamp') or deletion.get('created_at') + if timestamp: + try: + if isinstance(timestamp, str): + del_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + del_date = timestamp + + date_key = del_date.strftime('%Y-%m-%d') + if date_key in daily_data: + daily_data[date_key]['chats_deleted'] += 1 + except Exception as e: + debug_print(f"Could not parse deletion timestamp {timestamp}: {e}") + + except Exception as e: + debug_print(f"Could not query conversation activity logs: {e}") + print(f"❌ [ACTIVITY TRENDS DEBUG] Error querying chats: {e}") + + # Query 2: Get document activity from activity_logs (both creation and deletion) + try: + debug_print("🔍 [ACTIVITY TRENDS DEBUG] Querying documents from activity logs...") + + # Document creations + documents_query = """ + SELECT c.timestamp, c.created_at, c.workspace_type + FROM c + WHERE c.activity_type = 'document_creation' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + docs = list(cosmos_activity_logs_container.query_items( + query=documents_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Found {len(docs)} document creation logs") + + for doc in docs: + timestamp = doc.get('timestamp') or doc.get('created_at') + workspace_type = doc.get('workspace_type', 'personal') + + if timestamp: + try: + if isinstance(timestamp, str): + doc_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + doc_date = timestamp + + date_key = doc_date.strftime('%Y-%m-%d') + if date_key in daily_data: + if workspace_type == 'group': + daily_data[date_key]['group_documents_created'] += 1 + daily_data[date_key]['group_documents'] += 1 + elif workspace_type == 'public': + daily_data[date_key]['public_documents_created'] += 1 + daily_data[date_key]['public_documents'] += 1 + else: + daily_data[date_key]['personal_documents_created'] += 1 + daily_data[date_key]['personal_documents'] += 1 + + daily_data[date_key]['documents'] += 1 + except Exception as e: + debug_print(f"Could not parse document timestamp {timestamp}: {e}") + + # Document deletions + deletions_query = """ + SELECT c.timestamp, c.created_at, c.workspace_type + FROM c + WHERE c.activity_type = 'document_deletion' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + doc_deletions = list(cosmos_activity_logs_container.query_items( + 
query=deletions_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Found {len(doc_deletions)} document deletion logs") + + for doc in doc_deletions: + timestamp = doc.get('timestamp') or doc.get('created_at') + workspace_type = doc.get('workspace_type', 'personal') + + if timestamp: + try: + if isinstance(timestamp, str): + doc_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + doc_date = timestamp + + date_key = doc_date.strftime('%Y-%m-%d') + if date_key in daily_data: + if workspace_type == 'group': + daily_data[date_key]['group_documents_deleted'] += 1 + elif workspace_type == 'public': + daily_data[date_key]['public_documents_deleted'] += 1 + else: + daily_data[date_key]['personal_documents_deleted'] += 1 + except Exception as e: + debug_print(f"Could not parse document deletion timestamp {timestamp}: {e}") + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Total documents found: {len(docs)} created, {len(doc_deletions)} deleted") + + except Exception as e: + debug_print(f"Could not query document activity logs: {e}") + print(f"❌ [ACTIVITY TRENDS DEBUG] Error querying documents: {e}") + + # Query 3: Get login activity from activity_logs container + try: + debug_print("🔍 [ACTIVITY TRENDS DEBUG] Querying login activity...") + + # Query login activity from activity_logs container + + # Count total records with login_method + count_query = """ + SELECT VALUE COUNT(1) + FROM c + WHERE c.login_method != null + """ + + login_count = list(cosmos_activity_logs_container.query_items( + query=count_query, + enable_cross_partition_query=True + )) + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Total records with login_method: {login_count[0] if login_count else 0}") + + # Query for login records using the correct activity_type + # The data shows records have activity_type: "user_login" and proper timestamps + login_query = """ + SELECT c.timestamp, c.created_at, c.activity_type, c.login_method, c.user_id + FROM c + WHERE c.activity_type = 'user_login' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + login_activities = list(cosmos_activity_logs_container.query_items( + query=login_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Found {len(login_activities)} user_login records") + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Date range: {start_date.isoformat()} to {end_date.isoformat()}") + + for login in login_activities: + timestamp = login.get('timestamp') or login.get('created_at') + if timestamp: + try: + if isinstance(timestamp, str): + login_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + login_date = timestamp + + date_key = login_date.strftime('%Y-%m-%d') + if date_key in daily_data: + daily_data[date_key]['logins'] += 1 + except Exception as e: + debug_print(f"Could not parse login timestamp {timestamp}: {e}") + + except Exception as e: + debug_print(f"Could not query activity logs for login data: {e}") + print(f"❌ [ACTIVITY TRENDS DEBUG] Error querying logins: {e}") + + # Query 4: Get token usage from activity_logs (token_usage activity_type) + try: + debug_print("🔍 [ACTIVITY TRENDS DEBUG] Querying token usage...") + + token_usage_query = """ + SELECT c.timestamp, c.created_at, c.token_type, c.usage.total_tokens as token_count + FROM c + WHERE c.activity_type = 
'token_usage' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + token_activities = list(cosmos_activity_logs_container.query_items( + query=token_usage_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Found {len(token_activities)} token_usage records") + + # Initialize token tracking structure + token_daily_data = {} + current_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) + while current_date <= end_date: + date_key = current_date.strftime('%Y-%m-%d') + token_daily_data[date_key] = { + 'embedding': 0, + 'chat': 0, + 'web_search': 0 + } + current_date += timedelta(days=1) + + for token_record in token_activities: + timestamp = token_record.get('timestamp') or token_record.get('created_at') + token_type = token_record.get('token_type', '') + token_count = token_record.get('token_count', 0) + + if timestamp and token_type in ['embedding', 'chat', 'web_search']: + try: + if isinstance(timestamp, str): + token_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + token_date = timestamp + + date_key = token_date.strftime('%Y-%m-%d') + if date_key in token_daily_data: + token_daily_data[date_key][token_type] += token_count + except Exception as e: + debug_print(f"Could not parse token timestamp {timestamp}: {e}") + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Token daily data: {token_daily_data}") + + except Exception as e: + debug_print(f"Could not query activity logs for token usage: {e}") + print(f"❌ [ACTIVITY TRENDS DEBUG] Error querying tokens: {e}") + # Initialize empty token data on error + token_daily_data = {} + current_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) + while current_date <= end_date: + date_key = current_date.strftime('%Y-%m-%d') + token_daily_data[date_key] = {'embedding': 0, 'chat': 0, 'web_search': 0} + current_date += timedelta(days=1) + + # Calculate totals for each day + for date_key in daily_data: + daily_data[date_key]['total'] = ( + daily_data[date_key]['chats'] + + daily_data[date_key]['documents'] + + daily_data[date_key]['logins'] + ) + + # Group by activity type for chart display + result = { + 'chats': {}, + 'chats_created': {}, + 'chats_deleted': {}, + 'documents': {}, # Keep for backward compatibility + 'personal_documents': {}, # Keep for backward compatibility + 'group_documents': {}, # Keep for backward compatibility + 'public_documents': {}, # Keep for backward compatibility + 'personal_documents_created': {}, + 'personal_documents_deleted': {}, + 'group_documents_created': {}, + 'group_documents_deleted': {}, + 'public_documents_created': {}, + 'public_documents_deleted': {}, + 'logins': {}, + 'tokens': token_daily_data # Token usage by type (embedding, chat) + } + + for date_key, data in daily_data.items(): + result['chats'][date_key] = data['chats'] + result['chats_created'][date_key] = data['chats_created'] + result['chats_deleted'][date_key] = data['chats_deleted'] + result['documents'][date_key] = data['documents'] + result['personal_documents'][date_key] = data['personal_documents'] + result['group_documents'][date_key] = data['group_documents'] + result['public_documents'][date_key] = data['public_documents'] + result['personal_documents_created'][date_key] = data['personal_documents_created'] + result['personal_documents_deleted'][date_key] = data['personal_documents_deleted'] + 
result['group_documents_created'][date_key] = data['group_documents_created'] + result['group_documents_deleted'][date_key] = data['group_documents_deleted'] + result['public_documents_created'][date_key] = data['public_documents_created'] + result['public_documents_deleted'][date_key] = data['public_documents_deleted'] + result['logins'][date_key] = data['logins'] + + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Final result: {result}") + + return result + + except Exception as e: + debug_print(f"Error getting activity trends data: {e}") + print(f"❌ [ACTIVITY TRENDS DEBUG] Fatal error: {e}") + return { + 'chats': {}, + 'documents': {}, + 'personal_documents': {}, + 'group_documents': {}, + 'public_documents': {}, + 'logins': {}, + 'tokens': {} + } + +def get_raw_activity_trends_data(start_date, end_date, charts): + """ + Get raw detailed activity data for export instead of aggregated counts. + Returns individual records with user information for each activity type. + """ + try: + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Getting raw data for range: {start_date} to {end_date}") + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Requested charts: {charts}") + + result = {} + + # Parameters for queries + parameters = [ + {"name": "@start_date", "value": start_date.isoformat()}, + {"name": "@end_date", "value": end_date.isoformat()} + ] + + # Helper function to get user info + def get_user_info(user_id): + try: + user_doc = cosmos_user_settings_container.read_item( + item=user_id, + partition_key=user_id + ) + return { + 'display_name': user_doc.get('display_name', ''), + 'email': user_doc.get('email', '') + } + except Exception: + return { + 'display_name': '', + 'email': '' + } + + # Helper function to get AI Search size with caching + def get_ai_search_size(doc, cosmos_container): + """ + Get AI Search size for a document (pages × 80KB). + Uses cached value from Cosmos if available, otherwise calculates and caches it. + + Args: + doc: The document dict from Cosmos (to check for cached value) + cosmos_container: Cosmos container to update with cached value + + Returns: + AI Search size in bytes + """ + try: + # Check if AI Search size is already cached in the document + cached_size = doc.get('ai_search_size', 0) + if cached_size and cached_size > 0: + return cached_size + + # Not cached or zero, calculate from page count + pages = doc.get('number_of_pages', 0) or 0 + ai_search_size = pages * 80 * 1024 if pages else 0 # 80KB per page + + # Cache the calculated size in Cosmos for future use using update_document + # This ensures we only update the specific field without overwriting other metadata + if ai_search_size > 0: + try: + document_id = doc.get('id') or doc.get('document_id') + user_id = doc.get('user_id') + group_id = doc.get('group_id') + public_workspace_id = doc.get('public_workspace_id') + + if document_id and user_id: + update_document( + document_id=document_id, + user_id=user_id, + group_id=group_id, + public_workspace_id=public_workspace_id, + ai_search_size=ai_search_size + ) + except Exception as cache_e: + # Don't fail if caching fails, just return the calculated value + pass + + return ai_search_size + + except Exception as e: + return 0 + + # Helper function to get document storage size from Azure Storage with caching + def get_document_storage_size(doc, cosmos_container, container_name, folder_prefix, document_id): + """ + Get actual storage size for a document from Azure Storage. + Uses cached value from Cosmos if available, otherwise calculates and caches it. 
+ + Args: + doc: The document dict from Cosmos (to check for cached value) + cosmos_container: Cosmos container to update with cached value + container_name: Azure Storage container name (e.g., 'user-documents', 'group-documents', 'public-documents') + folder_prefix: Folder prefix (e.g., user_id, group_id, public_workspace_id) + document_id: Document ID + + Returns: + Total size in bytes of all blobs for this document + """ + try: + # Check if storage size is already cached in the document + cached_size = doc.get('storage_account_size', 0) + if cached_size and cached_size > 0: + debug_print(f"💾 [STORAGE CACHE] Using cached storage size for {document_id}: {cached_size} bytes") + return cached_size + + # Not cached or zero, calculate from Azure Storage + storage_client = CLIENTS.get("storage_account_office_docs_client") + if not storage_client: + debug_print(f"❌ [STORAGE DEBUG] Storage client not available for {document_id}") + return 0 + + # Get the file_name from the document to construct the correct blob path + # Blob path structure: {folder_prefix}/{file_name} + # NOT {folder_prefix}/{document_id}/... + file_name = doc.get('file_name', '') + if not file_name: + debug_print(f"⚠️ [STORAGE DEBUG] No file_name for document {document_id}, cannot calculate storage size") + return 0 + + # Construct the exact blob path + blob_path = f"{folder_prefix}/{file_name}" + + debug_print(f"💾 [STORAGE DEBUG] Looking for blob: {blob_path}") + + container_client = storage_client.get_container_client(container_name) + + # Try to get the specific blob + try: + blob_client = container_client.get_blob_client(blob_path) + blob_properties = blob_client.get_blob_properties() + total_size = blob_properties.size + blob_count = 1 + + debug_print(f"💾 [STORAGE CALC] Found blob {blob_path}: {total_size} bytes") + except Exception as blob_e: + debug_print(f"⚠️ [STORAGE DEBUG] Blob not found or error: {blob_path} - {blob_e}") + return 0 + + debug_print(f"💾 [STORAGE CALC] Calculated storage size for {document_id}: {total_size} bytes ({blob_count} blobs)") + + # Cache the calculated size in Cosmos for future use using update_document + # This ensures we only update the specific field without overwriting other metadata + if total_size > 0: + try: + user_id = doc.get('user_id') + group_id = doc.get('group_id') + public_workspace_id = doc.get('public_workspace_id') + + if document_id and user_id: + update_document( + document_id=document_id, + user_id=user_id, + group_id=group_id, + public_workspace_id=public_workspace_id, + storage_account_size=total_size + ) + debug_print(f"💾 [STORAGE CACHE] Cached storage size in Cosmos for {document_id}") + except Exception as cache_e: + debug_print(f"⚠️ [STORAGE CACHE] Could not cache storage size for {document_id}: {cache_e}") + # Don't fail if caching fails, just return the calculated value + + return total_size + + except Exception as e: + debug_print(f"❌ [STORAGE DEBUG] Error getting storage size for document {document_id}: {e}") + return 0 + + # 1. 
Login Data + if 'logins' in charts: + debug_print("🔍 [RAW ACTIVITY DEBUG] Getting login records...") + try: + login_query = """ + SELECT c.timestamp, c.created_at, c.user_id, c.activity_type, c.login_method + FROM c + WHERE c.activity_type = 'user_login' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + login_activities = list(cosmos_activity_logs_container.query_items( + query=login_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + login_records = [] + for login in login_activities: + user_id = login.get('user_id', '') + user_info = get_user_info(user_id) + timestamp = login.get('timestamp') or login.get('created_at') + + if timestamp: + try: + if isinstance(timestamp, str): + login_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + login_date = timestamp + + login_records.append({ + 'display_name': user_info['display_name'], + 'email': user_info['email'], + 'user_id': user_id, + 'login_time': login_date.strftime('%Y-%m-%d %H:%M:%S') + }) + except Exception as e: + debug_print(f"Could not parse login timestamp {timestamp}: {e}") + + result['logins'] = login_records + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Found {len(login_records)} login records") + + except Exception as e: + debug_print(f"❌ [RAW ACTIVITY DEBUG] Error getting login data: {e}") + result['logins'] = [] + + # 2. Document Data - From activity_logs container using document_creation activity_type + # Personal Documents + if 'personal_documents' in charts: + debug_print("🔍 [RAW ACTIVITY DEBUG] Getting personal document records from activity logs...") + try: + personal_docs_query = """ + SELECT c.timestamp, c.created_at, c.user_id, c.document.document_id, + c.document.file_name, c.document.file_type, c.document.file_size_bytes, + c.document.page_count, c.document_metadata, c.embedding_usage + FROM c + WHERE c.activity_type = 'document_creation' + AND c.workspace_type = 'personal' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + personal_docs = list(cosmos_activity_logs_container.query_items( + query=personal_docs_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + personal_document_records = [] + for doc in personal_docs: + user_id = doc.get('user_id', '') + user_info = get_user_info(user_id) + timestamp = doc.get('timestamp') or doc.get('created_at') + + if timestamp: + try: + if isinstance(timestamp, str): + doc_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + doc_date = timestamp + + document_info = doc.get('document', {}) + doc_metadata = doc.get('document_metadata', {}) + pages = document_info.get('page_count', 0) or 0 + + # Calculate AI Search size (pages × 80KB) + ai_search_size = pages * 80 * 1024 if pages else 0 + + # Get file size from activity log + storage_size = document_info.get('file_size_bytes', 0) or 0 + + personal_document_records.append({ + 'display_name': user_info['display_name'], + 'email': user_info['email'], + 'user_id': user_id, + 'document_id': document_info.get('document_id', ''), + 'filename': document_info.get('file_name', ''), + 'title': doc_metadata.get('title', 'Unknown Title'), + 'page_count': pages, + 'ai_search_size': ai_search_size, + 'storage_account_size': storage_size, + 'upload_date': doc_date.strftime('%Y-%m-%d %H:%M:%S'), + 'document_type': 
'Personal' + }) + except Exception as e: + debug_print(f"Could not parse personal document timestamp {timestamp}: {e}") + + result['personal_documents'] = personal_document_records + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Found {len(personal_document_records)} personal document records") + + except Exception as e: + debug_print(f"❌ [RAW ACTIVITY DEBUG] Error getting personal document data: {e}") + result['personal_documents'] = [] + + # Group Documents + if 'group_documents' in charts: + debug_print("🔍 [RAW ACTIVITY DEBUG] Getting group document records from activity logs...") + try: + group_docs_query = """ + SELECT c.timestamp, c.created_at, c.user_id, c.document.document_id, + c.document.file_name, c.document.file_type, c.document.file_size_bytes, + c.document.page_count, c.document_metadata, c.embedding_usage, + c.workspace_context.group_id + FROM c + WHERE c.activity_type = 'document_creation' + AND c.workspace_type = 'group' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + group_docs = list(cosmos_activity_logs_container.query_items( + query=group_docs_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + group_document_records = [] + for doc in group_docs: + user_id = doc.get('user_id', '') + user_info = get_user_info(user_id) + timestamp = doc.get('timestamp') or doc.get('created_at') + + if timestamp: + try: + if isinstance(timestamp, str): + doc_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + doc_date = timestamp + + document_info = doc.get('document', {}) + doc_metadata = doc.get('document_metadata', {}) + pages = document_info.get('page_count', 0) or 0 + + # Calculate AI Search size (pages × 80KB) + ai_search_size = pages * 80 * 1024 if pages else 0 + + # Get file size from activity log + storage_size = document_info.get('file_size_bytes', 0) or 0 + + group_document_records.append({ + 'display_name': user_info['display_name'], + 'email': user_info['email'], + 'user_id': user_id, + 'document_id': document_info.get('document_id', ''), + 'filename': document_info.get('file_name', ''), + 'title': doc_metadata.get('title', 'Unknown Title'), + 'page_count': pages, + 'ai_search_size': ai_search_size, + 'storage_account_size': storage_size, + 'upload_date': doc_date.strftime('%Y-%m-%d %H:%M:%S'), + 'document_type': 'Group' + }) + except Exception as e: + debug_print(f"Could not parse group document timestamp {timestamp}: {e}") + + result['group_documents'] = group_document_records + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Found {len(group_document_records)} group document records") + + except Exception as e: + debug_print(f"❌ [RAW ACTIVITY DEBUG] Error getting group document data: {e}") + result['group_documents'] = [] + + # Public Documents + if 'public_documents' in charts: + debug_print("🔍 [RAW ACTIVITY DEBUG] Getting public document records from activity logs...") + try: + public_docs_query = """ + SELECT c.timestamp, c.created_at, c.user_id, c.document.document_id, + c.document.file_name, c.document.file_type, c.document.file_size_bytes, + c.document.page_count, c.document_metadata, c.embedding_usage, + c.workspace_context.public_workspace_id + FROM c + WHERE c.activity_type = 'document_creation' + AND c.workspace_type = 'public' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + public_docs = 
list(cosmos_activity_logs_container.query_items( + query=public_docs_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + public_document_records = [] + for doc in public_docs: + user_id = doc.get('user_id', '') + user_info = get_user_info(user_id) + timestamp = doc.get('timestamp') or doc.get('created_at') + + if timestamp: + try: + if isinstance(timestamp, str): + doc_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + doc_date = timestamp + + document_info = doc.get('document', {}) + doc_metadata = doc.get('document_metadata', {}) + pages = document_info.get('page_count', 0) or 0 + + # Calculate AI Search size (pages × 80KB) + ai_search_size = pages * 80 * 1024 if pages else 0 + + # Get file size from activity log + storage_size = document_info.get('file_size_bytes', 0) or 0 + + public_document_records.append({ + 'display_name': user_info['display_name'], + 'email': user_info['email'], + 'user_id': user_id, + 'document_id': document_info.get('document_id', ''), + 'filename': document_info.get('file_name', ''), + 'title': doc_metadata.get('title', 'Unknown Title'), + 'page_count': pages, + 'ai_search_size': ai_search_size, + 'storage_account_size': storage_size, + 'upload_date': doc_date.strftime('%Y-%m-%d %H:%M:%S'), + 'document_type': 'Public' + }) + except Exception as e: + debug_print(f"Could not parse public document timestamp {timestamp}: {e}") + + result['public_documents'] = public_document_records + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Found {len(public_document_records)} public document records") + + except Exception as e: + debug_print(f"❌ [RAW ACTIVITY DEBUG] Error getting public document data: {e}") + result['public_documents'] = [] + + # Keep backward compatibility - if 'documents' is requested, combine all types + if 'documents' in charts: + debug_print("🔍 [RAW ACTIVITY DEBUG] Getting combined document records for backward compatibility...") + combined_records = [] + if 'personal_documents' in result: + combined_records.extend(result['personal_documents']) + if 'group_documents' in result: + combined_records.extend(result['group_documents']) + if 'public_documents' in result: + combined_records.extend(result['public_documents']) + result['documents'] = combined_records + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Combined {len(combined_records)} total document records") + + # 3. 
Chat Data - From activity_logs container using conversation_creation activity_type + if 'chats' in charts: + debug_print("🔍 [RAW ACTIVITY DEBUG] Getting chat records from activity logs...") + try: + conversations_query = """ + SELECT c.timestamp, c.created_at, c.user_id, + c.conversation.conversation_id as conversation_id, + c.conversation.title as conversation_title + FROM c + WHERE c.activity_type = 'conversation_creation' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + conversations = list(cosmos_activity_logs_container.query_items( + query=conversations_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + chat_records = [] + for conv in conversations: + user_id = conv.get('user_id', '') + user_info = get_user_info(user_id) + conversation_id = conv.get('conversation_id', '') + conversation_title = conv.get('conversation_title', '') + timestamp = conv.get('timestamp') or conv.get('created_at') + + # Get message count and total size for this conversation (still from messages container) + try: + messages_query = """ + SELECT VALUE COUNT(1) + FROM c + WHERE c.conversation_id = @conversation_id + """ + + message_count_result = list(cosmos_messages_container.query_items( + query=messages_query, + parameters=[{"name": "@conversation_id", "value": conversation_id}], + enable_cross_partition_query=True + )) + message_count = message_count_result[0] if message_count_result else 0 + + # Get total character count + messages_size_query = """ + SELECT c.content + FROM c + WHERE c.conversation_id = @conversation_id + """ + + messages = list(cosmos_messages_container.query_items( + query=messages_size_query, + parameters=[{"name": "@conversation_id", "value": conversation_id}], + enable_cross_partition_query=True + )) + + total_size = sum(len(str(msg.get('content', ''))) for msg in messages) + + except Exception as msg_e: + debug_print(f"Could not get message data for conversation {conversation_id}: {msg_e}") + message_count = 0 + total_size = 0 + + if timestamp: + try: + if isinstance(timestamp, str): + conv_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + conv_date = timestamp + + created_date_str = conv_date.strftime('%Y-%m-%d %H:%M:%S') + + chat_records.append({ + 'display_name': user_info['display_name'], + 'email': user_info['email'], + 'user_id': user_id, + 'chat_id': conversation_id, + 'chat_title': conversation_title, + 'message_count': message_count, + 'total_size': total_size, + 'created_date': created_date_str + }) + except Exception as e: + debug_print(f"Could not parse conversation timestamp {timestamp}: {e}") + + result['chats'] = chat_records + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Found {len(chat_records)} chat records") + + except Exception as e: + debug_print(f"❌ [RAW ACTIVITY DEBUG] Error getting chat data: {e}") + result['chats'] = [] + + # 4. 
Token Usage Data - From activity_logs container using token_usage activity_type + if 'tokens' in charts: + debug_print("🔍 [RAW ACTIVITY DEBUG] Getting token usage records from activity logs...") + try: + tokens_query = """ + SELECT c.timestamp, c.created_at, c.user_id, c.token_type, + c.usage.model as model_name, + c.usage.prompt_tokens as prompt_tokens, + c.usage.completion_tokens as completion_tokens, + c.usage.total_tokens as total_tokens + FROM c + WHERE c.activity_type = 'token_usage' + AND ((c.timestamp >= @start_date AND c.timestamp <= @end_date) + OR (c.created_at >= @start_date AND c.created_at <= @end_date)) + """ + + token_activities = list(cosmos_activity_logs_container.query_items( + query=tokens_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + token_records = [] + for token_log in token_activities: + user_id = token_log.get('user_id', '') + user_info = get_user_info(user_id) + timestamp = token_log.get('timestamp') or token_log.get('created_at') + token_type = token_log.get('token_type', 'unknown') + + if timestamp: + try: + if isinstance(timestamp, str): + token_date = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if 'Z' in timestamp else timestamp) + else: + token_date = timestamp + + # Handle both chat and embedding tokens + prompt_tokens = token_log.get('prompt_tokens', 0) if token_type == 'chat' else 0 + completion_tokens = token_log.get('completion_tokens', 0) if token_type == 'chat' else 0 + + token_records.append({ + 'display_name': user_info['display_name'], + 'email': user_info['email'], + 'user_id': user_id, + 'token_type': token_type, + 'model_name': token_log.get('model_name', 'Unknown'), + 'prompt_tokens': prompt_tokens, + 'completion_tokens': completion_tokens, + 'total_tokens': token_log.get('total_tokens', 0), + 'timestamp': token_date.strftime('%Y-%m-%d %H:%M:%S') + }) + except Exception as e: + debug_print(f"Could not parse token timestamp {timestamp}: {e}") + + result['tokens'] = token_records + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Found {len(token_records)} token usage records") + + except Exception as e: + debug_print(f"❌ [RAW ACTIVITY DEBUG] Error getting token usage data: {e}") + result['tokens'] = [] + + debug_print(f"🔍 [RAW ACTIVITY DEBUG] Returning raw data with {len(result)} chart types") + return result + + except Exception as e: + debug_print(f"Error getting raw activity trends data: {e}") + debug_print(f"❌ [RAW ACTIVITY DEBUG] Fatal error: {e}") + return {} + + +def register_route_backend_control_center(app): + + # User Management APIs + @app.route('/api/admin/control-center/users', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_all_users(): + """ + Get all users with their settings, activity data, and access status. + Supports pagination and filtering. 
+ """ + try: + page = int(request.args.get('page', 1)) + per_page = min(int(request.args.get('per_page', 50)), 100) # Max 100 per page + search = request.args.get('search', '').strip() + access_filter = request.args.get('access_filter', 'all') # all, allow, deny + force_refresh = request.args.get('force_refresh', 'false').lower() == 'true' + export_all = request.args.get('all', 'false').lower() == 'true' # For CSV export + + # Build query with filters + query_conditions = [] + parameters = [] + + if search: + query_conditions.append("(CONTAINS(LOWER(c.email), @search) OR CONTAINS(LOWER(c.display_name), @search))") + parameters.append({"name": "@search", "value": search.lower()}) + + if access_filter != 'all': + query_conditions.append("c.settings.access.status = @access_status") + parameters.append({"name": "@access_status", "value": access_filter}) + + where_clause = " AND ".join(query_conditions) if query_conditions else "1=1" + + if export_all: + # For CSV export, get all users without pagination + users_query = f""" + SELECT c.id, c.email, c.display_name, c.lastUpdated, c.settings + FROM c + WHERE {where_clause} + ORDER BY c.display_name + """ + + users = list(cosmos_user_settings_container.query_items( + query=users_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + # Enhance user data with activity information + enhanced_users = [] + for user in users: + enhanced_user = enhance_user_with_activity(user, force_refresh=force_refresh) + enhanced_users.append(enhanced_user) + + return jsonify({ + 'success': True, + 'users': enhanced_users, + 'total_count': len(enhanced_users) + }), 200 + + # Get total count for pagination + count_query = f"SELECT VALUE COUNT(1) FROM c WHERE {where_clause}" + total_items_result = list(cosmos_user_settings_container.query_items( + query=count_query, + parameters=parameters, + enable_cross_partition_query=True + )) + total_items = total_items_result[0] if total_items_result and isinstance(total_items_result[0], int) else 0 + + # Calculate pagination + offset = (page - 1) * per_page + total_pages = (total_items + per_page - 1) // per_page + + # Get paginated results + users_query = f""" + SELECT c.id, c.email, c.display_name, c.lastUpdated, c.settings + FROM c + WHERE {where_clause} + ORDER BY c.display_name + OFFSET {offset} LIMIT {per_page} + """ + + users = list(cosmos_user_settings_container.query_items( + query=users_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + # Enhance user data with activity information + enhanced_users = [] + for user in users: + enhanced_user = enhance_user_with_activity(user, force_refresh=force_refresh) + enhanced_users.append(enhanced_user) + + return jsonify({ + 'users': enhanced_users, + 'pagination': { + 'page': page, + 'per_page': per_page, + 'total_items': total_items, + 'total_pages': total_pages, + 'has_prev': page > 1, + 'has_next': page < total_pages + } + }), 200 + + except Exception as e: + debug_print(f"Error getting users: {e}") + return jsonify({'error': 'Failed to retrieve users'}), 500 + + @app.route('/api/admin/control-center/users//access', methods=['PATCH']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_update_user_access(user_id): + """ + Update user access permissions (allow/deny with optional time-based restriction). 
+ """ + try: + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + status = data.get('status') + datetime_to_allow = data.get('datetime_to_allow') + + if status not in ['allow', 'deny']: + return jsonify({'error': 'Status must be "allow" or "deny"'}), 400 + + # Validate datetime_to_allow if provided + if datetime_to_allow: + try: + # Validate ISO 8601 format + datetime.fromisoformat(datetime_to_allow.replace('Z', '+00:00') if 'Z' in datetime_to_allow else datetime_to_allow) + except ValueError: + return jsonify({'error': 'Invalid datetime format. Use ISO 8601 format'}), 400 + + # Update user access settings + access_settings = { + 'access': { + 'status': status, + 'datetime_to_allow': datetime_to_allow + } + } + + success = update_user_settings(user_id, access_settings) + + if success: + # Log admin action + admin_user = session.get('user', {}) + log_event("[ControlCenter] User Access Updated", { + "admin_user": admin_user.get('preferred_username', 'unknown'), + "target_user_id": user_id, + "access_status": status, + "datetime_to_allow": datetime_to_allow + }) + + return jsonify({'message': 'User access updated successfully'}), 200 + else: + return jsonify({'error': 'Failed to update user access'}), 500 + + except Exception as e: + debug_print(f"Error updating user access: {e}") + return jsonify({'error': 'Failed to update user access'}), 500 + + @app.route('/api/admin/control-center/users//file-uploads', methods=['PATCH']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_update_user_file_uploads(user_id): + """ + Update user file upload permissions (allow/deny with optional time-based restriction). + """ + try: + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + status = data.get('status') + datetime_to_allow = data.get('datetime_to_allow') + + if status not in ['allow', 'deny']: + return jsonify({'error': 'Status must be "allow" or "deny"'}), 400 + + # Validate datetime_to_allow if provided + if datetime_to_allow: + try: + # Validate ISO 8601 format + datetime.fromisoformat(datetime_to_allow.replace('Z', '+00:00') if 'Z' in datetime_to_allow else datetime_to_allow) + except ValueError: + return jsonify({'error': 'Invalid datetime format. Use ISO 8601 format'}), 400 + + # Update user file upload settings + file_upload_settings = { + 'file_uploads': { + 'status': status, + 'datetime_to_allow': datetime_to_allow + } + } + + success = update_user_settings(user_id, file_upload_settings) + + if success: + # Log admin action + admin_user = session.get('user', {}) + log_event("[ControlCenter] User File Upload Updated", { + "admin_user": admin_user.get('preferred_username', 'unknown'), + "target_user_id": user_id, + "file_upload_status": status, + "datetime_to_allow": datetime_to_allow + }) + + return jsonify({'message': 'User file upload permissions updated successfully'}), 200 + else: + return jsonify({'error': 'Failed to update user file upload permissions'}), 500 + + except Exception as e: + debug_print(f"Error updating user file uploads: {e}") + return jsonify({'error': 'Failed to update user file upload permissions'}), 500 + + @app.route('/api/admin/control-center/users//delete-documents', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_delete_user_documents_admin(user_id): + """ + Create an approval request to delete all documents for a user. 
+ Requires approval from another admin. + + Body: + reason (str): Explanation for deleting documents (required) + """ + try: + data = request.get_json() or {} + reason = data.get('reason', '').strip() + + if not reason: + return jsonify({'error': 'Reason is required for document deletion'}), 400 + + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + # Validate user exists by trying to get their data from Cosmos + try: + user_doc = cosmos_user_settings_container.read_item( + item=user_id, + partition_key=user_id + ) + user_email = user_doc.get('email', 'unknown') + user_name = user_doc.get('display_name', user_email) + except Exception: + return jsonify({'error': 'User not found'}), 404 + + # Create approval request using user_id as both group_id (for partition) and storing user_id in metadata + from functions_approvals import create_approval_request, TYPE_DELETE_USER_DOCUMENTS + approval = create_approval_request( + request_type=TYPE_DELETE_USER_DOCUMENTS, + group_id=user_id, # Using user_id as partition key for user-related approvals + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'user_id': user_id, + 'user_name': user_name, + 'user_email': user_email + } + ) + + # Log event + log_event("[ControlCenter] Delete User Documents Request Created", { + "admin_user": admin_email, + "user_id": user_id, + "user_email": user_email, + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'success': True, + 'message': 'Document deletion request created successfully. Awaiting approval from another admin.', + 'approval_id': approval['id'] + }), 200 + + except Exception as e: + debug_print(f"Error creating user document deletion request: {e}") + log_event("[ControlCenter] Delete User Documents Request Failed", { + "error": str(e), + "user_id": user_id + }) + return jsonify({'error': str(e)}), 500 + + @app.route('/api/admin/control-center/users/bulk-action', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_bulk_user_action(): + """ + Perform bulk actions on multiple users (access control, file upload control). + """ + try: + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + user_ids = data.get('user_ids', []) + action_type = data.get('action_type') # 'access' or 'file_uploads' + settings = data.get('settings', {}) + + if not user_ids or not action_type or not settings: + return jsonify({'error': 'Missing required fields: user_ids, action_type, settings'}), 400 + + if action_type not in ['access', 'file_uploads']: + return jsonify({'error': 'action_type must be "access" or "file_uploads"'}), 400 + + status = settings.get('status') + datetime_to_allow = settings.get('datetime_to_allow') + + if status not in ['allow', 'deny']: + return jsonify({'error': 'Status must be "allow" or "deny"'}), 400 + + # Validate datetime_to_allow if provided + if datetime_to_allow: + try: + datetime.fromisoformat(datetime_to_allow.replace('Z', '+00:00') if 'Z' in datetime_to_allow else datetime_to_allow) + except ValueError: + return jsonify({'error': 'Invalid datetime format. 
Use ISO 8601 format'}), 400 + + # Apply bulk action + success_count = 0 + failed_users = [] + + update_settings = { + action_type: { + 'status': status, + 'datetime_to_allow': datetime_to_allow + } + } + + for user_id in user_ids: + try: + success = update_user_settings(user_id, update_settings) + if success: + success_count += 1 + else: + failed_users.append(user_id) + except Exception as e: + debug_print(f"Error updating user {user_id}: {e}") + failed_users.append(user_id) + + # Log admin action + admin_user = session.get('user', {}) + log_event("[ControlCenter] Bulk User Action", { + "admin_user": admin_user.get('preferred_username', 'unknown'), + "action_type": action_type, + "user_count": len(user_ids), + "success_count": success_count, + "failed_count": len(failed_users), + "settings": settings + }) + + result = { + 'message': f'Bulk action completed. {success_count} users updated successfully.', + 'success_count': success_count, + 'failed_count': len(failed_users) + } + + if failed_users: + result['failed_users'] = failed_users + + return jsonify(result), 200 + + except Exception as e: + debug_print(f"Error performing bulk user action: {e}") + return jsonify({'error': 'Failed to perform bulk action'}), 500 + + # Group Management APIs + @app.route('/api/admin/control-center/groups', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_all_groups(): + """ + Get all groups with their activity data and metrics. + Supports pagination and filtering. + """ + try: + page = int(request.args.get('page', 1)) + per_page = min(int(request.args.get('per_page', 50)), 100) # Max 100 per page + search = request.args.get('search', '').strip() + status_filter = request.args.get('status_filter', 'all') # all, active, locked, etc. 
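+            # NOTE: status_filter is read here but not yet applied to the Cosmos query below;
+            # groups are currently returned unfiltered and filtered client-side if needed
+            # (see the comment on the WHERE clause construction further down).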
+ force_refresh = request.args.get('force_refresh', 'false').lower() == 'true' + export_all = request.args.get('all', 'false').lower() == 'true' # For CSV export + + # Build query with filters + query_conditions = [] + parameters = [] + + if search: + query_conditions.append("(CONTAINS(LOWER(c.name), @search) OR CONTAINS(LOWER(c.description), @search))") + parameters.append({"name": "@search", "value": search.lower()}) + + # Note: status filtering would need to be implemented based on business logic + # For now, we'll get all groups and filter client-side if needed + + where_clause = " AND ".join(query_conditions) if query_conditions else "1=1" + + if export_all: + # For CSV export, get all groups without pagination + groups_query = f""" + SELECT * + FROM c + WHERE {where_clause} + ORDER BY c.name + """ + + groups = list(cosmos_groups_container.query_items( + query=groups_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + # Enhance group data with activity information + enhanced_groups = [] + for group in groups: + enhanced_group = enhance_group_with_activity(group, force_refresh=force_refresh) + enhanced_groups.append(enhanced_group) + + return jsonify({ + 'success': True, + 'groups': enhanced_groups, + 'total_count': len(enhanced_groups) + }), 200 + + # Get total count for pagination + count_query = f"SELECT VALUE COUNT(1) FROM c WHERE {where_clause}" + total_items_result = list(cosmos_groups_container.query_items( + query=count_query, + parameters=parameters, + enable_cross_partition_query=True + )) + total_items = total_items_result[0] if total_items_result and isinstance(total_items_result[0], int) else 0 + + # Calculate pagination + offset = (page - 1) * per_page + total_pages = (total_items + per_page - 1) // per_page + + # Get paginated results + groups_query = f""" + SELECT * + FROM c + WHERE {where_clause} + ORDER BY c.name + OFFSET {offset} LIMIT {per_page} + """ + + groups = list(cosmos_groups_container.query_items( + query=groups_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + # Enhance group data with activity information + enhanced_groups = [] + for group in groups: + enhanced_group = enhance_group_with_activity(group, force_refresh=force_refresh) + enhanced_groups.append(enhanced_group) + + return jsonify({ + 'groups': enhanced_groups, + 'pagination': { + 'page': page, + 'per_page': per_page, + 'total_items': total_items, + 'total_pages': total_pages, + 'has_prev': page > 1, + 'has_next': page < total_pages + } + }), 200 + + except Exception as e: + debug_print(f"Error getting groups: {e}") + return jsonify({'error': 'Failed to retrieve groups'}), 500 + + @app.route('/api/admin/control-center/groups//status', methods=['PUT']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_update_group_status(group_id): + """ + Update group status (active, locked, upload_disabled, inactive) + Tracks who made the change and when, logs to activity_logs + """ + try: + data = request.get_json() + if not data: + return jsonify({'error': 'No data provided'}), 400 + + new_status = data.get('status') + reason = data.get('reason') # Optional reason for the status change + + if not new_status: + return jsonify({'error': 'Status is required'}), 400 + + # Validate status values + valid_statuses = ['active', 'locked', 'upload_disabled', 'inactive'] + if new_status not in valid_statuses: + return jsonify({'error': f'Invalid status. 
Must be one of: {", ".join(valid_statuses)}'}), 400 + + # Get the group + try: + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + except: + return jsonify({'error': 'Group not found'}), 404 + + # Get admin user info + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid', 'unknown') + admin_email = admin_user.get('preferred_username', 'unknown') + + # Get old status for logging + old_status = group.get('status', 'active') # Default to 'active' if not set + + # Only update and log if status actually changed + if old_status != new_status: + # Update group status + group['status'] = new_status + group['modifiedDate'] = datetime.utcnow().isoformat() + + # Add status change metadata + if 'statusHistory' not in group: + group['statusHistory'] = [] + + group['statusHistory'].append({ + 'old_status': old_status, + 'new_status': new_status, + 'changed_by_user_id': admin_user_id, + 'changed_by_email': admin_email, + 'changed_at': datetime.utcnow().isoformat(), + 'reason': reason + }) + + # Update in database + cosmos_groups_container.upsert_item(group) + + # Log to activity_logs container for audit trail + from functions_activity_logging import log_group_status_change + log_group_status_change( + group_id=group_id, + group_name=group.get('name', 'Unknown'), + old_status=old_status, + new_status=new_status, + changed_by_user_id=admin_user_id, + changed_by_email=admin_email, + reason=reason + ) + + # Log admin action (legacy logging) + log_event("[ControlCenter] Group Status Update", { + "admin_user": admin_email, + "admin_user_id": admin_user_id, + "group_id": group_id, + "group_name": group.get('name'), + "old_status": old_status, + "new_status": new_status, + "reason": reason + }) + + return jsonify({ + 'message': 'Group status updated successfully', + 'old_status': old_status, + 'new_status': new_status + }), 200 + else: + return jsonify({ + 'message': 'Group status unchanged', + 'status': new_status + }), 200 + + except Exception as e: + debug_print(f"Error updating group status: {e}") + return jsonify({'error': 'Failed to update group status'}), 500 + + @app.route('/api/admin/control-center/groups/', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_group_details_admin(group_id): + """ + Get detailed information about a specific group + """ + try: + # Get the group + try: + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + except: + return jsonify({'error': 'Group not found'}), 404 + + # Enhance with activity data + enhanced_group = enhance_group_with_activity(group) + + return jsonify(enhanced_group), 200 + + except Exception as e: + debug_print(f"Error getting group details: {e}") + return jsonify({'error': 'Failed to retrieve group details'}), 500 + + @app.route('/api/admin/control-center/groups/', methods=['DELETE']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_delete_group_admin(group_id): + """ + Create an approval request to delete a group and all its documents. + Requires approval from group owner or another admin. 
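+        The group's name and owner contact details are recorded in the approval metadata.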
+ + Body: + reason (str): Explanation for deleting the group (required) + """ + try: + data = request.get_json() or {} + reason = data.get('reason', '').strip() + + if not reason: + return jsonify({'error': 'Reason is required for group deletion'}), 400 + + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + # Validate group exists + try: + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + except: + return jsonify({'error': 'Group not found'}), 404 + + # Create approval request + approval = create_approval_request( + request_type=TYPE_DELETE_GROUP, + group_id=group_id, + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'group_name': group.get('name'), + 'owner_id': group.get('owner', {}).get('id'), + 'owner_email': group.get('owner', {}).get('email') + } + ) + + # Log event + log_event("[ControlCenter] Delete Group Request Created", { + "admin_user": admin_email, + "group_id": group_id, + "group_name": group.get('name'), + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'success': True, + 'message': 'Group deletion request created and pending approval', + 'approval_id': approval['id'], + 'status': 'pending' + }), 200 + + except Exception as e: + debug_print(f"Error creating group deletion request: {e}") + return jsonify({'error': str(e)}), 500 + + @app.route('/api/admin/control-center/groups//delete-documents', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_delete_group_documents_admin(group_id): + """ + Create an approval request to delete all documents in a group. + Requires approval from group owner or another admin. 
+ + Body: + reason (str): Explanation for deleting documents (required) + """ + try: + data = request.get_json() or {} + reason = data.get('reason', '').strip() + + if not reason: + return jsonify({'error': 'Reason is required for document deletion'}), 400 + + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + # Validate group exists + try: + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + except: + return jsonify({'error': 'Group not found'}), 404 + + # Create approval request + approval = create_approval_request( + request_type=TYPE_DELETE_DOCUMENTS, + group_id=group_id, + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'group_name': group.get('name') + } + ) + + # Log event + log_event("[ControlCenter] Delete Documents Request Created", { + "admin_user": admin_email, + "group_id": group_id, + "group_name": group.get('name'), + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'success': True, + 'message': 'Document deletion request created and pending approval', + 'approval_id': approval['id'], + 'status': 'pending' + }), 200 + + except Exception as e: + debug_print(f"Error creating document deletion request: {e}") + return jsonify({'error': str(e)}), 500 + + @app.route('/api/admin/control-center/groups//members', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_group_members_admin(group_id): + """ + Get list of group members for ownership transfer selection + """ + try: + # Get the group + try: + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + except: + return jsonify({'error': 'Group not found'}), 404 + + # Get member list with user details + members = [] + for member in group.get('users', []): + # Skip the current owner from the list + if member.get('userId') == group.get('owner', {}).get('id'): + continue + + members.append({ + 'userId': member.get('userId'), + 'email': member.get('email', 'No email'), + 'displayName': member.get('displayName', 'Unknown User') + }) + + return jsonify({'members': members}), 200 + + except Exception as e: + debug_print(f"Error getting group members: {e}") + return jsonify({'error': 'Failed to retrieve group members'}), 500 + + @app.route('/api/admin/control-center/groups//take-ownership', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_take_group_ownership(group_id): + """ + Create an approval request for admin to take ownership of a group. + Requires approval from group owner or another admin. 
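+        The current owner is recorded in the approval metadata as old_owner_id / old_owner_email.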
+ + Body: + reason (str): Explanation for taking ownership (required) + """ + try: + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + if not admin_user_id: + return jsonify({'error': 'Could not identify admin user'}), 400 + + # Get request body + data = request.get_json() or {} + reason = data.get('reason', '').strip() + + if not reason: + return jsonify({'error': 'Reason is required for ownership transfer'}), 400 + + # Validate group exists + try: + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + except: + return jsonify({'error': 'Group not found'}), 404 + + # Create approval request + approval = create_approval_request( + request_type=TYPE_TAKE_OWNERSHIP, + group_id=group_id, + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'old_owner_id': group.get('owner', {}).get('id'), + 'old_owner_email': group.get('owner', {}).get('email') + } + ) + + # Log event + log_event("[ControlCenter] Take Ownership Request Created", { + "admin_user": admin_email, + "group_id": group_id, + "group_name": group.get('name'), + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'success': True, + 'message': 'Ownership transfer request created and pending approval', + 'approval_id': approval['id'], + 'status': 'pending' + }), 200 + + except Exception as e: + debug_print(f"Error creating take ownership request: {e}") + return jsonify({'error': str(e)}), 500 + + @app.route('/api/admin/control-center/groups//transfer-ownership', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_transfer_group_ownership(group_id): + """ + Create an approval request to transfer group ownership to another member. + Requires approval from group owner or another admin. 
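+        The proposed new owner must already be a member of the group; otherwise the request is rejected with 400.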
+ + Body: + newOwnerId (str): User ID of the new owner (required) + reason (str): Explanation for ownership transfer (required) + """ + try: + data = request.get_json() + new_owner_user_id = data.get('newOwnerId') + reason = data.get('reason', '').strip() + + if not new_owner_user_id: + return jsonify({'error': 'Missing newOwnerId'}), 400 + + if not reason: + return jsonify({'error': 'Reason is required for ownership transfer'}), 400 + + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + # Get the group + try: + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + except: + return jsonify({'error': 'Group not found'}), 404 + + # Find the new owner in members list + new_owner_member = None + for member in group.get('users', []): + if member.get('userId') == new_owner_user_id: + new_owner_member = member + break + + if not new_owner_member: + return jsonify({'error': 'Selected user is not a member of this group'}), 400 + + # Create approval request + approval = create_approval_request( + request_type=TYPE_TRANSFER_OWNERSHIP, + group_id=group_id, + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'new_owner_id': new_owner_user_id, + 'new_owner_email': new_owner_member.get('email'), + 'new_owner_name': new_owner_member.get('displayName'), + 'old_owner_id': group.get('owner', {}).get('id'), + 'old_owner_email': group.get('owner', {}).get('email') + } + ) + + # Log event + log_event("[ControlCenter] Transfer Ownership Request Created", { + "admin_user": admin_email, + "group_id": group_id, + "group_name": group.get('name'), + "new_owner": new_owner_member.get('email'), + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'success': True, + 'message': 'Ownership transfer request created and pending approval', + 'approval_id': approval['id'], + 'status': 'pending' + }), 200 + + except Exception as e: + debug_print(f"Error creating transfer ownership request: {e}") + return jsonify({'error': str(e)}), 500 + + @app.route('/api/admin/control-center/groups//add-member', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_add_group_member(group_id): + """ + Admin adds a member to a group (used by both single add and CSV bulk upload) + """ + try: + data = request.get_json() + user_id = data.get('userId') + # Support both 'name' (from CSV) and 'displayName' (from single add form) + name = data.get('displayName') or data.get('name') + email = data.get('email') + role = data.get('role', 'user').lower() + + if not user_id or not name or not email: + return jsonify({'error': 'Missing required fields: userId, name/displayName, email'}), 400 + + # Validate role + valid_roles = ['admin', 'document_manager', 'user'] + if role not in valid_roles: + return jsonify({'error': f'Invalid role. 
Must be: {", ".join(valid_roles)}'}), 400 + + admin_user = session.get('user', {}) + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + + # Get the group + try: + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + except: + return jsonify({'error': 'Group not found'}), 404 + + # Check if user already exists (skip duplicate) + existing_user = False + for member in group.get('users', []): + if member.get('userId') == user_id: + existing_user = True + break + + if existing_user: + return jsonify({ + 'message': f'User {email} already exists in group', + 'skipped': True + }), 200 + + # Add user to users array + group.setdefault('users', []).append({ + 'userId': user_id, + 'email': email, + 'displayName': name + }) + + # Add to appropriate role array + if role == 'admin': + if user_id not in group.get('admins', []): + group.setdefault('admins', []).append(user_id) + elif role == 'document_manager': + if user_id not in group.get('documentManagers', []): + group.setdefault('documentManagers', []).append(user_id) + + # Update modification timestamp + group['modifiedDate'] = datetime.utcnow().isoformat() + + # Save group + cosmos_groups_container.upsert_item(group) + + # Determine the action source (single add vs bulk CSV) + source = data.get('source', 'csv') # Default to 'csv' for backward compatibility + action_type = 'add_member_directly' if source == 'single' else 'admin_add_member_csv' + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'activity_type': action_type, + 'timestamp': datetime.utcnow().isoformat(), + 'admin_user_id': admin_user.get('oid') or admin_user.get('sub'), + 'admin_email': admin_email, + 'group_id': group_id, + 'group_name': group.get('name', 'Unknown'), + 'member_user_id': user_id, + 'member_email': email, + 'member_name': name, + 'member_role': role, + 'source': source, + 'description': f"Admin {admin_email} added member {name} ({email}) to group {group.get('name', group_id)} as {role}" + } + cosmos_activity_logs_container.create_item(body=activity_record) + + # Log to Application Insights + log_event("[ControlCenter] Admin Add Group Member", { + "admin_user": admin_email, + "group_id": group_id, + "group_name": group.get('name'), + "member_email": email, + "member_role": role + }) + + return jsonify({ + 'message': f'Member {email} added successfully', + 'skipped': False + }), 200 + + except Exception as e: + debug_print(f"Error adding group member: {e}") + return jsonify({'error': 'Failed to add member'}), 500 + + @app.route('/api/admin/control-center/groups//activity', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_get_group_activity(group_id): + """ + Get activity timeline for a specific group from activity logs + Returns document creation/deletion, member changes, status changes, and conversations + """ + try: + # Get time range filter (default: last 30 days) + days = request.args.get('days', '30') + + # Calculate date filter + cutoff_date = None + if days != 'all': + try: + days_int = int(days) + cutoff_date = (datetime.utcnow() - timedelta(days=days_int)).isoformat() + except ValueError: + pass + + # Build queries - use two separate queries to avoid nested property access issues + # Query 1: Activities with c.group.group_id (member/status changes) + # Query 2: Activities with c.workspace_context.group_id (document operations) + + time_filter = "AND c.timestamp >= @cutoff_date" if 
cutoff_date else "" + + # Query 1: Member and status activities (all activity types with c.group.group_id) + # Use SELECT * to get complete raw documents for modal display + query1 = f""" + SELECT * + FROM c + WHERE c.group.group_id = @group_id + {time_filter} + """ + + # Query 2: Document activities (all activity types with c.workspace_context.group_id) + # Use SELECT * to get complete raw documents for modal display + query2 = f""" + SELECT * + FROM c + WHERE c.workspace_context.group_id = @group_id + {time_filter} + """ + + # Log the queries for debugging + debug_print(f"[Group Activity] Querying for group: {group_id}, days: {days}") + debug_print(f"[Group Activity] Query 1: {query1}") + debug_print(f"[Group Activity] Query 2: {query2}") + + parameters = [ + {"name": "@group_id", "value": group_id} + ] + + if cutoff_date: + parameters.append({"name": "@cutoff_date", "value": cutoff_date}) + + debug_print(f"[Group Activity] Parameters: {parameters}") + + # Execute both queries + activities = [] + + try: + # Query 1: Member and status activities + activities1 = list(cosmos_activity_logs_container.query_items( + query=query1, + parameters=parameters, + enable_cross_partition_query=True + )) + debug_print(f"[Group Activity] Query 1 returned {len(activities1)} activities") + activities.extend(activities1) + except Exception as e: + debug_print(f"[Group Activity] Query 1 failed: {e}") + + try: + # Query 2: Document activities + activities2 = list(cosmos_activity_logs_container.query_items( + query=query2, + parameters=parameters, + enable_cross_partition_query=True + )) + debug_print(f"[Group Activity] Query 2 returned {len(activities2)} activities") + activities.extend(activities2) + except Exception as e: + debug_print(f"[Group Activity] Query 2 failed: {e}") + + # Sort combined results by timestamp descending + activities.sort(key=lambda x: x.get('timestamp', ''), reverse=True) + + # Format activities for timeline display + formatted_activities = [] + for activity in activities: + formatted = { + 'id': activity.get('id'), + 'type': activity.get('activity_type'), + 'timestamp': activity.get('timestamp'), + 'user_id': activity.get('user_id'), + 'description': activity.get('description', '') + } + + # Add type-specific details + activity_type = activity.get('activity_type') + + if activity_type == 'document_creation': + doc = activity.get('document', {}) + formatted['document'] = { + 'file_name': doc.get('file_name'), + 'file_type': doc.get('file_type'), + 'file_size_bytes': doc.get('file_size_bytes'), + 'page_count': doc.get('page_count') + } + formatted['icon'] = 'file-earmark-plus' + formatted['color'] = 'success' + + elif activity_type == 'document_deletion': + doc = activity.get('document', {}) + formatted['document'] = { + 'file_name': doc.get('file_name'), + 'file_type': doc.get('file_type') + } + formatted['icon'] = 'file-earmark-minus' + formatted['color'] = 'danger' + + elif activity_type == 'document_metadata_update': + doc = activity.get('document', {}) + formatted['document'] = { + 'file_name': doc.get('file_name') + } + formatted['icon'] = 'pencil-square' + formatted['color'] = 'info' + + elif activity_type == 'group_member_added': + added_by = activity.get('added_by', {}) + added_member = activity.get('added_member', {}) + formatted['member'] = { + 'name': added_member.get('name'), + 'email': added_member.get('email'), + 'role': added_member.get('role') + } + formatted['added_by'] = { + 'email': added_by.get('email'), + 'role': added_by.get('role') + } + formatted['icon'] = 
'person-plus' + formatted['color'] = 'primary' + + elif activity_type == 'group_member_deleted': + removed_by = activity.get('removed_by', {}) + removed_member = activity.get('removed_member', {}) + formatted['member'] = { + 'name': removed_member.get('name'), + 'email': removed_member.get('email') + } + formatted['removed_by'] = { + 'email': removed_by.get('email'), + 'role': removed_by.get('role') + } + formatted['icon'] = 'person-dash' + formatted['color'] = 'warning' + + elif activity_type == 'group_status_change': + status_change = activity.get('status_change', {}) + formatted['status_change'] = { + 'from_status': status_change.get('old_status'), # Use old_status from log + 'to_status': status_change.get('new_status') # Use new_status from log + } + formatted['icon'] = 'shield-lock' + formatted['color'] = 'secondary' + + elif activity_type == 'conversation_creation': + formatted['icon'] = 'chat-dots' + formatted['color'] = 'info' + + elif activity_type == 'token_usage': + usage = activity.get('usage', {}) + formatted['token_usage'] = { + 'total_tokens': usage.get('total_tokens'), + 'prompt_tokens': usage.get('prompt_tokens'), + 'completion_tokens': usage.get('completion_tokens'), + 'model': usage.get('model'), + 'token_type': activity.get('token_type') # 'chat' or 'embedding' + } + # Add chat details if available + chat_details = activity.get('chat_details', {}) + if chat_details: + formatted['token_usage']['conversation_id'] = chat_details.get('conversation_id') + formatted['token_usage']['message_id'] = chat_details.get('message_id') + # Add embedding details if available + embedding_details = activity.get('embedding_details', {}) + if embedding_details: + formatted['token_usage']['document_id'] = embedding_details.get('document_id') + formatted['token_usage']['file_name'] = embedding_details.get('file_name') + formatted['icon'] = 'cpu' + formatted['color'] = 'info' + + else: + # Fallback for unknown activity types - still show them! + formatted['icon'] = 'circle' + formatted['color'] = 'secondary' + # Keep any additional data that might be in the activity + if activity.get('status_change'): + formatted['status_change'] = activity.get('status_change') + if activity.get('document'): + formatted['document'] = activity.get('document') + if activity.get('group'): + formatted['group'] = activity.get('group') + + formatted_activities.append(formatted) + + return jsonify({ + 'group_id': group_id, + 'activities': formatted_activities, + 'raw_activities': activities, # Include raw activities for modal display + 'count': len(formatted_activities), + 'time_range_days': days + }), 200 + + except Exception as e: + debug_print(f"Error fetching group activity: {e}") + import traceback + traceback.print_exc() + return jsonify({'error': f'Failed to fetch group activity: {str(e)}'}), 500 + + # Public Workspaces API + @app.route('/api/admin/control-center/public-workspaces', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_control_center_public_workspaces(): + """ + Get paginated list of public workspaces with activity data for control center management. + Similar to groups endpoint but for public workspaces. 
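+        Query parameters (all optional):
+            page (int): 1-based page number, default 1.
+            per_page (int): page size, capped at 100.
+            search (str): case-insensitive match against workspace name or description.
+            status_filter (str): currently only 'all' and 'active' return results.
+            force_refresh (bool): passed through to the activity enhancement step.
+            all (bool): return every matching workspace without pagination (used for CSV export).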
+ """ + try: + # Parse request parameters + page = int(request.args.get('page', 1)) + per_page = min(int(request.args.get('per_page', 50)), 100) # Max 100 per page + search_term = request.args.get('search', '').strip() + status_filter = request.args.get('status_filter', 'all') + force_refresh = request.args.get('force_refresh', 'false').lower() == 'true' + export_all = request.args.get('all', 'false').lower() == 'true' # For CSV export + + # Calculate offset (only needed if not exporting all) + offset = (page - 1) * per_page if not export_all else 0 + + # Base query for public workspaces + if search_term: + # Search in workspace name and description + query = """ + SELECT * FROM c + WHERE CONTAINS(LOWER(c.name), @search_term) + OR CONTAINS(LOWER(c.description), @search_term) + ORDER BY c.name + """ + parameters = [{"name": "@search_term", "value": search_term.lower()}] + else: + # Get all workspaces + query = "SELECT * FROM c ORDER BY c.name" + parameters = [] + + # Execute query to get all matching workspaces + all_workspaces = list(cosmos_public_workspaces_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + + # Apply status filter if specified + if status_filter != 'all': + # For now, we'll treat all workspaces as 'active' + # This can be enhanced later with actual status logic + if status_filter != 'active': + all_workspaces = [] + + # Calculate pagination + total_count = len(all_workspaces) + total_pages = math.ceil(total_count / per_page) if per_page > 0 else 0 + + # Get the workspaces for current page or all for export + if export_all: + workspaces_page = all_workspaces # Get all workspaces for CSV export + else: + workspaces_page = all_workspaces[offset:offset + per_page] + + # Enhance each workspace with activity data + enhanced_workspaces = [] + for workspace in workspaces_page: + try: + enhanced_workspace = enhance_public_workspace_with_activity(workspace, force_refresh=force_refresh) + enhanced_workspaces.append(enhanced_workspace) + except Exception as enhance_e: + debug_print(f"Error enhancing workspace {workspace.get('id', 'unknown')}: {enhance_e}") + # Include the original workspace if enhancement fails + enhanced_workspaces.append(workspace) + + # Return response (paginated or all for export) + if export_all: + return jsonify({ + 'success': True, + 'workspaces': enhanced_workspaces, + 'total_count': total_count, + 'filters': { + 'search': search_term, + 'status_filter': status_filter, + 'force_refresh': force_refresh + } + }) + else: + return jsonify({ + 'workspaces': enhanced_workspaces, + 'pagination': { + 'page': page, + 'per_page': per_page, + 'total_count': total_count, + 'total_pages': total_pages, + 'has_next': page < total_pages, + 'has_prev': page > 1 + }, + 'filters': { + 'search': search_term, + 'status_filter': status_filter, + 'force_refresh': force_refresh + } + }) + + except Exception as e: + debug_print(f"Error getting public workspaces for control center: {e}") + return jsonify({'error': 'Failed to retrieve public workspaces'}), 500 + + @app.route('/api/admin/control-center/public-workspaces//status', methods=['PUT']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_update_public_workspace_status(workspace_id): + """ + Update public workspace status (active, locked, upload_disabled, inactive) + Tracks who made the change and when, logs to activity_logs + """ + try: + data = request.get_json() + if not data: + return jsonify({'error': 'No data 
provided'}), 400 + + new_status = data.get('status') + reason = data.get('reason') # Optional reason for the status change + + if not new_status: + return jsonify({'error': 'Status is required'}), 400 + + # Validate status values + valid_statuses = ['active', 'locked', 'upload_disabled', 'inactive'] + if new_status not in valid_statuses: + return jsonify({'error': f'Invalid status. Must be one of: {", ".join(valid_statuses)}'}), 400 + + # Get the workspace + try: + workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id) + except: + return jsonify({'error': 'Public workspace not found'}), 404 + + # Get admin user info + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid', 'unknown') + admin_email = admin_user.get('preferred_username', 'unknown') + + # Get old status for logging + old_status = workspace.get('status', 'active') # Default to 'active' if not set + + # Only update and log if status actually changed + if old_status != new_status: + # Update workspace status + workspace['status'] = new_status + workspace['modifiedDate'] = datetime.utcnow().isoformat() + + # Add status change metadata + if 'statusHistory' not in workspace: + workspace['statusHistory'] = [] + + workspace['statusHistory'].append({ + 'old_status': old_status, + 'new_status': new_status, + 'changed_by_user_id': admin_user_id, + 'changed_by_email': admin_email, + 'changed_at': datetime.utcnow().isoformat(), + 'reason': reason + }) + + # Update in database + cosmos_public_workspaces_container.upsert_item(workspace) + + # Log to activity_logs container for audit trail + from functions_activity_logging import log_public_workspace_status_change + log_public_workspace_status_change( + workspace_id=workspace_id, + workspace_name=workspace.get('name', 'Unknown'), + old_status=old_status, + new_status=new_status, + changed_by_user_id=admin_user_id, + changed_by_email=admin_email, + reason=reason + ) + + # Log admin action (legacy logging) + log_event("[ControlCenter] Public Workspace Status Update", { + "admin_user": admin_email, + "admin_user_id": admin_user_id, + "workspace_id": workspace_id, + "workspace_name": workspace.get('name'), + "old_status": old_status, + "new_status": new_status, + "reason": reason + }) + + return jsonify({ + 'message': 'Public workspace status updated successfully', + 'old_status': old_status, + 'new_status': new_status + }), 200 + else: + return jsonify({ + 'message': 'Status unchanged', + 'status': new_status + }), 200 + + except Exception as e: + debug_print(f"Error updating public workspace status: {e}") + return jsonify({'error': 'Failed to update public workspace status'}), 500 + + @app.route('/api/admin/control-center/public-workspaces/bulk-action', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_bulk_public_workspace_action(): + """ + Perform bulk actions on multiple public workspaces. 
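+        Body:
+            workspace_ids (list[str]): workspace ids to act on (required).
+            action (str): one of the actions listed below (required).
+            reason (str): optional justification recorded in the status history and audit log.
+
+        Example body (illustrative values):
+            {"workspace_ids": ["ws-123", "ws-456"], "action": "lock", "reason": "security review"}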
+ Actions: lock, unlock, disable_uploads, enable_uploads, delete_documents + """ + try: + data = request.get_json() + if not data: + return jsonify({'error': 'No data provided'}), 400 + + workspace_ids = data.get('workspace_ids', []) + action = data.get('action') + reason = data.get('reason') # Optional reason + + if not workspace_ids or not isinstance(workspace_ids, list): + return jsonify({'error': 'workspace_ids must be a non-empty array'}), 400 + + if not action: + return jsonify({'error': 'Action is required'}), 400 + + # Validate action + valid_actions = ['lock', 'unlock', 'disable_uploads', 'enable_uploads', 'delete_documents'] + if action not in valid_actions: + return jsonify({'error': f'Invalid action. Must be one of: {", ".join(valid_actions)}'}), 400 + + # Get admin user info + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid', 'unknown') + admin_email = admin_user.get('preferred_username', 'unknown') + + # Map actions to status values + action_to_status = { + 'lock': 'locked', + 'unlock': 'active', + 'disable_uploads': 'upload_disabled', + 'enable_uploads': 'active' + } + + successful = [] + failed = [] + + for workspace_id in workspace_ids: + try: + # Get the workspace + workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id) + + if action == 'delete_documents': + # Delete all documents for this workspace + # Query all documents + doc_query = "SELECT c.id FROM c WHERE c.public_workspace_id = @workspace_id" + doc_params = [{"name": "@workspace_id", "value": workspace_id}] + + docs_to_delete = list(cosmos_public_documents_container.query_items( + query=doc_query, + parameters=doc_params, + enable_cross_partition_query=True + )) + + deleted_count = 0 + for doc in docs_to_delete: + try: + delete_document_chunks(doc['id']) + delete_document(doc['id']) + deleted_count += 1 + except Exception as del_e: + debug_print(f"Error deleting document {doc['id']}: {del_e}") + + successful.append({ + 'workspace_id': workspace_id, + 'workspace_name': workspace.get('name', 'Unknown'), + 'action': action, + 'documents_deleted': deleted_count + }) + + # Log the action + log_event("[ControlCenter] Bulk Public Workspace Documents Deleted", { + "admin_user": admin_email, + "admin_user_id": admin_user_id, + "workspace_id": workspace_id, + "workspace_name": workspace.get('name'), + "documents_deleted": deleted_count, + "reason": reason + }) + + else: + # Status change action + new_status = action_to_status[action] + old_status = workspace.get('status', 'active') + + if old_status != new_status: + workspace['status'] = new_status + workspace['modifiedDate'] = datetime.utcnow().isoformat() + + # Add status history + if 'statusHistory' not in workspace: + workspace['statusHistory'] = [] + + workspace['statusHistory'].append({ + 'old_status': old_status, + 'new_status': new_status, + 'changed_by_user_id': admin_user_id, + 'changed_by_email': admin_email, + 'changed_at': datetime.utcnow().isoformat(), + 'reason': reason, + 'bulk_action': True + }) + + cosmos_public_workspaces_container.upsert_item(workspace) + + # Log activity + from functions_activity_logging import log_public_workspace_status_change + log_public_workspace_status_change( + workspace_id=workspace_id, + workspace_name=workspace.get('name', 'Unknown'), + old_status=old_status, + new_status=new_status, + changed_by_user_id=admin_user_id, + changed_by_email=admin_email, + reason=f"Bulk action: {reason}" if reason else "Bulk action" + ) + + successful.append({ + 
'workspace_id': workspace_id, + 'workspace_name': workspace.get('name', 'Unknown'), + 'action': action, + 'old_status': old_status, + 'new_status': new_status + }) + + except Exception as e: + failed.append({ + 'workspace_id': workspace_id, + 'error': str(e) + }) + debug_print(f"Error processing workspace {workspace_id}: {e}") + + return jsonify({ + 'message': 'Bulk action completed', + 'successful': successful, + 'failed': failed, + 'summary': { + 'total': len(workspace_ids), + 'success': len(successful), + 'failed': len(failed) + } + }), 200 + + except Exception as e: + debug_print(f"Error performing bulk public workspace action: {e}") + return jsonify({'error': 'Failed to perform bulk action'}), 500 + + @app.route('/api/admin/control-center/public-workspaces/', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_public_workspace_details(workspace_id): + """ + Get detailed information about a specific public workspace. + """ + try: + # Get the workspace + workspace = cosmos_public_workspaces_container.read_item( + item=workspace_id, + partition_key=workspace_id + ) + + # Enhance with activity information + enhanced_workspace = enhance_public_workspace_with_activity(workspace) + + return jsonify(enhanced_workspace), 200 + + except Exception as e: + debug_print(f"Error getting public workspace details: {e}") + return jsonify({'error': 'Failed to retrieve workspace details'}), 500 + + + @app.route('/api/admin/control-center/public-workspaces//members', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_public_workspace_members(workspace_id): + """ + Get all members of a specific public workspace with their roles. + Returns admins, document managers, and owner information. 
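+        Entries stored in the legacy format (bare userId strings) are resolved against the
+        user settings container; entries that cannot be resolved are skipped.
+        Response: JSON with success, members (userId, email, displayName, role per entry), and workspace_name.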
+ """ + try: + # Get the workspace + workspace = cosmos_public_workspaces_container.read_item( + item=workspace_id, + partition_key=workspace_id + ) + + # Create members list with roles + members = [] + + # Add owner - owner is an object with userId, email, displayName + owner = workspace.get('owner') + if owner: + members.append({ + 'userId': owner.get('userId', ''), + 'email': owner.get('email', ''), + 'displayName': owner.get('displayName', owner.get('email', 'Unknown')), + 'role': 'owner' + }) + + # Add admins - admins is an array of objects with userId, email, displayName + admins = workspace.get('admins', []) + for admin in admins: + # Handle both object format and string format (for backward compatibility) + if isinstance(admin, dict): + members.append({ + 'userId': admin.get('userId', ''), + 'email': admin.get('email', ''), + 'displayName': admin.get('displayName', admin.get('email', 'Unknown')), + 'role': 'admin' + }) + else: + # Legacy format where admin is just a userId string + try: + user = cosmos_user_settings_container.read_item( + item=admin, + partition_key=admin + ) + members.append({ + 'userId': admin, + 'email': user.get('email', ''), + 'displayName': user.get('display_name', user.get('email', '')), + 'role': 'admin' + }) + except: + pass + + # Add document managers - documentManagers is an array of objects with userId, email, displayName + doc_managers = workspace.get('documentManagers', []) + for dm in doc_managers: + # Handle both object format and string format (for backward compatibility) + if isinstance(dm, dict): + members.append({ + 'userId': dm.get('userId', ''), + 'email': dm.get('email', ''), + 'displayName': dm.get('displayName', dm.get('email', 'Unknown')), + 'role': 'documentManager' + }) + else: + # Legacy format where documentManager is just a userId string + try: + user = cosmos_user_settings_container.read_item( + item=dm, + partition_key=dm + ) + members.append({ + 'userId': dm, + 'email': user.get('email', ''), + 'displayName': user.get('display_name', user.get('email', '')), + 'role': 'documentManager' + }) + except: + pass + + return jsonify({ + 'success': True, + 'members': members, + 'workspace_name': workspace.get('name', 'Unknown') + }), 200 + + except Exception as e: + debug_print(f"Error getting workspace members: {e}") + return jsonify({'error': 'Failed to retrieve workspace members'}), 500 + + + @app.route('/api/admin/control-center/public-workspaces//add-member', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_add_workspace_member(workspace_id): + """ + Admin adds a member to a public workspace (used by both single add and CSV bulk upload) + """ + try: + data = request.get_json() + user_id = data.get('userId') + name = data.get('displayName') or data.get('name') + email = data.get('email') + role = data.get('role', 'user').lower() + + if not user_id or not name or not email: + return jsonify({'error': 'Missing required fields: userId, name/displayName, email'}), 400 + + # Validate role + valid_roles = ['admin', 'document_manager', 'user'] + if role not in valid_roles: + return jsonify({'error': f'Invalid role. 
Must be: {", ".join(valid_roles)}'}), 400
+
+            admin_user = session.get('user', {})
+            admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown'))
+
+            # Get the workspace
+            try:
+                workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id)
+            except:
+                return jsonify({'error': 'Public workspace not found'}), 404
+
+            # Check if user already exists
+            owner = workspace.get('owner', {})
+            owner_id = owner.get('userId') if isinstance(owner, dict) else owner
+            admins = workspace.get('admins', [])
+            doc_managers = workspace.get('documentManagers', [])
+
+            # Extract user IDs from arrays (handle both object and string formats)
+            admin_ids = [a.get('userId') if isinstance(a, dict) else a for a in admins]
+            doc_manager_ids = [dm.get('userId') if isinstance(dm, dict) else dm for dm in doc_managers]
+
+            if user_id == owner_id or user_id in admin_ids or user_id in doc_manager_ids:
+                return jsonify({
+                    'message': f'User {email} already exists in workspace',
+                    'skipped': True
+                }), 200
+
+            # Create full user object
+            user_obj = {
+                'userId': user_id,
+                'displayName': name,
+                'email': email
+            }
+
+            # Add to appropriate role array with full user object
+            if role == 'admin':
+                workspace.setdefault('admins', []).append(user_obj)
+            elif role == 'document_manager':
+                workspace.setdefault('documentManagers', []).append(user_obj)
+            # Note: 'user' role doesn't have a separate array in public workspaces
+            # They are implicit members through document access
+
+            # Update modification timestamp
+            workspace['modifiedDate'] = datetime.utcnow().isoformat()
+
+            # Save workspace
+            cosmos_public_workspaces_container.upsert_item(workspace)
+
+            # Determine the action source
+            source = data.get('source', 'csv')
+            action_type = 'add_workspace_member_directly' if source == 'single' else 'admin_add_workspace_member_csv'
+
+            # Log to activity logs
+            activity_record = {
+                'id': str(uuid.uuid4()),
+                'activity_type': action_type,
+                'timestamp': datetime.utcnow().isoformat(),
+                'admin_user_id': admin_user.get('oid') or admin_user.get('sub'),
+                'admin_email': admin_email,
+                'workspace_id': workspace_id,
+                'workspace_name': workspace.get('name', 'Unknown'),
+                'member_user_id': user_id,
+                'member_email': email,
+                'member_name': name,
+                'member_role': role,
+                'source': source,
+                'description': f"Admin {admin_email} added member {name} ({email}) to workspace {workspace.get('name', workspace_id)} as {role}",
+                'workspace_context': {
+                    'public_workspace_id': workspace_id
+                }
+            }
+            cosmos_activity_logs_container.create_item(body=activity_record)
+
+            # Log to Application Insights
+            log_event("[ControlCenter] Admin Add Workspace Member", {
+                "admin_user": admin_email,
+                "workspace_id": workspace_id,
+                "workspace_name": workspace.get('name'),
+                "member_email": email,
+                "member_role": role
+            })
+
+            return jsonify({
+                'message': f'Member {email} added successfully',
+                'skipped': False
+            }), 200
+
+        except Exception as e:
+            debug_print(f"Error adding workspace member: {e}")
+            return jsonify({'error': 'Failed to add workspace member'}), 500
+
+
+    @app.route('/api/admin/control-center/public-workspaces/<workspace_id>/add-member-single', methods=['POST'])
+    @swagger_route(security=get_auth_security())
+    @login_required
+    @control_center_required('admin')
+    def api_admin_add_workspace_member_single(workspace_id):
+        """
+        Admin adds a single member to a public workspace via the Add Member modal
+        """
+        try:
+            data = request.get_json()
+            user_id = data.get('userId')
+            display_name =
data.get('displayName') + email = data.get('email') + role = data.get('role', 'document_manager').lower() + + if not user_id or not display_name or not email: + return jsonify({'error': 'Missing required fields: userId, displayName, email'}), 400 + + # Validate role - workspaces only support admin and document_manager + valid_roles = ['admin', 'document_manager'] + if role not in valid_roles: + return jsonify({'error': f'Invalid role. Must be: {", ".join(valid_roles)}'}), 400 + + admin_user = session.get('user', {}) + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + + # Get the workspace + try: + workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id) + except: + return jsonify({'error': 'Public workspace not found'}), 404 + + # Check if user already exists + owner = workspace.get('owner', {}) + owner_id = owner.get('userId') if isinstance(owner, dict) else owner + admins = workspace.get('admins', []) + doc_managers = workspace.get('documentManagers', []) + + # Extract user IDs from arrays (handle both object and string formats) + admin_ids = [a.get('userId') if isinstance(a, dict) else a for a in admins] + doc_manager_ids = [dm.get('userId') if isinstance(dm, dict) else dm for dm in doc_managers] + + if user_id == owner_id or user_id in admin_ids or user_id in doc_manager_ids: + return jsonify({ + 'error': f'User {email} already exists in workspace' + }), 400 + + # Add to appropriate role array with full user info + user_obj = { + 'userId': user_id, + 'displayName': display_name, + 'email': email + } + + if role == 'admin': + workspace.setdefault('admins', []).append(user_obj) + elif role == 'document_manager': + workspace.setdefault('documentManagers', []).append(user_obj) + + # Update modification timestamp + workspace['modifiedDate'] = datetime.utcnow().isoformat() + + # Save workspace + cosmos_public_workspaces_container.upsert_item(workspace) + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'activity_type': 'add_workspace_member_directly', + 'timestamp': datetime.utcnow().isoformat(), + 'admin_user_id': admin_user.get('oid') or admin_user.get('sub'), + 'admin_email': admin_email, + 'workspace_id': workspace_id, + 'workspace_name': workspace.get('name', 'Unknown'), + 'member_user_id': user_id, + 'member_email': email, + 'member_name': display_name, + 'member_role': role, + 'source': 'single', + 'description': f"Admin {admin_email} added member {display_name} ({email}) to workspace {workspace.get('name', workspace_id)} as {role}", + 'workspace_context': { + 'public_workspace_id': workspace_id + } + } + cosmos_activity_logs_container.create_item(body=activity_record) + + # Log to Application Insights + log_event("[ControlCenter] Admin Add Workspace Member (Single)", { + "admin_user": admin_email, + "workspace_id": workspace_id, + "workspace_name": workspace.get('name'), + "member_email": email, + "member_role": role + }) + + return jsonify({ + 'message': f'Successfully added {display_name} as {role}', + 'success': True + }), 200 + + except Exception as e: + debug_print(f"Error adding workspace member: {e}") + return jsonify({'error': 'Failed to add workspace member'}), 500 + + + @app.route('/api/admin/control-center/public-workspaces//activity', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_public_workspace_activity(workspace_id): + """ + Get activity timeline for a specific public 
workspace from activity logs + Returns document creation/deletion, member changes, status changes, and conversations + """ + try: + # Get time range filter (default: last 30 days) + days = request.args.get('days', '30') + export = request.args.get('export', 'false').lower() == 'true' + + # Calculate date filter + cutoff_date = None + if days != 'all': + try: + days_int = int(days) + cutoff_date = (datetime.utcnow() - timedelta(days=days_int)).isoformat() + except ValueError: + pass + + time_filter = "AND c.timestamp >= @cutoff_date" if cutoff_date else "" + + # Query: All activities for public workspaces (no activity type filter to show everything) + # Use SELECT * to get complete raw documents for modal display + query = f""" + SELECT * + FROM c + WHERE c.workspace_context.public_workspace_id = @workspace_id + {time_filter} + ORDER BY c.timestamp DESC + """ + + # Log the query for debugging + debug_print(f"[Workspace Activity] Querying for workspace: {workspace_id}, days: {days}") + debug_print(f"[Workspace Activity] Query: {query}") + + parameters = [ + {"name": "@workspace_id", "value": workspace_id} + ] + + if cutoff_date: + parameters.append({"name": "@cutoff_date", "value": cutoff_date}) + + debug_print(f"[Workspace Activity] Parameters: {parameters}") + + # Execute query + activities = list(cosmos_activity_logs_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"[Workspace Activity] Query returned {len(activities)} activities") + + # Format activities for timeline display + formatted_activities = [] + for activity in activities: + formatted = { + 'id': activity.get('id'), + 'type': activity.get('activity_type'), + 'timestamp': activity.get('timestamp'), + 'user_id': activity.get('user_id'), + 'description': activity.get('description', '') + } + + # Add type-specific details + activity_type = activity.get('activity_type') + + if activity_type == 'document_creation': + doc = activity.get('document', {}) + formatted['document'] = { + 'file_name': doc.get('file_name'), + 'file_type': doc.get('file_type'), + 'file_size_bytes': doc.get('file_size_bytes'), + 'page_count': doc.get('page_count') + } + formatted['icon'] = 'file-earmark-plus' + formatted['color'] = 'success' + + elif activity_type == 'document_deletion': + doc = activity.get('document', {}) + formatted['document'] = { + 'file_name': doc.get('file_name'), + 'file_type': doc.get('file_type') + } + formatted['icon'] = 'file-earmark-minus' + formatted['color'] = 'danger' + + elif activity_type == 'document_metadata_update': + doc = activity.get('document', {}) + formatted['document'] = { + 'file_name': doc.get('file_name') + } + formatted['icon'] = 'pencil-square' + formatted['color'] = 'info' + + elif activity_type == 'public_workspace_status_change': + status_change = activity.get('status_change', {}) + formatted['status_change'] = { + 'from_status': status_change.get('old_status'), + 'to_status': status_change.get('new_status'), + 'changed_by': activity.get('changed_by') + } + formatted['icon'] = 'shield-check' + formatted['color'] = 'warning' + + elif activity_type == 'token_usage': + usage = activity.get('usage', {}) + formatted['token_usage'] = { + 'total_tokens': usage.get('total_tokens'), + 'prompt_tokens': usage.get('prompt_tokens'), + 'completion_tokens': usage.get('completion_tokens'), + 'model': usage.get('model'), + 'token_type': activity.get('token_type') # 'chat' or 'embedding' + } + # Add chat details if available + chat_details = 
activity.get('chat_details', {}) + if chat_details: + formatted['token_usage']['conversation_id'] = chat_details.get('conversation_id') + formatted['token_usage']['message_id'] = chat_details.get('message_id') + # Add embedding details if available + embedding_details = activity.get('embedding_details', {}) + if embedding_details: + formatted['token_usage']['document_id'] = embedding_details.get('document_id') + formatted['token_usage']['file_name'] = embedding_details.get('file_name') + formatted['icon'] = 'cpu' + formatted['color'] = 'info' + + else: + # Fallback for unknown activity types - still show them! + formatted['icon'] = 'circle' + formatted['color'] = 'secondary' + # Keep any additional data that might be in the activity + if activity.get('status_change'): + formatted['status_change'] = activity.get('status_change') + if activity.get('document'): + formatted['document'] = activity.get('document') + if activity.get('workspace_context'): + formatted['workspace_context'] = activity.get('workspace_context') + + formatted_activities.append(formatted) + + if export: + # Return CSV for export + import io + import csv + output = io.StringIO() + writer = csv.writer(output) + writer.writerow(['Timestamp', 'Type', 'User ID', 'Description', 'Details']) + for activity in formatted_activities: + details = '' + if activity.get('document'): + doc = activity['document'] + details = f"{doc.get('file_name', '')} - {doc.get('file_type', '')}" + elif activity.get('status_change'): + sc = activity['status_change'] + details = f"{sc.get('from_status', '')} -> {sc.get('to_status', '')}" + + writer.writerow([ + activity['timestamp'], + activity['type'], + activity['user_id'], + activity['description'], + details + ]) + + csv_content = output.getvalue() + output.close() + + from flask import make_response + response = make_response(csv_content) + response.headers['Content-Type'] = 'text/csv' + response.headers['Content-Disposition'] = f'attachment; filename="workspace_{workspace_id}_activity.csv"' + return response + + return jsonify({ + 'success': True, + 'activities': formatted_activities, + 'raw_activities': activities # Include raw activities for modal display + }), 200 + + except Exception as e: + debug_print(f"Error getting workspace activity: {e}") + import traceback + traceback.print_exc() + return jsonify({'error': 'Failed to retrieve workspace activity'}), 500 + + + @app.route('/api/admin/control-center/public-workspaces//take-ownership', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_take_workspace_ownership(workspace_id): + """ + Create an approval request for admin to take ownership of a public workspace. + Requires approval from workspace owner or another admin. 
+ + Body: + reason (str): Explanation for taking ownership (required) + """ + try: + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + if not admin_user_id: + return jsonify({'error': 'Could not identify admin user'}), 400 + + # Get request body + data = request.get_json() or {} + reason = data.get('reason', '').strip() + + if not reason: + return jsonify({'error': 'Reason is required for ownership transfer'}), 400 + + # Validate workspace exists + try: + workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id) + except: + return jsonify({'error': 'Workspace not found'}), 404 + + # Get old owner info + old_owner = workspace.get('owner', {}) + if isinstance(old_owner, dict): + old_owner_id = old_owner.get('userId') + old_owner_email = old_owner.get('email') + else: + old_owner_id = old_owner + old_owner_email = 'unknown' + + # Create approval request (use group_id parameter as partition key for workspace) + approval = create_approval_request( + request_type=TYPE_TAKE_OWNERSHIP, + group_id=workspace_id, + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'old_owner_id': old_owner_id, + 'old_owner_email': old_owner_email, + 'entity_type': 'workspace' + } + ) + + # Log event + log_event("[ControlCenter] Take Workspace Ownership Request Created", { + "admin_user": admin_email, + "workspace_id": workspace_id, + "workspace_name": workspace.get('name'), + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'success': True, + 'message': 'Ownership transfer request created and pending approval', + 'approval_id': approval['id'], + 'requires_approval': True, + 'status': 'pending' + }), 201 + + except Exception as e: + debug_print(f"Error creating take workspace ownership request: {e}") + import traceback + traceback.print_exc() + return jsonify({'error': str(e)}), 500 + + @app.route('/api/admin/control-center/public-workspaces//ownership', methods=['PUT']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_update_public_workspace_ownership(workspace_id): + """ + Create an approval request to transfer public workspace ownership to another member. + Requires approval from workspace owner or another admin. 
+ + Body: + newOwnerId (str): User ID of the new owner (required) + reason (str): Explanation for ownership transfer (required) + """ + try: + data = request.get_json() + new_owner_user_id = data.get('newOwnerId') + reason = data.get('reason', '').strip() + + if not new_owner_user_id: + return jsonify({'error': 'Missing newOwnerId'}), 400 + + if not reason: + return jsonify({'error': 'Reason is required for ownership transfer'}), 400 + + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + # Get the workspace + try: + workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id) + except: + return jsonify({'error': 'Workspace not found'}), 404 + + # Get new owner user details + try: + new_owner_user = cosmos_user_settings_container.read_item( + item=new_owner_user_id, + partition_key=new_owner_user_id + ) + new_owner_email = new_owner_user.get('email', 'unknown') + new_owner_name = new_owner_user.get('display_name', new_owner_email) + except: + return jsonify({'error': 'New owner user not found'}), 404 + + # Check if new owner is a member of the workspace + is_member = False + current_owner = workspace.get('owner', {}) + if isinstance(current_owner, dict): + if current_owner.get('userId') == new_owner_user_id: + is_member = True + elif current_owner == new_owner_user_id: + is_member = True + + # Check admins + for admin in workspace.get('admins', []): + admin_id = admin.get('userId') if isinstance(admin, dict) else admin + if admin_id == new_owner_user_id: + is_member = True + break + + # Check documentManagers + if not is_member: + for dm in workspace.get('documentManagers', []): + dm_id = dm.get('userId') if isinstance(dm, dict) else dm + if dm_id == new_owner_user_id: + is_member = True + break + + if not is_member: + return jsonify({'error': 'Selected user is not a member of this workspace'}), 400 + + # Get old owner info + old_owner_id = None + old_owner_email = None + if isinstance(current_owner, dict): + old_owner_id = current_owner.get('userId') + old_owner_email = current_owner.get('email') + else: + old_owner_id = current_owner + + # Create approval request (use group_id parameter as partition key for workspace) + approval = create_approval_request( + request_type=TYPE_TRANSFER_OWNERSHIP, + group_id=workspace_id, + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'new_owner_id': new_owner_user_id, + 'new_owner_email': new_owner_email, + 'new_owner_name': new_owner_name, + 'old_owner_id': old_owner_id, + 'old_owner_email': old_owner_email, + 'entity_type': 'workspace' + } + ) + + # Log event + log_event("[ControlCenter] Transfer Workspace Ownership Request Created", { + "admin_user": admin_email, + "workspace_id": workspace_id, + "workspace_name": workspace.get('name'), + "new_owner": new_owner_email, + "old_owner_id": old_owner_id, + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'message': 'Ownership transfer approval request created', + 'approval_id': approval['id'], + 'requires_approval': True + }), 201 + + except Exception as e: + debug_print(f"Error creating workspace ownership transfer request: {e}") + import traceback + traceback.print_exc() + return jsonify({'error': 'Failed to create ownership transfer request'}), 500 + + + 
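For reference, a minimal client-side sketch of the ownership-transfer flow defined above. It assumes the route carries a <workspace_id> path segment (matching the view's workspace_id argument), that the service is reachable on localhost, and that the session already holds an authenticated admin cookie; none of these specifics come from the patch itself.

    # Illustrative only: request an ownership transfer and read back the approval id.
    import requests

    BASE_URL = "http://localhost:5000"   # assumed host
    WORKSPACE_ID = "ws-123"              # hypothetical workspace id
    session = requests.Session()         # assumed to carry an authenticated admin session cookie

    resp = session.put(
        f"{BASE_URL}/api/admin/control-center/public-workspaces/{WORKSPACE_ID}/ownership",
        json={
            "newOwnerId": "user-456",                        # must already be a member of the workspace
            "reason": "Current owner is leaving the team",   # required; blank reasons are rejected with 400
        },
    )

    if resp.status_code == 201:
        body = resp.json()
        # Nothing changes yet: the transfer sits in the approval queue until someone approves it.
        print("Approval created:", body["approval_id"], "requires approval:", body["requires_approval"])
    else:
        print("Request rejected:", resp.status_code, resp.json().get("error"))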
@app.route('/api/admin/control-center/public-workspaces//documents', methods=['DELETE']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_delete_public_workspace_documents_admin(workspace_id): + """ + Create an approval request to delete all documents in a public workspace. + Requires approval from workspace owner or another admin. + + Body: + reason (str): Explanation for deleting documents (required) + """ + try: + data = request.get_json() or {} + reason = data.get('reason', '').strip() + + if not reason: + return jsonify({'error': 'Reason is required for document deletion'}), 400 + + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + # Validate workspace exists + try: + workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id) + except: + return jsonify({'error': 'Public workspace not found'}), 404 + + # Create approval request + approval = create_approval_request( + request_type=TYPE_DELETE_DOCUMENTS, + group_id=workspace_id, # Use workspace_id as group_id for approval system + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'workspace_name': workspace.get('name'), + 'entity_type': 'workspace' + } + ) + + # Log event + log_event("[ControlCenter] Delete Public Workspace Documents Request Created", { + "admin_user": admin_email, + "workspace_id": workspace_id, + "workspace_name": workspace.get('name'), + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'success': True, + 'message': 'Document deletion request created and pending approval', + 'approval_id': approval['id'], + 'status': 'pending' + }), 200 + + except Exception as e: + debug_print(f"Error creating document deletion request: {e}") + return jsonify({'error': str(e)}), 500 + + + @app.route('/api/admin/control-center/public-workspaces/', methods=['DELETE']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_delete_public_workspace_admin(workspace_id): + """ + Create an approval request to delete an entire public workspace. + Requires approval from workspace owner or another admin. 
+ + Body: + reason (str): Explanation for deleting the workspace (required) + """ + try: + data = request.get_json() or {} + reason = data.get('reason', '').strip() + + if not reason: + return jsonify({'error': 'Reason is required for workspace deletion'}), 400 + + admin_user = session.get('user', {}) + admin_user_id = admin_user.get('oid') or admin_user.get('sub') + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + admin_display_name = admin_user.get('name', admin_email) + + # Validate workspace exists + try: + workspace = cosmos_public_workspaces_container.read_item( + item=workspace_id, + partition_key=workspace_id + ) + except: + return jsonify({'error': 'Public workspace not found'}), 404 + + # Create approval request + approval = create_approval_request( + request_type=TYPE_DELETE_GROUP, # Reuse TYPE_DELETE_GROUP for workspace deletion + group_id=workspace_id, # Use workspace_id as group_id for approval system + requester_id=admin_user_id, + requester_email=admin_email, + requester_name=admin_display_name, + reason=reason, + metadata={ + 'workspace_name': workspace.get('name'), + 'entity_type': 'workspace' + } + ) + + # Log event + log_event("[ControlCenter] Delete Public Workspace Request Created", { + "admin_user": admin_email, + "workspace_id": workspace_id, + "workspace_name": workspace.get('name'), + "approval_id": approval['id'], + "reason": reason + }) + + return jsonify({ + 'success': True, + 'message': 'Workspace deletion request created and pending approval', + 'approval_id': approval['id'], + 'status': 'pending' + }), 200 + + except Exception as e: + debug_print(f"Error creating workspace deletion request: {e}") + return jsonify({'error': str(e)}), 500 + + # Activity Trends API + @app.route('/api/admin/control-center/activity-trends', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('dashboard') + def api_get_activity_trends(): + """ + Get activity trends data for the control center dashboard. + Returns aggregated activity data from various containers. + """ + try: + # Check if custom start_date and end_date are provided + custom_start = request.args.get('start_date') + custom_end = request.args.get('end_date') + + if custom_start and custom_end: + # Use custom date range + try: + start_date = datetime.fromisoformat(custom_start).replace(hour=0, minute=0, second=0, microsecond=0) + end_date = datetime.fromisoformat(custom_end).replace(hour=23, minute=59, second=59, microsecond=999999) + days = (end_date - start_date).days + 1 + debug_print(f"🔍 [Activity Trends API] Custom date range: {start_date} to {end_date} ({days} days)") + except ValueError: + return jsonify({'error': 'Invalid date format. 
Use YYYY-MM-DD format.'}), 400 + else: + # Use days parameter (default behavior) + days = int(request.args.get('days', 7)) + # Set end_date to end of current day to include all of today's records + end_date = datetime.now().replace(hour=23, minute=59, second=59, microsecond=999999) + start_date = (end_date - timedelta(days=days)).replace(hour=0, minute=0, second=0, microsecond=0) + debug_print(f"🔍 [Activity Trends API] Request for {days} days: {start_date} to {end_date}") + + # Get activity data + activity_data = get_activity_trends_data(start_date, end_date) + + debug_print(f"🔍 [Activity Trends API] Returning data: {activity_data}") + + return jsonify({ + 'success': True, + 'activity_data': activity_data, + 'period': f"{days} days", + 'start_date': start_date.isoformat(), + 'end_date': end_date.isoformat() + }) + + except Exception as e: + debug_print(f"Error getting activity trends: {e}") + print(f"❌ [Activity Trends API] Error: {e}") + return jsonify({'error': 'Failed to retrieve activity trends'}), 500 + + + + @app.route('/api/admin/control-center/activity-trends/export', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('dashboard') + def api_export_activity_trends(): + """ + Export activity trends raw data as CSV file based on selected charts and date range. + Returns detailed records with user information instead of aggregated counts. + """ + try: + debug_print("🔍 [ACTIVITY TRENDS DEBUG] Starting CSV export process") + data = request.get_json() + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Request data: {data}") # Parse request parameters + charts = data.get('charts', ['logins', 'chats', 'documents']) # Default to all charts + time_window = data.get('time_window', '30') # Default to 30 days + start_date = data.get('start_date') # For custom range + end_date = data.get('end_date') # For custom range + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Parsed params - charts: {charts}, time_window: {time_window}, start_date: {start_date}, end_date: {end_date}") # Determine date range + debug_print("🔍 [ACTIVITY TRENDS DEBUG] Determining date range") + if time_window == 'custom' and start_date and end_date: + try: + debug_print("🔍 [ACTIVITY TRENDS DEBUG] Processing custom dates: {start_date} to {end_date}") + start_date_obj = datetime.fromisoformat(start_date.replace('Z', '+00:00') if 'Z' in start_date else start_date) + end_date_obj = datetime.fromisoformat(end_date.replace('Z', '+00:00') if 'Z' in end_date else end_date) + end_date_obj = end_date_obj.replace(hour=23, minute=59, second=59, microsecond=999999) + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Custom date objects created: {start_date_obj} to {end_date_obj}") + except ValueError as ve: + print(f"❌ [ACTIVITY TRENDS DEBUG] Date parsing error: {ve}") + return jsonify({'error': 'Invalid date format'}), 400 + else: + # Use predefined ranges + days = int(time_window) if time_window.isdigit() else 30 + end_date_obj = datetime.now().replace(hour=23, minute=59, second=59, microsecond=999999) + start_date_obj = end_date_obj - timedelta(days=days-1) + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Predefined range: {days} days, from {start_date_obj} to {end_date_obj}") + + # Get raw activity data using new function + debug_print("🔍 [ACTIVITY TRENDS DEBUG] Calling get_raw_activity_trends_data") + raw_data = get_raw_activity_trends_data( + start_date_obj, + end_date_obj, + charts + ) + debug_print(f"🔍 [ACTIVITY TRENDS DEBUG] Raw data retrieved: {len(raw_data) if raw_data else 0} chart types") + + # 
Generate CSV content with all data types + import io + import csv + output = io.StringIO() + writer = csv.writer(output) + + # Write data for each chart type + debug_print(f"🔍 [CSV DEBUG] Processing {len(charts)} chart types: {charts}") + for chart_type in charts: + debug_print(f"🔍 [CSV DEBUG] Processing chart type: {chart_type}") + if chart_type in raw_data and raw_data[chart_type]: + debug_print(f"🔍 [CSV DEBUG] Found {len(raw_data[chart_type])} records for {chart_type}") + # Add section header + writer.writerow([]) # Empty row for separation + section_header = f"=== {chart_type.upper()} DATA ===" + debug_print(f"🔍 [CSV DEBUG] Writing section header: {section_header}") + writer.writerow([section_header]) + + # Write headers and data based on chart type + if chart_type == 'logins': + debug_print(f"🔍 [CSV DEBUG] Writing login headers for {chart_type}") + writer.writerow(['Display Name', 'Email', 'User ID', 'Login Time']) + record_count = 0 + for record in raw_data[chart_type]: + record_count += 1 + if record_count <= 3: # Debug first 3 records + debug_print(f"🔍 [CSV DEBUG] Login record {record_count} structure: {list(record.keys())}") + debug_print(f"🔍 [CSV DEBUG] Login record {record_count} data: {record}") + writer.writerow([ + record.get('display_name', ''), + record.get('email', ''), + record.get('user_id', ''), + record.get('login_time', '') + ]) + debug_print(f"🔍 [CSV DEBUG] Finished writing {record_count} login records") + + elif chart_type in ['documents', 'personal_documents', 'group_documents', 'public_documents']: + # Handle all document types with same structure + debug_print(f"🔍 [CSV DEBUG] Writing document headers for {chart_type}") + writer.writerow([ + 'Display Name', 'Email', 'User ID', 'Document ID', 'Document Filename', + 'Document Title', 'Document Page Count', 'Document Size in AI Search', + 'Document Size in Storage Account', 'Upload Date', 'Document Type' + ]) + record_count = 0 + for record in raw_data[chart_type]: + record_count += 1 + if record_count <= 3: # Log first 3 records for debugging + debug_print(f"🔍 [CSV DEBUG] Writing {chart_type} record {record_count}: {record.get('filename', 'No filename')}") + writer.writerow([ + record.get('display_name', ''), + record.get('email', ''), + record.get('user_id', ''), + record.get('document_id', ''), + record.get('filename', ''), + record.get('title', ''), + record.get('page_count', ''), + record.get('ai_search_size', ''), + record.get('storage_account_size', ''), + record.get('upload_date', ''), + record.get('document_type', chart_type.replace('_documents', '').title()) + ]) + debug_print(f"🔍 [CSV DEBUG] Finished writing {record_count} records for {chart_type}") + + elif chart_type == 'chats': + debug_print(f"🔍 [CSV DEBUG] Writing chat headers for {chart_type}") + writer.writerow([ + 'Display Name', 'Email', 'User ID', 'Chat ID', 'Chat Title', + 'Number of Messages', 'Total Size (characters)', 'Created Date' + ]) + record_count = 0 + for record in raw_data[chart_type]: + record_count += 1 + if record_count <= 3: # Debug first 3 records + debug_print(f"🔍 [CSV DEBUG] Chat record {record_count} structure: {list(record.keys())}") + debug_print(f"🔍 [CSV DEBUG] Chat record {record_count} data: {record}") + writer.writerow([ + record.get('display_name', ''), + record.get('email', ''), + record.get('user_id', ''), + record.get('chat_id', ''), + record.get('chat_title', ''), + record.get('message_count', ''), + record.get('total_size', ''), + record.get('created_date', '') + ]) + debug_print(f"🔍 [CSV DEBUG] Finished writing 
{record_count} chat records") + + elif chart_type == 'tokens': + debug_print(f"🔍 [CSV DEBUG] Writing token usage headers for {chart_type}") + writer.writerow([ + 'Display Name', 'Email', 'User ID', 'Token Type', 'Model Name', + 'Prompt Tokens', 'Completion Tokens', 'Total Tokens', 'Timestamp' + ]) + record_count = 0 + for record in raw_data[chart_type]: + record_count += 1 + if record_count <= 3: # Debug first 3 records + debug_print(f"🔍 [CSV DEBUG] Token record {record_count} structure: {list(record.keys())}") + debug_print(f"🔍 [CSV DEBUG] Token record {record_count} data: {record}") + writer.writerow([ + record.get('display_name', ''), + record.get('email', ''), + record.get('user_id', ''), + record.get('token_type', ''), + record.get('model_name', ''), + record.get('prompt_tokens', ''), + record.get('completion_tokens', ''), + record.get('total_tokens', ''), + record.get('timestamp', '') + ]) + debug_print(f"🔍 [CSV DEBUG] Finished writing {record_count} token usage records") + else: + debug_print(f"🔍 [CSV DEBUG] No data found for {chart_type} - available keys: {list(raw_data.keys()) if raw_data else 'None'}") + + # Add final debug info + debug_print(f"🔍 [CSV DEBUG] Finished processing all chart types. Raw data summary:") + for key, value in raw_data.items(): + if isinstance(value, list): + debug_print(f"🔍 [CSV DEBUG] - {key}: {len(value)} records") + else: + debug_print(f"🔍 [CSV DEBUG] - {key}: {type(value)} - {value}") + + csv_content = output.getvalue() + debug_print(f"🔍 [CSV DEBUG] Generated CSV content length: {len(csv_content)} characters") + debug_print(f"🔍 [CSV DEBUG] CSV content preview (first 500 chars): {csv_content[:500]}") + output.close() + + # Generate filename with timestamp + timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') + filename = f"activity_trends_raw_export_{timestamp}.csv" + + # Return CSV as downloadable response + from flask import make_response + response = make_response(csv_content) + response.headers['Content-Type'] = 'text/csv' + response.headers['Content-Disposition'] = f'attachment; filename="{filename}"' + + return response + + except Exception as e: + debug_print(f"Error exporting activity trends: {e}") + return jsonify({'error': 'Failed to export data'}), 500 + + @app.route('/api/admin/control-center/activity-trends/chat', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('dashboard') + def api_chat_activity_trends(): + """ + Create a new chat conversation with activity trends data as CSV message. 
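As a quick illustration of what this chat hand-off endpoint consumes and produces, the sketch below mirrors the fields the handler parses and returns; the concrete values are placeholders and are not taken from the patch.

    # Illustrative payload for POST /api/admin/control-center/activity-trends/chat
    chat_request = {
        "charts": ["logins", "chats", "documents"],  # any subset; defaults to all three
        "time_window": "custom",                     # or a day count such as "7" or "30"
        "start_date": "2025-01-01",                  # only read when time_window == "custom"
        "end_date": "2025-01-31",
    }

    # Shape of a successful response (placeholder values):
    chat_response = {
        "success": True,
        "conversation_id": "0b2f4c1e-...",
        "conversation_title": "Activity Trends - Logins, Chats, Documents (2025-01-01 to 2025-01-31)",
        "redirect_url": "/chat/0b2f4c1e-...",
    }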
+ """ + try: + data = request.get_json() + + # Parse request parameters + charts = data.get('charts', ['logins', 'chats', 'documents']) # Default to all charts + time_window = data.get('time_window', '30') # Default to 30 days + start_date = data.get('start_date') # For custom range + end_date = data.get('end_date') # For custom range + + # Determine date range + if time_window == 'custom' and start_date and end_date: + try: + start_date_obj = datetime.fromisoformat(start_date.replace('Z', '+00:00') if 'Z' in start_date else start_date) + end_date_obj = datetime.fromisoformat(end_date.replace('Z', '+00:00') if 'Z' in end_date else end_date) + end_date_obj = end_date_obj.replace(hour=23, minute=59, second=59, microsecond=999999) + except ValueError: + return jsonify({'error': 'Invalid date format'}), 400 + else: + # Use predefined ranges + days = int(time_window) if time_window.isdigit() else 30 + end_date_obj = datetime.now().replace(hour=23, minute=59, second=59, microsecond=999999) + start_date_obj = end_date_obj - timedelta(days=days-1) + + # Get activity data using existing function + activity_data = get_activity_trends_data( + start_date_obj.strftime('%Y-%m-%d'), + end_date_obj.strftime('%Y-%m-%d') + ) + + # Prepare CSV data + csv_rows = [] + csv_rows.append(['Date', 'Chart Type', 'Activity Count']) + + # Process each requested chart type + for chart_type in charts: + if chart_type in activity_data: + chart_data = activity_data[chart_type] + # Sort dates for consistent output + sorted_dates = sorted(chart_data.keys()) + + for date_key in sorted_dates: + count = chart_data[date_key] + chart_display_name = { + 'logins': 'Logins', + 'chats': 'Chats', + 'documents': 'Documents', + 'personal_documents': 'Personal Documents', + 'group_documents': 'Group Documents', + 'public_documents': 'Public Documents' + }.get(chart_type, chart_type.title()) + + csv_rows.append([date_key, chart_display_name, count]) + + # Generate CSV content + import io + import csv + output = io.StringIO() + writer = csv.writer(output) + writer.writerows(csv_rows) + csv_content = output.getvalue() + output.close() + + # Get current user info + user_id = session.get('user_id') + user_email = session.get('email') + user_display_name = session.get('display_name', user_email) + + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + # Create new conversation + conversation_id = str(uuid.uuid4()) + timestamp = datetime.now(timezone.utc).isoformat() + + # Generate descriptive title with date range + if time_window == 'custom': + date_range = f"{start_date} to {end_date}" + else: + date_range = f"Last {time_window} Days" + + charts_text = ", ".join([c.title() for c in charts]) + conversation_title = f"Activity Trends - {charts_text} ({date_range})" + + # Create conversation document + conversation_doc = { + "id": conversation_id, + "title": conversation_title, + "user_id": user_id, + "user_email": user_email, + "user_display_name": user_display_name, + "created": timestamp, + "last_updated": timestamp, + "messages": [], + "system_message": "You are analyzing activity trends data from a control center dashboard. The user has provided activity data as a CSV file. 
Please analyze the data and provide insights about user activity patterns, trends, and any notable observations.", + "message_count": 0, + "settings": { + "model": "gpt-4o", + "temperature": 0.7, + "max_tokens": 4000 + } + } + + # Create the initial message with CSV data (simulate file upload) + message_id = str(uuid.uuid4()) + csv_filename = f"activity_trends_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv" + + # Create message with file attachment structure + initial_message = { + "id": message_id, + "role": "user", + "content": f"Please analyze this activity trends data from our system dashboard. The data covers {date_range} and includes {charts_text} activity.", + "timestamp": timestamp, + "files": [{ + "name": csv_filename, + "type": "text/csv", + "size": len(csv_content.encode('utf-8')), + "content": csv_content, + "id": str(uuid.uuid4()) + }] + } + + conversation_doc["messages"].append(initial_message) + conversation_doc["message_count"] = 1 + + # Save conversation to database + cosmos_conversations_container.create_item(conversation_doc) + + # Log the activity + log_event("[ControlCenter] Activity Trends Chat Created", { + "conversation_id": conversation_id, + "user_id": user_id, + "charts": charts, + "time_window": time_window, + "date_range": date_range + }) + + return jsonify({ + 'success': True, + 'conversation_id': conversation_id, + 'conversation_title': conversation_title, + 'redirect_url': f'/chat/{conversation_id}' + }), 200 + + except Exception as e: + debug_print(f"Error creating activity trends chat: {e}") + return jsonify({'error': 'Failed to create chat conversation'}), 500 + + # Data Refresh API + @app.route('/api/admin/control-center/refresh', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_refresh_control_center_data(): + """ + Refresh all Control Center metrics data and update admin timestamp. + This will recalculate all user metrics and cache them in user settings. 
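A small sketch of the optional JSON body and the summary this refresh endpoint returns; the field names follow the handler below, while the counts and timestamp are placeholders rather than real output.

    # Optional body for POST /api/admin/control-center/refresh
    refresh_request = {
        "user_id": None,        # read by the handler along with force_refresh; both are optional
        "force_refresh": True,
    }

    # Shape of a successful response (placeholder numbers):
    refresh_response = {
        "success": True,
        "message": "Control Center data refreshed successfully",
        "refreshed_users": 42,
        "failed_users": 0,
        "refreshed_groups": 7,
        "failed_groups": 0,
        "refresh_timestamp": "2025-06-01T12:00:00+00:00",
    }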
+ """ + try: + debug_print("🔄 [REFRESH DEBUG] Starting Control Center data refresh...") + debug_print("Starting Control Center data refresh...") + + # Check if request has specific user_id + from flask import request + try: + request_data = request.get_json(force=True) or {} + except: + # Handle case where no JSON body is sent + request_data = {} + + specific_user_id = request_data.get('user_id') + force_refresh = request_data.get('force_refresh', False) + + debug_print(f"🔄 [REFRESH DEBUG] Request data: user_id={specific_user_id}, force_refresh={force_refresh}") + + # Get all users to refresh their metrics + debug_print("🔄 [REFRESH DEBUG] Querying all users...") + users_query = "SELECT c.id, c.email, c.display_name, c.lastUpdated, c.settings FROM c" + all_users = list(cosmos_user_settings_container.query_items( + query=users_query, + enable_cross_partition_query=True + )) + debug_print(f"🔄 [REFRESH DEBUG] Found {len(all_users)} users to process") + + refreshed_count = 0 + failed_count = 0 + + # Refresh metrics for each user + debug_print("🔄 [REFRESH DEBUG] Starting user refresh loop...") + for user in all_users: + try: + user_id = user.get('id') + debug_print(f"🔄 [REFRESH DEBUG] Processing user {user_id}") + + # Force refresh of metrics for this user + enhanced_user = enhance_user_with_activity(user, force_refresh=True) + refreshed_count += 1 + + debug_print(f"✅ [REFRESH DEBUG] Successfully refreshed user {user_id}") + debug_print(f"Refreshed metrics for user {user_id}") + except Exception as user_error: + failed_count += 1 + debug_print(f"❌ [REFRESH DEBUG] Failed to refresh user {user.get('id')}: {user_error}") + debug_print(f"❌ [REFRESH DEBUG] User error traceback:") + import traceback + debug_print(traceback.format_exc()) + debug_print(f"Failed to refresh metrics for user {user.get('id')}: {user_error}") + + debug_print(f"🔄 [REFRESH DEBUG] User refresh loop completed. Refreshed: {refreshed_count}, Failed: {failed_count}") + + # Refresh metrics for all groups + debug_print("🔄 [REFRESH DEBUG] Starting group refresh...") + groups_refreshed_count = 0 + groups_failed_count = 0 + + try: + groups_query = "SELECT * FROM c" + all_groups = list(cosmos_groups_container.query_items( + query=groups_query, + enable_cross_partition_query=True + )) + debug_print(f"🔄 [REFRESH DEBUG] Found {len(all_groups)} groups to process") + + # Refresh metrics for each group + for group in all_groups: + try: + group_id = group.get('id') + debug_print(f"🔄 [REFRESH DEBUG] Processing group {group_id}") + + # Force refresh of metrics for this group + enhanced_group = enhance_group_with_activity(group, force_refresh=True) + groups_refreshed_count += 1 + + debug_print(f"✅ [REFRESH DEBUG] Successfully refreshed group {group_id}") + debug_print(f"Refreshed metrics for group {group_id}") + except Exception as group_error: + groups_failed_count += 1 + debug_print(f"❌ [REFRESH DEBUG] Failed to refresh group {group.get('id')}: {group_error}") + debug_print(f"❌ [REFRESH DEBUG] Group error traceback:") + import traceback + debug_print(traceback.format_exc()) + debug_print(f"Failed to refresh metrics for group {group.get('id')}: {group_error}") + + except Exception as groups_error: + debug_print(f"❌ [REFRESH DEBUG] Error querying groups: {groups_error}") + debug_print(f"Error querying groups for refresh: {groups_error}") + + debug_print(f"🔄 [REFRESH DEBUG] Group refresh loop completed. 
Refreshed: {groups_refreshed_count}, Failed: {groups_failed_count}") + + # Update admin settings with refresh timestamp + debug_print("🔄 [REFRESH DEBUG] Updating admin settings...") + try: + from functions_settings import get_settings, update_settings + + settings = get_settings() + if settings: + settings['control_center_last_refresh'] = datetime.now(timezone.utc).isoformat() + update_success = update_settings(settings) + + if not update_success: + debug_print("⚠️ [REFRESH DEBUG] Failed to update admin settings") + debug_print("Failed to update admin settings with refresh timestamp") + else: + debug_print("✅ [REFRESH DEBUG] Admin settings updated successfully") + debug_print("Updated admin settings with refresh timestamp") + else: + debug_print("⚠️ [REFRESH DEBUG] Could not get admin settings") + + except Exception as admin_error: + debug_print(f"❌ [REFRESH DEBUG] Admin settings update failed: {admin_error}") + debug_print(f"Error updating admin settings: {admin_error}") + + debug_print(f"🎉 [REFRESH DEBUG] Refresh completed! Users - Refreshed: {refreshed_count}, Failed: {failed_count}. Groups - Refreshed: {groups_refreshed_count}, Failed: {groups_failed_count}") + debug_print(f"Control Center data refresh completed. Users: {refreshed_count} refreshed, {failed_count} failed. Groups: {groups_refreshed_count} refreshed, {groups_failed_count} failed") + + return jsonify({ + 'success': True, + 'message': 'Control Center data refreshed successfully', + 'refreshed_users': refreshed_count, + 'failed_users': failed_count, + 'refreshed_groups': groups_refreshed_count, + 'failed_groups': groups_failed_count, + 'refresh_timestamp': datetime.now(timezone.utc).isoformat() + }), 200 + + except Exception as e: + debug_print(f"💥 [REFRESH DEBUG] MAJOR ERROR in refresh endpoint: {e}") + debug_print("💥 [REFRESH DEBUG] Full traceback:") + import traceback + debug_print(traceback.format_exc()) + debug_print(f"Error refreshing Control Center data: {e}") + return jsonify({'error': 'Failed to refresh data'}), 500 + + # Get refresh status API + @app.route('/api/admin/control-center/refresh-status', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_refresh_status(): + """ + Get the last refresh timestamp for Control Center data. + """ + try: + from functions_settings import get_settings + + settings = get_settings() + last_refresh = settings.get('control_center_last_refresh') + + return jsonify({ + 'last_refresh': last_refresh, + 'last_refresh_formatted': None if not last_refresh else datetime.fromisoformat(last_refresh.replace('Z', '+00:00') if 'Z' in last_refresh else last_refresh).strftime('%m/%d/%Y %I:%M %p UTC') + }), 200 + + except Exception as e: + debug_print(f"Error getting refresh status: {e}") + return jsonify({'error': 'Failed to get refresh status'}), 500 + + # Activity Log Migration APIs + @app.route('/api/admin/control-center/migrate/status', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_migration_status(): + """ + Check if there are conversations and documents that need to be migrated to activity logs. + Returns counts of records without the 'added_to_activity_log' flag. 
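For orientation, the status check returns a flat counter object; the example below mirrors the fields assembled in the handler, with made-up counts (the document and overall totals are sums of the individual counters).

    # Illustrative response from GET /api/admin/control-center/migrate/status
    migration_status_example = {
        "conversations_without_logs": 120,
        "personal_documents_without_logs": 45,
        "group_documents_without_logs": 10,
        "public_documents_without_logs": 3,
        "total_documents_without_logs": 58,    # 45 + 10 + 3
        "estimated_total_records": 178,        # 120 + 58
        "migration_needed": True,              # true whenever estimated_total_records > 0
    }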
+ """ + try: + migration_status = { + 'conversations_without_logs': 0, + 'personal_documents_without_logs': 0, + 'group_documents_without_logs': 0, + 'public_documents_without_logs': 0, + 'total_documents_without_logs': 0, + 'migration_needed': False, + 'estimated_total_records': 0 + } + + # Check conversations without the flag + try: + conversations_query = """ + SELECT VALUE COUNT(1) + FROM c + WHERE NOT IS_DEFINED(c.added_to_activity_log) OR c.added_to_activity_log = false + """ + conversations_result = list(cosmos_conversations_container.query_items( + query=conversations_query, + enable_cross_partition_query=True + )) + migration_status['conversations_without_logs'] = conversations_result[0] if conversations_result else 0 + except Exception as e: + debug_print(f"Error checking conversations migration status: {e}") + + # Check personal documents without the flag + try: + personal_docs_query = """ + SELECT VALUE COUNT(1) + FROM c + WHERE NOT IS_DEFINED(c.added_to_activity_log) OR c.added_to_activity_log = false + """ + personal_docs_result = list(cosmos_user_documents_container.query_items( + query=personal_docs_query, + enable_cross_partition_query=True + )) + migration_status['personal_documents_without_logs'] = personal_docs_result[0] if personal_docs_result else 0 + except Exception as e: + debug_print(f"Error checking personal documents migration status: {e}") + + # Check group documents without the flag + try: + group_docs_query = """ + SELECT VALUE COUNT(1) + FROM c + WHERE NOT IS_DEFINED(c.added_to_activity_log) OR c.added_to_activity_log = false + """ + group_docs_result = list(cosmos_group_documents_container.query_items( + query=group_docs_query, + enable_cross_partition_query=True + )) + migration_status['group_documents_without_logs'] = group_docs_result[0] if group_docs_result else 0 + except Exception as e: + debug_print(f"Error checking group documents migration status: {e}") + + # Check public documents without the flag + try: + public_docs_query = """ + SELECT VALUE COUNT(1) + FROM c + WHERE NOT IS_DEFINED(c.added_to_activity_log) OR c.added_to_activity_log = false + """ + public_docs_result = list(cosmos_public_documents_container.query_items( + query=public_docs_query, + enable_cross_partition_query=True + )) + migration_status['public_documents_without_logs'] = public_docs_result[0] if public_docs_result else 0 + except Exception as e: + debug_print(f"Error checking public documents migration status: {e}") + + # Calculate totals + migration_status['total_documents_without_logs'] = ( + migration_status['personal_documents_without_logs'] + + migration_status['group_documents_without_logs'] + + migration_status['public_documents_without_logs'] + ) + + migration_status['estimated_total_records'] = ( + migration_status['conversations_without_logs'] + + migration_status['total_documents_without_logs'] + ) + + migration_status['migration_needed'] = migration_status['estimated_total_records'] > 0 + + return jsonify(migration_status), 200 + + except Exception as e: + debug_print(f"Error getting migration status: {e}") + return jsonify({'error': 'Failed to get migration status'}), 500 + + @app.route('/api/admin/control-center/migrate/all', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_migrate_to_activity_logs(): + """ + Migrate all conversations and documents without activity logs. + This adds activity log records and sets the 'added_to_activity_log' flag. 
+ + WARNING: This may take a while for large datasets and could impact performance. + Recommended to run during off-peak hours. + """ + try: + from functions_activity_logging import log_conversation_creation, log_document_creation_transaction + + results = { + 'conversations_migrated': 0, + 'conversations_failed': 0, + 'personal_documents_migrated': 0, + 'personal_documents_failed': 0, + 'group_documents_migrated': 0, + 'group_documents_failed': 0, + 'public_documents_migrated': 0, + 'public_documents_failed': 0, + 'total_migrated': 0, + 'total_failed': 0, + 'errors': [] + } + + # Migrate conversations + debug_print("Starting conversation migration...") + try: + conversations_query = """ + SELECT * + FROM c + WHERE NOT IS_DEFINED(c.added_to_activity_log) OR c.added_to_activity_log = false + """ + conversations = list(cosmos_conversations_container.query_items( + query=conversations_query, + enable_cross_partition_query=True + )) + + debug_print(f"Found {len(conversations)} conversations to migrate") + + for conv in conversations: + try: + # Create activity log directly to preserve original timestamp + activity_log = { + 'id': str(uuid.uuid4()), + 'activity_type': 'conversation_creation', + 'user_id': conv.get('user_id'), + 'timestamp': conv.get('created_at') or conv.get('last_updated') or datetime.utcnow().isoformat(), + 'created_at': conv.get('created_at') or conv.get('last_updated') or datetime.utcnow().isoformat(), + 'conversation': { + 'conversation_id': conv.get('id'), + 'title': conv.get('title', 'Untitled'), + 'context': conv.get('context', []), + 'tags': conv.get('tags', []) + }, + 'workspace_type': 'personal', + 'workspace_context': {} + } + + # Save to activity logs container + cosmos_activity_logs_container.upsert_item(activity_log) + + # Add flag to conversation + conv['added_to_activity_log'] = True + cosmos_conversations_container.upsert_item(conv) + + results['conversations_migrated'] += 1 + + except Exception as conv_error: + results['conversations_failed'] += 1 + error_msg = f"Failed to migrate conversation {conv.get('id')}: {str(conv_error)}" + debug_print(error_msg) + results['errors'].append(error_msg) + + except Exception as e: + error_msg = f"Error during conversation migration: {str(e)}" + debug_print(error_msg) + results['errors'].append(error_msg) + + # Migrate personal documents + debug_print("Starting personal documents migration...") + try: + personal_docs_query = """ + SELECT * + FROM c + WHERE NOT IS_DEFINED(c.added_to_activity_log) OR c.added_to_activity_log = false + """ + personal_docs = list(cosmos_user_documents_container.query_items( + query=personal_docs_query, + enable_cross_partition_query=True + )) + + for doc in personal_docs: + try: + # Create activity log directly to preserve original timestamp + activity_log = { + 'id': str(uuid.uuid4()), + 'user_id': doc.get('user_id'), + 'activity_type': 'document_creation', + 'workspace_type': 'personal', + 'timestamp': doc.get('upload_date') or datetime.utcnow().isoformat(), + 'created_at': doc.get('upload_date') or datetime.utcnow().isoformat(), + 'document': { + 'document_id': doc.get('id'), + 'file_name': doc.get('file_name', 'Unknown'), + 'file_type': doc.get('file_type', 'unknown'), + 'file_size_bytes': doc.get('file_size', 0), + 'page_count': doc.get('number_of_pages', 0), + 'version': doc.get('version', 1) + }, + 'embedding_usage': { + 'total_tokens': doc.get('embedding_tokens', 0), + 'model_deployment_name': doc.get('embedding_model_deployment_name', 'unknown') + }, + 'document_metadata': { + 
'author': doc.get('author'), + 'title': doc.get('title'), + 'subject': doc.get('subject'), + 'publication_date': doc.get('publication_date'), + 'keywords': doc.get('keywords', []), + 'abstract': doc.get('abstract') + }, + 'workspace_context': {} + } + + # Save to activity logs container + cosmos_activity_logs_container.upsert_item(activity_log) + + # Add flag to document + doc['added_to_activity_log'] = True + cosmos_user_documents_container.upsert_item(doc) + + results['personal_documents_migrated'] += 1 + + except Exception as doc_error: + results['personal_documents_failed'] += 1 + error_msg = f"Failed to migrate personal document {doc.get('id')}: {str(doc_error)}" + debug_print(error_msg) + results['errors'].append(error_msg) + + except Exception as e: + error_msg = f"Error during personal documents migration: {str(e)}" + debug_print(error_msg) + results['errors'].append(error_msg) + + # Migrate group documents + debug_print("Starting group documents migration...") + try: + group_docs_query = """ + SELECT * + FROM c + WHERE NOT IS_DEFINED(c.added_to_activity_log) OR c.added_to_activity_log = false + """ + group_docs = list(cosmos_group_documents_container.query_items( + query=group_docs_query, + enable_cross_partition_query=True + )) + + for doc in group_docs: + try: + # Create activity log directly to preserve original timestamp + activity_log = { + 'id': str(uuid.uuid4()), + 'user_id': doc.get('user_id'), + 'activity_type': 'document_creation', + 'workspace_type': 'group', + 'timestamp': doc.get('upload_date') or datetime.utcnow().isoformat(), + 'created_at': doc.get('upload_date') or datetime.utcnow().isoformat(), + 'document': { + 'document_id': doc.get('id'), + 'file_name': doc.get('file_name', 'Unknown'), + 'file_type': doc.get('file_type', 'unknown'), + 'file_size_bytes': doc.get('file_size', 0), + 'page_count': doc.get('number_of_pages', 0), + 'version': doc.get('version', 1) + }, + 'embedding_usage': { + 'total_tokens': doc.get('embedding_tokens', 0), + 'model_deployment_name': doc.get('embedding_model_deployment_name', 'unknown') + }, + 'document_metadata': { + 'author': doc.get('author'), + 'title': doc.get('title'), + 'subject': doc.get('subject'), + 'publication_date': doc.get('publication_date'), + 'keywords': doc.get('keywords', []), + 'abstract': doc.get('abstract') + }, + 'workspace_context': { + 'group_id': doc.get('group_id') + } + } + + # Save to activity logs container + cosmos_activity_logs_container.upsert_item(activity_log) + + # Add flag to document + doc['added_to_activity_log'] = True + cosmos_group_documents_container.upsert_item(doc) + + results['group_documents_migrated'] += 1 + + except Exception as doc_error: + results['group_documents_failed'] += 1 + error_msg = f"Failed to migrate group document {doc.get('id')}: {str(doc_error)}" + debug_print(error_msg) + results['errors'].append(error_msg) + + except Exception as e: + error_msg = f"Error during group documents migration: {str(e)}" + debug_print(error_msg) + results['errors'].append(error_msg) + + # Migrate public documents + debug_print("Starting public documents migration...") + try: + public_docs_query = """ + SELECT * + FROM c + WHERE NOT IS_DEFINED(c.added_to_activity_log) OR c.added_to_activity_log = false + """ + public_docs = list(cosmos_public_documents_container.query_items( + query=public_docs_query, + enable_cross_partition_query=True + )) + + for doc in public_docs: + try: + # Create activity log directly to preserve original timestamp + activity_log = { + 'id': str(uuid.uuid4()), + 
'user_id': doc.get('user_id'), + 'activity_type': 'document_creation', + 'workspace_type': 'public', + 'timestamp': doc.get('upload_date') or datetime.utcnow().isoformat(), + 'created_at': doc.get('upload_date') or datetime.utcnow().isoformat(), + 'document': { + 'document_id': doc.get('id'), + 'file_name': doc.get('file_name', 'Unknown'), + 'file_type': doc.get('file_type', 'unknown'), + 'file_size_bytes': doc.get('file_size', 0), + 'page_count': doc.get('number_of_pages', 0), + 'version': doc.get('version', 1) + }, + 'embedding_usage': { + 'total_tokens': doc.get('embedding_tokens', 0), + 'model_deployment_name': doc.get('embedding_model_deployment_name', 'unknown') + }, + 'document_metadata': { + 'author': doc.get('author'), + 'title': doc.get('title'), + 'subject': doc.get('subject'), + 'publication_date': doc.get('publication_date'), + 'keywords': doc.get('keywords', []), + 'abstract': doc.get('abstract') + }, + 'workspace_context': { + 'public_workspace_id': doc.get('public_workspace_id') + } + } + + # Save to activity logs container + cosmos_activity_logs_container.upsert_item(activity_log) + + # Add flag to document + doc['added_to_activity_log'] = True + cosmos_public_documents_container.upsert_item(doc) + + results['public_documents_migrated'] += 1 + + except Exception as doc_error: + results['public_documents_failed'] += 1 + error_msg = f"Failed to migrate public document {doc.get('id')}: {str(doc_error)}" + debug_print(error_msg) + results['errors'].append(error_msg) + + except Exception as e: + error_msg = f"Error during public documents migration: {str(e)}" + debug_print(error_msg) + results['errors'].append(error_msg) + + # Calculate totals + results['total_migrated'] = ( + results['conversations_migrated'] + + results['personal_documents_migrated'] + + results['group_documents_migrated'] + + results['public_documents_migrated'] + ) + + results['total_failed'] = ( + results['conversations_failed'] + + results['personal_documents_failed'] + + results['group_documents_failed'] + + results['public_documents_failed'] + ) + + debug_print(f"Migration complete: {results['total_migrated']} migrated, {results['total_failed']} failed") + + return jsonify(results), 200 + + except Exception as e: + debug_print(f"Error during migration: {e}") + import traceback + traceback.print_exc() + return jsonify({'error': f'Migration failed: {str(e)}'}), 500 + + @app.route('/api/admin/control-center/activity-logs', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_get_activity_logs(): + """ + Get paginated and filtered activity logs from cosmos_activity_logs_container. + Supports search and filtering by activity type. 
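To show how the pagination and filters documented here fit together, a minimal admin-client sketch follows; the localhost host, the authenticated session, and the example filter values are assumptions, while the parameter and response field names match the handler below.

    # Illustrative paging loop over GET /api/admin/control-center/activity-logs
    import requests

    session = requests.Session()   # assumed to carry an authenticated admin session cookie
    page = 1
    while True:
        data = session.get(
            "http://localhost:5000/api/admin/control-center/activity-logs",
            params={
                "page": page,
                "per_page": 50,
                "search": "upload",                           # applied in Python after the Cosmos query
                "activity_type_filter": "document_creation",  # or "all"
            },
        ).json()

        for log in data["logs"]:
            user = data["user_map"].get(log.get("user_id"), {})
            print(log.get("timestamp"), log.get("activity_type"), user.get("email", ""))

        if not data["pagination"]["has_next"]:
            break
        page += 1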
+ """ + try: + # Get query parameters + page = int(request.args.get('page', 1)) + per_page = int(request.args.get('per_page', 50)) + search_term = request.args.get('search', '').strip().lower() + activity_type_filter = request.args.get('activity_type_filter', 'all').strip() + + # Build query conditions + query_conditions = [] + parameters = [] + + # Filter by activity type if not 'all' + if activity_type_filter and activity_type_filter != 'all': + query_conditions.append("c.activity_type = @activity_type") + parameters.append({"name": "@activity_type", "value": activity_type_filter}) + + # Build WHERE clause (empty if no conditions) + where_clause = " WHERE " + " AND ".join(query_conditions) if query_conditions else "" + + # Get total count for pagination + count_query = f"SELECT VALUE COUNT(1) FROM c{where_clause}" + total_items_result = list(cosmos_activity_logs_container.query_items( + query=count_query, + parameters=parameters, + enable_cross_partition_query=True + )) + total_items = total_items_result[0] if total_items_result and isinstance(total_items_result[0], int) else 0 + + # Calculate pagination + offset = (page - 1) * per_page + total_pages = (total_items + per_page - 1) // per_page if total_items > 0 else 1 + + # Get paginated results + logs_query = f""" + SELECT * FROM c{where_clause} + ORDER BY c.timestamp DESC + OFFSET {offset} LIMIT {per_page} + """ + + debug_print(f"Activity logs query: {logs_query}") + debug_print(f"Query parameters: {parameters}") + + logs = list(cosmos_activity_logs_container.query_items( + query=logs_query, + parameters=parameters, + enable_cross_partition_query=True + )) + + # Apply search filter in Python (after fetching from Cosmos) + if search_term: + filtered_logs = [] + for log in logs: + # Search in various fields + searchable_text = ' '.join([ + str(log.get('activity_type', '')), + str(log.get('user_id', '')), + str(log.get('login_method', '')), + str(log.get('conversation', {}).get('title', '')), + str(log.get('document', {}).get('file_name', '')), + str(log.get('token_type', '')), + str(log.get('workspace_type', '')) + ]).lower() + + if search_term in searchable_text: + filtered_logs.append(log) + + logs = filtered_logs + # Recalculate total_items for filtered results + total_items = len(logs) + total_pages = (total_items + per_page - 1) // per_page if total_items > 0 else 1 + + # Get unique user IDs from logs + user_ids = set(log.get('user_id') for log in logs if log.get('user_id')) + + # Fetch user information for display names/emails + user_map = {} + if user_ids: + for user_id in user_ids: + try: + user_doc = cosmos_user_settings_container.read_item( + item=user_id, + partition_key=user_id + ) + user_map[user_id] = { + 'email': user_doc.get('email', ''), + 'display_name': user_doc.get('display_name', '') + } + except: + user_map[user_id] = { + 'email': '', + 'display_name': '' + } + + return jsonify({ + 'logs': logs, + 'user_map': user_map, + 'pagination': { + 'page': page, + 'per_page': per_page, + 'total_items': total_items, + 'total_pages': total_pages, + 'has_prev': page > 1, + 'has_next': page < total_pages + } + }), 200 + + except Exception as e: + debug_print(f"Error getting activity logs: {e}") + import traceback + traceback.print_exc() + return jsonify({'error': 'Failed to fetch activity logs'}), 500 + + # ============================================================================ + # APPROVAL WORKFLOW ENDPOINTS + # ============================================================================ + + 
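Before the individual endpoints, a minimal sketch of how the approval workflow below is driven from a client: list what is pending, then approve a request that the current admin did not create. The localhost host, the session handling, and the approval-id path segment are assumptions (the view functions take an approval_id argument), not literal text from the patch.

    # Illustrative approval flow: list pending requests, then approve one.
    import requests

    BASE = "http://localhost:5000/api/admin/control-center"
    session = requests.Session()   # assumed to carry an authenticated admin session cookie

    pending = session.get(
        f"{BASE}/approvals",
        params={"status": "pending", "page": 1, "page_size": 20},
    ).json()

    for approval in pending["approvals"]:
        if not approval["can_approve"]:
            continue   # requesters cannot approve their own requests
        result = session.post(
            f"{BASE}/approvals/{approval['id']}/approve",
            json={
                "group_id": approval.get("group_id"),   # assumed field; the approve call needs the partition key
                "comment": "Reviewed and approved.",
            },
        ).json()
        print(result.get("message"), result.get("execution_result"))
        break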
@app.route('/api/admin/control-center/approvals', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_get_approvals(): + """ + Get approval requests visible to the current user. + + Query Parameters: + page (int): Page number (default: 1) + page_size (int): Items per page (default: 20) + status (str): Filter by status (pending, approved, denied, all) + action_type (str): Filter by action type + search (str): Search by group name or reason + """ + try: + user = session.get('user', {}) + user_id = user.get('oid') or user.get('sub') + + # Get user roles from session + user_roles = user.get('roles', []) + + # Get query parameters + page = int(request.args.get('page', 1)) + page_size = int(request.args.get('page_size', 20)) + status_filter = request.args.get('status', 'all') + action_type_filter = request.args.get('action_type', 'all') + search_query = request.args.get('search', '') + + # Determine include_completed based on status filter + include_completed = (status_filter == 'all' or status_filter in ['approved', 'denied']) + + # Map action_type to request_type_filter + request_type_filter = None if action_type_filter == 'all' else action_type_filter + + # Fetch approvals + result = get_pending_approvals( + user_id=user_id, + user_roles=user_roles, + page=page, + per_page=page_size, + include_completed=include_completed, + request_type_filter=request_type_filter + ) + + # Add can_approve field to each approval + approvals_with_permission = [] + for approval in result.get('approvals', []): + approval_copy = dict(approval) + # User can approve if they didn't create the request OR if they're the only admin + approval_copy['can_approve'] = (approval.get('requester_id') != user_id) + approvals_with_permission.append(approval_copy) + + # Rename fields to match frontend expectations + return jsonify({ + 'success': True, + 'approvals': approvals_with_permission, + 'total_count': result.get('total', 0), + 'page': result.get('page', 1), + 'page_size': result.get('per_page', page_size), + 'total_pages': result.get('total_pages', 0) + }), 200 + + except Exception as e: + debug_print(f"Error fetching approvals: {e}") + import traceback + debug_print(traceback.format_exc()) + return jsonify({'error': 'Failed to fetch approvals', 'details': str(e)}), 500 + + @app.route('/api/admin/control-center/approvals/', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_get_approval_by_id(approval_id): + """ + Get a single approval request by ID. 
+ + Query Parameters: + group_id (str): Group ID (partition key) + """ + try: + user = session.get('user', {}) + user_id = user.get('oid') or user.get('sub') + + group_id = request.args.get('group_id') + if not group_id: + return jsonify({'error': 'group_id query parameter is required'}), 400 + + # Get the approval + approval = cosmos_approvals_container.read_item( + item=approval_id, + partition_key=group_id + ) + + # Add can_approve field + approval['can_approve'] = (approval.get('requester_id') != user_id) + + return jsonify(approval), 200 + + except Exception as e: + debug_print(f"Error fetching approval {approval_id}: {e}") + import traceback + debug_print(traceback.format_exc()) + return jsonify({'error': 'Failed to fetch approval', 'details': str(e)}), 500 + + @app.route('/api/admin/control-center/approvals//approve', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_approve_request(approval_id): + """ + Approve an approval request and execute the action. + + Body: + group_id (str): Group ID (partition key) + comment (str, optional): Approval comment + """ + try: + user = session.get('user', {}) + user_id = user.get('oid') or user.get('sub') + user_email = user.get('preferred_username', user.get('email', 'unknown')) + user_name = user.get('name', user_email) + + data = request.get_json() + group_id = data.get('group_id') + comment = data.get('comment', '') + + if not group_id: + return jsonify({'error': 'group_id is required'}), 400 + + # Approve the request + approval = approve_request( + approval_id=approval_id, + group_id=group_id, + approver_id=user_id, + approver_email=user_email, + approver_name=user_name, + comment=comment + ) + + # Execute the approved action + execution_result = _execute_approved_action(approval, user_id, user_email, user_name) + + return jsonify({ + 'success': True, + 'message': 'Request approved and executed', + 'approval': approval, + 'execution_result': execution_result + }), 200 + + except Exception as e: + debug_print(f"Error approving request: {e}") + return jsonify({'error': str(e)}), 500 + + @app.route('/api/admin/control-center/approvals//deny', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('admin') + def api_admin_deny_request(approval_id): + """ + Deny an approval request. 
+ + Body: + group_id (str): Group ID (partition key) + comment (str): Reason for denial (required) + """ + try: + user = session.get('user', {}) + user_id = user.get('oid') or user.get('sub') + user_email = user.get('preferred_username', user.get('email', 'unknown')) + user_name = user.get('name', user_email) + + data = request.get_json() + group_id = data.get('group_id') + comment = data.get('comment', '') + + if not group_id: + return jsonify({'error': 'group_id is required'}), 400 + + if not comment: + return jsonify({'error': 'comment is required for denial'}), 400 + + # Deny the request + approval = deny_request( + approval_id=approval_id, + group_id=group_id, + denier_id=user_id, + denier_email=user_email, + denier_name=user_name, + comment=comment, + auto_denied=False + ) + + return jsonify({ + 'success': True, + 'message': 'Request denied', + 'approval': approval + }), 200 + + except Exception as e: + debug_print(f"Error denying request: {e}") + return jsonify({'error': str(e)}), 500 + + # New standalone approvals API endpoints (accessible to all users with permissions) + @app.route('/api/approvals', methods=['GET']) + @login_required + def api_get_approvals(): + """ + Get approval requests visible to the current user (admins, control center admins, and group owners). + + Query Parameters: + page (int): Page number (default: 1) + page_size (int): Items per page (default: 20) + status (str): Filter by status (pending, approved, denied, all) + action_type (str): Filter by action type + search (str): Search by group name or reason + """ + try: + user = session.get('user', {}) + user_id = user.get('oid') or user.get('sub') + user_roles = user.get('roles', []) + + # Get query parameters + page = int(request.args.get('page', 1)) + page_size = int(request.args.get('page_size', 20)) + status_filter = request.args.get('status', 'pending') + action_type_filter = request.args.get('action_type', 'all') + search_query = request.args.get('search', '') + + debug_print(f"📋 [APPROVALS API] Fetching approvals - status_filter: {status_filter}, action_type: {action_type_filter}") + + # Determine include_completed based on status filter + # 'all' means show everything, specific statuses mean show only those + include_completed = (status_filter in ['all', 'approved', 'denied', 'executed']) + + debug_print(f"📋 [APPROVALS API] include_completed: {include_completed}") + + # Map action_type to request_type_filter + request_type_filter = None if action_type_filter == 'all' else action_type_filter + + # Fetch approvals + result = get_pending_approvals( + user_id=user_id, + user_roles=user_roles, + page=page, + per_page=page_size, + include_completed=include_completed, + request_type_filter=request_type_filter, + status_filter=status_filter + ) + + # Add can_approve field to each approval + approvals_with_permission = [] + for approval in result.get('approvals', []): + approval_copy = dict(approval) + # User can approve if they didn't create the request + approval_copy['can_approve'] = (approval.get('requester_id') != user_id) + approvals_with_permission.append(approval_copy) + + return jsonify({ + 'success': True, + 'approvals': approvals_with_permission, + 'total_count': result.get('total', 0), + 'page': result.get('page', 1), + 'page_size': result.get('per_page', page_size), + 'total_pages': result.get('total_pages', 0) + }), 200 + + except Exception as e: + debug_print(f"Error fetching approvals: {e}") + import traceback + debug_print(traceback.format_exc()) + return jsonify({'error': 'Failed to fetch 
approvals', 'details': str(e)}), 500 + + @app.route('/api/approvals/', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + def api_get_approval_by_id(approval_id): + """ + Get a single approval request by ID. + + Query Parameters: + group_id (str): Group ID (partition key) + """ + try: + user = session.get('user', {}) + user_id = user.get('oid') or user.get('sub') + + group_id = request.args.get('group_id') + if not group_id: + return jsonify({'error': 'group_id query parameter is required'}), 400 + + # Get the approval + approval = cosmos_approvals_container.read_item( + item=approval_id, + partition_key=group_id + ) + + # Add can_approve field + approval['can_approve'] = (approval.get('requester_id') != user_id) + + return jsonify(approval), 200 + + except Exception as e: + debug_print(f"Error fetching approval {approval_id}: {e}") + import traceback + debug_print(traceback.format_exc()) + return jsonify({'error': 'Failed to fetch approval', 'details': str(e)}), 500 + + @app.route('/api/approvals//approve', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + def api_approve_request(approval_id): + """ + Approve an approval request and execute the action. + + Body: + group_id (str): Group ID (partition key) + comment (str, optional): Approval comment + """ + try: + user = session.get('user', {}) + user_id = user.get('oid') or user.get('sub') + user_email = user.get('preferred_username', user.get('email', 'unknown')) + user_name = user.get('name', user_email) + + data = request.get_json() + group_id = data.get('group_id') + comment = data.get('comment', '') + + if not group_id: + return jsonify({'error': 'group_id is required'}), 400 + + # Approve the request + approval = approve_request( + approval_id=approval_id, + group_id=group_id, + approver_id=user_id, + approver_email=user_email, + approver_name=user_name, + comment=comment + ) + + # Execute the approved action + execution_result = _execute_approved_action(approval, user_id, user_email, user_name) + + return jsonify({ + 'success': True, + 'message': 'Request approved and executed', + 'approval': approval, + 'execution_result': execution_result + }), 200 + + except Exception as e: + debug_print(f"Error approving request: {e}") + return jsonify({'error': str(e)}), 500 + + @app.route('/api/approvals//deny', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + def api_deny_request(approval_id): + """ + Deny an approval request. 
+ + Body: + group_id (str): Group ID (partition key) + comment (str): Reason for denial (required) + """ + try: + user = session.get('user', {}) + user_id = user.get('oid') or user.get('sub') + user_email = user.get('preferred_username', user.get('email', 'unknown')) + user_name = user.get('name', user_email) + + data = request.get_json() + group_id = data.get('group_id') + comment = data.get('comment', '') + + if not group_id: + return jsonify({'error': 'group_id is required'}), 400 + + if not comment: + return jsonify({'error': 'comment is required for denial'}), 400 + + # Deny the request + approval = deny_request( + approval_id=approval_id, + group_id=group_id, + denier_id=user_id, + denier_email=user_email, + denier_name=user_name, + comment=comment, + auto_denied=False + ) + + return jsonify({ + 'success': True, + 'message': 'Request denied', + 'approval': approval + }), 200 + + except Exception as e: + debug_print(f"Error denying request: {e}") + return jsonify({'error': str(e)}), 500 + + def _execute_approved_action(approval, executor_id, executor_email, executor_name): + """ + Execute the action specified in an approved request. + + Args: + approval: Approved request document + executor_id: User ID executing the action + executor_email: Email of executor + executor_name: Display name of executor + + Returns: + Result dictionary with success status and message + """ + try: + request_type = approval['request_type'] + group_id = approval['group_id'] + + if request_type == TYPE_TAKE_OWNERSHIP: + # Execute take ownership + # Check if this is for a public workspace or group + if approval.get('metadata', {}).get('entity_type') == 'workspace': + result = _execute_take_workspace_ownership(approval, executor_id, executor_email, executor_name) + else: + result = _execute_take_ownership(approval, executor_id, executor_email, executor_name) + + elif request_type == TYPE_TRANSFER_OWNERSHIP: + # Execute transfer ownership + # Check if this is for a public workspace or group + if approval.get('metadata', {}).get('entity_type') == 'workspace': + result = _execute_transfer_workspace_ownership(approval, executor_id, executor_email, executor_name) + else: + result = _execute_transfer_ownership(approval, executor_id, executor_email, executor_name) + + elif request_type == TYPE_DELETE_DOCUMENTS: + # Check if this is for a public workspace or group + if approval.get('metadata', {}).get('entity_type') == 'workspace': + result = _execute_delete_public_workspace_documents(approval, executor_id, executor_email, executor_name) + else: + result = _execute_delete_documents(approval, executor_id, executor_email, executor_name) + + elif request_type == TYPE_DELETE_GROUP: + # Check if this is for a public workspace or group + if approval.get('metadata', {}).get('entity_type') == 'workspace': + result = _execute_delete_public_workspace(approval, executor_id, executor_email, executor_name) + else: + result = _execute_delete_group(approval, executor_id, executor_email, executor_name) + + elif request_type == TYPE_DELETE_USER_DOCUMENTS: + # Execute delete user documents + result = _execute_delete_user_documents(approval, executor_id, executor_email, executor_name) + + else: + result = {'success': False, 'message': f'Unknown request type: {request_type}'} + + # Mark approval as executed + mark_approval_executed( + approval_id=approval['id'], + group_id=group_id, + success=result['success'], + result_message=result['message'] + ) + + return result + + except Exception as e: + # Mark as failed + mark_approval_executed( 
+ approval_id=approval['id'], + group_id=approval['group_id'], + success=False, + result_message=f"Execution error: {str(e)}" + ) + raise + + def _execute_take_ownership(approval, executor_id, executor_email, executor_name): + """Execute admin take ownership action.""" + try: + group_id = approval['group_id'] + requester_id = approval['requester_id'] + requester_email = approval['requester_email'] + + # Get the group + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + + old_owner = group.get('owner', {}) + old_owner_id = old_owner.get('id') + old_owner_email = old_owner.get('email', 'unknown') + + # Update owner to requester (the admin who requested) + group['owner'] = { + 'id': requester_id, + 'email': requester_email, + 'displayName': approval['requester_name'] + } + + # Remove requester from special roles if present + if requester_id in group.get('admins', []): + group['admins'].remove(requester_id) + if requester_id in group.get('documentManagers', []): + group['documentManagers'].remove(requester_id) + + # Ensure requester is in users list + requester_in_users = any(m.get('userId') == requester_id for m in group.get('users', [])) + if not requester_in_users: + group.setdefault('users', []).append({ + 'userId': requester_id, + 'email': requester_email, + 'displayName': approval['requester_name'] + }) + + # Demote old owner to regular member + if old_owner_id: + old_owner_in_users = any(m.get('userId') == old_owner_id for m in group.get('users', [])) + if not old_owner_in_users: + group.setdefault('users', []).append({ + 'userId': old_owner_id, + 'email': old_owner_email, + 'displayName': old_owner.get('displayName', old_owner_email) + }) + + if old_owner_id in group.get('admins', []): + group['admins'].remove(old_owner_id) + if old_owner_id in group.get('documentManagers', []): + group['documentManagers'].remove(old_owner_id) + + group['modifiedDate'] = datetime.utcnow().isoformat() + cosmos_groups_container.upsert_item(group) + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'group_ownership_change', + 'activity_type': 'admin_take_ownership_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'admin_user_id': requester_id, + 'admin_email': requester_email, + 'approver_id': executor_id, + 'approver_email': executor_email, + 'group_id': group_id, + 'group_name': group.get('name', 'Unknown'), + 'old_owner_id': old_owner_id, + 'old_owner_email': old_owner_email, + 'new_owner_id': requester_id, + 'new_owner_email': requester_email, + 'approval_id': approval['id'], + 'description': f"Admin {requester_email} took ownership (approved by {executor_email})" + } + cosmos_activity_logs_container.create_item(body=activity_record) + + return { + 'success': True, + 'message': f'Ownership transferred to {requester_email}' + } + + except Exception as e: + return {'success': False, 'message': f'Failed to take ownership: {str(e)}'} + + def _execute_take_workspace_ownership(approval, executor_id, executor_email, executor_name): + """Execute admin take workspace ownership action.""" + try: + workspace_id = approval.get('workspace_id') or approval.get('group_id') + requester_id = approval['requester_id'] + requester_email = approval['requester_email'] + requester_name = approval['requester_name'] + + # Get the workspace + workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id) + + # Get old owner info + old_owner = workspace.get('owner', {}) + if isinstance(old_owner, dict): + 
old_owner_id = old_owner.get('userId') + old_owner_email = old_owner.get('email') + old_owner_name = old_owner.get('displayName') + else: + # Old format where owner is just a string + old_owner_id = old_owner + # Try to get user info + try: + old_owner_user = cosmos_user_settings_container.read_item( + item=old_owner_id, + partition_key=old_owner_id + ) + old_owner_email = old_owner_user.get('email', 'unknown') + old_owner_name = old_owner_user.get('display_name', old_owner_email) + except: + old_owner_email = 'unknown' + old_owner_name = 'unknown' + + # Update owner to requester (the admin who requested) with full user object + workspace['owner'] = { + 'userId': requester_id, + 'email': requester_email, + 'displayName': requester_name + } + + # Remove requester from admins/documentManagers if present + new_admins = [] + for admin in workspace.get('admins', []): + admin_id = admin.get('userId') if isinstance(admin, dict) else admin + if admin_id != requester_id: + # Ensure admin is full object + if isinstance(admin, dict): + new_admins.append(admin) + else: + # Convert string ID to object if needed + try: + admin_user = cosmos_user_settings_container.read_item( + item=admin, + partition_key=admin + ) + new_admins.append({ + 'userId': admin, + 'email': admin_user.get('email', 'unknown'), + 'displayName': admin_user.get('display_name', 'unknown') + }) + except: + pass + workspace['admins'] = new_admins + + new_dms = [] + for dm in workspace.get('documentManagers', []): + dm_id = dm.get('userId') if isinstance(dm, dict) else dm + if dm_id != requester_id: + # Ensure dm is full object + if isinstance(dm, dict): + new_dms.append(dm) + else: + # Convert string ID to object if needed + try: + dm_user = cosmos_user_settings_container.read_item( + item=dm, + partition_key=dm + ) + new_dms.append({ + 'userId': dm, + 'email': dm_user.get('email', 'unknown'), + 'displayName': dm_user.get('display_name', 'unknown') + }) + except: + pass + workspace['documentManagers'] = new_dms + + # Demote old owner to admin if not already there + if old_owner_id and old_owner_id != requester_id: + old_owner_in_admins = any( + (a.get('userId') if isinstance(a, dict) else a) == old_owner_id + for a in workspace.get('admins', []) + ) + old_owner_in_dms = any( + (dm.get('userId') if isinstance(dm, dict) else dm) == old_owner_id + for dm in workspace.get('documentManagers', []) + ) + + if not old_owner_in_admins and not old_owner_in_dms: + # Add old owner as admin + workspace.setdefault('admins', []).append({ + 'userId': old_owner_id, + 'email': old_owner_email, + 'displayName': old_owner_name + }) + + workspace['modifiedDate'] = datetime.utcnow().isoformat() + cosmos_public_workspaces_container.upsert_item(workspace) + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'workspace_ownership_change', + 'activity_type': 'admin_take_ownership_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'requester_id': requester_id, + 'requester_email': requester_email, + 'approver_id': executor_id, + 'approver_email': executor_email, + 'workspace_id': workspace_id, + 'workspace_name': workspace.get('name', 'Unknown'), + 'old_owner_id': old_owner_id, + 'old_owner_email': old_owner_email, + 'new_owner_id': requester_id, + 'new_owner_email': requester_email, + 'approval_id': approval['id'], + 'description': f"Admin {requester_email} took ownership (approved by {executor_email})" + } + cosmos_activity_logs_container.create_item(body=activity_record) + + return { + 'success': True, + 'message': 
f"Ownership transferred to {requester_email}" + } + + except Exception as e: + return {'success': False, 'message': f'Failed to take workspace ownership: {str(e)}'} + + def _execute_transfer_ownership(approval, executor_id, executor_email, executor_name): + """Execute transfer ownership action.""" + try: + group_id = approval['group_id'] + new_owner_id = approval['metadata'].get('new_owner_id') + + if not new_owner_id: + return {'success': False, 'message': 'new_owner_id not found in approval metadata'} + + # Get the group + group = cosmos_groups_container.read_item(item=group_id, partition_key=group_id) + + # Find new owner in members + new_owner_member = None + for member in group.get('users', []): + if member.get('userId') == new_owner_id: + new_owner_member = member + break + + if not new_owner_member: + return {'success': False, 'message': 'New owner not found in group members'} + + old_owner = group.get('owner', {}) + old_owner_id = old_owner.get('id') + + # Update owner + group['owner'] = { + 'id': new_owner_id, + 'email': new_owner_member.get('email'), + 'displayName': new_owner_member.get('displayName') + } + + # Remove new owner from special roles + if new_owner_id in group.get('admins', []): + group['admins'].remove(new_owner_id) + if new_owner_id in group.get('documentManagers', []): + group['documentManagers'].remove(new_owner_id) + + # Demote old owner to member + if old_owner_id: + old_owner_in_users = any(m.get('userId') == old_owner_id for m in group.get('users', [])) + if not old_owner_in_users: + group.setdefault('users', []).append({ + 'userId': old_owner_id, + 'email': old_owner.get('email'), + 'displayName': old_owner.get('displayName') + }) + + if old_owner_id in group.get('admins', []): + group['admins'].remove(old_owner_id) + if old_owner_id in group.get('documentManagers', []): + group['documentManagers'].remove(old_owner_id) + + group['modifiedDate'] = datetime.utcnow().isoformat() + cosmos_groups_container.upsert_item(group) + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'group_ownership_change', + 'activity_type': 'transfer_ownership_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'requester_id': approval['requester_id'], + 'requester_email': approval['requester_email'], + 'approver_id': executor_id, + 'approver_email': executor_email, + 'group_id': group_id, + 'group_name': group.get('name', 'Unknown'), + 'old_owner_id': old_owner_id, + 'old_owner_email': old_owner.get('email'), + 'new_owner_id': new_owner_id, + 'new_owner_email': new_owner_member.get('email'), + 'approval_id': approval['id'], + 'description': f"Ownership transferred to {new_owner_member.get('email')} (approved by {executor_email})" + } + cosmos_activity_logs_container.create_item(body=activity_record) + + return { + 'success': True, + 'message': f"Ownership transferred to {new_owner_member.get('email')}" + } + + except Exception as e: + return {'success': False, 'message': f'Failed to transfer ownership: {str(e)}'} + + def _execute_transfer_workspace_ownership(approval, executor_id, executor_email, executor_name): + """Execute transfer workspace ownership action.""" + try: + workspace_id = approval.get('workspace_id') or approval.get('group_id') + new_owner_id = approval['metadata'].get('new_owner_id') + new_owner_email = approval['metadata'].get('new_owner_email') + new_owner_name = approval['metadata'].get('new_owner_name') + + if not new_owner_id: + return {'success': False, 'message': 'new_owner_id not found in approval metadata'} + + # 
Get the workspace + workspace = cosmos_public_workspaces_container.read_item(item=workspace_id, partition_key=workspace_id) + + # Get old owner info + old_owner = workspace.get('owner', {}) + if isinstance(old_owner, dict): + old_owner_id = old_owner.get('userId') + old_owner_email = old_owner.get('email') + old_owner_name = old_owner.get('displayName') + else: + # Handle case where owner is just a string (old format) + old_owner_id = old_owner + # Try to get full user info + try: + old_owner_user = cosmos_user_settings_container.read_item( + item=old_owner_id, + partition_key=old_owner_id + ) + old_owner_email = old_owner_user.get('email', 'unknown') + old_owner_name = old_owner_user.get('display_name', old_owner_email) + except: + old_owner_email = 'unknown' + old_owner_name = 'unknown' + + # Update owner with full user object + workspace['owner'] = { + 'userId': new_owner_id, + 'email': new_owner_email, + 'displayName': new_owner_name + } + + # Remove new owner from admins/documentManagers if present + new_admins = [] + for admin in workspace.get('admins', []): + admin_id = admin.get('userId') if isinstance(admin, dict) else admin + if admin_id != new_owner_id: + # Ensure admin is full object + if isinstance(admin, dict): + new_admins.append(admin) + else: + # Convert string ID to object if needed + try: + admin_user = cosmos_user_settings_container.read_item( + item=admin, + partition_key=admin + ) + new_admins.append({ + 'userId': admin, + 'email': admin_user.get('email', 'unknown'), + 'displayName': admin_user.get('display_name', 'unknown') + }) + except: + pass + workspace['admins'] = new_admins + + new_dms = [] + for dm in workspace.get('documentManagers', []): + dm_id = dm.get('userId') if isinstance(dm, dict) else dm + if dm_id != new_owner_id: + # Ensure dm is full object + if isinstance(dm, dict): + new_dms.append(dm) + else: + # Convert string ID to object if needed + try: + dm_user = cosmos_user_settings_container.read_item( + item=dm, + partition_key=dm + ) + new_dms.append({ + 'userId': dm, + 'email': dm_user.get('email', 'unknown'), + 'displayName': dm_user.get('display_name', 'unknown') + }) + except: + pass + workspace['documentManagers'] = new_dms + + # Add old owner to admins if not already there + if old_owner_id and old_owner_id != new_owner_id: + old_owner_in_admins = any( + (a.get('userId') if isinstance(a, dict) else a) == old_owner_id + for a in workspace.get('admins', []) + ) + old_owner_in_dms = any( + (dm.get('userId') if isinstance(dm, dict) else dm) == old_owner_id + for dm in workspace.get('documentManagers', []) + ) + + if not old_owner_in_admins and not old_owner_in_dms: + # Add old owner as admin + workspace.setdefault('admins', []).append({ + 'userId': old_owner_id, + 'email': old_owner_email, + 'displayName': old_owner_name + }) + + workspace['modifiedDate'] = datetime.utcnow().isoformat() + cosmos_public_workspaces_container.upsert_item(workspace) + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'workspace_ownership_change', + 'activity_type': 'transfer_ownership_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'requester_id': approval['requester_id'], + 'requester_email': approval['requester_email'], + 'approver_id': executor_id, + 'approver_email': executor_email, + 'workspace_id': workspace_id, + 'workspace_name': workspace.get('name', 'Unknown'), + 'old_owner_id': old_owner_id, + 'old_owner_email': old_owner_email, + 'new_owner_id': new_owner_id, + 'new_owner_email': new_owner_email, + 'approval_id': 
approval['id'], + 'description': f"Ownership transferred to {new_owner_email} (approved by {executor_email})" + } + cosmos_activity_logs_container.create_item(body=activity_record) + + return { + 'success': True, + 'message': f"Ownership transferred to {new_owner_email}" + } + + except Exception as e: + return {'success': False, 'message': f'Failed to transfer workspace ownership: {str(e)}'} + + def _execute_delete_documents(approval, executor_id, executor_email, executor_name): + """Execute delete all documents action.""" + try: + group_id = approval['group_id'] + + debug_print(f"🔍 [DELETE_GROUP_DOCS] Starting deletion for group_id: {group_id}") + + # Query all document metadata for this group + query = "SELECT * FROM c WHERE c.group_id = @group_id AND c.type = 'document_metadata'" + parameters = [{"name": "@group_id", "value": group_id}] + + debug_print(f"🔍 [DELETE_GROUP_DOCS] Query: {query}") + debug_print(f"🔍 [DELETE_GROUP_DOCS] Parameters: {parameters}") + debug_print(f"🔍 [DELETE_GROUP_DOCS] Using partition_key: {group_id}") + + # Query with partition key for better performance + documents = list(cosmos_group_documents_container.query_items( + query=query, + parameters=parameters, + partition_key=group_id + )) + + debug_print(f"📊 [DELETE_GROUP_DOCS] Found {len(documents)} documents with partition key query") + + # If no documents found with partition key, try cross-partition query + if len(documents) == 0: + debug_print(f"⚠️ [DELETE_GROUP_DOCS] No documents found with partition key, trying cross-partition query") + documents = list(cosmos_group_documents_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + debug_print(f"📊 [DELETE_GROUP_DOCS] Cross-partition query found {len(documents)} documents") + + # Log sample document for debugging + if len(documents) > 0: + sample_doc = documents[0] + debug_print(f"📄 [DELETE_GROUP_DOCS] Sample document structure: id={sample_doc.get('id')}, type={sample_doc.get('type')}, group_id={sample_doc.get('group_id')}") + + deleted_count = 0 + + # Use proper deletion APIs for each document + for doc in documents: + try: + doc_id = doc['id'] + debug_print(f"🗑️ [DELETE_GROUP_DOCS] Deleting document {doc_id}") + + # Use delete_document API which handles: + # - Blob storage deletion + # - AI Search index deletion + # - Cosmos DB metadata deletion + # Note: For group documents, we don't have a user_id, so we pass None + delete_result = delete_document( + user_id=None, + document_id=doc_id, + group_id=group_id + ) + + # Check if delete_result is valid and successful + if delete_result and delete_result.get('success'): + # Delete document chunks using proper API + delete_document_chunks( + document_id=doc_id, + group_id=group_id + ) + + deleted_count += 1 + debug_print(f"✅ [DELETE_GROUP_DOCS] Successfully deleted document {doc_id}") + else: + error_msg = delete_result.get('message') if delete_result else 'delete_document returned None' + debug_print(f"❌ [DELETE_GROUP_DOCS] Failed to delete document {doc_id}: {error_msg}") + + except Exception as doc_error: + debug_print(f"❌ [DELETE_GROUP_DOCS] Error deleting document {doc.get('id')}: {doc_error}") + + # Invalidate group search cache after deletion + try: + invalidate_group_search_cache(group_id) + debug_print(f"🔄 [DELETE_GROUP_DOCS] Invalidated search cache for group {group_id}") + except Exception as cache_error: + debug_print(f"⚠️ [DELETE_GROUP_DOCS] Could not invalidate search cache: {cache_error}") + + # Log to activity logs + activity_record = { + 'id': 
str(uuid.uuid4()), + 'type': 'group_documents_deletion', + 'activity_type': 'delete_all_documents_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'requester_id': approval['requester_id'], + 'requester_email': approval['requester_email'], + 'approver_id': executor_id, + 'approver_email': executor_email, + 'group_id': group_id, + 'group_name': approval['group_name'], + 'documents_deleted': deleted_count, + 'approval_id': approval['id'], + 'description': f"All documents deleted from group (approved by {executor_email})" + } + cosmos_activity_logs_container.create_item(body=activity_record) + + debug_print(f"[ControlCenter] Group Documents Deleted (Approved) -- group_id: {group_id}, documents_deleted: {deleted_count}") + + return { + 'success': True, + 'message': f'Deleted {deleted_count} documents' + } + + except Exception as e: + debug_print(f"[DELETE_GROUP_DOCS] Fatal error: {e}") + return {'success': False, 'message': f'Failed to delete documents: {str(e)}'} + + def _execute_delete_public_workspace_documents(approval, executor_id, executor_email, executor_name): + """Execute delete all documents in a public workspace.""" + try: + workspace_id = approval['group_id'] # workspace_id is stored as group_id + + debug_print(f"🔍 [DELETE_WORKSPACE_DOCS] Starting deletion for workspace_id: {workspace_id}") + + # Query all documents for this workspace + query = "SELECT c.id FROM c WHERE c.public_workspace_id = @workspace_id" + parameters = [{"name": "@workspace_id", "value": workspace_id}] + + debug_print(f"🔍 [DELETE_WORKSPACE_DOCS] Query: {query}") + debug_print(f"🔍 [DELETE_WORKSPACE_DOCS] Parameters: {parameters}") + + documents = list(cosmos_public_documents_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + + debug_print(f"📊 [DELETE_WORKSPACE_DOCS] Found {len(documents)} documents") + + deleted_count = 0 + for doc in documents: + try: + doc_id = doc['id'] + debug_print(f"🗑️ [DELETE_WORKSPACE_DOCS] Deleting document {doc_id}") + + # Delete document chunks and metadata using proper APIs + delete_document_chunks( + document_id=doc_id, + public_workspace_id=workspace_id + ) + + delete_document( + user_id=None, + document_id=doc_id, + public_workspace_id=workspace_id + ) + + deleted_count += 1 + debug_print(f"✅ [DELETE_WORKSPACE_DOCS] Successfully deleted document {doc_id}") + + except Exception as doc_error: + debug_print(f"❌ [DELETE_WORKSPACE_DOCS] Error deleting document {doc_id}: {doc_error}") + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'public_workspace_documents_deletion', + 'activity_type': 'delete_all_documents_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'requester_id': approval['requester_id'], + 'requester_email': approval['requester_email'], + 'approver_id': executor_id, + 'approver_email': executor_email, + 'workspace_id': workspace_id, + 'workspace_name': approval.get('metadata', {}).get('workspace_name', 'Unknown'), + 'documents_deleted': deleted_count, + 'approval_id': approval['id'], + 'description': f"All documents deleted from public workspace (approved by {executor_email})", + 'workspace_context': { + 'public_workspace_id': workspace_id + } + } + cosmos_activity_logs_container.create_item(body=activity_record) + + debug_print(f"[ControlCenter] Public Workspace Documents Deleted (Approved) -- workspace_id: {workspace_id}, documents_deleted: {deleted_count}") + + return { + 'success': True, + 'message': f'Deleted {deleted_count} documents from public workspace' + 
} + + except Exception as e: + debug_print(f"[DELETE_WORKSPACE_DOCS] Fatal error: {e}") + return {'success': False, 'message': f'Failed to delete workspace documents: {str(e)}'} + + def _execute_delete_public_workspace(approval, executor_id, executor_email, executor_name): + """Execute delete entire public workspace action.""" + try: + workspace_id = approval['group_id'] # workspace_id is stored as group_id + + debug_print(f"🔍 [DELETE_WORKSPACE] Starting deletion for workspace_id: {workspace_id}") + + # First delete all documents + doc_result = _execute_delete_public_workspace_documents(approval, executor_id, executor_email, executor_name) + + if not doc_result['success']: + return doc_result + + # Delete the workspace itself + try: + cosmos_public_workspaces_container.delete_item( + item=workspace_id, + partition_key=workspace_id + ) + debug_print(f"✅ [DELETE_WORKSPACE] Successfully deleted workspace {workspace_id}") + except Exception as del_e: + debug_print(f"❌ [DELETE_WORKSPACE] Error deleting workspace {workspace_id}: {del_e}") + return {'success': False, 'message': f'Failed to delete workspace: {str(del_e)}'} + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'public_workspace_deletion', + 'activity_type': 'delete_workspace_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'requester_id': approval['requester_id'], + 'requester_email': approval['requester_email'], + 'approver_id': executor_id, + 'approver_email': executor_email, + 'workspace_id': workspace_id, + 'workspace_name': approval.get('metadata', {}).get('workspace_name', 'Unknown'), + 'approval_id': approval['id'], + 'description': f"Public workspace completely deleted (approved by {executor_email})", + 'workspace_context': { + 'public_workspace_id': workspace_id + } + } + cosmos_activity_logs_container.create_item(body=activity_record) + + debug_print(f"[ControlCenter] Public Workspace Deleted (Approved) -- workspace_id: {workspace_id}") + + return { + 'success': True, + 'message': 'Public workspace and all documents deleted successfully' + } + + except Exception as e: + debug_print(f"[DELETE_WORKSPACE] Fatal error: {e}") + return {'success': False, 'message': f'Failed to delete workspace: {str(e)}'} + + def _execute_delete_group(approval, executor_id, executor_email, executor_name): + """Execute delete entire group action.""" + try: + group_id = approval['group_id'] + + # First delete all documents + doc_result = _execute_delete_documents(approval, executor_id, executor_email, executor_name) + + # Delete group conversations (optional - could keep for audit) + try: + query = "SELECT * FROM c WHERE c.group_id = @group_id" + parameters = [{"name": "@group_id", "value": group_id}] + + conversations = list(cosmos_group_conversations_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + + for conv in conversations: + cosmos_group_conversations_container.delete_item( + item=conv['id'], + partition_key=group_id + ) + except Exception as conv_error: + debug_print(f"Error deleting conversations: {conv_error}") + + # Delete group messages (optional) + try: + messages = list(cosmos_group_messages_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + + for msg in messages: + cosmos_group_messages_container.delete_item( + item=msg['id'], + partition_key=group_id + ) + except Exception as msg_error: + debug_print(f"Error deleting messages: {msg_error}") + + # Finally, delete the group itself 
using proper API + debug_print(f"🗑️ [DELETE GROUP] Deleting group document using delete_group() API") + delete_group(group_id) + debug_print(f"✅ [DELETE GROUP] Group {group_id} successfully deleted") + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'group_deletion', + 'activity_type': 'delete_group_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'requester_id': approval['requester_id'], + 'requester_email': approval['requester_email'], + 'approver_id': executor_id, + 'approver_email': executor_email, + 'group_id': group_id, + 'group_name': approval['group_name'], + 'approval_id': approval['id'], + 'description': f"Group completely deleted (approved by {executor_email})" + } + cosmos_activity_logs_container.create_item(body=activity_record) + + return { + 'success': True, + 'message': 'Group completely deleted' + } + + except Exception as e: + return {'success': False, 'message': f'Failed to delete group: {str(e)}'} + + def _execute_delete_user_documents(approval, executor_id, executor_email, executor_name): + """Execute delete all user documents action.""" + try: + from functions_documents import delete_document, delete_document_chunks + from utils_cache import invalidate_personal_search_cache + + user_id = approval['metadata'].get('user_id') + user_email = approval['metadata'].get('user_email', 'unknown') + user_name = approval['metadata'].get('user_name', user_email) + + if not user_id: + return {'success': False, 'message': 'User ID not found in approval metadata'} + + # Query all personal documents for this user + # Personal documents are stored in cosmos_user_documents_container with user_id as partition key + query = "SELECT * FROM c WHERE c.user_id = @user_id" + parameters = [{"name": "@user_id", "value": user_id}] + + debug_print(f"🔍 [DELETE_USER_DOCS] Querying for user_id: {user_id}") + debug_print(f"🔍 [DELETE_USER_DOCS] Query: {query}") + debug_print(f"🔍 [DELETE_USER_DOCS] Container: cosmos_user_documents_container") + + documents = list(cosmos_user_documents_container.query_items( + query=query, + parameters=parameters, + partition_key=user_id # Use partition key for efficient query + )) + + debug_print(f"📊 [DELETE_USER_DOCS] Found {len(documents)} documents with partition key query") + if len(documents) > 0: + debug_print(f"📄 [DELETE_USER_DOCS] First document sample: id={documents[0].get('id', 'no-id')}, file_name={documents[0].get('file_name', 'no-filename')}, type={documents[0].get('type', 'no-type')}") + else: + # Try a cross-partition query to see if documents exist elsewhere + debug_print(f"⚠️ [DELETE_USER_DOCS] No documents found with partition key, trying cross-partition query...") + documents = list(cosmos_user_documents_container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + debug_print(f"📊 [DELETE_USER_DOCS] Cross-partition query found {len(documents)} documents") + if len(documents) > 0: + sample_doc = documents[0] + debug_print(f"📄 [DELETE_USER_DOCS] Sample doc fields: {list(sample_doc.keys())}") + debug_print(f"📄 [DELETE_USER_DOCS] Sample doc: id={sample_doc.get('id')}, type={sample_doc.get('type')}, user_id={sample_doc.get('user_id')}, file_name={sample_doc.get('file_name')}") + + deleted_count = 0 + + # Use the existing delete_document function for proper cleanup + for doc in documents: + try: + document_id = doc['id'] + debug_print(f"🗑️ [DELETE_USER_DOCS] Deleting document {document_id}: {doc.get('file_name', 'unknown')}") + + # Use the proper delete_document 
function which handles: + # - Blob storage deletion + # - AI Search index deletion + # - Cosmos DB document deletion + delete_document(user_id, document_id) + delete_document_chunks(document_id) + + deleted_count += 1 + debug_print(f"✅ [DELETE_USER_DOCS] Successfully deleted document {document_id}") + + except Exception as doc_error: + debug_print(f"❌ [DELETE_USER_DOCS] Error deleting document {doc.get('id')}: {doc_error}") + + # Invalidate search cache for this user + try: + invalidate_personal_search_cache(user_id) + debug_print(f"🔄 [DELETE_USER_DOCS] Invalidated search cache for user {user_id}") + except Exception as cache_error: + debug_print(f"⚠️ [DELETE_USER_DOCS] Failed to invalidate search cache: {cache_error}") + + # Log to activity logs + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'user_documents_deletion', + 'activity_type': 'delete_all_user_documents_approved', + 'timestamp': datetime.utcnow().isoformat(), + 'requester_id': approval['requester_id'], + 'requester_email': approval['requester_email'], + 'approver_id': executor_id, + 'approver_email': executor_email, + 'target_user_id': user_id, + 'target_user_email': user_email, + 'target_user_name': user_name, + 'documents_deleted': deleted_count, + 'approval_id': approval['id'], + 'description': f"All documents deleted for user {user_name} ({user_email}) - approved by {executor_email}" + } + cosmos_activity_logs_container.create_item(body=activity_record) + + # Log to AppInsights + log_event("[ControlCenter] User Documents Deleted (Approved)", { + "executor": executor_email, + "user_id": user_id, + "user_email": user_email, + "documents_deleted": deleted_count, + "approval_id": approval['id'] + }) + + return { + 'success': True, + 'message': f'Deleted {deleted_count} documents for user {user_name}' + } + + except Exception as e: + debug_print(f"Error deleting user documents: {e}") + return {'success': False, 'message': f'Failed to delete user documents: {str(e)}'} + + return jsonify({'error': 'Failed to retrieve activity logs'}), 500 \ No newline at end of file diff --git a/application/single_app/route_backend_conversations.py b/application/single_app/route_backend_conversations.py index 3d06fd0c..179b7885 100644 --- a/application/single_app/route_backend_conversations.py +++ b/application/single_app/route_backend_conversations.py @@ -7,6 +7,7 @@ from flask import Response, request from functions_debug import debug_print from swagger_wrapper import swagger_route, get_auth_security +from functions_activity_logging import log_conversation_creation, log_conversation_deletion, log_conversation_archival def register_route_backend_conversations(app): @@ -26,16 +27,40 @@ def api_get_messages(): item=conversation_id, partition_key=conversation_id ) - # Query all messages and chunks in cosmos_messages_container - message_query = f"SELECT * FROM c WHERE c.conversation_id = '{conversation_id}' ORDER BY c.timestamp ASC" + # Query all messages in cosmos_messages_container + # We'll filter for active_thread in Python since Cosmos DB boolean queries can be tricky + message_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + ORDER BY c.timestamp ASC + """ + + debug_print(f"Executing query: {message_query}") + all_items = list(cosmos_messages_container.query_items( query=message_query, partition_key=conversation_id )) - debug_print(f"Query returned {len(all_items)} total items") - for i, item in enumerate(all_items): - debug_print(f"Item {i}: id={item.get('id')}, role={item.get('role')}") + 
debug_print(f"Query returned {len(all_items)} total items (before filtering)") + + # Filter for active_thread = True OR active_thread is not defined (backwards compatibility) + filtered_items = [] + for item in all_items: + thread_info = item.get('metadata', {}).get('thread_info', {}) + active = thread_info.get('active_thread') + debug_print(f"Evaluating item id={item.get('id')}, role={item.get('role')}, active_thread={active}, attempt={thread_info.get('thread_attempt', 'N/A')}") + + # Include if: active_thread is True, OR active_thread is not defined, OR active_thread is None + if active is True or active is None or 'active_thread' not in thread_info: + filtered_items.append(item) + debug_print(f" ✅ Including: id={item.get('id')}, role={item.get('role')}, active={active}, attempt={thread_info.get('thread_attempt', 'N/A')}") + else: + debug_print(f" ❌ Excluding: id={item.get('id')}, role={item.get('role')}, active={active}, attempt={thread_info.get('thread_attempt', 'N/A')}") + + all_items = filtered_items + debug_print(f"After filtering: {len(all_items)} items remaining") + # Process messages and reassemble chunked images messages = [] @@ -73,6 +98,12 @@ def api_get_messages(): debug_print(f"Reassembling chunked image {image_id} with {total_chunks} chunks") debug_print(f"Available chunks in chunked_images: {list(chunked_images.get(image_id, {}).keys())}") + # Preserve extracted_text and vision_analysis from main message + extracted_text = message.get('extracted_text') + vision_analysis = message.get('vision_analysis') + + debug_print(f"Image has extracted_text: {bool(extracted_text)}, vision_analysis: {bool(vision_analysis)}") + # Start with the content from the main message (chunk 0) complete_content = message.get('content', '') debug_print(f"Main message content length: {len(complete_content)} bytes") @@ -105,6 +136,13 @@ def api_get_messages(): else: # Small enough to embed directly message['content'] = complete_content + + # IMPORTANT: Preserve extracted_text and vision_analysis in the final message + # These fields are needed by the frontend to display the info drawer + if extracted_text: + message['extracted_text'] = extracted_text + if vision_analysis: + message['vision_analysis'] = vision_analysis return jsonify({'messages': messages}) except CosmosResourceNotFoundError: @@ -271,9 +309,23 @@ def create_conversation(): 'title': 'New Conversation', 'context': [], 'tags': [], - 'strict': False + 'strict': False, + 'is_pinned': False, + 'is_hidden': False } cosmos_conversations_container.upsert_item(conversation_item) + + # Log conversation creation + log_conversation_creation( + user_id=user_id, + conversation_id=conversation_id, + title='New Conversation', + workspace_type='personal' + ) + + # Mark as logged to activity logs to prevent duplicate migration + conversation_item['added_to_activity_log'] = True + cosmos_conversations_container.upsert_item(conversation_item) return jsonify({ 'conversation_id': conversation_id, @@ -354,6 +406,16 @@ def delete_conversation(conversation_id): archived_item = dict(conversation_item) archived_item["archived_at"] = datetime.utcnow().isoformat() cosmos_archived_conversations_container.upsert_item(archived_item) + + # Log conversation archival + log_conversation_archival( + user_id=conversation_item.get('user_id'), + conversation_id=conversation_id, + title=conversation_item.get('title', 'Untitled'), + workspace_type='personal', + context=conversation_item.get('context', []), + tags=conversation_item.get('tags', []) + ) message_query = f"SELECT * 
FROM c WHERE c.conversation_id = '{conversation_id}'" results = list(cosmos_messages_container.query_items( @@ -369,6 +431,18 @@ def delete_conversation(conversation_id): cosmos_messages_container.delete_item(doc['id'], partition_key=conversation_id) + # Log conversation deletion before actual deletion + log_conversation_deletion( + user_id=conversation_item.get('user_id'), + conversation_id=conversation_id, + title=conversation_item.get('title', 'Untitled'), + workspace_type='personal', + context=conversation_item.get('context', []), + tags=conversation_item.get('tags', []), + is_archived=archiving_enabled, + is_bulk_operation=False + ) + try: cosmos_conversations_container.delete_item( item=conversation_id, @@ -431,6 +505,16 @@ def delete_multiple_conversations(): archived_item = dict(conversation_item) archived_item["archived_at"] = datetime.utcnow().isoformat() cosmos_archived_conversations_container.upsert_item(archived_item) + + # Log conversation archival + log_conversation_archival( + user_id=user_id, + conversation_id=conversation_id, + title=conversation_item.get('title', 'Untitled'), + workspace_type='personal', + context=conversation_item.get('context', []), + tags=conversation_item.get('tags', []) + ) # Get and archive messages if enabled message_query = f"SELECT * FROM c WHERE c.conversation_id = '{conversation_id}'" @@ -447,6 +531,18 @@ def delete_multiple_conversations(): cosmos_messages_container.delete_item(message['id'], partition_key=conversation_id) + # Log conversation deletion before actual deletion + log_conversation_deletion( + user_id=user_id, + conversation_id=conversation_id, + title=conversation_item.get('title', 'Untitled'), + workspace_type='personal', + context=conversation_item.get('context', []), + tags=conversation_item.get('tags', []), + is_archived=archiving_enabled, + is_bulk_operation=True + ) + # Delete the conversation cosmos_conversations_container.delete_item( item=conversation_id, @@ -465,6 +561,206 @@ def delete_multiple_conversations(): "failed_ids": failed_ids }), 200 + @app.route('/api/conversations//pin', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def toggle_conversation_pin(conversation_id): + """ + Toggle the pinned status of a conversation. 
+ """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + # Retrieve the conversation + conversation_item = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + + # Ensure that the conversation belongs to the current user + if conversation_item.get('user_id') != user_id: + return jsonify({'error': 'Forbidden'}), 403 + + # Toggle the pinned status + current_pinned = conversation_item.get('is_pinned', False) + conversation_item['is_pinned'] = not current_pinned + conversation_item['last_updated'] = datetime.utcnow().isoformat() + + # Update in Cosmos DB + cosmos_conversations_container.upsert_item(conversation_item) + + return jsonify({ + 'success': True, + 'is_pinned': conversation_item['is_pinned'] + }), 200 + + except CosmosResourceNotFoundError: + return jsonify({'error': 'Conversation not found'}), 404 + except Exception as e: + print(f"Error toggling conversation pin: {e}") + return jsonify({'error': 'Failed to toggle pin status'}), 500 + + @app.route('/api/conversations//hide', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def toggle_conversation_hide(conversation_id): + """ + Toggle the hidden status of a conversation. + """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + # Retrieve the conversation + conversation_item = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + + # Ensure that the conversation belongs to the current user + if conversation_item.get('user_id') != user_id: + return jsonify({'error': 'Forbidden'}), 403 + + # Toggle the hidden status + current_hidden = conversation_item.get('is_hidden', False) + conversation_item['is_hidden'] = not current_hidden + conversation_item['last_updated'] = datetime.utcnow().isoformat() + + # Update in Cosmos DB + cosmos_conversations_container.upsert_item(conversation_item) + + return jsonify({ + 'success': True, + 'is_hidden': conversation_item['is_hidden'] + }), 200 + + except CosmosResourceNotFoundError: + return jsonify({'error': 'Conversation not found'}), 404 + except Exception as e: + print(f"Error toggling conversation hide: {e}") + return jsonify({'error': 'Failed to toggle hide status'}), 500 + + @app.route('/api/conversations/bulk-pin', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def bulk_pin_conversations(): + """ + Pin or unpin multiple conversations at once. + """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + data = request.get_json() + conversation_ids = data.get('conversation_ids', []) + pin_action = data.get('action', 'pin') # 'pin' or 'unpin' + + if not conversation_ids: + return jsonify({'error': 'No conversation IDs provided'}), 400 + + if pin_action not in ['pin', 'unpin']: + return jsonify({'error': 'Invalid action. 
Must be "pin" or "unpin"'}), 400 + + success_count = 0 + failed_ids = [] + + for conversation_id in conversation_ids: + try: + conversation_item = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + + # Check if the conversation belongs to the current user + if conversation_item.get('user_id') != user_id: + failed_ids.append(conversation_id) + continue + + # Set pin status + conversation_item['is_pinned'] = (pin_action == 'pin') + conversation_item['last_updated'] = datetime.utcnow().isoformat() + + # Update in Cosmos DB + cosmos_conversations_container.upsert_item(conversation_item) + success_count += 1 + + except CosmosResourceNotFoundError: + failed_ids.append(conversation_id) + except Exception as e: + print(f"Error updating conversation {conversation_id}: {str(e)}") + failed_ids.append(conversation_id) + + return jsonify({ + "success": True, + "updated_count": success_count, + "failed_ids": failed_ids, + "action": pin_action + }), 200 + + @app.route('/api/conversations/bulk-hide', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def bulk_hide_conversations(): + """ + Hide or unhide multiple conversations at once. + """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + data = request.get_json() + conversation_ids = data.get('conversation_ids', []) + hide_action = data.get('action', 'hide') # 'hide' or 'unhide' + + if not conversation_ids: + return jsonify({'error': 'No conversation IDs provided'}), 400 + + if hide_action not in ['hide', 'unhide']: + return jsonify({'error': 'Invalid action. Must be "hide" or "unhide"'}), 400 + + success_count = 0 + failed_ids = [] + + for conversation_id in conversation_ids: + try: + conversation_item = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + + # Check if the conversation belongs to the current user + if conversation_item.get('user_id') != user_id: + failed_ids.append(conversation_id) + continue + + # Set hide status + conversation_item['is_hidden'] = (hide_action == 'hide') + conversation_item['last_updated'] = datetime.utcnow().isoformat() + + # Update in Cosmos DB + cosmos_conversations_container.upsert_item(conversation_item) + success_count += 1 + + except CosmosResourceNotFoundError: + failed_ids.append(conversation_id) + except Exception as e: + print(f"Error updating conversation {conversation_id}: {str(e)}") + failed_ids.append(conversation_id) + + return jsonify({ + "success": True, + "updated_count": success_count, + "failed_ids": failed_ids, + "action": hide_action + }), 200 + @app.route('/api/conversations//metadata', methods=['GET']) @swagger_route(security=get_auth_security()) @login_required @@ -497,11 +793,1111 @@ def get_conversation_metadata_api(conversation_id): "classification": conversation_item.get('classification', []), "context": conversation_item.get('context', []), "tags": conversation_item.get('tags', []), - "strict": conversation_item.get('strict', False) + "strict": conversation_item.get('strict', False), + "is_pinned": conversation_item.get('is_pinned', False), + "is_hidden": conversation_item.get('is_hidden', False) }), 200 except CosmosResourceNotFoundError: return jsonify({'error': 'Conversation not found'}), 404 except Exception as e: print(f"Error retrieving conversation metadata: {e}") - return jsonify({'error': 'Failed to retrieve conversation metadata'}), 500 \ No newline at end of 
file + return jsonify({'error': 'Failed to retrieve conversation metadata'}), 500 + + @app.route('/api/conversations/classifications', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def get_user_classifications(): + """ + Get all unique classifications from user's conversations + """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + # Query all conversations for this user + query = f"SELECT c.classification FROM c WHERE c.user_id = '{user_id}'" + items = list(cosmos_conversations_container.query_items( + query=query, + enable_cross_partition_query=True + )) + + # Extract and flatten all classifications + classifications_set = set() + for item in items: + classifications = item.get('classification', []) + if isinstance(classifications, list): + for classification in classifications: + if classification and isinstance(classification, str): + classifications_set.add(classification.strip()) + + # Sort alphabetically + classifications_list = sorted(list(classifications_set)) + + return jsonify({ + 'success': True, + 'classifications': classifications_list + }), 200 + + except Exception as e: + print(f"Error fetching classifications: {e}") + return jsonify({'error': 'Failed to fetch classifications'}), 500 + + @app.route('/api/search_conversations', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def search_conversations(): + """ + Search conversations and messages with filters and pagination + """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + data = request.get_json() + search_term = data.get('search_term', '').strip() + date_from = data.get('date_from', '') + date_to = data.get('date_to', '') + chat_types = data.get('chat_types', []) + classifications = data.get('classifications', []) + has_files = data.get('has_files', False) + has_images = data.get('has_images', False) + page = int(data.get('page', 1)) + per_page = int(data.get('per_page', 20)) + + # Validate search term + if not search_term or len(search_term) < 3: + return jsonify({ + 'success': False, + 'error': 'Search term must be at least 3 characters' + }), 400 + + # Build conversation query with filters + # Find conversations where user is a participant (supports multi-user conversations) + # Check both old schema (user_id at root) and new schema (participant tag) + query_parts = [ + f"(c.user_id = '{user_id}' OR EXISTS(SELECT VALUE t FROM t IN c.tags WHERE t.category = 'participant' AND t.user_id = '{user_id}'))" + ] + + debug_print(f"🔍 Search parameters:") + debug_print(f" user_id: {user_id}") + debug_print(f" search_term: {search_term}") + debug_print(f" date_from: {date_from}") + debug_print(f" date_to: {date_to}") + debug_print(f" chat_types: {chat_types}") + debug_print(f" classifications: {classifications}") + + if date_from: + query_parts.append(f"c.last_updated >= '{date_from}'") + if date_to: + query_parts.append(f"c.last_updated <= '{date_to}T23:59:59'") + + conversation_query = f"SELECT * FROM c WHERE {' AND '.join(query_parts)}" + debug_print(f"\n📋 Conversation query: {conversation_query}") + + conversations = list(cosmos_conversations_container.query_items( + query=conversation_query, + enable_cross_partition_query=True, + max_item_count=-1 # Get all items, no pagination limit + )) + + debug_print(f"Found {len(conversations)} conversations from query") + + # Check if 
target conversation is in the results + target_conv_id = "2712dbad-560d-4d2e-a354-b8f67fcf9429" + target_conv = next((c for c in conversations if c['id'] == target_conv_id), None) + if target_conv: + debug_print(f"\n🎯 Found target conversation {target_conv_id}") + debug_print(f" chat_type: {target_conv.get('chat_type')}") + debug_print(f" title: {target_conv.get('title', 'N/A')}") + else: + debug_print(f"\n❌ Target conversation {target_conv_id} NOT in query results") + + # Filter by chat types if specified + if chat_types: + before_count = len(conversations) + filtered_out = [] + filtered_in = [] + + for c in conversations: + # Default to 'personal' if chat_type is not defined (legacy conversations) + chat_type = c.get('chat_type', 'personal') + if chat_type in chat_types: + filtered_in.append(c) + else: + filtered_out.append(c) + + conversations = filtered_in + debug_print(f"After chat_type filter: {len(conversations)} (removed {before_count - len(conversations)})") + + # Show some examples of filtered out chat types + if filtered_out: + unique_types = set(c.get('chat_type', 'None/personal') for c in filtered_out[:10]) + debug_print(f" Filtered out chat_types (sample): {unique_types}") + + # Filter by classifications if specified + if classifications: + before_count = len(conversations) + conversations = [c for c in conversations if any( + cls in (c.get('classification', []) or []) for cls in classifications + )] + debug_print(f"After classification filter: {len(conversations)} (removed {before_count - len(conversations)})") + + # Search messages in each conversation + results = [] + search_lower = search_term.lower() + + debug_print(f"🔍 Starting search for term: '{search_term}'") + debug_print(f"Found {len(conversations)} conversations to search") + + # Create a set of conversation IDs for fast lookup + conversation_ids = set(c['id'] for c in conversations) + conversation_map = {c['id']: c for c in conversations} + + # Do a single cross-partition query for all matching messages + # This is much faster than querying each conversation individually + message_query = f"SELECT * FROM m WHERE CONTAINS(m.content, '{search_term}', true) AND (m.role = 'user' OR m.role = 'assistant')" + debug_print(f"\n📋 Cross-partition message query: {message_query}") + + all_matching_messages = list(cosmos_messages_container.query_items( + query=message_query, + enable_cross_partition_query=True, + max_item_count=-1 + )) + + debug_print(f"Found {len(all_matching_messages)} total messages across all conversations") + + # Group messages by conversation and filter + messages_by_conversation = {} + for msg in all_matching_messages: + conv_id = msg.get('conversation_id') + + # Only include messages from conversations we have access to + if conv_id not in conversation_ids: + continue + + # Filter out inactive threads + thread_info = msg.get('metadata', {}).get('thread_info', {}) + active = thread_info.get('active_thread') + + # Include all messages where active_thread is not explicitly False + if active is not False: + if conv_id not in messages_by_conversation: + messages_by_conversation[conv_id] = [] + messages_by_conversation[conv_id].append(msg) + + debug_print(f"After filtering: {len(messages_by_conversation)} conversations have matching messages") + + # Build results for each conversation with matches + for conv_id, matching_messages in messages_by_conversation.items(): + + # Apply file/image filters if specified + if has_files or has_images: + filtered_messages = [] + for msg in matching_messages: + metadata = 
msg.get('metadata', {}) + if has_files and metadata.get('uploaded_files'): + filtered_messages.append(msg) + elif has_images and metadata.get('generated_images'): + filtered_messages.append(msg) + elif not has_files and not has_images: + filtered_messages.append(msg) + matching_messages = filtered_messages + + if matching_messages: + # Get conversation details + conversation = conversation_map.get(conv_id) + if not conversation: + continue + + # Build message snippets + message_snippets = [] + for msg in matching_messages[:5]: # Limit to 5 messages per conversation + content = msg.get('content', '') + content_lower = content.lower() + + # Find match position + match_pos = content_lower.find(search_lower) + if match_pos != -1: + # Extract 50 chars before and after + start = max(0, match_pos - 50) + end = min(len(content), match_pos + len(search_term) + 50) + snippet = content[start:end] + + # Add ellipsis if truncated + if start > 0: + snippet = '...' + snippet + if end < len(content): + snippet = snippet + '...' + + message_snippets.append({ + 'message_id': msg.get('id'), + 'content_snippet': snippet, + 'timestamp': msg.get('timestamp', ''), + 'role': msg.get('role', 'unknown') + }) + + results.append({ + 'conversation': { + 'id': conversation['id'], + 'title': conversation.get('title', 'Untitled'), + 'last_updated': conversation.get('last_updated', ''), + 'classification': conversation.get('classification', []), + 'chat_type': conversation.get('chat_type', 'personal'), + 'is_pinned': conversation.get('is_pinned', False), + 'is_hidden': conversation.get('is_hidden', False) + }, + 'messages': message_snippets, + 'match_count': len(matching_messages) + }) + + # Sort by last_updated (most recent first) + results.sort(key=lambda x: x['conversation']['last_updated'], reverse=True) + + # Pagination + total_results = len(results) + total_pages = math.ceil(total_results / per_page) if total_results > 0 else 1 + start_idx = (page - 1) * per_page + end_idx = start_idx + per_page + paginated_results = results[start_idx:end_idx] + + return jsonify({ + 'success': True, + 'total_results': total_results, + 'page': page, + 'total_pages': total_pages, + 'per_page': per_page, + 'results': paginated_results + }), 200 + + except Exception as e: + print(f"Error searching conversations: {e}") + import traceback + traceback.print_exc() + return jsonify({'error': 'Failed to search conversations'}), 500 + + @app.route('/api/user-settings/search-history', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def get_search_history(): + """Get user's search history""" + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + history = get_user_search_history(user_id) + return jsonify({ + 'success': True, + 'history': history + }), 200 + except Exception as e: + print(f"Error retrieving search history: {e}") + return jsonify({'error': 'Failed to retrieve search history'}), 500 + + @app.route('/api/user-settings/search-history', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def save_search_to_history(): + """Save a search term to user's history""" + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + data = request.get_json() + search_term = data.get('search_term', '').strip() + + if not search_term: + return jsonify({'error': 'Search term is required'}), 400 + + history = 
add_search_to_history(user_id, search_term) + return jsonify({ + 'success': True, + 'history': history + }), 200 + except Exception as e: + print(f"Error saving search to history: {e}") + return jsonify({'error': 'Failed to save search to history'}), 500 + + @app.route('/api/user-settings/search-history', methods=['DELETE']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def clear_search_history(): + """Clear user's search history""" + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + success = clear_user_search_history(user_id) + if success: + return jsonify({ + 'success': True, + 'message': 'Search history cleared' + }), 200 + else: + return jsonify({'error': 'Failed to clear search history'}), 500 + except Exception as e: + print(f"Error clearing search history: {e}") + return jsonify({'error': 'Failed to clear search history'}), 500 + + @app.route('/api/message/', methods=['DELETE']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def delete_message(message_id): + """ + Delete a message or entire thread. Only the message author can delete their messages. + If archiving is enabled, messages are marked with is_deleted=true and masked. + If archiving is disabled, messages are permanently deleted. + """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + data = request.get_json() or {} + delete_thread = data.get('delete_thread', False) + + settings = get_settings() + archiving_enabled = settings.get('enable_conversation_archiving', False) + + # Find the message using cross-partition query + query = "SELECT * FROM c WHERE c.id = @message_id" + params = [{"name": "@message_id", "value": message_id}] + message_results = list(cosmos_messages_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + )) + + if not message_results: + return jsonify({'error': 'Message not found'}), 404 + + message_doc = message_results[0] + conversation_id = message_doc.get('conversation_id') + + # Verify ownership - only the message author can delete their message + message_user_id = message_doc.get('metadata', {}).get('user_info', {}).get('user_id') + if not message_user_id: + # Fallback: check conversation ownership for backwards compatibility + # All messages in a conversation (user, assistant, system) belong to the conversation owner + try: + conversation = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + if conversation.get('user_id') != user_id: + return jsonify({'error': 'You can only delete messages from your own conversations'}), 403 + except: + return jsonify({'error': 'Conversation not found'}), 404 + elif message_user_id != user_id: + return jsonify({'error': 'You can only delete your own messages'}), 403 + + # Collect messages to delete + messages_to_delete = [] + + if delete_thread and message_doc.get('role') == 'user': + # Delete entire thread: user message + system message + assistant/image messages + thread_id = message_doc.get('metadata', {}).get('thread_info', {}).get('thread_id') + thread_previous_id = message_doc.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + + if thread_id: + # Query all messages in this thread exchange (user, system, assistant messages with same thread_id) + # Do NOT include subsequent threads that reference this thread_id as previous_thread_id + 
thread_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + """ + thread_messages = list(cosmos_messages_container.query_items( + query=thread_query, + partition_key=conversation_id + )) + messages_to_delete = thread_messages + + # THREAD CHAIN REPAIR: Update subsequent threads to maintain chain integrity + # Find messages where previous_thread_id points to the thread we're deleting + subsequent_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.previous_thread_id = '{thread_id}' + """ + subsequent_messages = list(cosmos_messages_container.query_items( + query=subsequent_query, + partition_key=conversation_id + )) + + # Update each subsequent message to skip over the deleted thread + # Point their previous_thread_id to the deleted thread's previous_thread_id + for subsequent_msg in subsequent_messages: + # Skip messages that are being deleted (they're in the same thread) + if subsequent_msg['id'] in [m['id'] for m in messages_to_delete]: + continue + + # Update previous_thread_id to maintain chain + if 'metadata' not in subsequent_msg: + subsequent_msg['metadata'] = {} + if 'thread_info' not in subsequent_msg['metadata']: + subsequent_msg['metadata']['thread_info'] = {} + + subsequent_msg['metadata']['thread_info']['previous_thread_id'] = thread_previous_id + + # Upsert the updated message + cosmos_messages_container.upsert_item(subsequent_msg) + print(f"Repaired thread chain: Message {subsequent_msg['id']} now points to thread {thread_previous_id}") + else: + messages_to_delete = [message_doc] + else: + # Delete only the specified message + messages_to_delete = [message_doc] + + # THREAD ATTEMPT PROMOTION: If deleting an active thread attempt, promote next attempt + if messages_to_delete: + first_msg = messages_to_delete[0] + thread_id = first_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + is_active = first_msg.get('metadata', {}).get('thread_info', {}).get('active_thread', True) + + if thread_id and is_active: + # Find all other attempts for this thread_id + other_attempts_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + AND c.id NOT IN ({','.join([f"'{m['id']}'" for m in messages_to_delete])}) + AND c.role = 'user' + """ + other_attempts = list(cosmos_messages_container.query_items( + query=other_attempts_query, + partition_key=conversation_id + )) + + # If there are other attempts, promote the next one (lowest thread_attempt) + if other_attempts: + # Sort by thread_attempt to find the next one + other_attempts.sort(key=lambda m: m.get('metadata', {}).get('thread_info', {}).get('thread_attempt', 0)) + next_attempt_number = other_attempts[0].get('metadata', {}).get('thread_info', {}).get('thread_attempt', 0) + + # Activate all messages with this thread_attempt + activate_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + AND c.metadata.thread_info.thread_attempt = {next_attempt_number} + """ + messages_to_activate = list(cosmos_messages_container.query_items( + query=activate_query, + partition_key=conversation_id + )) + + for msg_to_activate in messages_to_activate: + if 'metadata' not in msg_to_activate: + msg_to_activate['metadata'] = {} + if 'thread_info' not in msg_to_activate['metadata']: + msg_to_activate['metadata']['thread_info'] = {} + 
msg_to_activate['metadata']['thread_info']['active_thread'] = True + cosmos_messages_container.upsert_item(msg_to_activate) + + print(f"Promoted thread_attempt {next_attempt_number} to active after deleting active thread {thread_id}") + + deleted_message_ids = [] + + for msg in messages_to_delete: + msg_id = msg['id'] + + if archiving_enabled: + # Mark as deleted and mask the message + if 'metadata' not in msg: + msg['metadata'] = {} + + msg['metadata']['is_deleted'] = True + msg['metadata']['deleted_by_user_id'] = user_id + msg['metadata']['deleted_timestamp'] = datetime.utcnow().isoformat() + msg['metadata']['masked'] = True + msg['metadata']['masked_by_user_id'] = user_id + msg['metadata']['masked_timestamp'] = datetime.utcnow().isoformat() + + # Archive the message + archived_msg = dict(msg) + archived_msg['archived_at'] = datetime.utcnow().isoformat() + cosmos_archived_messages_container.upsert_item(archived_msg) + + # Update the message in the main container (for conversation history exclusion) + cosmos_messages_container.upsert_item(msg) + else: + # Permanently delete the message + cosmos_messages_container.delete_item(msg_id, partition_key=conversation_id) + + deleted_message_ids.append(msg_id) + + return jsonify({ + 'success': True, + 'deleted_message_ids': deleted_message_ids, + 'archived': archiving_enabled + }), 200 + + except Exception as e: + print(f"Error deleting message: {str(e)}") + import traceback + traceback.print_exc() + return jsonify({'error': 'Failed to delete message'}), 500 + @app.route('/api/message//retry', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def retry_message(message_id): + """ + Retry/regenerate a message by creating new user+system+assistant messages + with incremented thread_attempt and same thread_id. + Only the message author can retry their messages. 
+ """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + data = request.get_json() or {} + selected_model = data.get('model') + reasoning_effort = data.get('reasoning_effort') + agent_info = data.get('agent_info') # Get agent info if provided + + # Find the original message + query = "SELECT * FROM c WHERE c.id = @message_id" + params = [{"name": "@message_id", "value": message_id}] + message_results = list(cosmos_messages_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + )) + + if not message_results: + return jsonify({'error': 'Message not found'}), 404 + + original_msg = message_results[0] + conversation_id = original_msg.get('conversation_id') + original_role = original_msg.get('role') + + # Verify ownership + message_user_id = original_msg.get('metadata', {}).get('user_info', {}).get('user_id') + if not message_user_id: + # Fallback to conversation ownership + try: + conversation = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + if conversation.get('user_id') != user_id: + return jsonify({'error': 'You can only retry messages from your own conversations'}), 403 + except: + return jsonify({'error': 'Conversation not found'}), 404 + elif message_user_id != user_id: + return jsonify({'error': 'You can only retry your own messages'}), 403 + + # Get thread info from original message + thread_id = original_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + previous_thread_id = original_msg.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + + if not thread_id: + return jsonify({'error': 'Message has no thread_id'}), 400 + + # Find current max thread_attempt for this thread_id + attempt_query = f""" + SELECT VALUE MAX(c.metadata.thread_info.thread_attempt) + FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + """ + attempt_results = list(cosmos_messages_container.query_items( + query=attempt_query, + partition_key=conversation_id + )) + + current_max_attempt = attempt_results[0] if attempt_results and attempt_results[0] is not None else 0 + new_attempt = current_max_attempt + 1 + + # Set all existing attempts for this thread to active_thread=false + deactivate_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + """ + existing_messages = list(cosmos_messages_container.query_items( + query=deactivate_query, + partition_key=conversation_id + )) + + print(f"🔍 Retry - Found {len(existing_messages)} existing messages to deactivate") + + for msg in existing_messages: + msg_id = msg.get('id', 'unknown') + msg_role = msg.get('role', 'unknown') + old_active = msg.get('metadata', {}).get('thread_info', {}).get('active_thread', None) + + if 'metadata' not in msg: + msg['metadata'] = {} + if 'thread_info' not in msg['metadata']: + msg['metadata']['thread_info'] = {} + msg['metadata']['thread_info']['active_thread'] = False + cosmos_messages_container.upsert_item(msg) + + print(f" ✏️ Deactivated: {msg_id} (role={msg_role}, was_active={old_active}, now_active=False)") + + # Find the original user message in this thread to get the content + # Get the FIRST user message in this thread (attempt=1) to ensure we get the original content + user_msg_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = 
'{thread_id}' + AND c.role = 'user' + ORDER BY c.metadata.thread_info.thread_attempt ASC + """ + user_msg_results = list(cosmos_messages_container.query_items( + query=user_msg_query, + partition_key=conversation_id + )) + + if not user_msg_results: + return jsonify({'error': 'User message not found in thread'}), 404 + + # Get the first user message (attempt 1) to get original content and metadata + original_user_msg = user_msg_results[0] + user_content = original_user_msg.get('content', '') + original_metadata = original_user_msg.get('metadata', {}) + original_thread_info = original_metadata.get('thread_info', {}) + + print(f"🔍 Retry - Original user message: {original_user_msg.get('id')}") + print(f"🔍 Retry - Original thread_id: {original_thread_info.get('thread_id')}") + print(f"🔍 Retry - Original previous_thread_id: {original_thread_info.get('previous_thread_id')}") + print(f"🔍 Retry - Original attempt: {original_thread_info.get('thread_attempt')}") + print(f"🔍 Retry - New attempt will be: {new_attempt}") + + # Create new user message with same content but new attempt number + import uuid + import time + import random + + new_user_message_id = f"{conversation_id}_user_{int(time.time())}_{random.randint(1000,9999)}" + + # Copy metadata but update thread_attempt and keep same thread_id and previous_thread_id from original + new_metadata = dict(original_metadata) + new_metadata['retried'] = True # Mark as retried + new_metadata['thread_info'] = { + 'thread_id': thread_id, # Keep same thread_id + 'previous_thread_id': original_thread_info.get('previous_thread_id'), # Preserve original previous_thread_id + 'active_thread': True, + 'thread_attempt': new_attempt + } + + print(f"🔍 Retry - New user message ID: {new_user_message_id}") + print(f"🔍 Retry - New thread_info: {new_metadata['thread_info']}") + + # Create new user message + new_user_message = { + 'id': new_user_message_id, + 'conversation_id': conversation_id, + 'role': 'user', + 'content': user_content, + 'timestamp': datetime.utcnow().isoformat(), + 'model_deployment_name': None, + 'metadata': new_metadata + } + cosmos_messages_container.upsert_item(new_user_message) + + # Build chat request parameters from original message metadata + chat_request = { + 'message': user_content, + 'conversation_id': conversation_id, + 'model_deployment': selected_model or original_metadata.get('model_selection', {}).get('selected_model'), + 'reasoning_effort': reasoning_effort or original_metadata.get('reasoning_effort'), + 'hybrid_search': original_metadata.get('document_search', {}).get('enabled', False), + 'selected_document_id': original_metadata.get('document_search', {}).get('document_id'), + 'doc_scope': original_metadata.get('document_search', {}).get('scope'), + 'top_n': original_metadata.get('document_search', {}).get('top_n'), + 'classifications': original_metadata.get('document_search', {}).get('classifications'), + 'image_generation': original_metadata.get('image_generation', {}).get('enabled', False), + 'active_group_id': original_metadata.get('chat_context', {}).get('group_id'), + 'active_public_workspace_id': original_metadata.get('chat_context', {}).get('public_workspace_id'), + 'chat_type': original_metadata.get('chat_context', {}).get('type', 'user'), + 'retry_user_message_id': new_user_message_id, # Pass this to skip user message creation + 'retry_thread_id': thread_id, # Pass thread_id to maintain same thread + 'retry_thread_attempt': new_attempt # Pass attempt number + } + + # Add agent_info to chat request if provided (for 
agent-based retry) + if agent_info: + chat_request['agent_info'] = agent_info + print(f"🤖 Retry - Using agent: {agent_info.get('display_name')} ({agent_info.get('name')})") + elif original_metadata.get('agent_selection'): + # Use original agent selection if no new agent specified + chat_request['agent_info'] = original_metadata.get('agent_selection') + print(f"🤖 Retry - Using original agent from metadata") + + print(f"🔍 Retry - Chat request params: retry_user_message_id={new_user_message_id}, retry_thread_id={thread_id}, retry_thread_attempt={new_attempt}") + + # Make internal request to chat API + from flask import g + g.conversation_id = conversation_id + + # Import and call chat function directly + # We'll need to modify the chat_api to handle retry requests + return jsonify({ + 'success': True, + 'message': 'Retry initiated', + 'thread_id': thread_id, + 'new_attempt': new_attempt, + 'user_message_id': new_user_message_id, + 'chat_request': chat_request + }), 200 + + except Exception as e: + print(f"Error retrying message: {str(e)}") + import traceback + traceback.print_exc() + return jsonify({'error': 'Failed to retry message'}), 500 + + @app.route('/api/message//edit', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def edit_message(message_id): + """ + Edit a user message and regenerate the response with the edited content. + Creates a new attempt with edited content while preserving original model/settings. + Only the message author can edit their messages. + """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + data = request.get_json() or {} + edited_content = data.get('content', '').strip() + + if not edited_content: + return jsonify({'error': 'Message content cannot be empty'}), 400 + + # Find the original message + query = "SELECT * FROM c WHERE c.id = @message_id" + params = [{"name": "@message_id", "value": message_id}] + message_results = list(cosmos_messages_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + )) + + if not message_results: + return jsonify({'error': 'Message not found'}), 404 + + original_msg = message_results[0] + conversation_id = original_msg.get('conversation_id') + original_role = original_msg.get('role') + + # Only allow editing user messages + if original_role != 'user': + return jsonify({'error': 'Only user messages can be edited'}), 400 + + # Verify ownership + message_user_id = original_msg.get('metadata', {}).get('user_info', {}).get('user_id') + if not message_user_id: + # Fallback to conversation ownership + try: + conversation = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + if conversation.get('user_id') != user_id: + return jsonify({'error': 'You can only edit messages from your own conversations'}), 403 + except: + return jsonify({'error': 'Conversation not found'}), 404 + elif message_user_id != user_id: + return jsonify({'error': 'You can only edit your own messages'}), 403 + + # Get thread info from original message + thread_id = original_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + previous_thread_id = original_msg.get('metadata', {}).get('thread_info', {}).get('previous_thread_id') + + if not thread_id: + return jsonify({'error': 'Message has no thread_id'}), 400 + + # Find current max thread_attempt for this thread_id + attempt_query = f""" + SELECT VALUE 
MAX(c.metadata.thread_info.thread_attempt) + FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + """ + attempt_results = list(cosmos_messages_container.query_items( + query=attempt_query, + partition_key=conversation_id + )) + + current_max_attempt = attempt_results[0] if attempt_results and attempt_results[0] is not None else 0 + new_attempt = current_max_attempt + 1 + + # Set all existing attempts for this thread to active_thread=false + deactivate_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + """ + existing_messages = list(cosmos_messages_container.query_items( + query=deactivate_query, + partition_key=conversation_id + )) + + print(f"🔍 Edit - Found {len(existing_messages)} existing messages to deactivate") + + for msg in existing_messages: + msg_id = msg.get('id', 'unknown') + msg_role = msg.get('role', 'unknown') + old_active = msg.get('metadata', {}).get('thread_info', {}).get('active_thread', None) + + if 'metadata' not in msg: + msg['metadata'] = {} + if 'thread_info' not in msg['metadata']: + msg['metadata']['thread_info'] = {} + msg['metadata']['thread_info']['active_thread'] = False + cosmos_messages_container.upsert_item(msg) + + print(f" ✏️ Deactivated: {msg_id} (role={msg_role}, was_active={old_active}, now_active=False)") + + # Get the FIRST user message in this thread (attempt=1) to get original metadata + user_msg_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + AND c.role = 'user' + ORDER BY c.metadata.thread_info.thread_attempt ASC + """ + user_msg_results = list(cosmos_messages_container.query_items( + query=user_msg_query, + partition_key=conversation_id + )) + + if not user_msg_results: + return jsonify({'error': 'User message not found in thread'}), 404 + + # Get the first user message (attempt 1) to get original metadata + original_user_msg = user_msg_results[0] + original_metadata = original_user_msg.get('metadata', {}) + original_thread_info = original_metadata.get('thread_info', {}) + + print(f"🔍 Edit - Original user message: {original_user_msg.get('id')}") + print(f"🔍 Edit - Original thread_id: {original_thread_info.get('thread_id')}") + print(f"🔍 Edit - Original previous_thread_id: {original_thread_info.get('previous_thread_id')}") + print(f"🔍 Edit - Original attempt: {original_thread_info.get('thread_attempt')}") + print(f"🔍 Edit - New attempt will be: {new_attempt}") + + # Create new user message with edited content + import time + import random + + new_user_message_id = f"{conversation_id}_user_{int(time.time())}_{random.randint(1000,9999)}" + + # Copy metadata but update thread_attempt, add edited flag, and keep same thread_id + new_metadata = dict(original_metadata) + new_metadata['edited'] = True # Mark as edited + new_metadata['thread_info'] = { + 'thread_id': thread_id, # Keep same thread_id + 'previous_thread_id': original_thread_info.get('previous_thread_id'), # Preserve original + 'active_thread': True, + 'thread_attempt': new_attempt + } + + print(f"🔍 Edit - New user message ID: {new_user_message_id}") + print(f"🔍 Edit - New thread_info: {new_metadata['thread_info']}") + print(f"🔍 Edit - Edited flag set: {new_metadata.get('edited')}") + + # Create new user message with edited content + new_user_message = { + 'id': new_user_message_id, + 'conversation_id': conversation_id, + 'role': 'user', + 'content': 
edited_content, # Use edited content + 'timestamp': datetime.utcnow().isoformat(), + 'model_deployment_name': None, + 'metadata': new_metadata + } + cosmos_messages_container.upsert_item(new_user_message) + + # Build chat request parameters from original message metadata + # Keep all original settings (model, reasoning, doc search, etc.) + chat_request = { + 'message': edited_content, # Use edited content + 'conversation_id': conversation_id, + 'model_deployment': original_metadata.get('model_selection', {}).get('selected_model'), + 'reasoning_effort': original_metadata.get('reasoning_effort'), + 'hybrid_search': original_metadata.get('document_search', {}).get('enabled', False), + 'selected_document_id': original_metadata.get('document_search', {}).get('document_id'), + 'doc_scope': original_metadata.get('document_search', {}).get('scope'), + 'top_n': original_metadata.get('document_search', {}).get('top_n'), + 'classifications': original_metadata.get('document_search', {}).get('classifications'), + 'image_generation': original_metadata.get('image_generation', {}).get('enabled', False), + 'active_group_id': original_metadata.get('chat_context', {}).get('group_id'), + 'active_public_workspace_id': original_metadata.get('chat_context', {}).get('public_workspace_id'), + 'chat_type': original_metadata.get('chat_context', {}).get('type', 'user'), + 'edited_user_message_id': new_user_message_id, # Pass this to skip user message creation + 'retry_thread_id': thread_id, # Pass thread_id to maintain same thread + 'retry_thread_attempt': new_attempt # Pass attempt number + } + + # Include agent_info from original metadata if present (for agent-based edits) + if original_metadata.get('agent_selection'): + agent_selection = original_metadata.get('agent_selection') + chat_request['agent_info'] = { + 'name': agent_selection.get('selected_agent'), + 'display_name': agent_selection.get('agent_display_name'), + 'id': agent_selection.get('agent_id'), + 'is_global': agent_selection.get('is_global', False), + 'is_group': agent_selection.get('is_group', False), + 'group_id': agent_selection.get('group_id'), + 'group_name': agent_selection.get('group_name') + } + print(f"🤖 Edit - Using agent: {chat_request['agent_info'].get('display_name')} ({chat_request['agent_info'].get('name')})") + + print(f"🔍 Edit - Chat request params: edited_user_message_id={new_user_message_id}, retry_thread_id={thread_id}, retry_thread_attempt={new_attempt}") + + # Return success with chat_request for frontend to call chat API + return jsonify({ + 'success': True, + 'message': 'Edit initiated', + 'thread_id': thread_id, + 'new_attempt': new_attempt, + 'user_message_id': new_user_message_id, + 'edited': True, + 'chat_request': chat_request + }), 200 + + except Exception as e: + print(f"Error editing message: {str(e)}") + import traceback + traceback.print_exc() + return jsonify({'error': 'Failed to edit message'}), 500 + + @app.route('/api/message//switch-attempt', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def switch_attempt(message_id): + """ + Switch between thread attempts by setting active_thread flags. + Cycles through attempts based on direction (prev/next). 
+ """ + user_id = get_current_user_id() + if not user_id: + return jsonify({'error': 'User not authenticated'}), 401 + + try: + data = request.get_json() or {} + direction = data.get('direction', 'next') # 'prev' or 'next' + + # Find the current message + query = "SELECT * FROM c WHERE c.id = @message_id" + params = [{"name": "@message_id", "value": message_id}] + message_results = list(cosmos_messages_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + )) + + if not message_results: + return jsonify({'error': 'Message not found'}), 404 + + current_msg = message_results[0] + conversation_id = current_msg.get('conversation_id') + + # Verify ownership + message_user_id = current_msg.get('metadata', {}).get('user_info', {}).get('user_id') + if not message_user_id: + try: + conversation = cosmos_conversations_container.read_item( + item=conversation_id, + partition_key=conversation_id + ) + if conversation.get('user_id') != user_id: + return jsonify({'error': 'You can only switch attempts in your own conversations'}), 403 + except: + return jsonify({'error': 'Conversation not found'}), 404 + elif message_user_id != user_id: + return jsonify({'error': 'You can only switch attempts in your own conversations'}), 403 + + # Get thread info + thread_id = current_msg.get('metadata', {}).get('thread_info', {}).get('thread_id') + current_attempt = current_msg.get('metadata', {}).get('thread_info', {}).get('thread_attempt', 0) + + if not thread_id: + return jsonify({'error': 'Message has no thread_id'}), 400 + + # Get all attempts for this thread_id, ordered by thread_attempt + attempts_query = f""" + SELECT DISTINCT c.metadata.thread_info.thread_attempt + FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + AND c.role = 'user' + ORDER BY c.metadata.thread_info.thread_attempt ASC + """ + attempts_results = list(cosmos_messages_container.query_items( + query=attempts_query, + partition_key=conversation_id + )) + + available_attempts = sorted([r.get('thread_attempt', 0) for r in attempts_results]) + + if not available_attempts: + return jsonify({'error': 'No attempts found'}), 404 + + # Find current index and determine target attempt + try: + current_index = available_attempts.index(current_attempt) + except ValueError: + current_index = 0 + + if direction == 'prev': + target_index = (current_index - 1) % len(available_attempts) + else: # 'next' + target_index = (current_index + 1) % len(available_attempts) + + target_attempt = available_attempts[target_index] + + # Deactivate all attempts for this thread + deactivate_query = f""" + SELECT * FROM c + WHERE c.conversation_id = '{conversation_id}' + AND c.metadata.thread_info.thread_id = '{thread_id}' + """ + all_thread_messages = list(cosmos_messages_container.query_items( + query=deactivate_query, + partition_key=conversation_id + )) + + # Update active_thread flags + for msg in all_thread_messages: + if 'metadata' not in msg: + msg['metadata'] = {} + if 'thread_info' not in msg['metadata']: + msg['metadata']['thread_info'] = {} + + msg_attempt = msg['metadata']['thread_info'].get('thread_attempt', 0) + msg['metadata']['thread_info']['active_thread'] = (msg_attempt == target_attempt) + cosmos_messages_container.upsert_item(msg) + + return jsonify({ + 'success': True, + 'target_attempt': target_attempt, + 'available_attempts': available_attempts + }), 200 + + except Exception as e: + print(f"Error switching attempt: {str(e)}") + import traceback + 
traceback.print_exc() + return jsonify({'error': 'Failed to switch attempt'}), 500 diff --git a/application/single_app/route_backend_documents.py b/application/single_app/route_backend_documents.py index 447bc032..31619f69 100644 --- a/application/single_app/route_backend_documents.py +++ b/application/single_app/route_backend_documents.py @@ -4,10 +4,14 @@ from functions_authentication import * from functions_documents import * from functions_settings import * +from utils_cache import invalidate_personal_search_cache +from functions_debug import * +from functions_activity_logging import log_document_upload, log_document_metadata_update_transaction import os import requests from flask import current_app from swagger_wrapper import swagger_route, get_auth_security +from functions_debug import debug_print def register_route_backend_documents(app): @app.route('/api/get_file_content', methods=['POST']) @@ -20,11 +24,15 @@ def get_file_content(): user_id = get_current_user_id() conversation_id = data.get('conversation_id') file_id = data.get('file_id') + + debug_print(f"[GET_FILE_CONTENT] Starting - user_id={user_id}, conversation_id={conversation_id}, file_id={file_id}") if not user_id: + debug_print(f"[GET_FILE_CONTENT] ERROR: User not authenticated") return jsonify({'error': 'User not authenticated'}), 401 if not conversation_id or not file_id: + debug_print(f"[GET_FILE_CONTENT] ERROR: Missing conversation_id or file_id") return jsonify({'error': 'Missing conversation_id or id'}), 400 try: @@ -57,36 +65,52 @@ def get_file_content(): add_file_task_to_file_processing_log(document_id=file_id, user_id=user_id, content="File not found in conversation") return jsonify({'error': 'File not found in conversation'}), 404 + debug_print(f"[GET_FILE_CONTENT] Found {len(items)} items for file_id={file_id}") + debug_print(f"[GET_FILE_CONTENT] First item structure: {json.dumps(items[0], default=str, indent=2)}") add_file_task_to_file_processing_log(document_id=file_id, user_id=user_id, content="File found, processing content: " + str(items)) items_sorted = sorted(items, key=lambda x: x.get('chunk_index', 0)) filename = items_sorted[0].get('filename', 'Untitled') is_table = items_sorted[0].get('is_table', False) + debug_print(f"[GET_FILE_CONTENT] Filename: {filename}, is_table: {is_table}") add_file_task_to_file_processing_log(document_id=file_id, user_id=user_id, content="Combining file content from chunks, filename: " + filename + ", is_table: " + str(is_table)) combined_parts = [] - for it in items_sorted: + for idx, it in enumerate(items_sorted): fc = it.get('file_content', '') + debug_print(f"[GET_FILE_CONTENT] Chunk {idx}: file_content type={type(fc).__name__}, len={len(fc) if hasattr(fc, '__len__') else 'N/A'}") if isinstance(fc, list): + debug_print(f"[GET_FILE_CONTENT] Processing list of {len(fc)} items") # If file_content is a list of dicts, join their 'content' fields text_chunks = [] - for chunk in fc: - text_chunks.append(chunk.get('content', '')) + for chunk_idx, chunk in enumerate(fc): + debug_print(f"[GET_FILE_CONTENT] List item {chunk_idx} type: {type(chunk).__name__}") + if isinstance(chunk, dict): + text_chunks.append(chunk.get('content', '')) + elif isinstance(chunk, str): + text_chunks.append(chunk) + else: + debug_print(f"[GET_FILE_CONTENT] Unexpected chunk type in list: {type(chunk).__name__}") combined_parts.append("\n".join(text_chunks)) elif isinstance(fc, str): + debug_print(f"[GET_FILE_CONTENT] Processing string content") # If it's already a string, just append 
combined_parts.append(fc) else: # If it's neither a list nor a string, handle as needed (e.g., skip or log) + debug_print(f"[GET_FILE_CONTENT] WARNING: Unexpected file_content type: {type(fc).__name__}, value: {fc}") pass combined_content = "\n".join(combined_parts) + debug_print(f"[GET_FILE_CONTENT] Combined content length: {len(combined_content)}") if not combined_content: add_file_task_to_file_processing_log(document_id=file_id, user_id=user_id, content="Combined file content is empty") + debug_print(f"[GET_FILE_CONTENT] ERROR: Combined content is empty") return jsonify({'error': 'File content not found'}), 404 + debug_print(f"[GET_FILE_CONTENT] Successfully returning file content") return jsonify({ 'file_content': combined_content, 'filename': filename, @@ -94,6 +118,8 @@ def get_file_content(): }), 200 except Exception as e: + debug_print(f"[GET_FILE_CONTENT] EXCEPTION: {str(e)}") + debug_print(f"[GET_FILE_CONTENT] Traceback: {traceback.format_exc()}") add_file_task_to_file_processing_log(document_id=file_id, user_id=user_id, content="Error retrieving file content: " + str(e)) return jsonify({'error': f'Error retrieving file content: {str(e)}'}), 500 @@ -101,6 +127,7 @@ def get_file_content(): @swagger_route(security=get_auth_security()) @login_required @user_required + @file_upload_required @enabled_required("enable_user_workspace") def api_user_upload_document(): user_id = get_current_user_id() @@ -188,6 +215,28 @@ def api_user_upload_document(): ) processed_docs.append({'document_id': parent_document_id, 'filename': original_filename}) + + # Log document upload activity + try: + # Get file size from the original file object before it's processed + file_size = 0 + try: + file.seek(0, 2) # Seek to end + file_size = file.tell() + file.seek(0) # Reset to beginning + except: + file_size = 0 + + log_document_upload( + user_id=user_id, + container_type='personal', + document_id=parent_document_id, + file_size=file_size, + file_type=file_ext + ) + except Exception as log_error: + # Don't let activity logging errors interrupt upload flow + print(f"Activity logging error for document upload: {log_error}") except Exception as e: upload_errors.append(f"Failed to queue processing for {original_filename}: {e}") @@ -199,6 +248,10 @@ def api_user_upload_document(): response_status = 200 if processed_docs and not upload_errors else 207 # Multi-Status if partial success/errors if not processed_docs and upload_errors: response_status = 400 # Bad Request if all failed + # Invalidate search cache for this user since documents were added + if processed_docs: + invalidate_personal_search_cache(user_id) + # NOTE: For workspace uploads, we do NOT create conversations or chat messages. # Files uploaded to workspaces are for document storage/management, not for immediate chat interaction. # Users can later search these documents in chat if needed. 
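
The message routes earlier in this diff mix two Cosmos query styles: the delete/retry/edit lookups pass `@message_id` through a `parameters` list, while the thread, attempt, and search queries build SQL with f-strings around `conversation_id`, `thread_id`, and the caller-supplied search term. Below is a minimal sketch of the parameterized form, assuming the same `cosmos_messages_container` object and the azure-cosmos `query_items(query, parameters=..., partition_key=...)` signature already used elsewhere in this file; the helper name is illustrative and not part of the PR.

```python
# Sketch only: parameterized Cosmos DB query, following the @message_id style
# already used by the delete/retry/edit routes. The thread and search queries
# in this diff interpolate values into SQL with f-strings; passing a
# `parameters` list instead avoids quoting and injection issues.
# `cosmos_messages_container` is assumed to be the container proxy used above;
# `query_thread_messages` is a hypothetical helper, not code from the PR.

def query_thread_messages(cosmos_messages_container, conversation_id, thread_id):
    """Return all messages for one thread, scoped to a single partition."""
    query = (
        "SELECT * FROM c "
        "WHERE c.conversation_id = @conversation_id "
        "AND c.metadata.thread_info.thread_id = @thread_id"
    )
    parameters = [
        {"name": "@conversation_id", "value": conversation_id},
        {"name": "@thread_id", "value": thread_id},
    ]
    return list(cosmos_messages_container.query_items(
        query=query,
        parameters=parameters,
        # stays single-partition, like the thread queries in the routes above
        partition_key=conversation_id,
    ))
```

The same substitution would apply to the `CONTAINS(m.content, ...)` search query, where the search term comes directly from the request body.
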
@@ -304,8 +357,8 @@ def api_get_user_documents(): # --- 3) First query: get total count based on filters --- try: count_query_str = f"SELECT VALUE COUNT(1) FROM c WHERE {where_clause}" - # print(f"DEBUG Count Query: {count_query_str}") # Optional Debugging - # print(f"DEBUG Count Params: {query_params}") # Optional Debugging + # debug_print(f"Count Query: {count_query_str}") # Optional Debugging + # debug_print(f"Count Params: {query_params}") # Optional Debugging count_items = list(cosmos_user_documents_container.query_items( query=count_query_str, parameters=query_params, @@ -329,8 +382,8 @@ def api_get_user_documents(): ORDER BY c._ts DESC OFFSET {offset} LIMIT {page_size} """ - # print(f"DEBUG Data Query: {data_query_str}") # Optional Debugging - # print(f"DEBUG Data Params: {query_params}") # Optional Debugging + # debug_print(f"Data Query: {data_query_str}") # Optional Debugging + # debug_print(f"Data Params: {query_params}") # Optional Debugging docs = list(cosmos_user_documents_container.query_items( query=data_query_str, parameters=query_params, @@ -406,6 +459,9 @@ def api_patch_user_document(document_id): return jsonify({'error': 'User not authenticated'}), 401 data = request.get_json() # new metadata values from the client + + # Track which fields were updated + updated_fields = {} # Update allowed fields # You can decide which fields can be updated from the client @@ -415,12 +471,14 @@ def api_patch_user_document(document_id): user_id=user_id, title=data['title'] ) + updated_fields['title'] = data['title'] if 'abstract' in data: update_document( document_id=document_id, user_id=user_id, abstract=data['abstract'] ) + updated_fields['abstract'] = data['abstract'] if 'keywords' in data: # Expect a list or a comma-delimited string if isinstance(data['keywords'], list): @@ -429,25 +487,30 @@ def api_patch_user_document(document_id): user_id=user_id, keywords=data['keywords'] ) + updated_fields['keywords'] = data['keywords'] else: # if client sends a comma-separated string of keywords + keywords_list = [kw.strip() for kw in data['keywords'].split(',')] update_document( document_id=document_id, user_id=user_id, - keywords=[kw.strip() for kw in data['keywords'].split(',')] + keywords=keywords_list ) + updated_fields['keywords'] = keywords_list if 'publication_date' in data: update_document( document_id=document_id, user_id=user_id, publication_date=data['publication_date'] ) + updated_fields['publication_date'] = data['publication_date'] if 'document_classification' in data: update_document( document_id=document_id, user_id=user_id, document_classification=data['document_classification'] ) + updated_fields['document_classification'] = data['document_classification'] # Add authors if you want to allow editing that if 'authors' in data: # if you want a list, or just store a string @@ -458,15 +521,46 @@ def api_patch_user_document(document_id): user_id=user_id, authors=data['authors'] ) + updated_fields['authors'] = data['authors'] else: + authors_list = [data['authors']] update_document( document_id=document_id, user_id=user_id, - authors=[data['authors']] + authors=authors_list ) + updated_fields['authors'] = authors_list # Save updates back to Cosmos try: + # Log the metadata update transaction if any fields were updated + if updated_fields: + # Get document details for logging - handle tuple return + doc_response = get_document(user_id, document_id) + doc = None + + # Handle tuple return (response, status_code) + if isinstance(doc_response, tuple): + resp, status_code = doc_response + 
if hasattr(resp, "get_json"): + doc = resp.get_json() + else: + doc = resp + elif hasattr(doc_response, "get_json"): + doc = doc_response.get_json() + else: + doc = doc_response + + if doc and isinstance(doc, dict): + log_document_metadata_update_transaction( + user_id=user_id, + document_id=document_id, + workspace_type='personal', + file_name=doc.get('file_name', 'Unknown'), + updated_fields=updated_fields, + file_type=doc.get('file_type') + ) + return jsonify({'message': 'Document metadata updated successfully'}), 200 except Exception as e: return jsonify({'error': str(e)}), 500 @@ -484,6 +578,10 @@ def api_delete_user_document(document_id): try: delete_document(user_id, document_id) delete_document_chunks(document_id) + + # Invalidate search cache since document was deleted + invalidate_personal_search_cache(user_id) + return jsonify({'message': 'Document deleted successfully'}), 200 except Exception as e: return jsonify({'error': f'Error deleting document: {str(e)}'}), 500 @@ -630,6 +728,9 @@ def api_share_document(document_id): # Share the document success = share_document_with_user(document_id, user_id, target_user_id) if success: + # Invalidate cache for both owner and target user + invalidate_personal_search_cache(user_id) + invalidate_personal_search_cache(target_user_id) return jsonify({'message': 'Document shared successfully'}), 200 else: return jsonify({'error': 'Failed to share document'}), 500 @@ -663,6 +764,9 @@ def api_unshare_document(document_id): # Unshare the document success = unshare_document_from_user(document_id, user_id, target_user_id) if success: + # Invalidate cache for both owner and target user + invalidate_personal_search_cache(user_id) + invalidate_personal_search_cache(target_user_id) return jsonify({'message': 'Document unshared successfully'}), 200 else: return jsonify({'error': 'Failed to unshare document'}), 500 @@ -781,6 +885,8 @@ def api_remove_self_from_document(document_id): # Remove user from shared_user_ids (pass user_id as both requester and target for self-removal) success = unshare_document_from_user(document_id, user_id, user_id) if success: + # Invalidate cache for user who removed themselves + invalidate_personal_search_cache(user_id) return jsonify({'message': 'Successfully removed from shared document'}), 200 else: return jsonify({'error': 'Failed to remove from shared document'}), 500 @@ -841,6 +947,11 @@ def api_approve_shared_document(document_id): print(f"Warning: Failed to update chunk {chunk_id}: {chunk_e}") except Exception as e: print(f"Warning: Failed to update chunks for document {document_id}: {e}") + + # Invalidate cache for user who approved (their search results changed) + if updated: + invalidate_personal_search_cache(user_id) + return jsonify({'message': 'Share approved' if updated else 'Already approved'}), 200 except Exception as e: return jsonify({'error': f'Error approving shared document: {str(e)}'}), 500 \ No newline at end of file diff --git a/application/single_app/route_backend_feedback.py b/application/single_app/route_backend_feedback.py index bf526f60..49167cc8 100644 --- a/application/single_app/route_backend_feedback.py +++ b/application/single_app/route_backend_feedback.py @@ -141,7 +141,7 @@ def feedback_submit(): @app.route("/feedback/review", methods=["GET"]) @swagger_route(security=get_auth_security()) @login_required - @admin_required + @feedback_admin_required @enabled_required("enable_user_feedback") def feedback_review_get(): """ @@ -247,7 +247,7 @@ def feedback_review_get(): 
@app.route("/feedback/review/", methods=["GET"]) @swagger_route(security=get_auth_security()) @login_required - @admin_required + @feedback_admin_required @enabled_required("enable_user_feedback") def feedback_review_get_single(feedbackId): """ @@ -283,7 +283,7 @@ def feedback_review_get_single(feedbackId): @app.route("/feedback/review/", methods=["PATCH"]) @swagger_route(security=get_auth_security()) @login_required - @admin_required + @feedback_admin_required @enabled_required("enable_user_feedback") def feedback_review_update(feedbackId): """ @@ -328,7 +328,7 @@ def feedback_review_update(feedbackId): @app.route("/feedback/retest/", methods=["POST"]) @swagger_route(security=get_auth_security()) @login_required - @admin_required + @feedback_admin_required @enabled_required("enable_user_feedback") def feedback_retest(feedbackId): """ diff --git a/application/single_app/route_backend_group_documents.py b/application/single_app/route_backend_group_documents.py index 7afbf35e..68a1c0fa 100644 --- a/application/single_app/route_backend_group_documents.py +++ b/application/single_app/route_backend_group_documents.py @@ -5,6 +5,9 @@ from functions_settings import * from functions_group import * from functions_documents import * +from utils_cache import invalidate_group_search_cache +from functions_debug import * +from functions_activity_logging import log_document_upload from flask import current_app from swagger_wrapper import swagger_route, get_auth_security @@ -39,6 +42,12 @@ def api_upload_group_document(): if not group_doc: return jsonify({'error': 'Active group not found'}), 404 + # Check if group status allows uploads + from functions_group import check_group_status_allows_operation + allowed, reason = check_group_status_allows_operation(group_doc, 'upload') + if not allowed: + return jsonify({'error': reason}), 403 + role = get_user_role_in_group(group_doc, user_id) if role not in ["Owner", "Admin", "DocumentManager"]: return jsonify({'error': 'You do not have permission to upload documents'}), 403 @@ -121,6 +130,10 @@ def api_upload_group_document(): if not processed_docs and upload_errors: response_status = 400 + # Invalidate group search cache since documents were added + if processed_docs: + invalidate_group_search_cache(active_group_id) + return jsonify({ 'message': f'Processed {len(processed_docs)} file(s). 
Check status periodically.', 'document_ids': [doc['document_id'] for doc in processed_docs], @@ -329,6 +342,9 @@ def api_patch_group_document(document_id): return jsonify({'error': 'You do not have permission to update documents in this group'}), 403 data = request.get_json() + + # Track which fields were updated + updated_fields = {} try: if 'title' in data: @@ -338,6 +354,7 @@ def api_patch_group_document(document_id): user_id=user_id, title=data['title'] ) + updated_fields['title'] = data['title'] if 'abstract' in data: update_document( document_id=document_id, @@ -345,6 +362,7 @@ def api_patch_group_document(document_id): user_id=user_id, abstract=data['abstract'] ) + updated_fields['abstract'] = data['abstract'] if 'keywords' in data: if isinstance(data['keywords'], list): update_document( @@ -353,13 +371,16 @@ def api_patch_group_document(document_id): user_id=user_id, keywords=data['keywords'] ) + updated_fields['keywords'] = data['keywords'] else: + keywords_list = [kw.strip() for kw in data['keywords'].split(',')] update_document( document_id=document_id, group_id=active_group_id, user_id=user_id, - keywords=[kw.strip() for kw in data['keywords'].split(',')] + keywords=keywords_list ) + updated_fields['keywords'] = keywords_list if 'publication_date' in data: update_document( document_id=document_id, @@ -367,6 +388,7 @@ def api_patch_group_document(document_id): user_id=user_id, publication_date=data['publication_date'] ) + updated_fields['publication_date'] = data['publication_date'] if 'document_classification' in data: update_document( document_id=document_id, @@ -374,6 +396,7 @@ def api_patch_group_document(document_id): user_id=user_id, document_classification=data['document_classification'] ) + updated_fields['document_classification'] = data['document_classification'] if 'authors' in data: if isinstance(data['authors'], list): update_document( @@ -382,15 +405,54 @@ def api_patch_group_document(document_id): user_id=user_id, authors=data['authors'] ) + updated_fields['authors'] = data['authors'] else: + authors_list = [data['authors']] update_document( document_id=document_id, group_id=active_group_id, user_id=user_id, - authors=[data['authors']] + authors=authors_list ) + updated_fields['authors'] = authors_list - return jsonify({'message': 'Group document metadata updated successfully'}), 200 + # Save updates back to Cosmos + try: + # Log the metadata update transaction if any fields were updated + if updated_fields: + # Get document details for logging - handle tuple return + # Get document details for logging + from functions_documents import get_document + doc_response = get_document(user_id, document_id, group_id=active_group_id) + doc = None + + # Handle tuple return (response, status_code) + if isinstance(doc_response, tuple): + resp, status_code = doc_response + if hasattr(resp, "get_json"): + doc = resp.get_json() + else: + doc = resp + elif hasattr(doc_response, "get_json"): + doc = doc_response.get_json() + else: + doc = doc_response + + if doc and isinstance(doc, dict): + from functions_activity_logging import log_document_metadata_update_transaction + log_document_metadata_update_transaction( + user_id=user_id, + document_id=document_id, + workspace_type='group', + file_name=doc.get('file_name', 'Unknown'), + updated_fields=updated_fields, + file_type=doc.get('file_type'), + group_id=active_group_id + ) + + return jsonify({'message': 'Group document metadata updated successfully'}), 200 + except Exception as e: + return jsonify({'Error updating Group document 
metadata': str(e)}), 500 except Exception as e: return jsonify({'error': str(e)}), 500 @@ -418,6 +480,12 @@ def api_delete_group_document(document_id): if not group_doc: return jsonify({'error': 'Active group not found'}), 404 + # Check if group status allows deletions + from functions_group import check_group_status_allows_operation + allowed, reason = check_group_status_allows_operation(group_doc, 'delete') + if not allowed: + return jsonify({'error': reason}), 403 + role = get_user_role_in_group(group_doc, user_id) if role not in ["Owner", "Admin", "DocumentManager"]: return jsonify({'error': 'You do not have permission to delete documents in this group'}), 403 @@ -425,6 +493,10 @@ def api_delete_group_document(document_id): try: delete_document(user_id=user_id, document_id=document_id, group_id=active_group_id) delete_document_chunks(document_id=document_id, group_id=active_group_id) + + # Invalidate group search cache since document was deleted + invalidate_group_search_cache(active_group_id) + return jsonify({'message': 'Group document deleted successfully'}), 200 except Exception as e: return jsonify({'error': f'Error deleting group document: {str(e)}'}), 500 @@ -614,6 +686,9 @@ def api_approve_shared_group_document(document_id): user_id=user_id, shared_group_ids=new_shared_group_ids ) + # Invalidate cache for the group that approved + invalidate_group_search_cache(active_group_id) + return jsonify({'message': 'Share approved' if updated else 'Already approved'}), 200 except Exception as e: return jsonify({'error': f'Error approving shared document: {str(e)}'}), 500 @@ -681,6 +756,10 @@ def api_share_document_with_group(document_id): shared_group_ids=shared_group_ids ) + # Invalidate cache for both groups + invalidate_group_search_cache(active_group_id) + invalidate_group_search_cache(target_group_id) + return jsonify({ 'message': 'Document shared successfully', 'document_id': document_id, @@ -747,6 +826,10 @@ def api_unshare_document_with_group(document_id): shared_group_ids=shared_group_ids ) + # Invalidate cache for both groups + invalidate_group_search_cache(active_group_id) + invalidate_group_search_cache(target_group_id) + return jsonify({ 'message': 'Document sharing removed successfully', 'document_id': document_id, diff --git a/application/single_app/route_backend_groups.py b/application/single_app/route_backend_groups.py index ca171b63..0e35d211 100644 --- a/application/single_app/route_backend_groups.py +++ b/application/single_app/route_backend_groups.py @@ -3,6 +3,8 @@ from config import * from functions_authentication import * from functions_group import * +from functions_debug import debug_print +from functions_notifications import create_notification from swagger_wrapper import swagger_route, get_auth_security def register_route_backend_groups(app): @@ -112,7 +114,8 @@ def api_list_groups(): "name": g.get("name", "Untitled Group"), # Provide default name "description": g.get("description", ""), "userRole": role, - "isActive": (g["id"] == db_active_group_id) + "isActive": (g["id"] == db_active_group_id), + "status": g.get("status", "active") # Include group status }) return jsonify({ @@ -132,6 +135,7 @@ def api_list_groups(): @login_required @user_required @create_group_role_required + @enabled_required("enable_group_creation") @enabled_required("enable_group_workspaces") def api_create_group(): """ @@ -383,6 +387,7 @@ def add_member_directly(group_id): """ user_info = get_current_user_info() user_id = user_info["userId"] + user_email = user_info.get("email", 
"unknown") group_doc = find_group_by_id(group_id) @@ -401,16 +406,80 @@ def add_member_directly(group_id): if get_user_role_in_group(group_doc, new_user_id): return jsonify({"error": "User is already a member"}), 400 + # Get role from request, default to 'user' + member_role = data.get("role", "user").lower() + + # Validate role + valid_roles = ['admin', 'document_manager', 'user'] + if member_role not in valid_roles: + return jsonify({"error": f"Invalid role. Must be: {', '.join(valid_roles)}"}), 400 + new_member_doc = { "userId": new_user_id, "email": data.get("email", ""), "displayName": data.get("displayName", "New User") } group_doc["users"].append(new_member_doc) + + # Add to appropriate role array + if member_role == 'admin': + if new_user_id not in group_doc.get('admins', []): + group_doc.setdefault('admins', []).append(new_user_id) + elif member_role == 'document_manager': + if new_user_id not in group_doc.get('documentManagers', []): + group_doc.setdefault('documentManagers', []).append(new_user_id) + group_doc["modifiedDate"] = datetime.utcnow().isoformat() cosmos_groups_container.upsert_item(group_doc) - return jsonify({"message": "Member added"}), 200 + + # Log activity for member addition + try: + activity_record = { + 'id': str(uuid.uuid4()), + 'activity_type': 'add_member_directly', + 'timestamp': datetime.utcnow().isoformat(), + 'added_by_user_id': user_id, + 'added_by_email': user_email, + 'added_by_role': role, + 'group_id': group_id, + 'group_name': group_doc.get('name', 'Unknown'), + 'member_user_id': new_user_id, + 'member_email': new_member_doc.get('email', ''), + 'member_name': new_member_doc.get('displayName', ''), + 'member_role': member_role, + 'description': f"{role} {user_email} added member {new_member_doc.get('displayName', '')} ({new_member_doc.get('email', '')}) to group {group_doc.get('name', group_id)} as {member_role}" + } + cosmos_activity_logs_container.create_item(body=activity_record) + except Exception as log_error: + debug_print(f"Failed to log member addition activity: {log_error}") + + # Create notification for the new member + try: + from functions_notifications import create_notification + role_display = { + 'admin': 'Admin', + 'document_manager': 'Document Manager', + 'user': 'Member' + }.get(member_role, 'Member') + + create_notification( + user_id=new_user_id, + notification_type='system_announcement', + title='Added to Group', + message=f"You have been added to the group '{group_doc.get('name', 'Unknown')}' as {role_display} by {user_email}.", + link_url=f"/manage_group/{group_id}", + metadata={ + 'group_id': group_id, + 'group_name': group_doc.get('name', 'Unknown'), + 'added_by': user_email, + 'role': member_role + } + ) + except Exception as notif_error: + debug_print(f"Failed to create member addition notification: {notif_error}") + + return jsonify({"message": "Member added", "success": True}), 200 @app.route("/api/groups//members/", methods=["DELETE"]) @swagger_route(security=get_auth_security()) @@ -438,10 +507,12 @@ def remove_member(group_id, member_id): "Transfer ownership or delete the group."}), 403 removed = False + removed_member_info = None updated_users = [] for u in group_doc["users"]: if u["userId"] == member_id: removed = True + removed_member_info = u continue updated_users.append(u) @@ -456,6 +527,26 @@ def remove_member(group_id, member_id): cosmos_groups_container.upsert_item(group_doc) if removed: + # Log activity for self-removal + from functions_activity_logging import log_group_member_deleted + user_email = 
user_info.get("email", "unknown") + member_name = removed_member_info.get('displayName', '') if removed_member_info else '' + member_email = removed_member_info.get('email', '') if removed_member_info else '' + description = f"Member {user_email} left group {group_doc.get('name', group_id)}" + + log_group_member_deleted( + removed_by_user_id=user_id, + removed_by_email=user_email, + removed_by_role='Member', + member_user_id=member_id, + member_email=member_email, + member_name=member_name, + group_id=group_id, + group_name=group_doc.get('name', 'Unknown'), + action='member_left_group', + description=description + ) + return jsonify({"message": "You have left the group"}), 200 else: return jsonify({"error": "You are not in this group"}), 404 @@ -469,10 +560,12 @@ def remove_member(group_id, member_id): return jsonify({"error": "Cannot remove the group owner"}), 403 removed = False + removed_member_info = None updated_users = [] for u in group_doc["users"]: if u["userId"] == member_id: removed = True + removed_member_info = u continue updated_users.append(u) group_doc["users"] = updated_users @@ -486,6 +579,26 @@ def remove_member(group_id, member_id): cosmos_groups_container.upsert_item(group_doc) if removed: + # Log activity for admin/owner removal + from functions_activity_logging import log_group_member_deleted + user_email = user_info.get("email", "unknown") + member_name = removed_member_info.get('displayName', '') if removed_member_info else '' + member_email = removed_member_info.get('email', '') if removed_member_info else '' + description = f"{role} {user_email} removed member {member_name} ({member_email}) from group {group_doc.get('name', group_id)}" + + log_group_member_deleted( + removed_by_user_id=user_id, + removed_by_email=user_email, + removed_by_role=role, + member_user_id=member_id, + member_email=member_email, + member_name=member_name, + group_id=group_id, + group_name=group_doc.get('name', 'Unknown'), + action='admin_removed_member', + description=description + ) + return jsonify({"message": "User removed"}), 200 else: return jsonify({"error": "User not found in group"}), 404 @@ -504,6 +617,7 @@ def update_member_role(group_id, member_id): """ user_info = get_current_user_info() user_id = user_info["userId"] + user_email = user_info.get("email", "unknown") group_doc = find_group_by_id(group_id) @@ -523,6 +637,15 @@ def update_member_role(group_id, member_id): if not target_role: return jsonify({"error": "Member is not in the group"}), 404 + # Get member details for logging + member_name = "Unknown" + member_email = "unknown" + for u in group_doc.get("users", []): + if u.get("userId") == member_id: + member_name = u.get("displayName", "Unknown") + member_email = u.get("email", "unknown") + break + if member_id in group_doc.get("admins", []): group_doc["admins"].remove(member_id) if member_id in group_doc.get("documentManagers", []): @@ -538,6 +661,49 @@ def update_member_role(group_id, member_id): group_doc["modifiedDate"] = datetime.utcnow().isoformat() cosmos_groups_container.upsert_item(group_doc) + # Log activity for role change + try: + activity_record = { + 'id': str(uuid.uuid4()), + 'type': 'group_member_role_changed', + 'activity_type': 'update_member_role', + 'timestamp': datetime.utcnow().isoformat(), + 'changed_by_user_id': user_id, + 'changed_by_email': user_email, + 'changed_by_role': current_role, + 'group_id': group_id, + 'group_name': group_doc.get('name', 'Unknown'), + 'member_user_id': member_id, + 'member_email': member_email, + 'member_name': 
member_name, + 'old_role': target_role, + 'new_role': new_role, + 'description': f"{current_role} {user_email} changed {member_name} ({member_email}) role from {target_role} to {new_role} in group {group_doc.get('name', group_id)}" + } + cosmos_activity_logs_container.create_item(body=activity_record) + except Exception as log_error: + debug_print(f"Failed to log role change activity: {log_error}") + + # Create notification for the member whose role was changed + try: + from functions_notifications import create_notification + create_notification( + user_id=member_id, + notification_type='system_announcement', + title='Role Changed', + message=f"Your role in group '{group_doc.get('name', 'Unknown')}' has been changed from {target_role} to {new_role} by {user_email}.", + link_url=f"/manage_group/{group_id}", + metadata={ + 'group_id': group_id, + 'group_name': group_doc.get('name', 'Unknown'), + 'changed_by': user_email, + 'old_role': target_role, + 'new_role': new_role + } + ) + except Exception as notif_error: + debug_print(f"Failed to create role change notification: {notif_error}") + return jsonify({"message": f"User {member_id} updated to {new_role}"}), 200 @app.route("/api/groups//members", methods=["GET"]) @@ -704,3 +870,260 @@ def get_group_file_count(group_id): file_count = item return jsonify({ "fileCount": file_count }), 200 + + @app.route("/api/groups//activity", methods=["GET"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + @enabled_required("enable_group_workspaces") + def api_group_activity(group_id): + """ + GET /api/groups//activity + Returns recent activity timeline for the group. + Only accessible by owner and admins. + """ + from functions_debug import debug_print + + info = get_current_user_info() + user_id = info["userId"] + + group = find_group_by_id(group_id) + if not group: + return jsonify({"error": "Not found"}), 404 + + # Check user is owner or admin (NOT document managers or regular members) + is_owner = group["owner"]["id"] == user_id + is_admin = user_id in (group.get("admins", [])) + + if not (is_owner or is_admin): + return jsonify({"error": "Forbidden - Only group owners and admins can view activity timeline"}), 403 + + # Get pagination parameters + limit = request.args.get('limit', 50, type=int) + if limit not in [10, 20, 50]: + limit = 50 + + # Get recent activity + query = f""" + SELECT TOP {limit} * + FROM a + WHERE a.workspace_context.group_id = @groupId + ORDER BY a.timestamp DESC + """ + params = [{"name": "@groupId", "value": group_id}] + + debug_print(f"[GROUP_ACTIVITY] Group ID: {group_id}") + debug_print(f"[GROUP_ACTIVITY] Query: {query}") + debug_print(f"[GROUP_ACTIVITY] Params: {params}") + + activities = [] + try: + activity_iter = cosmos_activity_logs_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + ) + activities = list(activity_iter) + debug_print(f"[GROUP_ACTIVITY] Found {len(activities)} activity records") + except Exception as e: + debug_print(f"[GROUP_ACTIVITY] Error querying activity: {e}") + return jsonify({"error": "Failed to retrieve activity"}), 500 + + return jsonify(activities), 200 + + @app.route("/api/groups//stats", methods=["GET"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + @enabled_required("enable_group_workspaces") + def api_group_stats(group_id): + """ + GET /api/groups//stats + Returns statistics for the group including documents, storage, tokens, and members. 
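As a rough illustration, the stats payload assembled at the end of this handler could be consumed from a client along these lines; the host, bearer token, and group id are placeholders (the real app authenticates via session login), and the response keys mirror the stats dict built below.

import requests

BASE = "https://app.example.com"                    # placeholder host
HEADERS = {"Authorization": "Bearer <token>"}       # placeholder auth
group_id = "00000000-0000-0000-0000-000000000000"   # placeholder group id

resp = requests.get(f"{BASE}/api/groups/{group_id}/stats", headers=HEADERS, timeout=30)
resp.raise_for_status()
stats = resp.json()
print(stats["totalDocuments"], stats["totalMembers"], stats["totalTokens"])
# Last entry of the 30-day token-usage series (labels are %m/%d day strings)
print(stats["tokenUsage"]["labels"][-1], stats["tokenUsage"]["data"][-1])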
+ Only accessible by owner and admins. + """ + from functions_debug import debug_print + from datetime import datetime, timedelta + + info = get_current_user_info() + user_id = info["userId"] + + group = find_group_by_id(group_id) + if not group: + return jsonify({"error": "Not found"}), 404 + + # Check user is owner or admin + is_owner = group["owner"]["id"] == user_id + is_admin = user_id in (group.get("admins", [])) + + if not (is_owner or is_admin): + return jsonify({"error": "Forbidden"}), 403 + + # Get metrics from group record + metrics = group.get("metrics", {}) + document_metrics = metrics.get("document_metrics", {}) + + total_documents = document_metrics.get("total_documents", 0) + storage_used = document_metrics.get("storage_account_size", 0) + ai_search_size = document_metrics.get("ai_search_size", 0) + storage_account_size = document_metrics.get("storage_account_size", 0) + + # Get member count + total_members = len(group.get("users", [])) + + # Get token usage from activity logs (last 30 days) + thirty_days_ago = (datetime.utcnow() - timedelta(days=30)).isoformat() + + debug_print(f"[GROUP_STATS] Group ID: {group_id}") + debug_print(f"[GROUP_STATS] Start date: {thirty_days_ago}") + + token_query = """ + SELECT a.usage + FROM a + WHERE a.workspace_context.group_id = @groupId + AND a.timestamp >= @startDate + AND a.activity_type = 'token_usage' + """ + token_params = [ + {"name": "@groupId", "value": group_id}, + {"name": "@startDate", "value": thirty_days_ago} + ] + + total_tokens = 0 + try: + token_iter = cosmos_activity_logs_container.query_items( + query=token_query, + parameters=token_params, + enable_cross_partition_query=True + ) + for item in token_iter: + usage = item.get("usage", {}) + total_tokens += usage.get("total_tokens", 0) + debug_print(f"[GROUP_STATS] Total tokens accumulated: {total_tokens}") + except Exception as e: + debug_print(f"[GROUP_STATS] Error querying total tokens: {e}") + + # Get activity data for charts (last 30 days) + doc_activity_labels = [] + doc_upload_data = [] + doc_delete_data = [] + token_usage_labels = [] + token_usage_data = [] + + # Generate labels for last 30 days + for i in range(29, -1, -1): + date = datetime.utcnow() - timedelta(days=i) + doc_activity_labels.append(date.strftime("%m/%d")) + token_usage_labels.append(date.strftime("%m/%d")) + doc_upload_data.append(0) + doc_delete_data.append(0) + token_usage_data.append(0) + + # Get document upload activity by day + doc_upload_query = """ + SELECT a.timestamp, a.created_at + FROM a + WHERE a.workspace_context.group_id = @groupId + AND a.timestamp >= @startDate + AND a.activity_type = 'document_creation' + """ + try: + activity_iter = cosmos_activity_logs_container.query_items( + query=doc_upload_query, + parameters=token_params, + enable_cross_partition_query=True + ) + for item in activity_iter: + timestamp = item.get("timestamp") or item.get("created_at") + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + day_date = dt.strftime("%m/%d") + if day_date in doc_activity_labels: + idx = doc_activity_labels.index(day_date) + doc_upload_data[idx] += 1 + except Exception as e: + debug_print(f"[GROUP_STATS] Error parsing timestamp: {e}") + except Exception as e: + debug_print(f"[GROUP_STATS] Error querying document uploads: {e}") + + # Get document delete activity by day + doc_delete_query = """ + SELECT a.timestamp, a.created_at + FROM a + WHERE a.workspace_context.group_id = @groupId + AND a.timestamp >= @startDate + AND a.activity_type = 
'document_deletion' + """ + try: + delete_iter = cosmos_activity_logs_container.query_items( + query=doc_delete_query, + parameters=token_params, + enable_cross_partition_query=True + ) + for item in delete_iter: + timestamp = item.get("timestamp") or item.get("created_at") + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + day_date = dt.strftime("%m/%d") + if day_date in doc_activity_labels: + idx = doc_activity_labels.index(day_date) + doc_delete_data[idx] += 1 + except Exception as e: + debug_print(f"[GROUP_STATS] Error parsing timestamp: {e}") + except Exception as e: + debug_print(f"[GROUP_STATS] Error querying document deletes: {e}") + + # Get token usage by day + token_activity_query = """ + SELECT a.timestamp, a.created_at, a.usage + FROM a + WHERE a.workspace_context.group_id = @groupId + AND a.timestamp >= @startDate + AND a.activity_type = 'token_usage' + """ + try: + token_activity_iter = cosmos_activity_logs_container.query_items( + query=token_activity_query, + parameters=token_params, + enable_cross_partition_query=True + ) + for item in token_activity_iter: + timestamp = item.get("timestamp") or item.get("created_at") + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + day_date = dt.strftime("%m/%d") + if day_date in token_usage_labels: + idx = token_usage_labels.index(day_date) + usage = item.get("usage", {}) + tokens = usage.get("total_tokens", 0) + token_usage_data[idx] += tokens + except Exception as e: + debug_print(f"[GROUP_STATS] Error parsing timestamp: {e}") + except Exception as e: + debug_print(f"[GROUP_STATS] Error querying token usage: {e}") + + stats = { + "totalDocuments": total_documents, + "storageUsed": storage_used, + "storageLimit": 10737418240, # 10GB default + "totalTokens": total_tokens, + "totalMembers": total_members, + "storage": { + "ai_search_size": ai_search_size, + "storage_account_size": storage_account_size + }, + "documentActivity": { + "labels": doc_activity_labels, + "uploads": doc_upload_data, + "deletes": doc_delete_data + }, + "tokenUsage": { + "labels": token_usage_labels, + "data": token_usage_data + } + } + + return jsonify(stats), 200 diff --git a/application/single_app/route_backend_models.py b/application/single_app/route_backend_models.py index e0859453..176d112c 100644 --- a/application/single_app/route_backend_models.py +++ b/application/single_app/route_backend_models.py @@ -13,9 +13,7 @@ def register_route_backend_models(app): """ @app.route('/api/models/gpt', methods=['GET']) - @swagger_route( - security=get_auth_security() - ) + @swagger_route(security=get_auth_security()) @login_required @user_required def get_gpt_models(): @@ -75,9 +73,7 @@ def get_gpt_models(): @app.route('/api/models/embedding', methods=['GET']) - @swagger_route( - security=get_auth_security() - ) + @swagger_route(security=get_auth_security()) @login_required @user_required def get_embedding_models(): @@ -135,9 +131,7 @@ def get_embedding_models(): @app.route('/api/models/image', methods=['GET']) - @swagger_route( - security=get_auth_security() - ) + @swagger_route(security=get_auth_security()) @login_required @user_required def get_image_models(): diff --git a/application/single_app/route_backend_notifications.py b/application/single_app/route_backend_notifications.py new file mode 100644 index 00000000..8fe8dd58 --- /dev/null +++ b/application/single_app/route_backend_notifications.py @@ -0,0 +1,210 @@ +# route_backend_notifications.py + +from config import * +from 
functions_authentication import * +from functions_settings import * +from functions_notifications import * +from swagger_wrapper import swagger_route, get_auth_security +from functions_debug import debug_print + +def register_route_backend_notifications(app): + + @app.route("/api/notifications", methods=["GET"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_get_notifications(): + """ + Get paginated notifications for the current user. + + Query Parameters: + page (int): Page number (default: 1) + per_page (int): Items per page (default: 20) + include_read (bool): Include read notifications (default: true) + include_dismissed (bool): Include dismissed notifications (default: false) + """ + try: + user_id = get_current_user_id() + user = session.get('user', {}) + user_roles = user.get('roles', []) + + # Get query parameters + page = int(request.args.get('page', 1)) + per_page = int(request.args.get('per_page', 20)) + include_read = request.args.get('include_read', 'true').lower() == 'true' + include_dismissed = request.args.get('include_dismissed', 'false').lower() == 'true' + + # Validate per_page + if per_page not in [10, 20, 50]: + per_page = 20 + + result = get_user_notifications( + user_id=user_id, + page=page, + per_page=per_page, + include_read=include_read, + include_dismissed=include_dismissed, + user_roles=user_roles + ) + + return jsonify({ + 'success': True, + **result + }) + + except Exception as e: + debug_print(f"Error fetching notifications: {e}") + return jsonify({ + 'success': False, + 'error': 'Failed to fetch notifications' + }), 500 + + @app.route("/api/notifications/count", methods=["GET"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_get_notification_count(): + """ + Get count of unread notifications for the current user. + """ + try: + user_id = get_current_user_id() + count = get_unread_notification_count(user_id) + + return jsonify({ + 'success': True, + 'count': count + }) + + except Exception as e: + debug_print(f"Error fetching notification count: {e}") + return jsonify({ + 'success': False, + 'count': 0 + }), 500 + + @app.route("/api/notifications//read", methods=["POST"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_mark_notification_read(notification_id): + """ + Mark a notification as read. + """ + try: + user_id = get_current_user_id() + success = mark_notification_read(notification_id, user_id) + + if success: + return jsonify({ + 'success': True, + 'message': 'Notification marked as read' + }) + else: + return jsonify({ + 'success': False, + 'error': 'Failed to mark notification as read' + }), 400 + + except Exception as e: + debug_print(f"Error marking notification as read: {e}") + return jsonify({ + 'success': False, + 'error': 'Internal server error' + }), 500 + + @app.route("/api/notifications//dismiss", methods=["DELETE"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_dismiss_notification(notification_id): + """ + Dismiss a notification. 
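Taken together, the notification routes above support a simple poll-and-acknowledge loop on the client. A minimal sketch, assuming a placeholder host and bearer token (the app itself uses session auth) and assuming the paginated response exposes a "notifications" list, is:

import requests

BASE = "https://app.example.com"                    # placeholder host
HEADERS = {"Authorization": "Bearer <token>"}       # placeholder auth

count = requests.get(f"{BASE}/api/notifications/count", headers=HEADERS, timeout=30).json().get("count", 0)
if count:
    page = requests.get(
        f"{BASE}/api/notifications",
        params={"page": 1, "per_page": 20, "include_read": "false"},
        headers=HEADERS,
        timeout=30,
    ).json()
    for note in page.get("notifications", []):      # key name assumed, not shown in this change
        requests.post(f"{BASE}/api/notifications/{note['id']}/read", headers=HEADERS, timeout=30)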
+ """ + try: + user_id = get_current_user_id() + success = dismiss_notification(notification_id, user_id) + + if success: + return jsonify({ + 'success': True, + 'message': 'Notification dismissed' + }) + else: + return jsonify({ + 'success': False, + 'error': 'Failed to dismiss notification' + }), 400 + + except Exception as e: + debug_print(f"Error dismissing notification: {e}") + return jsonify({ + 'success': False, + 'error': 'Internal server error' + }), 500 + + @app.route("/api/notifications/mark-all-read", methods=["POST"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_mark_all_read(): + """ + Mark all notifications as read for the current user. + """ + try: + user_id = get_current_user_id() + count = mark_all_read(user_id) + + return jsonify({ + 'success': True, + 'message': f'{count} notifications marked as read', + 'count': count + }) + + except Exception as e: + debug_print(f"Error marking all notifications as read: {e}") + return jsonify({ + 'success': False, + 'error': 'Internal server error' + }), 500 + + @app.route("/api/notifications/settings", methods=["POST"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_update_notification_settings(): + """ + Update notification settings for the current user. + + Body: + notifications_per_page (int): Number of notifications per page (10, 20, or 50) + """ + try: + user_id = get_current_user_id() + data = request.get_json() + + per_page = data.get('notifications_per_page', 20) + + # Validate per_page + if per_page not in [10, 20, 50]: + return jsonify({ + 'success': False, + 'error': 'Invalid per_page value. Must be 10, 20, or 50.' + }), 400 + + # Update user settings + update_user_settings(user_id, { + 'notifications_per_page': per_page + }) + + return jsonify({ + 'success': True, + 'message': 'Settings updated' + }) + + except Exception as e: + debug_print(f"Error updating notification settings: {e}") + return jsonify({ + 'success': False, + 'error': 'Internal server error' + }), 500 diff --git a/application/single_app/route_backend_plugins.py b/application/single_app/route_backend_plugins.py index 3ece6a8c..6f24c932 100644 --- a/application/single_app/route_backend_plugins.py +++ b/application/single_app/route_backend_plugins.py @@ -2,6 +2,7 @@ import re import builtins +import json from flask import Blueprint, jsonify, request, current_app from semantic_kernel_plugins.plugin_loader import get_all_plugin_metadata from semantic_kernel_plugins.plugin_health_checker import PluginHealthChecker, PluginErrorRecovery @@ -11,15 +12,25 @@ from swagger_wrapper import swagger_route, get_auth_security import logging import os - +from functions_debug import debug_print import importlib.util from functions_plugins import get_merged_plugin_settings from semantic_kernel_plugins.base_plugin import BasePlugin from functions_global_actions import * from functions_personal_actions import * +from functions_group import require_active_group, assert_group_role +from functions_group_actions import ( + get_group_actions, + get_group_action, + save_group_action, + delete_group_action, + validate_group_action_payload, +) +from functions_keyvault import SecretReturnType +#from functions_personal_actions import delete_personal_action - +from functions_debug import debug_print from json_schema_validation import validate_plugin def discover_plugin_types(): @@ -109,6 +120,7 @@ def get_plugin_types(): safe_manifest = {} # Only add minimal required fields based on plugin type + 
#TODO: This can be improved by ensuring we have additional fields from the schemas we have not created if needed. if 'databricks' in module_name.lower(): safe_manifest = { 'endpoint': 'https://example.databricks.com', @@ -151,12 +163,15 @@ def get_plugin_types(): try: plugin_instance = obj(safe_manifest) except (TypeError, ValueError, KeyError) as e: + debug_print(f"[RBEP] Failed to instantiate {attr} with safe manifest: {e}") try: plugin_instance = obj({}) except (TypeError, ValueError) as e2: + debug_print(f"[RBEP] Failed to instantiate {attr} with empty manifest: {e2}") try: plugin_instance = obj() except Exception as e3: + debug_print(f"[RBEP] Failed to instantiate {attr} with no args: {e3}") instantiation_error = e3 except Exception as e: instantiation_error = e @@ -288,6 +303,7 @@ def set_user_plugins(): plugin.setdefault('endpoint', f'sql://{plugin_type}') elif plugin_type == 'msgraph': # MS Graph plugin does not require an endpoint, but schema validation requires one + #TODO: Update to support different clouds plugin.setdefault('endpoint', 'https://graph.microsoft.com') else: # For other plugin types, require a real endpoint @@ -327,7 +343,7 @@ def set_user_plugins(): delete_personal_action(user_id, plugin_name) except Exception as e: - current_app.logger.error(f"Error saving personal actions for user {user_id}: {e}") + debug_print(f"Error saving personal actions for user {user_id}: {e}") return jsonify({'error': 'Failed to save plugins'}), 500 log_event("User plugins updated", extra={"user_id": user_id, "plugins_count": len(filtered_plugins)}) return jsonify({'success': True}) @@ -338,9 +354,6 @@ def set_user_plugins(): def delete_user_plugin(plugin_name): user_id = get_current_user_id() - # Import the new personal actions functions - from functions_personal_actions import delete_personal_action - # Try to delete from personal_actions container deleted = delete_personal_action(user_id, plugin_name) @@ -350,6 +363,202 @@ def delete_user_plugin(plugin_name): log_event("User plugin deleted", extra={"user_id": user_id, "plugin_name": plugin_name}) return jsonify({'success': True}) + +# === GROUP ACTION ENDPOINTS === + +@bpap.route('/api/group/plugins', methods=['GET']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def get_group_actions_route(): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role( + user_id, + active_group, + allowed_roles=("Owner", "Admin", "DocumentManager", "User"), + ) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + actions = get_group_actions(active_group, return_type=SecretReturnType.TRIGGER) + + settings = get_settings() + merge_global = bool(settings.get('merge_global_semantic_kernel_with_workspace', False)) if settings else False + + if merge_global: + global_actions = get_global_actions(return_type=SecretReturnType.TRIGGER) + merged_actions = _merge_group_and_global_actions(actions, global_actions) + else: + merged_actions = [_normalize_group_action(action) for action in actions] + merged_actions.sort(key=lambda item: (item.get('displayName') or item.get('display_name') or item.get('name') or '').lower()) + + return jsonify({'actions': merged_actions}), 200 + + +@bpap.route('/api/group/plugins/', methods=['GET']) 
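The group-plugin routes in this hunk all repeat the same require_active_group / assert_group_role guard with identical exception-to-status mapping. A hypothetical helper capturing that mapping (the function name and return shape are assumptions, and the comment glosses are inferred from the handlers) could look like:

def resolve_active_group_or_error(user_id, allowed_roles=None):
    """Return (group, None) on success or (None, (body, status)) on failure."""
    try:
        active_group = require_active_group(user_id)
        if allowed_roles is None:
            assert_group_role(user_id, active_group)
        else:
            assert_group_role(user_id, active_group, allowed_roles=allowed_roles)
        return active_group, None
    except ValueError as exc:       # no/invalid active group selection
        return None, ({'error': str(exc)}, 400)
    except LookupError as exc:      # active group not found
        return None, ({'error': str(exc)}, 404)
    except PermissionError as exc:  # caller lacks the required role
        return None, ({'error': str(exc)}, 403)

# A route body would then start with:
#   active_group, err = resolve_active_group_or_error(user_id)
#   if err:
#       return jsonify(err[0]), err[1]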
+@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def get_group_action_route(action_id): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role( + user_id, + active_group, + allowed_roles=("Owner", "Admin", "DocumentManager", "User"), + ) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + action = get_group_action(active_group, action_id, return_type=SecretReturnType.TRIGGER) + if not action: + return jsonify({'error': 'Action not found'}), 404 + return jsonify(action), 200 + + +@bpap.route('/api/group/plugins', methods=['POST']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def create_group_action_route(): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role(user_id, active_group) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + payload = request.get_json(silent=True) or {} + try: + validate_group_action_payload(payload, partial=False) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + + if payload.get('is_global'): + return jsonify({'error': 'Global actions are managed centrally and cannot be created within a group.'}), 400 + + for key in ('group_id', 'last_updated', 'user_id', 'is_global', 'is_group', 'scope'): + payload.pop(key, None) + + # Merge with schema to ensure all required fields are present (same as global actions) + schema_dir = os.path.join(current_app.root_path, 'static', 'json', 'schemas') + merged = get_merged_plugin_settings(payload.get('type'), payload, schema_dir) + payload['metadata'] = merged.get('metadata', payload.get('metadata', {})) + payload['additionalFields'] = merged.get('additionalFields', payload.get('additionalFields', {})) + + try: + saved = save_group_action(active_group, payload) + except Exception as exc: + debug_print('Failed to save group action: %s', exc) + return jsonify({'error': 'Unable to save action'}), 500 + + return jsonify(saved), 201 + + +@bpap.route('/api/group/plugins/', methods=['PATCH']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def update_group_action_route(action_id): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role(user_id, active_group) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + existing = get_group_action(active_group, action_id, return_type=SecretReturnType.NAME) + if not existing: + return jsonify({'error': 'Action not found'}), 404 + + updates = request.get_json(silent=True) or {} + if updates.get('is_global'): + return jsonify({'error': 'Global actions cannot be modified within a group.'}), 400 + + for key in ('id', 'group_id', 'last_updated', 'user_id', 'is_global', 'is_group', 'scope'): + updates.pop(key, None) + + try: + validate_group_action_payload(updates, partial=True) + except 
ValueError as exc: + return jsonify({'error': str(exc)}), 400 + + merged = dict(existing) + merged.update(updates) + merged['is_global'] = False + merged['is_group'] = True + merged['id'] = existing.get('id', action_id) + + try: + validate_group_action_payload(merged, partial=False) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + + # Merge with schema to ensure all required fields are present (same as global actions) + schema_dir = os.path.join(current_app.root_path, 'static', 'json', 'schemas') + schema_merged = get_merged_plugin_settings(merged.get('type'), merged, schema_dir) + merged['metadata'] = schema_merged.get('metadata', merged.get('metadata', {})) + merged['additionalFields'] = schema_merged.get('additionalFields', merged.get('additionalFields', {})) + + try: + saved = save_group_action(active_group, merged) + except Exception as exc: + debug_print('Failed to update group action %s: %s', action_id, exc) + return jsonify({'error': 'Unable to update action'}), 500 + + return jsonify(saved), 200 + + +@bpap.route('/api/group/plugins/', methods=['DELETE']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +@enabled_required('enable_group_workspaces') +def delete_group_action_route(action_id): + user_id = get_current_user_id() + try: + active_group = require_active_group(user_id) + assert_group_role(user_id, active_group) + except ValueError as exc: + return jsonify({'error': str(exc)}), 400 + except LookupError as exc: + return jsonify({'error': str(exc)}), 404 + except PermissionError as exc: + return jsonify({'error': str(exc)}), 403 + + try: + removed = delete_group_action(active_group, action_id) + except Exception as exc: + debug_print('Failed to delete group action %s: %s', action_id, exc) + return jsonify({'error': 'Unable to delete action'}), 500 + + if not removed: + return jsonify({'error': 'Action not found'}), 404 + return jsonify({'message': 'Action deleted'}), 200 + @bpap.route('/api/user/plugins/types', methods=['GET']) @swagger_route(security=get_auth_security()) @login_required @@ -606,6 +815,58 @@ def merge_plugin_settings(plugin_type): merged = get_merged_plugin_settings(plugin_type, current_settings, schema_dir) return jsonify(merged) + +@bpap.route('/api/plugins//auth-types', methods=['GET']) +@swagger_route(security=get_auth_security()) +@login_required +@user_required +def get_plugin_auth_types(plugin_type): + """ + Returns allowed auth types for a plugin type. Uses definition file if present, + otherwise falls back to AuthType enum in plugin.schema.json. 
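For reference, a client call against this endpoint might look like the sketch below; the host and token are placeholders, and "databricks" is reused from the safe-manifest branch earlier in this file purely as an example plugin type.

import requests

BASE = "https://app.example.com"                    # placeholder host
HEADERS = {"Authorization": "Bearer <token>"}       # placeholder auth

resp = requests.get(f"{BASE}/api/plugins/databricks/auth-types", headers=HEADERS, timeout=30)
data = resp.json()
# "source" is "definition" when a <type>.definition.json supplied a non-empty
# allowedAuthTypes list, otherwise "schema" (the AuthType enum fallback).
print(data["allowedAuthTypes"], data["source"])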
+ """ + schema_dir = os.path.join(current_app.root_path, 'static', 'json', 'schemas') + safe_type = re.sub(r'[^a-zA-Z0-9_]', '_', plugin_type).lower() + + definition_path = os.path.join(schema_dir, f'{safe_type}.definition.json') + schema_path = os.path.join(schema_dir, 'plugin.schema.json') + + allowed_auth_types = [] + source = "schema" + + try: + with open(schema_path, 'r', encoding='utf-8') as schema_file: + schema = json.load(schema_file) + allowed_auth_types = ( + schema + .get('definitions', {}) + .get('AuthType', {}) + .get('enum', []) + ) + except Exception as exc: + debug_print(f"Failed to read plugin.schema.json: {exc}") + allowed_auth_types = [] + + if os.path.exists(definition_path): + try: + with open(definition_path, 'r', encoding='utf-8') as definition_file: + definition = json.load(definition_file) + allowed_from_definition = definition.get('allowedAuthTypes') + if isinstance(allowed_from_definition, list) and allowed_from_definition: + allowed_auth_types = allowed_from_definition + source = "definition" + except Exception as exc: + debug_print(f"Failed to read {definition_path}: {exc}") + + if not allowed_auth_types: + allowed_auth_types = [] + source = "schema" + + return jsonify({ + "allowedAuthTypes": allowed_auth_types, + "source": source + }) + ########################################################################################################## # Dynamic Plugin Metadata Endpoint @@ -621,3 +882,44 @@ def list_dynamic_plugins(): """ plugins = get_all_plugin_metadata() return jsonify(plugins) + +# Helper functions for group/global action merging +def _normalize_group_action(action: dict) -> dict: + normalized = dict(action) + normalized['is_global'] = False + normalized['is_group'] = True + normalized.setdefault('scope', 'group') + return normalized + + +def _normalize_global_action(action: dict) -> dict: + normalized = dict(action) + normalized['is_global'] = True + normalized['is_group'] = False + normalized.setdefault('scope', 'global') + return normalized + + +def _merge_group_and_global_actions(group_actions, global_actions): + normalized_actions = [] + seen_names = set() + + for action in group_actions: + normalized = _normalize_group_action(action) + action_name = (normalized.get('name') or '').lower() + if action_name: + seen_names.add(action_name) + normalized_actions.append(normalized) + + for action in global_actions: + normalized = _normalize_global_action(action) + action_name = (normalized.get('name') or '').lower() + if action_name and action_name in seen_names: + continue + normalized_actions.append(normalized) + + normalized_actions.sort(key=lambda item: (item.get('displayName') or item.get('display_name') or item.get('name') or '').lower()) + return normalized_actions + + + diff --git a/application/single_app/route_backend_public_documents.py b/application/single_app/route_backend_public_documents.py index 549ef3ac..a209e9a2 100644 --- a/application/single_app/route_backend_public_documents.py +++ b/application/single_app/route_backend_public_documents.py @@ -6,7 +6,9 @@ from functions_settings import * from functions_public_workspaces import * from functions_documents import * +from utils_cache import invalidate_public_workspace_search_cache from flask import current_app +from functions_debug import * from swagger_wrapper import swagger_route, get_auth_security def register_route_backend_public_documents(app): @@ -33,8 +35,10 @@ def api_upload_public_document(): if not ws_doc: return jsonify({'error': 'Active public workspace not found'}), 404 - 
# check role - from functions_public_workspaces import get_user_role_in_public_workspace + allowed, reason = check_public_workspace_status_allows_operation(ws_doc, 'upload') + if not allowed: + return jsonify({'error': reason}), 403 + role = get_user_role_in_public_workspace(ws_doc, user_id) if role not in ['Owner', 'Admin', 'DocumentManager']: return jsonify({'error': 'Insufficient permissions'}), 403 @@ -95,6 +99,11 @@ def api_upload_public_document(): if tmp_path and os.path.exists(tmp_path): os.remove(tmp_path) status = 200 if processed and not errors else (207 if processed else 400) + + # Invalidate public workspace search cache since documents were added + if processed: + invalidate_public_workspace_search_cache(active_ws) + return jsonify({ 'message': f'Processed {len(processed)} file(s)', 'document_ids': [d['id'] for d in processed], @@ -264,24 +273,71 @@ def api_patch_public_document(doc_id): if role not in ['Owner','Admin','DocumentManager']: return jsonify({'error':'Access denied'}), 403 data = request.get_json() or {} + + # Track which fields were updated + updated_fields = {} + try: if 'title' in data: update_document(document_id=doc_id, public_workspace_id=active_ws, user_id=user_id, title=data['title']) + updated_fields['title'] = data['title'] if 'abstract' in data: update_document(document_id=doc_id, public_workspace_id=active_ws, user_id=user_id, abstract=data['abstract']) + updated_fields['abstract'] = data['abstract'] if 'keywords' in data: kws = data['keywords'] if isinstance(data['keywords'],list) else [k.strip() for k in data['keywords'].split(',')] update_document(document_id=doc_id, public_workspace_id=active_ws, user_id=user_id, keywords=kws) + updated_fields['keywords'] = kws if 'authors' in data: auths = data['authors'] if isinstance(data['authors'],list) else [data['authors']] update_document(document_id=doc_id, public_workspace_id=active_ws, user_id=user_id, authors=auths) + updated_fields['authors'] = auths if 'publication_date' in data: update_document(document_id=doc_id, public_workspace_id=active_ws, user_id=user_id, publication_date=data['publication_date']) + updated_fields['publication_date'] = data['publication_date'] if 'document_classification' in data: update_document(document_id=doc_id, public_workspace_id=active_ws, user_id=user_id, document_classification=data['document_classification']) - return jsonify({'message':'Metadata updated'}), 200 + updated_fields['document_classification'] = data['document_classification'] + + # Save updates back to Cosmos + try: + # Log the metadata update transaction if any fields were updated + if updated_fields: + # Get document details for logging - handle tuple return + # Get document details for logging + from functions_documents import get_document + doc_response = get_document(user_id, doc_id, public_workspace_id=active_ws) + doc = None + + # Handle tuple return (response, status_code) + if isinstance(doc_response, tuple): + resp, status_code = doc_response + if hasattr(resp, "get_json"): + doc = resp.get_json() + else: + doc = resp + elif hasattr(doc_response, "get_json"): + doc = doc_response.get_json() + else: + doc = doc_response + + if doc and isinstance(doc, dict): + from functions_activity_logging import log_document_metadata_update_transaction + log_document_metadata_update_transaction( + user_id=user_id, + document_id=doc_id, + workspace_type='public', + file_name=doc.get('file_name', 'Unknown'), + updated_fields=updated_fields, + file_type=doc.get('file_type'), + public_workspace_id=active_ws + ) + + 
return jsonify({'message': 'Public document metadata updated successfully'}), 200 + except Exception as e: + return jsonify({'Error updating Public document metadata': str(e)}), 500 except Exception as e: - return jsonify({'error':str(e)}), 500 + return jsonify({'error': str(e)}), 500 @app.route('/api/public_documents/', methods=['DELETE']) @swagger_route(security=get_auth_security()) @@ -293,6 +349,14 @@ def api_delete_public_document(doc_id): settings = get_user_settings(user_id) active_ws = settings['settings'].get('activePublicWorkspaceOid') ws_doc = find_public_workspace_by_id(active_ws) if active_ws else None + + # Check if workspace status allows deletions + if ws_doc: + from functions_public_workspaces import check_public_workspace_status_allows_operation + allowed, reason = check_public_workspace_status_allows_operation(ws_doc, 'delete') + if not allowed: + return jsonify({'error': reason}), 403 + from functions_public_workspaces import get_user_role_in_public_workspace role = get_user_role_in_public_workspace(ws_doc, user_id) if ws_doc else None if role not in ['Owner','Admin','DocumentManager']: @@ -300,6 +364,10 @@ def api_delete_public_document(doc_id): try: delete_document(user_id=user_id, document_id=doc_id, public_workspace_id=active_ws) delete_document_chunks(document_id=doc_id, public_workspace_id=active_ws) + + # Invalidate public workspace search cache since document was deleted + invalidate_public_workspace_search_cache(active_ws) + return jsonify({'message':'Deleted'}), 200 except Exception as e: return jsonify({'error':str(e)}), 500 diff --git a/application/single_app/route_backend_public_workspaces.py b/application/single_app/route_backend_public_workspaces.py index 3d80d9b7..bce82787 100644 --- a/application/single_app/route_backend_public_workspaces.py +++ b/application/single_app/route_backend_public_workspaces.py @@ -3,7 +3,36 @@ from config import * from functions_authentication import * from functions_public_workspaces import * +from functions_notifications import create_notification from swagger_wrapper import swagger_route, get_auth_security +from functions_debug import debug_print + + +def is_user_in_admins(user_id, admins_list): + """ + Check if user is in admins list (supports both old format ["id1", "id2"] and new format [{userId, email, displayName}]) + """ + if not admins_list: + return False + for admin in admins_list: + if isinstance(admin, str): + if admin == user_id: + return True + elif isinstance(admin, dict): + if admin.get("userId") == user_id: + return True + return False + +def remove_user_from_admins(user_id, admins_list): + """ + Remove user from admins list (supports both old and new format) + Returns updated admins list + """ + if not admins_list: + return [] + return [admin for admin in admins_list if + (isinstance(admin, str) and admin != user_id) or + (isinstance(admin, dict) and admin.get("userId") != user_id)] def get_user_details_from_graph(user_id): """ @@ -146,6 +175,7 @@ def api_list_public_workspaces(): "name": ws.get("name", ""), "description": ws.get("description", ""), "userRole": role, + "status": ws.get("status", "active"), "isActive": (ws["id"] == active_id) }) @@ -200,7 +230,7 @@ def api_get_public_workspace(ws_id): def api_update_public_workspace(ws_id): """ PATCH /api/public_workspaces/ - Body JSON: { "name": "", "description": "" } + Body JSON: { "name": "", "description": "", "heroColor": "" } """ info = get_current_user_info() user_id = info["userId"] @@ -214,6 +244,7 @@ def api_update_public_workspace(ws_id): data = 
request.get_json() or {} ws["name"] = data.get("name", ws.get("name")) ws["description"] = data.get("description", ws.get("description")) + ws["heroColor"] = data.get("heroColor", ws.get("heroColor", "#0078d4")) ws["modifiedDate"] = datetime.utcnow().isoformat() try: @@ -406,7 +437,7 @@ def api_list_public_members(ws_id): # must be member is_member = ( ws["owner"]["userId"] == user_id or - user_id in ws.get("admins", []) or + is_user_in_admins(user_id, ws.get("admins", [])) or any(dm["userId"] == user_id for dm in ws.get("documentManagers", [])) ) if not is_member: @@ -423,15 +454,25 @@ def api_list_public_members(ws_id): "email": ws["owner"].get("email", ""), "role": "Owner" }) - # admins - for aid in ws.get("admins", []): - admin_details = get_user_details_from_graph(aid) - results.append({ - "userId": aid, - "displayName": admin_details["displayName"], - "email": admin_details["email"], - "role": "Admin" - }) + # admins (support both old format ["id"] and new format [{userId, email, displayName}]) + for admin in ws.get("admins", []): + if isinstance(admin, str): + # Old format - fetch from Graph + admin_details = get_user_details_from_graph(admin) + results.append({ + "userId": admin, + "displayName": admin_details["displayName"], + "email": admin_details["email"], + "role": "Admin" + }) + elif isinstance(admin, dict): + # New format - use stored data + results.append({ + "userId": admin.get("userId", ""), + "displayName": admin.get("displayName", ""), + "email": admin.get("email", ""), + "role": "Admin" + }) # doc managers for dm in ws.get("documentManagers", []): results.append({ @@ -495,6 +536,25 @@ def api_add_public_member(ws_id): }) ws["modifiedDate"] = datetime.utcnow().isoformat() cosmos_public_workspaces_container.upsert_item(ws) + + # Send notification to the added member + try: + create_notification( + user_id=new_id, + notification_type='public_workspace_membership_change', + title='Added to Public Workspace', + message=f"You have been added to the public workspace '{ws.get('name', 'Unknown')}' as Document Manager.", + link_url=f"/manage_public_workspace?workspace_id={ws_id}", + metadata={ + 'workspace_id': ws_id, + 'workspace_name': ws.get('name', 'Unknown'), + 'role': 'DocumentManager', + 'added_by': info.get('email', 'Unknown') + } + ) + except Exception as notif_error: + debug_print(f"Failed to create notification for new member: {notif_error}") + return jsonify({"message": "Member added"}), 200 @app.route("/api/public_workspaces//members/", methods=["DELETE"]) @@ -522,15 +582,14 @@ def api_remove_public_member(ws_id, member_id): # only Owner/Admin can remove others role = ( "Owner" if ws["owner"]["userId"] == user_id else - "Admin" if user_id in ws.get("admins", []) else + "Admin" if is_user_in_admins(user_id, ws.get("admins", [])) else None ) if role not in ["Owner", "Admin"]: return jsonify({"error": "Forbidden"}), 403 # remove from admins if present - if member_id in ws.get("admins", []): - ws["admins"].remove(member_id) + ws["admins"] = remove_user_from_admins(member_id, ws.get("admins", [])) # remove from doc managers ws["documentManagers"] = [ dm for dm in ws.get("documentManagers", []) @@ -538,7 +597,7 @@ def api_remove_public_member(ws_id, member_id): ] ws["modifiedDate"] = datetime.utcnow().isoformat() cosmos_public_workspaces_container.upsert_item(ws) - return jsonify({"message": "Removed"}), 200 + return jsonify({"success": True, "message": "Removed"}), 200 @app.route("/api/public_workspaces//members/", methods=["PATCH"]) 
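The is_user_in_admins and remove_user_from_admins helpers added at the top of this file accept both the legacy admins format (a list of user-id strings) and the new format (a list of userId/email/displayName dicts). A quick self-contained check of that behaviour, using made-up ids, is:

legacy_admins = ["user-1", "user-2"]                                                # old format
new_admins = [{"userId": "user-1", "email": "a@example.com", "displayName": "A"}]   # new format

assert is_user_in_admins("user-1", legacy_admins)
assert is_user_in_admins("user-1", new_admins)
assert not is_user_in_admins("user-3", new_admins)
assert remove_user_from_admins("user-1", legacy_admins) == ["user-2"]
assert remove_user_from_admins("user-1", new_admins) == []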
@swagger_route(security=get_auth_security()) @@ -562,22 +621,50 @@ def api_update_public_member_role(ws_id, member_id): role = ( "Owner" if ws["owner"]["userId"] == user_id else - "Admin" if user_id in ws.get("admins", []) else + "Admin" if is_user_in_admins(user_id, ws.get("admins", [])) else None ) if role not in ["Owner", "Admin"]: return jsonify({"error": "Forbidden"}), 403 + # Get member details (from documentManagers or Graph API) + member_name = "" + member_email = "" + for dm in ws.get("documentManagers", []): + if dm.get("userId") == member_id: + member_name = dm.get("displayName", "") + member_email = dm.get("email", "") + break + + # If not found in documentManagers, try to get from existing admins or Graph + if not member_name: + for admin in ws.get("admins", []): + if isinstance(admin, dict) and admin.get("userId") == member_id: + member_name = admin.get("displayName", "") + member_email = admin.get("email", "") + break + if not member_name: + # Fetch from Graph API + try: + details = get_user_details_from_graph(member_id) + member_name = details.get("displayName", "") + member_email = details.get("email", "") + except: + pass + # clear any existing - if member_id in ws.get("admins", []): - ws["admins"].remove(member_id) + ws["admins"] = remove_user_from_admins(member_id, ws.get("admins", [])) ws["documentManagers"] = [ dm for dm in ws.get("documentManagers", []) if dm["userId"] != member_id ] if new_role == "Admin": - ws.setdefault("admins", []).append(member_id) + ws.setdefault("admins", []).append({ + "userId": member_id, + "displayName": member_name, + "email": member_email + }) elif new_role == "DocumentManager": # need displayName/email from pending or empty ws.setdefault("documentManagers", []).append({ @@ -590,7 +677,37 @@ def api_update_public_member_role(ws_id, member_id): ws["modifiedDate"] = datetime.utcnow().isoformat() cosmos_public_workspaces_container.upsert_item(ws) - return jsonify({"message": "Role updated"}), 200 + + # Send notification to the member whose role changed + try: + # Determine old role for notification + old_role = "DocumentManager" # Default, will be corrected if needed + for admin in ws.get("admins", []): + if isinstance(admin, dict) and admin.get("userId") == member_id: + old_role = "Admin" + break + elif isinstance(admin, str) and admin == member_id: + old_role = "Admin" + break + + create_notification( + user_id=member_id, + notification_type='public_workspace_membership_change', + title='Workspace Role Changed', + message=f"Your role in the public workspace '{ws.get('name', 'Unknown')}' has been changed to {new_role}.", + link_url=f"/manage_public_workspace?workspace_id={ws_id}", + metadata={ + 'workspace_id': ws_id, + 'workspace_name': ws.get('name', 'Unknown'), + 'old_role': old_role, + 'new_role': new_role, + 'changed_by': info.get('email', 'Unknown') + } + ) + except Exception as notif_error: + debug_print(f"Failed to create notification for role change: {notif_error}") + + return jsonify({"success": True, "message": "Role updated"}), 200 @app.route("/api/public_workspaces//transferOwnership", methods=["PATCH"]) @swagger_route(security=get_auth_security()) @@ -709,3 +826,292 @@ def api_public_prompt_count(ws_id): ) prompt_count = next(count_iter, 0) return jsonify({"promptCount": prompt_count}), 200 + + @app.route("/api/public_workspaces//stats", methods=["GET"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + @enabled_required("enable_public_workspaces") + def api_public_workspace_stats(ws_id): + 
""" + GET /api/public_workspaces//stats + Returns statistics for the workspace including documents, storage, tokens, and members. + """ + info = get_current_user_info() + user_id = info["userId"] + + ws = find_public_workspace_by_id(ws_id) + if not ws: + return jsonify({"error": "Not found"}), 404 + + # Check user has access - must be member + is_member = ( + ws["owner"]["userId"] == user_id or + is_user_in_admins(user_id, ws.get("admins", [])) or + any(dm["userId"] == user_id for dm in ws.get("documentManagers", [])) + ) + if not is_member: + return jsonify({"error": "Forbidden"}), 403 + + # Get metrics from workspace record (pre-calculated) + metrics = ws.get("metrics", {}) + document_metrics = metrics.get("document_metrics", {}) + + total_documents = document_metrics.get("total_documents", 0) + storage_used = document_metrics.get("storage_account_size", 0) + + # Get member count + owner = ws.get("owner", {}) + admins = ws.get("admins", []) + doc_managers = ws.get("documentManagers", []) + total_members = 1 + len(admins) + len(doc_managers) + + # Get token usage from activity logs (last 30 days) + from datetime import datetime, timedelta + thirty_days_ago = (datetime.utcnow() - timedelta(days=30)).isoformat() + + debug_print(f"[PUBLIC_WORKSPACE_STATS] Workspace ID: {ws_id}") + debug_print(f"[PUBLIC_WORKSPACE_STATS] Start date: {thirty_days_ago}") + + token_query = """ + SELECT a.usage + FROM a + WHERE a.workspace_context.public_workspace_id = @wsId + AND a.timestamp >= @startDate + AND a.activity_type = 'token_usage' + """ + token_params = [ + {"name": "@wsId", "value": ws_id}, + {"name": "@startDate", "value": thirty_days_ago} + ] + + total_tokens = 0 + try: + token_iter = cosmos_activity_logs_container.query_items( + query=token_query, + parameters=token_params, + enable_cross_partition_query=True + ) + for item in token_iter: + usage = item.get("usage", {}) + total_tokens += usage.get("total_tokens", 0) + debug_print(f"[PUBLIC_WORKSPACE_STATS] Total tokens accumulated: {total_tokens}") + except Exception as e: + debug_print(f"[PUBLIC_WORKSPACE_STATS] Error querying total tokens: {e}") + import traceback + traceback.print_exc() + + # Get activity data for charts (last 30 days) + doc_activity_labels = [] + doc_upload_data = [] + doc_delete_data = [] + token_usage_labels = [] + token_usage_data = [] + + # Generate labels for last 30 days + for i in range(29, -1, -1): + date = datetime.utcnow() - timedelta(days=i) + doc_activity_labels.append(date.strftime("%m/%d")) + token_usage_labels.append(date.strftime("%m/%d")) + doc_upload_data.append(0) + doc_delete_data.append(0) + token_usage_data.append(0) + + # Get document upload activity by day + doc_upload_query = """ + SELECT a.timestamp, a.created_at + FROM a + WHERE a.workspace_context.public_workspace_id = @wsId + AND a.timestamp >= @startDate + AND a.activity_type = 'document_creation' + """ + debug_print(f"[PUBLIC_WORKSPACE_STATS] Document upload query: {doc_upload_query}") + debug_print(f"[PUBLIC_WORKSPACE_STATS] Query params: {token_params}") + try: + activity_iter = cosmos_activity_logs_container.query_items( + query=doc_upload_query, + parameters=token_params, + enable_cross_partition_query=True + ) + upload_results = list(activity_iter) + debug_print(f"[PUBLIC_WORKSPACE_STATS] Document upload results count: {len(upload_results)}") + + for item in upload_results: + timestamp = item.get("timestamp") or item.get("created_at") + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + day_date = 
dt.strftime("%m/%d") + if day_date in doc_activity_labels: + idx = doc_activity_labels.index(day_date) + doc_upload_data[idx] += 1 + debug_print(f"[PUBLIC_WORKSPACE_STATS] Added upload for {day_date}") + except Exception as e: + debug_print(f"[PUBLIC_WORKSPACE_STATS] Error parsing timestamp {timestamp}: {e}") + except Exception as e: + debug_print(f"[PUBLIC_WORKSPACE_STATS] Error querying document uploads: {e}") + import traceback + traceback.print_exc() + + # Get document delete activity by day + doc_delete_query = """ + SELECT a.timestamp, a.created_at + FROM a + WHERE a.workspace_context.public_workspace_id = @wsId + AND a.timestamp >= @startDate + AND a.activity_type = 'document_deletion' + """ + debug_print(f"[PUBLIC_WORKSPACE_STATS] Document delete query: {doc_delete_query}") + try: + delete_iter = cosmos_activity_logs_container.query_items( + query=doc_delete_query, + parameters=token_params, + enable_cross_partition_query=True + ) + delete_results = list(delete_iter) + debug_print(f"[PUBLIC_WORKSPACE_STATS] Document delete results count: {len(delete_results)}") + + for item in delete_results: + timestamp = item.get("timestamp") or item.get("created_at") + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + day_date = dt.strftime("%m/%d") + if day_date in doc_activity_labels: + idx = doc_activity_labels.index(day_date) + doc_delete_data[idx] += 1 + debug_print(f"[PUBLIC_WORKSPACE_STATS] Added delete for {day_date}") + except Exception as e: + debug_print(f"[PUBLIC_WORKSPACE_STATS] Error parsing timestamp {timestamp}: {e}") + except Exception as e: + debug_print(f"[PUBLIC_WORKSPACE_STATS] Error querying document deletes: {e}") + import traceback + traceback.print_exc() + + # Get token usage by day + token_activity_query = """ + SELECT a.timestamp, a.created_at, a.usage + FROM a + WHERE a.workspace_context.public_workspace_id = @wsId + AND a.timestamp >= @startDate + AND a.activity_type = 'token_usage' + """ + debug_print(f"[PUBLIC_WORKSPACE_STATS] Token usage query: {token_activity_query}") + try: + token_activity_iter = cosmos_activity_logs_container.query_items( + query=token_activity_query, + parameters=token_params, + enable_cross_partition_query=True + ) + token_results = list(token_activity_iter) + debug_print(f"[PUBLIC_WORKSPACE_STATS] Token usage results count: {len(token_results)}") + + for item in token_results: + timestamp = item.get("timestamp") or item.get("created_at") + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + day_date = dt.strftime("%m/%d") + if day_date in token_usage_labels: + idx = token_usage_labels.index(day_date) + usage = item.get("usage", {}) + tokens = usage.get("total_tokens", 0) + token_usage_data[idx] += tokens + debug_print(f"[PUBLIC_WORKSPACE_STATS] Added {tokens} tokens for {day_date}") + except Exception as e: + debug_print(f"[PUBLIC_WORKSPACE_STATS] Error parsing timestamp {timestamp}: {e}") + except Exception as e: + debug_print(f"[PUBLIC_WORKSPACE_STATS] Error querying token usage: {e}") + import traceback + traceback.print_exc() + + # Get separate storage metrics + ai_search_size = document_metrics.get("ai_search_size", 0) + storage_account_size = document_metrics.get("storage_account_size", 0) + + stats = { + "totalDocuments": total_documents, + "storageUsed": storage_used, + "storageLimit": 10737418240, # 10GB default + "totalTokens": total_tokens, + "totalMembers": total_members, + "storage": { + "ai_search_size": ai_search_size, + "storage_account_size": 
storage_account_size + }, + "documentActivity": { + "labels": doc_activity_labels, + "uploads": doc_upload_data, + "deletes": doc_delete_data + }, + "tokenUsage": { + "labels": token_usage_labels, + "data": token_usage_data + } + } + + debug_print(f"[PUBLIC_WORKSPACE_STATS] Final stats: {stats}") + + return jsonify(stats), 200 + + @app.route("/api/public_workspaces//activity", methods=["GET"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + @enabled_required("enable_public_workspaces") + def api_public_workspace_activity(ws_id): + """ + GET /api/public_workspaces//activity + Returns recent activity timeline for the workspace. + Only accessible by owner and admins. + """ + info = get_current_user_info() + user_id = info["userId"] + + ws = find_public_workspace_by_id(ws_id) + if not ws: + return jsonify({"error": "Not found"}), 404 + + # Check user is owner or admin (NOT document managers or regular members) + is_owner = ws["owner"]["userId"] == user_id + is_admin = is_user_in_admins(user_id, ws.get("admins", [])) + + if not (is_owner or is_admin): + return jsonify({"error": "Forbidden - Only workspace owners and admins can view activity timeline"}), 403 + + # Get pagination parameters + limit = request.args.get('limit', 50, type=int) + if limit not in [10, 20, 50]: + limit = 50 + + # Get recent activity + query = f""" + SELECT TOP {limit} * + FROM a + WHERE a.workspace_context.public_workspace_id = @wsId + ORDER BY a.timestamp DESC + """ + params = [{"name": "@wsId", "value": ws_id}] + + debug_print(f"[PUBLIC_WORKSPACE_ACTIVITY] Workspace ID: {ws_id}") + debug_print(f"[PUBLIC_WORKSPACE_ACTIVITY] Query: {query}") + debug_print(f"[PUBLIC_WORKSPACE_ACTIVITY] Params: {params}") + + activities = [] + try: + activity_iter = cosmos_activity_logs_container.query_items( + query=query, + parameters=params, + enable_cross_partition_query=True + ) + activities = list(activity_iter) + debug_print(f"[PUBLIC_WORKSPACE_ACTIVITY] Found {len(activities)} activity records") + if activities: + debug_print(f"[PUBLIC_WORKSPACE_ACTIVITY] Sample activity: {activities[0] if activities else 'None'}") + except Exception as e: + debug_print(f"[PUBLIC_WORKSPACE_ACTIVITY] Error querying activities: {e}") + import traceback + traceback.print_exc() + + return jsonify(activities), 200 + diff --git a/application/single_app/route_backend_retention_policy.py b/application/single_app/route_backend_retention_policy.py new file mode 100644 index 00000000..60935f60 --- /dev/null +++ b/application/single_app/route_backend_retention_policy.py @@ -0,0 +1,695 @@ +# route_backend_retention_policy.py + +from config import * +from functions_authentication import * +from functions_settings import * +from functions_retention_policy import execute_retention_policy, get_all_user_settings, get_all_groups, get_all_public_workspaces +from functions_activity_logging import log_retention_policy_force_push +from swagger_wrapper import swagger_route, get_auth_security +from functions_debug import debug_print + + +def register_route_backend_retention_policy(app): + + @app.route('/api/admin/retention-policy/settings', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @admin_required + def get_retention_policy_settings(): + """ + Get current retention policy settings and status. 
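Before the handler body that follows, a hedged sketch of how an admin client might consume this settings endpoint. The base URL, the authenticated `requests.Session`, and the surrounding script are assumptions; only the route path and response keys are taken from the handler below:

```python
# Illustrative client sketch; assumes an authenticated requests.Session and a
# local base URL. The response keys mirror the handler below.
import requests

session = requests.Session()  # assumed to already carry a valid auth cookie
resp = session.get("http://localhost:5000/api/admin/retention-policy/settings")
resp.raise_for_status()

payload = resp.json()
if payload.get("success"):
    cfg = payload["settings"]
    print("Personal retention enabled:", cfg["enable_retention_policy_personal"])
    print("Execution hour (UTC):", cfg["retention_policy_execution_hour"])
    print("Last run:", cfg.get("retention_policy_last_run"))
```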
+ """ + try: + settings = get_settings() + + return jsonify({ + 'success': True, + 'settings': { + 'enable_retention_policy_personal': settings.get('enable_retention_policy_personal', False), + 'enable_retention_policy_group': settings.get('enable_retention_policy_group', False), + 'enable_retention_policy_public': settings.get('enable_retention_policy_public', False), + 'retention_policy_execution_hour': settings.get('retention_policy_execution_hour', 2), + 'retention_policy_last_run': settings.get('retention_policy_last_run'), + 'retention_policy_next_run': settings.get('retention_policy_next_run'), + 'retention_conversation_min_days': settings.get('retention_conversation_min_days', 1), + 'retention_conversation_max_days': settings.get('retention_conversation_max_days', 3650), + 'retention_document_min_days': settings.get('retention_document_min_days', 1), + 'retention_document_max_days': settings.get('retention_document_max_days', 3650) + } + }) + + except Exception as e: + debug_print(f"Error fetching retention policy settings: {e}") + log_event(f"Fetching retention policy settings failed: {e}", level=logging.ERROR) + return jsonify({ + 'success': False, + 'error': 'Failed to fetch retention policy settings' + }), 500 + + + @app.route('/api/admin/retention-policy/settings', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @admin_required + def update_retention_policy_settings(): + """ + Update retention policy admin settings. + + Body: + enable_retention_policy_personal (bool): Enable for personal workspaces + enable_retention_policy_group (bool): Enable for group workspaces + enable_retention_policy_public (bool): Enable for public workspaces + retention_policy_execution_hour (int): Hour of day to execute (0-23) + """ + try: + data = request.get_json() + settings = get_settings() + + # Update settings if provided + if 'enable_retention_policy_personal' in data: + settings['enable_retention_policy_personal'] = bool(data['enable_retention_policy_personal']) + + if 'enable_retention_policy_group' in data: + settings['enable_retention_policy_group'] = bool(data['enable_retention_policy_group']) + + if 'enable_retention_policy_public' in data: + settings['enable_retention_policy_public'] = bool(data['enable_retention_policy_public']) + + if 'retention_policy_execution_hour' in data: + hour = int(data['retention_policy_execution_hour']) + if 0 <= hour <= 23: + settings['retention_policy_execution_hour'] = hour + + # Recalculate next run time + next_run = datetime.now(timezone.utc).replace(hour=hour, minute=0, second=0, microsecond=0) + if next_run <= datetime.now(timezone.utc): + next_run += timedelta(days=1) + settings['retention_policy_next_run'] = next_run.isoformat() + else: + return jsonify({ + 'success': False, + 'error': 'Execution hour must be between 0 and 23' + }), 400 + + update_settings(settings) + + return jsonify({ + 'success': True, + 'message': 'Retention policy settings updated successfully' + }) + + except Exception as e: + debug_print(f"Error updating retention policy settings: {e}") + log_event(f"Retention policy settings update failed: {e}", level=logging.ERROR) + return jsonify({ + 'success': False, + 'error': 'Failed to update retention policy settings' + }), 500 + + + @app.route('/api/retention-policy/defaults/', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def get_retention_policy_defaults(workspace_type): + """ + Get organization default retention policy settings for a 
specific workspace type. + + Args: + workspace_type: One of 'personal', 'group', or 'public' + + Returns: + JSON with default_conversation_days and default_document_days for the workspace type + """ + try: + # Validate workspace type + if workspace_type not in ['personal', 'group', 'public']: + return jsonify({ + 'success': False, + 'error': f'Invalid workspace type: {workspace_type}' + }), 400 + + settings = get_settings() + + # Get the default values for the specified workspace type + default_conversation = settings.get(f'default_retention_conversation_{workspace_type}', 'none') + default_document = settings.get(f'default_retention_document_{workspace_type}', 'none') + + # Get human-readable labels for the values + def get_retention_label(value): + if value == 'none' or value is None: + return 'No automatic deletion' + try: + days = int(value) + if days == 1: + return '1 day' + elif days == 21: + return '21 days (3 weeks)' + elif days == 90: + return '90 days (3 months)' + elif days == 180: + return '180 days (6 months)' + elif days == 365: + return '365 days (1 year)' + elif days == 730: + return '730 days (2 years)' + else: + return f'{days} days' + except (ValueError, TypeError): + return 'No automatic deletion' + + return jsonify({ + 'success': True, + 'workspace_type': workspace_type, + 'default_conversation_days': default_conversation, + 'default_document_days': default_document, + 'default_conversation_label': get_retention_label(default_conversation), + 'default_document_label': get_retention_label(default_document) + }) + + except Exception as e: + debug_print(f"Error fetching retention policy defaults: {e}") + log_event(f"Fetching retention policy defaults failed: {e}", level=logging.ERROR) + return jsonify({ + 'success': False, + 'error': 'Failed to fetch retention policy defaults' + }), 500 + + + @app.route('/api/admin/retention-policy/execute', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @admin_required + def manual_execute_retention_policy(): + """ + Manually execute retention policy for selected workspace scopes. 
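The defaults endpoint above maps raw retention values to human-readable labels via `get_retention_label`. Pulled out here as a standalone helper for clarity; the behavior is copied from the route, with the elif chain condensed into a lookup table (an equivalent restructuring, not a change in behavior):

```python
# Standalone copy of the label mapping used by the defaults endpoint above.
def get_retention_label(value) -> str:
    if value == 'none' or value is None:
        return 'No automatic deletion'
    try:
        days = int(value)
    except (ValueError, TypeError):
        return 'No automatic deletion'
    special = {
        1: '1 day',
        21: '21 days (3 weeks)',
        90: '90 days (3 months)',
        180: '180 days (6 months)',
        365: '365 days (1 year)',
        730: '730 days (2 years)',
    }
    return special.get(days, f'{days} days')

assert get_retention_label('none') == 'No automatic deletion'
assert get_retention_label(90) == '90 days (3 months)'
assert get_retention_label('45') == '45 days'
```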
+ + Body: + scopes (list): List of workspace types to process: 'personal', 'group', 'public' + """ + try: + data = request.get_json() + scopes = data.get('scopes', []) + + if not scopes: + return jsonify({ + 'success': False, + 'error': 'No workspace scopes provided' + }), 400 + + # Validate scopes + valid_scopes = ['personal', 'group', 'public'] + invalid_scopes = [s for s in scopes if s not in valid_scopes] + if invalid_scopes: + return jsonify({ + 'success': False, + 'error': f'Invalid workspace scopes: {", ".join(invalid_scopes)}' + }), 400 + + # Execute retention policy for selected scopes + debug_print(f"Manual execution of retention policy for scopes: {scopes}") + results = execute_retention_policy(workspace_scopes=scopes, manual_execution=True) + + return jsonify({ + 'success': results.get('success', False), + 'message': 'Retention policy executed successfully' if results.get('success') else 'Retention policy execution failed', + 'results': results + }) + + except Exception as e: + debug_print(f"Error executing retention policy manually: {e}") + log_event(f"Manual retention policy execution failed: {e}", level=logging.ERROR) + return jsonify({ + 'success': False, + 'error': f'Failed to execute retention policy: {str(e)}' + }), 500 + + + @app.route('/api/admin/retention-policy/force-push', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @admin_required + def force_push_retention_defaults(): + """ + Force push organization default retention policies to all users/groups/workspaces. + This resets all custom retention policies to use the organization default ('default' value). + + Body: + scopes (list): List of workspace types to push defaults to: 'personal', 'group', 'public' + """ + try: + data = request.get_json() + scopes = data.get('scopes', []) + + if not scopes: + return jsonify({ + 'success': False, + 'error': 'No workspace scopes provided' + }), 400 + + # Validate scopes + valid_scopes = ['personal', 'group', 'public'] + invalid_scopes = [s for s in scopes if s not in valid_scopes] + if invalid_scopes: + return jsonify({ + 'success': False, + 'error': f'Invalid workspace scopes: {", ".join(invalid_scopes)}' + }), 400 + + details = {} + total_updated = 0 + + # Force push to personal workspaces (user settings) + if 'personal' in scopes: + debug_print("Force pushing retention defaults to personal workspaces...") + all_users = get_all_user_settings() + personal_count = 0 + + for user in all_users: + user_id = user.get('id') + if not user_id: + continue + + try: + # Update user's retention policy to use 'default' + user_settings = user.get('settings', {}) + user_settings['retention_policy'] = { + 'conversation_retention_days': 'default', + 'document_retention_days': 'default' + } + user['settings'] = user_settings + + cosmos_user_settings_container.upsert_item(user) + personal_count += 1 + except Exception as e: + debug_print(f"Error updating user {user_id}: {e}") + log_event(f"Error updating user {user_id} during force push: {e}", level=logging.ERROR) + continue + + details['personal'] = personal_count + total_updated += personal_count + debug_print(f"Updated {personal_count} personal workspaces") + + # Force push to group workspaces + if 'group' in scopes: + debug_print("Force pushing retention defaults to group workspaces...") + from functions_group import cosmos_groups_container + all_groups = get_all_groups() + group_count = 0 + + for group in all_groups: + group_id = group.get('id') + if not group_id: + continue + + try: + # Update group's 
retention policy to use 'default' + group['retention_policy'] = { + 'conversation_retention_days': 'default', + 'document_retention_days': 'default' + } + + cosmos_groups_container.upsert_item(group) + group_count += 1 + except Exception as e: + debug_print(f"Error updating group {group_id}: {e}") + log_event(f"Error updating group {group_id} during force push: {e}", level=logging.ERROR) + continue + + details['group'] = group_count + total_updated += group_count + debug_print(f"Updated {group_count} group workspaces") + + # Force push to public workspaces + if 'public' in scopes: + debug_print("Force pushing retention defaults to public workspaces...") + from functions_public_workspaces import cosmos_public_workspaces_container + all_workspaces = get_all_public_workspaces() + public_count = 0 + + for workspace in all_workspaces: + workspace_id = workspace.get('id') + if not workspace_id: + continue + + try: + # Update workspace's retention policy to use 'default' + workspace['retention_policy'] = { + 'conversation_retention_days': 'default', + 'document_retention_days': 'default' + } + + cosmos_public_workspaces_container.upsert_item(workspace) + public_count += 1 + except Exception as e: + debug_print(f"Error updating public workspace {workspace_id}: {e}") + log_event(f"Error updating public workspace {workspace_id} during force push: {e}", level=logging.ERROR) + continue + + details['public'] = public_count + total_updated += public_count + debug_print(f"Updated {public_count} public workspaces") + + # Log to activity logs for audit trail + admin_user_id = session.get('user', {}).get('oid', 'unknown') + admin_email = session.get('user', {}).get('preferred_username', session.get('user', {}).get('email', 'unknown')) + log_retention_policy_force_push( + admin_user_id=admin_user_id, + admin_email=admin_email, + scopes=scopes, + results=details, + total_updated=total_updated + ) + + log_event("retention_policy_force_push", { + "scopes": scopes, + "updated_count": total_updated, + "details": details + }) + + return jsonify({ + 'success': True, + 'message': f'Defaults pushed to {total_updated} items', + 'updated_count': total_updated, + 'scopes': scopes, + 'details': details + }) + + except Exception as e: + debug_print(f"Error force pushing retention defaults: {e}") + log_event(f"Force push retention defaults failed: {e}", level=logging.ERROR) + return jsonify({ + 'success': False, + 'error': f'Failed to push retention defaults' + }), 500 + + + @app.route('/api/retention-policy/user', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def update_user_retention_settings(): + """ + Update retention policy settings for the current user's personal workspace. 
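The force-push branches above repeat the same reset-and-upsert loop for each scope; a condensed sketch of that shared pattern follows. The item shape and the `upsert_item()` call mirror the Cosmos containers in the diff, the helper itself is illustrative, and note that the personal scope nests the policy under the user's `settings`, which this flat-field sketch does not model:

```python
# Illustrative helper condensing the per-scope force-push loops above.
# `container` is assumed to expose upsert_item() like the Cosmos containers in the diff.
DEFAULT_POLICY = {
    'conversation_retention_days': 'default',
    'document_retention_days': 'default',
}

def reset_retention_to_default(items, container, policy_key='retention_policy'):
    updated = 0
    for item in items:
        if not item.get('id'):
            continue
        try:
            item[policy_key] = dict(DEFAULT_POLICY)
            container.upsert_item(item)
            updated += 1
        except Exception as exc:  # keep going; one bad record should not stop the push
            print(f"Skipping {item.get('id')}: {exc}")
    return updated
```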
+ + Body: + conversation_retention_days (str|int): Number of days or 'none' + document_retention_days (str|int): Number of days or 'none' + """ + try: + user_id = get_current_user_id() + data = request.get_json() + + retention_settings = {} + + # Validate and parse conversation retention + if 'conversation_retention_days' in data: + conv_retention = data['conversation_retention_days'] + if conv_retention == 'none' or conv_retention is None: + retention_settings['conversation_retention_days'] = 'none' + else: + try: + days = int(conv_retention) + settings = get_settings() + min_days = settings.get('retention_conversation_min_days', 1) + max_days = settings.get('retention_conversation_max_days', 3650) + + if days < min_days or days > max_days: + return jsonify({ + 'success': False, + 'error': f'Conversation retention must be between {min_days} and {max_days} days' + }), 400 + + retention_settings['conversation_retention_days'] = days + except ValueError: + return jsonify({ + 'success': False, + 'error': 'Invalid conversation retention value' + }), 400 + + # Validate and parse document retention + if 'document_retention_days' in data: + doc_retention = data['document_retention_days'] + if doc_retention == 'none' or doc_retention is None: + retention_settings['document_retention_days'] = 'none' + else: + try: + days = int(doc_retention) + settings = get_settings() + min_days = settings.get('retention_document_min_days', 1) + max_days = settings.get('retention_document_max_days', 3650) + + if days < min_days or days > max_days: + return jsonify({ + 'success': False, + 'error': f'Document retention must be between {min_days} and {max_days} days' + }), 400 + + retention_settings['document_retention_days'] = days + except ValueError: + return jsonify({ + 'success': False, + 'error': 'Invalid document retention value' + }), 400 + + if not retention_settings: + return jsonify({ + 'success': False, + 'error': 'No retention settings provided' + }), 400 + + # Update user settings + update_user_settings(user_id, {'retention_policy': retention_settings}) + + return jsonify({ + 'success': True, + 'message': 'Retention settings updated successfully' + }) + + except Exception as e: + debug_print(f"Error updating user retention settings: {e}") + log_event(f"User retention settings update failed: {e}", level=logging.ERROR) + return jsonify({ + 'success': False, + 'error': 'Failed to update retention settings' + }), 500 + + + @app.route('/api/retention-policy/group/', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def update_group_retention_settings(group_id): + """ + Update retention policy settings for a group workspace. + User must be owner or admin of the group. + + Body: + conversation_retention_days (str|int): Number of days or 'none' + document_retention_days (str|int): Number of days or 'none' + """ + try: + user_id = get_current_user_id() + data = request.get_json() + + # Get group and verify permissions + from functions_group import find_group_by_id, get_user_role_in_group + group = find_group_by_id(group_id) + + if not group: + return jsonify({ + 'success': False, + 'error': 'Group not found' + }), 404 + + user_role = get_user_role_in_group(group, user_id) + if user_role not in ['Owner', 'Admin']: + return jsonify({ + 'success': False, + 'error': 'Insufficient permissions. Must be group owner or admin.' 
+ }), 403 + + retention_settings = {} + + # Validate and parse conversation retention + if 'conversation_retention_days' in data: + conv_retention = data['conversation_retention_days'] + if conv_retention == 'none' or conv_retention is None: + retention_settings['conversation_retention_days'] = 'none' + else: + try: + days = int(conv_retention) + settings = get_settings() + min_days = settings.get('retention_conversation_min_days', 1) + max_days = settings.get('retention_conversation_max_days', 3650) + + if days < min_days or days > max_days: + return jsonify({ + 'success': False, + 'error': f'Conversation retention must be between {min_days} and {max_days} days' + }), 400 + + retention_settings['conversation_retention_days'] = days + except ValueError: + return jsonify({ + 'success': False, + 'error': 'Invalid conversation retention value' + }), 400 + + # Validate and parse document retention + if 'document_retention_days' in data: + doc_retention = data['document_retention_days'] + if doc_retention == 'none' or doc_retention is None: + retention_settings['document_retention_days'] = 'none' + else: + try: + days = int(doc_retention) + settings = get_settings() + min_days = settings.get('retention_document_min_days', 1) + max_days = settings.get('retention_document_max_days', 3650) + + if days < min_days or days > max_days: + return jsonify({ + 'success': False, + 'error': f'Document retention must be between {min_days} and {max_days} days' + }), 400 + + retention_settings['document_retention_days'] = days + except ValueError: + return jsonify({ + 'success': False, + 'error': 'Invalid document retention value' + }), 400 + + if not retention_settings: + return jsonify({ + 'success': False, + 'error': 'No retention settings provided' + }), 400 + + # Update group document + group['retention_policy'] = retention_settings + cosmos_groups_container.upsert_item(group) + + return jsonify({ + 'success': True, + 'message': 'Group retention settings updated successfully' + }) + + except Exception as e: + debug_print(f"Error updating group retention settings: {e}") + log_event(f"Group retention settings update failed: {e}", level=logging.ERROR) + return jsonify({ + 'success': False, + 'error': 'Failed to update retention settings' + }), 500 + + + @app.route('/api/retention-policy/public/', methods=['POST']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def update_public_workspace_retention_settings(public_workspace_id): + """ + Update retention policy settings for a public workspace. + User must be owner or admin of the workspace. + + Body: + conversation_retention_days (str|int): Number of days or 'none' + document_retention_days (str|int): Number of days or 'none' + """ + try: + user_id = get_current_user_id() + data = request.get_json() + + # Get workspace and verify permissions + from functions_public_workspaces import find_public_workspace_by_id, get_user_role_in_public_workspace + workspace = find_public_workspace_by_id(public_workspace_id) + + if not workspace: + return jsonify({ + 'success': False, + 'error': 'Public workspace not found' + }), 404 + + user_role = get_user_role_in_public_workspace(workspace, user_id) + if user_role not in ['Owner', 'Admin']: + return jsonify({ + 'success': False, + 'error': 'Insufficient permissions. Must be workspace owner or admin.' 
+ }), 403 + + retention_settings = {} + + # Validate and parse conversation retention + if 'conversation_retention_days' in data: + conv_retention = data['conversation_retention_days'] + if conv_retention == 'none' or conv_retention is None: + retention_settings['conversation_retention_days'] = 'none' + else: + try: + days = int(conv_retention) + settings = get_settings() + min_days = settings.get('retention_conversation_min_days', 1) + max_days = settings.get('retention_conversation_max_days', 3650) + + if days < min_days or days > max_days: + return jsonify({ + 'success': False, + 'error': f'Conversation retention must be between {min_days} and {max_days} days' + }), 400 + + retention_settings['conversation_retention_days'] = days + except ValueError: + return jsonify({ + 'success': False, + 'error': 'Invalid conversation retention value' + }), 400 + + # Validate and parse document retention + if 'document_retention_days' in data: + doc_retention = data['document_retention_days'] + if doc_retention == 'none' or doc_retention is None: + retention_settings['document_retention_days'] = 'none' + else: + try: + days = int(doc_retention) + settings = get_settings() + min_days = settings.get('retention_document_min_days', 1) + max_days = settings.get('retention_document_max_days', 3650) + + if days < min_days or days > max_days: + return jsonify({ + 'success': False, + 'error': f'Document retention must be between {min_days} and {max_days} days' + }), 400 + + retention_settings['document_retention_days'] = days + except ValueError: + return jsonify({ + 'success': False, + 'error': 'Invalid document retention value' + }), 400 + + if not retention_settings: + return jsonify({ + 'success': False, + 'error': 'No retention settings provided' + }), 400 + + # Update workspace document + workspace['retention_policy'] = retention_settings + cosmos_public_workspaces_container.upsert_item(workspace) + + return jsonify({ + 'success': True, + 'message': 'Public workspace retention settings updated successfully' + }) + + except Exception as e: + debug_print(f"Error updating public workspace retention settings: {e}") + log_event(f"Public workspace retention settings update failed: {e}", level=logging.ERROR) + return jsonify({ + 'success': False, + 'error': 'Failed to update retention settings' + }), 500 diff --git a/application/single_app/route_backend_safety.py b/application/single_app/route_backend_safety.py index 350f4a86..73eb6e56 100644 --- a/application/single_app/route_backend_safety.py +++ b/application/single_app/route_backend_safety.py @@ -9,7 +9,7 @@ def register_route_backend_safety(app): @app.route('/api/safety/logs', methods=['GET']) @swagger_route(security=get_auth_security()) @login_required - @admin_required + @safety_violation_admin_required @enabled_required("enable_content_safety") def get_safety_logs(): """ @@ -96,7 +96,7 @@ def get_safety_logs(): @app.route('/api/safety/logs/', methods=['PATCH']) @swagger_route(security=get_auth_security()) @login_required - @admin_required + @safety_violation_admin_required @enabled_required("enable_content_safety") def update_safety_log(log_id): """ diff --git a/application/single_app/route_backend_settings.py b/application/single_app/route_backend_settings.py index 449ba546..30e10cb2 100644 --- a/application/single_app/route_backend_settings.py +++ b/application/single_app/route_backend_settings.py @@ -4,6 +4,9 @@ from functions_documents import * from functions_authentication import * from functions_settings import * +from functions_appinsights 
import log_event +from azure.identity import DefaultAzureCredential +from azure.keyvault.secrets import SecretClient from swagger_wrapper import swagger_route, get_auth_security import redis @@ -276,16 +279,195 @@ def test_connection(): elif test_type == 'azure_doc_intelligence': return _test_azure_doc_intelligence_connection(data) + elif test_type == 'multimodal_vision': + return _test_multimodal_vision_connection(data) + elif test_type == 'chunking_api': # If you have a chunking API test, implement it here. return jsonify({'message': 'Chunking API connection successful'}), 200 + + elif test_type == 'key_vault': + return _test_key_vault_connection(data) + + elif test_type == 'multimodal_vision': + return _test_multimodal_vision_connection(data) else: return jsonify({'error': f'Unknown test_type: {test_type}'}), 400 except Exception as e: return jsonify({'error': str(e)}), 500 + +def _test_multimodal_vision_connection(payload): + """Test multi-modal vision analysis with a sample image.""" + enable_apim = payload.get('enable_apim', False) + vision_model = payload.get('vision_model') + + if not vision_model: + return jsonify({'error': 'No vision model specified'}), 400 + # Create a simple test image (1x1 red pixel PNG) + test_image_base64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg==" + + try: + if enable_apim: + apim_data = payload.get('apim', {}) + endpoint = apim_data.get('endpoint') + api_version = apim_data.get('api_version') + subscription_key = apim_data.get('subscription_key') + + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + api_key=subscription_key + ) + else: + direct_data = payload.get('direct', {}) + endpoint = direct_data.get('endpoint') + api_version = direct_data.get('api_version') + auth_type = direct_data.get('auth_type', 'key') + + if auth_type == 'managed_identity': + token_provider = get_bearer_token_provider( + DefaultAzureCredential(), + cognitive_services_scope + ) + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + azure_ad_token_provider=token_provider + ) + else: + api_key = direct_data.get('key') + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + api_key=api_key + ) + + # Test vision analysis with simple prompt + response = gpt_client.chat.completions.create( + model=vision_model, + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What color is this image? Just say the color." 
+ }, + { + "type": "image_url", + "image_url": { + "url": f"data:image/png;base64,{test_image_base64}" + } + } + ] + } + ], + max_tokens=50 + ) + + result = response.choices[0].message.content + + return jsonify({ + 'message': 'Multi-modal vision connection successful', + 'details': f'Model responded: {result}' + }), 200 + + except Exception as e: + return jsonify({'error': f'Vision test failed: {str(e)}'}), 500 + +def _test_multimodal_vision_connection(payload): + """Test multi-modal vision analysis with a sample image.""" + enable_apim = payload.get('enable_apim', False) + vision_model = payload.get('vision_model') + + if not vision_model: + return jsonify({'error': 'No vision model specified'}), 400 + + # Create a simple test image (1x1 red pixel PNG) + test_image_base64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg==" + + try: + if enable_apim: + apim_data = payload.get('apim', {}) + endpoint = apim_data.get('endpoint') + api_version = apim_data.get('api_version') + subscription_key = apim_data.get('subscription_key') + + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + api_key=subscription_key + ) + else: + direct_data = payload.get('direct', {}) + endpoint = direct_data.get('endpoint') + api_version = direct_data.get('api_version') + auth_type = direct_data.get('auth_type', 'key') + + if auth_type == 'managed_identity': + token_provider = get_bearer_token_provider( + DefaultAzureCredential(), + cognitive_services_scope + ) + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + azure_ad_token_provider=token_provider + ) + else: + api_key = direct_data.get('key') + gpt_client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + api_key=api_key + ) + + # Determine which token parameter to use based on model type + # o-series and gpt-5 models require max_completion_tokens instead of max_tokens + vision_model_lower = vision_model.lower() + api_params = { + "model": vision_model, + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What color is this image? Just say the color." 
+ }, + { + "type": "image_url", + "image_url": { + "url": f"data:image/png;base64,{test_image_base64}" + } + } + ] + } + ] + } + + # Use max_completion_tokens for o-series and gpt-5 models, max_tokens for others + if ('o1' in vision_model_lower or 'o3' in vision_model_lower or 'gpt-5' in vision_model_lower): + api_params["max_completion_tokens"] = 50 + else: + api_params["max_tokens"] = 50 + + # Test vision analysis with simple prompt + response = gpt_client.chat.completions.create(**api_params) + + result = response.choices[0].message.content + + return jsonify({ + 'message': 'Multi-modal vision connection successful', + 'details': f'Model responded: {result}' + }), 200 + + except Exception as e: + return jsonify({'error': f'Vision test failed: {str(e)}'}), 500 + def get_index_client() -> SearchIndexClient: """ Returns a SearchIndexClient wired up based on: @@ -323,9 +505,9 @@ def _test_gpt_connection(payload): # Decide GPT model if enable_apim: apim_data = payload.get('apim', {}) - endpoint = apim_data.get('endpoint') + endpoint = apim_data.get('endpoint') #.rstrip('/openai') api_version = apim_data.get('api_version') - gpt_model = apim_data.get('deployment') + gpt_model = apim_data.get('deployment').split(',')[0] subscription_key = apim_data.get('subscription_key') gpt_client = AzureOpenAI( @@ -383,9 +565,12 @@ def _test_redis_connection(payload): try: if redis_auth_type == 'managed_identity': # Acquire token from managed identity for Redis scope + from config import get_redis_cache_infrastructure_endpoint credential = DefaultAzureCredential() - token = credential.get_token("https://*.cacheinfra.windows.net:10225/appid/.default").token - redis_password = token + redis_hostname = redis_host.split('.')[0] + cache_endpoint = get_redis_cache_infrastructure_endpoint(redis_hostname) + token = credential.get_token(cache_endpoint) + redis_password = token.token else: if not redis_key: return jsonify({'error': 'Redis key is required for key auth'}), 400 @@ -576,43 +761,45 @@ def _test_azure_ai_search_connection(payload): """Attempt to connect to Azure Cognitive Search (or APIM-wrapped).""" enable_apim = payload.get('enable_apim', False) - if enable_apim: - apim_data = payload.get('apim', {}) - endpoint = apim_data.get('endpoint') # e.g. https://my-apim.azure-api.net/search - subscription_key = apim_data.get('subscription_key') - url = f"{endpoint.rstrip('/')}/indexes?api-version=2023-11-01" - headers = { - 'api-key': subscription_key, - 'Content-Type': 'application/json' - } - else: - direct_data = payload.get('direct', {}) - endpoint = direct_data.get('endpoint') # e.g. 
https://.search.windows.net - key = direct_data.get('key') - url = f"{endpoint.rstrip('/')}/indexes?api-version=2023-11-01" - - if direct_data.get('auth_type') == 'managed_identity': - if AZURE_ENVIRONMENT in ("usgovernment", "custom"): # change credential scopes for US Gov or custom environments - credential_scopes=search_resource_manager + "/.default" - arm_scope = credential_scopes - credential = DefaultAzureCredential() - arm_token = credential.get_token(arm_scope).token - headers = { - 'Authorization': f'Bearer {arm_token}', - 'Content-Type': 'application/json' - } + try: + if enable_apim: + apim_data = payload.get('apim', {}) + endpoint = apim_data.get('endpoint') + subscription_key = apim_data.get('subscription_key') + + # Use SearchIndexClient for APIM + credential = AzureKeyCredential(subscription_key) + client = SearchIndexClient(endpoint=endpoint, credential=credential) else: - headers = { - 'api-key': key, - 'Content-Type': 'application/json' - } - - # A small GET to /indexes to verify we have connectivity - resp = requests.get(url, headers=headers, timeout=10) - if resp.status_code == 200: + direct_data = payload.get('direct', {}) + endpoint = direct_data.get('endpoint') + key = direct_data.get('key') + + if direct_data.get('auth_type') == 'managed_identity': + credential = DefaultAzureCredential() + # For managed identity, use the SDK which handles authentication properly + if AZURE_ENVIRONMENT in ("usgovernment", "custom"): + client = SearchIndexClient( + endpoint=endpoint, + credential=credential, + audience=search_resource_manager + ) + else: + # For public cloud, don't use audience parameter + client = SearchIndexClient( + endpoint=endpoint, + credential=credential + ) + else: + credential = AzureKeyCredential(key) + client = SearchIndexClient(endpoint=endpoint, credential=credential) + + # Test by listing indexes (simple operation to verify connectivity) + _ = list(client.list_indexes()) return jsonify({'message': 'Azure AI search connection successful'}), 200 - else: - raise Exception(f"Azure AI search connection error: {resp.status_code} - {resp.text}") + + except Exception as e: + return jsonify({'error': f'Azure AI search connection error: {str(e)}'}), 500 def _test_azure_doc_intelligence_connection(payload): @@ -690,3 +877,35 @@ def _test_azure_doc_intelligence_connection(payload): return jsonify({'message': 'Azure document intelligence connection successful'}), 200 else: return jsonify({'error': f"Document Intelligence error: {status}"}), 500 + +def _test_key_vault_connection(payload): + """Attempt to connect to Azure Key Vault using ephemeral settings.""" + vault_name = payload.get('vault_name', '').strip() + client_id = payload.get('client_id', '').strip() + + if not vault_name: + return jsonify({'error': 'Key Vault name is required'}), 400 + + try: + vault_url = f"https://{vault_name}{KEY_VAULT_DOMAIN}" + + if client_id: + credential = DefaultAzureCredential(managed_identity_client_id=client_id) + else: + credential = DefaultAzureCredential() + + if AZURE_ENVIRONMENT == "custom": + #TODO: Needs to be tested with a custom environment + kv_client = SecretClient(vault_url=vault_url, credential=credential) + else: + kv_client = SecretClient(vault_url=vault_url, credential=credential) + + # Perform a simple list operation to verify connectivity + secrets = kv_client.list_properties_of_secrets() + _ = next(secrets, None) # Attempt to get the first secret (if any) + + return jsonify({'message': 'Key Vault connection successful'}), 200 + + except Exception as e: + 
log_event(f"[AKV_TEST] Key Vault connection error: {str(e)}", level="error") + return jsonify({'error': f'Key Vault connection error. Check Application Insights using "[AKV_TEST]" for details.'}), 500 \ No newline at end of file diff --git a/application/single_app/route_backend_speech.py b/application/single_app/route_backend_speech.py new file mode 100644 index 00000000..3c559ce2 --- /dev/null +++ b/application/single_app/route_backend_speech.py @@ -0,0 +1,204 @@ +# route_backend_speech.py +""" +Backend routes for speech-to-text functionality. +""" +from config import * +from functions_authentication import login_required, get_current_user_id +from functions_settings import get_settings +from functions_debug import debug_print +import azure.cognitiveservices.speech as speechsdk +import os +import tempfile + +try: + from pydub import AudioSegment + PYDUB_AVAILABLE = True +except ImportError: + PYDUB_AVAILABLE = False + print("Warning: pydub not available. Audio conversion may fail for non-WAV formats.") + +def register_route_backend_speech(app): + """Register speech-to-text routes""" + + @app.route('/api/speech/transcribe-chat', methods=['POST']) + @login_required + def transcribe_chat_audio(): + """ + Transcribe audio from chat speech input. + Expects audio blob in 'audio' field of FormData. + Returns JSON with transcribed text or error. + """ + user_id = get_current_user_id() + + # Get settings + settings = get_settings() + + # Check if speech-to-text chat input is enabled + if not settings.get('enable_speech_to_text_input', False): + return jsonify({ + 'success': False, + 'error': 'Speech-to-text chat input is not enabled' + }), 403 + + # Check if audio file was provided + if 'audio' not in request.files: + return jsonify({ + 'success': False, + 'error': 'No audio file provided' + }), 400 + + audio_file = request.files['audio'] + + if audio_file.filename == '': + return jsonify({ + 'success': False, + 'error': 'Empty audio file' + }), 400 + + print(f"[Debug] Received audio file: {audio_file.filename}") + + # Save audio to temporary WAV file + temp_audio_path = None + + try: + # Create temporary file for uploaded audio (always WAV from frontend) + with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as temp_audio: + audio_file.save(temp_audio.name) + temp_audio_path = temp_audio.name + + print(f"[Debug] Audio saved to: {temp_audio_path}") + + # Get speech configuration using existing helper + from functions_documents import _get_speech_config + + speech_endpoint = settings.get('speech_service_endpoint', '') + speech_locale = settings.get('speech_service_locale', 'en-US') + + if not speech_endpoint: + return jsonify({ + 'success': False, + 'error': 'Speech service endpoint not configured' + }), 500 + + # Get speech config + speech_config = _get_speech_config(settings, speech_endpoint, speech_locale) + + print("[Debug] Speech config obtained successfully") + + # WAV files can use direct file input + print(f"[Debug] Using WAV file directly: {temp_audio_path}") + audio_config = speechsdk.AudioConfig(filename=temp_audio_path) + + # Create speech recognizer + speech_recognizer = speechsdk.SpeechRecognizer( + speech_config=speech_config, + audio_config=audio_config + ) + + # Get audio file size for debugging + audio_file_size = os.path.getsize(temp_audio_path) + debug_print(f"[Speech] Audio file size: {audio_file_size} bytes") + + try: + debug_print("[Speech] Starting continuous recognition for longer audio...") + + # Use continuous recognition for longer audio files + all_results = [] 
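The transcription route above assumes the browser already sends WAV, which is why the upload is handed straight to `AudioConfig`. If other containers (webm/ogg) ever need to be accepted, a conversion step using the optional pydub dependency flagged at the top of this module could run before the recognizer is built. This is a hypothetical addition, not part of the diff:

```python
# Hypothetical pre-processing step (not in the route above): convert a non-WAV
# upload to 16 kHz mono WAV with pydub before handing it to the Speech SDK.
# Requires ffmpeg on PATH for webm/ogg input.
import tempfile
from pydub import AudioSegment  # optional dependency, see PYDUB_AVAILABLE above

def convert_to_wav(src_path: str, src_format: str = "webm") -> str:
    audio = AudioSegment.from_file(src_path, format=src_format)
    audio = audio.set_frame_rate(16000).set_channels(1)
    wav_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
    audio.export(wav_file.name, format="wav")
    return wav_file.name
```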
+ done = False + + def handle_recognized(evt): + """Handle recognized speech events""" + if evt.result.reason == speechsdk.ResultReason.RecognizedSpeech: + debug_print(f"[Speech] Recognized: {evt.result.text}") + all_results.append(evt.result.text) + + def handle_canceled(evt): + """Handle cancellation events""" + nonlocal done + debug_print(f"[Speech] Canceled: {evt}") + if evt.reason == speechsdk.CancellationReason.Error: + debug_print(f"[Speech] Error details: {evt.error_details}") + done = True + + def handle_session_stopped(evt): + """Handle session stopped events""" + nonlocal done + debug_print("[Speech] Session stopped") + done = True + + # Connect callbacks + speech_recognizer.recognized.connect(handle_recognized) + speech_recognizer.canceled.connect(handle_canceled) + speech_recognizer.session_stopped.connect(handle_session_stopped) + + # Start continuous recognition + speech_recognizer.start_continuous_recognition() + + # Wait for completion (timeout after 120 seconds) + import time + timeout = 120 + elapsed = 0 + while not done and elapsed < timeout: + time.sleep(0.1) + elapsed += 0.1 + + # Stop recognition + speech_recognizer.stop_continuous_recognition() + + debug_print(f"[Speech] Recognition complete. Recognized {len(all_results)} segments") + + # Combine all recognized text + if all_results: + combined_text = ' '.join(all_results) + debug_print(f"[Speech] Combined text length: {len(combined_text)} characters") + return jsonify({ + 'success': True, + 'text': combined_text + }) + else: + debug_print("[Speech] No speech recognized") + return jsonify({ + 'success': False, + 'error': 'No speech could be recognized' + }) + finally: + # Properly close the recognizer to release file handles + try: + if speech_recognizer: + # Disconnect all callbacks + speech_recognizer.recognized.disconnect_all() + speech_recognizer.canceled.disconnect_all() + speech_recognizer.session_stopped.disconnect_all() + debug_print("[Speech] Disconnected recognizer callbacks") + + # Give the recognizer time to release resources + import time + time.sleep(0.2) + + debug_print("[Speech] Speech recognizer cleanup complete") + except Exception as recognizer_cleanup_error: + print(f"[Debug] Error during recognizer cleanup: {recognizer_cleanup_error}") + + except Exception as e: + print(f"Error transcribing audio: {e}") + import traceback + traceback.print_exc() + return jsonify({ + 'success': False, + 'error': str(e) + }), 500 + + finally: + # Clean up temporary files + if temp_audio_path and os.path.exists(temp_audio_path): + try: + # Longer delay to ensure file handle is fully released on Windows + import time + time.sleep(0.3) + os.remove(temp_audio_path) + print(f"[Debug] Cleaned up temp file: {temp_audio_path}") + except PermissionError as perm_error: + # If still locked, schedule for deletion on next boot or ignore + print(f"[Debug] Temp file still locked, will be cleaned by OS: {temp_audio_path}") + except Exception as cleanup_error: + print(f"[Debug] Error cleaning up temporary files: {cleanup_error}") diff --git a/application/single_app/route_backend_tts.py b/application/single_app/route_backend_tts.py new file mode 100644 index 00000000..11d14cc3 --- /dev/null +++ b/application/single_app/route_backend_tts.py @@ -0,0 +1,238 @@ +# route_backend_tts.py + +from config import * +from functions_authentication import * +from functions_settings import * +from functions_debug import debug_print +from swagger_wrapper import swagger_route, get_auth_security +import azure.cognitiveservices.speech as speechsdk 
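The transcription route above waits for completion with a 0.1 s polling loop and a `done` flag. An equivalent, slightly tighter variant can signal completion with a `threading.Event` wired to the same Speech SDK callbacks; this is an alternative sketch, not what the diff implements, and it simplifies the per-event reason checks used above:

```python
# Alternative wait strategy (not what the route above uses): signal completion
# with a threading.Event instead of polling a boolean flag.
import threading

def recognize_with_timeout(speech_recognizer, timeout_seconds: int = 120):
    results, finished = [], threading.Event()

    # Collect recognized text; empty results (no-match events) are ignored.
    speech_recognizer.recognized.connect(
        lambda evt: results.append(evt.result.text) if evt.result.text else None
    )
    speech_recognizer.session_stopped.connect(lambda evt: finished.set())
    speech_recognizer.canceled.connect(lambda evt: finished.set())

    speech_recognizer.start_continuous_recognition()
    finished.wait(timeout=timeout_seconds)   # blocks until stop/cancel or timeout
    speech_recognizer.stop_continuous_recognition()
    return " ".join(results)
```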
+import io +import time +import random + +def register_route_backend_tts(app): + """ + Text-to-speech API routes using Azure Speech Services + """ + + @app.route("/api/chat/tts", methods=["POST"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def synthesize_speech(): + """ + Synthesize text to speech using Azure Speech Service. + Expects JSON: { + "text": "Text to synthesize", + "voice": "en-US-Andrew:DragonHDLatestNeural", # optional, defaults to Andrew + "speed": 1.0 # optional, 0.5-2.0 range + } + Returns audio/wav stream + """ + try: + debug_print("[TTS] Synthesize speech request received") + + # Get settings + settings = get_settings() + + # Check if TTS is enabled + if not settings.get('enable_text_to_speech', False): + debug_print("[TTS] Text-to-speech is not enabled in settings") + return jsonify({"error": "Text-to-speech is not enabled"}), 403 + + # Validate speech service configuration + speech_key = settings.get('speech_service_key', '') + speech_region = settings.get('speech_service_location', '') + + if not speech_key or not speech_region: + debug_print("[TTS] Speech service not configured - missing key or region") + return jsonify({"error": "Speech service not configured"}), 500 + + debug_print(f"[TTS] Speech service configured - region: {speech_region}") + + # Parse request data + data = request.get_json() + if not data or 'text' not in data: + debug_print("[TTS] Invalid request - missing 'text' field") + return jsonify({"error": "Missing 'text' field in request"}), 400 + + text = data.get('text', '').strip() + if not text: + debug_print("[TTS] Invalid request - text is empty") + return jsonify({"error": "Text cannot be empty"}), 400 + + # Get voice and speed settings + voice = data.get('voice', 'en-US-Andrew:DragonHDLatestNeural') + speed = float(data.get('speed', 1.0)) + + # Clamp speed to valid range + speed = max(0.5, min(2.0, speed)) + + debug_print(f"[TTS] Request params - voice: {voice}, speed: {speed}, text_length: {len(text)}") + + # Configure speech service + speech_config = speechsdk.SpeechConfig( + subscription=speech_key, + region=speech_region + ) + speech_config.speech_synthesis_voice_name = voice + + # Set output format to high quality + speech_config.set_speech_synthesis_output_format( + speechsdk.SpeechSynthesisOutputFormat.Audio48Khz192KBitRateMonoMp3 + ) + + # Create synthesizer with no audio output config (returns audio data in result) + speech_synthesizer = speechsdk.SpeechSynthesizer( + speech_config=speech_config, + audio_config=None + ) + + # Perform synthesis with retry logic for rate limiting (429 errors) + max_retries = 3 + retry_count = 0 + last_error = None + + while retry_count <= max_retries: + try: + # Build SSML if speed adjustment needed + if speed != 1.0: + debug_print(f"[TTS] Using SSML with speed adjustment: {speed}x (attempt {retry_count + 1}/{max_retries + 1})") + speed_percent = int(speed * 100) + ssml = f""" + + + + {text} + + + + """ + result = speech_synthesizer.speak_ssml_async(ssml).get() + else: + debug_print(f"[TTS] Using plain text synthesis (attempt {retry_count + 1}/{max_retries + 1})") + result = speech_synthesizer.speak_text_async(text).get() + + # Check for rate limiting or capacity issues + if result.reason == speechsdk.ResultReason.Canceled: + cancellation_details = result.cancellation_details + if cancellation_details.reason == speechsdk.CancellationReason.Error: + error_details = cancellation_details.error_details + + # Check if it's a rate limit error (429 or similar) + if "429" 
in error_details or "rate" in error_details.lower() or "quota" in error_details.lower() or "throttl" in error_details.lower(): + if retry_count < max_retries: + # Randomized delay between 50-800ms with exponential backoff + base_delay = 0.05 + (retry_count * 0.1) # 50ms, 150ms, 250ms base + jitter = random.uniform(0, 0.75) # Up to 750ms jitter + delay = base_delay + jitter + debug_print(f"[TTS] Rate limit detected (429), retrying in {delay*1000:.0f}ms (attempt {retry_count + 1}/{max_retries})") + time.sleep(delay) + retry_count += 1 + last_error = error_details + continue # Retry + else: + debug_print(f"[TTS] ERROR - Rate limit exceeded after {max_retries} retries") + return jsonify({"error": "Service temporarily unavailable due to high load. Please try again."}), 429 + else: + # Other error, don't retry + error_msg = f"Speech synthesis canceled: {cancellation_details.reason} - {error_details}" + debug_print(f"[TTS] ERROR - Synthesis failed: {error_msg}") + return jsonify({"error": error_msg}), 500 + + # Success - break out of retry loop + break + + except Exception as e: + # Network or other transient errors + if retry_count < max_retries and ("timeout" in str(e).lower() or "connection" in str(e).lower()): + delay = 0.05 + (retry_count * 0.1) + random.uniform(0, 0.75) + debug_print(f"[TTS] Transient error, retrying in {delay*1000:.0f}ms: {str(e)}") + log_event(f"TTS transient error, retrying: {str(e)}", level=logging.WARNING) + time.sleep(delay) + retry_count += 1 + last_error = str(e) + continue + else: + raise # Re-raise if not retryable or out of retries + + # Check result after retries + if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted: + debug_print(f"[TTS] Synthesis completed successfully - audio_size: {len(result.audio_data)} bytes") + if retry_count > 0: + debug_print(f"[TTS] Success after {retry_count} retries") + # Get audio data + audio_data = result.audio_data + + # Return audio stream + return send_file( + io.BytesIO(audio_data), + mimetype='audio/mpeg', + as_attachment=False, + download_name='speech.mp3' + ) + + elif result.reason == speechsdk.ResultReason.Canceled: + cancellation_details = result.cancellation_details + error_msg = f"Speech synthesis canceled: {cancellation_details.reason}" + if cancellation_details.reason == speechsdk.CancellationReason.Error: + error_msg += f" - {cancellation_details.error_details}" + debug_print(f"[TTS] ERROR - Synthesis failed: {error_msg}") + print(f"[ERROR] TTS synthesis failed: {error_msg}") + return jsonify({"error": error_msg}), 500 + else: + debug_print(f"[TTS] ERROR - Unknown synthesis error, reason: {result.reason}") + return jsonify({"error": "Unknown synthesis error"}), 500 + + except ValueError as e: + debug_print(f"[TTS] ERROR - Invalid parameter: {str(e)}") + return jsonify({"error": f"Invalid parameter: {str(e)}"}), 400 + except Exception as e: + debug_print(f"[TTS] ERROR - Exception: {str(e)}") + log_event(f"TTS synthesis failed: {str(e)}", level=logging.ERROR) + print(f"[ERROR] TTS synthesis exception: {str(e)}") + import traceback + traceback.print_exc() + return jsonify({"error": f"TTS synthesis failed: {str(e)}"}), 500 + + @app.route("/api/chat/tts/voices", methods=["GET"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def get_available_voices(): + """ + Returns list of available DragonHD voices for TTS + """ + debug_print("[TTS] Get available voices request received") + voices = [ + {"name": "de-DE-Florian:DragonHDLatestNeural", "gender": "Male", "language": 
"German", "status": "GA"}, + {"name": "de-DE-Seraphina:DragonHDLatestNeural", "gender": "Female", "language": "German", "status": "GA"}, + {"name": "en-US-Adam:DragonHDLatestNeural", "gender": "Male", "language": "English (US)", "status": "GA"}, + {"name": "en-US-Alloy:DragonHDLatestNeural", "gender": "Male", "language": "English (US)", "status": "Preview"}, + {"name": "en-US-Andrew:DragonHDLatestNeural", "gender": "Male", "language": "English (US)", "status": "GA", "note": ""}, + {"name": "en-US-Andrew2:DragonHDLatestNeural", "gender": "Male", "language": "English (US)", "status": "GA", "note": "Optimized for conversational content"}, + {"name": "en-US-Andrew3:DragonHDLatestNeural", "gender": "Male", "language": "English (US)", "status": "Preview", "note": "Optimized for podcast content"}, + {"name": "en-US-Aria:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "Preview"}, + {"name": "en-US-Ava:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "GA"}, + {"name": "en-US-Ava3:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "Preview", "note": "Optimized for podcast content"}, + {"name": "en-US-Brian:DragonHDLatestNeural", "gender": "Male", "language": "English (US)", "status": "GA"}, + {"name": "en-US-Davis:DragonHDLatestNeural", "gender": "Male", "language": "English (US)", "status": "GA"}, + {"name": "en-US-Emma:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "GA"}, + {"name": "en-US-Emma2:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "GA", "note": "Optimized for conversational content"}, + {"name": "en-US-Jenny:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "Preview"}, + {"name": "en-US-MultiTalker-Ava-Andrew:DragonHDLatestNeural", "gender": "Multi", "language": "English (US)", "status": "Preview", "note": "Multiple speakers"}, + {"name": "en-US-Nova:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "Preview"}, + {"name": "en-US-Phoebe:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "Preview"}, + {"name": "en-US-Serena:DragonHDLatestNeural", "gender": "Female", "language": "English (US)", "status": "Preview"}, + {"name": "en-US-Steffan:DragonHDLatestNeural", "gender": "Male", "language": "English (US)", "status": "GA"}, + {"name": "es-ES-Tristan:DragonHDLatestNeural", "gender": "Male", "language": "Spanish (Spain)", "status": "GA"}, + {"name": "es-ES-Ximena:DragonHDLatestNeural", "gender": "Female", "language": "Spanish (Spain)", "status": "GA"}, + {"name": "fr-FR-Remy:DragonHDLatestNeural", "gender": "Male", "language": "French", "status": "GA"}, + {"name": "fr-FR-Vivienne:DragonHDLatestNeural", "gender": "Female", "language": "French", "status": "GA"}, + {"name": "ja-JP-Masaru:DragonHDLatestNeural", "gender": "Male", "language": "Japanese", "status": "GA"}, + {"name": "ja-JP-Nanami:DragonHDLatestNeural", "gender": "Female", "language": "Japanese", "status": "GA"}, + {"name": "zh-CN-Xiaochen:DragonHDLatestNeural", "gender": "Female", "language": "Chinese (Simplified)", "status": "GA"}, + {"name": "zh-CN-Yunfan:DragonHDLatestNeural", "gender": "Male", "language": "Chinese (Simplified)", "status": "GA"} + ] + + return jsonify({"voices": voices}), 200 diff --git a/application/single_app/route_backend_user_agreement.py b/application/single_app/route_backend_user_agreement.py new file mode 100644 index 00000000..f46559ff --- /dev/null 
+++ b/application/single_app/route_backend_user_agreement.py @@ -0,0 +1,167 @@ +# route_backend_user_agreement.py + +from config import * +from functions_authentication import * +from functions_settings import get_settings +from functions_public_workspaces import find_public_workspace_by_id +from functions_activity_logging import log_user_agreement_accepted, has_user_accepted_agreement_today +from swagger_wrapper import swagger_route, get_auth_security +from functions_debug import debug_print + + +def register_route_backend_user_agreement(app): + """ + Register user agreement API endpoints under '/api/user_agreement/...' + These endpoints handle checking and recording user agreement acceptance. + """ + + @app.route("/api/user_agreement/check", methods=["GET"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_check_user_agreement(): + """ + GET /api/user_agreement/check + Check if the current user needs to accept a user agreement for a workspace. + + Query params: + workspace_id: The workspace ID + workspace_type: The workspace type ('personal', 'group', 'public', 'chat') + action_context: The action context ('file_upload', 'chat') - optional + + Returns: + { + needsAgreement: bool, + agreementText: str (if needs agreement), + enableDailyAcceptance: bool + } + """ + info = get_current_user_info() + user_id = info["userId"] + + workspace_id = request.args.get("workspace_id") + workspace_type = request.args.get("workspace_type") + action_context = request.args.get("action_context", "file_upload") + + if not workspace_id or not workspace_type: + return jsonify({"error": "workspace_id and workspace_type are required"}), 400 + + # Validate workspace type + valid_types = ["personal", "group", "public", "chat"] + if workspace_type not in valid_types: + return jsonify({"error": f"Invalid workspace_type. 
Must be one of: {', '.join(valid_types)}"}), 400 + + # Get global user agreement settings from app settings + settings = get_settings() + + # Check if user agreement is enabled globally + if not settings.get("enable_user_agreement", False): + return jsonify({ + "needsAgreement": False, + "agreementText": "", + "enableDailyAcceptance": False + }), 200 + + apply_to = settings.get("user_agreement_apply_to", []) + + # Check if the agreement applies to this workspace type or action + applies = False + if workspace_type in apply_to: + applies = True + elif action_context == "chat" and "chat" in apply_to: + applies = True + + if not applies: + return jsonify({ + "needsAgreement": False, + "agreementText": "", + "enableDailyAcceptance": False + }), 200 + + # Check if daily acceptance is enabled and user already accepted today + enable_daily_acceptance = settings.get("enable_user_agreement_daily", False) + + if enable_daily_acceptance: + already_accepted = has_user_accepted_agreement_today(user_id, workspace_type, workspace_id) + if already_accepted: + debug_print(f"[USER_AGREEMENT] User {user_id} already accepted today for {workspace_type} workspace {workspace_id}") + return jsonify({ + "needsAgreement": False, + "agreementText": "", + "enableDailyAcceptance": True, + "alreadyAcceptedToday": True + }), 200 + + # User needs to accept the agreement + return jsonify({ + "needsAgreement": True, + "agreementText": settings.get("user_agreement_text", ""), + "enableDailyAcceptance": enable_daily_acceptance + }), 200 + + @app.route("/api/user_agreement/accept", methods=["POST"]) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def api_accept_user_agreement(): + """ + POST /api/user_agreement/accept + Record that a user has accepted the user agreement for a workspace. + + Body JSON: + { + workspace_id: str, + workspace_type: str ('personal', 'group', 'public'), + action_context: str (optional, e.g., 'file_upload', 'chat') + } + + Returns: + { success: bool, message: str } + """ + info = get_current_user_info() + user_id = info["userId"] + + data = request.get_json() or {} + workspace_id = data.get("workspace_id") + workspace_type = data.get("workspace_type") + action_context = data.get("action_context", "file_upload") + + if not workspace_id or not workspace_type: + return jsonify({"error": "workspace_id and workspace_type are required"}), 400 + + # Validate workspace type + valid_types = ["personal", "group", "public"] + if workspace_type not in valid_types: + return jsonify({"error": f"Invalid workspace_type. 
Must be one of: {', '.join(valid_types)}"}), 400 + + # Get workspace name for logging + workspace_name = None + if workspace_type == "public": + ws = find_public_workspace_by_id(workspace_id) + if ws: + workspace_name = ws.get("name", "") + + # Log the acceptance + try: + log_user_agreement_accepted( + user_id=user_id, + workspace_type=workspace_type, + workspace_id=workspace_id, + workspace_name=workspace_name, + action_context=action_context + ) + + debug_print(f"[USER_AGREEMENT] Recorded acceptance: user {user_id}, {workspace_type} workspace {workspace_id}") + + return jsonify({ + "success": True, + "message": "User agreement acceptance recorded" + }), 200 + + except Exception as e: + debug_print(f"[USER_AGREEMENT] Error recording acceptance: {str(e)}") + log_event(f"Error recording user agreement acceptance: {str(e)}", level=logging.ERROR) + return jsonify({ + "success": False, + "error": f"Failed to record acceptance: {str(e)}" + }), 500 diff --git a/application/single_app/route_backend_users.py b/application/single_app/route_backend_users.py index 99320f6e..d2ca52f8 100644 --- a/application/single_app/route_backend_users.py +++ b/application/single_app/route_backend_users.py @@ -91,7 +91,7 @@ def api_get_user_info(user_id): item=user_id, partition_key=user_id ) - print(f"[DEBUG] /api/user/info/{user_id} → doc: {user_doc}", flush=True) + print(f"/api/user/info/{user_id} → doc: {user_doc}", flush=True) return jsonify({ "user_id": user_id, "email": user_doc.get("email", ""), @@ -147,7 +147,21 @@ def user_settings(): # Basic validation could go here (e.g., check allowed keys, value types) # Example: Allowed keys - allowed_keys = {'activeGroupOid', 'layoutPreference', 'splitSizesPreference', 'dockedSidebarHidden', 'darkModeEnabled', 'preferredModelDeployment', 'agents', 'plugins', "selected_agent", 'navLayout', 'profileImage', 'enable_agents'} # Add others as needed + allowed_keys = { + 'activeGroupOid', 'layoutPreference', 'splitSizesPreference', 'dockedSidebarHidden', + 'darkModeEnabled', 'preferredModelDeployment', 'agents', 'plugins', "selected_agent", + 'navLayout', 'profileImage', 'enable_agents', 'streamingEnabled', 'reasoningEffortSettings', + # Public directory and workspace settings + 'publicDirectorySavedLists', 'publicDirectorySettings', 'activePublicWorkspaceOid', + # Chat UI settings + 'navbar_layout', 'chatLayout', 'showChatTitle', 'chatSplitSizes', + # Microphone permission settings + 'microphonePermissionState', + # Text-to-speech settings + 'ttsEnabled', 'ttsVoice', 'ttsSpeed', 'ttsAutoplay', + # Metrics and other settings + 'metrics', 'lastUpdated' + } # Add others as needed invalid_keys = set(settings_to_update.keys()) - allowed_keys if invalid_keys: print(f"Warning: Received invalid settings keys: {invalid_keys}") diff --git a/application/single_app/route_enhanced_citations.py b/application/single_app/route_enhanced_citations.py index f2eb7880..c81ef225 100644 --- a/application/single_app/route_enhanced_citations.py +++ b/application/single_app/route_enhanced_citations.py @@ -15,7 +15,8 @@ from functions_group import get_user_groups from functions_public_workspaces import get_user_visible_public_workspace_ids_from_settings from swagger_wrapper import swagger_route, get_auth_security -from config import CLIENTS, storage_account_user_documents_container_name, storage_account_group_documents_container_name, storage_account_public_documents_container_name +from config import CLIENTS, storage_account_user_documents_container_name, 
storage_account_group_documents_container_name, storage_account_public_documents_container_name, IMAGE_EXTENSIONS, VIDEO_EXTENSIONS, AUDIO_EXTENSIONS +from functions_debug import debug_print def register_enhanced_citations_routes(app): """Register enhanced citations routes""" @@ -48,9 +49,8 @@ def get_enhanced_citation_image(): # Check if it's an image file file_name = raw_doc['file_name'] ext = file_name.lower().split('.')[-1] if '.' in file_name else '' - image_extensions = ['jpg', 'jpeg', 'png', 'bmp', 'tiff', 'tif', 'heif'] - if ext not in image_extensions: + if ext not in IMAGE_EXTENSIONS: return jsonify({"error": "File is not an image"}), 400 # Serve the image content directly @@ -87,9 +87,8 @@ def get_enhanced_citation_video(): # Check if it's a video file file_name = raw_doc['file_name'] ext = file_name.lower().split('.')[-1] if '.' in file_name else '' - video_extensions = ['mp4', 'mov', 'avi', 'mkv', 'flv', 'webm', 'wmv'] - if ext not in video_extensions: + if ext not in VIDEO_EXTENSIONS: return jsonify({"error": "File is not a video"}), 400 # Serve the video content directly @@ -126,9 +125,8 @@ def get_enhanced_citation_audio(): # Check if it's an audio file file_name = raw_doc['file_name'] ext = file_name.lower().split('.')[-1] if '.' in file_name else '' - audio_extensions = ['mp3', 'wav', 'ogg', 'aac', 'flac', 'm4a'] - if ext not in audio_extensions: + if ext not in AUDIO_EXTENSIONS: return jsonify({"error": "File is not an audio file"}), 400 # Serve the audio content directly @@ -148,10 +146,14 @@ def get_enhanced_citation_pdf(): """ doc_id = request.args.get("doc_id") page_number = request.args.get("page", default=1, type=int) + show_all = request.args.get("show_all", "false").lower() in ['true', '1', 'yes'] + download = request.args.get("download", default=False, type=bool) if not doc_id: return jsonify({"error": "doc_id is required"}), 400 + debug_print(f"Enhanced citations PDF request - doc_id: {doc_id}, page: {page_number}, show_all: {show_all}") + user_id = get_current_user_id() if not user_id: return jsonify({"error": "User not authenticated"}), 401 @@ -171,8 +173,12 @@ def get_enhanced_citation_pdf(): if ext != 'pdf': return jsonify({"error": "File is not a PDF"}), 400 + # For download, serve the original PDF without page extraction + if download: + return serve_enhanced_citation_content(raw_doc, content_type='application/pdf', force_download=True) + # Serve the PDF content directly with page extraction logic - return serve_enhanced_citation_pdf_content(raw_doc, page_number) + return serve_enhanced_citation_pdf_content(raw_doc, page_number, show_all) except Exception as e: return jsonify({"error": str(e)}), 500 @@ -255,7 +261,7 @@ def get_blob_name(raw_doc, workspace_type): else: return f"{raw_doc['user_id']}/{raw_doc['file_name']}" -def serve_enhanced_citation_content(raw_doc, content_type=None): +def serve_enhanced_citation_content(raw_doc, content_type=None, force_download=False): """ Server-side rendering: Serve enhanced citation file content directly Based on the logic from the existing view_pdf function but serves content directly @@ -299,6 +305,9 @@ def serve_enhanced_citation_content(raw_doc, content_type=None): else: content_type = 'application/octet-stream' + # Set content disposition based on force_download parameter + disposition = 'attachment' if force_download else 'inline' + # Create Response with the blob content response = Response( content, @@ -306,7 +315,7 @@ def serve_enhanced_citation_content(raw_doc, content_type=None): headers={ 
'Content-Length': str(len(content)), 'Cache-Control': 'private, max-age=300', # Cache for 5 minutes - 'Content-Disposition': f'inline; filename="{raw_doc["file_name"]}"', + 'Content-Disposition': f'{disposition}; filename="{raw_doc["file_name"]}"', 'Accept-Ranges': 'bytes' # Support range requests for video/audio } ) @@ -317,11 +326,18 @@ def serve_enhanced_citation_content(raw_doc, content_type=None): print(f"Error serving enhanced citation content: {e}") raise Exception(f"Failed to load content: {str(e)}") -def serve_enhanced_citation_pdf_content(raw_doc, page_number): +def serve_enhanced_citation_pdf_content(raw_doc, page_number, show_all=False): """ Serve PDF content with page extraction (±1 page logic from original view_pdf) Based on the logic from the existing view_pdf function but serves content directly + + Args: + raw_doc: Document metadata + page_number: Current page number + show_all: If True, show all pages instead of just ±1 pages around current """ + debug_print(f"serve_enhanced_citation_pdf_content called with show_all: {show_all}") + import io import uuid import tempfile @@ -360,17 +376,43 @@ def serve_enhanced_citation_pdf_content(raw_doc, page_number): os.remove(temp_pdf_path) return jsonify({"error": "Requested page out of range"}), 400 - # Default to just the current page - start_idx = current_idx - end_idx = current_idx - - # If a previous page exists, include it - if current_idx > 0: - start_idx = current_idx - 1 - - # If a next page exists, include it - if current_idx < total_pages - 1: - end_idx = current_idx + 1 + if show_all: + # Show all pages + start_idx = 0 + end_idx = total_pages - 1 + new_page_number = page_number # Keep original page number + else: + # Default to just the current page + start_idx = current_idx + end_idx = current_idx + + # If a previous page exists, include it + if current_idx > 0: + start_idx = current_idx - 1 + + # If a next page exists, include it + if current_idx < total_pages - 1: + end_idx = current_idx + 1 + + # Determine new_page_number (within the sub-document) + extracted_count = end_idx - start_idx + 1 + + if extracted_count == 1: + # Only current page + new_page_number = 1 + elif extracted_count == 3: + # current page is in the middle + new_page_number = 2 + else: + # Exactly 2 pages + # If start_idx == current_idx, the user is on the first page + # If current_idx == end_idx, the user is on the second page + if start_idx == current_idx: + # e.g. pages = [current, next] + new_page_number = 1 + else: + # e.g. 
pages = [previous, current] + new_page_number = 2 # Create new PDF with only start_idx..end_idx extracted_pdf = fitz.open() @@ -381,37 +423,31 @@ def serve_enhanced_citation_pdf_content(raw_doc, page_number): extracted_pdf.close() pdf_document.close() - # Determine new_page_number (within the sub-document) - extracted_count = end_idx - start_idx + 1 + # Return the extracted PDF + headers = { + 'Content-Length': str(len(extracted_content)), + 'Cache-Control': 'private, max-age=300', # Cache for 5 minutes + 'Content-Disposition': f'inline; filename="{raw_doc["file_name"]}"', + 'X-Sub-PDF-Page': str(new_page_number), # Custom header with page info + 'Accept-Ranges': 'bytes' + } - if extracted_count == 1: - # Only current page - new_page_number = 1 - elif extracted_count == 3: - # current page is in the middle - new_page_number = 2 + # When show_all is True, allow iframe embedding + if show_all: + debug_print(f"Setting CSP headers for iframe embedding (show_all={show_all})") + headers['Content-Security-Policy'] = ( + "default-src 'self'; " + "frame-ancestors 'self'; " # Allow embedding in same origin + "object-src 'none';" + ) + headers['X-Frame-Options'] = 'SAMEORIGIN' # Allow same-origin framing else: - # Exactly 2 pages - # If start_idx == current_idx, the user is on the first page - # If current_idx == end_idx, the user is on the second page - if start_idx == current_idx: - # e.g. pages = [current, next] - new_page_number = 1 - else: - # e.g. pages = [previous, current] - new_page_number = 2 - - # Return the extracted PDF + debug_print(f"NOT setting CSP headers for iframe embedding (show_all={show_all})") + response = Response( extracted_content, content_type='application/pdf', - headers={ - 'Content-Length': str(len(extracted_content)), - 'Cache-Control': 'private, max-age=300', # Cache for 5 minutes - 'Content-Disposition': f'inline; filename="{raw_doc["file_name"]}"', - 'X-Sub-PDF-Page': str(new_page_number), # Custom header with page info - 'Accept-Ranges': 'bytes' - } + headers=headers ) return response diff --git a/application/single_app/route_external_health.py b/application/single_app/route_external_health.py index 4e22decb..ce4508d0 100644 --- a/application/single_app/route_external_health.py +++ b/application/single_app/route_external_health.py @@ -9,6 +9,7 @@ def register_route_external_health(app): @app.route('/external/healthcheck', methods=['GET']) + @swagger_route(security=get_auth_security()) @swagger_route() @enabled_required("enable_external_healthcheck") def external_health_check(): diff --git a/application/single_app/route_external_public_documents.py b/application/single_app/route_external_public_documents.py index b4a3ea7a..d3002d53 100644 --- a/application/single_app/route_external_public_documents.py +++ b/application/single_app/route_external_public_documents.py @@ -282,6 +282,9 @@ def external_patch_public_document(document_id): return jsonify({'error': 'Active public workspace not found'}), 404 data = request.get_json() + + # Track which fields were updated + updated_fields = {} try: if 'title' in data: @@ -291,6 +294,7 @@ def external_patch_public_document(document_id): user_id=user_id, title=data['title'] ) + updated_fields['title'] = data['title'] if 'abstract' in data: update_document( document_id=document_id, @@ -298,6 +302,7 @@ def external_patch_public_document(document_id): user_id=user_id, abstract=data['abstract'] ) + updated_fields['abstract'] = data['abstract'] if 'keywords' in data: if isinstance(data['keywords'], list): update_document( @@ -306,13 
+311,16 @@ def external_patch_public_document(document_id): user_id=user_id, keywords=data['keywords'] ) + updated_fields['keywords'] = data['keywords'] else: + keywords_list = [kw.strip() for kw in data['keywords'].split(',')] update_document( document_id=document_id, public_workspace_id=active_workspace_id, user_id=user_id, - keywords=[kw.strip() for kw in data['keywords'].split(',')] + keywords=keywords_list ) + updated_fields['keywords'] = keywords_list if 'publication_date' in data: update_document( document_id=document_id, @@ -320,6 +328,7 @@ def external_patch_public_document(document_id): user_id=user_id, publication_date=data['publication_date'] ) + updated_fields['publication_date'] = data['publication_date'] if 'document_classification' in data: update_document( document_id=document_id, @@ -327,6 +336,7 @@ def external_patch_public_document(document_id): user_id=user_id, document_classification=data['document_classification'] ) + updated_fields['document_classification'] = data['document_classification'] if 'authors' in data: if isinstance(data['authors'], list): update_document( @@ -335,12 +345,31 @@ def external_patch_public_document(document_id): user_id=user_id, authors=data['authors'] ) + updated_fields['authors'] = data['authors'] else: + authors_list = [data['authors']] update_document( document_id=document_id, public_workspace_id=active_workspace_id, user_id=user_id, - authors=[data['authors']] + authors=authors_list + ) + updated_fields['authors'] = authors_list + + # Log the metadata update transaction if any fields were updated + if updated_fields: + from functions_documents import get_document + from functions_activity_logging import log_document_metadata_update_transaction + doc = get_document(user_id, document_id, public_workspace_id=active_workspace_id) + if doc: + log_document_metadata_update_transaction( + user_id=user_id, + document_id=document_id, + workspace_type='public', + file_name=doc.get('file_name', 'Unknown'), + updated_fields=updated_fields, + file_type=doc.get('file_type'), + public_workspace_id=active_workspace_id ) return jsonify({'message': 'Public document metadata updated successfully'}), 200 diff --git a/application/single_app/route_frontend_admin_settings.py b/application/single_app/route_frontend_admin_settings.py index 52e33c80..ae361984 100644 --- a/application/single_app/route_frontend_admin_settings.py +++ b/application/single_app/route_frontend_admin_settings.py @@ -4,6 +4,7 @@ from functions_documents import * from functions_authentication import * from functions_settings import * +from functions_activity_logging import log_web_search_consent_acceptance from functions_logging import * from swagger_wrapper import swagger_route, get_auth_security from datetime import datetime, timedelta @@ -36,6 +37,8 @@ def admin_settings(): settings['require_member_of_create_public_workspace'] = False if 'require_member_of_safety_violation_admin' not in settings: settings['require_member_of_safety_violation_admin'] = False + if 'require_member_of_control_center_admin' not in settings: + settings['require_member_of_control_center_admin'] = False if 'require_member_of_feedback_admin' not in settings: settings['require_member_of_feedback_admin'] = False # --- End NEW Default Checks --- @@ -63,6 +66,7 @@ def admin_settings(): {"label": "Acceptable Use Policy", "url": "https://example.com/policy"}, {"label": "Prompt Ideas", "url": "https://example.com/prompts"} ] + # --- End Refined Default Checks --- if 'enable_appinsights_global_logging' not in settings: 
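Returning briefly to `serve_enhanced_citation_pdf_content` in `route_enhanced_citations.py` above: unless `show_all` is requested, the route extracts a window of at most three pages (previous, current, next) and reports where the requested page landed inside that sub-PDF via the `X-Sub-PDF-Page` header. Below is a minimal sketch of just that window arithmetic, assuming a 1-based `page_number` and `total_pages >= 1`; the standalone `page_window` helper is for illustration and does not exist in the codebase.

```python
# Illustrative sketch of the page-window logic used by the enhanced-citations PDF route.
def page_window(page_number, total_pages, show_all=False):
    """Return (start_idx, end_idx, new_page_number) using 0-based page indices."""
    current_idx = page_number - 1
    if show_all:
        # Whole document is returned; the caller keeps the original page number.
        return 0, total_pages - 1, page_number

    start_idx = max(current_idx - 1, 0)               # include previous page if it exists
    end_idx = min(current_idx + 1, total_pages - 1)   # include next page if it exists

    extracted_count = end_idx - start_idx + 1
    if extracted_count == 1:
        new_page_number = 1   # only the current page was extracted
    elif extracted_count == 3:
        new_page_number = 2   # current page sits in the middle
    else:
        # Two pages: current is first when there was no previous page, second otherwise.
        new_page_number = 1 if start_idx == current_idx else 2
    return start_idx, end_idx, new_page_number

# For example, page 1 of a 10-page PDF yields (0, 1, 1); page 5 yields (3, 5, 2).
print(page_window(1, 10))   # (0, 1, 1)
print(page_window(5, 10))   # (3, 5, 2)
```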
@@ -75,6 +79,13 @@ def admin_settings(): settings['per_user_semantic_kernel'] = False if 'enable_semantic_kernel' not in settings: settings['enable_semantic_kernel'] = False + + if 'web_search_consent_accepted' not in settings: + settings['web_search_consent_accepted'] = False + + # --- Add default for swagger documentation --- + if 'enable_swagger' not in settings: + settings['enable_swagger'] = True # Default enabled for development/testing if 'enable_time_plugin' not in settings: settings['enable_time_plugin'] = False if 'enable_http_plugin' not in settings: @@ -128,6 +139,9 @@ def admin_settings(): 'name': 'default_agent', 'is_global': True } + log_event("Error retrieving global agents for default selection.", level=logging.ERROR) + debug_print("Error retrieving global agents for default selection.") + if 'allow_user_agents' not in settings: settings['allow_user_agents'] = False if 'allow_user_custom_agent_endpoints' not in settings: @@ -140,6 +154,12 @@ def admin_settings(): settings['allow_group_custom_agent_endpoints'] = False if 'allow_group_plugins' not in settings: settings['allow_group_plugins'] = False + if 'enable_agent_template_gallery' not in settings: + settings['enable_agent_template_gallery'] = True + if 'agent_templates_allow_user_submission' not in settings: + settings['agent_templates_allow_user_submission'] = True + if 'agent_templates_require_approval' not in settings: + settings['agent_templates_require_approval'] = True # --- Add defaults for classification banner --- if 'classification_banner_enabled' not in settings: @@ -148,10 +168,36 @@ def admin_settings(): settings['classification_banner_text'] = '' if 'classification_banner_color' not in settings: settings['classification_banner_color'] = '#ffc107' # Bootstrap warning color + if 'classification_banner_text_color' not in settings: + settings['classification_banner_text_color'] = '#ffffff' # White text by default + + # --- Add defaults for user agreement --- + if 'enable_user_agreement' not in settings: + settings['enable_user_agreement'] = False + if 'user_agreement_text' not in settings: + settings['user_agreement_text'] = '' + if 'user_agreement_apply_to' not in settings: + settings['user_agreement_apply_to'] = [] + if 'enable_user_agreement_daily' not in settings: + settings['enable_user_agreement_daily'] = False - # --- Add defaults for left nav --- + # --- Add defaults for key vault + if 'enable_key_vault_secret_storage' not in settings: + settings['enable_key_vault_secret_storage'] = False + if 'key_vault_name' not in settings: + settings['key_vault_name'] = '' + if 'key_vault_identity' not in settings: + settings['key_vault_identity'] = '' + + # --- Add defaults for left nav --- if 'enable_left_nav_default' not in settings: settings['enable_left_nav_default'] = True + + # --- Add defaults for multimodal vision --- + if 'enable_multimodal_vision' not in settings: + settings['enable_multimodal_vision'] = False + if 'multimodal_vision_model' not in settings: + settings['multimodal_vision_model'] = '' if request.method == 'GET': # --- Model fetching logic remains the same --- @@ -167,7 +213,7 @@ def admin_settings(): pass # Replace with actual logic except Exception as e: print(f"Error retrieving GPT deployments: {e}") - # ... similar try/except for embedding and image models ... 
+ log_event(f"Error retrieving GPT deployments: {e}", level=logging.ERROR) # Check for application updates current_version = app.config['VERSION'] @@ -210,14 +256,21 @@ def admin_settings(): settings.update(new_settings) except Exception as e: print(f"Error checking for updates: {e}") + log_event(f"Error checking for updates: {e}", level=logging.ERROR) # Get the persisted values for template rendering update_available = settings.get('update_available', False) latest_version = settings.get('latest_version_available') + + # Get user settings for profile and navigation + user_id = get_current_user_id() + user_settings = get_user_settings(user_id) return render_template( 'admin_settings.html', + app_settings=settings, # Admin needs unsanitized settings to view/edit all configuration settings=settings, + user_settings=user_settings, update_available=update_available, latest_version=latest_version, download_url=download_url @@ -229,6 +282,7 @@ def admin_settings(): if request.method == 'POST': form_data = request.form # Use a variable for easier access + user_id = get_current_user_id() # --- Fetch all other form data as before --- app_title = form_data.get('app_title', 'AI Chat Application') @@ -238,12 +292,45 @@ def admin_settings(): enable_video_file_support = form_data.get('enable_video_file_support') == 'on' enable_audio_file_support = form_data.get('enable_audio_file_support') == 'on' enable_extract_meta_data = form_data.get('enable_extract_meta_data') == 'on' + + # Vision settings + enable_multimodal_vision = form_data.get('enable_multimodal_vision') == 'on' + multimodal_vision_model = form_data.get('multimodal_vision_model', '') require_member_of_create_group = form_data.get('require_member_of_create_group') == 'on' require_member_of_create_public_workspace = form_data.get('require_member_of_create_public_workspace') == 'on' require_member_of_safety_violation_admin = form_data.get('require_member_of_safety_violation_admin') == 'on' + require_member_of_control_center_admin = form_data.get('require_member_of_control_center_admin') == 'on' + require_member_of_control_center_dashboard_reader = form_data.get('require_member_of_control_center_dashboard_reader') == 'on' require_member_of_feedback_admin = form_data.get('require_member_of_feedback_admin') == 'on' + web_search_consent_message = ( + "When you use Grounding with Bing Search, your customer data is transferred " + "outside of the Azure compliance boundary to the Grounding with Bing Search service. " + "Grounding with Bing Search is not subject to the same data processing terms " + "(including location of processing) and does not have the same compliance standards " + "and certifications as the Azure AI Agent Service, as described in the " + "Grounding with Bing Search TOU (https://www.microsoft.com/en-us/bing/apis/grounding-legal). " + "It is your responsibility to assess whether use of Grounding with Bing Search in your agent " + "meets your needs and requirements." 
+ ) + web_search_consent_accepted = form_data.get('web_search_consent_accepted') == 'true' + requested_enable_web_search = form_data.get('enable_web_search') == 'on' + enable_web_search = requested_enable_web_search and web_search_consent_accepted + + if requested_enable_web_search and not web_search_consent_accepted: + flash('Web search requires consent before it can be enabled.', 'warning') + + if enable_web_search and web_search_consent_accepted and not settings.get('web_search_consent_accepted'): + admin_user = session.get('user', {}) + admin_email = admin_user.get('preferred_username', admin_user.get('email', 'unknown')) + log_web_search_consent_acceptance( + user_id=user_id, + admin_email=admin_email, + consent_text=web_search_consent_message, + source='admin_settings' + ) + # --- Handle Document Classification Toggle --- enable_document_classification = form_data.get('enable_document_classification') == 'on' @@ -332,25 +419,29 @@ def admin_settings(): except Exception as e: print(f"Error parsing gpt_model_json: {e}") flash('Error parsing GPT model data. Changes may not be saved.', 'warning') + log_event(f"Error parsing GPT model data: {e}", level=logging.ERROR) gpt_model_obj = settings.get('gpt_model', {'selected': [], 'all': []}) # Fallback - # ... similar try/except for embedding and image models ... + try: embedding_model_obj = json.loads(embedding_model_json) if embedding_model_json else {'selected': [], 'all': []} except Exception as e: print(f"Error parsing embedding_model_json: {e}") flash('Error parsing Embedding model data. Changes may not be saved.', 'warning') + log_event(f"Error parsing Embedding model data: {e}", level=logging.ERROR) embedding_model_obj = settings.get('embedding_model', {'selected': [], 'all': []}) # Fallback try: image_gen_model_obj = json.loads(image_gen_model_json) if image_gen_model_json else {'selected': [], 'all': []} except Exception as e: print(f"Error parsing image_gen_model_json: {e}") flash('Error parsing Image Gen model data. 
Changes may not be saved.', 'warning') + log_event(f"Error parsing Image Gen model data: {e}", level=logging.ERROR) image_gen_model_obj = settings.get('image_gen_model', {'selected': [], 'all': []}) # Fallback # --- Extract banner fields from form_data --- classification_banner_enabled = form_data.get('classification_banner_enabled') == 'on' classification_banner_text = form_data.get('classification_banner_text', '').strip() classification_banner_color = form_data.get('classification_banner_color', '#ffc107').strip() + classification_banner_text_color = form_data.get('classification_banner_text_color', '#ffffff').strip() # --- Application Insights Logging Toggle --- enable_appinsights_global_logging = form_data.get('enable_appinsights_global_logging') == 'on' @@ -377,24 +468,47 @@ def admin_settings(): if debug_timer_value < min_val or debug_timer_value > max_val: debug_timer_value = min(max(debug_timer_value, min_val), max_val) + # Get existing timer settings to check if they've changed + existing_debug_timer_enabled = settings.get('debug_logging_timer_enabled', False) + existing_debug_timer_value = settings.get('debug_timer_value', 1) + existing_debug_timer_unit = settings.get('debug_timer_unit', 'hours') + existing_debug_logging_enabled = settings.get('enable_debug_logging', False) + existing_debug_turnoff_time = settings.get('debug_logging_turnoff_time') + + # Determine if timer settings have changed + timer_settings_changed = ( + debug_logging_timer_enabled != existing_debug_timer_enabled or + debug_timer_value != existing_debug_timer_value or + debug_timer_unit != existing_debug_timer_unit + ) + debug_logging_newly_enabled = enable_debug_logging and not existing_debug_logging_enabled + # Calculate debug logging turnoff time if timer is enabled and debug logging is on if enable_debug_logging and debug_logging_timer_enabled: - now = datetime.now() - - if debug_timer_unit == 'minutes': - delta = timedelta(minutes=debug_timer_value) - elif debug_timer_unit == 'hours': - delta = timedelta(hours=debug_timer_value) - elif debug_timer_unit == 'days': - delta = timedelta(days=debug_timer_value) - elif debug_timer_unit == 'weeks': - delta = timedelta(weeks=debug_timer_value) + # Only recalculate turnoff time if: + # 1. Timer settings have changed (value, unit, or enabled state), OR + # 2. Debug logging was just enabled, OR + # 3. 
No existing turnoff time exists + if timer_settings_changed or debug_logging_newly_enabled or not existing_debug_turnoff_time: + now = datetime.now() + + if debug_timer_unit == 'minutes': + delta = timedelta(minutes=debug_timer_value) + elif debug_timer_unit == 'hours': + delta = timedelta(hours=debug_timer_value) + elif debug_timer_unit == 'days': + delta = timedelta(days=debug_timer_value) + elif debug_timer_unit == 'weeks': + delta = timedelta(weeks=debug_timer_value) + else: + delta = timedelta(hours=1) # default fallback + + debug_logging_turnoff_time = now + delta + # Convert to ISO string for JSON serialization + debug_logging_turnoff_time_str = debug_logging_turnoff_time.isoformat() else: - delta = timedelta(hours=1) # default fallback - - debug_logging_turnoff_time = now + delta - # Convert to ISO string for JSON serialization - debug_logging_turnoff_time_str = debug_logging_turnoff_time.isoformat() + # Preserve existing turnoff time + debug_logging_turnoff_time_str = existing_debug_turnoff_time else: debug_logging_turnoff_time_str = None @@ -403,6 +517,7 @@ def admin_settings(): file_timer_value = int(form_data.get('file_timer_value', 1)) file_timer_unit = form_data.get('file_timer_unit', 'hours') file_processing_logs_turnoff_time = None + enable_file_processing_logs = form_data.get('enable_file_processing_logs') == 'on' # Validate file timer values if file_timer_unit in timer_limits: @@ -410,28 +525,103 @@ def admin_settings(): if file_timer_value < min_val or file_timer_value > max_val: file_timer_value = min(max(file_timer_value, min_val), max_val) + # Get existing file timer settings to check if they've changed + existing_file_timer_enabled = settings.get('file_processing_logs_timer_enabled', False) + existing_file_timer_value = settings.get('file_timer_value', 1) + existing_file_timer_unit = settings.get('file_timer_unit', 'hours') + existing_file_processing_logs_enabled = settings.get('enable_file_processing_logs', False) + existing_file_turnoff_time = settings.get('file_processing_logs_turnoff_time') + + # Determine if timer settings have changed + file_timer_settings_changed = ( + file_processing_logs_timer_enabled != existing_file_timer_enabled or + file_timer_value != existing_file_timer_value or + file_timer_unit != existing_file_timer_unit + ) + file_processing_logs_newly_enabled = enable_file_processing_logs and not existing_file_processing_logs_enabled + # Calculate file processing logs turnoff time if timer is enabled and file processing logs are on - enable_file_processing_logs = form_data.get('enable_file_processing_logs') == 'on' if enable_file_processing_logs and file_processing_logs_timer_enabled: - now = datetime.now() - - if file_timer_unit == 'minutes': - delta = timedelta(minutes=file_timer_value) - elif file_timer_unit == 'hours': - delta = timedelta(hours=file_timer_value) - elif file_timer_unit == 'days': - delta = timedelta(days=file_timer_value) - elif file_timer_unit == 'weeks': - delta = timedelta(weeks=file_timer_value) + # Only recalculate turnoff time if: + # 1. Timer settings have changed (value, unit, or enabled state), OR + # 2. File processing logs was just enabled, OR + # 3. 
No existing turnoff time exists + if file_timer_settings_changed or file_processing_logs_newly_enabled or not existing_file_turnoff_time: + now = datetime.now() + + if file_timer_unit == 'minutes': + delta = timedelta(minutes=file_timer_value) + elif file_timer_unit == 'hours': + delta = timedelta(hours=file_timer_value) + elif file_timer_unit == 'days': + delta = timedelta(days=file_timer_value) + elif file_timer_unit == 'weeks': + delta = timedelta(weeks=file_timer_value) + else: + delta = timedelta(hours=1) # default fallback + + file_processing_logs_turnoff_time = now + delta + # Convert to ISO string for JSON serialization + file_processing_logs_turnoff_time_str = file_processing_logs_turnoff_time.isoformat() else: - delta = timedelta(hours=1) # default fallback - - file_processing_logs_turnoff_time = now + delta - # Convert to ISO string for JSON serialization - file_processing_logs_turnoff_time_str = file_processing_logs_turnoff_time.isoformat() + # Preserve existing turnoff time + file_processing_logs_turnoff_time_str = existing_file_turnoff_time else: file_processing_logs_turnoff_time_str = None + # --- Retention Policy Settings --- + enable_retention_policy_personal = form_data.get('enable_retention_policy_personal') == 'on' + enable_retention_policy_group = form_data.get('enable_retention_policy_group') == 'on' + enable_retention_policy_public = form_data.get('enable_retention_policy_public') == 'on' + retention_policy_execution_hour = int(form_data.get('retention_policy_execution_hour', 2)) + + # Default retention policy values for each workspace type + default_retention_conversation_personal = form_data.get('default_retention_conversation_personal', 'none') + default_retention_document_personal = form_data.get('default_retention_document_personal', 'none') + default_retention_conversation_group = form_data.get('default_retention_conversation_group', 'none') + default_retention_document_group = form_data.get('default_retention_document_group', 'none') + default_retention_conversation_public = form_data.get('default_retention_conversation_public', 'none') + default_retention_document_public = form_data.get('default_retention_document_public', 'none') + + # Validate execution hour (0-23) + if retention_policy_execution_hour < 0 or retention_policy_execution_hour > 23: + retention_policy_execution_hour = 2 # Default to 2 AM + + # Calculate next scheduled execution time if any retention policy is enabled + retention_policy_next_run = None + if enable_retention_policy_personal or enable_retention_policy_group or enable_retention_policy_public: + now = datetime.now(timezone.utc) + # Create next run datetime with the specified hour + next_run = now.replace(hour=retention_policy_execution_hour, minute=0, second=0, microsecond=0) + + # If the scheduled time has already passed today, schedule for tomorrow + if next_run <= now: + next_run = next_run + timedelta(days=1) + + retention_policy_next_run = next_run.isoformat() + + # --- User Agreement Settings --- + enable_user_agreement = form_data.get('enable_user_agreement') == 'on' + user_agreement_text = form_data.get('user_agreement_text', '').strip() + enable_user_agreement_daily = form_data.get('enable_user_agreement_daily') == 'on' + + # Build apply_to list from checkboxes + user_agreement_apply_to = [] + if form_data.get('user_agreement_apply_personal') == 'on': + user_agreement_apply_to.append('personal') + if form_data.get('user_agreement_apply_group') == 'on': + user_agreement_apply_to.append('group') + if 
form_data.get('user_agreement_apply_public') == 'on': + user_agreement_apply_to.append('public') + if form_data.get('user_agreement_apply_chat') == 'on': + user_agreement_apply_to.append('chat') + + # Validate word count (max 200 words) + if enable_user_agreement and user_agreement_text: + word_count = len(user_agreement_text.split()) + if word_count > 200: + flash('User Agreement text exceeds 200 word limit. Please shorten the text.', 'warning') + # --- Authentication & Redirect Settings --- enable_front_door = form_data.get('enable_front_door') == 'on' front_door_url = form_data.get('front_door_url', '').strip() @@ -478,8 +668,12 @@ def is_valid_url(url): 'enable_dark_mode_default': form_data.get('enable_dark_mode_default') == 'on', 'enable_left_nav_default': form_data.get('enable_left_nav_default') == 'on', 'enable_external_healthcheck': form_data.get('enable_external_healthcheck') == 'on', + 'enable_swagger': form_data.get('enable_swagger') == 'on', 'enable_semantic_kernel': form_data.get('enable_semantic_kernel') == 'on', 'per_user_semantic_kernel': form_data.get('per_user_semantic_kernel') == 'on', + 'enable_agent_template_gallery': form_data.get('enable_agent_template_gallery') == 'on', + 'agent_templates_allow_user_submission': form_data.get('agent_templates_allow_user_submission') == 'on', + 'agent_templates_require_approval': form_data.get('agent_templates_require_approval') == 'on', # GPT (Direct & APIM) 'enable_gpt_apim': form_data.get('enable_gpt_apim') == 'on', @@ -533,6 +727,8 @@ def is_valid_url(url): # Workspaces 'enable_user_workspace': form_data.get('enable_user_workspace') == 'on', 'enable_group_workspaces': form_data.get('enable_group_workspaces') == 'on', + # disable_group_creation is inverted: when checked (on), enable_group_creation = False + 'enable_group_creation': form_data.get('disable_group_creation') != 'on', 'enable_public_workspaces': form_data.get('enable_public_workspaces') == 'on', 'enable_file_sharing': form_data.get('enable_file_sharing') == 'on', 'enable_file_processing_logs': enable_file_processing_logs, @@ -542,6 +738,25 @@ def is_valid_url(url): 'file_processing_logs_turnoff_time': file_processing_logs_turnoff_time_str, 'require_member_of_create_group': require_member_of_create_group, 'require_member_of_create_public_workspace': require_member_of_create_public_workspace, + + # Retention Policy + 'enable_retention_policy_personal': enable_retention_policy_personal, + 'enable_retention_policy_group': enable_retention_policy_group, + 'enable_retention_policy_public': enable_retention_policy_public, + 'retention_policy_execution_hour': retention_policy_execution_hour, + 'retention_policy_next_run': retention_policy_next_run, + 'default_retention_conversation_personal': default_retention_conversation_personal, + 'default_retention_document_personal': default_retention_document_personal, + 'default_retention_conversation_group': default_retention_conversation_group, + 'default_retention_document_group': default_retention_document_group, + 'default_retention_conversation_public': default_retention_conversation_public, + 'default_retention_document_public': default_retention_document_public, + + # User Agreement + 'enable_user_agreement': enable_user_agreement, + 'user_agreement_text': user_agreement_text, + 'user_agreement_apply_to': user_agreement_apply_to, + 'enable_user_agreement_daily': enable_user_agreement_daily, # Multimedia & Metadata 'enable_video_file_support': enable_video_file_support, @@ -591,11 +806,33 @@ def is_valid_url(url): 
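The retention-policy handling just above derives the next scheduled run by pinning the configured hour in UTC and rolling forward a day when that time has already passed. A standalone sketch of the same calculation follows, assuming the hour has been validated to 0-23 as in the form handling; the `next_retention_run` helper is illustrative only and not part of the patch.

```python
# Illustrative sketch of the retention-policy scheduling used above.
from datetime import datetime, timedelta, timezone

def next_retention_run(execution_hour, now=None):
    """Return the next run time (ISO string) at the given UTC hour."""
    if not 0 <= execution_hour <= 23:
        execution_hour = 2  # fall back to 2 AM UTC, matching the form validation
    now = now or datetime.now(timezone.utc)
    next_run = now.replace(hour=execution_hour, minute=0, second=0, microsecond=0)
    if next_run <= now:
        next_run += timedelta(days=1)  # today's slot already passed, schedule tomorrow
    return next_run.isoformat()

# For example, at 03:15 UTC with execution_hour=2 the next run is 02:00 UTC tomorrow.
print(next_retention_run(2, datetime(2024, 1, 1, 3, 15, tzinfo=timezone.utc)))
# -> 2024-01-02T02:00:00+00:00
```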
'enable_user_feedback': form_data.get('enable_user_feedback') == 'on', 'enable_conversation_archiving': form_data.get('enable_conversation_archiving') == 'on', - # Search (Web Search Direct & APIM) - 'enable_web_search': form_data.get('enable_web_search') == 'on', - 'enable_web_search_apim': form_data.get('enable_web_search_apim') == 'on', - 'azure_apim_web_search_endpoint': form_data.get('azure_apim_web_search_endpoint', '').strip(), - 'azure_apim_web_search_subscription_key': form_data.get('azure_apim_web_search_subscription_key', '').strip(), + # Search (Web Search via Azure AI Foundry agent) + 'enable_web_search': enable_web_search, + 'web_search_consent_accepted': web_search_consent_accepted, + 'enable_web_search_user_notice': form_data.get('enable_web_search_user_notice') == 'on', + 'web_search_user_notice_text': form_data.get('web_search_user_notice_text', 'Your message will be sent to Microsoft Bing for web search. Only your current message is sent, not your conversation history.').strip(), + 'web_search_agent': { + 'agent_type': 'aifoundry', + 'azure_openai_gpt_endpoint': form_data.get('web_search_foundry_endpoint', '').strip(), + 'azure_openai_gpt_api_version': form_data.get('web_search_foundry_api_version', '').strip(), + 'azure_openai_gpt_deployment': '', + 'other_settings': { + 'azure_ai_foundry': { + 'agent_id': form_data.get('web_search_foundry_agent_id', '').strip(), + 'endpoint': form_data.get('web_search_foundry_endpoint', '').strip(), + 'api_version': form_data.get('web_search_foundry_api_version', '').strip(), + 'authentication_type': form_data.get('web_search_foundry_auth_type', 'managed_identity').strip(), + 'managed_identity_type': form_data.get('web_search_foundry_managed_identity_type', 'system_assigned').strip(), + 'managed_identity_client_id': form_data.get('web_search_foundry_managed_identity_client_id', '').strip(), + 'tenant_id': form_data.get('web_search_foundry_tenant_id', '').strip(), + 'client_id': form_data.get('web_search_foundry_client_id', '').strip(), + 'client_secret': form_data.get('web_search_foundry_client_secret', '').strip(), + 'cloud': form_data.get('web_search_foundry_cloud', '').strip(), + 'authority': form_data.get('web_search_foundry_authority', '').strip(), + 'notes': form_data.get('web_search_foundry_notes', '').strip() + } + } + }, # Search (AI Search Direct & APIM) 'azure_ai_search_endpoint': form_data.get('azure_ai_search_endpoint', '').strip(), @@ -613,6 +850,10 @@ def is_valid_url(url): 'azure_apim_document_intelligence_endpoint': form_data.get('azure_apim_document_intelligence_endpoint', '').strip(), 'azure_apim_document_intelligence_subscription_key': form_data.get('azure_apim_document_intelligence_subscription_key', '').strip(), + 'enable_key_vault_secret_storage': form_data.get('enable_key_vault_secret_storage') == 'on', + 'key_vault_name': form_data.get('key_vault_name', '').strip(), + 'key_vault_identity': form_data.get('key_vault_identity', ''), + # Authentication & Redirect Settings 'enable_front_door': enable_front_door, 'front_door_url': front_door_url, @@ -626,25 +867,39 @@ def is_valid_url(url): 'video_indexer_endpoint': form_data.get('video_indexer_endpoint', video_indexer_endpoint).strip(), 'video_indexer_location': form_data.get('video_indexer_location', '').strip(), 'video_indexer_account_id': form_data.get('video_indexer_account_id', '').strip(), - 'video_indexer_api_key': form_data.get('video_indexer_api_key', '').strip(), 'video_indexer_resource_group': form_data.get('video_indexer_resource_group', '').strip(), 
'video_indexer_subscription_id': form_data.get('video_indexer_subscription_id', '').strip(), 'video_indexer_account_name': form_data.get('video_indexer_account_name', '').strip(), - 'video_indexer_arm_api_version': form_data.get('video_indexer_arm_api_version', '2021-11-10-preview').strip(), + 'video_indexer_arm_api_version': form_data.get('video_indexer_arm_api_version', '2024-01-01').strip(), 'video_index_timeout': int(form_data.get('video_index_timeout', 600)), # Audio file settings with Azure speech service 'speech_service_endpoint': form_data.get('speech_service_endpoint', '').strip(), 'speech_service_location': form_data.get('speech_service_location', '').strip(), 'speech_service_locale': form_data.get('speech_service_locale', '').strip(), + 'speech_service_authentication_type': form_data.get('speech_service_authentication_type', 'key'), 'speech_service_key': form_data.get('speech_service_key', '').strip(), + + # Speech-to-text chat input + 'enable_speech_to_text_input': form_data.get('enable_speech_to_text_input') == 'on', + + # Text-to-speech chat output + 'enable_text_to_speech': form_data.get('enable_text_to_speech') == 'on', 'metadata_extraction_model': form_data.get('metadata_extraction_model', '').strip(), + # Multi-modal vision settings + 'enable_multimodal_vision': form_data.get('enable_multimodal_vision') == 'on', + 'multimodal_vision_model': form_data.get('multimodal_vision_model', '').strip(), + # --- Banner fields --- 'classification_banner_enabled': classification_banner_enabled, 'classification_banner_text': classification_banner_text, 'classification_banner_color': classification_banner_color, + 'classification_banner_text_color': classification_banner_text_color, + + 'require_member_of_control_center_admin': require_member_of_control_center_admin, + 'require_member_of_control_center_dashboard_reader': require_member_of_control_center_dashboard_reader } # --- Prevent Legacy Fields from Being Created/Updated --- @@ -653,6 +908,16 @@ def is_valid_url(url): del new_settings['semantic_kernel_agents'] if 'semantic_kernel_plugins' in new_settings: del new_settings['semantic_kernel_plugins'] + + # Remove legacy web search keys if present + for legacy_key in [ + 'bing_search_key', + 'enable_web_search_apim', + 'azure_apim_web_search_endpoint', + 'azure_apim_web_search_subscription_key' + ]: + if legacy_key in new_settings: + del new_settings[legacy_key] logo_file = request.files.get('logo_file') if logo_file and allowed_file(logo_file.filename, ALLOWED_EXTENSIONS_IMG): @@ -733,7 +998,7 @@ def is_valid_url(url): except Exception as e: print(f"Error processing logo file: {e}") # Log the error for debugging flash(f"Error processing logo file: {e}. Existing logo preserved.", "danger") - # On error, new_settings['custom_logo_base64'] keeps its initial value (the old logo) + log_event(f"Error processing logo file: {e}", level=logging.ERROR) # Process dark mode logo file upload logo_dark_file = request.files.get('logo_dark_file') @@ -816,7 +1081,7 @@ def is_valid_url(url): except Exception as e: print(f"Error processing dark mode logo file: {e}") # Log the error for debugging flash(f"Error processing dark mode logo file: {e}. 
Existing dark mode logo preserved.", "danger") - # On error, new_settings['custom_logo_dark_base64'] keeps its initial value (the old logo) + log_event(f"Error processing dark mode logo file: {e}", level=logging.ERROR) # Process favicon file upload favicon_file = request.files.get('favicon_file') @@ -890,7 +1155,7 @@ def is_valid_url(url): except Exception as e: print(f"Error processing favicon file: {e}") # Log the error for debugging flash(f"Error processing favicon file: {e}. Existing favicon preserved.", "danger") - # On error, new_settings['custom_favicon_base64'] keeps its initial value (the old favicon) + log_event(f"Error processing favicon file: {e}", level=logging.ERROR) # --- Update settings in DB --- # new_settings now contains either the new logo/favicon base64 or the original ones diff --git a/application/single_app/route_frontend_authentication.py b/application/single_app/route_frontend_authentication.py index 621083b0..022ecf84 100644 --- a/application/single_app/route_frontend_authentication.py +++ b/application/single_app/route_frontend_authentication.py @@ -4,6 +4,7 @@ from config import * from functions_authentication import _build_msal_app, _load_cache, _save_cache from functions_debug import debug_print +from swagger_wrapper import swagger_route, get_auth_security def build_front_door_urls(front_door_url): """ @@ -29,6 +30,7 @@ def build_front_door_urls(front_door_url): def register_route_frontend_authentication(app): @app.route('/login') + @swagger_route(security=get_auth_security()) def login(): # Clear potentially stale cache/user info before starting new login session.pop("user", None) @@ -67,6 +69,7 @@ def login(): return redirect(auth_url) @app.route('/getAToken') # This is your redirect URI path + @swagger_route(security=get_auth_security()) def authorized(): # Check for errors passed back from Azure AD if request.args.get('error'): @@ -116,7 +119,6 @@ def authorized(): # Store user identity info (claims from ID token) debug_print(f" [claims] User {result.get('id_token_claims', {}).get('name', 'Unknown')} logged in.") debug_print(f" [claims] User claims: {result.get('id_token_claims', {})}") - debug_print(f" [claims] User token: {result.get('access_token', 'Unknown')}") session["user"] = result.get("id_token_claims") @@ -124,6 +126,16 @@ def authorized(): _save_cache(msal_app.token_cache) print(f"User {session['user'].get('name')} logged in successfully.") + + # Log the login activity + try: + from functions_activity_logging import log_user_login + user_id = session['user'].get('oid') or session['user'].get('sub') + if user_id: + log_user_login(user_id, 'azure_ad') + except Exception as e: + debug_print(f"Could not log login activity: {e}") + # Redirect to the originally intended page or home # You might want to store the original destination in the session during /login # Get settings from database, with environment variable fallback @@ -151,6 +163,7 @@ def authorized(): # This route is for API calls that need a token, not the web app login flow. This does not kick off a session. 
@app.route('/getATokenApi') # This is your redirect URI path + @swagger_route(security=get_auth_security()) def authorized_api(): # Check for errors passed back from Azure AD if request.args.get('error'): @@ -195,6 +208,7 @@ def authorized_api(): return jsonify(result, 200) @app.route('/logout') + @swagger_route(security=get_auth_security()) def logout(): user_name = session.get("user", {}).get("name", "User") # Get the user's email before clearing the session diff --git a/application/single_app/route_frontend_chats.py b/application/single_app/route_frontend_chats.py index 416d1e97..a7f8e6a0 100644 --- a/application/single_app/route_frontend_chats.py +++ b/application/single_app/route_frontend_chats.py @@ -7,9 +7,12 @@ from functions_documents import * from functions_group import find_group_by_id from functions_appinsights import log_event +from swagger_wrapper import swagger_route, get_auth_security +from functions_debug import debug_print def register_route_frontend_chats(app): @app.route('/chats', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required @user_required def chats(): @@ -27,25 +30,38 @@ def chats(): group_doc = find_group_by_id(active_group_id) if group_doc: active_group_name = group_doc.get("name", "") + + # Get active public workspace ID from user settings + active_public_workspace_id = user_settings["settings"].get("activePublicWorkspaceOid", "") + categories_list = public_settings.get("document_classification_categories","") if not user_id: return redirect(url_for('login')) + + # Get user display name from user settings + user_display_name = user_settings.get('display_name', '') + return render_template( 'chats.html', settings=public_settings, enable_user_feedback=enable_user_feedback, active_group_id=active_group_id, active_group_name=active_group_name, + active_public_workspace_id=active_public_workspace_id, enable_enhanced_citations=enable_enhanced_citations, enable_document_classification=enable_document_classification, document_classification_categories=categories_list, enable_extract_meta_data=enable_extract_meta_data, + user_id=user_id, + user_display_name=user_display_name, ) @app.route('/upload', methods=['POST']) + @swagger_route(security=get_auth_security()) @login_required @user_required + @file_upload_required def upload_file(): settings = get_settings() user_id = get_current_user_id() @@ -102,7 +118,8 @@ def upload_file(): file.seek(0) filename = secure_filename(file.filename) - file_ext = os.path.splitext(filename)[1].lower() + file_ext = os.path.splitext(filename)[1].lower() # e.g., '.png' + file_ext_nodot = file_ext.lstrip('.') # e.g., 'png' with tempfile.NamedTemporaryFile(delete=False) as tmp_file: file.save(tmp_file.name) @@ -110,19 +127,90 @@ def upload_file(): extracted_content = '' is_table = False + vision_analysis = None + image_base64_url = None # For storing base64-encoded images try: - if file_ext in ['.pdf', '.docx', '.pptx', '.html', '.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif', '.heif']: - extracted_content = extract_content_with_azure_di(temp_file_path) - elif file_ext == '.txt': + # Check if this is an image file + is_image_file = file_ext_nodot in IMAGE_EXTENSIONS + + if file_ext_nodot in (DOCUMENT_EXTENSIONS | {'html'}) or is_image_file: + extracted_content_raw = extract_content_with_azure_di(temp_file_path) + + # Convert pages_data list to string + if isinstance(extracted_content_raw, list): + extracted_content = "\n\n".join([ + f"[Page {page.get('page_number', 'N/A')}]\n{page.get('content', '')}" + for 
page in extracted_content_raw + ]) + else: + extracted_content = str(extracted_content_raw) + + # NEW: For images, convert to base64 for inline display + if is_image_file: + try: + with open(temp_file_path, 'rb') as img_file: + image_bytes = img_file.read() + base64_image = base64.b64encode(image_bytes).decode('utf-8') + + # Detect mime type + mime_type = mimetypes.guess_type(temp_file_path)[0] or 'image/png' + + # Create data URL + image_base64_url = f"data:{mime_type};base64,{base64_image}" + print(f"Converted image to base64: {filename}, size: {len(image_base64_url)} bytes") + except Exception as b64_error: + print(f"Warning: Failed to convert image to base64: {b64_error}") + + # Perform vision analysis for images if enabled + if is_image_file and settings.get('enable_multimodal_vision', False): + try: + from functions_documents import analyze_image_with_vision_model + + vision_analysis = analyze_image_with_vision_model( + temp_file_path, + user_id, + f"chat_upload_{int(time.time())}", + settings + ) + + if vision_analysis: + # Combine DI OCR with vision analysis + vision_description = vision_analysis.get('description', '') + vision_objects = vision_analysis.get('objects', []) + vision_text = vision_analysis.get('text', '') + + extracted_content += f"\n\n=== AI Vision Analysis ===\n" + extracted_content += f"Description: {vision_description}\n" + if vision_objects: + extracted_content += f"Objects detected: {', '.join(vision_objects)}\n" + if vision_text: + extracted_content += f"Text visible in image: {vision_text}\n" + + print(f"Vision analysis added to chat upload: {filename}") + except Exception as vision_error: + print(f"Warning: Vision analysis failed for chat upload: {vision_error}") + # Continue without vision analysis + + elif file_ext_nodot in {'doc', 'docm'}: + # Use docx2txt for .doc and .docm files + try: + import docx2txt + extracted_content = docx2txt.process(temp_file_path) + except ImportError: + return jsonify({'error': 'docx2txt library required for .doc/.docm files'}), 500 + elif file_ext_nodot == 'txt': extracted_content = extract_text_file(temp_file_path) - elif file_ext == '.md': + elif file_ext_nodot == 'md': extracted_content = extract_markdown_file(temp_file_path) - elif file_ext == '.json': + elif file_ext_nodot == 'json': with open(temp_file_path, 'r', encoding='utf-8') as f: parsed_json = json.load(f) extracted_content = json.dumps(parsed_json, indent=2) - elif file_ext in ['.csv', '.xls', '.xlsx']: + elif file_ext_nodot in {'xml', 'yaml', 'yml', 'log'}: + # Handle XML, YAML, and LOG files as text for inline chat + extracted_content = extract_text_file(temp_file_path) + elif file_ext_nodot in TABULAR_EXTENSIONS: extracted_content = extract_table_file(temp_file_path, file_ext) is_table = True else: @@ -135,20 +223,200 @@ def upload_file(): try: file_message_id = f"{conversation_id}_file_{int(time.time())}_{random.randint(1000,9999)}" - file_message = { - 'id': file_message_id, - 'conversation_id': conversation_id, - 'role': 'file', - 'filename': filename, - 'file_content': extracted_content, - 'is_table': is_table, - 'timestamp': datetime.utcnow().isoformat(), - 'model_deployment_name': None - } + + # For images with base64 data, store as 'image' role (like system-generated images) + if image_base64_url: + # Check if image data is too large for a single Cosmos document (2MB limit) + # Use 1.5MB as safe limit for base64 content + max_content_size = 1500000 # 1.5MB in bytes + + if len(image_base64_url) > max_content_size: + print(f"Large image detected 
({len(image_base64_url)} bytes), splitting across multiple documents") + + # Extract base64 part for splitting + data_url_prefix = image_base64_url.split(',')[0] + ',' + base64_content = image_base64_url.split(',')[1] + + # Calculate chunks + chunk_size = max_content_size - len(data_url_prefix) - 200 # Room for JSON overhead + chunks = [base64_content[i:i+chunk_size] for i in range(0, len(base64_content), chunk_size)] + total_chunks = len(chunks) + + print(f"Splitting into {total_chunks} chunks of max {chunk_size} bytes each") + + # Threading logic for file upload + previous_thread_id = None + try: + last_msg_query = f"SELECT TOP 1 c.metadata.thread_info.thread_id as thread_id FROM c WHERE c.conversation_id = '{conversation_id}' ORDER BY c.timestamp DESC" + last_msgs = list(cosmos_messages_container.query_items(query=last_msg_query, partition_key=conversation_id)) + if last_msgs: + previous_thread_id = last_msgs[0].get('thread_id') + except: + pass + + current_thread_id = str(uuid.uuid4()) + + # Create main image document with first chunk + main_image_doc = { + 'id': file_message_id, + 'conversation_id': conversation_id, + 'role': 'image', + 'content': f"{data_url_prefix}{chunks[0]}", + 'filename': filename, + 'prompt': f"User uploaded: {filename}", + 'created_at': datetime.utcnow().isoformat(), + 'timestamp': datetime.utcnow().isoformat(), + 'model_deployment_name': None, + 'metadata': { + 'is_chunked': True, + 'total_chunks': total_chunks, + 'chunk_index': 0, + 'original_size': len(image_base64_url), + 'is_user_upload': True, + 'thread_info': { + 'thread_id': current_thread_id, + 'previous_thread_id': previous_thread_id, + 'active_thread': True, + 'thread_attempt': 1 + } + } + } + + # Add vision analysis and extracted text if available + if vision_analysis: + main_image_doc['vision_analysis'] = vision_analysis + if extracted_content: + main_image_doc['extracted_text'] = extracted_content + + cosmos_messages_container.upsert_item(main_image_doc) + + # Create chunk documents + for i in range(1, total_chunks): + chunk_doc = { + 'id': f"{file_message_id}_chunk_{i}", + 'conversation_id': conversation_id, + 'role': 'image_chunk', + 'content': chunks[i], + 'parent_message_id': file_message_id, + 'created_at': datetime.utcnow().isoformat(), + 'timestamp': datetime.utcnow().isoformat(), + 'metadata': { + 'is_chunk': True, + 'chunk_index': i, + 'total_chunks': total_chunks, + 'parent_message_id': file_message_id + } + } + cosmos_messages_container.upsert_item(chunk_doc) + + print(f"Created {total_chunks} chunked image documents for {filename}") + else: + # Small enough to store in single document + # Threading logic for file upload + previous_thread_id = None + try: + last_msg_query = f"SELECT TOP 1 c.metadata.thread_info.thread_id as thread_id FROM c WHERE c.conversation_id = '{conversation_id}' ORDER BY c.timestamp DESC" + last_msgs = list(cosmos_messages_container.query_items(query=last_msg_query, partition_key=conversation_id)) + if last_msgs: + previous_thread_id = last_msgs[0].get('thread_id') + except: + pass + + current_thread_id = str(uuid.uuid4()) + + image_message = { + 'id': file_message_id, + 'conversation_id': conversation_id, + 'role': 'image', + 'content': image_base64_url, + 'filename': filename, + 'prompt': f"User uploaded: {filename}", + 'created_at': datetime.utcnow().isoformat(), + 'timestamp': datetime.utcnow().isoformat(), + 'model_deployment_name': None, + 'metadata': { + 'is_chunked': False, + 'original_size': len(image_base64_url), + 'is_user_upload': True, + 
'thread_info': { + 'thread_id': current_thread_id, + 'previous_thread_id': previous_thread_id, + 'active_thread': True, + 'thread_attempt': 1 + } + } + } + + # Add vision analysis and extracted text if available + if vision_analysis: + image_message['vision_analysis'] = vision_analysis + if extracted_content: + image_message['extracted_text'] = extracted_content + + cosmos_messages_container.upsert_item(image_message) + print(f"Created single image document for {filename}") + else: + # Non-image file or failed to convert to base64, store as 'file' role + # Threading logic for file upload + previous_thread_id = None + try: + last_msg_query = f"SELECT TOP 1 c.metadata.thread_info.thread_id as thread_id FROM c WHERE c.conversation_id = '{conversation_id}' ORDER BY c.timestamp DESC" + last_msgs = list(cosmos_messages_container.query_items(query=last_msg_query, partition_key=conversation_id)) + if last_msgs: + previous_thread_id = last_msgs[0].get('thread_id') + except: + pass + + current_thread_id = str(uuid.uuid4()) + + file_message = { + 'id': file_message_id, + 'conversation_id': conversation_id, + 'role': 'file', + 'filename': filename, + 'file_content': extracted_content, + 'is_table': is_table, + 'timestamp': datetime.utcnow().isoformat(), + 'model_deployment_name': None, + 'metadata': { + 'thread_info': { + 'thread_id': current_thread_id, + 'previous_thread_id': previous_thread_id, + 'active_thread': True, + 'thread_attempt': 1 + } + } + } + + # Add vision analysis if available + if vision_analysis: + file_message['vision_analysis'] = vision_analysis - cosmos_messages_container.upsert_item(file_message) + cosmos_messages_container.upsert_item(file_message) conversation_item['last_updated'] = datetime.utcnow().isoformat() + + # Check if this is the first message in the conversation (excluding the current file upload) + # and update conversation title based on filename if it's still "New Conversation" + try: + if conversation_item.get('title') == 'New Conversation': + # Query to count existing messages (excluding the one we just created) + count_query = f"SELECT VALUE COUNT(1) FROM c WHERE c.conversation_id = '{conversation_id}'" + message_counts = list(cosmos_messages_container.query_items(query=count_query, partition_key=conversation_id)) + message_count = message_counts[0] if message_counts else 0 + + # If this is the first or only message, set title based on filename + if message_count <= 1: + # Remove file extension and create a clean title + base_filename = os.path.splitext(filename)[0] + # Limit title length to 50 characters + new_title = base_filename[:50] if len(base_filename) > 50 else base_filename + conversation_item['title'] = new_title + print(f"Auto-generated conversation title from filename: {new_title}") + except Exception as title_error: + # Don't fail the upload if title generation fails + print(f"Warning: Failed to auto-generate conversation title: {title_error}") + cosmos_conversations_container.upsert_item(conversation_item) except Exception as e: @@ -158,11 +426,13 @@ def upload_file(): return jsonify({ 'message': 'File added to the conversation successfully', - 'conversation_id': conversation_id + 'conversation_id': conversation_id, + 'title': conversation_item.get('title', 'New Conversation') }), 200 # THIS IS THE OLD ROUTE, KEEPING IT FOR REFERENCE, WILL DELETE LATER @app.route("/view_pdf", methods=["GET"]) + @swagger_route(security=get_auth_security()) @login_required @user_required def view_pdf(): @@ -317,6 +587,7 @@ def view_pdf(): # --- Updated route --- 
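Editor's note: the upload route above works around Cosmos DB's ~2 MB item size limit by storing an uploaded image either as a single 'image' document or, once the base64 data URL exceeds the ~1.5 MB safety threshold, as a main 'image' document (carrying chunk 0) plus follow-on 'image_chunk' documents. The sketch below is an illustration of how a reader could stitch those pieces back together; the helper name and the warning handling are hypothetical, and the repo's actual reassembly happens in the conversations route changed later in this diff. It assumes only the fields written by the code above: role 'image'/'image_chunk', metadata.is_chunked, metadata.total_chunks, metadata.chunk_index, parent_message_id, and content holding the data-URL prefix plus base64 chunks.

```python
# Illustrative sketch only (not part of this diff): rebuild the full data URL
# for a possibly chunked image message stored by the upload route above.
def reassemble_image_message(container, conversation_id, main_doc):
    """Return the complete data URL for an 'image' message, stitching chunks if needed."""
    meta = main_doc.get('metadata', {})
    if not meta.get('is_chunked'):
        # Small images are stored whole in a single document.
        return main_doc.get('content', '')

    total_chunks = meta.get('total_chunks', 1)
    # Chunk 0 lives on the main document; chunks 1..N-1 are separate 'image_chunk' docs.
    query = (
        "SELECT c.content, c.metadata.chunk_index AS chunk_index FROM c "
        "WHERE c.parent_message_id = @parent AND c.role = 'image_chunk'"
    )
    params = [{"name": "@parent", "value": main_doc['id']}]
    chunk_docs = list(container.query_items(
        query=query, parameters=params, partition_key=conversation_id
    ))
    ordered = sorted(chunk_docs, key=lambda d: d.get('chunk_index', 0))
    pieces = [main_doc.get('content', '')] + [d.get('content', '') for d in ordered]
    if len(pieces) != total_chunks:
        # Hypothetical handling: warn rather than silently return a truncated image.
        print(f"Warning: expected {total_chunks} chunks, found {len(pieces)}")
    return ''.join(pieces)
```

Because the main document's content already includes the `data:<mime>;base64,` prefix and the chunk documents hold raw base64 only, simple concatenation in chunk order restores the original data URL.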
@app.route('/view_document') + @swagger_route(security=get_auth_security()) @login_required @user_required def view_document(): @@ -413,10 +684,10 @@ def view_document(): # Define supported types for direct viewing/handling is_pdf = file_ext == '.pdf' - is_word = file_ext in ('.docx', '.doc') + is_word = file_ext in ('.docx', '.doc', '.docm') is_ppt = file_ext in ('.pptx', '.ppt') - is_image = file_ext in ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif', '.gif', '.webp') # Added more image types - is_text = file_ext in ('.txt', '.md', '.csv', '.json', '.log', '.xml', '.html', '.htm') # Common text-based types + is_image = file_ext.lstrip('.') in (IMAGE_EXTENSIONS | {'gif', 'webp'}) # Added more image types + is_text = file_ext.lstrip('.') in (BASE_ALLOWED_EXTENSIONS - {'doc', 'docm'}) # Common text-based types try: # Download the file to the specified location diff --git a/application/single_app/route_frontend_control_center.py b/application/single_app/route_frontend_control_center.py new file mode 100644 index 00000000..c5f3f44b --- /dev/null +++ b/application/single_app/route_frontend_control_center.py @@ -0,0 +1,280 @@ +# route_frontend_control_center.py + +from config import * +from functions_authentication import * +from functions_settings import * +from functions_logging import * +from swagger_wrapper import swagger_route, get_auth_security +from datetime import datetime, timedelta +import json +from functions_debug import debug_print + +def register_route_frontend_control_center(app): + @app.route('/admin/control-center', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @control_center_required('dashboard') + def control_center(): + """ + Control Center main page for administrators. + Provides dashboard overview and management tools for users, groups, and workspaces. 
+ """ + try: + # Get settings for configuration data + settings = get_settings() + public_settings = sanitize_settings_for_user(settings) + + # Get basic statistics for dashboard + stats = get_control_center_statistics() + + # Check user's role for frontend conditional rendering + # Determine if user has full admin access (can see all tabs) + user = session.get('user', {}) + user_roles = user.get('roles', []) + require_member_of_control_center_admin = settings.get("require_member_of_control_center_admin", False) + + # User has full admin access based on which role requirement is active: + # - When require_member_of_control_center_admin is ENABLED: Only ControlCenterAdmin role grants access + # - When require_member_of_control_center_admin is DISABLED: Only regular Admin role grants access + has_control_center_admin_role = 'ControlCenterAdmin' in user_roles + has_regular_admin_role = 'Admin' in user_roles + + # Full admin access means they can see dashboard + management tabs + activity logs + if require_member_of_control_center_admin: + # ControlCenterAdmin role is required - only that role grants full access + has_full_admin_access = has_control_center_admin_role + else: + # ControlCenterAdmin requirement is disabled - only regular Admin role grants full access + has_full_admin_access = has_regular_admin_role + + return render_template('control_center.html', + app_settings=public_settings, + settings=public_settings, + statistics=stats, + has_control_center_admin=has_full_admin_access) + except Exception as e: + debug_print(f"Error loading control center: {e}") + flash(f"Error loading control center: {str(e)}", "error") + return redirect(url_for('admin_settings')) + + @app.route('/approvals', methods=['GET']) + @login_required + @user_required + def approvals(): + """ + Approval Requests page accessible to group owners, admins, and control center admins. + Shows approval requests based on user's role and permissions. + """ + try: + # Get settings for configuration data + settings = get_settings() + public_settings = sanitize_settings_for_user(settings) + + # Get user settings for profile and navigation + user_id = get_current_user_id() + user_settings = get_user_settings(user_id) + + return render_template('approvals.html', + app_settings=public_settings, + settings=public_settings, + user_settings=user_settings) + except Exception as e: + import traceback + error_trace = traceback.format_exc() + debug_print(f"Error loading approvals: {e}\n{error_trace}") + print(f"ERROR IN APPROVALS ROUTE: {e}\n{error_trace}") + flash(f"Error loading approvals: {str(e)}", "error") + return redirect(url_for('index')) + +def get_control_center_statistics(): + """ + Get aggregated statistics for the Control Center dashboard. 
+ """ + try: + stats = { + 'total_users': 0, + 'active_users_30_days': 0, + 'total_groups': 0, + 'locked_groups': 0, + 'total_public_workspaces': 0, + 'hidden_workspaces': 0, + 'recent_activity_24h': { + 'chats': 0, + 'documents': 0, + 'logins': 0 + }, + 'blocked_users': 0, + 'alerts': [] + } + + # Get total users count + try: + user_query = "SELECT VALUE COUNT(1) FROM c" + user_result = list(cosmos_user_settings_container.query_items( + query=user_query, + enable_cross_partition_query=True + )) + stats['total_users'] = user_result[0] if user_result else 0 + except Exception as e: + debug_print(f"Could not get user count: {e}") + + # Get active users in last 30 days using login activity logs + try: + thirty_days_ago = (datetime.now() - timedelta(days=30)).isoformat() + active_users_query = """ + SELECT VALUE COUNT(1) FROM ( + SELECT DISTINCT c.user_id FROM c + WHERE c.activity_type = 'user_login' + AND c.timestamp >= @thirty_days_ago + ) + """ + active_users_params = [{"name": "@thirty_days_ago", "value": thirty_days_ago}] + active_users_result = list(cosmos_activity_logs_container.query_items( + query=active_users_query, + parameters=active_users_params, + enable_cross_partition_query=True + )) + stats['active_users_30_days'] = active_users_result[0] if active_users_result else 0 + except Exception as e: + debug_print(f"Could not get active users count: {e}") + + # Get total groups count + try: + groups_query = "SELECT VALUE COUNT(1) FROM c" + groups_result = list(cosmos_groups_container.query_items( + query=groups_query, + enable_cross_partition_query=True + )) + stats['total_groups'] = groups_result[0] if groups_result else 0 + except Exception as e: + debug_print(f"Could not get groups count: {e}") + + # Get groups created in last 30 days using createdDate + try: + thirty_days_ago = (datetime.now() - timedelta(days=30)).isoformat() + new_groups_query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.createdDate >= @thirty_days_ago + """ + new_groups_params = [{"name": "@thirty_days_ago", "value": thirty_days_ago}] + new_groups_result = list(cosmos_groups_container.query_items( + query=new_groups_query, + parameters=new_groups_params, + enable_cross_partition_query=True + )) + stats['locked_groups'] = new_groups_result[0] if new_groups_result else 0 + except Exception as e: + debug_print(f"Could not get new groups count: {e}") + + # Get total public workspaces count + try: + workspaces_query = "SELECT VALUE COUNT(1) FROM c" + workspaces_result = list(cosmos_public_workspaces_container.query_items( + query=workspaces_query, + enable_cross_partition_query=True + )) + stats['total_public_workspaces'] = workspaces_result[0] if workspaces_result else 0 + except Exception as e: + debug_print(f"Could not get public workspaces count: {e}") + + # Get public workspaces created in last 30 days using createdDate + try: + thirty_days_ago = (datetime.now() - timedelta(days=30)).isoformat() + new_workspaces_query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.createdDate >= @thirty_days_ago + """ + new_workspaces_params = [{"name": "@thirty_days_ago", "value": thirty_days_ago}] + new_workspaces_result = list(cosmos_public_workspaces_container.query_items( + query=new_workspaces_query, + parameters=new_workspaces_params, + enable_cross_partition_query=True + )) + stats['hidden_workspaces'] = new_workspaces_result[0] if new_workspaces_result else 0 + except Exception as e: + debug_print(f"Could not get new public workspaces count: {e}") + + # Get blocked users count + try: + blocked_query = """ + SELECT 
VALUE COUNT(1) FROM c + WHERE c.settings.access.status = "deny" + """ + blocked_result = list(cosmos_user_settings_container.query_items( + query=blocked_query, + enable_cross_partition_query=True + )) + stats['blocked_users'] = blocked_result[0] if blocked_result else 0 + except Exception as e: + debug_print(f"Could not get blocked users count: {e}") + + # Get recent activity (last 24 hours) + try: + yesterday = (datetime.now() - timedelta(days=1)).isoformat() + + # Recent logins from activity_logs + login_query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.activity_type = 'user_login' + AND c.timestamp >= @yesterday + """ + login_params = [{"name": "@yesterday", "value": yesterday}] + recent_logins = list(cosmos_activity_logs_container.query_items( + query=login_query, + parameters=login_params, + enable_cross_partition_query=True + )) + stats['recent_activity_24h']['logins'] = recent_logins[0] if recent_logins else 0 + + # Recent chat activity from conversations + chat_query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.last_updated >= @yesterday + """ + chat_params = [{"name": "@yesterday", "value": yesterday}] + recent_chats = list(cosmos_conversations_container.query_items( + query=chat_query, + parameters=chat_params, + enable_cross_partition_query=True + )) + stats['recent_activity_24h']['chats'] = recent_chats[0] if recent_chats else 0 + + # Recent document uploads from user_documents + doc_query = """ + SELECT VALUE COUNT(1) FROM c + WHERE c.upload_date >= @yesterday + """ + doc_params = [{"name": "@yesterday", "value": yesterday}] + recent_docs = list(cosmos_user_documents_container.query_items( + query=doc_query, + parameters=doc_params, + enable_cross_partition_query=True + )) + stats['recent_activity_24h']['documents'] = recent_docs[0] if recent_docs else 0 + + except Exception as e: + debug_print(f"Could not get recent activity: {e}") + + # Add alerts for blocked users + if stats['blocked_users'] > 0: + stats['alerts'].append({ + 'type': 'warning', + 'message': f"{stats['blocked_users']} user(s) currently blocked from access", + 'action': 'View Users' + }) + + return stats + + except Exception as e: + debug_print(f"Error getting control center statistics: {e}") + return { + 'total_users': 0, + 'active_users_30_days': 0, + 'total_groups': 0, + 'locked_groups': 0, + 'total_public_workspaces': 0, + 'hidden_workspaces': 0, + 'recent_activity_24h': {'chats': 0, 'documents': 0, 'logins': 0}, + 'blocked_users': 0, + 'alerts': [] + } \ No newline at end of file diff --git a/application/single_app/route_frontend_conversations.py b/application/single_app/route_frontend_conversations.py index d4ca670f..a5d3f261 100644 --- a/application/single_app/route_frontend_conversations.py +++ b/application/single_app/route_frontend_conversations.py @@ -3,9 +3,12 @@ from config import * from functions_authentication import * from functions_debug import debug_print +from functions_chat import sort_messages_by_thread +from swagger_wrapper import swagger_route, get_auth_security def register_route_frontend_conversations(app): @app.route('/conversations') + @swagger_route(security=get_auth_security()) @login_required @user_required def conversations(): @@ -26,6 +29,7 @@ def conversations(): return render_template('conversations.html', conversations=items) @app.route('/conversation/', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required @user_required def view_conversation(conversation_id): @@ -52,6 +56,7 @@ def view_conversation(conversation_id): return 
render_template('chat.html', conversation_id=conversation_id, messages=messages) @app.route('/conversation//messages', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required @user_required def get_conversation_messages(conversation_id): @@ -74,9 +79,46 @@ def get_conversation_messages(conversation_id): partition_key=conversation_id )) - debug_print(f"Frontend endpoint - Query returned {len(all_items)} total items") + debug_print(f"Frontend endpoint - Query returned {len(all_items)} total items (before filtering)") + + # Filter for active_thread = True OR active_thread is not defined (backwards compatibility) + filtered_items = [] + for item in all_items: + thread_info = item.get('metadata', {}).get('thread_info', {}) + active = thread_info.get('active_thread') + + # Include if: active_thread is True, OR active_thread is not defined, OR active_thread is None + if active is True or active is None or 'active_thread' not in thread_info: + filtered_items.append(item) + debug_print(f"Frontend endpoint - ✅ Including: id={item.get('id')}, role={item.get('role')}, active={active}, attempt={thread_info.get('thread_attempt', 'N/A')}") + else: + debug_print(f"Frontend endpoint - ❌ Excluding: id={item.get('id')}, role={item.get('role')}, active={active}, attempt={thread_info.get('thread_attempt', 'N/A')}") + + all_items = filtered_items + debug_print(f"Frontend endpoint - After filtering: {len(all_items)} items remaining") + + # Log thread info BEFORE sorting + debug_print(f"Frontend endpoint - BEFORE SORT:") + for item in all_items: + thread_info = item.get('metadata', {}).get('thread_info', {}) + thread_id = thread_info.get('thread_id', 'NO_THREAD_ID') + prev_thread_id = thread_info.get('previous_thread_id', 'NO_PREV') + timestamp = item.get('timestamp', 'NO_TIMESTAMP') + attempt = thread_info.get('thread_attempt', 'N/A') + debug_print(f" {item.get('id')}: thread_id={thread_id}, prev={prev_thread_id}, attempt={attempt}, timestamp={timestamp}") + + # Sort messages using threading logic + all_items = sort_messages_by_thread(all_items) + + # Log thread info AFTER sorting + debug_print(f"Frontend endpoint - AFTER SORT:") for i, item in enumerate(all_items): - debug_print(f"Frontend endpoint - Item {i}: id={item.get('id')}, role={item.get('role')}") + thread_info = item.get('metadata', {}).get('thread_info', {}) + thread_id = thread_info.get('thread_id', 'NO_THREAD_ID') + prev_thread_id = thread_info.get('previous_thread_id', 'NO_PREV') + timestamp = item.get('timestamp', 'NO_TIMESTAMP') + attempt = thread_info.get('thread_attempt', 'N/A') + debug_print(f" {i+1}. {item.get('id')}: thread_id={thread_id}, prev={prev_thread_id}, attempt={attempt}, timestamp={timestamp}") # Process messages and reassemble chunked images messages = [] @@ -149,6 +191,7 @@ def get_conversation_messages(conversation_id): return jsonify({'messages': messages}) @app.route('/api/message//metadata', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required @user_required def get_message_metadata(message_id): @@ -185,9 +228,18 @@ def get_message_metadata(message_id): except CosmosResourceNotFoundError: return jsonify({'error': 'Conversation not found'}), 404 - # Return the metadata from the message - metadata = message.get('metadata', {}) - return jsonify(metadata) + # Return appropriate data based on message role + # User messages: return metadata object only (has user_info, button_states, etc.) + # Other messages: return full document (has id, role, augmented, etc. 
at top level) + message_role = message.get('role', '') + + if message_role == 'user': + # User messages - return nested metadata object + metadata = message.get('metadata', {}) + return jsonify(metadata) + else: + # Assistant, image, file messages - return full document + return jsonify(message) except Exception as e: print(f"Error fetching message metadata: {str(e)}") diff --git a/application/single_app/route_frontend_feedback.py b/application/single_app/route_frontend_feedback.py index e1201530..15f13274 100644 --- a/application/single_app/route_frontend_feedback.py +++ b/application/single_app/route_frontend_feedback.py @@ -3,12 +3,13 @@ from config import * from functions_authentication import * from functions_settings import * +from swagger_wrapper import swagger_route, get_auth_security def register_route_frontend_feedback(app): @app.route("/admin/feedback_review") + @swagger_route(security=get_auth_security()) @login_required - @admin_required @feedback_admin_required @enabled_required("enable_user_feedback") def admin_feedback_review(): @@ -19,6 +20,7 @@ def admin_feedback_review(): return render_template("admin_feedback_review.html") @app.route("/my_feedback") + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_user_feedback") diff --git a/application/single_app/route_frontend_group_workspaces.py b/application/single_app/route_frontend_group_workspaces.py index af79065f..850cc8d0 100644 --- a/application/single_app/route_frontend_group_workspaces.py +++ b/application/single_app/route_frontend_group_workspaces.py @@ -3,9 +3,11 @@ from config import * from functions_authentication import * from functions_settings import * +from swagger_wrapper import swagger_route, get_auth_security def register_route_frontend_group_workspaces(app): @app.route('/group_workspaces', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_group_workspaces") @@ -42,7 +44,14 @@ def group_workspaces(): ) ) legacy_count = legacy_docs_from_cosmos[0] if legacy_docs_from_cosmos else 0 - + + # Get allowed extensions from central function and build allowed extensions string + allowed_extensions = sorted(get_allowed_extensions( + enable_video=enable_video_file_support in [True, 'True', 'true'], + enable_audio=enable_audio_file_support in [True, 'True', 'true'] + )) + allowed_extensions_str = "Allowed: " + ", ".join(allowed_extensions) + return render_template( 'group_workspaces.html', settings=public_settings, @@ -51,10 +60,12 @@ def group_workspaces(): enable_video_file_support=enable_video_file_support, enable_audio_file_support=enable_audio_file_support, enable_file_sharing=enable_file_sharing, - legacy_docs_count=legacy_count + legacy_docs_count=legacy_count, + allowed_extensions=allowed_extensions_str ) @app.route('/set_active_group', methods=['POST']) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_group_workspaces") diff --git a/application/single_app/route_frontend_groups.py b/application/single_app/route_frontend_groups.py index 259c87b1..d48ec561 100644 --- a/application/single_app/route_frontend_groups.py +++ b/application/single_app/route_frontend_groups.py @@ -3,9 +3,11 @@ from config import * from functions_authentication import * from functions_settings import * +from swagger_wrapper import swagger_route, get_auth_security def register_route_frontend_groups(app): @app.route("/my_groups", methods=["GET"]) + 
@swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_group_workspaces") @@ -16,15 +18,17 @@ def my_groups(): user = session.get('user', {}) settings = get_settings() require_member_of_create_group = settings.get("require_member_of_create_group", False) + enable_group_creation = settings.get("enable_group_creation", True) # Check if user can create groups - can_create_groups = True - if require_member_of_create_group: + can_create_groups = enable_group_creation # First check if group creation is enabled system-wide + if can_create_groups and require_member_of_create_group: can_create_groups = 'roles' in user and 'CreateGroups' in user['roles'] return render_template("my_groups.html", can_create_groups=can_create_groups) @app.route("/groups/", methods=["GET"]) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_group_workspaces") diff --git a/application/single_app/route_frontend_notifications.py b/application/single_app/route_frontend_notifications.py new file mode 100644 index 00000000..9897f01c --- /dev/null +++ b/application/single_app/route_frontend_notifications.py @@ -0,0 +1,28 @@ +# route_frontend_notifications.py + +from config import * +from functions_authentication import * +from functions_settings import * +from swagger_wrapper import swagger_route, get_auth_security + +def register_route_frontend_notifications(app): + + @app.route("/notifications") + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def notifications(): + """ + Renders the notifications page for the current user. + """ + settings = get_settings() + public_settings = sanitize_settings_for_user(settings) + user_id = get_current_user_id() + user_settings = get_user_settings(user_id) + + return render_template( + "notifications.html", + app_settings=public_settings, + settings=public_settings, + user_settings=user_settings + ) diff --git a/application/single_app/route_frontend_profile.py b/application/single_app/route_frontend_profile.py index f864372e..f03c92bb 100644 --- a/application/single_app/route_frontend_profile.py +++ b/application/single_app/route_frontend_profile.py @@ -2,15 +2,19 @@ from config import * from functions_authentication import * +from swagger_wrapper import swagger_route, get_auth_security +import traceback def register_route_frontend_profile(app): @app.route('/profile') + @swagger_route(security=get_auth_security()) @login_required def profile(): user = session.get('user') return render_template('profile.html', user=user) @app.route('/api/profile/image/refresh', methods=['POST']) + @swagger_route(security=get_auth_security()) @login_required @user_required def refresh_profile_image(): @@ -51,5 +55,308 @@ def refresh_profile_image(): return jsonify({"error": "Failed to update profile image settings"}), 500 except Exception as e: - print(f"Error refreshing profile image for user {user_id}: {e}") - return jsonify({"error": "Internal server error"}), 500 \ No newline at end of file + debug_print(f"Error refreshing profile image for user {user_id}: {e}") + log_event(f"Error refreshing profile image for user {user_id}: {str(e)}", level=logging.ERROR) + return jsonify({"error": "Internal server error"}), 500 + + @app.route('/api/user/activity-trends', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def get_user_activity_trends(): + """ + Get time-series activity trends for the current user over the last 30 days. 
+ Returns data for login activity, conversation creation, document uploads, and token usage. + """ + try: + from datetime import datetime, timezone, timedelta + from collections import defaultdict + from config import cosmos_activity_logs_container, cosmos_conversations_container + from config import cosmos_user_documents_container, cosmos_messages_container + + user_id = get_current_user_id() + if not user_id: + return jsonify({"error": "Unable to identify user"}), 401 + + # Calculate date range for last 30 days + end_date = datetime.now(timezone.utc) + start_date = end_date - timedelta(days=30) + + # Initialize data structures for daily aggregation + logins_by_date = defaultdict(int) + conversations_by_date = defaultdict(int) + conversations_delete_by_date = defaultdict(int) + documents_upload_by_date = defaultdict(int) + documents_delete_by_date = defaultdict(int) + tokens_by_date = defaultdict(int) + + # Query 1: Get login activity from activity_logs + try: + login_query = """ + SELECT c.timestamp, c.created_at FROM c + WHERE c.user_id = @user_id + AND c.activity_type = 'user_login' + AND (c.timestamp >= @start_date OR c.created_at >= @start_date) + """ + login_params = [ + {"name": "@user_id", "value": user_id}, + {"name": "@start_date", "value": start_date.isoformat()} + ] + login_records = list(cosmos_activity_logs_container.query_items( + query=login_query, + parameters=login_params, + enable_cross_partition_query=True + )) + + for record in login_records: + timestamp = record.get('timestamp') or record.get('created_at') + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + date_key = dt.strftime('%Y-%m-%d') + logins_by_date[date_key] += 1 + except: + pass + except Exception as e: + debug_print(f"Error fetching login trends: {e}") + log_event(f"Error fetching login trends: {str(e)}", level=logging.ERROR) + + # Query 2: Get conversation creation activity from activity_logs + try: + conv_query = """ + SELECT c.timestamp, c.created_at FROM c + WHERE c.user_id = @user_id + AND c.activity_type = 'conversation_creation' + AND (c.timestamp >= @start_date OR c.created_at >= @start_date) + """ + conv_params = [ + {"name": "@user_id", "value": user_id}, + {"name": "@start_date", "value": start_date.isoformat()} + ] + conv_records = list(cosmos_activity_logs_container.query_items( + query=conv_query, + parameters=conv_params, + enable_cross_partition_query=True + )) + + for record in conv_records: + timestamp = record.get('timestamp') or record.get('created_at') + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + date_key = dt.strftime('%Y-%m-%d') + conversations_by_date[date_key] += 1 + except: + pass + except Exception as e: + debug_print(f"Error fetching conversation trends: {e}") + log_event(f"Error fetching conversation trends: {str(e)}", level=logging.ERROR) + + # Query 2b: Get conversation deletion activity from activity_logs + try: + conv_delete_query = """ + SELECT c.timestamp, c.created_at FROM c + WHERE c.user_id = @user_id + AND c.activity_type = 'conversation_deletion' + AND (c.timestamp >= @start_date OR c.created_at >= @start_date) + """ + conv_delete_records = list(cosmos_activity_logs_container.query_items( + query=conv_delete_query, + parameters=conv_params, + enable_cross_partition_query=True + )) + + for record in conv_delete_records: + timestamp = record.get('timestamp') or record.get('created_at') + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + date_key = 
dt.strftime('%Y-%m-%d') + conversations_delete_by_date[date_key] += 1 + except: + pass + except Exception as e: + debug_print(f"Error fetching conversation deletion trends: {e}") + log_event(f"Error fetching conversation deletion trends: {str(e)}", level=logging.ERROR) + + # Query 3: Get document upload activity from activity_logs + try: + doc_upload_query = """ + SELECT c.timestamp, c.created_at FROM c + WHERE c.user_id = @user_id + AND c.activity_type = 'document_creation' + AND (c.timestamp >= @start_date OR c.created_at >= @start_date) + """ + doc_params = [ + {"name": "@user_id", "value": user_id}, + {"name": "@start_date", "value": start_date.isoformat()} + ] + doc_records = list(cosmos_activity_logs_container.query_items( + query=doc_upload_query, + parameters=doc_params, + enable_cross_partition_query=True + )) + + for record in doc_records: + timestamp = record.get('timestamp') or record.get('created_at') + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + date_key = dt.strftime('%Y-%m-%d') + documents_upload_by_date[date_key] += 1 + except: + pass + except Exception as e: + debug_print(f"Error fetching document upload trends: {e}") + log_event(f"Error fetching document upload trends: {str(e)}", level=logging.ERROR) + + # Query 3b: Get document delete activity from activity_logs + try: + doc_delete_query = """ + SELECT c.timestamp, c.created_at FROM c + WHERE c.user_id = @user_id + AND c.activity_type = 'document_deletion' + AND (c.timestamp >= @start_date OR c.created_at >= @start_date) + """ + doc_delete_records = list(cosmos_activity_logs_container.query_items( + query=doc_delete_query, + parameters=doc_params, + enable_cross_partition_query=True + )) + + for record in doc_delete_records: + timestamp = record.get('timestamp') or record.get('created_at') + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + date_key = dt.strftime('%Y-%m-%d') + documents_delete_by_date[date_key] += 1 + except: + pass + except Exception as e: + debug_print(f"Error fetching document delete trends: {e}") + log_event(f"Error fetching document delete trends: {str(e)}", level=logging.ERROR) + + # Query 4: Get token usage from activity_logs + try: + token_query = """ + SELECT c.timestamp, c.created_at, c.usage FROM c + WHERE c.user_id = @user_id + AND c.activity_type = 'token_usage' + AND (c.timestamp >= @start_date OR c.created_at >= @start_date) + """ + token_params = [ + {"name": "@user_id", "value": user_id}, + {"name": "@start_date", "value": start_date.isoformat()} + ] + token_records = list(cosmos_activity_logs_container.query_items( + query=token_query, + parameters=token_params, + enable_cross_partition_query=True + )) + + for record in token_records: + timestamp = record.get('timestamp') or record.get('created_at') + if timestamp: + try: + dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00')) + date_key = dt.strftime('%Y-%m-%d') + # Extract total tokens from usage field + usage = record.get('usage', {}) + total_tokens = usage.get('total_tokens', 0) + tokens_by_date[date_key] += total_tokens + except: + pass + except Exception as e: + debug_print(f"Error fetching token usage trends: {e}") + log_event(f"Error fetching token usage trends: {str(e)}", level=logging.ERROR) + + # Generate complete date range (last 30 days) + date_range = [] + for i in range(30): + date = end_date - timedelta(days=29-i) + date_range.append(date.strftime('%Y-%m-%d')) + + # Format data for Chart.js + logins_data = [{"date": date, "count": 
logins_by_date.get(date, 0)} for date in date_range] + conversations_data = { + "creates": [{"date": date, "count": conversations_by_date.get(date, 0)} for date in date_range], + "deletes": [{"date": date, "count": conversations_delete_by_date.get(date, 0)} for date in date_range] + } + documents_data = { + "uploads": [{"date": date, "count": documents_upload_by_date.get(date, 0)} for date in date_range], + "deletes": [{"date": date, "count": documents_delete_by_date.get(date, 0)} for date in date_range] + } + tokens_data = [{"date": date, "tokens": tokens_by_date.get(date, 0)} for date in date_range] + + # Get storage metrics from user settings + from functions_settings import get_user_settings + user_settings = get_user_settings(user_id) + metrics = user_settings.get('settings', {}).get('metrics', {}) + document_metrics = metrics.get('document_metrics', {}) + + storage_data = { + "ai_search_size": document_metrics.get('ai_search_size', 0), + "storage_account_size": document_metrics.get('storage_account_size', 0) + } + + return jsonify({ + "success": True, + "logins": logins_data, + "conversations": conversations_data, + "documents": documents_data, + "tokens": tokens_data, + "storage": storage_data + }), 200 + + except Exception as e: + debug_print(f"Error fetching user activity trends: {e}") + log_event(f"Error fetching user activity trends: {str(e)}", level=logging.ERROR) + traceback.print_exc() + return jsonify({"error": "Failed to fetch activity trends"}), 500 + + @app.route('/api/user/settings', methods=['GET']) + @swagger_route(security=get_auth_security()) + @login_required + @user_required + def get_user_settings_api(): + """ + Get current user's settings including cached metrics. + """ + try: + from functions_settings import get_user_settings + + user_id = get_current_user_id() + if not user_id: + return jsonify({"error": "Unable to identify user"}), 401 + + user_settings = get_user_settings(user_id) + + # Extract relevant data for frontend + settings = user_settings.get('settings', {}) + metrics = settings.get('metrics', {}) + + # Return ALL settings from Cosmos for backwards compatibility + # This matches the old API behavior: return jsonify(user_settings_data), 200 + response_data = { + "success": True, + "settings": settings, # Return entire settings object + "metrics": metrics, + "retention_policy": { + "enabled": settings.get('retention_policy_enabled', False), + "days": settings.get('retention_policy_days', 30) + }, + "display_name": user_settings.get('display_name'), + "email": user_settings.get('email'), + "lastUpdated": user_settings.get('lastUpdated'), + # Add at root level for backwards compatibility with agents code + "selected_agent": settings.get('selected_agent') + } + + return jsonify(response_data), 200 + + except Exception as e: + debug_print(f"Error fetching user settings: {e}") + log_event(f"Error fetching user settings: {str(e)}", level=logging.ERROR) + traceback.print_exc() + return jsonify({"error": "Failed to fetch user settings"}), 500 \ No newline at end of file diff --git a/application/single_app/route_frontend_public_workspaces.py b/application/single_app/route_frontend_public_workspaces.py index 2d1099e4..05d5b982 100644 --- a/application/single_app/route_frontend_public_workspaces.py +++ b/application/single_app/route_frontend_public_workspaces.py @@ -3,14 +3,17 @@ from config import * from functions_authentication import * from functions_settings import * +from swagger_wrapper import swagger_route, get_auth_security def 
register_route_frontend_public_workspaces(app): @app.route("/my_public_workspaces", methods=["GET"]) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_public_workspaces") def my_public_workspaces(): user = session.get('user', {}) + user_id = get_current_user_id() settings = get_settings() require_member_of_create_public_workspace = settings.get("require_member_of_create_public_workspace", False) @@ -19,15 +22,21 @@ def my_public_workspaces(): if require_member_of_create_public_workspace: can_create_public_workspaces = 'roles' in user and 'CreatePublicWorkspaces' in user['roles'] + # Get user settings to retrieve active public workspace ID + user_settings = get_user_settings(user_id) + active_public_workspace_id = user_settings.get("settings", {}).get("activePublicWorkspaceOid", "") + public_settings = sanitize_settings_for_user(settings) return render_template( "my_public_workspaces.html", settings=public_settings, app_settings=public_settings, - can_create_public_workspaces=can_create_public_workspaces + can_create_public_workspaces=can_create_public_workspaces, + active_public_workspace_id=active_public_workspace_id ) @app.route("/public_workspaces/", methods=["GET"]) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_public_workspaces") @@ -42,6 +51,7 @@ def manage_public_workspace(workspace_id): ) @app.route("/public_workspaces", methods=["GET"]) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_public_workspaces") @@ -59,17 +69,13 @@ def public_workspaces(): enable_video_file_support = settings.get('enable_video_file_support', False) enable_audio_file_support = settings.get('enable_audio_file_support', False) - # Build allowed extensions string as in workspace.html - allowed_extensions = [ - "txt", "pdf", "docx", "xlsx", "xls", "csv", "pptx", "html", - "jpg", "jpeg", "png", "bmp", "tiff", "tif", "heif", "md", "json" - ] - if enable_video_file_support in [True, 'True', 'true']: - allowed_extensions += ["mp4", "mov", "avi", "wmv", "mkv", "webm"] - if enable_audio_file_support in [True, 'True', 'true']: - allowed_extensions += ["mp3", "wav", "ogg", "aac", "flac", "m4a"] + # Get allowed extensions from central function and build allowed extensions string + allowed_extensions = sorted(get_allowed_extensions( + enable_video=enable_video_file_support in [True, 'True', 'true'], + enable_audio=enable_audio_file_support in [True, 'True', 'true'] + )) allowed_extensions_str = "Allowed: " + ", ".join(allowed_extensions) - + return render_template( 'public_workspaces.html', settings=public_settings, @@ -82,6 +88,7 @@ def public_workspaces(): ) @app.route("/public_directory", methods=["GET"]) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_public_workspaces") @@ -100,6 +107,7 @@ def public_directory(): ) @app.route('/set_active_public_workspace', methods=['POST']) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_public_workspaces") diff --git a/application/single_app/route_frontend_safety.py b/application/single_app/route_frontend_safety.py index bc923786..32773199 100644 --- a/application/single_app/route_frontend_safety.py +++ b/application/single_app/route_frontend_safety.py @@ -3,12 +3,13 @@ from config import * from functions_authentication import * from functions_settings import * +from swagger_wrapper import swagger_route, 
get_auth_security def register_route_frontend_safety(app): @app.route('/admin/safety_violations', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required - @admin_required @safety_violation_admin_required @enabled_required("enable_content_safety") def admin_safety_violations(): @@ -18,6 +19,7 @@ def admin_safety_violations(): return render_template('admin_safety_violations.html') @app.route('/safety_violations', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_content_safety") diff --git a/application/single_app/route_frontend_workspace.py b/application/single_app/route_frontend_workspace.py index b2d0ec26..2ca1aad9 100644 --- a/application/single_app/route_frontend_workspace.py +++ b/application/single_app/route_frontend_workspace.py @@ -3,9 +3,11 @@ from config import * from functions_authentication import * from functions_settings import * +from swagger_wrapper import swagger_route, get_auth_security def register_route_frontend_workspace(app): @app.route('/workspace', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required @user_required @enabled_required("enable_user_workspace") @@ -40,7 +42,14 @@ def workspace(): ) ) legacy_count = legacy_docs_from_cosmos[0] if legacy_docs_from_cosmos else 0 - + + # Get allowed extensions from central function and build allowed extensions string + allowed_extensions = sorted(get_allowed_extensions( + enable_video=enable_video_file_support in [True, 'True', 'true'], + enable_audio=enable_audio_file_support in [True, 'True', 'true'] + )) + allowed_extensions_str = "Allowed: " + ", ".join(allowed_extensions) + return render_template( 'workspace.html', settings=public_settings, @@ -49,7 +58,8 @@ def workspace(): enable_video_file_support=enable_video_file_support, enable_audio_file_support=enable_audio_file_support, enable_file_sharing=enable_file_sharing, - legacy_docs_count=legacy_count + legacy_docs_count=legacy_count, + allowed_extensions=allowed_extensions_str ) \ No newline at end of file diff --git a/application/single_app/route_migration.py b/application/single_app/route_migration.py index d5af33b6..658c97c9 100644 --- a/application/single_app/route_migration.py +++ b/application/single_app/route_migration.py @@ -9,11 +9,15 @@ from functions_personal_agents import migrate_agents_from_user_settings, get_personal_agents from functions_personal_actions import migrate_actions_from_user_settings, get_personal_actions from functions_appinsights import log_event +from swagger_wrapper import swagger_route, get_auth_security import logging bp_migration = Blueprint('migration', __name__) @bp_migration.route('/api/migrate/agents', methods=['POST']) +@swagger_route( + security=get_auth_security() +) @login_required def migrate_user_agents(): """Migrate user agents from user settings to personal_agents container.""" @@ -41,6 +45,9 @@ def migrate_user_agents(): return jsonify({'error': 'Failed to migrate agents'}), 500 @bp_migration.route('/api/migrate/actions', methods=['POST']) +@swagger_route( + security=get_auth_security() +) @login_required def migrate_user_actions(): """Migrate user actions/plugins from user settings to personal_actions container.""" @@ -68,6 +75,9 @@ def migrate_user_actions(): return jsonify({'error': 'Failed to migrate actions'}), 500 @bp_migration.route('/api/migrate/all', methods=['POST']) +@swagger_route( + security=get_auth_security() +) @login_required def migrate_all_user_data(): """Migrate both agents and 
actions from user settings to personal containers.""" @@ -121,6 +131,9 @@ def migrate_all_user_data(): return jsonify({'error': 'Failed to migrate user data'}), 500 @bp_migration.route('/api/migrate/status', methods=['GET']) +@swagger_route( + security=get_auth_security() +) @login_required def get_migration_status(): """Check migration status and current data in personal containers.""" diff --git a/application/single_app/route_openapi.py b/application/single_app/route_openapi.py index ab030c20..238e9a4c 100644 --- a/application/single_app/route_openapi.py +++ b/application/single_app/route_openapi.py @@ -1,3 +1,4 @@ +# route_openapi.py """ OpenAPI Plugin Routes @@ -12,11 +13,15 @@ from functions_authentication import login_required, user_required from openapi_security import openapi_validator from openapi_auth_analyzer import analyze_openapi_authentication, get_authentication_help_text +from swagger_wrapper import swagger_route, get_auth_security +from functions_security import is_valid_storage_name +from functions_debug import debug_print def register_openapi_routes(app): """Register OpenAPI-related routes.""" @app.route('/api/openapi/upload', methods=['POST']) + @swagger_route(security=get_auth_security()) @login_required @user_required def upload_openapi_spec(): @@ -125,13 +130,14 @@ def upload_openapi_spec(): os.unlink(temp_path) except Exception as e: - current_app.logger.error(f"Error uploading OpenAPI spec: {str(e)}") + debug_print(f"Error uploading OpenAPI spec: {str(e)}") return jsonify({ 'success': False, 'error': 'Internal server error during upload' }), 500 @app.route('/api/openapi/validate-url', methods=['POST']) + @swagger_route(security=get_auth_security()) @login_required @user_required def validate_openapi_url(): @@ -186,6 +192,11 @@ def validate_openapi_url(): unique_id = str(uuid.uuid4())[:8] base_name, ext = os.path.splitext(safe_filename) stored_filename = f"{base_name}_{unique_id}{ext}" + if not is_valid_storage_name(stored_filename): + return jsonify({ + 'success': False, + 'error': 'Invalid storage filename' + }), 400 storage_path = os.path.join(upload_dir, stored_filename) # Save spec to file @@ -217,13 +228,14 @@ def validate_openapi_url(): }) except Exception as e: - current_app.logger.error(f"Error validating OpenAPI URL: {str(e)}") + debug_print(f"Error validating OpenAPI URL: {str(e)}") return jsonify({ 'success': False, 'error': 'Internal server error during validation' }), 500 @app.route('/api/openapi/download-from-url', methods=['POST']) + @swagger_route(security=get_auth_security()) @login_required @user_required def download_openapi_from_url(): @@ -293,6 +305,11 @@ def download_openapi_from_url(): unique_id = str(uuid.uuid4())[:8] base_name, ext = os.path.splitext(safe_filename) stored_filename = f"{base_name}_{unique_id}{ext}" + if not is_valid_storage_name(stored_filename): + return jsonify({ + 'success': False, + 'error': 'Invalid storage filename' + }), 400 storage_path = os.path.join(upload_dir, stored_filename) # Save spec to file @@ -321,13 +338,14 @@ def download_openapi_from_url(): }) except Exception as e: - current_app.logger.error(f"Error downloading OpenAPI spec from URL: {str(e)}") + debug_print(f"Error downloading OpenAPI spec from URL: {str(e)}") return jsonify({ 'success': False, 'error': 'Internal server error during download' }), 500 @app.route('/api/openapi/list-uploaded', methods=['GET']) + @swagger_route(security=get_auth_security()) @login_required @user_required def list_uploaded_specs(): @@ -368,7 +386,7 @@ def 
list_uploaded_specs(): 'last_modified': os.path.getmtime(file_path) }) except Exception as e: - current_app.logger.warning(f"Could not read spec file {filename}: {str(e)}") + debug_print(f"Could not read spec file {filename}: {str(e)}") continue return jsonify({ @@ -377,13 +395,14 @@ def list_uploaded_specs(): }) except Exception as e: - current_app.logger.error(f"Error listing OpenAPI specs: {str(e)}") + debug_print(f"Error listing OpenAPI specs: {str(e)}") return jsonify({ 'success': False, 'error': 'Internal server error while listing specifications' }), 500 @app.route('/api/openapi/analyze-auth', methods=['POST']) + @swagger_route(security=get_auth_security()) @login_required @user_required def analyze_openapi_auth(): @@ -424,7 +443,7 @@ def analyze_openapi_auth(): }) except Exception as e: - current_app.logger.error(f"Error analyzing authentication: {str(e)}") + debug_print(f"Error analyzing authentication: {str(e)}") return jsonify({ 'success': False, 'error': 'Internal server error during authentication analysis' diff --git a/application/single_app/route_plugin_logging.py b/application/single_app/route_plugin_logging.py index 3cbbb6d2..940d540e 100644 --- a/application/single_app/route_plugin_logging.py +++ b/application/single_app/route_plugin_logging.py @@ -7,12 +7,16 @@ from functions_authentication import login_required, get_current_user_id from functions_appinsights import log_event from semantic_kernel_plugins.plugin_invocation_logger import get_plugin_logger +from swagger_wrapper import swagger_route, get_auth_security import logging bpl = Blueprint('plugin_logging', __name__) @bpl.route('/api/plugins/invocations', methods=['GET']) +@swagger_route( + security=get_auth_security() +) @login_required def get_plugin_invocations(): """Get recent plugin invocations for the current user.""" @@ -57,6 +61,9 @@ def get_plugin_invocations(): @bpl.route('/api/plugins/stats', methods=['GET']) +@swagger_route( + security=get_auth_security() +) @login_required def get_plugin_stats(): """Get plugin usage statistics.""" @@ -111,6 +118,9 @@ def get_plugin_stats(): @bpl.route('/api/plugins/invocations/recent', methods=['GET']) +@swagger_route( + security=get_auth_security() +) @login_required def get_recent_invocations(): """Get the most recent plugin invocations across all users (admin only).""" @@ -151,6 +161,9 @@ def get_recent_invocations(): @bpl.route('/api/plugins/invocations/', methods=['GET']) +@swagger_route( + security=get_auth_security() +) @login_required def get_plugin_specific_invocations(plugin_name): """Get invocations for a specific plugin.""" @@ -203,6 +216,9 @@ def get_plugin_specific_invocations(plugin_name): @bpl.route('/api/plugins/clear-logs', methods=['POST']) +@swagger_route( + security=get_auth_security() +) @login_required def clear_plugin_logs(): """Clear plugin invocation logs (admin only or for testing).""" @@ -241,6 +257,9 @@ def clear_plugin_logs(): @bpl.route('/api/plugins/export-logs', methods=['GET']) +@swagger_route( + security=get_auth_security() +) @login_required def export_plugin_logs(): """Export plugin invocation logs for the current user.""" diff --git a/application/single_app/semantic_kernel_loader.py b/application/single_app/semantic_kernel_loader.py index daadc587..78f54203 100644 --- a/application/single_app/semantic_kernel_loader.py +++ b/application/single_app/semantic_kernel_loader.py @@ -1,14 +1,17 @@ -# semantic_kernel_loader.py +# semantic_kernel_loader.py """ Loader for Semantic Kernel plugins/actions from app settings. 
- Loads plugin/action manifests from settings (CosmosDB) - Registers plugins with the Semantic Kernel instance """ +import logging +import builtins from agent_orchestrator_groupchat import OrchestratorAgent, SCGroupChatManager from semantic_kernel import Kernel from semantic_kernel.agents import Agent from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior from semantic_kernel.core_plugins import TimePlugin, HttpPlugin from semantic_kernel.core_plugins.wait_plugin import WaitPlugin from semantic_kernel_plugins.math_plugin import MathPlugin @@ -17,19 +20,28 @@ from semantic_kernel_plugins.embedding_model_plugin import EmbeddingModelPlugin from semantic_kernel_plugins.fact_memory_plugin import FactMemoryPlugin from functions_settings import get_settings, get_user_settings +from foundry_agent_runtime import AzureAIFoundryChatCompletionAgent from functions_appinsights import log_event, get_appinsights_logger from functions_authentication import get_current_user_id from semantic_kernel_plugins.plugin_health_checker import PluginHealthChecker, PluginErrorRecovery from semantic_kernel_plugins.logged_plugin_loader import create_logged_plugin_loader from semantic_kernel_plugins.plugin_invocation_logger import get_plugin_logger +from semantic_kernel_plugins.smart_http_plugin import SmartHttpPlugin from functions_debug import debug_print from flask import g -import logging -import importlib -import os -import importlib.util -import inspect -import builtins +from functions_keyvault import validate_secret_name_dynamic, retrieve_secret_from_key_vault, retrieve_secret_from_key_vault_by_full_name, SecretReturnType +from functions_global_actions import get_global_actions +from functions_global_agents import get_global_agents +from functions_group_agents import get_group_agent, get_group_agents +from functions_group_actions import get_group_actions +from functions_group import require_active_group +from functions_personal_actions import get_personal_actions, ensure_migration_complete as ensure_actions_migration_complete +from functions_personal_agents import get_personal_agents, ensure_migration_complete as ensure_agents_migration_complete +from semantic_kernel_plugins.plugin_loader import discover_plugins +from semantic_kernel_plugins.openapi_plugin_factory import OpenApiPluginFactory +import app_settings_cache + + # Agent and Azure OpenAI chat service imports log_event("[SK Loader] Starting loader imports") @@ -92,6 +104,10 @@ def resolve_agent_config(agent, settings): debug_print(f"[SK Loader] resolve_agent_config called for agent: {agent.get('name')}") debug_print(f"[SK Loader] Agent config: {agent}") debug_print(f"[SK Loader] Agent is_global flag: {agent.get('is_global')}") + debug_print(f"[SK Loader] Agent is_group flag: {agent.get('is_group')}") + agent_type = (agent.get('agent_type') or 'local').lower() + agent['agent_type'] = agent_type + other_settings = agent.get("other_settings", {}) or {} gpt_model_obj = settings.get('gpt_model', {}) selected_model = gpt_model_obj.get('selected', [{}])[0] if gpt_model_obj.get('selected') else {} @@ -104,8 +120,24 @@ def resolve_agent_config(agent, settings): per_user_enabled = settings.get('per_user_semantic_kernel', False) allow_user_custom_agent_endpoints = settings.get('allow_user_custom_agent_endpoints', False) allow_group_custom_agent_endpoints = settings.get('allow_group_custom_agent_endpoints', False) + is_group_agent = 
agent.get("is_group", False) + is_global_agent = agent.get("is_global", False) + + if is_group_agent: + allow_custom_agent_endpoints = allow_group_custom_agent_endpoints + elif is_global_agent: + allow_custom_agent_endpoints = False + else: + allow_custom_agent_endpoints = allow_user_custom_agent_endpoints debug_print(f"[SK Loader] user_apim_enabled: {user_apim_enabled}, global_apim_enabled: {global_apim_enabled}, per_user_enabled: {per_user_enabled}") + debug_print(f"[SK Loader] allow_user_custom_agent_endpoints: {allow_user_custom_agent_endpoints}, allow_group_custom_agent_endpoints: {allow_group_custom_agent_endpoints}, allow_custom_agent_endpoints_resolved: {allow_custom_agent_endpoints}") + debug_print(f"[SK Loader] Max completion tokens from agent: {agent.get('max_completion_tokens')}") + + def resolve_secret_value_if_needed(value, scope_value, source, scope): + if validate_secret_name_dynamic(value): + return retrieve_secret_from_key_vault(value, scope_value, scope, source) + return value def any_filled(*fields): return any(bool(f) for f in fields) @@ -114,41 +146,109 @@ def all_filled(*fields): return all(bool(f) for f in fields) def get_user_apim(): - return ( - agent.get("azure_apim_gpt_endpoint"), - agent.get("azure_apim_gpt_subscription_key"), - agent.get("azure_apim_gpt_deployment"), - agent.get("azure_apim_gpt_api_version") - ) + endpoint = agent.get("azure_apim_gpt_endpoint") + key = agent.get("azure_apim_gpt_subscription_key") + deployment = agent.get("azure_apim_gpt_deployment") + api_version = agent.get("azure_apim_gpt_api_version") + + # Check if key vault secret storage is enabled in settings + if settings.get("enable_key_vault_secret_storage", False) and settings.get("key_vault_name") and key: + try: + if validate_secret_name_dynamic(key): + # Try to retrieve the secret from Key Vault + resolved_key = retrieve_secret_from_key_vault_by_full_name(key) + if resolved_key: + # Update the agent dict with the resolved key for this session + agent["azure_apim_gpt_subscription_key"] = resolved_key + key = resolved_key + except Exception as e: + log_event(f"[SK Loader] Failed to resolve Key Vault secret for agent '{agent.get('name')}' in get_user_apim: {e}", level=logging.ERROR, exceptionTraceback=True) + # Fallback to using the value as-is + return (endpoint, key, deployment, api_version) def get_global_apim(): - return ( - settings.get("azure_apim_gpt_endpoint"), - settings.get("azure_apim_gpt_subscription_key"), - first_if_comma(settings.get("azure_apim_gpt_deployment")), - settings.get("azure_apim_gpt_api_version") - ) + endpoint = settings.get("azure_apim_gpt_endpoint") + key = settings.get("azure_apim_gpt_subscription_key") + deployment = first_if_comma(settings.get("azure_apim_gpt_deployment")) + api_version = settings.get("azure_apim_gpt_api_version") + + # Check if key vault secret storage is enabled in settings + if settings.get("enable_key_vault_secret_storage", False) and settings.get("key_vault_name") and key: + try: + if validate_secret_name_dynamic(key): + # Try to retrieve the secret from Key Vault + resolved_key = retrieve_secret_from_key_vault_by_full_name(key) + if resolved_key: + # Update the settings dict with the resolved key for this session + settings["azure_apim_gpt_subscription_key"] = resolved_key + key = resolved_key + except Exception as e: + log_event(f"[SK Loader] Failed to resolve Key Vault secret in get_global_apim: {e}", level=logging.ERROR, exceptionTraceback=True) + # Fallback to using the value as-is + return (endpoint, key, deployment, 
api_version) def get_user_gpt(): - return ( - agent.get("azure_openai_gpt_endpoint"), - agent.get("azure_openai_gpt_key"), - agent.get("azure_openai_gpt_deployment"), - agent.get("azure_openai_gpt_api_version") - ) + endpoint = agent.get("azure_openai_gpt_endpoint") + key = agent.get("azure_openai_gpt_key") + deployment = agent.get("azure_openai_gpt_deployment") + api_version = agent.get("azure_openai_gpt_api_version") + + # Check if key vault secret storage is enabled in settings + if settings.get("enable_key_vault_secret_storage", False) and settings.get("key_vault_name") and key: + try: + if validate_secret_name_dynamic(key): + # Try to retrieve the secret from Key Vault + resolved_key = retrieve_secret_from_key_vault_by_full_name(key) + if resolved_key: + # Update the agent dict with the resolved key for this session + agent["azure_openai_gpt_key"] = resolved_key + key = resolved_key + except Exception as e: + log_event(f"[SK Loader] Failed to resolve Key Vault secret for agent '{agent.get('name')}' in get_user_gpt: {e}", level=logging.ERROR, exceptionTraceback=True) + # Fallback to using the value as-is + return (endpoint, key, deployment, api_version) def get_global_gpt(): - return ( - settings.get("azure_openai_gpt_endpoint") or selected_model.get("endpoint"), - settings.get("azure_openai_gpt_key") or selected_model.get("key"), - settings.get("azure_openai_gpt_deployment") or selected_model.get("deploymentName"), - settings.get("azure_openai_gpt_api_version") or selected_model.get("api_version") - ) + endpoint = settings.get("azure_openai_gpt_endpoint") or selected_model.get("endpoint") + key = settings.get("azure_openai_gpt_key") or selected_model.get("key") + deployment = settings.get("azure_openai_gpt_deployment") or selected_model.get("deploymentName") + api_version = settings.get("azure_openai_gpt_api_version") or selected_model.get("api_version") + + # Check if key vault secret storage is enabled in settings + if settings.get("enable_key_vault_secret_storage", False) and settings.get("key_vault_name") and key: + try: + if validate_secret_name_dynamic(key): + # Try to retrieve the secret from Key Vault + resolved_key = retrieve_secret_from_key_vault_by_full_name(key) + if resolved_key: + # Update the settings dict with the resolved key for this session + settings["azure_openai_gpt_key"] = resolved_key + key = resolved_key + except Exception as e: + log_event(f"[SK Loader] Failed to resolve Key Vault secret in get_global_gpt: {e}", level=logging.ERROR, exceptionTraceback=True) + # Fallback to using the value as-is + return (endpoint, key, deployment, api_version) def merge_fields(primary, fallback): return tuple(p if p not in [None, ""] else f for p, f in zip(primary, fallback)) # If per-user mode is not enabled, ignore all user/agent-specific config fields + if agent_type == "aifoundry": + return { + "name": agent.get("name"), + "display_name": agent.get("display_name", agent.get("name")), + "description": agent.get("description", ""), + "id": agent.get("id", ""), + "default_agent": agent.get("default_agent", False), + "is_global": agent.get("is_global", False), + "is_group": agent.get("is_group", False), + "group_id": agent.get("group_id"), + "group_name": agent.get("group_name"), + "agent_type": "aifoundry", + "other_settings": other_settings, + "max_completion_tokens": agent.get("max_completion_tokens", -1), + } + if not per_user_enabled: try: if global_apim_enabled: @@ -171,7 +271,13 @@ def merge_fields(primary, fallback): "id": agent.get("id", ""), "default_agent": 
agent.get("default_agent", False), "is_global": agent.get("is_global", False), - "enable_agent_gpt_apim": agent.get("enable_agent_gpt_apim", False) + "is_group": agent.get("is_group", False), + "group_id": agent.get("group_id"), + "group_name": agent.get("group_name"), + "enable_agent_gpt_apim": agent.get("enable_agent_gpt_apim", False), + "max_completion_tokens": agent.get("max_completion_tokens", -1), + "agent_type": agent_type or "local", + "other_settings": other_settings, } except Exception as e: log_event(f"[SK Loader] Error resolving agent config: {e}", level=logging.ERROR, exceptionTraceback=True) @@ -181,22 +287,24 @@ def merge_fields(primary, fallback): g_apim = get_global_apim() u_gpt = get_user_gpt() g_gpt = get_global_gpt() + can_use_agent_endpoints = allow_custom_agent_endpoints + user_apim_allowed = user_apim_enabled and can_use_agent_endpoints # 1. User APIM enabled and any user APIM values set: use user APIM (merge with global APIM if needed) - if user_apim_enabled and any_filled(*u_apim) and allow_user_custom_agent_endpoints: + if user_apim_allowed and any_filled(*u_apim): debug_print(f"[SK Loader] Using user APIM with global fallback") merged = merge_fields(u_apim, g_apim if global_apim_enabled and any_filled(*g_apim) else (None, None, None, None)) endpoint, key, deployment, api_version = merged # 2. User APIM enabled but no user APIM values, and global APIM enabled and present: use global APIM - elif user_apim_enabled and global_apim_enabled and any_filled(*g_apim) and allow_group_custom_agent_endpoints: + elif user_apim_enabled and global_apim_enabled and any_filled(*g_apim): debug_print(f"[SK Loader] Using global APIM (user APIM enabled but not present)") endpoint, key, deployment, api_version = g_apim # 3. User GPT config is FULLY filled: use user GPT (all fields filled) - elif all_filled(*u_gpt) and allow_user_custom_agent_endpoints: + elif all_filled(*u_gpt) and can_use_agent_endpoints: debug_print(f"[SK Loader] Using agent GPT config (all fields filled)") endpoint, key, deployment, api_version = u_gpt # 4. User GPT config is PARTIALLY filled, global APIM is NOT enabled: merge user GPT with global GPT - elif any_filled(*u_gpt) and not global_apim_enabled and allow_user_custom_agent_endpoints: + elif any_filled(*u_gpt) and not global_apim_enabled and can_use_agent_endpoints: debug_print(f"[SK Loader] Using agent GPT config (partially filled, merging with global GPT, global APIM not enabled)") endpoint, key, deployment, api_version = merge_fields(u_gpt, g_gpt) # 5. 
Global APIM enabled and present: use global APIM @@ -222,7 +330,13 @@ def merge_fields(primary, fallback): "id": agent.get("id", ""), "default_agent": agent.get("default_agent", False), # [Deprecated, use 'selected_agent' or 'global_selected_agent' in agent config] "is_global": agent.get("is_global", False), # Ensure we have this field - "enable_agent_gpt_apim": agent.get("enable_agent_gpt_apim", False) # Use this to check if APIM is enabled for the agent + "is_group": agent.get("is_group", False), + "group_id": agent.get("group_id"), + "group_name": agent.get("group_name"), + "enable_agent_gpt_apim": agent.get("enable_agent_gpt_apim", False), # Use this to check if APIM is enabled for the agent + "max_completion_tokens": agent.get("max_completion_tokens", -1), # -1 means use the model default determined by the service; 3.5-turbo is 4096, 4o is 16384, 4.1 is at least 32768 + "agent_type": agent_type or "local", + "other_settings": other_settings, } print(f"[SK Loader] Final resolved config for {agent.get('name')}: endpoint={bool(endpoint)}, key={bool(key)}, deployment={deployment}") @@ -236,9 +350,7 @@ def load_time_plugin(kernel: Kernel): ) def load_http_plugin(kernel: Kernel): - # Import the smart HTTP plugin for better content size management try: - from semantic_kernel_plugins.smart_http_plugin import SmartHttpPlugin # Use smart HTTP plugin with 75k character limit (≈50k tokens) smart_plugin = SmartHttpPlugin(max_content_size=75000, extract_text_only=True) kernel.add_plugin( @@ -335,8 +447,7 @@ def initialize_semantic_kernel(user_id: str=None, redis_client=None): "[SK Loader] Starting to load Semantic Kernel Agent and Plugins", level=logging.INFO ) - settings = get_settings() - print(f"[SK Loader] Settings check - per_user_semantic_kernel: {settings.get('per_user_semantic_kernel', False)}, user_id: {user_id}") + settings = app_settings_cache.get_settings_cache() log_event(f"[SK Loader] Settings check - per_user_semantic_kernel: {settings.get('per_user_semantic_kernel', False)}, user_id: {user_id}", level=logging.INFO) if settings.get('per_user_semantic_kernel', False) and user_id is not None: @@ -384,7 +495,7 @@ def initialize_semantic_kernel(user_id: str=None, redis_client=None): ) debug_print(f"[SK Loader] Semantic Kernel Agent and Plugins loading completed.") -def load_agent_specific_plugins(kernel, plugin_names, mode_label="global", user_id=None): +def load_agent_specific_plugins(kernel, plugin_names, settings, mode_label="global", user_id=None, group_id=None): """ Load specific plugins by name for an agent with enhanced logging.
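The endpoint/key/deployment fallback chain in `resolve_agent_config` above is easier to follow in isolation. A minimal, self-contained sketch of the three helpers it relies on, with made-up endpoint values and the APIM branches omitted, behaves as follows:

```python
# Standalone sketch of the merge behaviour used by resolve_agent_config.
# Tuples mirror (endpoint, key, deployment, api_version); the values are made up.
def any_filled(*fields):
    return any(bool(f) for f in fields)

def all_filled(*fields):
    return all(bool(f) for f in fields)

def merge_fields(primary, fallback):
    # Keep each field from primary unless it is empty/None, otherwise fall back.
    return tuple(p if p not in (None, "") else f for p, f in zip(primary, fallback))

agent_gpt = ("https://agent.openai.azure.com", "", "gpt-4o", None)                  # partially filled
global_gpt = ("https://org.openai.azure.com", "sk-global", "gpt-4o", "2024-06-01")

if all_filled(*agent_gpt):
    resolved = agent_gpt                            # agent-level config wins outright
elif any_filled(*agent_gpt):
    resolved = merge_fields(agent_gpt, global_gpt)  # fill the gaps from global settings
else:
    resolved = global_gpt

print(resolved)
# ('https://agent.openai.azure.com', 'sk-global', 'gpt-4o', '2024-06-01')
```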
@@ -393,29 +504,44 @@ def load_agent_specific_plugins(kernel, plugin_names, mode_label="global", user_ plugin_names: List of plugin names to load (from agent's actions_to_load) mode_label: 'per-user' or 'global' for logging user_id: User ID for per-user mode + group_id: Active group identifier when loading group-scoped plugins """ if not plugin_names: + debug_print(f"[SK Loader] No plugin names provided to load_agent_specific_plugins") return print(f"[SK Loader] Loading {len(plugin_names)} agent-specific plugins: {plugin_names}") try: + merge_global = settings.get('merge_global_semantic_kernel_with_workspace', False) # Create logged plugin loader for enhanced logging logged_loader = create_logged_plugin_loader(kernel) - # Get plugin manifests based on mode - if mode_label == "per-user": - from functions_personal_actions import get_personal_actions + if mode_label == "group": + if not group_id: + debug_print(f"[SK Loader] Warning: Group mode requested without group_id. Skipping plugin load.") + all_plugin_manifests = [] + else: + all_plugin_manifests = get_group_actions(group_id, return_type=SecretReturnType.NAME) + debug_print(f"[SK Loader] Retrieved {len(all_plugin_manifests)} group plugin manifests for group {group_id}") + if merge_global: + global_plugins = get_global_actions(return_type=SecretReturnType.NAME) + all_plugin_manifests.extend(global_plugins) + debug_print(f"[SK Loader] Merged global plugins for group mode. Total manifests: {len(all_plugin_manifests)}") + elif mode_label == "per-user": if user_id: - all_plugin_manifests = get_personal_actions(user_id) - print(f"[SK Loader] Retrieved {len(all_plugin_manifests)} personal plugin manifests for user {user_id}") + all_plugin_manifests = get_personal_actions(user_id, return_type=SecretReturnType.NAME) + if merge_global: + global_plugins = get_global_actions(return_type=SecretReturnType.NAME) + for g in global_plugins: + all_plugin_manifests.append(g) + debug_print(f"[SK Loader] Retrieved {len(all_plugin_manifests)} personal plugin manifests for user {user_id}") else: - print(f"[SK Loader] Warning: No user_id provided for per-user plugin loading") + debug_print(f"[SK Loader] Warning: No user_id provided for per-user plugin loading") all_plugin_manifests = [] else: # Global mode - get from global actions container - from functions_global_actions import get_global_actions - all_plugin_manifests = get_global_actions() + all_plugin_manifests = get_global_actions(return_type=SecretReturnType.NAME) print(f"[SK Loader] Retrieved {len(all_plugin_manifests)} global plugin manifests") # Filter manifests to only include requested plugins @@ -424,6 +550,18 @@ def load_agent_specific_plugins(kernel, plugin_names, mode_label="global", user_ p for p in all_plugin_manifests if p.get('name') in plugin_names or p.get('id') in plugin_names ] + + debug_print(f"[SK Loader] Filtered to {len(plugin_manifests)} plugin manifests after matching names/IDs") + debug_print(f"[SK Loader] Plugin manifests to load: {plugin_manifests}") + + if settings.get("enable_key_vault_secret_storage", False) and settings.get("key_vault_name"): + debug_print(f"[SK Loader] Resolving Key Vault secrets in plugin manifests if needed") + try: + plugin_manifests = [resolve_key_vault_secrets_in_plugins(p, settings) for p in plugin_manifests] + debug_print(f"[SK Loader] Resolved Key Vault secrets in plugin manifests {plugin_manifests}") + except Exception as e: + log_event(f"[SK Loader] Failed to resolve Key Vault secrets in plugin manifests: {e}", level=logging.ERROR, 
exceptionTraceback=True) + print(f"[SK Loader] Failed to resolve Key Vault secrets in plugin manifests: {e}") if not plugin_manifests: print(f"[SK Loader] Warning: No plugin manifests found for names/IDs: {plugin_names}") @@ -468,35 +606,46 @@ def load_agent_specific_plugins(kernel, plugin_names, mode_label="global", user_ except Exception as e: log_event( - f"[SK Loader] Error in agent-specific plugin loading: {e}", + f"[SK Loader][Error] Error in agent-specific plugin loading: {e}", extra={"error": str(e), "mode": mode_label, "user_id": user_id, "plugin_names": plugin_names}, level=logging.ERROR, exceptionTraceback=True ) + print(f"[SK Loader][Error] Error in agent-specific plugin loading: {e}") # Fallback to original method - log_event("[SK Loader] Falling back to original plugin loading method due to error", level=logging.WARNING) try: # Get plugin manifests again for fallback - if mode_label == "per-user": - from functions_personal_actions import get_personal_actions + if mode_label == "group": + if group_id: + all_plugin_manifests = get_group_actions(group_id, return_type=SecretReturnType.NAME) + if merge_global: + global_plugins = get_global_actions(return_type=SecretReturnType.NAME) + all_plugin_manifests.extend(global_plugins) + else: + all_plugin_manifests = [] + elif mode_label == "per-user": if user_id: - all_plugin_manifests = get_personal_actions(user_id) + all_plugin_manifests = get_personal_actions(user_id, return_type=SecretReturnType.NAME) + if merge_global: + global_plugins = get_global_actions(return_type=SecretReturnType.NAME) + for g in global_plugins: + all_plugin_manifests.append(g) else: all_plugin_manifests = [] else: - from functions_global_actions import get_global_actions - all_plugin_manifests = get_global_actions() - + all_plugin_manifests = get_global_actions(return_type=SecretReturnType.NAME) + plugin_manifests = [p for p in all_plugin_manifests if p.get('name') in plugin_names] _load_agent_plugins_original_method(kernel, plugin_manifests, mode_label) except Exception as fallback_error: log_event( - f"[SK Loader] Fallback plugin loading also failed: {fallback_error}", + f"[SK Loader][Error] Fallback plugin loading also failed: {fallback_error}", extra={"error": str(fallback_error), "mode": mode_label, "user_id": user_id}, level=logging.ERROR, exceptionTraceback=True ) + print(f"[SK Loader][Error] Fallback plugin loading also failed: {fallback_error}") def _load_agent_plugins_original_method(kernel, plugin_manifests, mode_label="global"): @@ -505,7 +654,6 @@ def _load_agent_plugins_original_method(kernel, plugin_manifests, mode_label="gl """ try: # Load the filtered plugins using original method - from semantic_kernel_plugins.plugin_loader import discover_plugins discovered_plugins = discover_plugins() for manifest in plugin_manifests: @@ -529,12 +677,11 @@ def normalize(s): try: # Special handling for OpenAPI plugins if normalized_type == normalize('openapi') or 'openapi' in normalized_type: - from semantic_kernel_plugins.openapi_plugin_factory import OpenApiPluginFactory plugin = OpenApiPluginFactory.create_from_config(manifest) print(f"[SK Loader] Created OpenAPI plugin: {name}") else: # Standard plugin instantiation - from semantic_kernel_plugins.plugin_health_checker import PluginHealthChecker, PluginErrorRecovery + plugin_instance, instantiation_errors = PluginHealthChecker.create_plugin_safely( matched_class, manifest, name ) @@ -547,9 +694,6 @@ def normalize(s): plugin = plugin_instance - # Add plugin to kernel - from 
semantic_kernel.functions.kernel_plugin import KernelPlugin - # Special handling for OpenAPI plugins with dynamic functions if hasattr(plugin, 'get_kernel_plugin'): print(f"[SK Loader] Using custom kernel plugin method for: {name}") @@ -593,12 +737,26 @@ def load_single_agent_for_kernel(kernel, agent_cfg, settings, context_obj, redis context_obj.redis_client = redis_client agent_objs = {} agent_config = resolve_agent_config(agent_cfg, settings) - print(f"[SK Loader] Agent config resolved for {agent_cfg.get('name')}: endpoint={bool(agent_config.get('endpoint'))}, key={bool(agent_config.get('key'))}, deployment={agent_config.get('deployment')}") + agent_type = (agent_config.get("agent_type") or agent_cfg.get("agent_type") or "local").lower() service_id = f"aoai-chat-{agent_config['name']}" chat_service = None apim_enabled = settings.get("enable_gpt_apim", False) - - log_event(f"[SK Loader] Agent config resolved - endpoint: {bool(agent_config.get('endpoint'))}, key: {bool(agent_config.get('key'))}, deployment: {agent_config.get('deployment')}", level=logging.INFO) + + if agent_type == "aifoundry": + foundry_agent = AzureAIFoundryChatCompletionAgent(agent_config, settings) + agent_objs[agent_config["name"]] = foundry_agent + log_event( + f"[SK Loader] Registered Foundry agent: {agent_config['name']} ({mode_label})", + { + "agent_name": agent_config["name"], + "agent_id": agent_config.get("id"), + "is_global": agent_config.get("is_global", False), + }, + level=logging.INFO, + ) + return kernel, agent_objs + + log_event(f"[SK Loader] Agent config resolved for {agent_cfg.get('name')} - endpoint: {bool(agent_config.get('endpoint'))}, key: {bool(agent_config.get('key'))}, deployment: {agent_config.get('deployment')}, max_completion_tokens: {agent_config.get('max_completion_tokens')}", level=logging.INFO) if AzureChatCompletion and agent_config["endpoint"] and agent_config["key"] and agent_config["deployment"]: print(f"[SK Loader] Azure config valid for {agent_config['name']}, creating chat service...") @@ -637,8 +795,12 @@ def load_single_agent_for_kernel(kernel, agent_cfg, settings, context_obj, redis deployment_name=agent_config["deployment"], endpoint=agent_config["endpoint"], api_key=agent_config["key"], - api_version=agent_config["api_version"] + api_version=agent_config["api_version"], + # default_headers={"Ocp-Apim-Subscription-Key": agent_config["key"]} ) + if agent_config.get('max_completion_tokens', -1) > 0: + print(f"[SK Loader] Using {agent_config['max_completion_tokens']} max_completion_tokens for {agent_config['name']}") + chat_service = set_prompt_settings_for_agent(chat_service, agent_config) kernel.add_service(chat_service) log_event( f"[SK Loader] AOAI chat completion service registered for agent: {agent_config['name']} ({mode_label})", @@ -672,17 +834,31 @@ def load_single_agent_for_kernel(kernel, agent_cfg, settings, context_obj, redis return None, None if LoggingChatCompletionAgent and chat_service: print(f"[SK Loader] Creating LoggingChatCompletionAgent for {agent_config['name']}...") - # Load agent-specific plugins into the kernel before creating the agent if agent_config.get("actions_to_load"): print(f"[SK Loader] Loading agent-specific plugins: {agent_config['actions_to_load']}") - # Determine plugin source based on agent's global status, not overall mode + # Determine plugin source based on agent scope agent_is_global = agent_config.get("is_global", False) - plugin_mode = "global" if agent_is_global else mode_label - user_id = get_current_user_id() if not agent_is_global 
else None - print(f"[SK Loader] Agent is_global: {agent_is_global}, using plugin_mode: {plugin_mode}") - load_agent_specific_plugins(kernel, agent_config["actions_to_load"], plugin_mode, user_id=user_id) - + agent_is_group = agent_config.get("is_group", False) + if agent_is_global: + plugin_mode = "global" + elif agent_is_group: + plugin_mode = "group" + else: + plugin_mode = mode_label + + resolved_user_id = None if agent_is_global else get_current_user_id() + group_id = agent_config.get("group_id") if agent_is_group else None + print(f"[SK Loader] Agent scope - is_global: {agent_is_global}, is_group: {agent_is_group}, plugin_mode: {plugin_mode}, group_id: {group_id}") + load_agent_specific_plugins( + kernel, + agent_config["actions_to_load"], + settings, + plugin_mode, + user_id=resolved_user_id, + group_id=group_id, + ) + try: kwargs = { "name": agent_config["name"], @@ -695,7 +871,8 @@ def load_single_agent_for_kernel(kernel, agent_cfg, settings, context_obj, redis "default_agent": agent_config.get("default_agent", False), "deployment_name": agent_config["deployment"], "azure_endpoint": agent_config["endpoint"], - "api_version": agent_config["api_version"] + "api_version": agent_config["api_version"], + "function_choice_behavior": FunctionChoiceBehavior.Auto(maximum_auto_invoke_attempts=10) } # Don't pass plugins to agent since they're already loaded in kernel agent_obj = LoggingChatCompletionAgent(**kwargs) @@ -708,7 +885,9 @@ def load_single_agent_for_kernel(kernel, agent_cfg, settings, context_obj, redis "aoai_endpoint": agent_config["endpoint"], "aoai_key": f"{agent_config['key'][:3]}..." if agent_config["key"] else None, "aoai_deployment": agent_config["deployment"], - "agent_name": agent_config["name"] + "agent_name": agent_config["name"], + "max_completion_tokens": agent_config.get("max_completion_tokens", -1), + "agent_type": agent_type, }, level=logging.INFO ) @@ -737,10 +916,48 @@ def load_single_agent_for_kernel(kernel, agent_cfg, settings, context_obj, redis log_event(f"[SK Loader] load_single_agent_for_kernel completed - returning {len(agent_objs)} agents: {list(agent_objs.keys())}", level=logging.INFO) return kernel, agent_objs +def resolve_key_vault_secrets_in_plugins(plugin_manifest, settings): + """ + Resolve any Key Vault secrets in a plugin manifest. + """ + if not isinstance(plugin_manifest, dict): + raise ValueError("Plugin manifest must be a dictionary") + + kv_name = settings.get("key_vault_name") + if not kv_name: + raise ValueError("Key Vault name not configured in settings") + + def resolve_value(value): + if isinstance(value, str) and validate_secret_name_dynamic(value): + resolved = retrieve_secret_from_key_vault_by_full_name(value) + if resolved: + return resolved + else: + raise ValueError(f"Failed to retrieve secret '{value}' from Key Vault '{kv_name}'") + return value + + resolved_manifest = {} + for k, v in plugin_manifest.items(): + debug_print(f"[SK Loader] Resolving plugin manifest key: {k} with value type: {type(v)}") + if isinstance(v, str): + resolved_manifest[k] = resolve_value(v) + elif isinstance(v, list): + resolved_manifest[k] = [resolve_value(item) for item in v] + elif isinstance(v, dict): + resolved_manifest[k] = {sub_k: resolve_value(sub_v) for sub_k, sub_v in v.items()} + else: + resolved_manifest[k] = v # Leave other types unchanged + return resolved_manifest + def load_plugins_for_kernel(kernel, plugin_manifests, settings, mode_label="global"): """ DRY helper to load plugins from a manifest list (user or global). 
""" + if settings.get("enable_key_vault_secret_storage", False) and settings.get("key_vault_name"): + try: + plugin_manifests = [resolve_key_vault_secrets_in_plugins(p, settings) for p in plugin_manifests] + except Exception as e: + log_event(f"[SK Loader] Failed to resolve Key Vault secrets in plugin manifests: {e}", level=logging.ERROR, exceptionTraceback=True) # Create logged plugin loader for enhanced logging logged_loader = create_logged_plugin_loader(kernel) @@ -854,7 +1071,6 @@ def _load_plugins_original_method(kernel, plugin_manifests, settings, mode_label Original plugin loading method as fallback. """ try: - from semantic_kernel_plugins.plugin_loader import discover_plugins discovered_plugins = discover_plugins() for manifest in plugin_manifests: plugin_type = manifest.get('type') @@ -874,7 +1090,6 @@ def normalize(s): try: # Special handling for OpenAPI plugins if normalized_type == normalize('openapi') or 'openapi' in normalized_type: - from semantic_kernel_plugins.openapi_plugin_factory import OpenApiPluginFactory # Use the factory to create OpenAPI plugins from configuration plugin = OpenApiPluginFactory.create_from_config(manifest) else: @@ -929,8 +1144,23 @@ def load_user_semantic_kernel(kernel: Kernel, settings, user_id: str, redis_clie # Early check: Get user settings to see if agents are enabled and if an agent is selected user_settings = get_user_settings(user_id).get('settings', {}) enable_agents = user_settings.get('enable_agents', True) # Default to True for backward compatibility + + # Check if request has forced agent enablement (e.g., retry with specific agent) + from flask import g + force_enable_agents = getattr(g, 'force_enable_agents', False) + request_agent_name = getattr(g, 'request_agent_name', None) + + if force_enable_agents: + enable_agents = True + log_event(f"[SK Loader] Force enabling agents due to request agent_info (agent: {request_agent_name})", level=logging.INFO) + selected_agent = user_settings.get('selected_agent') + # Override selected_agent if request specifies one + if request_agent_name: + selected_agent = request_agent_name + log_event(f"[SK Loader] Using agent from request: {request_agent_name}", level=logging.INFO) + # If agents are disabled or no agent is selected, skip agent loading entirely if not enable_agents: print(f"[SK Loader] User {user_id} has agents disabled. Proceeding in model-only mode.") @@ -946,26 +1176,103 @@ def load_user_semantic_kernel(kernel: Kernel, settings, user_id: str, redis_clie load_core_plugins_only(kernel, settings) return kernel, None - # Redis is now optional for per-user mode. If not present, state will not persist. 
- - # Load agents from personal_agents container - from functions_personal_agents import get_personal_agents, ensure_migration_complete - # Ensure migration is complete (will migrate any remaining legacy data) - ensure_migration_complete(user_id) + ensure_agents_migration_complete(user_id) agents_cfg = get_personal_agents(user_id) - + print(f"[SK Loader] User settings found {len(agents_cfg)} agents for user '{user_id}'") - + # Always mark user agents as is_global: False for agent in agents_cfg: agent['is_global'] = False + # Load group agents for user's active group (if any) + try: + active_group_id = require_active_group(user_id) + group_agents = get_group_agents(active_group_id) + if group_agents: + print(f"[SK Loader] Found {len(group_agents)} group agents for active group '{active_group_id}'") + # Badge group agents with group metadata + for group_agent in group_agents: + group_agent['is_global'] = False + group_agent['is_group'] = True + agents_cfg.extend(group_agents) + print(f"[SK Loader] After merging group agents: {len(agents_cfg)} total agents") + else: + print(f"[SK Loader] No group agents found for active group '{active_group_id}'") + except ValueError: + # No active group set - this is fine, just means no group agents available + print(f"[SK Loader] User '{user_id}' has no active group - skipping group agent loading") + + # Append selected group agent (if any) to the candidate list so downstream selection logic can resolve it + selected_agent_data = selected_agent if isinstance(selected_agent, dict) else {} + selected_agent_is_group = selected_agent_data.get('is_group', False) + if selected_agent_is_group: + resolved_group_id = selected_agent_data.get('group_id') + active_group_id = None + + # Group agent MUST have a group_id + if not resolved_group_id: + log_event( + "[SK Loader] Group agent selected but no group_id provided in selection data.", + level=logging.ERROR + ) + load_core_plugins_only(kernel, settings) + return kernel, None + + try: + active_group_id = require_active_group(user_id) + if resolved_group_id != active_group_id: + debug_print( + f"[SK Loader] Selected group agent references group {resolved_group_id}, active group is {active_group_id}." 
+ ) + log_event( + "[SK Loader] Group agent selected from the non-active group.", + level=logging.ERROR + ) + load_core_plugins_only(kernel, settings) + return kernel, None + except ValueError as err: + debug_print(f"[SK Loader] No active group available while loading group agent: {err}") + log_event( + "[SK Loader] Group agent selected but no active group in settings.", + level=logging.ERROR + ) + load_core_plugins_only(kernel, settings) + return kernel, None + + if resolved_group_id: + agent_identifier = selected_agent_data.get('id') or selected_agent_data.get('name') + group_agent_cfg = None + if agent_identifier: + group_agent_cfg = get_group_agent(resolved_group_id, agent_identifier) + if not group_agent_cfg: + # Fallback: search by name across group agents if ID lookup failed + for candidate in get_group_agents(resolved_group_id): + if candidate.get('name') == selected_agent_data.get('name'): + group_agent_cfg = candidate + break + + if group_agent_cfg: + group_agent_cfg['is_global'] = False + group_agent_cfg['is_group'] = True + group_agent_cfg.setdefault('group_id', resolved_group_id) + group_agent_cfg['group_name'] = selected_agent_data.get('group_name') + agents_cfg.append(group_agent_cfg) + log_event( + f"[SK Loader] Added group agent '{group_agent_cfg.get('name')}' from group {resolved_group_id} to candidate list.", + level=logging.INFO + ) + else: + log_event( + f"[SK Loader] Selected group agent '{selected_agent_data.get('name')}' not found for group {resolved_group_id}.", + level=logging.WARNING + ) + # PATCH: Merge global agents if enabled merge_global = settings.get('merge_global_semantic_kernel_with_workspace', False) print(f"[SK Loader] merge_global_semantic_kernel_with_workspace: {merge_global}") if merge_global: - from functions_global_agents import get_global_agents global_agents = get_global_agents() print(f"[SK Loader] Found {len(global_agents)} global agents to merge") # Mark global agents @@ -981,9 +1288,11 @@ def load_user_semantic_kernel(kernel: Kernel, settings, user_id: str, redis_clie key = f"global_{agent['name']}" all_agents[key] = agent - # Add personal agents with 'personal_' prefix + # Add personal and group agents with scoped prefixes for agent in agents_cfg: - key = f"personal_{agent['name']}" + prefix = "group" if agent.get('is_group') else "personal" + scoped_name = agent.get('name') or agent.get('id') or 'unnamed' + key = f"{prefix}_{scoped_name}" all_agents[key] = agent agents_cfg = list(all_agents.values()) @@ -998,18 +1307,13 @@ def load_user_semantic_kernel(kernel: Kernel, settings, user_id: str, redis_clie "agents": agents_cfg }, level=logging.INFO) - - # Load plugins from personal_actions container - from functions_personal_actions import get_personal_actions, ensure_migration_complete - # Ensure migration is complete (will migrate any remaining legacy data) - ensure_migration_complete(user_id) - plugin_manifests = get_personal_actions(user_id) + ensure_actions_migration_complete(user_id) + plugin_manifests = get_personal_actions(user_id, return_type=SecretReturnType.NAME) # PATCH: Merge global plugins if enabled if merge_global: - from functions_global_actions import get_global_actions - global_plugins = get_global_actions() + global_plugins = get_global_actions(return_type=SecretReturnType.NAME) # User plugins take precedence all_plugins = {p.get('name'): p for p in plugin_manifests} all_plugins.update({p.get('name'): p for p in global_plugins}) @@ -1021,30 +1325,37 @@ def load_user_semantic_kernel(kernel: Kernel, settings, user_id: str, 
redis_clie # Only load core Semantic Kernel plugins here if settings.get('enable_time_plugin', True): load_time_plugin(kernel) + print(f"[SK Loader] Loaded Time plugin.") log_event("[SK Loader] Loaded Time plugin.", level=logging.INFO) if settings.get('enable_fact_memory_plugin', True): load_fact_memory_plugin(kernel) + print(f"[SK Loader] Loaded Fact Memory plugin.") log_event("[SK Loader] Loaded Fact Memory plugin.", level=logging.INFO) if settings.get('enable_math_plugin', True): load_math_plugin(kernel) + print(f"[SK Loader] Loaded Math plugin.") log_event("[SK Loader] Loaded Math plugin.", level=logging.INFO) if settings.get('enable_text_plugin', True): load_text_plugin(kernel) + print(f"[SK Loader] Loaded Text plugin.") log_event("[SK Loader] Loaded Text plugin.", level=logging.INFO) if settings.get('enable_http_plugin', True): load_http_plugin(kernel) + print(f"[SK Loader] Loaded HTTP plugin.") log_event("[SK Loader] Loaded HTTP plugin.", level=logging.INFO) if settings.get('enable_wait_plugin', True): load_wait_plugin(kernel) + print(f"[SK Loader] Loaded Wait plugin.") log_event("[SK Loader] Loaded Wait plugin.", level=logging.INFO) if settings.get('enable_default_embedding_model_plugin', True): load_embedding_model_plugin(kernel, settings) + print(f"[SK Loader] Loaded Default Embedding Model plugin.") log_event("[SK Loader] Loaded Default Embedding Model plugin.", level=logging.INFO) # Get selected agent from user settings (this still needs to be in user settings for UI state) @@ -1116,6 +1427,7 @@ def load_user_semantic_kernel(kernel: Kernel, settings, user_id: str, redis_clie f"[SK Loader] User {user_id} No agent found matching global selected agent: {global_selected_agent_name}", level=logging.WARNING ) + # If still not found, DON'T use first agent - only load when explicitly selected if agent_cfg is None and agents_cfg: debug_print(f"[SK Loader] User {user_id} Agent selection final status: agent_cfg is None") @@ -1139,7 +1451,17 @@ def load_user_semantic_kernel(kernel: Kernel, settings, user_id: str, redis_clie debug_print(f"[SK Loader] User {user_id} Agent azure_deployment: {agent_cfg.get('azure_deployment', 'NOT SET')}") print(f"[SK Loader] User {user_id} Loading agent: {agent_cfg.get('name')}") - kernel, agent_objs = load_single_agent_for_kernel(kernel, agent_cfg, settings, g, redis_client=redis_client, mode_label="per-user") + agent_type = (agent_cfg.get('agent_type') or 'local').lower() + agent_cfg['agent_type'] = agent_type + if agent_type == 'local': + kernel, agent_objs = load_single_agent_for_kernel(kernel, agent_cfg, settings, g, redis_client=redis_client, mode_label="per-user") + else: + log_event( + f"[SK Loader] Unsupported agent_type '{agent_type}' for agent '{agent_cfg.get('name')}'. Defaulting to local path.", + level=logging.WARNING, + extra={'agent_type': agent_type, 'agent_name': agent_cfg.get('name')} + ) + kernel, agent_objs = load_single_agent_for_kernel(kernel, agent_cfg, settings, g, redis_client=redis_client, mode_label="per-user") print(f"[SK Loader] User {user_id} Agent loading completed. Agent objects: {type(agent_objs)} with {len(agent_objs) if agent_objs else 0} items") return kernel, agent_objs @@ -1148,8 +1470,8 @@ def load_semantic_kernel(kernel: Kernel, settings): log_event("[SK Loader] Global Semantic Kernel mode enabled. 
Loading global plugins and agents.", level=logging.INFO) # Conditionally load core plugins based on settings - from functions_global_actions import get_global_actions - plugin_manifests = get_global_actions() + + plugin_manifests = get_global_actions(return_type=SecretReturnType.NAME) log_event(f"[SK Loader] Found {len(plugin_manifests)} plugin manifests", level=logging.INFO) # --- Dynamic Plugin Type Loading (semantic_kernel_plugins) --- @@ -1157,7 +1479,7 @@ def load_semantic_kernel(kernel: Kernel, settings): # --- Agent and Service Loading --- # region Multi-agent Orchestration - from functions_global_agents import get_global_agents + agents_cfg = get_global_agents() enable_multi_agent_orchestration = settings.get('enable_multi_agent_orchestration', False) merge_global = settings.get('merge_global_semantic_kernel_with_workspace', False) @@ -1232,13 +1554,20 @@ def load_semantic_kernel(kernel: Kernel, settings): deployment_name=agent_config["deployment"], endpoint=agent_config["endpoint"], api_key=agent_config["key"], - api_version=agent_config["api_version"] + api_version=agent_config["api_version"], + # default_headers={"Ocp-Apim-Subscription-Key": key} ) + if agent_config.get('max_completion_tokens', -1) > 0: + print(f"[SK Loader] Using {agent_config['max_completion_tokens']} max_completion_tokens for {agent_config['name']}") + chat_service = set_prompt_settings_for_agent(chat_service, agent_config) kernel.add_service(chat_service) except Exception as e: log_event(f"[SK Loader] Failed to create or get AzureChatCompletion for agent: {agent_config['name']}: {e}", {"error": str(e)}, level=logging.ERROR, exceptionTraceback=True) if LoggingChatCompletionAgent and chat_service: try: + if agent_config.get('max_completion_tokens', -1) > 0: + print(f"[SK Loader] Using {agent_config['max_completion_tokens']} max_completion_tokens for {agent_config['name']}") + chat_service = set_prompt_settings_for_agent(chat_service, agent_config) kwargs = { "name": agent_config["name"], "instructions": agent_config["instructions"], @@ -1250,7 +1579,8 @@ def load_semantic_kernel(kernel: Kernel, settings): "default_agent": agent_config.get("default_agent", False), "deployment_name": agent_config["deployment"], "azure_endpoint": agent_config["endpoint"], - "api_version": agent_config["api_version"] + "api_version": agent_config["api_version"], + "function_choice_behavior": FunctionChoiceBehavior.Auto(maximum_auto_invoke_attempts=10) } if agent_config.get("actions_to_load"): kwargs["plugins"] = agent_config["actions_to_load"] @@ -1333,8 +1663,12 @@ def load_semantic_kernel(kernel: Kernel, settings): deployment_name=orchestrator_config["deployment"], endpoint=orchestrator_config["endpoint"], api_key=orchestrator_config["key"], - api_version=orchestrator_config["api_version"] + api_version=orchestrator_config["api_version"], + # default_headers={"Ocp-Apim-Subscription-Key": orchestrator_config["key"]} ) + if agent_config.get('max_completion_tokens', -1) > 0: + print(f"[SK Loader] Using {agent_config['max_completion_tokens']} max_completion_tokens for {agent_config['name']}") + chat_service = set_prompt_settings_for_agent(chat_service, agent_config) kernel.add_service(chat_service) if not chat_service: raise RuntimeError(f"[SK Loader] No AzureChatCompletion service available for orchestrator agent '{orchestrator_config['name']}'") @@ -1432,7 +1766,17 @@ def load_semantic_kernel(kernel: Kernel, settings): if global_selected_agent_cfg: log_event(f"[SK Loader] Using global_selected_agent: 
{global_selected_agent_cfg.get('name')}", level=logging.INFO) - kernel, agent_objs = load_single_agent_for_kernel(kernel, global_selected_agent_cfg, settings, builtins, redis_client=None, mode_label="global") + agent_type = (global_selected_agent_cfg.get('agent_type') or 'local').lower() + global_selected_agent_cfg['agent_type'] = agent_type + if agent_type == 'local': + kernel, agent_objs = load_single_agent_for_kernel(kernel, global_selected_agent_cfg, settings, builtins, redis_client=None, mode_label="global") + else: + log_event( + f"[SK Loader] Unsupported agent_type '{agent_type}' for global agent '{global_selected_agent_cfg.get('name')}'. Defaulting to local path.", + level=logging.WARNING, + extra={'agent_type': agent_type, 'agent_name': global_selected_agent_cfg.get('name')} + ) + kernel, agent_objs = load_single_agent_for_kernel(kernel, global_selected_agent_cfg, settings, builtins, redis_client=None, mode_label="global") log_event(f"[SK Loader] load_single_agent_for_kernel returned agent_objs: {type(agent_objs)} with {len(agent_objs) if agent_objs else 0} agents", level=logging.INFO) else: log_event("[SK Loader] No global_selected_agent found. Proceeding in kernel-only mode.", level=logging.WARNING) @@ -1447,26 +1791,25 @@ def load_semantic_kernel(kernel: Kernel, settings): if AzureChatCompletion and endpoint and key and deployment: apim_enabled = settings.get("enable_gpt_apim", False) if apim_enabled: - kernel.add_service( - AzureChatCompletion( + chat_service = AzureChatCompletion( service_id=f"aoai-chat-global", deployment_name=deployment, endpoint=endpoint, api_key=key, api_version=api_version, # default_headers={"Ocp-Apim-Subscription-Key": key} - ) ) + kernel.add_service(chat_service) else: - kernel.add_service( - AzureChatCompletion( + chat_service = AzureChatCompletion( service_id=f"aoai-chat-global", deployment_name=deployment, endpoint=endpoint, api_key=key, - api_version=api_version + api_version=api_version, + # default_headers={"Ocp-Apim-Subscription-Key": key} ) - ) + kernel.add_service(chat_service) log_event( f"[SK Loader] Azure OpenAI chat completion service registered (kernel-only mode)", { @@ -1490,4 +1833,163 @@ def load_semantic_kernel(kernel: Kernel, settings): def load_multi_agent_for_kernel(kernel: Kernel, settings): - return None, None \ No newline at end of file + return None, None + +def set_prompt_settings_for_agent(chat_service, agent_config: dict): + """ + Update the chat_service's prompt execution settings by merging agent_config overrides + into the existing settings. No prompt_settings argument is needed; all defaults are read + from the chat_service itself. 
+ """ + if not (chat_service and agent_config): + return + + PromptExecutionSettingsClass = chat_service.get_prompt_execution_settings_class() + + # Try to get an existing settings object from the service + existing = getattr(chat_service, "prompt_execution_settings", None) + if existing is None and hasattr(chat_service, "instantiate_prompt_execution_settings"): + try: + existing = chat_service.instantiate_prompt_execution_settings() + except Exception: + existing = None + + # Convert/normalize existing settings into the concrete class if needed + if existing: + try: + prompt_exec_settings = PromptExecutionSettingsClass.from_prompt_execution_settings(existing) + except Exception: + prompt_exec_settings = PromptExecutionSettingsClass() + else: + prompt_exec_settings = PromptExecutionSettingsClass() + + # Utility to pick an override from agent_config (None means no override) + def pick(key): + return agent_config.get(key, None) + + # Handle token fields - prefer agent_config max_completion_tokens then max_tokens + desired_tokens = pick("max_completion_tokens") + if desired_tokens is None: + desired_tokens = pick("max_tokens") + + model_fields = getattr(PromptExecutionSettingsClass, "model_fields", {}) + if desired_tokens is not None: + try: + desired_tokens = int(desired_tokens) + except Exception: + desired_tokens = None + if desired_tokens and desired_tokens > 0: + # This includes reasoning tokens in addition to response tokens. max_tokens is ONLY response tokens. + if "max_completion_tokens" in model_fields: + setattr(prompt_exec_settings, "max_completion_tokens", desired_tokens) + if "max_tokens" in model_fields: + setattr(prompt_exec_settings, "max_tokens", desired_tokens) + + chat_service.get_prompt_execution_settings_class() + + # Common numeric settings + for fld in ("temperature", "top_p", "frequency_penalty", "presence_penalty"): + val = pick(fld) + if val is not None: + try: + setattr(prompt_exec_settings, fld, val) + except Exception: + # pass this to prevent additional future agent types from potentially failing + pass + + # stop sequences -> map to 'stop' which OpenAI expects + stop_seqs = pick("stop_sequences") or pick("stop") + if stop_seqs is not None: + try: + setattr(prompt_exec_settings, "stop", stop_seqs) + except Exception: + # pass this to prevent additional future agent types from potentially failing + pass + + # Reasoning effort - only add if not 'none' or empty + reasoning_effort = pick("reasoning_effort") + if reasoning_effort and reasoning_effort != "none" and "reasoning_effort" in model_fields: + try: + setattr(prompt_exec_settings, "reasoning_effort", reasoning_effort) + print(f"[SK Loader] Set reasoning_effort={reasoning_effort} for agent: {agent_config.get('name')}") + except Exception as e: + print(f"[SK Loader] Failed to set reasoning_effort for agent {agent_config.get('name')}: {e}") + pass + + if hasattr(prompt_exec_settings, 'function_choice_behavior'): + if getattr(prompt_exec_settings, 'function_choice_behavior', None) is None: + try: + prompt_exec_settings.function_choice_behavior = FunctionChoiceBehavior.Auto(maximum_auto_invoke_attempts=10) + except Exception: + # pass this to prevent additional future agent types from potentially failing + pass + else: + print(f"[SK Loader] function_choice_behavior attribute not found in prompt execution settings for agent: {agent_config.get('name')}") + + # Apply settings back to service (prefer explicit setter, do NOT set attribute if not supported) + if hasattr(chat_service, "set_prompt_execution_settings"): + 
try: + chat_service.set_prompt_execution_settings(prompt_exec_settings) + except Exception as e: + # Log error but do not set attribute directly to avoid Pydantic validation errors + log_event(f"[SK Loader] Failed to set prompt execution settings via setter: {e}", level=logging.ERROR, exceptionTraceback=True) + # Do not set prompt_execution_settings as an attribute if not supported by the service + + # Store reasoning_effort info for retry logic + if hasattr(chat_service, '_agent_config'): + chat_service._agent_config = agent_config + + return chat_service + + +def handle_agent_reasoning_error(chat_service, error, agent_config): + """ + Handle reasoning_effort errors by retrying without the parameter. + Similar to the retry logic in route_backend_chats.py for direct GPT calls. + + Args: + chat_service: The AzureChatCompletion service + error: The exception that occurred + agent_config: The agent configuration dict + + Returns: + bool: True if reasoning_effort was removed and service updated, False otherwise + """ + error_str = str(error).lower() + has_reasoning = agent_config.get("reasoning_effort") and agent_config.get("reasoning_effort") != "none" + + # Check if error is related to reasoning_effort parameter + if has_reasoning and ( + 'reasoning_effort' in error_str or + 'unrecognized request argument' in error_str or + 'invalid_request_error' in error_str + ): + print(f"[SK Loader] Reasoning effort not supported by model, retrying without reasoning_effort for agent: {agent_config.get('name')}") + + # Remove reasoning_effort from agent_config + agent_config["reasoning_effort"] = "" + + # Update the service's prompt execution settings without reasoning_effort + try: + PromptExecutionSettingsClass = chat_service.get_prompt_execution_settings_class() + existing = getattr(chat_service, "prompt_execution_settings", None) + + if existing: + prompt_exec_settings = PromptExecutionSettingsClass.from_prompt_execution_settings(existing) + else: + prompt_exec_settings = PromptExecutionSettingsClass() + + # Remove reasoning_effort if it exists + if hasattr(prompt_exec_settings, "reasoning_effort"): + delattr(prompt_exec_settings, "reasoning_effort") + + # Update service settings + if hasattr(chat_service, "set_prompt_execution_settings"): + chat_service.set_prompt_execution_settings(prompt_exec_settings) + + return True + except Exception as update_error: + print(f"[SK Loader] Failed to remove reasoning_effort: {update_error}") + return False + + return False diff --git a/application/single_app/semantic_kernel_plugins/PLUGIN_DYNAMIC_SECRET_STORAGE.md b/application/single_app/semantic_kernel_plugins/PLUGIN_DYNAMIC_SECRET_STORAGE.md new file mode 100644 index 00000000..6518e569 --- /dev/null +++ b/application/single_app/semantic_kernel_plugins/PLUGIN_DYNAMIC_SECRET_STORAGE.md @@ -0,0 +1,69 @@ +# PLUGIN_DYNAMIC_SECRET_STORAGE.md + +## Feature: Dynamic Secret Storage for Plugins/Actions + +**Implemented in version:** (add your current config.py version here) + +### Overview +This feature allows plugin writers to store secrets in Azure Key Vault dynamically by simply naming any key in the plugin's `additionalFields` dictionary with the suffix `__Secret`. The application will automatically detect these keys, store their values in Key Vault, and replace the value with a Key Vault reference. This works in addition to the standard `auth.key` secret handling. 
+ + +### How It Works +- When saving a plugin, any key in `additionalFields` ending with `__Secret` (two underscores and a capital S) will be stored in Key Vault. +- The Key Vault secret name for these fields is constructed as `{pluginName-additionalsettingnamewithout__Secret}` (e.g., `loganal-alpharoemo` for plugin `loganal` and field `alpharoemo__Secret`). +- The value in the plugin dict will be replaced with the Key Vault reference (the full secret name). +- When retrieving a plugin, any Key Vault reference in `auth.key` or `additionalFields` ending with `__Secret` will be replaced with a UI trigger word (or optionally, the actual secret value). +- When deleting a plugin, any Key Vault reference in `auth.key` or `additionalFields` ending with `__Secret` will be deleted from Key Vault. + + +### Example +```json +{ + "name": "loganal", + "auth": { + "type": "key", + "key": "my-actual-secret-value" + }, + "additionalFields": { + "alpharoemo__Secret": "supersecretvalue", + "otherSetting__Secret": "anothersecret" + } +} +``` +After saving, the plugin dict will look like: +```json +{ + "name": "loganal", + "auth": { + "type": "key", + "key": "loganal--action--global--loganal" // Key Vault reference + }, + "additionalFields": { + "alpharoemo__Secret": "loganal--action-addset--global--loganal-alpharoemo", // Key Vault reference + "otherSetting__Secret": "loganal--action-addset--global--loganal-otherSetting" // Key Vault reference + } +} +``` +**Note:** The Key Vault secret name for each additional setting is constructed as `{pluginName}-{additionalsettingname}` (with __Secret removed). + + +### Benefits +- No custom code required for plugin writers to leverage Key Vault for secrets. +- Supports any number of dynamic secrets per plugin. +- Consistent with existing agent secret handling. +- Secret names are AKV-compliant and descriptive, making management and debugging easier. + + +### Usage +- To store a secret, add a key to `additionalFields` ending with `__Secret` and set its value to the secret. +- The application will handle storing, retrieving, and deleting the secret in Key Vault automatically. +- Secret names for additional settings will follow the `{pluginName-additionalsettingname}` pattern. 
+ +### Related Files +- `functions_keyvault.py` (helpers for save, get, delete) +- `plugin.schema.json` (schema supports arbitrary additionalFields) + +### Version History +- Feature added in version: (add your current config.py version here) + +--- diff --git a/application/single_app/semantic_kernel_plugins/azure_function_plugin.py b/application/single_app/semantic_kernel_plugins/azure_function_plugin.py deleted file mode 100644 index e24fd6e5..00000000 --- a/application/single_app/semantic_kernel_plugins/azure_function_plugin.py +++ /dev/null @@ -1,91 +0,0 @@ -from typing import Dict, Any, List -from semantic_kernel_plugins.base_plugin import BasePlugin -from semantic_kernel.functions import kernel_function -from semantic_kernel_plugins.plugin_invocation_logger import plugin_function_logger -import requests -from azure.identity import DefaultAzureCredential - -class AzureFunctionPlugin(BasePlugin): - def __init__(self, manifest: Dict[str, Any]): - self.manifest = manifest - self.endpoint = manifest.get('endpoint') - self.key = manifest.get('auth', {}).get('key') - self.auth_type = manifest.get('auth', {}).get('type', 'key') - self._metadata = manifest.get('metadata', {}) - if not self.endpoint or not self.auth_type: - raise ValueError("AzureFunctionPlugin requires 'endpoint' and 'auth.type' in the manifest.") - if self.auth_type == 'identity': - self.credential = DefaultAzureCredential() - elif self.auth_type == 'key': - if not self.key: - raise ValueError("AzureFunctionPlugin requires 'auth.key' when using key authentication.") - self.credential = None - else: - raise ValueError(f"Unsupported auth.type: {self.auth_type}") - - @property - def display_name(self) -> str: - return "Azure Function" - - @property - def metadata(self) -> Dict[str, Any]: - return { - "name": self.manifest.get("name", "azure_function_plugin"), - "type": "azure_function", - "description": "Plugin for calling an Azure Function via HTTP POST or GET using function key or managed identity authentication. Use this to trigger serverless logic or workflows in Azure Functions.", - "methods": [ - { - "name": "call_function_post", - "description": "Call the Azure Function using HTTP POST.", - "parameters": [ - {"name": "payload", "type": "dict", "description": "JSON payload to send in the POST request.", "required": True} - ], - "returns": {"type": "dict", "description": "Response from the Azure Function as a JSON object."} - }, - { - "name": "call_function_get", - "description": "Call the Azure Function using HTTP GET.", - "parameters": [ - {"name": "params", "type": "dict", "description": "Query parameters for the GET request.", "required": False} - ], - "returns": {"type": "dict", "description": "Response from the Azure Function as a JSON object."} - } - ] - } - - def get_functions(self) -> List[str]: - return ["call_function_post", "call_function_get"] - - @plugin_function_logger("AzureFunctionPlugin") - @kernel_function(description="Call the Azure Function using HTTP POST.") - def call_function_post(self, payload: dict) -> dict: - url = self.endpoint - headers = {} - if self.auth_type == 'identity': - token = self.credential.get_token("https://management.azure.com/.default").token - headers["Authorization"] = f"Bearer {token}" - elif self.auth_type == 'key': - if '?' 
in url: - url += f"&code={self.key}" - else: - url += f"?code={self.key}" - response = requests.post(url, json=payload, headers=headers) - response.raise_for_status() - return response.json() - - @plugin_function_logger("AzureFunctionPlugin") - @kernel_function(description="Call the Azure Function using HTTP GET.") - def call_function_get(self, params: dict = None) -> dict: - url = self.endpoint - headers = {} - if self.auth_type == 'identity': - token = self.credential.get_token("https://management.azure.com/.default").token - headers["Authorization"] = f"Bearer {token}" - elif self.auth_type == 'key': - if '?' in url: - url += f"&code={self.key}" - else: - url += f"?code={self.key}" - response = requests.get(url, params=params or {}, headers=headers) - response.raise_for_status() - return response.json() diff --git a/application/single_app/semantic_kernel_plugins/base_plugin.py b/application/single_app/semantic_kernel_plugins/base_plugin.py index 4aba46dc..b12969de 100644 --- a/application/single_app/semantic_kernel_plugins/base_plugin.py +++ b/application/single_app/semantic_kernel_plugins/base_plugin.py @@ -1,5 +1,7 @@ from abc import ABC, abstractmethod from typing import Dict, Any, List, Optional +import re +import inspect class BasePlugin(ABC): @property @@ -36,8 +38,6 @@ def display_name(self) -> str: # Remove 'Plugin' suffix and format nicely name = class_name.replace('Plugin', '') - # Handle common acronyms by keeping them together - import re # Split on word boundaries while preserving acronyms parts = re.findall(r'[A-Z]+(?=[A-Z][a-z]|$)|[A-Z][a-z]*', name) @@ -45,6 +45,11 @@ def display_name(self) -> str: formatted = ' '.join(parts).replace('_', ' ').strip() return formatted if formatted else name + """ + This class provides common functionality and enforces a standard interface. + All plugins should inherit from this base class. + All plugins should call super().__init__(manifest) in their init constructor. + """ @abstractmethod def __init__(self, manifest: Optional[Dict[str, Any]] = None): self.manifest = manifest or {} @@ -64,6 +69,50 @@ def get_functions(self) -> List[str]: Default implementation returns an empty list. Override this method if you want to explicitly declare exposed functions. """ - return [] + functions = [] + # First check unbound functions on the class where decorator attributes are set + for name, fn in inspect.getmembers(self.__class__, predicate=inspect.isfunction): + if getattr(fn, "is_kernel_function", False): + functions.append(name) + + # Fallback: check bound methods on the instance (older decorators may attach to the bound method) + if not functions: + for name, method in inspect.getmembers(self, predicate=inspect.ismethod): + if getattr(method, "is_kernel_function", False): + functions.append(name) + + # Debug print for visibility during registration + for f in functions: + print(f"Registering function: {f}") + + return functions + + def _collect_kernel_methods_for_metadata(self) -> List[Dict[str, str]]: + """ + Collect methods decorated with @kernel_function by parsing the class source code. + Falls back to gathering function names and the first line of their docstring when decorator metadata isn't available. 
+ """ + methods: List[Dict[str, str]] = [] + try: + src = inspect.getsource(self.__class__) + except Exception: + src = None + if src: + # Try to find @kernel_function(...description="...") followed by the def + regex = re.compile(r"@kernel_function\s*\(\s*[^)]*?description\s*=\s*(['\"])(.*?)\1[^)]*?\)\s*(?:\n\s*@[^\"]*?)*\n\s*def\s+([A-Za-z_][A-Za-z0-9_]*)\s*\(", re.S) + for m in regex.finditer(src): + desc = m.group(2).strip() + name = m.group(3).strip() + methods.append({"name": name, "description": desc}) + # If parsing didn't find anything, fall back to introspection of methods and docstrings + if not methods: + for name, fn in inspect.getmembers(self.__class__, predicate=inspect.isfunction): + # skip private/internal functions + if name.startswith("_"): + continue + doc = (fn.__doc__ or "").strip().splitlines() + desc = doc[0] if doc else "" + methods.append({"name": name, "description": desc}) + return methods diff --git a/application/single_app/semantic_kernel_plugins/databricks_table_example.json b/application/single_app/semantic_kernel_plugins/databricks_table_example.json deleted file mode 100644 index 32cfecdd..00000000 --- a/application/single_app/semantic_kernel_plugins/databricks_table_example.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "users_table", - "type": "databricks_table", - "description": "Query the users table in Databricks.", - "endpoint": "https:///api/2.0/sql/statements", - "auth": { - "type": "key", // Authentication type, can be 'key' or 'identity', etc. - "key": "", - "managedIdentity": "" // Optional, if using identity-based auth - }, - "metadata": {}, - "additionalFields": { - "table": "users", - "warehouse_id": "", - "columns": [ - { - "name": "id", - "type": "int", - "description": "User ID" - }, - { - "name": "name", - "type": "string", - "description": "User's full name" - }, - { - "name": "email", - "type": "string", - "description": "User's email address" - } - ] - } -} \ No newline at end of file diff --git a/application/single_app/semantic_kernel_plugins/databricks_table_plugin.py b/application/single_app/semantic_kernel_plugins/databricks_table_plugin.py index e61a55d2..533a6fac 100644 --- a/application/single_app/semantic_kernel_plugins/databricks_table_plugin.py +++ b/application/single_app/semantic_kernel_plugins/databricks_table_plugin.py @@ -76,9 +76,6 @@ def metadata(self): ] } - def get_functions(self): - return ["query_table"] - @kernel_function( description=""" Query the Databricks table using parameterized SQL. Column names are listed in self.columns. 
diff --git a/application/single_app/semantic_kernel_plugins/logged_plugin_loader.py b/application/single_app/semantic_kernel_plugins/logged_plugin_loader.py index 5b2c7193..64443633 100644 --- a/application/single_app/semantic_kernel_plugins/logged_plugin_loader.py +++ b/application/single_app/semantic_kernel_plugins/logged_plugin_loader.py @@ -13,13 +13,14 @@ from semantic_kernel.functions import kernel_function from semantic_kernel.functions.kernel_plugin import KernelPlugin from semantic_kernel_plugins.base_plugin import BasePlugin -from semantic_kernel_plugins.plugin_invocation_logger import ( - get_plugin_logger, - plugin_function_logger, - auto_wrap_plugin_functions -) +from semantic_kernel_plugins.plugin_invocation_logger import get_plugin_logger, plugin_function_logger, auto_wrap_plugin_functions +from semantic_kernel_plugins.plugin_loader import discover_plugins from functions_appinsights import log_event - +from functions_debug import debug_print +from semantic_kernel_plugins.openapi_plugin_factory import OpenApiPluginFactory +from semantic_kernel_plugins.sql_schema_plugin import SQLSchemaPlugin +from semantic_kernel_plugins.sql_query_plugin import SQLQueryPlugin +from app_settings_cache import get_settings_cache class LoggedPluginLoader: """Enhanced plugin loader that automatically adds invocation logging.""" @@ -48,17 +49,21 @@ def load_plugin_from_manifest(self, manifest: Dict[str, Any], log_event(f"[Logged Plugin Loader] Starting to load plugin: {plugin_name} (type: {plugin_type})") if not plugin_name: - self.logger.error("Plugin manifest missing required 'name' field") + log_event(f"[Logged Plugin Loader] Plugin manifest missing required 'name' field", level=logging.ERROR) return False try: # Load the plugin instance + debug_print(f"[Logged Plugin Loader] Creating plugin instance for {plugin_name} of type {plugin_type}") plugin_instance = self._create_plugin_instance(manifest) + debug_print(f"[Logged Plugin Loader] Created plugin instance: {plugin_instance}") if not plugin_instance: + debug_print(f"[Logged Plugin Loader] Failed to create plugin instance for {plugin_name} of type {plugin_type}") return False # Enable logging if the plugin supports it if hasattr(plugin_instance, 'enable_invocation_logging'): + debug_print(f"[Logged Plugin Loader] Enabling invocation logging for {plugin_name}") plugin_instance.enable_invocation_logging(True) # Auto-wrap plugin functions with logging @@ -76,7 +81,7 @@ def load_plugin_from_manifest(self, manifest: Dict[str, Any], self._register_plugin_with_kernel(plugin_instance, plugin_name) log_event( - f"[Plugin Loader] Successfully loaded plugin: {plugin_name}", + f"[Logged Plugin Loader] Successfully loaded plugin: {plugin_name}", extra={ "plugin_name": plugin_name, "plugin_type": plugin_type, @@ -90,7 +95,7 @@ def load_plugin_from_manifest(self, manifest: Dict[str, Any], except Exception as e: log_event( - f"[Plugin Loader] Failed to load plugin: {plugin_name}", + f"[Logged Plugin Loader] Failed to load plugin: {plugin_name}", extra={ "plugin_name": plugin_name, "plugin_type": plugin_type, @@ -112,13 +117,40 @@ def _create_plugin_instance(self, manifest: Dict[str, Any]): return self._create_openapi_plugin(manifest) elif plugin_type == 'python': return self._create_python_plugin(manifest) - elif plugin_type == 'custom': - return self._create_custom_plugin(manifest) - elif plugin_type in ['sql_schema', 'sql_query']: - return self._create_sql_plugin(manifest) + #elif plugin_type in ['sql_schema', 'sql_query']: + # return 
self._create_sql_plugin(manifest) else: - self.logger.warning(f"Unknown plugin type: {plugin_type} for plugin: {plugin_name}") - return None + try: + debug_print(f"[Logged Plugin Loader] Attempting to discover plugin type: {plugin_type}") + discovered_plugins = discover_plugins() + plugin_type = manifest.get('type') + name = manifest.get('name') + description = manifest.get('description', '') + # Normalize for matching + def normalize(s): + return s.replace('_', '').replace('-', '').replace('plugin', '').lower() if s else '' + debug_print(f"[Logged Plugin Loader] Normalizing plugin type for matching: {plugin_type}") + normalized_type = normalize(plugin_type) + debug_print(f"[Logged Plugin Loader] Normalized plugin type: {normalized_type}") + matched_class = None + for class_name, cls in discovered_plugins.items(): + normalized_class = normalize(class_name) + print("[Logged Plugin Loader] Checking plugin class:", class_name, "normalized:", normalized_class) + if normalized_type == normalized_class or normalized_type in normalized_class: + matched_class = cls + debug_print(f"[Logged Plugin Loader] Matched class for plugin '{name}' of type '{plugin_type}': {matched_class}") + break + if matched_class: + try: + plugin = matched_class(manifest) if 'manifest' in matched_class.__init__.__code__.co_varnames else matched_class() + log_event(f"[Logged Plugin Loader] Instanced plugin: {name} (type: {plugin_type})", {"plugin_name": name, "plugin_type": plugin_type}, level=logging.INFO) + return plugin + except Exception as e: + log_event(f"[Logged Plugin Loader] Failed to instantiate plugin: {name}: {e}", {"plugin_name": name, "plugin_type": plugin_type, "error": str(e)}, level=logging.ERROR, exceptionTraceback=True) + else: + log_event(f"[Logged Plugin Loader] Unknown plugin type: {plugin_type} for plugin '{name}'", {"plugin_name": name, "plugin_type": plugin_type}, level=logging.WARNING) + except Exception as e: + log_event(f"[Logged Plugin Loader] Error discovering plugin types: {e}", {"error": str(e)}, level=logging.ERROR, exceptionTraceback=True) def _create_openapi_plugin(self, manifest: Dict[str, Any]): """Create an OpenAPI plugin instance.""" @@ -126,9 +158,6 @@ def _create_openapi_plugin(self, manifest: Dict[str, Any]): log_event(f"[Logged Plugin Loader] Attempting to create OpenAPI plugin: {plugin_name}", level=logging.DEBUG) try: - from semantic_kernel_plugins.openapi_plugin_factory import OpenApiPluginFactory - log_event(f"[Logged Plugin Loader] Successfully imported OpenApiPluginFactory", level=logging.DEBUG) - log_event(f"[Logged Plugin Loader] Creating OpenAPI plugin using factory", extra={"plugin_name": plugin_name, "manifest": manifest}, level=logging.DEBUG) @@ -176,22 +205,14 @@ def _create_python_plugin(self, manifest: Dict[str, Any]): self.logger.error(f"Failed to create Python plugin {class_name} from {module_name}: {e}") return None - def _create_custom_plugin(self, manifest: Dict[str, Any]): - """Create a custom plugin instance.""" - # This is where you'd handle custom plugin types specific to your application - self.logger.warning(f"Custom plugin type not yet implemented: {manifest}") - return None - def _create_sql_plugin(self, manifest: Dict[str, Any]): """Create a SQL plugin instance.""" plugin_type = manifest.get('type') try: if plugin_type == 'sql_schema': - from semantic_kernel_plugins.sql_schema_plugin import SQLSchemaPlugin return SQLSchemaPlugin(manifest) elif plugin_type == 'sql_query': - from semantic_kernel_plugins.sql_query_plugin import SQLQueryPlugin return 
SQLQueryPlugin(manifest) else: self.logger.error(f"Unknown SQL plugin type: {plugin_type}") @@ -336,7 +357,7 @@ def load_multiple_plugins(self, manifests: List[Dict[str, Any]], total_count = len(results) log_event( - f"[Plugin Loader] Loaded {successful_count}/{total_count} plugins", + f"[Logged Plugin Loader] Loaded {successful_count}/{total_count} plugins", extra={ "successful_plugins": [name for name, success in results.items() if success], "failed_plugins": [name for name, success in results.items() if not success], diff --git a/application/single_app/semantic_kernel_plugins/math_plugin.py b/application/single_app/semantic_kernel_plugins/math_plugin.py index f32301ed..159f6d78 100644 --- a/application/single_app/semantic_kernel_plugins/math_plugin.py +++ b/application/single_app/semantic_kernel_plugins/math_plugin.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. from typing import Annotated - +from semantic_kernel_plugins.plugin_invocation_logger import plugin_function_logger from semantic_kernel.functions.kernel_function_decorator import kernel_function @@ -37,6 +37,7 @@ def add( return x + y @kernel_function(name="Subtract") + @plugin_function_logger("MathPlugin") def subtract( self, input: Annotated[int | float | str, "The number to subtract from"], @@ -45,4 +46,68 @@ def subtract( """Returns the difference of numbers provided (supports float and int).""" x = self._parse_number(input) y = self._parse_number(amount) - return x - y \ No newline at end of file + return x - y + + @kernel_function(name="Multiply") + @plugin_function_logger("MathPlugin") + def multiply( + self, + input: Annotated[int | float | str, "The first number to multiply"], + amount: Annotated[int | float | str, "The second number to multiply"], + ) -> Annotated[float, "The result"]: + """Returns the multiplication result of the values provided (supports float and int).""" + x = self._parse_number(input) + y = self._parse_number(amount) + return x * y + + @kernel_function(name="Divide") + @plugin_function_logger("MathPlugin") + def divide( + self, + input: Annotated[int | float | str, "The numerator"], + amount: Annotated[int | float | str, "The denominator"], + ) -> Annotated[float, "The result"]: + """Returns the division result of the values provided (supports float and int).""" + x = self._parse_number(input) + y = self._parse_number(amount) + if y == 0: + raise ValueError("Cannot divide by zero") + return x / y + + @kernel_function(name="Power") + @plugin_function_logger("MathPlugin") + def power( + self, + input: Annotated[int | float | str, "The base number"], + exponent: Annotated[int | float | str, "The exponent"], + ) -> Annotated[float, "The result"]: + """Returns the power result of the values provided (supports float and int).""" + x = self._parse_number(input) + y = self._parse_number(exponent) + return x**y + + @kernel_function(name="SquareRoot") + @plugin_function_logger("MathPlugin") + def square_root( + self, + input: Annotated[int | float | str, "The number to calculate the square root of"], + ) -> Annotated[float, "The result"]: + """Returns the square root of the value provided (supports float and int).""" + x = self._parse_number(input) + if x < 0: + raise ValueError("Cannot calculate square root of a negative number") + return x**0.5 + + @kernel_function(name="Modulus") + @plugin_function_logger("MathPlugin") + def modulus( + self, + input: Annotated[int | float | str, "The dividend"], + amount: Annotated[int | float | str, "The divisor"], + ) -> Annotated[float, "The 
result"]: + """Returns the modulus of the values provided (supports float and int).""" + x = self._parse_number(input) + y = self._parse_number(amount) + if y == 0: + raise ValueError("Cannot divide by zero for modulus operation") + return x % y \ No newline at end of file diff --git a/application/single_app/semantic_kernel_plugins/msgraph_plugin.py b/application/single_app/semantic_kernel_plugins/msgraph_plugin.py index c6d34d0d..0eeef419 100644 --- a/application/single_app/semantic_kernel_plugins/msgraph_plugin.py +++ b/application/single_app/semantic_kernel_plugins/msgraph_plugin.py @@ -53,7 +53,7 @@ def get_functions(self) -> List[str]: def _get_token(self, scopes=None): # Use the existing authentication helper to get a valid token for Graph - scopes = scopes or ["https://graph.microsoft.com/.default"] + scopes = scopes or [f"{self.manifest.get('endpoint', 'https://graph.microsoft.com').rstrip('/')}/.default"] token = get_valid_access_token(scopes=scopes) if not token: raise Exception("Could not acquire MS Graph access token. User may need to re-authenticate.") diff --git a/application/single_app/semantic_kernel_plugins/openapi_plugin.py b/application/single_app/semantic_kernel_plugins/openapi_plugin.py index 1356d817..81e8fbc4 100644 --- a/application/single_app/semantic_kernel_plugins/openapi_plugin.py +++ b/application/single_app/semantic_kernel_plugins/openapi_plugin.py @@ -892,46 +892,53 @@ def _call_api_operation(self, operation_id: str, path: str, method: str, operati api_key = self.auth.get("key", "") debug_print(f"Key auth - api_key: {api_key[:10]}...") - # Check OpenAPI spec for security schemes + # Check OpenAPI spec for security schemes (OpenAPI 3.0+) or securityDefinitions (OpenAPI 2.0/Swagger) + security_schemes = None + + # Try OpenAPI 3.0+ format first if self.openapi and "components" in self.openapi and "securitySchemes" in self.openapi["components"]: security_schemes = self.openapi["components"]["securitySchemes"] - debug_print(f"Found security schemes: {list(security_schemes.keys())}") - - # Look for apiKey scheme (query parameter) - if "apiKey" in security_schemes: - scheme = security_schemes["apiKey"] - debug_print(f"Found apiKey scheme: {scheme}") + debug_print(f"Found OpenAPI 3.0 security schemes: {list(security_schemes.keys())}") + # Fall back to OpenAPI 2.0/Swagger format + elif self.openapi and "securityDefinitions" in self.openapi: + security_schemes = self.openapi["securityDefinitions"] + debug_print(f"Found OpenAPI 2.0 securityDefinitions: {list(security_schemes.keys())}") + + if security_schemes: + # Look for any apiKey scheme with type=apiKey and in=query + auth_applied = False + for scheme_name, scheme in security_schemes.items(): if scheme.get("type") == "apiKey" and scheme.get("in") == "query": - key_name = scheme.get("name", "api-key") + key_name = scheme.get("name", "api_key") query_params[key_name] = api_key - debug_print(f"Added query parameter auth: {key_name}={api_key[:10]}...") + debug_print(f"Added query parameter auth from '{scheme_name}': {key_name}={api_key[:10]}...") logging.info(f"[OpenAPI Plugin] Using query parameter auth: {key_name}") - - # Look for headerApiKey scheme as fallback - elif "headerApiKey" in security_schemes: - scheme = security_schemes["headerApiKey"] - debug_print(f"Found headerApiKey scheme: {scheme}") - if scheme.get("type") == "apiKey" and scheme.get("in") == "header": + auth_applied = True + break + elif scheme.get("type") == "apiKey" and scheme.get("in") == "header": key_name = scheme.get("name", "x-api-key") 
headers[key_name] = api_key - debug_print(f"Added header auth: {key_name}={api_key[:10]}...") + debug_print(f"Added header auth from '{scheme_name}': {key_name}={api_key[:10]}...") logging.info(f"[OpenAPI Plugin] Using header auth: {key_name}") - else: + auth_applied = True + break + + if not auth_applied: debug_print(f"No matching security scheme found!") # Fallback if no security schemes found - if api_key and not any(k in query_params for k in ["api-key", "apikey"]) and not any(k.lower() in [h.lower() for h in headers.keys()] for k in ["x-api-key", "api-key"]): - # Default to query parameter - query_params["api-key"] = api_key - debug_print(f"Using fallback query parameter auth: api-key={api_key[:10]}...") - logging.info(f"[OpenAPI Plugin] Using fallback query parameter auth: api-key") + if api_key and not any(k in query_params for k in ["api-key", "api_key", "apikey"]) and not any(k.lower() in [h.lower() for h in headers.keys()] for k in ["x-api-key", "api-key"]): + # Default to query parameter with underscore + query_params["api_key"] = api_key + debug_print(f"Using fallback query parameter auth: api_key={api_key[:10]}...") + logging.info(f"[OpenAPI Plugin] Using fallback query parameter auth: api_key") else: debug_print(f"No security schemes found in OpenAPI spec") # Fallback if no security schemes found - if api_key and not any(k in query_params for k in ["api-key", "apikey"]) and not any(k.lower() in [h.lower() for h in headers.keys()] for k in ["x-api-key", "api-key"]): - # Default to query parameter - query_params["api-key"] = api_key - debug_print(f"Using fallback query parameter auth: api-key={api_key[:10]}...") - logging.info(f"[OpenAPI Plugin] Using fallback query parameter auth: api-key") + if api_key and not any(k in query_params for k in ["api-key", "api_key", "apikey"]) and not any(k.lower() in [h.lower() for h in headers.keys()] for k in ["x-api-key", "api-key"]): + # Default to query parameter with underscore + query_params["api_key"] = api_key + debug_print(f"Using fallback query parameter auth: api_key={api_key[:10]}...") + logging.info(f"[OpenAPI Plugin] Using fallback query parameter auth: api_key") elif auth_type == "bearer": token = self.auth.get("token", "") headers["Authorization"] = f"Bearer {token}" diff --git a/application/single_app/semantic_kernel_plugins/openapi_plugin_factory.py b/application/single_app/semantic_kernel_plugins/openapi_plugin_factory.py index 3380c208..d2a91477 100644 --- a/application/single_app/semantic_kernel_plugins/openapi_plugin_factory.py +++ b/application/single_app/semantic_kernel_plugins/openapi_plugin_factory.py @@ -12,6 +12,7 @@ import tempfile from typing import Dict, Any, Optional from .openapi_plugin import OpenApiPlugin +from functions_debug import debug_print class OpenApiPluginFactory: @@ -130,10 +131,48 @@ def _extract_auth_config(cls, config: Dict[str, Any]) -> Dict[str, Any]: return {} auth_type = auth_config.get('type', 'none') + debug_print(f"[Factory] auth_type: {auth_type}") if auth_type == 'none': return {} - # Return the auth config as-is since the OpenApiPlugin already handles - # the different auth types + # Check if this is basic auth stored in the 'key' field format + # Simple Chat stores basic auth as: auth.type='key', auth.key='username:password', additionalFields.auth_method='basic' + additional_fields = config.get('additionalFields', {}) + auth_method = additional_fields.get('auth_method', '') + debug_print(f"[Factory] additionalFields.auth_method: {auth_method}") + + if auth_type == 'key' and auth_method 
== 'basic': + # Extract username and password from the combined key + key = auth_config.get('key', '') + debug_print(f"[Factory] Applying basic auth transformation") + if ':' in key: + username, password = key.split(':', 1) + return { + 'type': 'basic', + 'username': username, + 'password': password + } + else: + # Malformed basic auth key + return {} + + # For bearer tokens stored as 'key' type + if auth_type == 'key' and auth_method == 'bearer': + token = auth_config.get('key', '') + debug_print(f"[Factory] Applying bearer auth transformation") + return { + 'type': 'bearer', + 'token': token + } + + # For OAuth2 stored as 'key' type + if auth_type == 'key' and auth_method == 'oauth2': + debug_print(f"[Factory] Applying OAuth2 auth transformation") + return { + 'type': 'bearer', # OAuth2 tokens are typically bearer tokens + 'token': auth_config.get('key', '') + } + + # Return the auth config as-is for other auth types return auth_config diff --git a/application/single_app/semantic_kernel_plugins/plugin_loader.py b/application/single_app/semantic_kernel_plugins/plugin_loader.py index 0c9ab56b..9e897c4e 100644 --- a/application/single_app/semantic_kernel_plugins/plugin_loader.py +++ b/application/single_app/semantic_kernel_plugins/plugin_loader.py @@ -2,6 +2,7 @@ import importlib.util import inspect import logging +from functions_appinsights import log_event from typing import Dict, Type, List from semantic_kernel_plugins.base_plugin import BasePlugin @@ -30,7 +31,7 @@ def discover_plugins() -> Dict[str, Type[BasePlugin]]: except Exception as e: # Log the error but continue with other plugins - logging.warning(f"Failed to load plugin module {module_name}: {str(e)}") + log_event(f"Failed to load plugin module {module_name}: {str(e)}") continue return plugins diff --git a/application/single_app/semantic_kernel_plugins/queue_storage_plugin.py b/application/single_app/semantic_kernel_plugins/queue_storage_plugin.py index f3ca9aad..58e918bc 100644 --- a/application/single_app/semantic_kernel_plugins/queue_storage_plugin.py +++ b/application/single_app/semantic_kernel_plugins/queue_storage_plugin.py @@ -10,7 +10,7 @@ def __init__(self, manifest: Dict[str, Any]): super().__init__(manifest) self.manifest = manifest self.endpoint = manifest.get('endpoint') - self.queue_name = manifest.get('queue_name') + self.queue_name = manifest.get('additional_settings', {}).get('queue_name') self.key = manifest.get('auth', {}).get('key') self.auth_type = manifest.get('auth', {}).get('type', 'key') self._metadata = manifest.get('metadata', {}) diff --git a/application/single_app/semantic_kernel_plugins/smart_http_plugin.py b/application/single_app/semantic_kernel_plugins/smart_http_plugin.py index f5209685..2292e7bc 100644 --- a/application/single_app/semantic_kernel_plugins/smart_http_plugin.py +++ b/application/single_app/semantic_kernel_plugins/smart_http_plugin.py @@ -560,6 +560,7 @@ async def _summarize_large_content(self, content: str, uri: str, page_count: int from functions_settings import get_settings from openai import AzureOpenAI from azure.identity import DefaultAzureCredential, get_bearer_token_provider + from config import cognitive_services_scope settings = get_settings() @@ -580,7 +581,6 @@ async def _summarize_large_content(self, content: str, uri: str, page_count: int ) else: if settings.get('azure_openai_gpt_authentication_type') == 'managed_identity': - cognitive_services_scope = "https://cognitiveservices.azure.com/.default" token_provider = get_bearer_token_provider( 
DefaultAzureCredential(), cognitive_services_scope diff --git a/application/single_app/semantic_kernel_plugins/ui_test_plugin.py b/application/single_app/semantic_kernel_plugins/ui_test_plugin.py new file mode 100644 index 00000000..44068d43 --- /dev/null +++ b/application/single_app/semantic_kernel_plugins/ui_test_plugin.py @@ -0,0 +1,80 @@ +""" +UI Test Plugin for Semantic Kernel +- Provides demonstration methods for UI testing (greeting, farewell, manifest retrieval) +- Useful for testing plugin integration and UI workflows +- Does not interact with external systems or databases +""" + +import json +import logging +from typing import Dict, Any, List, Optional, Union +from semantic_kernel_plugins.base_plugin import BasePlugin +from semantic_kernel.functions import kernel_function +from functions_appinsights import log_event +from semantic_kernel_plugins.plugin_invocation_logger import plugin_function_logger +from functions_debug import debug_print + +# Helper class to wrap results with metadata +class ResultWithMetadata: + def __init__(self, data, metadata): + self.data = data + self.metadata = metadata + def __str__(self): + return str(self.data) + def __repr__(self): + return f"ResultWithMetadata(data={self.data!r}, metadata={self.metadata!r})" + +class UITestPlugin(BasePlugin): + def __init__(self, manifest: Dict[str, Any]): + super().__init__(manifest) + + @property + def display_name(self) -> str: + return "UI Test Plugin" + + @property + def metadata(self) -> Dict[str, Any]: + return { + "name": "ui_test_plugin", + "type": "ui_test", + "description": "A plugin for UI testing and demonstration purposes.", + "methods": [ + { + "name": "greet_user", + "description": "Returns a greeting message.", + "parameters": [ + {"name": "name", "type": "str", "description": "Name to greet.", "required": True} + ], + "returns": {"type": "str", "description": "Greeting message."} + }, + { + "name": "farewell_user", + "description": "Returns a farewell message.", + "parameters": [ + {"name": "name", "type": "str", "description": "Name to bid farewell.", "required": True} + ], + "returns": {"type": "str", "description": "Farewell message."} + }, + { + "name": "get_manifest", + "description": "Returns the plugin manifest.", + "parameters": [], + "returns": {"type": "str", "description": "Manifest as JSON string."} + } + ] + } + + @kernel_function(description="A function that returns a greeting message.") + @plugin_function_logger("UITestPlugin") + def greet_user(self, name: str) -> str: + return f"Hello, {name}!" + + @kernel_function(description="A function that returns a farewell message.") + @plugin_function_logger("UITestPlugin") + def farewell_user(self, name: str) -> str: + return f"Goodbye, {name}!" 
+ + @kernel_function(description="A function that returns the plugin manifest") + @plugin_function_logger("UITestPlugin") + def get_manifest(self) -> str: + return json.dumps(self.manifest, indent=2) \ No newline at end of file diff --git a/application/single_app/static/css/chat-speech-input.css b/application/single_app/static/css/chat-speech-input.css new file mode 100644 index 00000000..eaeba8f1 --- /dev/null +++ b/application/single_app/static/css/chat-speech-input.css @@ -0,0 +1,112 @@ +/* chat-speech-input.css */ +/* Styles for speech-to-text chat input feature */ + +/* Speech input button positioning */ +#speech-input-btn { + width: 36px; + height: 36px; + border-radius: 0.375rem; + padding: 0; + display: flex; + align-items: center; + justify-content: center; + z-index: 2; +} + +#speech-input-btn:hover { + background-color: #e9ecef; + border-color: #adb5bd; +} + +#speech-input-btn:active { + background-color: #dee2e6; +} + +/* Recording UI Container */ +#recording-container { + min-height: 38px; +} + +.recording-ui { + padding: 8px 12px; + background: #f8f9fa; + border-radius: 0.375rem; + border: 1px solid #0d6efd; +} + +/* Waveform Canvas */ +#waveform-canvas { + height: 36px; + background-color: #ffffff; + border-radius: 0.25rem; + border: 1px solid #dee2e6; +} + +/* Recording Buttons */ +.recording-buttons .btn { + min-width: 36px; + width: 36px; + height: 36px; + font-weight: 600; + display: flex; + align-items: center; + justify-content: center; + padding: 0; + border-radius: 0.375rem; +} + +.recording-buttons .btn i { + font-size: 1rem; +} + +/* Adjust textarea padding when speech button is visible */ +#user-input { + padding-left: 55px !important; +} + +/* Dark mode support */ +[data-bs-theme="dark"] #speech-input-btn { + background-color: transparent; + border-color: #6c757d; + color: #adb5bd; +} + +[data-bs-theme="dark"] #speech-input-btn:hover { + background-color: #343a40; + border-color: #adb5bd; +} + +[data-bs-theme="dark"] #speech-input-btn:active { + background-color: #495057; +} + +[data-bs-theme="dark"] .recording-ui { + background: #212529; + border-color: #0d6efd; +} + +[data-bs-theme="dark"] #waveform-canvas { + background-color: #343a40; + border-color: #495057; +} + +/* Responsive adjustments */ +@media (max-width: 768px) { + .recording-controls { + flex-direction: column; + gap: 12px; + } + + .recording-buttons { + width: 100%; + } + + .recording-buttons .btn { + flex: 1; + } + + .countdown-timer { + font-size: 1.5rem; + width: 100%; + } +} diff --git a/application/single_app/static/css/chats.css b/application/single_app/static/css/chats.css index 61ac309a..38e11c3a 100644 --- a/application/single_app/static/css/chats.css +++ b/application/single_app/static/css/chats.css @@ -472,6 +472,58 @@ body.layout-split .gutter { align-items: center; } +/* Dropdown menu in message actions */ +.message-actions .dropdown { + display: inline-block; + position: relative; +} + +.message-actions .dropdown-menu { + z-index: 9999 !important; + position: absolute !important; +} + +.message-actions .dropdown-toggle::after { + display: none; /* Hide default Bootstrap dropdown arrow */ +} + +.message-actions .dropdown-menu { + min-width: 150px; + font-size: 0.875rem; +} + +.message-actions .dropdown-item { + padding: 0.5rem 1rem; + cursor: pointer; + display: flex; + align-items: center; +} + +.message-actions .dropdown-item i { + font-size: 0.875rem; +} + +.message-actions .dropdown-item:hover { + background-color: #f8f9fa; +} + +[data-bs-theme="dark"] .message-actions 
.dropdown-item:hover { + background-color: #343a40; +} + +/* Message exclusion badge - icon only */ +.message-exclusion-badge { + display: inline-flex; + align-items: center; + gap: 0.25rem; + font-size: 0.875rem; + padding: 0.25rem 0.5rem; +} + +.message-exclusion-badge i { + font-size: 1rem; +} + /* User message footer styling */ .user-message .message-footer { padding-top: 5px; @@ -625,6 +677,7 @@ body.layout-split .gutter { #chatbox { padding: 5px; overflow-y: auto; + overflow-x: clip; /* Prevent horizontal scroll but allow content to be visible */ flex-grow: 1; background-color: #ffffff; /* Optional: light background color for the chat area */ } @@ -835,6 +888,7 @@ a.citation-link:hover { width: 100%; min-width: 0; /* <-- This is crucial for flex children to shrink! */ margin-bottom: 10px; + overflow: visible; /* Allow dropdown menus to appear outside message */ } /* User messages aligned to the right */ @@ -850,12 +904,13 @@ a.citation-link:hover { /* Message bubble */ .message-bubble { max-width: 90%; - min-width: 0; /* <-- This is crucial for flex children to shrink! */ - width: 100%; + min-width: 250px; /* Ensure enough width for footer buttons to display properly */ + width: auto; /* Let content determine width, but respect min-width */ padding: 10px; border-radius: 15px; position: relative; background-color: #f8f9fa; /* Default light grey */ + overflow: visible; /* Allow dropdown menus to appear outside bubble */ /* Remove fixed padding-bottom here, let content determine height */ } @@ -864,6 +919,7 @@ a.citation-link:hover { background-color: #c8e0fa; /* Blue */ color: black; border-bottom-right-radius: 0; + min-width: 250px !important; /* Ensure enough width for footer buttons */ } @@ -947,6 +1003,11 @@ a.citation-link:hover { text-decoration: underline; } +[data-bs-theme="dark"] .message-text a { + color: #ffeb3b; + text-decoration: underline; +} + .message-text a:hover { color: #0a58ca; text-decoration: none; @@ -985,6 +1046,7 @@ a.citation-link:hover { .message-content { display: flex; align-items: flex-end; + overflow: visible; /* Allow dropdown menus to appear outside content */ } .message-content.flex-row-reverse { @@ -1443,4 +1505,175 @@ ol { [data-bs-theme="dark"] .message-text table caption { color: #adb5bd; +} + +/* Search highlight styles */ +mark.search-highlight { + background-color: #ffff00; + padding: 0 2px; + border-radius: 2px; +} + +[data-bs-theme="dark"] mark.search-highlight { + background-color: #ffc107; + color: #000; +} + +/* Message pulse animation for search results */ +@keyframes messagePulse { + 0%, 100% { + transform: scale(1); + box-shadow: 0 0 0 0 rgba(13, 110, 253, 0.4); + } + 50% { + transform: scale(1.02); + box-shadow: 0 0 20px 5px rgba(13, 110, 253, 0.6); + } +} + +.message-pulse { + animation: messagePulse 1s ease-in-out 2; + transition: transform 0.2s ease, box-shadow 0.2s ease; +} + +[data-bs-theme="dark"] .message-pulse { + animation: messagePulseDark 1s ease-in-out 2; +} + +@keyframes messagePulseDark { + 0%, 100% { + transform: scale(1); + box-shadow: 0 0 0 0 rgba(13, 202, 240, 0.4); + } + 50% { + transform: scale(1.02); + box-shadow: 0 0 20px 5px rgba(13, 202, 240, 0.6); + } +} + +/* Streaming cursor animation */ +.streaming-cursor .badge { + animation: streamingPulse 1.5s ease-in-out infinite; +} + +@keyframes streamingPulse { + 0%, 100% { + opacity: 1; + } + 50% { + opacity: 0.6; + } +} + +/* Reasoning effort slider styles */ +.reasoning-slider-container { + padding: 20px 0; +} + +.reasoning-levels { + min-height: 250px; +} + 
+.reasoning-level { + cursor: pointer; + padding: 12px 20px; + border: 2px solid var(--bs-border-color); + border-radius: 0.5rem; + transition: all 0.2s; + min-width: 180px; + background: var(--bs-body-bg); +} + +.reasoning-level:hover { + border-color: var(--bs-primary); + background: var(--bs-primary-bg-subtle); +} + +.reasoning-level.active { + border-color: var(--bs-primary); + background: var(--bs-primary); + color: white; +} + +.reasoning-level.disabled { + opacity: 0.4; + cursor: not-allowed; +} + +.reasoning-level.disabled:hover { + border-color: var(--bs-border-color); + background: var(--bs-body-bg); +} + +.reasoning-level-icon { + font-size: 1.5rem; + margin-bottom: 5px; +} + +.reasoning-level-label { + font-weight: 600; + font-size: 0.9rem; +} + +/* Text-to-Speech Styles */ +.tts-play-btn { + padding: 0.25rem 0.5rem; + transition: all 0.2s ease; +} + +.tts-play-btn:hover { + color: var(--bs-primary) !important; + transform: scale(1.1); +} + +.tts-play-btn.btn-success { + color: var(--bs-success) !important; +} + +.tts-play-btn.btn-warning { + color: var(--bs-warning) !important; +} + +.message.tts-playing .avatar { + animation: tts-avatar-pulse 0.3s ease-in-out infinite alternate; + border-radius: 50%; +} + +.message.tts-playing .avatar.volume-low { + box-shadow: 0 0 8px rgba(13, 110, 253, 0.4); +} + +.message.tts-playing .avatar.volume-medium { + box-shadow: 0 0 15px rgba(13, 110, 253, 0.6); +} + +.message.tts-playing .avatar.volume-high { + box-shadow: 0 0 25px rgba(13, 110, 253, 0.8); +} + +.message.tts-playing .avatar.volume-peak { + box-shadow: 0 0 35px rgba(13, 110, 253, 1); +} + +/* Word-by-word highlighting for TTS */ +.tts-word { + display: inline; + transition: background-color 0.2s ease, color 0.2s ease; +} + +.tts-word.tts-current-word { + background-color: rgba(var(--bs-primary-rgb), 0.3); + color: var(--bs-primary); + font-weight: 500; + border-radius: 2px; + padding: 0 2px; +} + +@keyframes tts-avatar-pulse { + 0% { + transform: scale(1); + } + 100% { + transform: scale(1.05); + } } \ No newline at end of file diff --git a/application/single_app/static/css/dark-mode.css b/application/single_app/static/css/dark-mode.css new file mode 100644 index 00000000..e69de29b diff --git a/application/single_app/static/css/sidebar.css b/application/single_app/static/css/sidebar.css index 5ec8fead..999b44c7 100644 --- a/application/single_app/static/css/sidebar.css +++ b/application/single_app/static/css/sidebar.css @@ -45,6 +45,18 @@ body.has-classification-banner #sidebar-nav { height: calc(100vh - 40px) !important; /* Adjust height to account for banner */ } +/* Chats top-nav layout: align the fixed sidebar just below the navbar */ +nav.navbar.fixed-top + #sidebar-nav { + top: 66px !important; + height: calc(100vh - 66px); +} + +/* Account for classification banner when present */ +body.has-classification-banner nav.navbar + #sidebar-nav { + top: 98px !important; + height: calc(100vh - 98px); +} + /* Floating expand button positioning when classification banner is present */ body.has-classification-banner #floating-expand-btn { top: calc(0.5rem + 40px) !important; /* Start below the classification banner */ diff --git a/application/single_app/static/css/styles.css b/application/single_app/static/css/styles.css index 1ea286fa..e537590d 100644 --- a/application/single_app/static/css/styles.css +++ b/application/single_app/static/css/styles.css @@ -696,3 +696,161 @@ main { font-size: 0.875rem !important; } } + +/* ============= Message Masking Styles ============= */ + +/* 
Masked content spans */ +.masked-content { + text-decoration: line-through; + opacity: 0.5; + background-color: rgba(255, 193, 7, 0.15); + padding: 0 2px; + border-radius: 2px; + cursor: help; + transition: opacity 0.2s ease, background-color 0.2s ease; +} + +.masked-content:hover { + opacity: 0.7; + background-color: rgba(255, 193, 7, 0.25); +} + +/* Fully masked message styling */ +.fully-masked .message-bubble { + border: 2px dashed rgba(255, 193, 7, 0.5); + opacity: 0.7; + background-color: rgba(255, 193, 7, 0.05); +} + +/* Message exclusion badge in footer */ +.message-exclusion-badge { + display: flex; + align-items: center; + gap: 0.25rem; + font-size: 0.875rem; + padding: 0.25rem 0.5rem; + border-radius: 0.25rem; + background-color: rgba(255, 193, 7, 0.15); + color: #5c4503 !important; + position: absolute; + left: 50%; + transform: translateX(-50%); + white-space: nowrap; + max-width: calc(100% - 1rem); + overflow: hidden; +} + +.message-exclusion-badge i { + font-size: 1rem; + color: #5c4503 !important; + flex-shrink: 0; +} + +.message-exclusion-badge .badge-text { + overflow: hidden; + text-overflow: clip; + white-space: nowrap; +} + +/* Hide badge text on narrow message footers - show icon only */ +@container footer (max-width: 350px) { + .message-exclusion-badge .badge-text { + display: none; + } + + .message-exclusion-badge { + gap: 0; + padding: 0.25rem 0.5rem; + } +} + +/* Ensure message footer supports absolute positioning and container queries */ +.message-footer { + container-type: inline-size; + container-name: footer; +} + +/* Ensure message footer supports absolute positioning */ +.message-footer { + position: relative; +} + +/* Mask button styling */ +.mask-btn { + border: none; + background: transparent; + color: #6c757d; + font-size: 0.875rem; + padding: 4px 8px; + border-radius: 4px; + cursor: pointer; + transition: color 0.2s ease, background-color 0.2s ease; +} + +.mask-btn:hover { + color: #ffc107; + background-color: rgba(255, 193, 7, 0.1); +} + +/* Make mask button icons same size as other action buttons */ +.mask-btn i { + font-size: 1rem; + width: 16px; + height: 24px; + display: inline-flex; + align-items: center; + justify-content: center; +} + +/* Dark mode styles for masked content */ +[data-bs-theme="dark"] .masked-content { + background-color: rgba(255, 193, 7, 0.2); +} + +[data-bs-theme="dark"] .masked-content:hover { + opacity: 0.8; + background-color: rgba(255, 193, 7, 0.3); +} + +[data-bs-theme="dark"] .fully-masked .message-bubble { + border-color: rgba(255, 193, 7, 0.6); + background-color: rgba(255, 193, 7, 0.08); +} + +[data-bs-theme="dark"] .message-exclusion-badge { + background-color: rgba(255, 193, 7, 0.15); + color: #ffc107; +} + +[data-bs-theme="dark"] .mask-btn { + color: #adb5bd; +} + +[data-bs-theme="dark"] .mask-btn:hover { + color: #ffc107; + background-color: rgba(255, 193, 7, 0.15); +} + +/* Dark mode styles for links in messages */ +[data-bs-theme="dark"] .message-bubble a, +[data-bs-theme="dark"] .user-message a, +[data-bs-theme="dark"] .assistant-message a, +[data-bs-theme="dark"] .message-content a { + color: #66b3ff !important; /* Brighter blue for better visibility */ + text-decoration: underline; +} + +[data-bs-theme="dark"] .message-bubble a:hover, +[data-bs-theme="dark"] .user-message a:hover, +[data-bs-theme="dark"] .assistant-message a:hover, +[data-bs-theme="dark"] .message-content a:hover { + color: #99ccff !important; /* Even lighter blue on hover */ + text-decoration: underline; +} + +[data-bs-theme="dark"] 
.message-bubble a:visited, +[data-bs-theme="dark"] .user-message a:visited, +[data-bs-theme="dark"] .assistant-message a:visited, +[data-bs-theme="dark"] .message-content a:visited { + color: #b399ff !important; /* Purple-ish for visited links */ +} diff --git a/application/single_app/static/images/custom_logo.png b/application/single_app/static/images/custom_logo.png index 45a99fd3..ecf6e652 100644 Binary files a/application/single_app/static/images/custom_logo.png and b/application/single_app/static/images/custom_logo.png differ diff --git a/application/single_app/static/images/custom_logo_dark.png b/application/single_app/static/images/custom_logo_dark.png index b3beb694..4f281945 100644 Binary files a/application/single_app/static/images/custom_logo_dark.png and b/application/single_app/static/images/custom_logo_dark.png differ diff --git a/application/single_app/static/images/favicon.ico b/application/single_app/static/images/favicon.ico index d8f058f6..3dc7742a 100644 Binary files a/application/single_app/static/images/favicon.ico and b/application/single_app/static/images/favicon.ico differ diff --git a/application/single_app/static/js/admin/admin_agent_templates.js b/application/single_app/static/js/admin/admin_agent_templates.js new file mode 100644 index 00000000..4bea4924 --- /dev/null +++ b/application/single_app/static/js/admin/admin_agent_templates.js @@ -0,0 +1,515 @@ +// admin_agent_templates.js +// Admin UI logic for reviewing, approving, and deleting agent template submissions + +import { showToast } from "../chat/chat-toast.js"; + +const panel = document.getElementById("agent-templates-admin-panel"); +const tableBody = document.getElementById("agent-template-table-body"); +const statusFilters = document.getElementById("agent-template-status-filters"); +const disabledAlert = document.getElementById("agent-templates-disabled-alert"); +const searchInput = document.getElementById("agent-template-search"); +const paginationEl = document.getElementById("agent-template-pagination"); +const paginationSummary = document.getElementById("agent-template-pagination-summary"); +const paginationNav = document.getElementById("agent-template-pagination-nav"); +const modalEl = document.getElementById("agentTemplateReviewModal"); +const approveBtn = document.getElementById("agent-template-approve-btn"); +const rejectBtn = document.getElementById("agent-template-reject-btn"); +const deleteBtn = document.getElementById("agent-template-delete-btn"); +const notesInput = document.getElementById("agent-template-review-notes"); +const rejectReasonInput = document.getElementById("agent-template-reject-reason"); +const errorAlert = document.getElementById("agent-template-review-error"); +const statusBadge = document.getElementById("agent-template-review-status"); +const helperEl = document.getElementById("agent-template-review-helper"); +const descriptionEl = document.getElementById("agent-template-review-description"); +const instructionsEl = document.getElementById("agent-template-review-instructions"); +const actionsWrapper = document.getElementById("agent-template-review-actions-wrapper"); +const actionsList = document.getElementById("agent-template-review-actions"); +const settingsWrapper = document.getElementById("agent-template-review-settings-wrapper"); +const settingsEl = document.getElementById("agent-template-review-settings"); +const tagsContainer = document.getElementById("agent-template-review-tags"); +const subtitleEl = document.getElementById("agent-template-review-subtitle"); 
+const metaEl = document.getElementById("agent-template-review-meta"); +const titleEl = document.getElementById("agentTemplateReviewModalLabel"); + +let currentFilter = "pending"; +let templates = []; +let selectedTemplate = null; +let reviewModal = null; +let currentPage = 1; +let searchQuery = ""; +const PAGE_SIZE = 10; + +function init() { + if (!panel) { + return; + } + + if (modalEl && window.bootstrap) { + reviewModal = bootstrap.Modal.getOrCreateInstance(modalEl); + } + + if (!window.appSettings?.enable_agent_template_gallery) { + if (disabledAlert) disabledAlert.classList.remove("d-none"); + renderEmptyState("Template gallery is disabled."); + return; + } + + attachFilterHandlers(); + attachTableHandlers(); + attachSearchHandler(); + attachModalHandlers(); + loadTemplatesForFilter(currentFilter); +} + +function attachFilterHandlers() { + if (!statusFilters) { + return; + } + statusFilters.addEventListener("click", (event) => { + const button = event.target.closest("button[data-status]"); + if (!button) { + return; + } + const { status } = button.dataset; + if (!status || status === currentFilter) { + return; + } + currentFilter = status; + statusFilters.querySelectorAll("button").forEach((btn) => btn.classList.remove("active")); + button.classList.add("active"); + currentPage = 1; + loadTemplatesForFilter(currentFilter); + }); +} + +function attachTableHandlers() { + if (!tableBody) { + return; + } + tableBody.addEventListener("click", (event) => { + const reviewBtn = event.target.closest(".agent-template-review-btn"); + if (reviewBtn) { + const templateId = reviewBtn.dataset.templateId; + openReviewModal(templateId); + return; + } + const deleteBtn = event.target.closest(".agent-template-inline-delete"); + if (deleteBtn) { + const templateId = deleteBtn.dataset.templateId; + confirmAndDelete(templateId); + } + }); +} + +function attachModalHandlers() { + if (!approveBtn || !rejectBtn || !deleteBtn) { + return; + } + + approveBtn.addEventListener("click", () => handleApproval()); + rejectBtn.addEventListener("click", () => handleRejection()); + deleteBtn.addEventListener("click", () => { + if (selectedTemplate?.id) { + confirmAndDelete(selectedTemplate.id, true); + } + }); +} + +function attachSearchHandler() { + if (!searchInput) { + return; + } + searchInput.addEventListener("input", (event) => { + searchQuery = event.target.value?.trim().toLowerCase() || ""; + currentPage = 1; + renderTemplates(); + }); +} + +async function loadTemplatesForFilter(status) { + renderLoadingRow(); + try { + const query = status && status !== "all" ? `?status=${encodeURIComponent(status)}` : "?status=all"; + const response = await fetch(`/api/admin/agent-templates${query}`); + if (!response.ok) { + throw new Error("Failed to load templates."); + } + const data = await response.json(); + templates = data.templates || []; + currentPage = 1; + renderTemplates(); + } catch (error) { + console.error("Error loading agent templates", error); + renderEmptyState(error.message || "Unable to load templates."); + } +} + +function renderLoadingRow() { + if (!tableBody) return; + tableBody.innerHTML = ` +
Loading...
+ Loading templates... + `; + setSummaryMessage("Loading templates..."); + renderPaginationControls(0); +} + +function renderEmptyState(message) { + if (!tableBody) return; + tableBody.innerHTML = `${message}`; + setSummaryMessage(message); + renderPaginationControls(0); +} + +function renderTemplates() { + if (!tableBody) { + return; + } + const filtered = getFilteredTemplates(); + if (!filtered.length) { + const emptyMessage = searchQuery ? "No templates match your search." : "No templates found for this filter."; + renderEmptyState(emptyMessage); + return; + } + + const totalItems = filtered.length; + const totalPages = Math.ceil(totalItems / PAGE_SIZE) || 1; + if (currentPage > totalPages) { + currentPage = totalPages; + } + const startIndex = (currentPage - 1) * PAGE_SIZE; + const pageItems = filtered.slice(startIndex, startIndex + PAGE_SIZE); + const endIndex = startIndex + pageItems.length; + + tableBody.innerHTML = ""; + pageItems.forEach((template) => { + const row = document.createElement("tr"); + row.innerHTML = ` + +
${escapeHtml(template.title || template.display_name || "Template")}
+
${escapeHtml(template.helper_text || template.description || "")}
+ + ${renderStatusBadge(template.status)} + +
${escapeHtml(template.created_by_name || 'Unknown')}
+
${escapeHtml(template.created_by_email || '')}
+ + ${formatDate(template.updated_at || template.created_at)} + +
+ + +
+ + `; + tableBody.appendChild(row); + }); + + setSummaryMessage(`Showing ${startIndex + 1}-${endIndex} of ${totalItems} (page ${currentPage} of ${totalPages})`); + renderPaginationControls(totalPages); +} + +function getFilteredTemplates() { + if (!searchQuery) { + return templates; + } + return templates.filter((template) => { + return [ + template.title, + template.display_name, + template.created_by_name, + template.created_by_email + ].some((value) => value && value.toString().toLowerCase().includes(searchQuery)); + }); +} + +function renderStatusBadge(status) { + const normalized = (status || "pending").toLowerCase(); + const variants = { + approved: "success", + rejected: "danger", + archived: "secondary", + pending: "warning", + }; + const badgeClass = variants[normalized] || "secondary"; + return `${normalized}`; +} + +function setSummaryMessage(message = "") { + if (paginationSummary) { + paginationSummary.textContent = message; + } +} + +function renderPaginationControls(totalPages) { + if (!paginationEl) { + return; + } + + if (paginationNav) { + if (totalPages <= 1) { + paginationNav.classList.add("d-none"); + } else { + paginationNav.classList.remove("d-none"); + } + } + + if (totalPages <= 1) { + paginationEl.innerHTML = ""; + return; + } + + const maxButtons = 5; + let startPage = Math.max(1, currentPage - Math.floor(maxButtons / 2)); + let endPage = startPage + maxButtons - 1; + if (endPage > totalPages) { + endPage = totalPages; + startPage = Math.max(1, endPage - maxButtons + 1); + } + + const fragment = document.createDocumentFragment(); + fragment.appendChild(createPageItem("Previous", currentPage - 1, currentPage === 1)); + + for (let page = startPage; page <= endPage; page += 1) { + fragment.appendChild(createPageItem(page, page, false, page === currentPage)); + } + + fragment.appendChild(createPageItem("Next", currentPage + 1, currentPage === totalPages)); + + paginationEl.innerHTML = ""; + paginationEl.appendChild(fragment); +} + +function createPageItem(label, targetPage, disabled, active = false) { + const li = document.createElement("li"); + li.className = "page-item"; + if (disabled) li.classList.add("disabled"); + if (active) li.classList.add("active"); + + const button = document.createElement("button"); + button.type = "button"; + button.className = "page-link"; + button.textContent = label.toString(); + button.disabled = disabled; + button.addEventListener("click", () => { + if (disabled || targetPage === currentPage) { + return; + } + currentPage = Math.min(Math.max(targetPage, 1), Math.ceil(getFilteredTemplates().length / PAGE_SIZE) || 1); + renderTemplates(); + }); + + li.appendChild(button); + return li; +} + +function formatDate(value) { + if (!value) { + return "-"; + } + const date = new Date(value); + if (Number.isNaN(date.getTime())) { + return value; + } + return date.toLocaleString(); +} + +async function openReviewModal(templateId) { + if (!templateId || !reviewModal) { + return; + } + try { + const response = await fetch(`/api/admin/agent-templates/${templateId}`); + if (!response.ok) { + throw new Error('Failed to load template.'); + } + const data = await response.json(); + selectedTemplate = data.template; + populateReviewModal(selectedTemplate); + reviewModal.show(); + } catch (error) { + console.error('Failed to open template modal', error); + showToast(error.message || 'Unable to load template.', 'danger'); + } +} + +function populateReviewModal(template) { + if (!template) { + return; + } + titleEl.textContent = template.title || 
template.display_name || 'Agent Template'; + helperEl.textContent = template.helper_text || template.description || '-'; + descriptionEl.textContent = template.description || '-'; + instructionsEl.textContent = template.instructions || ''; + notesInput.value = template.review_notes || ''; + rejectReasonInput.value = template.rejection_reason || ''; + updateStatusBadge(template.status); + + const submittedBy = template.created_by_name || 'Unknown submitter'; + const submittedAt = formatDate(template.created_at); + subtitleEl.textContent = `Submitted by ${submittedBy} on ${submittedAt}`; + metaEl.textContent = `Updated ${formatDate(template.updated_at)}`; + + if (Array.isArray(template.actions_to_load) && template.actions_to_load.length) { + actionsWrapper.classList.remove('d-none'); + actionsList.innerHTML = ''; + template.actions_to_load.forEach((action) => { + const badge = document.createElement('span'); + badge.className = 'badge bg-info text-dark me-1 mb-1'; + badge.textContent = action; + actionsList.appendChild(badge); + }); + } else { + actionsWrapper.classList.add('d-none'); + actionsList.innerHTML = ''; + } + + if (template.additional_settings) { + settingsWrapper.classList.remove('d-none'); + settingsEl.textContent = template.additional_settings; + } else { + settingsWrapper.classList.add('d-none'); + settingsEl.textContent = ''; + } + + if (Array.isArray(template.tags) && template.tags.length) { + tagsContainer.classList.remove('d-none'); + tagsContainer.innerHTML = ''; + template.tags.slice(0, 8).forEach((tag) => { + const badge = document.createElement('span'); + badge.className = 'badge bg-secondary-subtle text-secondary-emphasis'; + badge.textContent = tag; + tagsContainer.appendChild(badge); + }); + } else { + tagsContainer.classList.add('d-none'); + tagsContainer.innerHTML = ''; + } + + hideModalError(); +} + +function updateStatusBadge(status) { + const normalized = (status || 'pending').toLowerCase(); + statusBadge.textContent = normalized; + statusBadge.className = 'badge'; + statusBadge.classList.add(`bg-${{ + approved: 'success', + rejected: 'danger', + archived: 'secondary', + pending: 'warning' + }[normalized] || 'secondary'}`); +} + +function hideModalError() { + if (errorAlert) { + errorAlert.classList.add('d-none'); + errorAlert.textContent = ''; + } +} + +function showModalError(message) { + if (!errorAlert) { + showToast(message, 'danger'); + return; + } + errorAlert.classList.remove('d-none'); + errorAlert.textContent = message; +} + +async function handleApproval() { + if (!selectedTemplate?.id) { + return; + } + await submitTemplateDecision(`/api/admin/agent-templates/${selectedTemplate.id}/approve`, { + notes: notesInput.value?.trim() || undefined + }, 'Template approved!'); +} + +async function handleRejection() { + if (!selectedTemplate?.id) { + return; + } + const reason = rejectReasonInput.value?.trim(); + if (!reason) { + showModalError('A rejection reason is required.'); + rejectReasonInput.focus(); + return; + } + await submitTemplateDecision(`/api/admin/agent-templates/${selectedTemplate.id}/reject`, { + reason, + notes: notesInput.value?.trim() || undefined + }, 'Template rejected.'); +} + +async function submitTemplateDecision(url, payload, successMessage) { + try { + setModalButtonsDisabled(true); + const response = await fetch(url, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload) + }); + const data = await response.json().catch(() => ({})); + if (!response.ok) { + throw new Error(data.error 
|| 'Failed to update template.'); + } + showToast(successMessage, 'success'); + hideModalError(); + reviewModal?.hide(); + loadTemplatesForFilter(currentFilter); + } catch (error) { + console.error('Template decision failed', error); + showModalError(error.message || 'Failed to update template.'); + } finally { + setModalButtonsDisabled(false); + } +} + +function setModalButtonsDisabled(disabled) { + [approveBtn, rejectBtn, deleteBtn].forEach((btn) => { + if (btn) btn.disabled = disabled; + }); +} + +async function confirmAndDelete(templateId, closeModal = false) { + if (!templateId) { + return; + } + if (!confirm('Delete this template? This action cannot be undone.')) { + return; + } + try { + const response = await fetch(`/api/admin/agent-templates/${templateId}`, { + method: 'DELETE' + }); + const data = await response.json().catch(() => ({})); + if (!response.ok) { + throw new Error(data.error || 'Failed to delete template.'); + } + showToast('Template deleted.', 'success'); + if (closeModal) { + reviewModal?.hide(); + } + loadTemplatesForFilter(currentFilter); + } catch (error) { + console.error('Failed to delete template', error); + showToast(error.message || 'Failed to delete template.', 'danger'); + } +} + +function escapeHtml(value) { + const div = document.createElement('div'); + div.textContent = value || ''; + return div.innerHTML; +} + +if (document.readyState === 'loading') { + document.addEventListener('DOMContentLoaded', init); +} else { + init(); +} diff --git a/application/single_app/static/js/admin/admin_plugins.js b/application/single_app/static/js/admin/admin_plugins.js index 93c7a926..ad497f62 100644 --- a/application/single_app/static/js/admin/admin_plugins.js +++ b/application/single_app/static/js/admin/admin_plugins.js @@ -4,7 +4,7 @@ import { renderPluginsTable as sharedRenderPluginsTable, validatePluginManifest // Main logic document.addEventListener('DOMContentLoaded', function () { - if (!document.getElementById('agents-tab')) return; + if (!document.getElementById('actions-configuration')) return; // Load and render plugins table loadPlugins(); @@ -55,7 +55,11 @@ function setupSaveHandler(plugin, modal) { saveBtn.onclick = async (event) => { event.preventDefault(); - + const errorDiv = document.getElementById('plugin-modal-error'); + if (errorDiv) { + errorDiv.classList.add('d-none'); + errorDiv.textContent = ''; + } try { // Get form data from the stepper const formData = window.pluginModalStepper.getFormData(); @@ -67,8 +71,19 @@ function setupSaveHandler(plugin, modal) { return; } + const originalText = saveBtn.innerHTML; + saveBtn.innerHTML = `Saving...`; + saveBtn.disabled = true; // Save the action - await savePlugin(formData, plugin); + try { + await savePlugin(formData, plugin); + } catch (error) { + window.pluginModalStepper.showError(error.message); + return; + } finally { + saveBtn.innerHTML = originalText; + saveBtn.disabled = false; + } // Close modal and refresh if (modal && typeof modal.hide === 'function') { diff --git a/application/single_app/static/js/admin/admin_settings.js b/application/single_app/static/js/admin/admin_settings.js index d3b72980..81f80f9e 100644 --- a/application/single_app/static/js/admin/admin_settings.js +++ b/application/single_app/static/js/admin/admin_settings.js @@ -394,9 +394,17 @@ if (fetchGptBtn) { const resp = await fetch('/api/models/gpt'); const data = await resp.json(); if (resp.ok && data.models && data.models.length > 0) { + // Clear old models and replace with new ones gptAll = data.models; + + // Filter 
out selected models that no longer exist in the newly fetched list + gptSelected = gptSelected.filter(selected => + gptAll.some(model => model.deploymentName === selected.deploymentName) + ); + renderGPTModels(); updateGptHiddenInput(); + markFormAsModified(); } else { listDiv.innerHTML = `

Error: ${data.error || 'No GPT models found'}
`; } @@ -441,9 +449,17 @@ if (fetchEmbeddingBtn) { const resp = await fetch('/api/models/embedding'); const data = await resp.json(); if (resp.ok && data.models && data.models.length > 0) { + // Clear old models and replace with new ones embeddingAll = data.models; + + // Filter out selected models that no longer exist in the newly fetched list + embeddingSelected = embeddingSelected.filter(selected => + embeddingAll.some(model => model.deploymentName === selected.deploymentName) + ); + renderEmbeddingModels(); updateEmbeddingHiddenInput(); + markFormAsModified(); } else { listDiv.innerHTML = `

Error: ${data.error || 'No embedding models found'}
`; } @@ -480,9 +496,17 @@ if (fetchImageBtn) { const resp = await fetch('/api/models/image'); const data = await resp.json(); if (resp.ok && data.models && data.models.length > 0) { + // Clear old models and replace with new ones imageAll = data.models; + + // Filter out selected models that no longer exist in the newly fetched list + imageSelected = imageSelected.filter(selected => + imageAll.some(model => model.deploymentName === selected.deploymentName) + ); + renderImageModels(); updateImageHiddenInput(); + markFormAsModified(); } else { listDiv.innerHTML = `

Error: ${data.error || 'No image models found'}
`; } @@ -1541,23 +1565,168 @@ function setupToggles() { }); } + const enableKeyVaultCheckbox = document.getElementById('enable_key_vault_secret_storage'); + if (enableKeyVaultCheckbox) { + enableKeyVaultCheckbox.addEventListener('change', function() { + const keyVaultSettings = document.getElementById('key_vault_settings'); + keyVaultSettings.style.display = this.checked ? 'block' : 'none'; + markFormAsModified(); + }); + } + const enableWebSearch = document.getElementById('enable_web_search'); - if (enableWebSearch) { + const webSearchFoundrySettings = document.getElementById('web_search_foundry_settings'); + const webSearchConsentInput = document.getElementById('web_search_consent_accepted'); + const webSearchConsentModalEl = document.getElementById('web-search-consent-modal'); + const webSearchConsentAcceptBtn = document.getElementById('web-search-consent-accept'); + const webSearchConsentDeclineBtn = document.getElementById('web-search-consent-decline'); + let webSearchConsentModal = null; + const toggleVisibility = (element, isVisible) => { + if (!element) { + return; + } + element.classList.toggle('d-none', !isVisible); + }; + if (enableWebSearch && webSearchFoundrySettings) { + const setConsentAccepted = (value) => { + if (webSearchConsentInput) { + webSearchConsentInput.value = value ? 'true' : 'false'; + } + }; + + const showConsentModal = () => { + if (!webSearchConsentModalEl) { + showToast('Consent modal could not be loaded.', 'warning'); + return; + } + + if (!webSearchConsentModal) { + webSearchConsentModal = new bootstrap.Modal(webSearchConsentModalEl, { + backdrop: 'static', + keyboard: false + }); + } + + webSearchConsentModal.show(); + }; + + const hasConsent = () => webSearchConsentInput?.value === 'true'; + + if (enableWebSearch.checked && !hasConsent()) { + enableWebSearch.checked = false; + } + toggleVisibility(webSearchFoundrySettings, enableWebSearch.checked && hasConsent()); + enableWebSearch.addEventListener('change', function () { - document.getElementById('web_search_settings').style.display = this.checked ? 
'block' : 'none'; + if (this.checked && !hasConsent()) { + this.checked = false; + toggleVisibility(webSearchFoundrySettings, false); + showConsentModal(); + return; + } + + toggleVisibility(webSearchFoundrySettings, this.checked); + markFormAsModified(); + }); + + if (webSearchConsentAcceptBtn) { + webSearchConsentAcceptBtn.addEventListener('click', () => { + setConsentAccepted(true); + enableWebSearch.checked = true; + toggleVisibility(webSearchFoundrySettings, true); + markFormAsModified(); + if (webSearchConsentModal) { + webSearchConsentModal.hide(); + } + }); + } + + if (webSearchConsentDeclineBtn) { + webSearchConsentDeclineBtn.addEventListener('click', () => { + setConsentAccepted(false); + enableWebSearch.checked = false; + toggleVisibility(webSearchFoundrySettings, false); + markFormAsModified(); + if (webSearchConsentModal) { + webSearchConsentModal.hide(); + } + }); + } + } + + // Web Search User Notice toggle + const enableWebSearchUserNotice = document.getElementById('enable_web_search_user_notice'); + const webSearchUserNoticeSettings = document.getElementById('web_search_user_notice_settings'); + if (enableWebSearchUserNotice && webSearchUserNoticeSettings) { + enableWebSearchUserNotice.addEventListener('change', function() { + toggleVisibility(webSearchUserNoticeSettings, this.checked); + markFormAsModified(); + }); + } + + const foundryAuthType = document.getElementById('web_search_foundry_auth_type'); + const foundryMiType = document.getElementById('web_search_foundry_managed_identity_type'); + const foundryCloud = document.getElementById('web_search_foundry_cloud'); + const foundrySpFields = document.getElementById('web_search_foundry_service_principal_fields'); + const foundryMiTypeContainer = document.getElementById('web_search_foundry_managed_identity_type_container'); + const foundryMiClientIdContainer = document.getElementById('web_search_foundry_managed_identity_client_id_container'); + const foundryCloudContainer = document.getElementById('web_search_foundry_cloud_container'); + const foundryAuthorityContainer = document.getElementById('web_search_foundry_authority_container'); + + function updateFoundryAuthVisibility() { + const authType = foundryAuthType?.value || 'managed_identity'; + const cloudValue = foundryCloud?.value || ''; + + toggleVisibility(foundrySpFields, authType === 'service_principal'); + toggleVisibility(foundryCloudContainer, authType === 'service_principal'); + toggleVisibility( + foundryAuthorityContainer, + authType === 'service_principal' && cloudValue === 'custom' + ); + toggleVisibility(foundryMiTypeContainer, authType === 'managed_identity'); + if (foundryMiClientIdContainer) { + const miType = foundryMiType?.value || 'system_assigned'; + toggleVisibility( + foundryMiClientIdContainer, + authType === 'managed_identity' && miType === 'user_assigned' + ); + } + } + + if (foundryAuthType || foundryMiType || foundryCloud) { + updateFoundryAuthVisibility(); + } + + if (foundryMiType) { + foundryMiType.addEventListener('change', () => { + updateFoundryAuthVisibility(); + markFormAsModified(); + }); + } + + if (foundryCloud) { + foundryCloud.addEventListener('change', () => { + updateFoundryAuthVisibility(); markFormAsModified(); }); } - const enableWebSearchApim = document.getElementById('enable_web_search_apim'); - if (enableWebSearchApim) { - enableWebSearchApim.addEventListener('change', function () { - document.getElementById('non_apim_web_search_settings').style.display = this.checked ? 
'none' : 'block'; - document.getElementById('apim_web_search_settings').style.display = this.checked ? 'block' : 'none'; + if (foundryAuthType) { + foundryAuthType.addEventListener('change', () => { + updateFoundryAuthVisibility(); markFormAsModified(); }); } + const toggleFoundrySecret = document.getElementById('toggle_web_search_foundry_client_secret'); + const foundrySecretInput = document.getElementById('web_search_foundry_client_secret'); + if (toggleFoundrySecret && foundrySecretInput) { + toggleFoundrySecret.addEventListener('click', () => { + foundrySecretInput.type = foundrySecretInput.type === 'password' ? 'text' : 'password'; + toggleFoundrySecret.textContent = foundrySecretInput.type === 'password' ? 'Show' : 'Hide'; + }); + } + const enableAiSearchApim = document.getElementById('enable_ai_search_apim'); if (enableAiSearchApim) { enableAiSearchApim.addEventListener('change', function () { @@ -1630,6 +1799,15 @@ function setupToggles() { }); } + const speechAuthType = document.getElementById('speech_service_authentication_type'); + if (speechAuthType) { + speechAuthType.addEventListener('change', function () { + document.getElementById('speech_service_key_container').style.display = + (this.value === 'key') ? 'block' : 'none'; + markFormAsModified(); + }); + } + const officeAuthType = document.getElementById('office_docs_authentication_type'); const connStrGroup = document.getElementById('office_docs_storage_conn_str_group'); const urlGroup = document.getElementById('office_docs_storage_url_group'); @@ -1701,11 +1879,20 @@ function setupToggles() { } if (enableGroupWorkspacesToggle && createGroupPermissionSettingDiv) { + const enableGroupCreationSetting = document.getElementById('enable_group_creation_setting'); + // Initial state createGroupPermissionSettingDiv.style.display = enableGroupWorkspacesToggle.checked ? 'block' : 'none'; + if (enableGroupCreationSetting) { + enableGroupCreationSetting.style.display = enableGroupWorkspacesToggle.checked ? 'block' : 'none'; + } + // Listener for changes enableGroupWorkspacesToggle.addEventListener('change', function() { createGroupPermissionSettingDiv.style.display = this.checked ? 'block' : 'none'; + if (enableGroupCreationSetting) { + enableGroupCreationSetting.style.display = this.checked ? 
'block' : 'none'; + } markFormAsModified(); }); } @@ -2251,6 +2438,94 @@ function setupTestButtons() { } }); } + + const testKeyVaultBtn = document.getElementById('test_key_vault_button'); + if (testKeyVaultBtn) { + testKeyVaultBtn.addEventListener('click', async () => { + const resultDiv = document.getElementById('test_key_vault_result'); + resultDiv.innerHTML = 'Testing Key Vault...'; + + const payload = { + test_type: 'key_vault', + vault_name: document.getElementById('key_vault_name').value, + client_id: document.getElementById('key_vault_identity').value + }; + + try { + const resp = await fetch('/api/admin/settings/test_connection', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload) + }); + const data = await resp.json(); + if (resp.ok) { + resultDiv.innerHTML = `${data.message}`; + } else { + resultDiv.innerHTML = `${data.error || 'Error testing Key Vault'}`; } + } catch (err) { + resultDiv.innerHTML = `Error: ${err.message}`; } + }); + } + + const testVisionBtn = document.getElementById('test_multimodal_vision_button'); + if (testVisionBtn) { + testVisionBtn.addEventListener('click', async () => { + const resultDiv = document.getElementById('test_multimodal_vision_result'); + resultDiv.innerHTML = 'Testing Vision Analysis...'; + + const visionModel = document.getElementById('multimodal_vision_model').value; + + if (!visionModel) { + resultDiv.innerHTML = 'Please select a vision model first'; + return; + } + + const enableApim = document.getElementById('enable_gpt_apim').checked; + + const payload = { + test_type: 'multimodal_vision', + enable_apim: enableApim, + vision_model: visionModel + }; + + if (enableApim) { + payload.apim = { + endpoint: document.getElementById('azure_apim_gpt_endpoint').value, + subscription_key: document.getElementById('azure_apim_gpt_subscription_key').value, + api_version: document.getElementById('azure_apim_gpt_api_version').value, + deployment: visionModel + }; + } else { + payload.direct = { + endpoint: document.getElementById('azure_openai_gpt_endpoint').value, + auth_type: document.getElementById('azure_openai_gpt_authentication_type').value, + key: document.getElementById('azure_openai_gpt_key').value, + api_version: document.getElementById('azure_openai_gpt_api_version').value, + deployment: visionModel + }; + } + + try { + const resp = await fetch('/api/admin/settings/test_connection', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload) + }); + const data = await resp.json(); + if (resp.ok) { + resultDiv.innerHTML = `
+ Success!
+ ${data.message}
+ ${data.details || ''}
`; + } else { + resultDiv.innerHTML = `${data.error || 'Error testing Vision Analysis'}`; + } + } catch (err) { + resultDiv.innerHTML = `Error: ${err.message}`; + } + }); + } } function toggleEnhancedCitation(isEnabled) { @@ -2352,10 +2627,91 @@ if (extractToggle) { }); } +// Multi-Modal Vision UI +const visionToggle = document.getElementById('enable_multimodal_vision'); +const visionModelDiv = document.getElementById('multimodal_vision_model_settings'); +const visionSelect = document.getElementById('multimodal_vision_model'); + +function populateVisionModels() { + if (!visionSelect) return; + + // remember previously chosen value + const prev = visionSelect.getAttribute('data-prev') || ''; + + // clear out old options (except the placeholder) + visionSelect.innerHTML = ''; + + if (document.getElementById('enable_gpt_apim').checked) { + // use comma-separated APIM deployments + const text = document.getElementById('azure_apim_gpt_deployment').value || ''; + text.split(',') + .map(s => s.trim()) + .filter(s => s) + .forEach(d => { + const opt = new Option(d, d); + visionSelect.add(opt); + }); + } else { + // use direct GPT selected deployments - filter for vision-capable models + (window.gptSelected || []).forEach(m => { + // Only include models with vision capabilities + // Vision-enabled models per Azure OpenAI docs: + // - o-series reasoning models (o1, o3, etc.) + // - GPT-5 series + // - GPT-4.1 series + // - GPT-4.5 + // - GPT-4o series (gpt-4o, gpt-4o-mini) + // - GPT-4 vision models (gpt-4-vision, gpt-4-turbo-vision) + const modelNameLower = (m.modelName || '').toLowerCase(); + const isVisionCapable = + modelNameLower.includes('vision') || // gpt-4-vision, gpt-4-turbo-vision + modelNameLower.includes('gpt-4o') || // gpt-4o, gpt-4o-mini + modelNameLower.includes('gpt-4.1') || // gpt-4.1 series + modelNameLower.includes('gpt-4.5') || // gpt-4.5 + modelNameLower.includes('gpt-5') || // gpt-5 series + modelNameLower.match(/^o\d+/) || // o1, o3, etc. (o-series) + modelNameLower.includes('o1-') || // o1-preview, o1-mini + modelNameLower.includes('o3-'); // o3-mini, etc. + + if (isVisionCapable) { + const label = `${m.deploymentName} (${m.modelName})`; + const opt = new Option(label, m.deploymentName); + visionSelect.add(opt); + } + }); + } + + // restore previous + if (prev) { + visionSelect.value = prev; + } +} + +if (visionToggle && visionModelDiv) { + // show/hide the model dropdown + visionModelDiv.style.display = visionToggle.checked ? 'block' : 'none'; + visionToggle.addEventListener('change', () => { + visionModelDiv.style.display = visionToggle.checked ? 
'block' : 'none'; + markFormAsModified(); + }); +} + +// Listen for vision model selection changes +if (visionSelect) { + visionSelect.addEventListener('change', () => { + // Update data-prev to remember the selection + visionSelect.setAttribute('data-prev', visionSelect.value); + markFormAsModified(); + }); +} + // when APIM‐toggle flips, repopulate const apimToggle = document.getElementById('enable_gpt_apim'); if (apimToggle) { - apimToggle.addEventListener('change', populateExtractionModels); + apimToggle.addEventListener('change', () => { + populateExtractionModels(); + populateVisionModels(); + }); } // on load, stash previous & populate @@ -2364,6 +2720,10 @@ document.addEventListener('DOMContentLoaded', () => { extractSelect.setAttribute('data-prev', extractSelect.value); populateExtractionModels(); } + if (visionSelect) { + visionSelect.setAttribute('data-prev', visionSelect.value); + populateVisionModels(); + } }); @@ -2907,28 +3267,41 @@ function handleTabNavigation(stepNumber) { 5: 'ai-models-tab', // Embedding settings (now in AI Models tab) 6: 'search-extract-tab', // AI Search settings 7: 'search-extract-tab', // Document Intelligence settings - 8: 'workspaces-tab', // Video support - 9: 'workspaces-tab', // Audio support + 8: 'search-extract-tab', // Video support + 9: 'search-extract-tab', // Audio support 10: 'safety-tab', // Content safety - 11: 'system-tab', // User feedback and archiving (renamed from other-tab) + 11: 'safety-tab', // User feedback and archiving (changed from system-tab) 12: 'citation-tab' // Enhanced Citations and Image Generation }; // Activate the appropriate tab const tabId = stepToTab[stepNumber]; if (tabId) { - const tab = document.getElementById(tabId); - if (tab) { - // Use bootstrap Tab to show the tab - const bootstrapTab = new bootstrap.Tab(tab); - bootstrapTab.show(); - - // Scroll to the relevant section after a small delay to allow tab to switch - setTimeout(() => { - // For tabs that need to jump to specific sections - scrollToRelevantSection(stepNumber, tabId); - }, 300); + // Check if we're using sidebar navigation or tab navigation + const sidebarToggle = document.getElementById('admin-settings-toggle'); + + if (sidebarToggle) { + // Using sidebar navigation - call showAdminTab function + const tabName = tabId.replace('-tab', ''); // Remove '-tab' suffix + if (typeof showAdminTab === 'function') { + showAdminTab(tabName); + } else if (typeof window.showAdminTab === 'function') { + window.showAdminTab(tabName); + } + } else { + // Using Bootstrap tabs + const tab = document.getElementById(tabId); + if (tab) { + // Use bootstrap Tab to show the tab + const bootstrapTab = new bootstrap.Tab(tab); + bootstrapTab.show(); + } } + + // Scroll to the relevant section after a small delay to allow tab to switch + setTimeout(() => { + scrollToRelevantSection(stepNumber, tabId); + }, 300); } } @@ -2942,8 +3315,26 @@ function scrollToRelevantSection(stepNumber, tabId) { let targetElement = null; switch (stepNumber) { + case 1: // App title and logo + targetElement = document.getElementById('branding-section'); + break; + case 2: // GPT settings + targetElement = document.getElementById('gpt-configuration'); + break; + case 3: // GPT model selection + targetElement = document.getElementById('gpt_models_list')?.closest('.mb-3'); + break; case 4: // Workspaces toggle section - targetElement = document.getElementById('enable_user_workspace')?.closest('.card'); + targetElement = document.getElementById('personal-workspaces-section'); + break; + case 5: 
// Embedding settings + targetElement = document.getElementById('embeddings-configuration'); + break; + case 6: // AI Search settings + targetElement = document.getElementById('azure-ai-search-section'); + break; + case 7: // Document Intelligence settings + targetElement = document.getElementById('document-intelligence-section'); break; case 8: // Video file support targetElement = document.getElementById('enable_video_file_support')?.closest('.form-group'); @@ -2951,6 +3342,15 @@ function scrollToRelevantSection(stepNumber, tabId) { case 9: // Audio file support targetElement = document.getElementById('enable_audio_file_support')?.closest('.form-group'); break; + case 10: // Content safety + targetElement = document.getElementById('content-safety-section'); + break; + case 11: // User feedback and archiving + targetElement = document.getElementById('user-feedback-section'); + break; + case 12: // Enhanced citations and image generation + targetElement = document.getElementById('enhanced-citations-section'); + break; default: // For other steps, no specific scrolling break; @@ -2958,7 +3358,7 @@ function scrollToRelevantSection(stepNumber, tabId) { // If we found a target element, scroll to it if (targetElement) { - targetElement.scrollIntoView({ behavior: 'smooth', block: 'center' }); + targetElement.scrollIntoView({ behavior: 'smooth', block: 'start' }); } } @@ -3096,9 +3496,14 @@ function isStepComplete(stepNumber) { // Otherwise check settings const speechEndpoint = document.getElementById('speech_service_endpoint')?.value; - const speechKey = document.getElementById('speech_service_key')?.value; + const authType = document.getElementById('speech_service_authentication_type').value; + const key = document.getElementById('speech_service_key').value; - return speechEndpoint && speechKey; + if (!speechEndpoint || (authType === 'key' && !key)) { + return false; + } else { + return true; + } case 10: // Content safety - always complete (optional) case 11: // User feedback and archiving - always complete (optional) @@ -3532,4 +3937,4 @@ function updateSaveButtonState() { saveButton.classList.add('btn-secondary'); saveButton.innerHTML = ' Save Settings'; } -} +} \ No newline at end of file diff --git a/application/single_app/static/js/admin/admin_sidebar_nav.js b/application/single_app/static/js/admin/admin_sidebar_nav.js index 59969dd7..3f1bb667 100644 --- a/application/single_app/static/js/admin/admin_sidebar_nav.js +++ b/application/single_app/static/js/admin/admin_sidebar_nav.js @@ -180,6 +180,7 @@ function scrollToSection(sectionId) { 'external-links-section': 'external-links-section', 'health-check-section': 'health-check-section', 'system-settings-section': 'system-settings-section', + 'control-center-admin-section': 'control-center-admin-section', // Logging tab sections 'application-insights-section': 'application-insights-section', 'debug-logging-section': 'debug-logging-section', @@ -202,7 +203,10 @@ function scrollToSection(sectionId) { 'user-feedback-section': 'user-feedback-section', 'permissions-section': 'permissions-section', 'conversation-archiving-section': 'conversation-archiving-section', + // Security tab sections + 'keyvault-section': 'keyvault-section', // Search & Extract tab sections + 'web-search-section': 'web-search-foundry-section', 'azure-ai-search-section': 'azure-ai-search-section', 'document-intelligence-section': 'document-intelligence-section', 'multimedia-support-section': 'multimedia-support-section' diff --git 
a/application/single_app/static/js/agent_modal_stepper.js b/application/single_app/static/js/agent_modal_stepper.js index 28a22a64..800751be 100644 --- a/application/single_app/static/js/agent_modal_stepper.js +++ b/application/single_app/static/js/agent_modal_stepper.js @@ -2,6 +2,7 @@ // Multi-step modal functionality for agent creation import { showToast } from "./chat/chat-toast.js"; import * as agentsCommon from "./agents_common.js"; +import { getModelSupportedLevels } from "./chat/chat-reasoning.js"; export class AgentModalStepper { constructor(isAdmin = false) { @@ -9,11 +10,18 @@ export class AgentModalStepper { this.maxSteps = 6; this.isEditMode = false; this.isAdmin = isAdmin; // Track if this is admin context + this.currentAgentType = 'local'; this.originalAgent = null; // Track original state for change detection this.actionsToSelect = null; // Store actions to select when they're loaded this.updateStepIndicatorTimeout = null; // For debouncing step indicator updates + this.templateSubmitButton = document.getElementById('agent-modal-submit-template-btn'); + this.foundryPlaceholderInstructions = 'Placeholder instructions: Azure AI Foundry agent manages its own prompt.'; this.bindEvents(); + + if (this.templateSubmitButton) { + this.templateSubmitButton.addEventListener('click', () => this.submitTemplate()); + } } bindEvents() { @@ -22,6 +30,8 @@ export class AgentModalStepper { const prevBtn = document.getElementById('agent-modal-prev'); const saveBtn = document.getElementById('agent-modal-save-btn'); const skipBtn = document.getElementById('agent-modal-skip'); + const powerUserToggle = document.getElementById('agent-power-user-toggle'); + const agentTypeRadios = document.querySelectorAll('input[name="agent-type"]'); if (nextBtn) { nextBtn.addEventListener('click', () => this.nextStep()); @@ -35,9 +45,21 @@ export class AgentModalStepper { if (skipBtn) { skipBtn.addEventListener('click', () => this.skipToEnd()); } + if (powerUserToggle) { + powerUserToggle.addEventListener('change', (e) => this.togglePowerUserMode(e.target.checked)); + } + + if (agentTypeRadios && agentTypeRadios.length) { + agentTypeRadios.forEach(r => { + r.addEventListener('change', (e) => this.handleAgentTypeChange(e.target.value)); + }); + } // Set up display name to generated name conversion this.setupNameGeneration(); + + // Set up model change listener for reasoning effort + this.setupModelChangeListener(); } setupNameGeneration() { @@ -53,6 +75,162 @@ export class AgentModalStepper { } } + setupModelChangeListener() { + const globalModelSelect = document.getElementById('agent-global-model-select'); + if (globalModelSelect) { + globalModelSelect.addEventListener('change', () => { + this.updateReasoningEffortForModel(); + }); + } + } + + handleAgentTypeChange(agentType) { + this.currentAgentType = agentType || 'local'; + this.applyAgentTypeVisibility(); + // Clear actions if switching to foundry + if (this.currentAgentType === 'aifoundry') { + this.clearSelectedActions(); + } + this.populateSummary(); + } + + applyAgentTypeVisibility() { + const isFoundry = this.currentAgentType === 'aifoundry'; + const foundryFields = document.getElementById('agent-foundry-fields'); + const modelGroup = document.getElementById('agent-global-model-group'); + const customToggle = document.getElementById('agent-custom-connection-toggle'); + const customFields = document.getElementById('agent-custom-connection-fields'); + const actionsSection = document.getElementById('agent-step-4'); + const actionsDisabled = 
document.getElementById('agent-actions-disabled'); + const actionsContainer = document.getElementById('agent-actions-container'); + const actionsHeader = actionsSection?.querySelector('.card'); + const summaryActionsSection = document.getElementById('summary-actions-section'); + const instructionsContainer = document.getElementById('agent-instructions-container'); + const instructionsFoundryNote = document.getElementById('agent-instructions-foundry-note'); + const instructionsInput = document.getElementById('agent-instructions'); + + if (foundryFields) foundryFields.classList.toggle('d-none', !isFoundry); + if (modelGroup) modelGroup.classList.toggle('d-none', isFoundry); + if (customToggle) customToggle.classList.toggle('d-none', isFoundry); + if (customFields) customFields.classList.toggle('d-none', isFoundry); + + if (instructionsContainer) instructionsContainer.classList.toggle('d-none', isFoundry); + if (instructionsFoundryNote) instructionsFoundryNote.classList.toggle('d-none', !isFoundry); + if (instructionsInput) { + if (isFoundry) { + instructionsInput.value = this.foundryPlaceholderInstructions; + } + } + + if (actionsSection) { + // Hide interactive actions when foundry + if (actionsDisabled) actionsDisabled.classList.toggle('d-none', !isFoundry); + if (actionsHeader) actionsHeader.classList.toggle('d-none', isFoundry); + if (actionsContainer) actionsContainer.classList.toggle('d-none', isFoundry); + const noActionsMsg = document.getElementById('agent-no-actions-message'); + if (noActionsMsg) noActionsMsg.classList.toggle('d-none', isFoundry); + const selectedSummary = document.getElementById('agent-selected-actions-summary'); + if (selectedSummary) selectedSummary.classList.toggle('d-none', isFoundry); + } + + if (summaryActionsSection) { + summaryActionsSection.classList.toggle('d-none', isFoundry); + } + + // Update helper text + const helper = document.getElementById('agent-type-helper'); + if (helper) { + helper.textContent = isFoundry + ? 'Foundry agents use Azure-managed tools. Actions step is disabled.' 
+ : 'Local agents can attach actions and use SK plugins.'; + } + } + + updateAgentTypeLock() { + const radios = document.querySelectorAll('input[name="agent-type"]'); + if (!radios || !radios.length) { + return; + } + + const shouldDisable = this.isEditMode || this.currentStep > 1; + + radios.forEach(radio => { + radio.disabled = shouldDisable; + const wrapper = radio.closest('.form-check'); + if (wrapper) { + wrapper.classList.toggle('opacity-50', shouldDisable); + } + }); + + const selector = document.getElementById('agent-type-selector'); + if (selector) { + selector.classList.toggle('pe-none', shouldDisable); + } + } + + updateReasoningEffortForModel() { + const globalModelSelect = document.getElementById('agent-global-model-select'); + const reasoningEffortSelect = document.getElementById('agent-reasoning-effort'); + const reasoningEffortGroup = reasoningEffortSelect?.closest('.mb-3'); + + if (!globalModelSelect || !reasoningEffortSelect || !reasoningEffortGroup) { + return; + } + + const selectedModel = globalModelSelect.value; + if (!selectedModel) { + // No model selected, hide reasoning effort + reasoningEffortGroup.style.display = 'none'; + return; + } + + // Get supported levels for the selected model + const supportedLevels = getModelSupportedLevels(selectedModel); + + // If model only supports 'none', hide the field + if (supportedLevels.length === 1 && supportedLevels[0] === 'none') { + reasoningEffortGroup.style.display = 'none'; + reasoningEffortSelect.value = ''; // Clear selection + return; + } + + // Show the field + reasoningEffortGroup.style.display = 'block'; + + // Update available options based on supported levels + const currentValue = reasoningEffortSelect.value; + const allOptions = reasoningEffortSelect.querySelectorAll('option'); + + // Show/hide options based on supported levels + allOptions.forEach(option => { + const value = option.value; + if (value === '') { + // Always show the "inherit" option + option.style.display = ''; + option.disabled = false; + } else if (supportedLevels.includes(value)) { + option.style.display = ''; + option.disabled = false; + } else { + option.style.display = 'none'; + option.disabled = true; + } + }); + + // If current value is not supported, reset to inherit + if (currentValue && currentValue !== '' && !supportedLevels.includes(currentValue)) { + reasoningEffortSelect.value = ''; + } + } + + togglePowerUserMode(isEnabled) { + console.log('Toggling power user mode:', isEnabled); + const powerUserSection = document.getElementById('agent-power-user-settings'); + if (powerUserSection) { + powerUserSection.classList.toggle('d-none', !isEnabled); + } + } + generateAgentName(displayName) { if (!displayName) return ''; @@ -67,6 +245,7 @@ export class AgentModalStepper { showModal(agent = null) { this.isEditMode = !!agent; + this.currentAgentType = (agent && agent.agent_type) || 'local'; // Store original state for change detection this.originalAgent = agent ? 
JSON.parse(JSON.stringify(agent)) : null; @@ -99,6 +278,9 @@ export class AgentModalStepper { // Ensure generated name is populated for both new and existing agents this.updateGeneratedName(); + this.syncAgentTypeSelector(); + this.applyAgentTypeVisibility(); + this.updateAgentTypeLock(); // Load models for the modal this.loadModelsForModal(); @@ -117,6 +299,7 @@ export class AgentModalStepper { this.updateStepIndicator(); this.showStep(1); this.updateNavigationButtons(); + this.updateTemplateButtonVisibility(); console.log('Step indicators initialized'); } else { // Modal not ready yet, try again @@ -145,6 +328,14 @@ export class AgentModalStepper { } } + syncAgentTypeSelector() { + const radios = document.querySelectorAll('input[name="agent-type"]'); + if (!radios || !radios.length) return; + radios.forEach(r => { + r.checked = r.value === this.currentAgentType; + }); + } + clearFields() { // Clear all form fields const displayName = document.getElementById('agent-display-name'); @@ -180,6 +371,9 @@ export class AgentModalStepper { if (globalModelSelect) { agentsCommon.populateGlobalModelDropdown(globalModelSelect, models, selectedModel); + + // Update reasoning effort options based on selected model + this.updateReasoningEffortForModel(); } } catch (error) { console.error('Failed to load models for agent modal:', error); @@ -199,11 +393,29 @@ export class AgentModalStepper { customConnection.checked = agentsCommon.shouldEnableCustomConnection(agent); } + // Agent type selection + this.currentAgentType = agent.agent_type || 'local'; + this.syncAgentTypeSelector(); + this.applyAgentTypeVisibility(); + // Use shared function to populate all fields if (agentsCommon && typeof agentsCommon.setAgentModalFields === 'function') { agentsCommon.setAgentModalFields(agent); } + // any agent advanced settings + if (this.currentAgent + && this.currentAgent.max_completion_tokens != -1) { + const powerUserToggle = document.getElementById('agent-power-user-toggle'); + if (powerUserToggle) { + powerUserToggle.checked = true; // true/false from your agent data + const agentPowerUserSettings = document.getElementById('agent-power-user-settings'); + if (agentPowerUserSettings) { + agentPowerUserSettings.classList.remove('d-none'); + } + } + } + // Show/hide custom connection fields as needed if (customConnection) { // Find the custom fields and global model group containers @@ -231,6 +443,24 @@ export class AgentModalStepper { if (agent.actions_to_load && Array.isArray(agent.actions_to_load)) { this.actionsToSelect = agent.actions_to_load; } + + // Foundry-specific fields + if (agent.agent_type === 'aifoundry') { + const other = agent.other_settings || {}; + const foundry = (other && other.azure_ai_foundry) || {}; + const endpointEl = document.getElementById('agent-foundry-endpoint'); + const apiEl = document.getElementById('agent-foundry-api-version'); + const depEl = document.getElementById('agent-foundry-deployment'); + const idEl = document.getElementById('agent-foundry-agent-id'); + const notesEl = document.getElementById('agent-foundry-notes'); + if (endpointEl) endpointEl.value = agent.azure_openai_gpt_endpoint || ''; + if (apiEl) apiEl.value = agent.azure_openai_gpt_api_version || ''; + if (depEl) depEl.value = agent.azure_openai_gpt_deployment || ''; + if (idEl) idEl.value = foundry.agent_id || ''; + if (notesEl) notesEl.value = foundry.notes || ''; + // ensure actions cleared for UI + this.clearSelectedActions(); + } } nextStep() { @@ -249,9 +479,34 @@ export class AgentModalStepper { } } - 
skipToEnd() { + async skipToEnd() { // Skip to the summary step (step 6) - this.goToStep(this.maxSteps); + //if (this.actionsToSelect != null && this.actionsToSelect.length > 0) { + // this.setSelectedActions(this.actionsToSelect); + //} + const skipBtn = document.getElementById('agent-modal-skip'); + const originalText = skipBtn.innerHTML; + if (skipBtn) { + skipBtn.disabled = true; + skipBtn.innerHTML = `Skipping...`; + } + try { + if (this.currentAgentType !== 'aifoundry') { + await this.loadAvailableActions(); + } + this.goToStep(this.maxSteps); + } catch (error) { + console.error('Error loading actions:', error); + if (skipBtn) { + skipBtn.disabled = false; + skipBtn.innerHTML = originalText; + } + } finally { + if (skipBtn) { + skipBtn.disabled = false; + skipBtn.innerHTML = originalText; + } + } } goToStep(stepNumber) { @@ -261,6 +516,8 @@ export class AgentModalStepper { this.showStep(stepNumber); this.updateStepIndicator(); this.updateNavigationButtons(); + this.updateTemplateButtonVisibility(); + this.updateAgentTypeLock(); } showStep(stepNumber) { @@ -279,22 +536,31 @@ export class AgentModalStepper { } if (stepNumber === 2) { - if (!this.isAdmin) { - const customConnectionToggle = document.getElementById('agent-custom-connection-toggle'); - if (customConnectionToggle) { + const isFoundry = this.currentAgentType === 'aifoundry'; + const customConnectionToggle = document.getElementById('agent-custom-connection-toggle'); + const modelGroup = document.getElementById('agent-global-model-group'); + + if (customConnectionToggle) { + if (isFoundry) { + customConnectionToggle.classList.add('d-none'); + } else if (!this.isAdmin) { const allowUserCustom = appSettings?.allow_user_custom_agent_endpoints; - if (!allowUserCustom) { - customConnectionToggle.classList.add('d-none'); - } else { - customConnectionToggle.classList.remove('d-none'); - } + customConnectionToggle.classList.toggle('d-none', !allowUserCustom); + } else { + customConnectionToggle.classList.remove('d-none'); } } + + if (modelGroup) { + modelGroup.classList.toggle('d-none', isFoundry); + } } // Load actions when reaching step 4 if (stepNumber === 4) { - this.loadAvailableActions(); + if (this.currentAgentType !== 'aifoundry') { + this.loadAvailableActions(); + } } // Populate summary when reaching step 6 @@ -392,6 +658,27 @@ export class AgentModalStepper { } } + canSubmitTemplate() { + if (!window.appSettings || !window.appSettings.enable_agent_template_gallery) { + return false; + } + if (this.isAdmin) { + return true; + } + if (window.appSettings.allow_user_agents === false) { + return false; + } + return window.appSettings.agent_templates_allow_user_submission !== false; + } + + updateTemplateButtonVisibility() { + if (!this.templateSubmitButton) { + return; + } + const shouldShow = this.canSubmitTemplate() && this.currentStep === this.maxSteps; + this.templateSubmitButton.classList.toggle('d-none', !shouldShow); + } + validateCurrentStep() { switch (this.currentStep) { case 1: // Basic Info @@ -412,20 +699,54 @@ export class AgentModalStepper { break; case 2: // Model & Connection - // Model validation would go here + if (this.currentAgentType === 'aifoundry') { + const endpoint = document.getElementById('agent-foundry-endpoint'); + const apiVersion = document.getElementById('agent-foundry-api-version'); + const deployment = document.getElementById('agent-foundry-deployment'); + const agentId = document.getElementById('agent-foundry-agent-id'); + if (!endpoint || !endpoint.value.trim()) { + this.showError('Azure AI 
Foundry endpoint is required.'); + endpoint?.focus(); + return false; + } + if (!apiVersion || !apiVersion.value.trim()) { + this.showError('Azure AI Foundry API version is required.'); + apiVersion?.focus(); + return false; + } + if (!deployment || !deployment.value.trim()) { + this.showError('Foundry deployment/project is required.'); + deployment?.focus(); + return false; + } + if (!agentId || !agentId.value.trim()) { + this.showError('Foundry agent ID is required.'); + agentId?.focus(); + return false; + } + } break; case 3: // Instructions const instructions = document.getElementById('agent-instructions'); - if (!instructions || !instructions.value.trim()) { - this.showError('Please provide instructions for the agent.'); - if (instructions) instructions.focus(); - return false; - } + if (this.currentAgentType !== 'aifoundry') { + if (!instructions || !instructions.value.trim()) { + this.showError('Please provide instructions for the agent.'); + if (instructions) instructions.focus(); + return false; + } + } else { + // Ensure placeholder present + if (instructions && !instructions.value.trim()) { + instructions.value = this.foundryPlaceholderInstructions; + } + } break; case 4: // Actions - // Actions validation would go here if needed + if (this.currentAgentType !== 'aifoundry') { + // Actions validation would go here if needed + } break; case 5: // Advanced @@ -529,6 +850,10 @@ export class AgentModalStepper { } getFormModelName() { + if (this.currentAgentType === 'aifoundry') { + const foundryDeployment = document.getElementById('agent-foundry-deployment'); + return foundryDeployment?.value?.trim() || '-'; + } const customConnection = document.getElementById('agent-custom-connection')?.checked || false; let modelName = '-'; if (customConnection) { @@ -552,6 +877,7 @@ export class AgentModalStepper { const displayName = document.getElementById('agent-display-name')?.value || '-'; const generatedName = document.getElementById('agent-name')?.value || '-'; const description = document.getElementById('agent-description')?.value || '-'; + const agentType = this.currentAgentType || 'local'; // Model & Connection const customConnection = document.getElementById('agent-custom-connection')?.checked ? 'Yes' : 'No'; @@ -572,6 +898,11 @@ export class AgentModalStepper { // Update configuration document.getElementById('summary-model').textContent = modelName; document.getElementById('summary-custom-connection').textContent = customConnection; + const typeBadge = document.getElementById('summary-agent-type-badge'); + if (typeBadge) { + typeBadge.textContent = agentType === 'aifoundry' ? 'Azure AI Foundry' : 'Local (Semantic Kernel)'; + typeBadge.className = agentType === 'aifoundry' ? 
'badge bg-warning text-dark' : 'badge bg-info'; + } // Update instructions document.getElementById('summary-instructions').textContent = instructions; @@ -586,10 +917,16 @@ export class AgentModalStepper { const actionsListContainer = document.getElementById('summary-actions-list'); const actionsEmptyContainer = document.getElementById('summary-actions-empty'); - if (actionsCount > 0) { + if (this.currentAgentType === 'aifoundry') { + // Hide actions entirely for Foundry + const actionsSection = document.getElementById('summary-actions-section'); + if (actionsSection) actionsSection.style.display = 'none'; + } else if (actionsCount > 0) { // Show actions list, hide empty message actionsListContainer.style.display = 'block'; actionsEmptyContainer.style.display = 'none'; + const actionsSection = document.getElementById('summary-actions-section'); + if (actionsSection) actionsSection.style.display = ''; // Clear existing content actionsListContainer.innerHTML = ''; @@ -632,6 +969,8 @@ export class AgentModalStepper { // Hide actions list, show empty message actionsListContainer.style.display = 'none'; actionsEmptyContainer.style.display = 'block'; + const actionsSection = document.getElementById('summary-actions-section'); + if (actionsSection) actionsSection.style.display = ''; } // Update creation date @@ -965,7 +1304,7 @@ export class AgentModalStepper { // Selected actions const currentActions = this.getSelectedActionIds(); - const originalActions = this.originalAgent.actions || []; + const originalActions = this.originalAgent.actions_to_load || []; // Compare fields if (currentDisplayName !== (this.originalAgent.display_name || '')) { @@ -1095,6 +1434,7 @@ export class AgentModalStepper { try { // Get agent data from form const agentData = this.getAgentFormData(); + agentData.agent_type = (this.originalAgent?.agent_type) || agentData.agent_type || 'local'; // Validate required fields if (!agentData.display_name || !agentData.name) { @@ -1127,8 +1467,12 @@ export class AgentModalStepper { } } - // Add selected actions - agentData.actions_to_load = this.getSelectedActionIds(); + // Add selected actions (skip for Foundry) + if (agentData.agent_type === 'aifoundry') { + agentData.actions_to_load = []; + } else { + agentData.actions_to_load = this.getSelectedActionIds(); + } agentData.is_global = this.isAdmin; // Set based on admin context // Ensure required schema fields are present @@ -1139,6 +1483,11 @@ export class AgentModalStepper { agentData.other_settings = JSON.parse(agentData.other_settings) || {}; } + // Clean up empty reasoning_effort (inherit from model default) + if (!agentData.reasoning_effort || agentData.reasoning_effort === '') { + delete agentData.reasoning_effort; + } + // Clean up form-specific fields that shouldn't be sent to backend const formOnlyFields = ['custom_connection', 'model']; formOnlyFields.forEach(field => { @@ -1147,30 +1496,23 @@ export class AgentModalStepper { } }); - // Validate with schema if available + // Use appropriate endpoint and save method based on context + let saveBtn = document.getElementById('agent-modal-save-btn'); + const originalText = saveBtn.innerHTML; + saveBtn.innerHTML = `Saving...`; + saveBtn.disabled = true; try { - if (!window.validateAgent) { - window.validateAgent = (await import('/static/js/validateAgent.mjs')).default; - } - const valid = window.validateAgent(agentData); - if (!valid) { - let errorMsg = 'Validation error: Invalid agent data.'; - if (window.validateAgent.errors && window.validateAgent.errors.length) { - 
errorMsg += '\n' + window.validateAgent.errors.map(e => `${e.instancePath} ${e.message}`).join('\n'); - } - throw new Error(errorMsg); + if (this.isAdmin) { + // Admin context - save to global agents + await this.saveGlobalAgent(agentData); + } else { + // User context - save to personal agents + await this.savePersonalAgent(agentData); } - } catch (e) { - console.warn('Schema validation failed:', e.message); - } - - // Use appropriate endpoint and save method based on context - if (this.isAdmin) { - // Admin context - save to global agents - await this.saveGlobalAgent(agentData); - } else { - // User context - save to personal agents - await this.savePersonalAgent(agentData); + //No catch to allow outer catch to handle errors + } finally { + saveBtn.innerHTML = originalText; + saveBtn.disabled = false; } } catch (error) { @@ -1186,6 +1528,9 @@ export class AgentModalStepper { } getAgentFormData() { + const agentTypeInput = document.querySelector('input[name="agent-type"]:checked'); + const selectedAgentType = agentTypeInput ? agentTypeInput.value : 'local'; + const formData = { display_name: document.getElementById('agent-display-name')?.value || '', name: document.getElementById('agent-name')?.value || '', @@ -1193,8 +1538,40 @@ export class AgentModalStepper { instructions: document.getElementById('agent-instructions')?.value || '', model: document.getElementById('agent-global-model-select')?.value || '', custom_connection: document.getElementById('agent-custom-connection')?.checked || false, - other_settings: document.getElementById('agent-additional-settings')?.value || '{}' + other_settings: document.getElementById('agent-additional-settings')?.value || '{}', + max_completion_tokens: parseInt(document.getElementById('agent-max-completion-tokens')?.value.trim()) || null, + reasoning_effort: document.getElementById('agent-reasoning-effort')?.value || '', + agent_type: selectedAgentType }; + + if (selectedAgentType === 'aifoundry') { + // Foundry required fields + formData.azure_openai_gpt_endpoint = document.getElementById('agent-foundry-endpoint')?.value?.trim() || ''; + formData.azure_openai_gpt_deployment = document.getElementById('agent-foundry-deployment')?.value?.trim() || ''; + formData.azure_openai_gpt_api_version = document.getElementById('agent-foundry-api-version')?.value?.trim() || ''; + formData.instructions = document.getElementById('agent-instructions')?.value?.trim() || this.foundryPlaceholderInstructions; + + // other_settings for foundry + let otherSettingsObj = {}; + try { + otherSettingsObj = JSON.parse(formData.other_settings || '{}'); + } catch (e) { + otherSettingsObj = {}; + } + otherSettingsObj = otherSettingsObj || {}; + const notesVal = document.getElementById('agent-foundry-notes')?.value || ''; + otherSettingsObj.azure_ai_foundry = { + ...(otherSettingsObj.azure_ai_foundry || {}), + agent_id: document.getElementById('agent-foundry-agent-id')?.value?.trim() || '', + ...(notesVal ? { notes: notesVal } : {}) + }; + formData.other_settings = JSON.stringify(otherSettingsObj); + + // Foundry agents cannot have actions + formData.actions_to_load = []; + formData.enable_agent_gpt_apim = false; + return formData; + } // Handle model and deployment configuration if (formData.custom_connection) { @@ -1351,6 +1728,100 @@ export class AgentModalStepper { window.showToast(`Agent ${this.isEditMode ? 
'updated' : 'created'} successfully!`, 'success'); } } + + validateTemplateRequirements() { + const displayName = document.getElementById('agent-display-name'); + const description = document.getElementById('agent-description'); + const instructions = document.getElementById('agent-instructions'); + + if (!displayName || !displayName.value.trim()) { + this.showError('Please add a display name before submitting a template.'); + displayName?.focus(); + return false; + } + + if (!description || !description.value.trim()) { + this.showError('Please add a description before submitting a template.'); + description?.focus(); + return false; + } + + if (!instructions || !instructions.value.trim()) { + this.showError('Instructions are required before submitting a template.'); + instructions?.focus(); + return false; + } + + this.hideError(); + return true; + } + + buildTemplatePayload() { + const displayName = document.getElementById('agent-display-name')?.value?.trim() || ''; + const description = document.getElementById('agent-description')?.value?.trim() || ''; + const instructions = document.getElementById('agent-instructions')?.value || ''; + const additionalSettings = document.getElementById('agent-additional-settings')?.value || ''; + + return { + title: displayName || 'Agent Template', + display_name: displayName || 'Agent Template', + description, + helper_text: description, + instructions, + additional_settings: additionalSettings, + actions_to_load: this.getSelectedActionIds(), + source_agent_id: this.originalAgent?.id, + source_scope: this.isAdmin ? 'global' : 'personal' + }; + } + + async submitTemplate() { + if (!this.canSubmitTemplate()) { + showToast('Template submissions are disabled right now.', 'warning'); + return; + } + + if (!this.validateTemplateRequirements()) { + return; + } + + const button = this.templateSubmitButton; + if (!button) { + return; + } + + const originalHtml = button.innerHTML; + button.disabled = true; + button.innerHTML = 'Submitting...'; + + try { + const payload = { template: this.buildTemplatePayload() }; + const response = await fetch('/api/agent-templates', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(payload) + }); + + const data = await response.json().catch(() => ({})); + if (!response.ok) { + throw new Error(data.error || 'Failed to submit agent template.'); + } + + const status = data.template?.status; + const successMessage = (this.isAdmin && status === 'approved') + ? 'Template published to the gallery!' 
+ : 'Template submitted for review.'; + showToast(successMessage, 'success'); + this.hideError(); + } catch (error) { + console.error('Template submission failed:', error); + this.showError(error.message || 'Failed to submit template.'); + showToast(error.message || 'Failed to submit template.', 'error'); + } finally { + button.disabled = false; + button.innerHTML = originalHtml; + } + } } // Global instance will be created contextually by the calling code diff --git a/application/single_app/static/js/agent_templates_gallery.js b/application/single_app/static/js/agent_templates_gallery.js new file mode 100644 index 00000000..428ebf70 --- /dev/null +++ b/application/single_app/static/js/agent_templates_gallery.js @@ -0,0 +1,278 @@ +// agent_templates_gallery.js +// Dynamically renders the agent template gallery within the agent builder + +import { showToast } from "./chat/chat-toast.js"; + +const gallerySelector = ".agent-template-gallery"; +let cachedTemplates = null; +let loadingPromise = null; + +function getGalleryElements(container) { + return { + spinner: container.querySelector(".agent-template-gallery-loading"), + emptyState: container.querySelector(".agent-template-gallery-empty"), + disabledState: container.querySelector(".agent-template-gallery-disabled"), + errorState: container.querySelector(".agent-template-gallery-error"), + errorText: container.querySelector(".agent-template-gallery-error-text"), + accordion: container.querySelector(".accordion"), + }; +} + +async function fetchTemplates() { + if (cachedTemplates) { + return cachedTemplates; + } + if (loadingPromise) { + return loadingPromise; + } + loadingPromise = fetch("/api/agent-templates") + .then(async (response) => { + if (!response.ok) { + throw new Error("Failed to load templates."); + } + const data = await response.json(); + cachedTemplates = data.templates || []; + return cachedTemplates; + }) + .catch((error) => { + cachedTemplates = []; + throw error; + }) + .finally(() => { + loadingPromise = null; + }); + return loadingPromise; +} + +function renderAccordion(accordion, templates, options = {}) { + const accordionId = options.accordionId || "agentTemplates"; + const showCopy = options.showCopy !== "false"; + const showCreate = options.showCreate !== "false"; + + accordion.innerHTML = ""; + + templates.forEach((template, index) => { + const collapseId = `${accordionId}-collapse-${index}`; + const headingId = `${accordionId}-heading-${index}`; + const instructionsId = `${accordionId}-instructions-${index}`; + + const accordionItem = document.createElement("div"); + accordionItem.className = "accordion-item"; + + const header = document.createElement("h2"); + header.className = "accordion-header"; + header.id = headingId; + + const headerButton = document.createElement("button"); + headerButton.className = `accordion-button${index === 0 ? "" : " collapsed"}`; + headerButton.type = "button"; + headerButton.setAttribute("data-bs-toggle", "collapse"); + headerButton.setAttribute("data-bs-target", `#${collapseId}`); + headerButton.textContent = template.title || template.display_name || "Agent Template"; + header.appendChild(headerButton); + + const collapse = document.createElement("div"); + collapse.id = collapseId; + collapse.className = `accordion-collapse collapse${index === 0 ? 
" show" : ""}`; + collapse.setAttribute("aria-labelledby", headingId); + collapse.setAttribute("data-bs-parent", `#${accordionId}`); + + const body = document.createElement("div"); + body.className = "accordion-body"; + + const headerRow = document.createElement("div"); + headerRow.className = "d-flex flex-wrap justify-content-between align-items-start gap-2 mb-3"; + + const helper = document.createElement("div"); + helper.className = "small text-muted"; + helper.textContent = template.helper_text || template.description || "Reusable agent template"; + headerRow.appendChild(helper); + + const buttonGroup = document.createElement("div"); + buttonGroup.className = "d-flex gap-2 flex-wrap"; + + if (showCopy) { + const copyBtn = document.createElement("button"); + copyBtn.type = "button"; + copyBtn.className = "btn btn-sm btn-outline-secondary"; + copyBtn.innerHTML = ' Copy'; + copyBtn.addEventListener("click", () => copyInstructions(instructionsId)); + buttonGroup.appendChild(copyBtn); + } + + if (showCreate) { + const createBtn = document.createElement("button"); + createBtn.type = "button"; + createBtn.className = "btn btn-sm btn-success agent-example-create-btn"; + createBtn.innerHTML = ' Use Template'; + const payload = { + display_name: template.display_name || template.title || "Agent Template", + description: template.description || template.helper_text || "", + instructions: template.instructions || "", + additional_settings: template.additional_settings || "", + actions_to_load: template.actions_to_load || [], + }; + createBtn.dataset.agentExample = JSON.stringify(payload); + buttonGroup.appendChild(createBtn); + } + + headerRow.appendChild(buttonGroup); + body.appendChild(headerRow); + + const metaList = document.createElement("div"); + metaList.className = "mb-3"; + + const helperLine = document.createElement("p"); + helperLine.className = "mb-1 text-muted small"; + helperLine.innerHTML = `Suggested display name: ${escapeHtml(template.display_name || template.title || "Agent Template")}`; + metaList.appendChild(helperLine); + + if (Array.isArray(template.tags) && template.tags.length) { + const tagList = document.createElement("div"); + tagList.className = "mb-1"; + template.tags.slice(0, 5).forEach((tag) => { + const badge = document.createElement("span"); + badge.className = "badge bg-secondary-subtle text-secondary-emphasis me-1 mb-1"; + badge.textContent = tag; + tagList.appendChild(badge); + }); + metaList.appendChild(tagList); + } + + if (Array.isArray(template.actions_to_load) && template.actions_to_load.length) { + const actionLine = document.createElement("p"); + actionLine.className = "mb-0 text-muted small"; + actionLine.innerHTML = `Recommended actions: ${template.actions_to_load.join(", ")}`; + metaList.appendChild(actionLine); + } + + body.appendChild(metaList); + + const description = document.createElement("p"); + description.className = "mb-3"; + description.textContent = template.description || template.helper_text || "No description provided."; + body.appendChild(description); + + const instructions = document.createElement("pre"); + instructions.className = "bg-dark text-white p-3 rounded"; + instructions.id = instructionsId; + instructions.textContent = template.instructions || ""; + body.appendChild(instructions); + + if (template.additional_settings) { + const advancedBlock = document.createElement("pre"); + advancedBlock.className = "bg-light border rounded p-3 mt-3"; + advancedBlock.textContent = template.additional_settings; + const advancedLabel = 
document.createElement("p"); + advancedLabel.className = "text-muted small mb-1"; + advancedLabel.textContent = "Additional settings"; + body.appendChild(advancedLabel); + body.appendChild(advancedBlock); + } + + collapse.appendChild(body); + accordionItem.appendChild(header); + accordionItem.appendChild(collapse); + accordion.appendChild(accordionItem); + }); +} + +function escapeHtml(value) { + const div = document.createElement("div"); + div.textContent = value || ""; + return div.innerHTML; +} + +function copyInstructions(instructionsId) { + const target = document.getElementById(instructionsId); + if (!target) { + return; + } + if (typeof window.copyAgentInstructionSample === "function") { + window.copyAgentInstructionSample(instructionsId); + return; + } + const text = target.textContent || ""; + if (navigator.clipboard?.writeText) { + navigator.clipboard.writeText(text).then(() => { + showToast("Instructions copied to clipboard", "success"); + }).catch(() => { + fallbackCopyText(text); + }); + } else { + fallbackCopyText(text); + } +} + +function fallbackCopyText(text) { + const textarea = document.createElement("textarea"); + textarea.value = text; + textarea.style.position = "fixed"; + textarea.style.top = "-1000px"; + document.body.appendChild(textarea); + textarea.focus(); + textarea.select(); + try { + document.execCommand("copy"); + showToast("Instructions copied to clipboard", "success"); + } catch (err) { + console.error("Clipboard copy failed", err); + showToast("Unable to copy instructions", "error"); + } finally { + document.body.removeChild(textarea); + } +} + +async function initializeGallery(container) { + const elements = getGalleryElements(container); + + if (!window.appSettings?.enable_agent_template_gallery) { + if (elements.spinner) elements.spinner.classList.add("d-none"); + if (elements.disabledState) elements.disabledState.classList.remove("d-none"); + return; + } + + try { + const templates = await fetchTemplates(); + if (elements.spinner) elements.spinner.classList.add("d-none"); + + if (!templates.length) { + if (elements.emptyState) elements.emptyState.classList.remove("d-none"); + return; + } + + if (elements.accordion) { + elements.accordion.classList.remove("d-none"); + renderAccordion(elements.accordion, templates, { + accordionId: container.dataset.accordionId, + showCopy: container.dataset.showCopy, + showCreate: container.dataset.showCreate, + }); + } + } catch (error) { + console.error("Failed to render agent templates", error); + if (elements.spinner) elements.spinner.classList.add("d-none"); + if (elements.errorState) { + elements.errorState.classList.remove("d-none"); + if (elements.errorText) { + elements.errorText.textContent = error.message || "Unexpected error"; + } + } + } +} + +function initAgentTemplateGalleries() { + const containers = document.querySelectorAll(gallerySelector); + if (!containers.length) { + return; + } + containers.forEach((container) => { + initializeGallery(container); + }); +} + +if (document.readyState === "loading") { + document.addEventListener("DOMContentLoaded", initAgentTemplateGalleries); +} else { + initAgentTemplateGalleries(); +} diff --git a/application/single_app/static/js/agents_common.js b/application/single_app/static/js/agents_common.js index fbbadf3a..e3543a0d 100644 --- a/application/single_app/static/js/agents_common.js +++ b/application/single_app/static/js/agents_common.js @@ -47,6 +47,13 @@ export function setAgentModalFields(agent, opts = {}) { root.getElementById('agent-enable-apim').checked = 
!!agent.enable_agent_gpt_apim; root.getElementById('agent-instructions').value = agent.instructions || ''; root.getElementById('agent-additional-settings').value = agent.other_settings ? JSON.stringify(agent.other_settings, null, 2) : '{}'; + root.getElementById('agent-max-completion-tokens').value = agent.max_completion_tokens || ''; + + // Set reasoning effort if available + const reasoningEffortSelect = root.getElementById('agent-reasoning-effort'); + if (reasoningEffortSelect) { + reasoningEffortSelect.value = agent.reasoning_effort || ''; + } // Actions handled separately } @@ -95,8 +102,10 @@ export function getAgentModalFields(opts = {}) { azure_agent_apim_gpt_api_version: root.getElementById('agent-apim-api-version').value.trim(), enable_agent_gpt_apim: root.getElementById('agent-enable-apim').checked, instructions: root.getElementById('agent-instructions').value.trim(), + max_completion_tokens: parseInt(root.getElementById('agent-max-completion-tokens').value.trim()) || null, actions_to_load: actions_to_load, - other_settings: additionalSettings + other_settings: additionalSettings, + agent_type: (opts.agent && opts.agent.agent_type) || 'local' }; } /** @@ -202,19 +211,19 @@ export async function loadGlobalModelsForModal({ export function setupApimToggle(apimToggle, apimFields, gptFields, onToggle) { if (!apimToggle || !apimFields || !gptFields) return; function updateApimFieldsVisibility() { - console.log('[DEBUG] updateApimFieldsVisibility fired. apimToggle.checked:', apimToggle.checked); + console.log('updateApimFieldsVisibility fired. apimToggle.checked:', apimToggle.checked); if (apimToggle.checked) { apimFields.style.display = 'block'; gptFields.style.display = 'none'; apimFields.classList.remove('d-none'); gptFields.classList.add('d-none'); - console.log('[DEBUG] Showing APIM fields, hiding GPT fields.'); + console.log('Showing APIM fields, hiding GPT fields.'); } else { apimFields.style.display = 'none'; gptFields.style.display = 'block'; gptFields.classList.remove('d-none'); apimFields.classList.add('d-none'); - console.log('[DEBUG] Hiding APIM fields, showing GPT fields.'); + console.log('Hiding APIM fields, showing GPT fields.'); } if (typeof onToggle === 'function') { onToggle(); @@ -365,7 +374,7 @@ export function getAvailableModels({ apimEnabled, settings, agent }) { } else { // Otherwise use gpt_model.selected (array) let rawModels = (settings && settings.gpt_model && settings.gpt_model.selected) ? settings.gpt_model.selected : []; - console.log('[DEBUG] Raw models:', rawModels); + console.log('Raw models:', rawModels); // Normalize: map deploymentName/modelName to deployment/name if present models = rawModels.map(m => { if (m.deploymentName || m.modelName) { @@ -378,7 +387,7 @@ export function getAvailableModels({ apimEnabled, settings, agent }) { return m; }); selectedModel = agent && agent.azure_openai_gpt_deployment ? 
agent.azure_openai_gpt_deployment : null; - console.log('[DEBUG] Available models:', selectedModel); + console.log('Available models:', selectedModel); } return { models, selectedModel }; } @@ -471,6 +480,31 @@ export async function fetchUserAgents() { return await res.json(); } +export async function fetchGroupAgentsForActiveGroup() { + if (typeof window === 'undefined' || !window.activeGroupId) { + return []; + } + try { + const res = await fetch('/api/group/agents'); + if (!res.ok) { + console.warn('Group agents request failed:', res.status, res.statusText); + return []; + } + const payload = await res.json().catch(() => ({ agents: [] })); + const agents = Array.isArray(payload.agents) ? payload.agents : []; + const activeGroupName = (typeof window !== 'undefined' && window.activeGroupName) ? window.activeGroupName : ''; + return agents.map(agent => ({ + ...agent, + is_group: true, + group_id: agent.group_id || window.activeGroupId, + group_name: agent.group_name || activeGroupName || null + })); + } catch (error) { + console.error('Failed to fetch group agents:', error); + return []; + } +} + /** * Fetch selected agent from user settings * @returns {Promise} Selected agent object or null @@ -503,23 +537,63 @@ export function populateAgentSelect(selectEl, agents, selectedAgentObj) { console.log('DEBUG: populateAgentSelect called with agents:', agents); console.log('DEBUG: Number of agents:', agents.length); agents.forEach((agent, index) => { - console.log(`DEBUG: Agent ${index}: name="${agent.name}", is_global=${agent.is_global}, display_name="${agent.display_name}"`); + console.log(`DEBUG: Agent ${index}: name="${agent.name}", is_global=${agent.is_global}, is_group=${agent.is_group}, display_name="${agent.display_name}"`); }); + const getDisplayLabel = (agent) => (agent.display_name || agent.displayName || agent.name || '').trim(); + const displayLabelCounts = agents.reduce((acc, agent) => { + const label = getDisplayLabel(agent).toLowerCase(); + if (!label) { + return acc; + } + acc[label] = (acc[label] || 0) + 1; + return acc; + }, {}); + let selectedAgentName = typeof selectedAgentObj === 'object' ? selectedAgentObj.name : selectedAgentObj; + const selectedAgentId = typeof selectedAgentObj === 'object' ? (selectedAgentObj.id || selectedAgentObj.agent_id) : null; + const selectedAgentIsGlobal = typeof selectedAgentObj === 'object' ? !!selectedAgentObj.is_global : false; + const selectedAgentIsGroup = typeof selectedAgentObj === 'object' ? !!selectedAgentObj.is_group : false; + const selectedAgentGroupId = typeof selectedAgentObj === 'object' ? (selectedAgentObj.group_id || selectedAgentObj.groupId || null) : null; console.log('DEBUG: Selected agent name:', selectedAgentName); agents.forEach(agent => { let opt = document.createElement('option'); - // Use unique value that combines name and global status to distinguish between personal and global agents with same name - opt.value = agent.is_global ? `global_${agent.name}` : `personal_${agent.name}`; - opt.textContent = (agent.display_name || agent.name) + (agent.is_global ? ' (Global)' : ''); - // For selection matching, check if this agent matches the selected agent (by name and global status) + const agentId = agent.id || agent.agent_id || agent.name; + const contextPrefix = agent.is_group ? 'group' : (agent.is_global ? 
'global' : 'personal'); + opt.value = `${contextPrefix}_${agentId}`; + const groupName = agent.group_name || agent.groupName || ''; + const displayLabel = getDisplayLabel(agent); + const labelKey = displayLabel.toLowerCase(); + const hasDuplicateLabel = labelKey && displayLabelCounts[labelKey] > 1; + let labelSuffix = ''; + if (agent.is_group) { + if (hasDuplicateLabel) { + labelSuffix = ` (Group${groupName ? `: ${groupName}` : ''})`; + } + } else if (agent.is_global) { + labelSuffix = ' (Global)'; + } + opt.textContent = `${displayLabel}${labelSuffix}`; + opt.dataset.name = agent.name || ''; + opt.dataset.displayName = displayLabel; + opt.dataset.agentId = agentId || ''; + opt.dataset.isGlobal = agent.is_global ? 'true' : 'false'; + opt.dataset.isGroup = agent.is_group ? 'true' : 'false'; + opt.dataset.groupId = agent.group_id || agent.groupId || ''; + opt.dataset.groupName = groupName || ''; + // For selection matching, prefer ID if available, otherwise fallback to name/context if (selectedAgentObj && typeof selectedAgentObj === 'object') { - if (agent.name === selectedAgentObj.name && agent.is_global === selectedAgentObj.is_global) { + const candidateIds = [agentId, agent.id, agent.agent_id].filter(Boolean).map(String); + const selectedIds = [selectedAgentId].filter(Boolean).map(String); + const idMatches = selectedIds.length > 0 && selectedIds.some(selId => candidateIds.includes(selId)); + const nameMatches = agent.name === selectedAgentObj.name; + const contextMatches = (!!agent.is_global === selectedAgentIsGlobal) && (!!agent.is_group === selectedAgentIsGroup); + const groupMatches = !selectedAgentIsGroup || selectedAgentGroupId === null || String(agent.group_id || agent.groupId || '') === String(selectedAgentGroupId || ''); + if ((idMatches || nameMatches) && contextMatches && groupMatches) { opt.selected = true; } - } else if (agent.name === selectedAgentName && !agent.is_global) { + } else if (agent.name === selectedAgentName && !agent.is_global && !agent.is_group) { // Default to personal agent if just name is provided opt.selected = true; } diff --git a/application/single_app/static/js/chart-test.html b/application/single_app/static/js/chart-test.html new file mode 100644 index 00000000..f4385a0b --- /dev/null +++ b/application/single_app/static/js/chart-test.html @@ -0,0 +1,58 @@ + + + + Chart.js Test + + +
+[body markup garbled in extraction — recoverable text: page heading "Chart.js Loading Test" and a status element reading "Testing..."]
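Since the markup of this new test page did not survive above, here is a minimal sketch of what a loader check of this shape typically looks like. Only the file name, the "Chart.js Loading Test" heading, the "Testing..." status text, and the vendored `chart.min.js` sitting in the same directory come from the diff; the element ids, the canvas, and the script logic are assumptions, not the file's actual contents.

```
<!DOCTYPE html>
<html>
<head>
    <title>Chart.js Test</title>
</head>
<body>
    <h3>Chart.js Loading Test</h3>
    <!-- Status element; "Testing..." is the only text recoverable from the diff -->
    <div id="status">Testing...</div>
    <!-- Hypothetical canvas so a human can confirm a chart actually draws -->
    <canvas id="test-chart" width="400" height="200"></canvas>

    <!-- chart.min.js is vendored alongside this file in the same commit -->
    <script src="chart.min.js"></script>
    <script>
        // Assumed check: report whether the global Chart constructor loaded,
        // then render a trivial bar chart as a visual smoke test.
        const status = document.getElementById("status");
        if (typeof Chart === "undefined") {
            status.textContent = "Chart.js failed to load";
        } else {
            status.textContent = "Chart.js " + Chart.version + " loaded";
            new Chart(document.getElementById("test-chart"), {
                type: "bar",
                data: {
                    labels: ["A", "B", "C"],
                    datasets: [{ label: "demo", data: [1, 2, 3] }]
                }
            });
        }
    </script>
</body>
</html>
```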
+ + + + + \ No newline at end of file diff --git a/application/single_app/static/js/chart.min.js b/application/single_app/static/js/chart.min.js new file mode 100644 index 00000000..d25d4431 --- /dev/null +++ b/application/single_app/static/js/chart.min.js @@ -0,0 +1,14 @@ +/*! + * Chart.js v4.4.0 + * https://www.chartjs.org + * (c) 2023 Chart.js Contributors + * Released under the MIT License + */ +!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).Chart=e()}(this,(function(){"use strict";var t=Object.freeze({__proto__:null,get Colors(){return Go},get Decimation(){return Qo},get Filler(){return ma},get Legend(){return ya},get SubTitle(){return ka},get Title(){return Ma},get Tooltip(){return Ba}});function e(){}const i=(()=>{let t=0;return()=>t++})();function s(t){return null==t}function n(t){if(Array.isArray&&Array.isArray(t))return!0;const e=Object.prototype.toString.call(t);return"[object"===e.slice(0,7)&&"Array]"===e.slice(-6)}function o(t){return null!==t&&"[object Object]"===Object.prototype.toString.call(t)}function a(t){return("number"==typeof t||t instanceof Number)&&isFinite(+t)}function r(t,e){return a(t)?t:e}function l(t,e){return void 0===t?e:t}const h=(t,e)=>"string"==typeof t&&t.endsWith("%")?parseFloat(t)/100:+t/e,c=(t,e)=>"string"==typeof t&&t.endsWith("%")?parseFloat(t)/100*e:+t;function d(t,e,i){if(t&&"function"==typeof t.call)return t.apply(i,e)}function u(t,e,i,s){let a,r,l;if(n(t))if(r=t.length,s)for(a=r-1;a>=0;a--)e.call(i,t[a],a);else for(a=0;at,x:t=>t.x,y:t=>t.y};function v(t){const e=t.split("."),i=[];let s="";for(const t of e)s+=t,s.endsWith("\\")?s=s.slice(0,-1)+".":(i.push(s),s="");return i}function M(t,e){const i=y[e]||(y[e]=function(t){const e=v(t);return t=>{for(const i of e){if(""===i)break;t=t&&t[i]}return t}}(e));return i(t)}function w(t){return t.charAt(0).toUpperCase()+t.slice(1)}const k=t=>void 0!==t,S=t=>"function"==typeof t,P=(t,e)=>{if(t.size!==e.size)return!1;for(const i of t)if(!e.has(i))return!1;return!0};function D(t){return"mouseup"===t.type||"click"===t.type||"contextmenu"===t.type}const C=Math.PI,O=2*C,A=O+C,T=Number.POSITIVE_INFINITY,L=C/180,E=C/2,R=C/4,I=2*C/3,z=Math.log10,F=Math.sign;function V(t,e,i){return Math.abs(t-e)t-e)).pop(),e}function N(t){return!isNaN(parseFloat(t))&&isFinite(t)}function H(t,e){const i=Math.round(t);return i-e<=t&&i+e>=t}function j(t,e,i){let s,n,o;for(s=0,n=t.length;sl&&h=Math.min(e,i)-s&&t<=Math.max(e,i)+s}function et(t,e,i){i=i||(i=>t[i]1;)s=o+n>>1,i(s)?o=s:n=s;return{lo:o,hi:n}}const it=(t,e,i,s)=>et(t,i,s?s=>{const n=t[s][e];return nt[s][e]et(t,i,(s=>t[s][e]>=i));function nt(t,e,i){let s=0,n=t.length;for(;ss&&t[n-1]>i;)n--;return s>0||n{const i="_onData"+w(e),s=t[e];Object.defineProperty(t,e,{configurable:!0,enumerable:!1,value(...e){const n=s.apply(this,e);return t._chartjs.listeners.forEach((t=>{"function"==typeof t[i]&&t[i](...e)})),n}})})))}function rt(t,e){const i=t._chartjs;if(!i)return;const s=i.listeners,n=s.indexOf(e);-1!==n&&s.splice(n,1),s.length>0||(ot.forEach((e=>{delete t[e]})),delete t._chartjs)}function lt(t){const e=new Set(t);return e.size===t.length?t:Array.from(e)}const ht="undefined"==typeof window?function(t){return t()}:window.requestAnimationFrame;function ct(t,e){let i=[],s=!1;return function(...n){i=n,s||(s=!0,ht.call(window,(()=>{s=!1,t.apply(e,i)})))}}function dt(t,e){let i;return function(...s){return 
e?(clearTimeout(i),i=setTimeout(t,e,s)):t.apply(this,s),e}}const ut=t=>"start"===t?"left":"end"===t?"right":"center",ft=(t,e,i)=>"start"===t?e:"end"===t?i:(e+i)/2,gt=(t,e,i,s)=>t===(s?"left":"right")?i:"center"===t?(e+i)/2:e;function pt(t,e,i){const s=e.length;let n=0,o=s;if(t._sorted){const{iScale:a,_parsed:r}=t,l=a.axis,{min:h,max:c,minDefined:d,maxDefined:u}=a.getUserBounds();d&&(n=J(Math.min(it(r,l,h).lo,i?s:it(e,l,a.getPixelForValue(h)).lo),0,s-1)),o=u?J(Math.max(it(r,a.axis,c,!0).hi+1,i?0:it(e,l,a.getPixelForValue(c),!0).hi+1),n,s)-n:s-n}return{start:n,count:o}}function mt(t){const{xScale:e,yScale:i,_scaleRanges:s}=t,n={xmin:e.min,xmax:e.max,ymin:i.min,ymax:i.max};if(!s)return t._scaleRanges=n,!0;const o=s.xmin!==e.min||s.xmax!==e.max||s.ymin!==i.min||s.ymax!==i.max;return Object.assign(s,n),o}class bt{constructor(){this._request=null,this._charts=new Map,this._running=!1,this._lastDate=void 0}_notify(t,e,i,s){const n=e.listeners[s],o=e.duration;n.forEach((s=>s({chart:t,initial:e.initial,numSteps:o,currentStep:Math.min(i-e.start,o)})))}_refresh(){this._request||(this._running=!0,this._request=ht.call(window,(()=>{this._update(),this._request=null,this._running&&this._refresh()})))}_update(t=Date.now()){let e=0;this._charts.forEach(((i,s)=>{if(!i.running||!i.items.length)return;const n=i.items;let o,a=n.length-1,r=!1;for(;a>=0;--a)o=n[a],o._active?(o._total>i.duration&&(i.duration=o._total),o.tick(t),r=!0):(n[a]=n[n.length-1],n.pop());r&&(s.draw(),this._notify(s,i,t,"progress")),n.length||(i.running=!1,this._notify(s,i,t,"complete"),i.initial=!1),e+=n.length})),this._lastDate=t,0===e&&(this._running=!1)}_getAnims(t){const e=this._charts;let i=e.get(t);return i||(i={running:!1,initial:!0,items:[],listeners:{complete:[],progress:[]}},e.set(t,i)),i}listen(t,e,i){this._getAnims(t).listeners[e].push(i)}add(t,e){e&&e.length&&this._getAnims(t).items.push(...e)}has(t){return this._getAnims(t).items.length>0}start(t){const e=this._charts.get(t);e&&(e.running=!0,e.start=Date.now(),e.duration=e.items.reduce(((t,e)=>Math.max(t,e._duration)),0),this._refresh())}running(t){if(!this._running)return!1;const e=this._charts.get(t);return!!(e&&e.running&&e.items.length)}stop(t){const e=this._charts.get(t);if(!e||!e.items.length)return;const i=e.items;let s=i.length-1;for(;s>=0;--s)i[s].cancel();e.items=[],this._notify(t,e,Date.now(),"complete")}remove(t){return this._charts.delete(t)}}var xt=new bt; +/*! 
+ * @kurkle/color v0.3.2 + * https://github.com/kurkle/color#readme + * (c) 2023 Jukka Kurkela + * Released under the MIT License + */function _t(t){return t+.5|0}const yt=(t,e,i)=>Math.max(Math.min(t,i),e);function vt(t){return yt(_t(2.55*t),0,255)}function Mt(t){return yt(_t(255*t),0,255)}function wt(t){return yt(_t(t/2.55)/100,0,1)}function kt(t){return yt(_t(100*t),0,100)}const St={0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7,8:8,9:9,A:10,B:11,C:12,D:13,E:14,F:15,a:10,b:11,c:12,d:13,e:14,f:15},Pt=[..."0123456789ABCDEF"],Dt=t=>Pt[15&t],Ct=t=>Pt[(240&t)>>4]+Pt[15&t],Ot=t=>(240&t)>>4==(15&t);function At(t){var e=(t=>Ot(t.r)&&Ot(t.g)&&Ot(t.b)&&Ot(t.a))(t)?Dt:Ct;return t?"#"+e(t.r)+e(t.g)+e(t.b)+((t,e)=>t<255?e(t):"")(t.a,e):void 0}const Tt=/^(hsla?|hwb|hsv)\(\s*([-+.e\d]+)(?:deg)?[\s,]+([-+.e\d]+)%[\s,]+([-+.e\d]+)%(?:[\s,]+([-+.e\d]+)(%)?)?\s*\)$/;function Lt(t,e,i){const s=e*Math.min(i,1-i),n=(e,n=(e+t/30)%12)=>i-s*Math.max(Math.min(n-3,9-n,1),-1);return[n(0),n(8),n(4)]}function Et(t,e,i){const s=(s,n=(s+t/60)%6)=>i-i*e*Math.max(Math.min(n,4-n,1),0);return[s(5),s(3),s(1)]}function Rt(t,e,i){const s=Lt(t,1,.5);let n;for(e+i>1&&(n=1/(e+i),e*=n,i*=n),n=0;n<3;n++)s[n]*=1-e-i,s[n]+=e;return s}function It(t){const e=t.r/255,i=t.g/255,s=t.b/255,n=Math.max(e,i,s),o=Math.min(e,i,s),a=(n+o)/2;let r,l,h;return n!==o&&(h=n-o,l=a>.5?h/(2-n-o):h/(n+o),r=function(t,e,i,s,n){return t===n?(e-i)/s+(e>16&255,o>>8&255,255&o]}return t}(),Ht.transparent=[0,0,0,0]);const e=Ht[t.toLowerCase()];return e&&{r:e[0],g:e[1],b:e[2],a:4===e.length?e[3]:255}}const $t=/^rgba?\(\s*([-+.\d]+)(%)?[\s,]+([-+.e\d]+)(%)?[\s,]+([-+.e\d]+)(%)?(?:[\s,/]+([-+.e\d]+)(%)?)?\s*\)$/;const Yt=t=>t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055,Ut=t=>t<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4);function Xt(t,e,i){if(t){let s=It(t);s[e]=Math.max(0,Math.min(s[e]+s[e]*i,0===e?360:1)),s=Ft(s),t.r=s[0],t.g=s[1],t.b=s[2]}}function qt(t,e){return t?Object.assign(e||{},t):t}function Kt(t){var e={r:0,g:0,b:0,a:255};return Array.isArray(t)?t.length>=3&&(e={r:t[0],g:t[1],b:t[2],a:255},t.length>3&&(e.a=Mt(t[3]))):(e=qt(t,{r:0,g:0,b:0,a:1})).a=Mt(e.a),e}function Gt(t){return"r"===t.charAt(0)?function(t){const e=$t.exec(t);let i,s,n,o=255;if(e){if(e[7]!==i){const t=+e[7];o=e[8]?vt(t):yt(255*t,0,255)}return i=+e[1],s=+e[3],n=+e[5],i=255&(e[2]?vt(i):yt(i,0,255)),s=255&(e[4]?vt(s):yt(s,0,255)),n=255&(e[6]?vt(n):yt(n,0,255)),{r:i,g:s,b:n,a:o}}}(t):Bt(t)}class Zt{constructor(t){if(t instanceof Zt)return t;const e=typeof t;let i;var s,n,o;"object"===e?i=Kt(t):"string"===e&&(o=(s=t).length,"#"===s[0]&&(4===o||5===o?n={r:255&17*St[s[1]],g:255&17*St[s[2]],b:255&17*St[s[3]],a:5===o?17*St[s[4]]:255}:7!==o&&9!==o||(n={r:St[s[1]]<<4|St[s[2]],g:St[s[3]]<<4|St[s[4]],b:St[s[5]]<<4|St[s[6]],a:9===o?St[s[7]]<<4|St[s[8]]:255})),i=n||jt(t)||Gt(t)),this._rgb=i,this._valid=!!i}get valid(){return this._valid}get rgb(){var t=qt(this._rgb);return t&&(t.a=wt(t.a)),t}set rgb(t){this._rgb=Kt(t)}rgbString(){return this._valid?(t=this._rgb)&&(t.a<255?`rgba(${t.r}, ${t.g}, ${t.b}, ${wt(t.a)})`:`rgb(${t.r}, ${t.g}, ${t.b})`):void 0;var t}hexString(){return this._valid?At(this._rgb):void 0}hslString(){return this._valid?function(t){if(!t)return;const e=It(t),i=e[0],s=kt(e[1]),n=kt(e[2]);return t.a<255?`hsla(${i}, ${s}%, ${n}%, ${wt(t.a)})`:`hsl(${i}, ${s}%, ${n}%)`}(this._rgb):void 0}mix(t,e){if(t){const i=this.rgb,s=t.rgb;let n;const 
o=e===n?.5:e,a=2*o-1,r=i.a-s.a,l=((a*r==-1?a:(a+r)/(1+a*r))+1)/2;n=1-l,i.r=255&l*i.r+n*s.r+.5,i.g=255&l*i.g+n*s.g+.5,i.b=255&l*i.b+n*s.b+.5,i.a=o*i.a+(1-o)*s.a,this.rgb=i}return this}interpolate(t,e){return t&&(this._rgb=function(t,e,i){const s=Ut(wt(t.r)),n=Ut(wt(t.g)),o=Ut(wt(t.b));return{r:Mt(Yt(s+i*(Ut(wt(e.r))-s))),g:Mt(Yt(n+i*(Ut(wt(e.g))-n))),b:Mt(Yt(o+i*(Ut(wt(e.b))-o))),a:t.a+i*(e.a-t.a)}}(this._rgb,t._rgb,e)),this}clone(){return new Zt(this.rgb)}alpha(t){return this._rgb.a=Mt(t),this}clearer(t){return this._rgb.a*=1-t,this}greyscale(){const t=this._rgb,e=_t(.3*t.r+.59*t.g+.11*t.b);return t.r=t.g=t.b=e,this}opaquer(t){return this._rgb.a*=1+t,this}negate(){const t=this._rgb;return t.r=255-t.r,t.g=255-t.g,t.b=255-t.b,this}lighten(t){return Xt(this._rgb,2,t),this}darken(t){return Xt(this._rgb,2,-t),this}saturate(t){return Xt(this._rgb,1,t),this}desaturate(t){return Xt(this._rgb,1,-t),this}rotate(t){return function(t,e){var i=It(t);i[0]=Vt(i[0]+e),i=Ft(i),t.r=i[0],t.g=i[1],t.b=i[2]}(this._rgb,t),this}}function Jt(t){if(t&&"object"==typeof t){const e=t.toString();return"[object CanvasPattern]"===e||"[object CanvasGradient]"===e}return!1}function Qt(t){return Jt(t)?t:new Zt(t)}function te(t){return Jt(t)?t:new Zt(t).saturate(.5).darken(.1).hexString()}const ee=["x","y","borderWidth","radius","tension"],ie=["color","borderColor","backgroundColor"];const se=new Map;function ne(t,e,i){return function(t,e){e=e||{};const i=t+JSON.stringify(e);let s=se.get(i);return s||(s=new Intl.NumberFormat(t,e),se.set(i,s)),s}(e,i).format(t)}const oe={values:t=>n(t)?t:""+t,numeric(t,e,i){if(0===t)return"0";const s=this.chart.options.locale;let n,o=t;if(i.length>1){const e=Math.max(Math.abs(i[0].value),Math.abs(i[i.length-1].value));(e<1e-4||e>1e15)&&(n="scientific"),o=function(t,e){let i=e.length>3?e[2].value-e[1].value:e[1].value-e[0].value;Math.abs(i)>=1&&t!==Math.floor(t)&&(i=t-Math.floor(t));return i}(t,i)}const a=z(Math.abs(o)),r=isNaN(a)?1:Math.max(Math.min(-1*Math.floor(a),20),0),l={notation:n,minimumFractionDigits:r,maximumFractionDigits:r};return Object.assign(l,this.options.ticks.format),ne(t,s,l)},logarithmic(t,e,i){if(0===t)return"0";const s=i[e].significand||t/Math.pow(10,Math.floor(z(t)));return[1,2,3,5,10,15].includes(s)||e>.8*i.length?oe.numeric.call(this,t,e,i):""}};var ae={formatters:oe};const re=Object.create(null),le=Object.create(null);function he(t,e){if(!e)return t;const i=e.split(".");for(let e=0,s=i.length;et.chart.platform.getDevicePixelRatio(),this.elements={},this.events=["mousemove","mouseout","click","touchstart","touchmove"],this.font={family:"'Helvetica Neue', 'Helvetica', 'Arial', sans-serif",size:12,style:"normal",lineHeight:1.2,weight:null},this.hover={},this.hoverBackgroundColor=(t,e)=>te(e.backgroundColor),this.hoverBorderColor=(t,e)=>te(e.borderColor),this.hoverColor=(t,e)=>te(e.color),this.indexAxis="x",this.interaction={mode:"nearest",intersect:!0,includeInvisible:!1},this.maintainAspectRatio=!0,this.onHover=null,this.onClick=null,this.parsing=!0,this.plugins={},this.responsive=!0,this.scale=void 0,this.scales={},this.showLine=!0,this.drawActiveElementsOnTop=!0,this.describe(t),this.apply(e)}set(t,e){return ce(this,t,e)}get(t){return he(this,t)}describe(t,e){return ce(le,t,e)}override(t,e){return ce(re,t,e)}route(t,e,i,s){const n=he(this,t),a=he(this,i),r="_"+e;Object.defineProperties(n,{[r]:{value:n[e],writable:!0},[e]:{enumerable:!0,get(){const t=this[r],e=a[s];return o(t)?Object.assign({},e,t):l(t,e)},set(t){this[r]=t}}})}apply(t){t.forEach((t=>t(this)))}}var 
ue=new de({_scriptable:t=>!t.startsWith("on"),_indexable:t=>"events"!==t,hover:{_fallback:"interaction"},interaction:{_scriptable:!1,_indexable:!1}},[function(t){t.set("animation",{delay:void 0,duration:1e3,easing:"easeOutQuart",fn:void 0,from:void 0,loop:void 0,to:void 0,type:void 0}),t.describe("animation",{_fallback:!1,_indexable:!1,_scriptable:t=>"onProgress"!==t&&"onComplete"!==t&&"fn"!==t}),t.set("animations",{colors:{type:"color",properties:ie},numbers:{type:"number",properties:ee}}),t.describe("animations",{_fallback:"animation"}),t.set("transitions",{active:{animation:{duration:400}},resize:{animation:{duration:0}},show:{animations:{colors:{from:"transparent"},visible:{type:"boolean",duration:0}}},hide:{animations:{colors:{to:"transparent"},visible:{type:"boolean",easing:"linear",fn:t=>0|t}}}})},function(t){t.set("layout",{autoPadding:!0,padding:{top:0,right:0,bottom:0,left:0}})},function(t){t.set("scale",{display:!0,offset:!1,reverse:!1,beginAtZero:!1,bounds:"ticks",clip:!0,grace:0,grid:{display:!0,lineWidth:1,drawOnChartArea:!0,drawTicks:!0,tickLength:8,tickWidth:(t,e)=>e.lineWidth,tickColor:(t,e)=>e.color,offset:!1},border:{display:!0,dash:[],dashOffset:0,width:1},title:{display:!1,text:"",padding:{top:4,bottom:4}},ticks:{minRotation:0,maxRotation:50,mirror:!1,textStrokeWidth:0,textStrokeColor:"",padding:3,display:!0,autoSkip:!0,autoSkipPadding:3,labelOffset:0,callback:ae.formatters.values,minor:{},major:{},align:"center",crossAlign:"near",showLabelBackdrop:!1,backdropColor:"rgba(255, 255, 255, 0.75)",backdropPadding:2}}),t.route("scale.ticks","color","","color"),t.route("scale.grid","color","","borderColor"),t.route("scale.border","color","","borderColor"),t.route("scale.title","color","","color"),t.describe("scale",{_fallback:!1,_scriptable:t=>!t.startsWith("before")&&!t.startsWith("after")&&"callback"!==t&&"parser"!==t,_indexable:t=>"borderDash"!==t&&"tickBorderDash"!==t&&"dash"!==t}),t.describe("scales",{_fallback:"scale"}),t.describe("scale.ticks",{_scriptable:t=>"backdropPadding"!==t&&"callback"!==t,_indexable:t=>"backdropPadding"!==t})}]);function fe(){return"undefined"!=typeof window&&"undefined"!=typeof document}function ge(t){let e=t.parentNode;return e&&"[object ShadowRoot]"===e.toString()&&(e=e.host),e}function pe(t,e,i){let s;return"string"==typeof t?(s=parseInt(t,10),-1!==t.indexOf("%")&&(s=s/100*e.parentNode[i])):s=t,s}const me=t=>t.ownerDocument.defaultView.getComputedStyle(t,null);function be(t,e){return me(t).getPropertyValue(e)}const xe=["top","right","bottom","left"];function _e(t,e,i){const s={};i=i?"-"+i:"";for(let n=0;n<4;n++){const o=xe[n];s[o]=parseFloat(t[e+"-"+o+i])||0}return s.width=s.left+s.right,s.height=s.top+s.bottom,s}const ye=(t,e,i)=>(t>0||e>0)&&(!i||!i.shadowRoot);function ve(t,e){if("native"in t)return t;const{canvas:i,currentDevicePixelRatio:s}=e,n=me(i),o="border-box"===n.boxSizing,a=_e(n,"padding"),r=_e(n,"border","width"),{x:l,y:h,box:c}=function(t,e){const i=t.touches,s=i&&i.length?i[0]:t,{offsetX:n,offsetY:o}=s;let a,r,l=!1;if(ye(n,o,t.target))a=n,r=o;else{const t=e.getBoundingClientRect();a=s.clientX-t.left,r=s.clientY-t.top,l=!0}return{x:a,y:r,box:l}}(t,i),d=a.left+(c&&r.left),u=a.top+(c&&r.top);let{width:f,height:g}=e;return o&&(f-=a.width+r.width,g-=a.height+r.height),{x:Math.round((l-d)/f*i.width/s),y:Math.round((h-u)/g*i.height/s)}}const Me=t=>Math.round(10*t)/10;function we(t,e,i,s){const n=me(t),o=_e(n,"margin"),a=pe(n.maxWidth,t,"clientWidth")||T,r=pe(n.maxHeight,t,"clientHeight")||T,l=function(t,e,i){let s,n;if(void 
0===e||void 0===i){const o=ge(t);if(o){const t=o.getBoundingClientRect(),a=me(o),r=_e(a,"border","width"),l=_e(a,"padding");e=t.width-l.width-r.width,i=t.height-l.height-r.height,s=pe(a.maxWidth,o,"clientWidth"),n=pe(a.maxHeight,o,"clientHeight")}else e=t.clientWidth,i=t.clientHeight}return{width:e,height:i,maxWidth:s||T,maxHeight:n||T}}(t,e,i);let{width:h,height:c}=l;if("content-box"===n.boxSizing){const t=_e(n,"border","width"),e=_e(n,"padding");h-=e.width+t.width,c-=e.height+t.height}h=Math.max(0,h-o.width),c=Math.max(0,s?h/s:c-o.height),h=Me(Math.min(h,a,l.maxWidth)),c=Me(Math.min(c,r,l.maxHeight)),h&&!c&&(c=Me(h/2));return(void 0!==e||void 0!==i)&&s&&l.height&&c>l.height&&(c=l.height,h=Me(Math.floor(c*s))),{width:h,height:c}}function ke(t,e,i){const s=e||1,n=Math.floor(t.height*s),o=Math.floor(t.width*s);t.height=Math.floor(t.height),t.width=Math.floor(t.width);const a=t.canvas;return a.style&&(i||!a.style.height&&!a.style.width)&&(a.style.height=`${t.height}px`,a.style.width=`${t.width}px`),(t.currentDevicePixelRatio!==s||a.height!==n||a.width!==o)&&(t.currentDevicePixelRatio=s,a.height=n,a.width=o,t.ctx.setTransform(s,0,0,s,0,0),!0)}const Se=function(){let t=!1;try{const e={get passive(){return t=!0,!1}};window.addEventListener("test",null,e),window.removeEventListener("test",null,e)}catch(t){}return t}();function Pe(t,e){const i=be(t,e),s=i&&i.match(/^(\d+)(\.\d+)?px$/);return s?+s[1]:void 0}function De(t){return!t||s(t.size)||s(t.family)?null:(t.style?t.style+" ":"")+(t.weight?t.weight+" ":"")+t.size+"px "+t.family}function Ce(t,e,i,s,n){let o=e[n];return o||(o=e[n]=t.measureText(n).width,i.push(n)),o>s&&(s=o),s}function Oe(t,e,i,s){let o=(s=s||{}).data=s.data||{},a=s.garbageCollect=s.garbageCollect||[];s.font!==e&&(o=s.data={},a=s.garbageCollect=[],s.font=e),t.save(),t.font=e;let r=0;const l=i.length;let h,c,d,u,f;for(h=0;hi.length){for(h=0;h0&&t.stroke()}}function Re(t,e,i){return i=i||.5,!e||t&&t.x>e.left-i&&t.xe.top-i&&t.y0&&""!==r.strokeColor;let c,d;for(t.save(),t.font=a.string,function(t,e){e.translation&&t.translate(e.translation[0],e.translation[1]),s(e.rotation)||t.rotate(e.rotation),e.color&&(t.fillStyle=e.color),e.textAlign&&(t.textAlign=e.textAlign),e.textBaseline&&(t.textBaseline=e.textBaseline)}(t,r),c=0;ct[0])){const o=i||t;void 0===s&&(s=ti("_fallback",t));const a={[Symbol.toStringTag]:"Object",_cacheable:!0,_scopes:t,_rootScopes:o,_fallback:s,_getTarget:n,override:i=>je([i,...t],e,o,s)};return new Proxy(a,{deleteProperty:(e,i)=>(delete e[i],delete e._keys,delete t[0][i],!0),get:(i,s)=>qe(i,s,(()=>function(t,e,i,s){let n;for(const o of e)if(n=ti(Ue(o,t),i),void 0!==n)return Xe(t,n)?Je(i,s,t,n):n}(s,e,t,i))),getOwnPropertyDescriptor:(t,e)=>Reflect.getOwnPropertyDescriptor(t._scopes[0],e),getPrototypeOf:()=>Reflect.getPrototypeOf(t[0]),has:(t,e)=>ei(t).includes(e),ownKeys:t=>ei(t),set(t,e,i){const s=t._storage||(t._storage=n());return t[e]=s[e]=i,delete t._keys,!0}})}function $e(t,e,i,s){const a={_cacheable:!1,_proxy:t,_context:e,_subProxy:i,_stack:new Set,_descriptors:Ye(t,s),setContext:e=>$e(t,e,i,s),override:n=>$e(t.override(n),e,i,s)};return new Proxy(a,{deleteProperty:(e,i)=>(delete e[i],delete t[i],!0),get:(t,e,i)=>qe(t,e,(()=>function(t,e,i){const{_proxy:s,_context:a,_subProxy:r,_descriptors:l}=t;let h=s[e];S(h)&&l.isScriptable(e)&&(h=function(t,e,i,s){const{_proxy:n,_context:o,_subProxy:a,_stack:r}=i;if(r.has(t))throw new Error("Recursion detected: "+Array.from(r).join("->")+"->"+t);r.add(t);let l=e(o,a||s);r.delete(t),Xe(t,l)&&(l=Je(n._scopes,n,t,l));return 
l}(e,h,t,i));n(h)&&h.length&&(h=function(t,e,i,s){const{_proxy:n,_context:a,_subProxy:r,_descriptors:l}=i;if(void 0!==a.index&&s(t))return e[a.index%e.length];if(o(e[0])){const i=e,s=n._scopes.filter((t=>t!==i));e=[];for(const o of i){const i=Je(s,n,t,o);e.push($e(i,a,r&&r[t],l))}}return e}(e,h,t,l.isIndexable));Xe(e,h)&&(h=$e(h,a,r&&r[e],l));return h}(t,e,i))),getOwnPropertyDescriptor:(e,i)=>e._descriptors.allKeys?Reflect.has(t,i)?{enumerable:!0,configurable:!0}:void 0:Reflect.getOwnPropertyDescriptor(t,i),getPrototypeOf:()=>Reflect.getPrototypeOf(t),has:(e,i)=>Reflect.has(t,i),ownKeys:()=>Reflect.ownKeys(t),set:(e,i,s)=>(t[i]=s,delete e[i],!0)})}function Ye(t,e={scriptable:!0,indexable:!0}){const{_scriptable:i=e.scriptable,_indexable:s=e.indexable,_allKeys:n=e.allKeys}=t;return{allKeys:n,scriptable:i,indexable:s,isScriptable:S(i)?i:()=>i,isIndexable:S(s)?s:()=>s}}const Ue=(t,e)=>t?t+w(e):e,Xe=(t,e)=>o(e)&&"adapters"!==t&&(null===Object.getPrototypeOf(e)||e.constructor===Object);function qe(t,e,i){if(Object.prototype.hasOwnProperty.call(t,e))return t[e];const s=i();return t[e]=s,s}function Ke(t,e,i){return S(t)?t(e,i):t}const Ge=(t,e)=>!0===t?e:"string"==typeof t?M(e,t):void 0;function Ze(t,e,i,s,n){for(const o of e){const e=Ge(i,o);if(e){t.add(e);const o=Ke(e._fallback,i,n);if(void 0!==o&&o!==i&&o!==s)return o}else if(!1===e&&void 0!==s&&i!==s)return null}return!1}function Je(t,e,i,s){const a=e._rootScopes,r=Ke(e._fallback,i,s),l=[...t,...a],h=new Set;h.add(s);let c=Qe(h,l,i,r||i,s);return null!==c&&((void 0===r||r===i||(c=Qe(h,l,r,c,s),null!==c))&&je(Array.from(h),[""],a,r,(()=>function(t,e,i){const s=t._getTarget();e in s||(s[e]={});const a=s[e];if(n(a)&&o(i))return i;return a||{}}(e,i,s))))}function Qe(t,e,i,s,n){for(;i;)i=Ze(t,e,i,s,n);return i}function ti(t,e){for(const i of e){if(!i)continue;const e=i[t];if(void 0!==e)return e}}function ei(t){let e=t._keys;return e||(e=t._keys=function(t){const e=new Set;for(const i of t)for(const t of Object.keys(i).filter((t=>!t.startsWith("_"))))e.add(t);return Array.from(e)}(t._scopes)),e}function ii(t,e,i,s){const{iScale:n}=t,{key:o="r"}=this._parsing,a=new Array(s);let r,l,h,c;for(r=0,l=s;re"x"===t?"y":"x";function ai(t,e,i,s){const n=t.skip?e:t,o=e,a=i.skip?e:i,r=q(o,n),l=q(a,o);let h=r/(r+l),c=l/(r+l);h=isNaN(h)?0:h,c=isNaN(c)?0:c;const d=s*h,u=s*c;return{previous:{x:o.x-d*(a.x-n.x),y:o.y-d*(a.y-n.y)},next:{x:o.x+u*(a.x-n.x),y:o.y+u*(a.y-n.y)}}}function ri(t,e="x"){const i=oi(e),s=t.length,n=Array(s).fill(0),o=Array(s);let a,r,l,h=ni(t,0);for(a=0;a!t.skip))),"monotone"===e.cubicInterpolationMode)ri(t,n);else{let 
i=s?t[t.length-1]:t[0];for(o=0,a=t.length;o0===t||1===t,di=(t,e,i)=>-Math.pow(2,10*(t-=1))*Math.sin((t-e)*O/i),ui=(t,e,i)=>Math.pow(2,-10*t)*Math.sin((t-e)*O/i)+1,fi={linear:t=>t,easeInQuad:t=>t*t,easeOutQuad:t=>-t*(t-2),easeInOutQuad:t=>(t/=.5)<1?.5*t*t:-.5*(--t*(t-2)-1),easeInCubic:t=>t*t*t,easeOutCubic:t=>(t-=1)*t*t+1,easeInOutCubic:t=>(t/=.5)<1?.5*t*t*t:.5*((t-=2)*t*t+2),easeInQuart:t=>t*t*t*t,easeOutQuart:t=>-((t-=1)*t*t*t-1),easeInOutQuart:t=>(t/=.5)<1?.5*t*t*t*t:-.5*((t-=2)*t*t*t-2),easeInQuint:t=>t*t*t*t*t,easeOutQuint:t=>(t-=1)*t*t*t*t+1,easeInOutQuint:t=>(t/=.5)<1?.5*t*t*t*t*t:.5*((t-=2)*t*t*t*t+2),easeInSine:t=>1-Math.cos(t*E),easeOutSine:t=>Math.sin(t*E),easeInOutSine:t=>-.5*(Math.cos(C*t)-1),easeInExpo:t=>0===t?0:Math.pow(2,10*(t-1)),easeOutExpo:t=>1===t?1:1-Math.pow(2,-10*t),easeInOutExpo:t=>ci(t)?t:t<.5?.5*Math.pow(2,10*(2*t-1)):.5*(2-Math.pow(2,-10*(2*t-1))),easeInCirc:t=>t>=1?t:-(Math.sqrt(1-t*t)-1),easeOutCirc:t=>Math.sqrt(1-(t-=1)*t),easeInOutCirc:t=>(t/=.5)<1?-.5*(Math.sqrt(1-t*t)-1):.5*(Math.sqrt(1-(t-=2)*t)+1),easeInElastic:t=>ci(t)?t:di(t,.075,.3),easeOutElastic:t=>ci(t)?t:ui(t,.075,.3),easeInOutElastic(t){const e=.1125;return ci(t)?t:t<.5?.5*di(2*t,e,.45):.5+.5*ui(2*t-1,e,.45)},easeInBack(t){const e=1.70158;return t*t*((e+1)*t-e)},easeOutBack(t){const e=1.70158;return(t-=1)*t*((e+1)*t+e)+1},easeInOutBack(t){let e=1.70158;return(t/=.5)<1?t*t*((1+(e*=1.525))*t-e)*.5:.5*((t-=2)*t*((1+(e*=1.525))*t+e)+2)},easeInBounce:t=>1-fi.easeOutBounce(1-t),easeOutBounce(t){const e=7.5625,i=2.75;return t<1/i?e*t*t:t<2/i?e*(t-=1.5/i)*t+.75:t<2.5/i?e*(t-=2.25/i)*t+.9375:e*(t-=2.625/i)*t+.984375},easeInOutBounce:t=>t<.5?.5*fi.easeInBounce(2*t):.5*fi.easeOutBounce(2*t-1)+.5};function gi(t,e,i,s){return{x:t.x+i*(e.x-t.x),y:t.y+i*(e.y-t.y)}}function pi(t,e,i,s){return{x:t.x+i*(e.x-t.x),y:"middle"===s?i<.5?t.y:e.y:"after"===s?i<1?t.y:e.y:i>0?e.y:t.y}}function mi(t,e,i,s){const n={x:t.cp2x,y:t.cp2y},o={x:e.cp1x,y:e.cp1y},a=gi(t,n,i),r=gi(n,o,i),l=gi(o,e,i),h=gi(a,r,i),c=gi(r,l,i);return gi(h,c,i)}const bi=/^(normal|(\d+(?:\.\d+)?)(px|em|%)?)$/,xi=/^(normal|italic|initial|inherit|unset|(oblique( -?[0-9]?[0-9]deg)?))$/;function _i(t,e){const i=(""+t).match(bi);if(!i||"normal"===i[1])return 1.2*e;switch(t=+i[2],i[3]){case"px":return t;case"%":t/=100}return e*t}const yi=t=>+t||0;function vi(t,e){const i={},s=o(e),n=s?Object.keys(e):e,a=o(t)?s?i=>l(t[i],t[e[i]]):e=>t[e]:()=>t;for(const t of n)i[t]=yi(a(t));return i}function Mi(t){return vi(t,{top:"y",right:"x",bottom:"y",left:"x"})}function wi(t){return vi(t,["topLeft","topRight","bottomLeft","bottomRight"])}function ki(t){const e=Mi(t);return e.width=e.left+e.right,e.height=e.top+e.bottom,e}function Si(t,e){t=t||{},e=e||ue.font;let i=l(t.size,e.size);"string"==typeof i&&(i=parseInt(i,10));let s=l(t.style,e.style);s&&!(""+s).match(xi)&&(console.warn('Invalid font style specified: "'+s+'"'),s=void 0);const n={family:l(t.family,e.family),lineHeight:_i(l(t.lineHeight,e.lineHeight),i),size:i,style:s,weight:l(t.weight,e.weight),string:""};return n.string=De(n),n}function Pi(t,e,i,s){let o,a,r,l=!0;for(o=0,a=t.length;oi&&0===t?0:t+e;return{min:a(s,-Math.abs(o)),max:a(n,o)}}function Ci(t,e){return Object.assign(Object.create(t),e)}function Oi(t,e,i){return t?function(t,e){return{x:i=>t+t+e-i,setWidth(t){e=t},textAlign:t=>"center"===t?t:"right"===t?"left":"right",xPlus:(t,e)=>t-e,leftForLtr:(t,e)=>t-e}}(e,i):{x:t=>t,setWidth(t){},textAlign:t=>t,xPlus:(t,e)=>t+e,leftForLtr:(t,e)=>t}}function Ai(t,e){let 
i,s;"ltr"!==e&&"rtl"!==e||(i=t.canvas.style,s=[i.getPropertyValue("direction"),i.getPropertyPriority("direction")],i.setProperty("direction",e,"important"),t.prevTextDirection=s)}function Ti(t,e){void 0!==e&&(delete t.prevTextDirection,t.canvas.style.setProperty("direction",e[0],e[1]))}function Li(t){return"angle"===t?{between:Z,compare:K,normalize:G}:{between:tt,compare:(t,e)=>t-e,normalize:t=>t}}function Ei({start:t,end:e,count:i,loop:s,style:n}){return{start:t%i,end:e%i,loop:s&&(e-t+1)%i==0,style:n}}function Ri(t,e,i){if(!i)return[t];const{property:s,start:n,end:o}=i,a=e.length,{compare:r,between:l,normalize:h}=Li(s),{start:c,end:d,loop:u,style:f}=function(t,e,i){const{property:s,start:n,end:o}=i,{between:a,normalize:r}=Li(s),l=e.length;let h,c,{start:d,end:u,loop:f}=t;if(f){for(d+=l,u+=l,h=0,c=l;hx||l(n,b,p)&&0!==r(n,b),v=()=>!x||0===r(o,p)||l(o,b,p);for(let t=c,i=c;t<=d;++t)m=e[t%a],m.skip||(p=h(m[s]),p!==b&&(x=l(p,n,o),null===_&&y()&&(_=0===r(p,n)?t:i),null!==_&&v()&&(g.push(Ei({start:_,end:t,loop:u,count:a,style:f})),_=null),i=t,b=p));return null!==_&&g.push(Ei({start:_,end:d,loop:u,count:a,style:f})),g}function Ii(t,e){const i=[],s=t.segments;for(let n=0;nn&&t[o%e].skip;)o--;return o%=e,{start:n,end:o}}(i,n,o,s);if(!0===s)return Fi(t,[{start:a,end:r,loop:o}],i,e);return Fi(t,function(t,e,i,s){const n=t.length,o=[];let a,r=e,l=t[e];for(a=e+1;a<=i;++a){const i=t[a%n];i.skip||i.stop?l.skip||(s=!1,o.push({start:e%n,end:(a-1)%n,loop:s}),e=r=i.stop?a:null):(r=a,l.skip&&(e=a)),l=i}return null!==r&&o.push({start:e%n,end:r%n,loop:s}),o}(i,a,r{t[a](e[i],n)&&(o.push({element:t,datasetIndex:s,index:l}),r=r||t.inRange(e.x,e.y,n))})),s&&!r?[]:o}var Xi={evaluateInteractionItems:Hi,modes:{index(t,e,i,s){const n=ve(e,t),o=i.axis||"x",a=i.includeInvisible||!1,r=i.intersect?ji(t,n,o,s,a):Yi(t,n,o,!1,s,a),l=[];return r.length?(t.getSortedVisibleDatasetMetas().forEach((t=>{const e=r[0].index,i=t.data[e];i&&!i.skip&&l.push({element:i,datasetIndex:t.index,index:e})})),l):[]},dataset(t,e,i,s){const n=ve(e,t),o=i.axis||"xy",a=i.includeInvisible||!1;let r=i.intersect?ji(t,n,o,s,a):Yi(t,n,o,!1,s,a);if(r.length>0){const e=r[0].datasetIndex,i=t.getDatasetMeta(e).data;r=[];for(let t=0;tji(t,ve(e,t),i.axis||"xy",s,i.includeInvisible||!1),nearest(t,e,i,s){const n=ve(e,t),o=i.axis||"xy",a=i.includeInvisible||!1;return Yi(t,n,o,i.intersect,s,a)},x:(t,e,i,s)=>Ui(t,ve(e,t),"x",i.intersect,s),y:(t,e,i,s)=>Ui(t,ve(e,t),"y",i.intersect,s)}};const qi=["left","top","right","bottom"];function Ki(t,e){return t.filter((t=>t.pos===e))}function Gi(t,e){return t.filter((t=>-1===qi.indexOf(t.pos)&&t.box.axis===e))}function Zi(t,e){return t.sort(((t,i)=>{const s=e?i:t,n=e?t:i;return s.weight===n.weight?s.index-n.index:s.weight-n.weight}))}function Ji(t,e){const i=function(t){const e={};for(const i of t){const{stack:t,pos:s,stackWeight:n}=i;if(!t||!qi.includes(s))continue;const o=e[t]||(e[t]={count:0,placed:0,weight:0,size:0});o.count++,o.weight+=n}return e}(t),{vBoxMaxWidth:s,hBoxMaxHeight:n}=e;let o,a,r;for(o=0,a=t.length;o{s[t]=Math.max(e[t],i[t])})),s}return s(t?["left","right"]:["top","bottom"])}function ss(t,e,i,s){const n=[];let 
o,a,r,l,h,c;for(o=0,a=t.length,h=0;ot.box.fullSize)),!0),s=Zi(Ki(e,"left"),!0),n=Zi(Ki(e,"right")),o=Zi(Ki(e,"top"),!0),a=Zi(Ki(e,"bottom")),r=Gi(e,"x"),l=Gi(e,"y");return{fullSize:i,leftAndTop:s.concat(o),rightAndBottom:n.concat(l).concat(a).concat(r),chartArea:Ki(e,"chartArea"),vertical:s.concat(n).concat(l),horizontal:o.concat(a).concat(r)}}(t.boxes),l=r.vertical,h=r.horizontal;u(t.boxes,(t=>{"function"==typeof t.beforeLayout&&t.beforeLayout()}));const c=l.reduce(((t,e)=>e.box.options&&!1===e.box.options.display?t:t+1),0)||1,d=Object.freeze({outerWidth:e,outerHeight:i,padding:n,availableWidth:o,availableHeight:a,vBoxMaxWidth:o/2/c,hBoxMaxHeight:a/2}),f=Object.assign({},n);ts(f,ki(s));const g=Object.assign({maxPadding:f,w:o,h:a,x:n.left,y:n.top},n),p=Ji(l.concat(h),d);ss(r.fullSize,g,d,p),ss(l,g,d,p),ss(h,g,d,p)&&ss(l,g,d,p),function(t){const e=t.maxPadding;function i(i){const s=Math.max(e[i]-t[i],0);return t[i]+=s,s}t.y+=i("top"),t.x+=i("left"),i("right"),i("bottom")}(g),os(r.leftAndTop,g,d,p),g.x+=g.w,g.y+=g.h,os(r.rightAndBottom,g,d,p),t.chartArea={left:g.left,top:g.top,right:g.left+g.w,bottom:g.top+g.h,height:g.h,width:g.w},u(r.chartArea,(e=>{const i=e.box;Object.assign(i,t.chartArea),i.update(g.w,g.h,{left:0,top:0,right:0,bottom:0})}))}};class rs{acquireContext(t,e){}releaseContext(t){return!1}addEventListener(t,e,i){}removeEventListener(t,e,i){}getDevicePixelRatio(){return 1}getMaximumSize(t,e,i,s){return e=Math.max(0,e||t.width),i=i||t.height,{width:e,height:Math.max(0,s?Math.floor(e/s):i)}}isAttached(t){return!0}updateConfig(t){}}class ls extends rs{acquireContext(t){return t&&t.getContext&&t.getContext("2d")||null}updateConfig(t){t.options.animation=!1}}const hs="$chartjs",cs={touchstart:"mousedown",touchmove:"mousemove",touchend:"mouseup",pointerenter:"mouseenter",pointerdown:"mousedown",pointermove:"mousemove",pointerup:"mouseup",pointerleave:"mouseout",pointerout:"mouseout"},ds=t=>null===t||""===t;const us=!!Se&&{passive:!0};function fs(t,e,i){t.canvas.removeEventListener(e,i,us)}function gs(t,e){for(const i of t)if(i===e||i.contains(e))return!0}function ps(t,e,i){const s=t.canvas,n=new MutationObserver((t=>{let e=!1;for(const i of t)e=e||gs(i.addedNodes,s),e=e&&!gs(i.removedNodes,s);e&&i()}));return n.observe(document,{childList:!0,subtree:!0}),n}function ms(t,e,i){const s=t.canvas,n=new MutationObserver((t=>{let e=!1;for(const i of t)e=e||gs(i.removedNodes,s),e=e&&!gs(i.addedNodes,s);e&&i()}));return n.observe(document,{childList:!0,subtree:!0}),n}const bs=new Map;let xs=0;function _s(){const t=window.devicePixelRatio;t!==xs&&(xs=t,bs.forEach(((e,i)=>{i.currentDevicePixelRatio!==t&&e()})))}function ys(t,e,i){const s=t.canvas,n=s&&ge(s);if(!n)return;const o=ct(((t,e)=>{const s=n.clientWidth;i(t,e),s{const e=t[0],i=e.contentRect.width,s=e.contentRect.height;0===i&&0===s||o(i,s)}));return a.observe(n),function(t,e){bs.size||window.addEventListener("resize",_s),bs.set(t,e)}(t,o),a}function vs(t,e,i){i&&i.disconnect(),"resize"===e&&function(t){bs.delete(t),bs.size||window.removeEventListener("resize",_s)}(t)}function Ms(t,e,i){const s=t.canvas,n=ct((e=>{null!==t.ctx&&i(function(t,e){const i=cs[t.type]||t.type,{x:s,y:n}=ve(t,e);return{type:i,chart:e,native:t,x:void 0!==s?s:null,y:void 0!==n?n:null}}(e,t))}),t);return function(t,e,i){t.addEventListener(e,i,us)}(s,e,n),n}class ws extends rs{acquireContext(t,e){const i=t&&t.getContext&&t.getContext("2d");return i&&i.canvas===t?(function(t,e){const 
i=t.style,s=t.getAttribute("height"),n=t.getAttribute("width");if(t[hs]={initial:{height:s,width:n,style:{display:i.display,height:i.height,width:i.width}}},i.display=i.display||"block",i.boxSizing=i.boxSizing||"border-box",ds(n)){const e=Pe(t,"width");void 0!==e&&(t.width=e)}if(ds(s))if(""===t.style.height)t.height=t.width/(e||2);else{const e=Pe(t,"height");void 0!==e&&(t.height=e)}}(t,e),i):null}releaseContext(t){const e=t.canvas;if(!e[hs])return!1;const i=e[hs].initial;["height","width"].forEach((t=>{const n=i[t];s(n)?e.removeAttribute(t):e.setAttribute(t,n)}));const n=i.style||{};return Object.keys(n).forEach((t=>{e.style[t]=n[t]})),e.width=e.width,delete e[hs],!0}addEventListener(t,e,i){this.removeEventListener(t,e);const s=t.$proxies||(t.$proxies={}),n={attach:ps,detach:ms,resize:ys}[e]||Ms;s[e]=n(t,e,i)}removeEventListener(t,e){const i=t.$proxies||(t.$proxies={}),s=i[e];if(!s)return;({attach:vs,detach:vs,resize:vs}[e]||fs)(t,e,s),i[e]=void 0}getDevicePixelRatio(){return window.devicePixelRatio}getMaximumSize(t,e,i,s){return we(t,e,i,s)}isAttached(t){const e=ge(t);return!(!e||!e.isConnected)}}function ks(t){return!fe()||"undefined"!=typeof OffscreenCanvas&&t instanceof OffscreenCanvas?ls:ws}var Ss=Object.freeze({__proto__:null,BasePlatform:rs,BasicPlatform:ls,DomPlatform:ws,_detectPlatform:ks});const Ps="transparent",Ds={boolean:(t,e,i)=>i>.5?e:t,color(t,e,i){const s=Qt(t||Ps),n=s.valid&&Qt(e||Ps);return n&&n.valid?n.mix(s,i).hexString():e},number:(t,e,i)=>t+(e-t)*i};class Cs{constructor(t,e,i,s){const n=e[i];s=Pi([t.to,s,n,t.from]);const o=Pi([t.from,n,s]);this._active=!0,this._fn=t.fn||Ds[t.type||typeof o],this._easing=fi[t.easing]||fi.linear,this._start=Math.floor(Date.now()+(t.delay||0)),this._duration=this._total=Math.floor(t.duration),this._loop=!!t.loop,this._target=e,this._prop=i,this._from=o,this._to=s,this._promises=void 0}active(){return this._active}update(t,e,i){if(this._active){this._notify(!1);const s=this._target[this._prop],n=i-this._start,o=this._duration-n;this._start=i,this._duration=Math.floor(Math.max(o,t.duration)),this._total+=n,this._loop=!!t.loop,this._to=Pi([t.to,e,s,t.from]),this._from=Pi([t.from,s,e])}}cancel(){this._active&&(this.tick(Date.now()),this._active=!1,this._notify(!1))}tick(t){const e=t-this._start,i=this._duration,s=this._prop,n=this._from,o=this._loop,a=this._to;let r;if(this._active=n!==a&&(o||e1?2-r:r,r=this._easing(Math.min(1,Math.max(0,r))),this._target[s]=this._fn(n,a,r))}wait(){const t=this._promises||(this._promises=[]);return new Promise(((e,i)=>{t.push({res:e,rej:i})}))}_notify(t){const e=t?"res":"rej",i=this._promises||[];for(let t=0;t{const a=t[s];if(!o(a))return;const r={};for(const t of e)r[t]=a[t];(n(a.properties)&&a.properties||[s]).forEach((t=>{t!==s&&i.has(t)||i.set(t,r)}))}))}_animateOptions(t,e){const i=e.options,s=function(t,e){if(!e)return;let i=t.options;if(!i)return void(t.options=e);i.$shared&&(t.options=i=Object.assign({},i,{$shared:!1,$animations:{}}));return i}(t,i);if(!s)return[];const n=this._createAnimations(s,i);return i.$shared&&function(t,e){const i=[],s=Object.keys(e);for(let e=0;e{t.options=i}),(()=>{})),n}_createAnimations(t,e){const i=this._properties,s=[],n=t.$animations||(t.$animations={}),o=Object.keys(e),a=Date.now();let r;for(r=o.length-1;r>=0;--r){const l=o[r];if("$"===l.charAt(0))continue;if("options"===l){s.push(...this._animateOptions(t,e));continue}const h=e[l];let c=n[l];const d=i.get(l);if(c){if(d&&c.active()){c.update(d,h,a);continue}c.cancel()}d&&d.duration?(n[l]=c=new 
Cs(d,t,l,h),s.push(c)):t[l]=h}return s}update(t,e){if(0===this._properties.size)return void Object.assign(t,e);const i=this._createAnimations(t,e);return i.length?(xt.add(this._chart,i),!0):void 0}}function As(t,e){const i=t&&t.options||{},s=i.reverse,n=void 0===i.min?e:0,o=void 0===i.max?e:0;return{start:s?o:n,end:s?n:o}}function Ts(t,e){const i=[],s=t._getSortedDatasetMetas(e);let n,o;for(n=0,o=s.length;n0||!i&&e<0)return n.index}return null}function zs(t,e){const{chart:i,_cachedMeta:s}=t,n=i._stacks||(i._stacks={}),{iScale:o,vScale:a,index:r}=s,l=o.axis,h=a.axis,c=function(t,e,i){return`${t.id}.${e.id}.${i.stack||i.type}`}(o,a,s),d=e.length;let u;for(let t=0;ti[t].axis===e)).shift()}function Vs(t,e){const i=t.controller.index,s=t.vScale&&t.vScale.axis;if(s){e=e||t._parsed;for(const t of e){const e=t._stacks;if(!e||void 0===e[s]||void 0===e[s][i])return;delete e[s][i],void 0!==e[s]._visualValues&&void 0!==e[s]._visualValues[i]&&delete e[s]._visualValues[i]}}}const Bs=t=>"reset"===t||"none"===t,Ws=(t,e)=>e?t:Object.assign({},t);class Ns{static defaults={};static datasetElementType=null;static dataElementType=null;constructor(t,e){this.chart=t,this._ctx=t.ctx,this.index=e,this._cachedDataOpts={},this._cachedMeta=this.getMeta(),this._type=this._cachedMeta.type,this.options=void 0,this._parsing=!1,this._data=void 0,this._objectData=void 0,this._sharedOptions=void 0,this._drawStart=void 0,this._drawCount=void 0,this.enableOptionSharing=!1,this.supportsDecimation=!1,this.$context=void 0,this._syncList=[],this.datasetElementType=new.target.datasetElementType,this.dataElementType=new.target.dataElementType,this.initialize()}initialize(){const t=this._cachedMeta;this.configure(),this.linkScales(),t._stacked=Es(t.vScale,t),this.addElements(),this.options.fill&&!this.chart.isPluginEnabled("filler")&&console.warn("Tried to use the 'fill' option without the 'Filler' plugin enabled. 
Please import and register the 'Filler' plugin and make sure it is not disabled in the options")}updateIndex(t){this.index!==t&&Vs(this._cachedMeta),this.index=t}linkScales(){const t=this.chart,e=this._cachedMeta,i=this.getDataset(),s=(t,e,i,s)=>"x"===t?e:"r"===t?s:i,n=e.xAxisID=l(i.xAxisID,Fs(t,"x")),o=e.yAxisID=l(i.yAxisID,Fs(t,"y")),a=e.rAxisID=l(i.rAxisID,Fs(t,"r")),r=e.indexAxis,h=e.iAxisID=s(r,n,o,a),c=e.vAxisID=s(r,o,n,a);e.xScale=this.getScaleForId(n),e.yScale=this.getScaleForId(o),e.rScale=this.getScaleForId(a),e.iScale=this.getScaleForId(h),e.vScale=this.getScaleForId(c)}getDataset(){return this.chart.data.datasets[this.index]}getMeta(){return this.chart.getDatasetMeta(this.index)}getScaleForId(t){return this.chart.scales[t]}_getOtherScale(t){const e=this._cachedMeta;return t===e.iScale?e.vScale:e.iScale}reset(){this._update("reset")}_destroy(){const t=this._cachedMeta;this._data&&rt(this._data,this),t._stacked&&Vs(t)}_dataCheck(){const t=this.getDataset(),e=t.data||(t.data=[]),i=this._data;if(o(e))this._data=function(t){const e=Object.keys(t),i=new Array(e.length);let s,n,o;for(s=0,n=e.length;s0&&i._parsed[t-1];if(!1===this._parsing)i._parsed=s,i._sorted=!0,d=s;else{d=n(s[t])?this.parseArrayData(i,s,t,e):o(s[t])?this.parseObjectData(i,s,t,e):this.parsePrimitiveData(i,s,t,e);const a=()=>null===c[l]||f&&c[l]t&&!e.hidden&&e._stacked&&{keys:Ts(i,!0),values:null})(e,i,this.chart),h={min:Number.POSITIVE_INFINITY,max:Number.NEGATIVE_INFINITY},{min:c,max:d}=function(t){const{min:e,max:i,minDefined:s,maxDefined:n}=t.getUserBounds();return{min:s?e:Number.NEGATIVE_INFINITY,max:n?i:Number.POSITIVE_INFINITY}}(r);let u,f;function g(){f=s[u];const e=f[r.axis];return!a(f[t.axis])||c>e||d=0;--u)if(!g()){this.updateRangeFromParsed(h,t,f,l);break}return h}getAllParsedValues(t){const e=this._cachedMeta._parsed,i=[];let s,n,o;for(s=0,n=e.length;s=0&&tthis.getContext(i,s,e)),c);return f.$shared&&(f.$shared=r,n[o]=Object.freeze(Ws(f,r))),f}_resolveAnimations(t,e,i){const s=this.chart,n=this._cachedDataOpts,o=`animation-${e}`,a=n[o];if(a)return a;let r;if(!1!==s.options.animation){const s=this.chart.config,n=s.datasetAnimationScopeKeys(this._type,e),o=s.getOptionScopes(this.getDataset(),n);r=s.createResolver(o,this.getContext(t,i,e))}const l=new Os(s,r&&r.animations);return r&&r._cacheable&&(n[o]=Object.freeze(l)),l}getSharedOptions(t){if(t.$shared)return this._sharedOptions||(this._sharedOptions=Object.assign({},t))}includeOptions(t,e){return!e||Bs(t)||this.chart._animationsDisabled}_getSharedOptions(t,e){const i=this.resolveDataElementOptions(t,e),s=this._sharedOptions,n=this.getSharedOptions(i),o=this.includeOptions(e,n)||n!==s;return this.updateSharedOptions(n,e,i),{sharedOptions:n,includeOptions:o}}updateElement(t,e,i,s){Bs(s)?Object.assign(t,i):this._resolveAnimations(e,s).update(t,i)}updateSharedOptions(t,e,i){t&&!Bs(e)&&this._resolveAnimations(void 0,e).update(t,i)}_setStyle(t,e,i,s){t.active=s;const n=this.getStyle(e,s);this._resolveAnimations(e,i,s).update(t,{options:!s&&this.getSharedOptions(n)||n})}removeHoverStyle(t,e,i){this._setStyle(t,i,"active",!1)}setHoverStyle(t,e,i){this._setStyle(t,i,"active",!0)}_removeDatasetHoverStyle(){const t=this._cachedMeta.dataset;t&&this._setStyle(t,void 0,"active",!1)}_setDatasetHoverStyle(){const t=this._cachedMeta.dataset;t&&this._setStyle(t,void 0,"active",!0)}_resyncElements(t){const e=this._data,i=this._cachedMeta.data;for(const[t,e,i]of this._syncList)this[t](e,i);this._syncList=[];const 
[Vendored third-party asset: minified Chart.js (version 4.4.0) UMD bundle. This span of the diff contains the bundled library source — the core Chart class and registry, the config/option resolver, dataset controllers (bar, bubble, doughnut, pie, polar area, radar, line), elements (arc, line, point), scales (category, linear, logarithmic, radial linear, time, timeseries), and plugins (colors, decimation, filler, legend, title). The minified code was truncated during extraction and is not reproduced here.]
void(this.width=this.height=this.right=this.bottom=0);this.width=this.right=t,this.height=this.bottom=e;const s=n(i.text)?i.text.length:1;this._padding=ki(i.padding);const o=s*Si(i.font).lineHeight+this._padding.height;this.isHorizontal()?this.height=o:this.width=o}isHorizontal(){const t=this.options.position;return"top"===t||"bottom"===t}_drawArgs(t){const{top:e,left:i,bottom:s,right:n,options:o}=this,a=o.align;let r,l,h,c=0;return this.isHorizontal()?(l=ft(a,i,n),h=e+t,r=n-i):("left"===o.position?(l=i+t,h=ft(a,s,e),c=-.5*C):(l=n-t,h=ft(a,e,s),c=.5*C),r=s-e),{titleX:l,titleY:h,maxWidth:r,rotation:c}}draw(){const t=this.ctx,e=this.options;if(!e.display)return;const i=Si(e.font),s=i.lineHeight/2+this._padding.top,{titleX:n,titleY:o,maxWidth:a,rotation:r}=this._drawArgs(s);Ne(t,e.text,0,0,i,{color:e.color,maxWidth:a,rotation:r,textAlign:ut(e.align),textBaseline:"middle",translation:[n,o]})}}var Ma={id:"title",_element:va,start(t,e,i){!function(t,e){const i=new va({ctx:t.ctx,options:e,chart:t});as.configure(t,i,e),as.addBox(t,i),t.titleBlock=i}(t,i)},stop(t){const e=t.titleBlock;as.removeBox(t,e),delete t.titleBlock},beforeUpdate(t,e,i){const s=t.titleBlock;as.configure(t,s,i),s.options=i},defaults:{align:"center",display:!1,font:{weight:"bold"},fullSize:!0,padding:10,position:"top",text:"",weight:2e3},defaultRoutes:{color:"color"},descriptors:{_scriptable:!0,_indexable:!1}};const wa=new WeakMap;var ka={id:"subtitle",start(t,e,i){const s=new va({ctx:t.ctx,options:i,chart:t});as.configure(t,s,i),as.addBox(t,s),wa.set(t,s)},stop(t){as.removeBox(t,wa.get(t)),wa.delete(t)},beforeUpdate(t,e,i){const s=wa.get(t);as.configure(t,s,i),s.options=i},defaults:{align:"center",display:!1,font:{weight:"normal"},fullSize:!0,padding:0,position:"top",text:"",weight:1500},defaultRoutes:{color:"color"},descriptors:{_scriptable:!0,_indexable:!1}};const Sa={average(t){if(!t.length)return!1;let e,i,s=0,n=0,o=0;for(e=0,i=t.length;e-1?t.split("\n"):t}function Ca(t,e){const{element:i,datasetIndex:s,index:n}=e,o=t.getDatasetMeta(s).controller,{label:a,value:r}=o.getLabelAndValue(n);return{chart:t,label:a,parsed:o.getParsed(n),raw:t.data.datasets[s].data[n],formattedValue:r,dataset:o.getDataset(),dataIndex:n,datasetIndex:s,element:i}}function Oa(t,e){const i=t.chart.ctx,{body:s,footer:n,title:o}=t,{boxWidth:a,boxHeight:r}=e,l=Si(e.bodyFont),h=Si(e.titleFont),c=Si(e.footerFont),d=o.length,f=n.length,g=s.length,p=ki(e.padding);let m=p.height,b=0,x=s.reduce(((t,e)=>t+e.before.length+e.lines.length+e.after.length),0);if(x+=t.beforeBody.length+t.afterBody.length,d&&(m+=d*h.lineHeight+(d-1)*e.titleSpacing+e.titleMarginBottom),x){m+=g*(e.displayColors?Math.max(r,l.lineHeight):l.lineHeight)+(x-g)*l.lineHeight+(x-1)*e.bodySpacing}f&&(m+=e.footerMarginTop+f*c.lineHeight+(f-1)*e.footerSpacing);let _=0;const y=function(t){b=Math.max(b,i.measureText(t).width+_)};return i.save(),i.font=h.string,u(t.title,y),i.font=l.string,u(t.beforeBody.concat(t.afterBody),y),_=e.displayColors?a+2+e.boxPadding:0,u(s,(t=>{u(t.before,y),u(t.lines,y),u(t.after,y)})),_=0,i.font=c.string,u(t.footer,y),i.restore(),b+=p.width,{width:b,height:m}}function Aa(t,e,i,s){const{x:n,width:o}=i,{width:a,chartArea:{left:r,right:l}}=t;let h="center";return"center"===s?h=n<=(r+l)/2?"left":"right":n<=o/2?h="left":n>=a-o/2&&(h="right"),function(t,e,i,s){const{x:n,width:o}=s,a=i.caretSize+i.caretPadding;return"left"===t&&n+o+a>e.width||"right"===t&&n-o-a<0||void 0}(h,t,e,i)&&(h="center"),h}function Ta(t,e,i){const 
s=i.yAlign||e.yAlign||function(t,e){const{y:i,height:s}=e;return it.height-s/2?"bottom":"center"}(t,i);return{xAlign:i.xAlign||e.xAlign||Aa(t,e,i,s),yAlign:s}}function La(t,e,i,s){const{caretSize:n,caretPadding:o,cornerRadius:a}=t,{xAlign:r,yAlign:l}=i,h=n+o,{topLeft:c,topRight:d,bottomLeft:u,bottomRight:f}=wi(a);let g=function(t,e){let{x:i,width:s}=t;return"right"===e?i-=s:"center"===e&&(i-=s/2),i}(e,r);const p=function(t,e,i){let{y:s,height:n}=t;return"top"===e?s+=i:s-="bottom"===e?n+i:n/2,s}(e,l,h);return"center"===l?"left"===r?g+=h:"right"===r&&(g-=h):"left"===r?g-=Math.max(c,u)+n:"right"===r&&(g+=Math.max(d,f)+n),{x:J(g,0,s.width-e.width),y:J(p,0,s.height-e.height)}}function Ea(t,e,i){const s=ki(i.padding);return"center"===e?t.x+t.width/2:"right"===e?t.x+t.width-s.right:t.x+s.left}function Ra(t){return Pa([],Da(t))}function Ia(t,e){const i=e&&e.dataset&&e.dataset.tooltip&&e.dataset.tooltip.callbacks;return i?t.override(i):t}const za={beforeTitle:e,title(t){if(t.length>0){const e=t[0],i=e.chart.data.labels,s=i?i.length:0;if(this&&this.options&&"dataset"===this.options.mode)return e.dataset.label||"";if(e.label)return e.label;if(s>0&&e.dataIndex{const e={before:[],lines:[],after:[]},n=Ia(i,t);Pa(e.before,Da(Fa(n,"beforeLabel",this,t))),Pa(e.lines,Fa(n,"label",this,t)),Pa(e.after,Da(Fa(n,"afterLabel",this,t))),s.push(e)})),s}getAfterBody(t,e){return Ra(Fa(e.callbacks,"afterBody",this,t))}getFooter(t,e){const{callbacks:i}=e,s=Fa(i,"beforeFooter",this,t),n=Fa(i,"footer",this,t),o=Fa(i,"afterFooter",this,t);let a=[];return a=Pa(a,Da(s)),a=Pa(a,Da(n)),a=Pa(a,Da(o)),a}_createItems(t){const e=this._active,i=this.chart.data,s=[],n=[],o=[];let a,r,l=[];for(a=0,r=e.length;at.filter(e,s,n,i)))),t.itemSort&&(l=l.sort(((e,s)=>t.itemSort(e,s,i)))),u(l,(e=>{const i=Ia(t.callbacks,e);s.push(Fa(i,"labelColor",this,e)),n.push(Fa(i,"labelPointStyle",this,e)),o.push(Fa(i,"labelTextColor",this,e))})),this.labelColors=s,this.labelPointStyles=n,this.labelTextColors=o,this.dataPoints=l,l}update(t,e){const i=this.options.setContext(this.getContext()),s=this._active;let n,o=[];if(s.length){const t=Sa[i.position].call(this,s,this._eventPosition);o=this._createItems(i),this.title=this.getTitle(o,i),this.beforeBody=this.getBeforeBody(o,i),this.body=this.getBody(o,i),this.afterBody=this.getAfterBody(o,i),this.footer=this.getFooter(o,i);const e=this._size=Oa(this,i),a=Object.assign({},t,e),r=Ta(this.chart,i,a),l=La(i,a,r,this.chart);this.xAlign=r.xAlign,this.yAlign=r.yAlign,n={opacity:1,x:l.x,y:l.y,width:e.width,height:e.height,caretX:t.x,caretY:t.y}}else 0!==this.opacity&&(n={opacity:0});this._tooltipItems=o,this.$context=void 0,n&&this._resolveAnimations().update(this,n),t&&i.external&&i.external.call(this,{chart:this.chart,tooltip:this,replay:e})}drawCaret(t,e,i,s){const n=this.getCaretPosition(t,i,s);e.lineTo(n.x1,n.y1),e.lineTo(n.x2,n.y2),e.lineTo(n.x3,n.y3)}getCaretPosition(t,e,i){const{xAlign:s,yAlign:n}=this,{caretSize:o,cornerRadius:a}=i,{topLeft:r,topRight:l,bottomLeft:h,bottomRight:c}=wi(a),{x:d,y:u}=t,{width:f,height:g}=e;let p,m,b,x,_,y;return"center"===n?(_=u+g/2,"left"===s?(p=d,m=p-o,x=_+o,y=_-o):(p=d+f,m=p+o,x=_-o,y=_+o),b=p):(m="left"===s?d+Math.max(r,h)+o:"right"===s?d+f-Math.max(l,c)-o:this.caretX,"top"===n?(x=u,_=x-o,p=m-o,b=m+o):(x=u+g,_=x+o,p=m+o,b=m-o),y=x),{x1:p,x2:m,x3:b,y1:x,y2:_,y3:y}}drawTitle(t,e,i){const s=this.title,n=s.length;let o,a,r;if(n){const 
l=Oi(i.rtl,this.x,this.width);for(t.x=Ea(this,i.titleAlign,i),e.textAlign=l.textAlign(i.titleAlign),e.textBaseline="middle",o=Si(i.titleFont),a=i.titleSpacing,e.fillStyle=i.titleColor,e.font=o.string,r=0;r0!==t))?(t.beginPath(),t.fillStyle=n.multiKeyBackground,He(t,{x:e,y:g,w:h,h:l,radius:r}),t.fill(),t.stroke(),t.fillStyle=a.backgroundColor,t.beginPath(),He(t,{x:i,y:g+1,w:h-2,h:l-2,radius:r}),t.fill()):(t.fillStyle=n.multiKeyBackground,t.fillRect(e,g,h,l),t.strokeRect(e,g,h,l),t.fillStyle=a.backgroundColor,t.fillRect(i,g+1,h-2,l-2))}t.fillStyle=this.labelTextColors[i]}drawBody(t,e,i){const{body:s}=this,{bodySpacing:n,bodyAlign:o,displayColors:a,boxHeight:r,boxWidth:l,boxPadding:h}=i,c=Si(i.bodyFont);let d=c.lineHeight,f=0;const g=Oi(i.rtl,this.x,this.width),p=function(i){e.fillText(i,g.x(t.x+f),t.y+d/2),t.y+=d+n},m=g.textAlign(o);let b,x,_,y,v,M,w;for(e.textAlign=o,e.textBaseline="middle",e.font=c.string,t.x=Ea(this,m,i),e.fillStyle=i.bodyColor,u(this.beforeBody,p),f=a&&"right"!==m?"center"===o?l/2+h:l+2+h:0,y=0,M=s.length;y0&&e.stroke()}_updateAnimationTarget(t){const e=this.chart,i=this.$animations,s=i&&i.x,n=i&&i.y;if(s||n){const i=Sa[t.position].call(this,this._active,this._eventPosition);if(!i)return;const o=this._size=Oa(this,t),a=Object.assign({},i,this._size),r=Ta(e,t,a),l=La(t,a,r,e);s._to===l.x&&n._to===l.y||(this.xAlign=r.xAlign,this.yAlign=r.yAlign,this.width=o.width,this.height=o.height,this.caretX=i.x,this.caretY=i.y,this._resolveAnimations().update(this,l))}}_willRender(){return!!this.opacity}draw(t){const e=this.options.setContext(this.getContext());let i=this.opacity;if(!i)return;this._updateAnimationTarget(e);const s={width:this.width,height:this.height},n={x:this.x,y:this.y};i=Math.abs(i)<.001?0:i;const o=ki(e.padding),a=this.title.length||this.beforeBody.length||this.body.length||this.afterBody.length||this.footer.length;e.enabled&&a&&(t.save(),t.globalAlpha=i,this.drawBackground(n,t,s,e),Ai(t,e.textDirection),n.y+=o.top,this.drawTitle(n,t,e),this.drawBody(n,t,e),this.drawFooter(n,t,e),Ti(t,e.textDirection),t.restore())}getActiveElements(){return this._active||[]}setActiveElements(t,e){const i=this._active,s=t.map((({datasetIndex:t,index:e})=>{const i=this.chart.getDatasetMeta(t);if(!i)throw new Error("Cannot find a dataset at index "+t);return{datasetIndex:t,element:i.data[e],index:e}})),n=!f(i,s),o=this._positionChanged(s,e);(n||o)&&(this._active=s,this._eventPosition=e,this._ignoreReplayEvents=!0,this.update(!0))}handleEvent(t,e,i=!0){if(e&&this._ignoreReplayEvents)return!1;this._ignoreReplayEvents=!1;const s=this.options,n=this._active||[],o=this._getActiveElements(t,n,e,i),a=this._positionChanged(o,t),r=e||!f(o,n)||a;return r&&(this._active=o,(s.enabled||s.external)&&(this._eventPosition={x:t.x,y:t.y},this.update(!0,e))),r}_getActiveElements(t,e,i,s){const n=this.options;if("mouseout"===t.type)return[];if(!s)return e;const o=this.chart.getElementsAtEventForMode(t,n.mode,n,i);return n.reverse&&o.reverse(),o}_positionChanged(t,e){const{caretX:i,caretY:s,options:n}=this,o=Sa[n.position].call(this,t,e);return!1!==o&&(i!==o.x||s!==o.y)}}var Ba={id:"tooltip",_element:Va,positioners:Sa,afterInit(t,e,i){i&&(t.tooltip=new Va({chart:t,options:i}))},beforeUpdate(t,e,i){t.tooltip&&t.tooltip.initialize(i)},reset(t,e,i){t.tooltip&&t.tooltip.initialize(i)},afterDraw(t){const e=t.tooltip;if(e&&e._willRender()){const 
i={tooltip:e};if(!1===t.notifyPlugins("beforeTooltipDraw",{...i,cancelable:!0}))return;e.draw(t.ctx),t.notifyPlugins("afterTooltipDraw",i)}},afterEvent(t,e){if(t.tooltip){const i=e.replay;t.tooltip.handleEvent(e.event,i,e.inChartArea)&&(e.changed=!0)}},defaults:{enabled:!0,external:null,position:"average",backgroundColor:"rgba(0,0,0,0.8)",titleColor:"#fff",titleFont:{weight:"bold"},titleSpacing:2,titleMarginBottom:6,titleAlign:"left",bodyColor:"#fff",bodySpacing:2,bodyFont:{},bodyAlign:"left",footerColor:"#fff",footerSpacing:2,footerMarginTop:6,footerFont:{weight:"bold"},footerAlign:"left",padding:6,caretPadding:2,caretSize:5,cornerRadius:6,boxHeight:(t,e)=>e.bodyFont.size,boxWidth:(t,e)=>e.bodyFont.size,multiKeyBackground:"#fff",displayColors:!0,boxPadding:0,borderColor:"rgba(0,0,0,0)",borderWidth:0,animation:{duration:400,easing:"easeOutQuart"},animations:{numbers:{type:"number",properties:["x","y","width","height","caretX","caretY"]},opacity:{easing:"linear",duration:200}},callbacks:za},defaultRoutes:{bodyFont:"font",footerFont:"font",titleFont:"font"},descriptors:{_scriptable:t=>"filter"!==t&&"itemSort"!==t&&"external"!==t,_indexable:!1,callbacks:{_scriptable:!1,_indexable:!1},animation:{_fallback:!1},animations:{_fallback:"animation"}},additionalOptionScopes:["interaction"]};return An.register(Yn,jo,fo,t),An.helpers={...Wi},An._adapters=Rn,An.Animation=Cs,An.Animations=Os,An.animator=xt,An.controllers=en.controllers.items,An.DatasetController=Ns,An.Element=Hs,An.elements=fo,An.Interaction=Xi,An.layouts=as,An.platforms=Ss,An.Scale=Js,An.Ticks=ae,Object.assign(An,Yn,jo,fo,t,Ss),An.Chart=An,"undefined"!=typeof window&&(window.Chart=An),An})); +//# sourceMappingURL=chart.umd.js.map diff --git a/application/single_app/static/js/chat/chat-agents.js b/application/single_app/static/js/chat/chat-agents.js index ace05642..b1e4f5fe 100644 --- a/application/single_app/static/js/chat/chat-agents.js +++ b/application/single_app/static/js/chat/chat-agents.js @@ -1,10 +1,27 @@ // chat-agents.js -import { fetchUserAgents, fetchSelectedAgent, populateAgentSelect, setSelectedAgent, getUserSetting, setUserSetting } from '../agents_common.js'; +import { + fetchUserAgents, + fetchGroupAgentsForActiveGroup, + fetchSelectedAgent, + populateAgentSelect, + setSelectedAgent, + getUserSetting, + setUserSetting +} from '../agents_common.js'; const enableAgentsBtn = document.getElementById("enable-agents-btn"); const agentSelectContainer = document.getElementById("agent-select-container"); const modelSelectContainer = document.getElementById("model-select-container"); +/** + * Check if agents are currently enabled + * @returns {boolean} True if agents are active + */ +export function areAgentsEnabled() { + const enableAgentsBtn = document.getElementById("enable-agents-btn"); + return enableAgentsBtn && enableAgentsBtn.classList.contains('active'); +} + export async function initializeAgentInteractions() { if (enableAgentsBtn && agentSelectContainer) { // On load, sync UI with enable_agents setting @@ -43,40 +60,38 @@ export async function initializeAgentInteractions() { export async function populateAgentDropdown() { const agentSelect = agentSelectContainer.querySelector('select'); try { - const agents = await fetchUserAgents(); - const selectedAgent = await fetchSelectedAgent(); - populateAgentSelect(agentSelect, agents, selectedAgent); - agentSelect.onchange = async function() { - const selectedValue = agentSelect.value; - console.log('DEBUG: Agent dropdown changed to:', selectedValue); - console.log('DEBUG: 
Available agents:', agents); - - // Parse the selected value to extract name and global status - let selectedAgentObj = null; - if (selectedValue.startsWith('global_')) { - const agentName = selectedValue.substring(7); // Remove 'global_' prefix - selectedAgentObj = agents.find(a => a.name === agentName && a.is_global === true); - } else if (selectedValue.startsWith('personal_')) { - const agentName = selectedValue.substring(9); // Remove 'personal_' prefix - selectedAgentObj = agents.find(a => a.name === agentName && a.is_global === false); - } else { - // Fallback for agents without prefix (backwards compatibility) - selectedAgentObj = agents.find(a => a.name === selectedValue); + const [userAgents, selectedAgent] = await Promise.all([ + fetchUserAgents(), + fetchSelectedAgent() + ]); + const groupAgents = await fetchGroupAgentsForActiveGroup(); + const combinedAgents = [...userAgents, ...groupAgents]; + const personalAgents = combinedAgents.filter(agent => !agent.is_global && !agent.is_group); + const activeGroupAgents = combinedAgents.filter(agent => agent.is_group); + const globalAgents = combinedAgents.filter(agent => agent.is_global); + const orderedAgents = [...personalAgents, ...activeGroupAgents, ...globalAgents]; + populateAgentSelect(agentSelect, orderedAgents, selectedAgent); + agentSelect.onchange = async function () { + const selectedOption = agentSelect.options[agentSelect.selectedIndex]; + if (!selectedOption) { + return; } - - console.log('DEBUG: Found agent object:', selectedAgentObj); - - if (selectedAgentObj) { - const payload = { name: selectedAgentObj.name, is_global: !!selectedAgentObj.is_global }; - console.log('DEBUG: Setting selected agent payload:', payload); - console.log('DEBUG: Agent is_global flag:', selectedAgentObj.is_global); - console.log('DEBUG: !!selectedAgentObj.is_global:', !!selectedAgentObj.is_global); - - await setSelectedAgent(payload); - console.log('DEBUG: Agent selection saved successfully'); - } else { - console.log('DEBUG: No agent found with value:', selectedValue); + const payload = { + name: selectedOption.dataset.name || '', + display_name: selectedOption.dataset.displayName || selectedOption.textContent || '', + id: selectedOption.dataset.agentId || null, + is_global: selectedOption.dataset.isGlobal === 'true', + is_group: selectedOption.dataset.isGroup === 'true', + group_id: selectedOption.dataset.groupId || null, + group_name: selectedOption.dataset.groupName || (window.activeGroupName || null) + }; + console.log('DEBUG: Agent dropdown changed with payload:', payload); + if (!payload.name) { + console.warn('Selected agent is missing a name, skipping settings update.'); + return; } + await setSelectedAgent(payload); + console.log('DEBUG: Agent selection saved successfully'); }; } catch (e) { console.error('Error loading agents:', e); diff --git a/application/single_app/static/js/chat/chat-citations.js b/application/single_app/static/js/chat/chat-citations.js index 9f24d000..abad0af0 100644 --- a/application/single_app/static/js/chat/chat-citations.js +++ b/application/single_app/static/js/chat/chat-citations.js @@ -226,6 +226,64 @@ export function showImagePopup(imageSrc) { modal.show(); } +export function showMetadataModal(metadataType, metadataContent, fileName) { + // Create or reuse the metadata modal + let modalContainer = document.getElementById("metadata-modal"); + if (!modalContainer) { + modalContainer = document.createElement("div"); + modalContainer.id = "metadata-modal"; + modalContainer.classList.add("modal", "fade"); + 
modalContainer.tabIndex = -1; + modalContainer.setAttribute("aria-hidden", "true"); + + modalContainer.innerHTML = ` + + `; + document.body.appendChild(modalContainer); + } + + // Update modal content + const modalTitle = modalContainer.querySelector("#metadata-modal-title"); + const fileNameEl = modalContainer.querySelector("#metadata-file-name"); + const metadataTypeEl = modalContainer.querySelector("#metadata-type"); + const metadataContentEl = modalContainer.querySelector("#metadata-content"); + + if (modalTitle) { + modalTitle.textContent = `Document Metadata - ${metadataType.charAt(0).toUpperCase() + metadataType.slice(1)}`; + } + if (fileNameEl) { + fileNameEl.textContent = fileName; + } + if (metadataTypeEl) { + metadataTypeEl.textContent = metadataType.charAt(0).toUpperCase() + metadataType.slice(1); + } + if (metadataContentEl) { + metadataContentEl.textContent = metadataContent; + } + + const modal = new bootstrap.Modal(modalContainer); + modal.show(); +} + export function showAgentCitationModal(toolName, toolArgs, toolResult) { // Create or reuse the agent citation modal let modalContainer = document.getElementById("agent-citation-modal"); @@ -248,6 +306,13 @@ export function showAgentCitationModal(toolName, toolArgs, toolResult) {
Tool Name:
+
+
Source:
+
+ +
+
+
Function Arguments:

@@ -267,17 +332,20 @@ export function showAgentCitationModal(toolName, toolArgs, toolResult) {
   const toolNameEl = document.getElementById("agent-tool-name");
   const toolArgsEl = document.getElementById("agent-tool-args");
   const toolResultEl = document.getElementById("agent-tool-result");
+  const toolSourceEl = document.getElementById("agent-tool-source");
+  const toolUrlEl = document.getElementById("agent-tool-url");
+  const toolUrlMetaEl = document.getElementById("agent-tool-url-meta");
 
   if (toolNameEl) {
     toolNameEl.textContent = toolName || "Unknown";
   }
   
+  let parsedArgs = null;
   if (toolArgsEl) {
     // Handle empty or no parameters more gracefully
     let argsContent = "";
     
     try {
-      let parsedArgs;
       if (!toolArgs || toolArgs === "" || toolArgs === "{}") {
         argsContent = "No parameters required";
       } else {
@@ -321,9 +389,9 @@ export function showAgentCitationModal(toolName, toolArgs, toolResult) {
   if (toolResultEl) {
     // Handle result formatting and truncation with expand/collapse
     let resultContent = "";
+    let parsedResult = null;
     
     try {
-      let parsedResult;
       if (!toolResult || toolResult === "" || toolResult === "{}") {
         resultContent = "No result";
       } else if (toolResult === "[object Object]") {
@@ -341,6 +409,9 @@ export function showAgentCitationModal(toolName, toolArgs, toolResult) {
     } catch (e) {
       resultContent = toolResult || "No result";
     }
+
+    const citationDetails = extractAgentCitationDetails(parsedResult || parsedArgs);
+    updateAgentCitationSource(toolSourceEl, toolUrlEl, toolUrlMetaEl, citationDetails);
     
     // Add truncation with expand/collapse if content is long
     if (resultContent.length > 300) {
@@ -366,6 +437,63 @@ export function showAgentCitationModal(toolName, toolArgs, toolResult) {
   modal.show();
 }
 
+function extractAgentCitationDetails(source) {
+  if (!source || typeof source !== "object") {
+    return null;
+  }
+
+  const url = source.url;
+  if (!isValidHttpUrl(url)) {
+    return null;
+  }
+
+  return {
+    url,
+    title: source.title || null,
+    quote: source.quote || null,
+    citationType: source.citation_type || null,
+  };
+}
+
+function updateAgentCitationSource(containerEl, linkEl, metaEl, details) {
+  if (!containerEl || !linkEl || !metaEl) {
+    return;
+  }
+
+  if (!details || !details.url) {
+    containerEl.classList.add("d-none");
+    linkEl.textContent = "";
+    linkEl.removeAttribute("href");
+    metaEl.textContent = "";
+    return;
+  }
+
+  containerEl.classList.remove("d-none");
+  linkEl.href = details.url;
+  linkEl.textContent = details.title || details.url;
+
+  const metaParts = [];
+  if (details.citationType) {
+    metaParts.push(`Type: ${details.citationType}`);
+  }
+  if (details.quote) {
+    metaParts.push(`Quote: ${details.quote}`);
+  }
+  metaEl.textContent = metaParts.join(" • ");
+}
+
+function isValidHttpUrl(value) {
+  if (!value || typeof value !== "string") {
+    return false;
+  }
+  try {
+    const parsed = new URL(value);
+    return parsed.protocol === "http:" || parsed.protocol === "https:";
+  } catch (error) {
+    return false;
+  }
+}
+
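// Editor's illustrative note (not part of this change): the helpers above only read url, title,
// quote and citation_type from the parsed tool result (or, failing that, the parsed args).
// A minimal sketch with made-up values:
//
//   extractAgentCitationDetails({
//     url: "https://example.com/reference",  // must be http(s); anything else yields null
//     title: "Example reference",
//     quote: "Relevant excerpt",
//     citation_type: "web"
//   });
//   // -> { url, title: "Example reference", quote: "Relevant excerpt", citationType: "web" }
//   // updateAgentCitationSource(...) then unhides the Source block and fills in the link and meta text.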
 // --- MODIFIED: Added citationId parameter and fallback in catch ---
 export function showPdfModal(docId, pageNumber, citationId) {
   const fetchUrl = `/view_pdf?doc_id=${encodeURIComponent(docId)}&page=${encodeURIComponent(pageNumber)}`;
@@ -460,6 +588,18 @@ if (chatboxEl) {
           return;
       }
 
+      // Check if this is a metadata citation
+      const isMetadata = target.getAttribute("data-is-metadata") === "true";
+      if (isMetadata) {
+          // Show metadata content directly in a modal
+          const metadataType = target.getAttribute("data-metadata-type");
+          const metadataContent = target.getAttribute("data-metadata-content");
+          const fileName = citationId.split('_')[0]; // Extract filename from citation ID
+          
+          showMetadataModal(metadataType, metadataContent, fileName);
+          return;
+      }
+
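// Editor's illustrative note (not part of this change): a metadata citation element is assumed
// to expose attributes like the ones read above, with the file name recovered from the citation
// id up to the first underscore (e.g. "report.pdf_keywords" -> "report.pdf"):
//
//   data-is-metadata="true"
//   data-metadata-type="keywords"
//   data-metadata-content="finance, Q3, revenue"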
       const { docId, pageNumber } = parseDocIdAndPage(citationId);
 
       // Safety check: Ensure docId and pageNumber were parsed correctly
diff --git a/application/single_app/static/js/chat/chat-conversation-details.js b/application/single_app/static/js/chat/chat-conversation-details.js
index 54ec93ae..e700b758 100644
--- a/application/single_app/static/js/chat/chat-conversation-details.js
+++ b/application/single_app/static/js/chat/chat-conversation-details.js
@@ -43,9 +43,11 @@ export async function showConversationDetails(conversationId) {
     
     const metadata = await response.json();
     
-    // Update modal title with conversation title
+    // Update modal title with conversation title, pin icon, and hidden icon
+    const pinIcon = metadata.is_pinned ? '<i class="bi bi-pin-angle me-2" title="Pinned"></i>' : '';
+    const hiddenIcon = metadata.is_hidden ? '<i class="bi bi-eye-slash me-2 text-muted" title="Hidden"></i>' : '';
     modalTitle.innerHTML = `
-      
+      ${pinIcon}${hiddenIcon}
       ${metadata.title || 'Conversation Details'}
     `;
     
@@ -73,7 +75,7 @@ export async function showConversationDetails(conversationId) {
  * @returns {string} HTML string
  */
 function renderConversationMetadata(metadata, conversationId) {
-  const { context = [], tags = [], strict = false, classification = [], last_updated, chat_type = 'personal' } = metadata;
+  const { context = [], tags = [], strict = false, classification = [], last_updated, chat_type = 'personal', is_pinned = false, is_hidden = false } = metadata;
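// Editor's illustrative note (not part of this change): the metadata payload consumed here is
// assumed to look roughly like this; the field names come from the destructuring above and from
// metadata.title used earlier, the values are made up:
//
//   {
//     "title": "Quarterly report review",
//     "context": [], "tags": [], "strict": false, "classification": ["Internal"],
//     "last_updated": "2024-05-01T12:00:00Z", "chat_type": "personal",
//     "is_pinned": true, "is_hidden": false
//   }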
   
   // Organize tags by category
   const tagsByCategory = {
@@ -118,6 +120,9 @@ function renderConversationMetadata(metadata, conversationId) {
               
Classifications: ${formatClassifications(classification)}
+
+ Status: ${is_pinned ? 'Pinned' : ''} ${is_hidden ? 'Hidden' : ''}${!is_pinned && !is_hidden ? 'Normal' : ''} +
diff --git a/application/single_app/static/js/chat/chat-conversations.js b/application/single_app/static/js/chat/chat-conversations.js index e17f95f9..9eb3e61f 100644 --- a/application/single_app/static/js/chat/chat-conversations.js +++ b/application/single_app/static/js/chat/chat-conversations.js @@ -8,6 +8,8 @@ import { toggleConversationInfoButton } from "./chat-conversation-info-button.js const newConversationBtn = document.getElementById("new-conversation-btn"); const deleteSelectedBtn = document.getElementById("delete-selected-btn"); +const pinSelectedBtn = document.getElementById("pin-selected-btn"); +const hideSelectedBtn = document.getElementById("hide-selected-btn"); const conversationsList = document.getElementById("conversations-list"); const currentConversationTitleEl = document.getElementById("current-conversation-title"); const currentConversationClassificationsEl = document.getElementById("current-conversation-classifications"); @@ -19,6 +21,11 @@ let selectedConversations = new Set(); let currentlyEditingId = null; // Track which item is being edited let selectionModeActive = false; // Track if selection mode is active let selectionModeTimer = null; // Timer for auto-hiding checkboxes +let showHiddenConversations = false; // Track if hidden conversations should be shown +let allConversations = []; // Store all conversations for client-side filtering +let isLoadingConversations = false; // Prevent concurrent loads +let showQuickSearch = false; // Track if quick search input is visible +let quickSearchTerm = ""; // Current search term // Clear selected conversations when loading the page document.addEventListener('DOMContentLoaded', () => { @@ -26,19 +33,74 @@ document.addEventListener('DOMContentLoaded', () => { if (deleteSelectedBtn) { deleteSelectedBtn.style.display = "none"; } + + // Set up quick search event listeners + const searchBtn = document.getElementById('sidebar-search-btn'); + const searchInput = document.getElementById('sidebar-search-input'); + const searchClearBtn = document.getElementById('sidebar-search-clear'); + const searchExpandBtn = document.getElementById('sidebar-search-expand'); + + if (searchBtn) { + searchBtn.addEventListener('click', (e) => { + e.stopPropagation(); + toggleQuickSearch(); + }); + } + + if (searchInput) { + searchInput.addEventListener('keyup', (e) => { + quickSearchTerm = e.target.value; + loadConversations(); + }); + + // Prevent conversation toggle when clicking in input + searchInput.addEventListener('click', (e) => { + e.stopPropagation(); + }); + } + + if (searchClearBtn) { + searchClearBtn.addEventListener('click', (e) => { + e.stopPropagation(); + clearQuickSearch(); + }); + } + + if (searchExpandBtn) { + searchExpandBtn.addEventListener('click', (e) => { + e.stopPropagation(); + // Open advanced search modal (will be implemented in chat-search-modal.js) + if (window.chatSearchModal && window.chatSearchModal.openAdvancedSearchModal) { + window.chatSearchModal.openAdvancedSearchModal(); + } + }); + } }); // Function to enter selection mode function enterSelectionMode() { + const wasInactive = !selectionModeActive; selectionModeActive = true; if (conversationsList) { conversationsList.classList.add('selection-mode'); } - // Show delete button + // Show action buttons if (deleteSelectedBtn) { deleteSelectedBtn.style.display = "block"; } + if (pinSelectedBtn) { + pinSelectedBtn.style.display = "block"; + } + if (hideSelectedBtn) { + hideSelectedBtn.style.display = "block"; + } + + // Only reload conversations if we're 
transitioning from inactive to active + // This shows hidden conversations in selection mode + if (wasInactive) { + loadConversations(); + } // Update sidebar to show selection mode hints if (window.chatSidebarConversations && window.chatSidebarConversations.setSidebarSelectionMode) { @@ -56,10 +118,16 @@ function exitSelectionMode() { conversationsList.classList.remove('selection-mode'); } - // Hide delete button + // Hide action buttons if (deleteSelectedBtn) { deleteSelectedBtn.style.display = "none"; } + if (pinSelectedBtn) { + pinSelectedBtn.style.display = "none"; + } + if (hideSelectedBtn) { + hideSelectedBtn.style.display = "none"; + } // Clear any selections selectedConversations.clear(); @@ -90,6 +158,9 @@ function exitSelectionMode() { clearTimeout(selectionModeTimer); selectionModeTimer = null; } + + // Reload conversations to hide hidden ones if toggle is off + loadConversations(); } // Function to reset the selection mode timer @@ -107,35 +178,180 @@ function resetSelectionModeTimer() { }, 5000); } +// Quick search functions +function toggleQuickSearch() { + const searchContainer = document.getElementById('sidebar-search-container'); + const searchInput = document.getElementById('sidebar-search-input'); + const conversationsSection = document.getElementById('conversations-section'); + const conversationsCaret = document.getElementById('conversations-caret'); + + if (!searchContainer) return; + + showQuickSearch = !showQuickSearch; + + if (showQuickSearch) { + searchContainer.style.display = 'block'; + // Expand conversations section if collapsed + if (conversationsSection) { + const listContainer = document.getElementById('conversations-list-container'); + if (listContainer && listContainer.style.display === 'none') { + listContainer.style.display = 'block'; + if (conversationsCaret) { + conversationsCaret.classList.add('rotate-180'); + } + } + } + // Focus on search input + setTimeout(() => searchInput && searchInput.focus(), 100); + } else { + searchContainer.style.display = 'none'; + clearQuickSearch(); + } +} + +function applyQuickSearchFilter(conversations) { + if (!quickSearchTerm || quickSearchTerm.trim() === '') { + return conversations; + } + + const searchLower = quickSearchTerm.toLowerCase().trim(); + return conversations.filter(convo => { + const titleLower = (convo.title || '').toLowerCase(); + return titleLower.includes(searchLower); + }); +} + +function clearQuickSearch() { + quickSearchTerm = ''; + const searchInput = document.getElementById('sidebar-search-input'); + if (searchInput) { + searchInput.value = ''; + } + loadConversations(); +} + export function loadConversations() { if (!conversationsList) return; + + // Prevent concurrent loads + if (isLoadingConversations) { + console.log('Load already in progress, skipping...'); + return; + } + + isLoadingConversations = true; conversationsList.innerHTML = '
Loading conversations...
'; // Loading state - fetch("/api/get_conversations") + return fetch("/api/get_conversations") .then(response => response.ok ? response.json() : response.json().then(err => Promise.reject(err))) .then(data => { conversationsList.innerHTML = ""; // Clear loading state if (!data.conversations || data.conversations.length === 0) { conversationsList.innerHTML = '
No conversations yet.
'; + allConversations = []; + updateHiddenToggleButton(); return; } - data.conversations.forEach(convo => { - conversationsList.appendChild(createConversationItem(convo)); + + // Store all conversations for client-side operations + allConversations = data.conversations; + + // Sort conversations: pinned first (by last_updated), then unpinned (by last_updated) + const sortedConversations = [...allConversations].sort((a, b) => { + const aPinned = a.is_pinned || false; + const bPinned = b.is_pinned || false; + + // If pin status differs, pinned comes first + if (aPinned !== bPinned) { + return bPinned ? 1 : -1; + } + + // If same pin status, sort by last_updated (most recent first) + const aDate = new Date(a.last_updated); + const bDate = new Date(b.last_updated); + return bDate - aDate; + }); + + // Filter conversations based on show/hide mode and selection mode + let filteredConversations = sortedConversations.filter(convo => { + const isHidden = convo.is_hidden || false; + // Show hidden conversations if toggle is on OR if we're in selection mode + return !isHidden || showHiddenConversations || selectionModeActive; }); + // Apply quick search filter + filteredConversations = applyQuickSearchFilter(filteredConversations); + + if (filteredConversations.length === 0) { + conversationsList.innerHTML = '
No visible conversations. Click the eye icon to show hidden conversations.
'; + } else { + filteredConversations.forEach(convo => { + conversationsList.appendChild(createConversationItem(convo)); + }); + } + + // Update the show/hide toggle button + updateHiddenToggleButton(); + // Also load sidebar conversations if the sidebar exists if (window.chatSidebarConversations && window.chatSidebarConversations.loadSidebarConversations) { window.chatSidebarConversations.loadSidebarConversations(); } + // Reset loading flag + isLoadingConversations = false; + // Optionally, select the first conversation or highlight the active one if ID is known }) .catch(error => { console.error("Error loading conversations:", error); conversationsList.innerHTML = `
Error loading conversations: ${error.error || 'Unknown error'}
`; + isLoadingConversations = false; // Reset flag on error too }); } +// Ensure a conversation exists in the list; fetch metadata if missing +export async function ensureConversationPresent(conversationId) { + if (!conversationId) throw new Error('No conversationId provided'); + + // Already in list + const existing = document.querySelector(`.conversation-item[data-conversation-id="${conversationId}"]`); + if (existing) return existing; + + // Fetch metadata to validate ownership and get details + const res = await fetch(`/api/conversations/${conversationId}/metadata`); + if (!res.ok) { + const err = await res.json().catch(() => ({})); + throw new Error(err.error || `Failed to load conversation ${conversationId}`); + } + const metadata = await res.json(); + + // Build a conversation object compatible with createConversationItem + const convo = { + id: conversationId, + title: metadata.title || 'Conversation', + last_updated: metadata.last_updated || new Date().toISOString(), + classification: metadata.classification || [], + context: metadata.context || [], + chat_type: metadata.chat_type || null, + is_pinned: metadata.is_pinned || false, + is_hidden: metadata.is_hidden || false, + }; + + // Keep allConversations in sync + allConversations = [convo, ...allConversations.filter(c => c.id !== conversationId)]; + + const convoItem = createConversationItem(convo); + conversationsList.prepend(convoItem); + + // Refresh sidebar so it appears there too + if (window.chatSidebarConversations && window.chatSidebarConversations.loadSidebarConversations) { + window.chatSidebarConversations.loadSidebarConversations(); + } + + return convoItem; +} + export function createConversationItem(convo) { const convoItem = document.createElement("div"); // Changed from to
for better semantics with checkboxes convoItem.classList.add("list-group-item", "list-group-item-action", "conversation-item", "d-flex", "align-items-center"); // Use action class @@ -215,7 +431,16 @@ export function createConversationItem(convo) { const titleSpan = document.createElement("span"); titleSpan.classList.add("conversation-title", "text-truncate"); // Bold and truncate - titleSpan.textContent = convo.title; + + // Add pin icon if conversation is pinned + const isPinned = convo.is_pinned || false; + if (isPinned) { + const pinIcon = document.createElement("i"); + pinIcon.classList.add("bi", "bi-pin-angle", "me-1"); + titleSpan.appendChild(pinIcon); + } + + titleSpan.appendChild(document.createTextNode(convo.title)); titleSpan.title = convo.title; // Tooltip for full title const dateSpan = document.createElement("small"); @@ -250,6 +475,26 @@ export function createConversationItem(convo) { detailsA.innerHTML = 'Details'; detailsLi.appendChild(detailsA); + // Add Pin option + const pinLi = document.createElement("li"); + const pinA = document.createElement("a"); + pinA.classList.add("dropdown-item", "pin-btn"); + pinA.href = "#"; + // isPinned already declared above for title icon + pinA.innerHTML = `${isPinned ? 'Unpin' : 'Pin'}`; + pinA.setAttribute("data-is-pinned", isPinned); + pinLi.appendChild(pinA); + + // Add Hide option + const hideLi = document.createElement("li"); + const hideA = document.createElement("a"); + hideA.classList.add("dropdown-item", "hide-btn"); + hideA.href = "#"; + const isHidden = convo.is_hidden || false; + hideA.innerHTML = `${isHidden ? 'Unhide' : 'Hide'}`; + hideA.setAttribute("data-is-hidden", isHidden); + hideLi.appendChild(hideA); + // Add Select option const selectLi = document.createElement("li"); const selectA = document.createElement("a"); @@ -273,6 +518,8 @@ export function createConversationItem(convo) { deleteLi.appendChild(deleteA); dropdownMenu.appendChild(detailsLi); + dropdownMenu.appendChild(pinLi); + dropdownMenu.appendChild(hideLi); dropdownMenu.appendChild(selectLi); dropdownMenu.appendChild(editLi); dropdownMenu.appendChild(deleteLi); @@ -323,6 +570,30 @@ export function createConversationItem(convo) { enterSelectionMode(); }); + // Add event listener for the Pin button + pinA.addEventListener("click", (event) => { + event.preventDefault(); + event.stopPropagation(); + closeDropdownMenu(dropdownBtn); + toggleConversationPin(convo.id); + }); + + // Add event listener for the Hide button + hideA.addEventListener("click", (event) => { + event.preventDefault(); + event.stopPropagation(); + closeDropdownMenu(dropdownBtn); + toggleConversationHide(convo.id); + }); + + // Add event listener for the Details button + detailsA.addEventListener("click", (event) => { + event.preventDefault(); + event.stopPropagation(); + closeDropdownMenu(dropdownBtn); + showConversationDetails(convo.id); + }); + return convoItem; } @@ -531,17 +802,36 @@ export async function selectConversation(conversationId) { const conversationTitle = convoItem.getAttribute("data-conversation-title") || "Conversation"; // Use stored title - // Update Header Title - if (currentConversationTitleEl) { - currentConversationTitleEl.textContent = conversationTitle; - } - - // Fetch the latest conversation metadata to get accurate chat_type + // Fetch the latest conversation metadata to get accurate chat_type, pin, and hide status try { const response = await fetch(`/api/conversations/${conversationId}/metadata`); if (response.ok) { const metadata = await response.json(); + // 
Update Header Title with pin icon and hidden status + if (currentConversationTitleEl) { + currentConversationTitleEl.innerHTML = ''; + + // Add pin icon if pinned + if (metadata.is_pinned) { + const pinIcon = document.createElement("i"); + pinIcon.classList.add("bi", "bi-pin-angle", "me-2"); + pinIcon.title = "Pinned"; + currentConversationTitleEl.appendChild(pinIcon); + } + + // Add hidden icon if hidden + if (metadata.is_hidden) { + const hiddenIcon = document.createElement("i"); + hiddenIcon.classList.add("bi", "bi-eye-slash", "me-2", "text-muted"); + hiddenIcon.title = "Hidden"; + currentConversationTitleEl.appendChild(hiddenIcon); + } + + // Add title text + currentConversationTitleEl.appendChild(document.createTextNode(conversationTitle)); + } + console.log(`selectConversation: Fetched metadata for ${conversationId}:`, metadata); // Update conversation item with accurate chat_type from metadata @@ -674,6 +964,8 @@ export async function selectConversation(conversationId) { setSidebarActiveConversation(conversationId); } + updateConversationUrl(conversationId); + // Clear any "edit mode" state if switching conversations if (currentlyEditingId && currentlyEditingId !== conversationId) { const editingItem = document.querySelector(`.conversation-item[data-conversation-id="${currentlyEditingId}"]`); @@ -744,6 +1036,14 @@ export function deleteConversation(conversationId) { export async function createNewConversation(callback) { // Disable new button? Show loading? if (newConversationBtn) newConversationBtn.disabled = true; + + // Clear the chatbox immediately when creating new conversation + const chatbox = document.getElementById("chatbox"); + if (chatbox && !callback) { + // Only clear if there's no callback (i.e., not sending a message immediately) + chatbox.innerHTML = ""; + } + try { const response = await fetch("/api/create_conversation", { method: "POST", @@ -764,14 +1064,25 @@ export async function createNewConversation(callback) { currentConversationId = data.conversation_id; // Add to list (pass empty classifications for new convo) addConversationToList(data.conversation_id, data.title /* Use title from API if provided */, []); - // Select the new conversation to update header and chatbox - selectConversation(data.conversation_id); + + // Don't call selectConversation here if we're about to send a message + // because selectConversation clears the chatbox, which would remove + // the user message that's about to be appended by actuallySendMessage + // Instead, just update the UI elements directly + window.currentConversationId = data.conversation_id; + const titleEl = document.getElementById("current-conversation-title"); + if (titleEl) { + titleEl.textContent = data.title || "New Conversation"; + } + updateConversationUrl(data.conversation_id); + console.log('[createNewConversation] Created conversation without reload:', data.conversation_id); // Execute callback if provided (e.g., to send the first message) if (typeof callback === "function") { callback(); } + } catch (error) { console.error("Error creating conversation:", error); showToast(`Failed to create a new conversation: ${error.message}`, "danger"); @@ -808,11 +1119,109 @@ function updateSelectedConversations(conversationId, isSelected) { window.chatSidebarConversations.updateSidebarDeleteButton(selectedConversations.size); } - // Show/hide the delete button based on selection + // Show/hide the action buttons based on selection if (selectedConversations.size > 0) { - deleteSelectedBtn.style.display = "block"; + if 
(deleteSelectedBtn) deleteSelectedBtn.style.display = "block"; + if (pinSelectedBtn) pinSelectedBtn.style.display = "block"; + if (hideSelectedBtn) hideSelectedBtn.style.display = "block"; } else { - deleteSelectedBtn.style.display = "none"; + if (deleteSelectedBtn) deleteSelectedBtn.style.display = "none"; + if (pinSelectedBtn) pinSelectedBtn.style.display = "none"; + if (hideSelectedBtn) hideSelectedBtn.style.display = "none"; + } +} + +// Function to bulk pin/unpin conversations +async function bulkPinConversations() { + if (selectedConversations.size === 0) return; + + const action = confirm(`Pin ${selectedConversations.size} conversation(s)?`) ? 'pin' : null; + if (!action) return; + + const conversationIds = Array.from(selectedConversations); + + try { + const response = await fetch('/api/conversations/bulk-pin', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + conversation_ids: conversationIds, + action: action + }) + }); + + if (!response.ok) { + const error = await response.json(); + throw new Error(error.error || 'Failed to pin conversations'); + } + + const result = await response.json(); + + // Clear selections and exit selection mode + selectedConversations.clear(); + exitSelectionMode(); + + // Reload conversations to reflect new sort order + loadConversations(); + + // Also reload sidebar conversations if the sidebar exists + if (window.chatSidebarConversations && window.chatSidebarConversations.loadSidebarConversations) { + window.chatSidebarConversations.loadSidebarConversations(); + } + + showToast(`${result.updated_count} conversation(s) ${action === 'pin' ? 'pinned' : 'unpinned'}.`, "success"); + } catch (error) { + console.error("Error pinning conversations:", error); + showToast(`Error pinning conversations: ${error.message}`, "danger"); + } +} + +// Function to bulk hide/unhide conversations +async function bulkHideConversations() { + if (selectedConversations.size === 0) return; + + const action = confirm(`Hide ${selectedConversations.size} conversation(s)?`) ? 'hide' : null; + if (!action) return; + + const conversationIds = Array.from(selectedConversations); + + try { + const response = await fetch('/api/conversations/bulk-hide', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + conversation_ids: conversationIds, + action: action + }) + }); + + if (!response.ok) { + const error = await response.json(); + throw new Error(error.error || 'Failed to hide conversations'); + } + + const result = await response.json(); + + // Clear selections and exit selection mode + selectedConversations.clear(); + exitSelectionMode(); + + // Reload conversations to reflect filtering + loadConversations(); + + // Also reload sidebar conversations if the sidebar exists + if (window.chatSidebarConversations && window.chatSidebarConversations.loadSidebarConversations) { + window.chatSidebarConversations.loadSidebarConversations(); + } + + showToast(`${result.updated_count} conversation(s) ${action === 'hide' ? 
'hidden' : 'unhidden'}.`, "success"); + } catch (error) { + console.error("Error hiding conversations:", error); + showToast(`Error hiding conversations: ${error.message}`, "danger"); } } @@ -858,7 +1267,9 @@ async function deleteSelectedConversations() { // Clear the selected conversations set and exit selection mode selectedConversations.clear(); - deleteSelectedBtn.style.display = "none"; + if (deleteSelectedBtn) deleteSelectedBtn.style.display = "none"; + if (pinSelectedBtn) pinSelectedBtn.style.display = "none"; + if (hideSelectedBtn) hideSelectedBtn.style.display = "none"; exitSelectionMode(); // Also reload sidebar conversations if the sidebar exists @@ -873,6 +1284,109 @@ async function deleteSelectedConversations() { } } +// Toggle conversation pin status +async function toggleConversationPin(conversationId) { + try { + const response = await fetch(`/api/conversations/${conversationId}/pin`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + } + }); + + if (!response.ok) { + const error = await response.json(); + throw new Error(error.error || 'Failed to toggle pin status'); + } + + const data = await response.json(); + + // Reload conversations to reflect new sort order + loadConversations(); + + // Also reload sidebar conversations if the sidebar exists + if (window.chatSidebarConversations && window.chatSidebarConversations.loadSidebarConversations) { + window.chatSidebarConversations.loadSidebarConversations(); + } + + showToast(data.is_pinned ? "Conversation pinned." : "Conversation unpinned.", "success"); + } catch (error) { + console.error("Error toggling pin status:", error); + showToast(`Error toggling pin: ${error.message}`, "danger"); + } +} + +// Toggle conversation hide status +async function toggleConversationHide(conversationId) { + try { + const response = await fetch(`/api/conversations/${conversationId}/hide`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + } + }); + + if (!response.ok) { + const error = await response.json(); + throw new Error(error.error || 'Failed to toggle hide status'); + } + + const data = await response.json(); + + // Reload conversations to reflect filtering + loadConversations(); + + // Also reload sidebar conversations if the sidebar exists + if (window.chatSidebarConversations && window.chatSidebarConversations.loadSidebarConversations) { + window.chatSidebarConversations.loadSidebarConversations(); + } + + showToast(data.is_hidden ? "Conversation hidden." 
: "Conversation unhidden.", "success"); + } catch (error) { + console.error("Error toggling hide status:", error); + showToast(`Error toggling hide: ${error.message}`, "danger"); + } +} + +// Update the show/hide toggle button visibility and badge +function updateHiddenToggleButton() { + let toggleBtn = document.getElementById("toggle-hidden-btn"); + + // Count hidden conversations + const hiddenCount = allConversations.filter(c => c.is_hidden || false).length; + + if (hiddenCount > 0) { + // Create button if it doesn't exist + if (!toggleBtn) { + toggleBtn = document.createElement("button"); + toggleBtn.id = "toggle-hidden-btn"; + toggleBtn.classList.add("btn", "btn-outline-secondary", "btn-sm", "ms-2"); + toggleBtn.title = "Show/hide hidden conversations"; + + // Insert after the new conversation button + if (newConversationBtn && newConversationBtn.parentElement) { + newConversationBtn.parentElement.insertBefore(toggleBtn, newConversationBtn.nextSibling); + } + + // Add click event + toggleBtn.addEventListener("click", () => { + showHiddenConversations = !showHiddenConversations; + loadConversations(); + }); + } + + // Update button content based on current state + const icon = showHiddenConversations ? "bi-eye-slash" : "bi-eye"; + toggleBtn.innerHTML = ` ${hiddenCount}`; + toggleBtn.style.display = "inline-block"; + } else { + // Hide button if no hidden conversations + if (toggleBtn) { + toggleBtn.style.display = "none"; + } + } +} + // --- Event Listeners --- if (newConversationBtn) { newConversationBtn.addEventListener("click", () => { @@ -889,6 +1403,59 @@ if (deleteSelectedBtn) { deleteSelectedBtn.addEventListener("click", deleteSelectedConversations); } +if (pinSelectedBtn) { + pinSelectedBtn.addEventListener("click", bulkPinConversations); +} + +if (hideSelectedBtn) { + hideSelectedBtn.addEventListener("click", bulkHideConversations); +} + +// Helper function to set show hidden conversations state and return a promise +export function setShowHiddenConversations(value) { + showHiddenConversations = value; + + // If enabling hidden conversations and the list is already loaded, just re-render + if (value && allConversations.length > 0) { + // Re-filter and render without fetching + const sortedConversations = [...allConversations].sort((a, b) => { + const aPinned = a.is_pinned || false; + const bPinned = b.is_pinned || false; + if (aPinned !== bPinned) return bPinned ? 1 : -1; + const aDate = new Date(a.last_updated); + const bDate = new Date(b.last_updated); + return bDate - aDate; + }); + + let filteredConversations = sortedConversations.filter(convo => { + const isHidden = convo.is_hidden || false; + return !isHidden || showHiddenConversations || selectionModeActive; + }); + + filteredConversations = applyQuickSearchFilter(filteredConversations); + + if (conversationsList) { + conversationsList.innerHTML = ""; + if (filteredConversations.length === 0) { + conversationsList.innerHTML = '
No visible conversations.
'; + } else { + filteredConversations.forEach(convo => { + conversationsList.appendChild(createConversationItem(convo)); + }); + } + } + + updateHiddenToggleButton(); + + if (window.chatSidebarConversations && window.chatSidebarConversations.loadSidebarConversations) { + window.chatSidebarConversations.loadSidebarConversations(); + } + } else { + // Otherwise do a full reload + loadConversations(); + } +} + // Expose functions globally for sidebar integration window.chatConversations = { selectConversation, @@ -898,10 +1465,14 @@ window.chatConversations = { deleteConversation, toggleConversationSelection, deleteSelectedConversations, + bulkPinConversations, + bulkHideConversations, exitSelectionMode, isSelectionModeActive: () => selectionModeActive, getSelectedConversations: () => Array.from(selectedConversations), getCurrentConversationId: () => currentConversationId, + getQuickSearchTerm: () => quickSearchTerm, + setShowHiddenConversations, updateConversationHeader: (conversationId, newTitle) => { // Update header if this is the currently active conversation if (currentConversationId === conversationId) { @@ -1041,4 +1612,16 @@ function addChatTypeBadges(convoItem, classificationsEl) { // If chatType is unknown/null or model-only, don't add any workspace badges console.log(`addChatTypeBadges: No badges added for chatType="${chatType}" (likely model-only conversation)`); } +} + +function updateConversationUrl(conversationId) { + if (!conversationId) return; + + try { + const url = new URL(window.location.href); + url.searchParams.set('conversationId', conversationId); + window.history.replaceState({}, '', url.toString()); + } catch (error) { + console.warn('Failed to update conversation URL:', error); + } } \ No newline at end of file diff --git a/application/single_app/static/js/chat/chat-documents.js b/application/single_app/static/js/chat/chat-documents.js index c2dbefad..174a7c7d 100644 --- a/application/single_app/static/js/chat/chat-documents.js +++ b/application/single_app/static/js/chat/chat-documents.js @@ -1,7 +1,7 @@ // chat-documents.js -import { showToast } from "./chat-toast.js"; // Assuming you have this -import { toBoolean } from "./chat-utils.js"; // Import the toBoolean helper +import { showToast } from "./chat-toast.js"; +import { toBoolean } from "./chat-utils.js"; export const docScopeSelect = document.getElementById("doc-scope-select"); const searchDocumentsBtn = document.getElementById("search-documents-btn"); diff --git a/application/single_app/static/js/chat/chat-edit.js b/application/single_app/static/js/chat/chat-edit.js new file mode 100644 index 00000000..0e09b0d6 --- /dev/null +++ b/application/single_app/static/js/chat/chat-edit.js @@ -0,0 +1,223 @@ +// chat-edit.js +// Handles message edit functionality + +import { showToast } from './chat-toast.js'; +import { showLoadingIndicatorInChatbox, hideLoadingIndicatorInChatbox } from './chat-loading-indicator.js'; + +/** + * Handle edit button click - opens edit modal + */ +export function handleEditButtonClick(messageDiv, messageId, messageType) { + console.log(`✏️ Edit button clicked for ${messageType} message: ${messageId}`); + + // Store message info for edit execution + window.pendingMessageEdit = { + messageDiv, + messageId, + messageType + }; + + // Get the current message content + const messageTextDiv = messageDiv.querySelector('.message-text'); + const currentContent = messageTextDiv ? 
messageTextDiv.textContent : ''; + + // Populate edit modal with current content + const editTextarea = document.getElementById('edit-message-content'); + if (editTextarea) { + editTextarea.value = currentContent; + } + + // Get original message metadata to display settings info + fetch(`/api/message/${messageId}/metadata`) + .then(response => response.json()) + .then(metadata => { + console.log('📊 Original message metadata:', metadata); + + // Store metadata for later use in executeMessageEdit + window.pendingMessageEdit.metadata = metadata; + + // Display original settings in modal + const settingsInfoDiv = document.getElementById('edit-original-settings-info'); + if (settingsInfoDiv) { + const agentSelection = metadata?.agent_selection; + const modelName = metadata?.model_selection?.selected_model; + const reasoningEffort = metadata?.reasoning_effort; + const docSearchEnabled = metadata?.document_search?.enabled || false; + + let settingsHtml = 'Original settings: '; + + // Show agent if used, otherwise show model + if (agentSelection && (agentSelection.agent_display_name || agentSelection.selected_agent)) { + const agentName = agentSelection.agent_display_name || agentSelection.selected_agent; + settingsHtml += `🤖 ${agentName}`; + } else if (modelName) { + settingsHtml += `${modelName}`; + } else { + settingsHtml += 'Default model'; + } + + if (reasoningEffort) { + settingsHtml += `, Reasoning: ${reasoningEffort}`; + } + + if (docSearchEnabled) { + settingsHtml += `, Document search enabled`; + } + + settingsHtml += ''; + settingsInfoDiv.innerHTML = settingsHtml; + } + }) + .catch(error => { + console.error('❌ Error fetching message metadata:', error); + }); + + // Show the edit modal + const editModal = new bootstrap.Modal(document.getElementById('edit-message-modal')); + editModal.show(); +} + +/** + * Execute message edit - called when user confirms edit in modal + */ +window.executeMessageEdit = function() { + const pendingEdit = window.pendingMessageEdit; + if (!pendingEdit) { + console.error('❌ No pending edit found'); + return; + } + + const { messageDiv, messageId, messageType } = pendingEdit; + + console.log(`🚀 Executing edit for ${messageType} message: ${messageId}`); + + // Get edited content from textarea + const editTextarea = document.getElementById('edit-message-content'); + const editedContent = editTextarea ? editTextarea.value.trim() : ''; + + if (!editedContent) { + showToast('error', 'Message content cannot be empty'); + return; + } + + console.log(`📝 Edited content length: ${editedContent.length} characters`); + + // Close the modal explicitly + const modalElement = document.getElementById('edit-message-modal'); + if (modalElement) { + const modalInstance = bootstrap.Modal.getInstance(modalElement); + if (modalInstance) { + modalInstance.hide(); + } + } + + // Wait a bit for modal to close, then show loading indicator + setTimeout(() => { + console.log('⏰ Modal closed, showing AI typing indicator...'); + + // Show "AI is typing..." 
indicator + showLoadingIndicatorInChatbox(); + + // Call edit API endpoint + console.log('📡 Calling edit API endpoint...'); + fetch(`/api/message/${messageId}/edit`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + content: editedContent + }) + }) + .then(response => { + if (!response.ok) { + return response.json().then(data => { + throw new Error(data.error || 'Edit failed'); + }); + } + return response.json(); + }) + .then(data => { + console.log('✅ Edit API response:', data); + + if (data.success && data.chat_request) { + console.log('🔄 Edit initiated, calling chat API with:'); + console.log(' edited_user_message_id:', data.chat_request.edited_user_message_id); + console.log(' retry_thread_id:', data.chat_request.retry_thread_id); + console.log(' retry_thread_attempt:', data.chat_request.retry_thread_attempt); + console.log(' Full chat_request:', data.chat_request); + + // Call chat API with the edit parameters + return fetch('/api/chat', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + credentials: 'same-origin', + body: JSON.stringify(data.chat_request) + }); + } else { + throw new Error('Edit response missing chat_request'); + } + }) + .then(response => { + if (!response.ok) { + return response.json().then(data => { + throw new Error(data.error || 'Chat API failed'); + }); + } + return response.json(); + }) + .then(chatData => { + console.log('✅ Chat API response:', chatData); + + // Hide typing indicator + hideLoadingIndicatorInChatbox(); + console.log('🧹 Typing indicator removed'); + + // Get current conversation ID using the proper API + const conversationId = window.chatConversations?.getCurrentConversationId(); + + console.log(`🔍 Current conversation ID: ${conversationId}`); + + // Reload messages to show edited message and new response + if (conversationId) { + console.log('🔄 Reloading messages for conversation:', conversationId); + + // Import loadMessages dynamically + import('./chat-messages.js').then(module => { + console.log('📦 chat-messages.js module loaded, calling loadMessages...'); + module.loadMessages(conversationId); + // No toast - the reloaded messages are enough feedback + }).catch(err => { + console.error('❌ Error loading chat-messages module:', err); + showToast('error', 'Failed to reload messages'); + }); + } else { + console.error('❌ No currentConversationId found!'); + + // Try to force a page refresh as fallback + console.log('🔄 Attempting page refresh as fallback...'); + setTimeout(() => { + window.location.reload(); + }, 1000); + } + }) + .catch(error => { + console.error('❌ Edit error:', error); + + // Hide typing indicator on error + hideLoadingIndicatorInChatbox(); + + showToast('error', `Edit failed: ${error.message}`); + }) + .finally(() => { + // Clean up pending edit + window.pendingMessageEdit = null; + }); + + }, 300); // End of setTimeout - wait 300ms for modal to close +}; + +// Make functions available globally for event handlers +window.handleEditButtonClick = handleEditButtonClick; diff --git a/application/single_app/static/js/chat/chat-enhanced-citations.js b/application/single_app/static/js/chat/chat-enhanced-citations.js index 18c75229..dcda708b 100644 --- a/application/single_app/static/js/chat/chat-enhanced-citations.js +++ b/application/single_app/static/js/chat/chat-enhanced-citations.js @@ -15,8 +15,8 @@ export function getFileType(fileName) { const ext = fileName.toLowerCase().split('.').pop(); - const imageExtensions = ['jpg', 'jpeg', 'png', 'bmp', 
'tiff', 'tif', 'heif']; - const videoExtensions = ['mp4', 'mov', 'avi', 'mkv', 'flv', 'webm', 'wmv']; + const imageExtensions = ['jpg', 'jpeg', 'png', 'bmp', 'tiff', 'tif']; + const videoExtensions = ['mp4', 'mov', 'avi', 'mkv', 'flv', 'webm', 'wmv', 'm4v', '3gp']; const audioExtensions = ['mp3', 'wav', 'ogg', 'aac', 'flac', 'm4a']; if (imageExtensions.includes(ext)) return 'image'; diff --git a/application/single_app/static/js/chat/chat-feedback.js b/application/single_app/static/js/chat/chat-feedback.js index 0db9f54e..e02fc29c 100644 --- a/application/single_app/static/js/chat/chat-feedback.js +++ b/application/single_app/static/js/chat/chat-feedback.js @@ -8,18 +8,9 @@ const feedbackForm = document.getElementById("feedback-form"); export function renderFeedbackIcons(messageId, conversationId) { if (toBoolean(window.enableUserFeedback)) { return ` - +
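The exact markup `renderFeedbackIcons` returns is not shown in full here; what the delegated click handler below relies on is that each `.feedback-btn` carries `data-feedback-type`, `data-ai-message-id`, and `data-conversation-id`. Purely as an illustration of that contract, not the project's actual template, a compatible fragment could look like this (the `positive`/`negative` values and button labels are assumptions):

```js
// Illustrative sketch only; the attribute names are taken from the
// delegated click handler, everything else is assumed.
function renderFeedbackIconsSketch(messageId, conversationId) {
    return `
        <button class="feedback-btn" data-feedback-type="positive"
                data-ai-message-id="${messageId}"
                data-conversation-id="${conversationId}">👍</button>
        <button class="feedback-btn" data-feedback-type="negative"
                data-ai-message-id="${messageId}"
                data-conversation-id="${conversationId}">👎</button>`;
}
```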
  • +
  • +
  • `; } else { @@ -57,8 +48,10 @@ document.addEventListener("click", function (event) { const feedbackBtn = event.target.closest(".feedback-btn"); if (!feedbackBtn) return; + event.preventDefault(); + const feedbackType = feedbackBtn.getAttribute("data-feedback-type"); - const messageId = feedbackBtn.closest(".feedback-icons").getAttribute("data-ai-message-id"); + const messageId = feedbackBtn.getAttribute("data-ai-message-id"); const conversationId = feedbackBtn.getAttribute("data-conversation-id"); feedbackBtn.classList.add("clicked"); @@ -70,6 +63,11 @@ document.addEventListener("click", function (event) { feedbackBtn.classList.remove("clicked"); }, 500); } else { + // Remove clicked class immediately for negative feedback since modal will show + setTimeout(() => { + feedbackBtn.classList.remove("clicked"); + }, 100); + const modalEl = new bootstrap.Modal(document.getElementById("feedback-modal")); document.getElementById("feedback-ai-response-id").value = messageId; document.getElementById("feedback-conversation-id").value = conversationId; diff --git a/application/single_app/static/js/chat/chat-input-actions.js b/application/single_app/static/js/chat/chat-input-actions.js index ad8e8088..77851319 100644 --- a/application/single_app/static/js/chat/chat-input-actions.js +++ b/application/single_app/static/js/chat/chat-input-actions.js @@ -86,6 +86,16 @@ export function uploadFileToConversation(file) { .then((data) => { if (data.conversation_id) { currentConversationId = data.conversation_id; + + // If a title was returned and it's different from "New Conversation", + // update the conversation title in the UI + if (data.title && data.title !== "New Conversation") { + const currentConversationTitleEl = document.getElementById("current-conversation-title"); + if (currentConversationTitleEl) { + currentConversationTitleEl.textContent = data.title; + } + } + loadMessages(currentConversationId); loadConversations(); } else { @@ -298,6 +308,8 @@ if (imageGenBtn) { const docBtn = document.getElementById("search-documents-btn"); const webBtn = document.getElementById("search-web-btn"); const fileBtn = document.getElementById("choose-file-btn"); + const streamingBtn = document.getElementById("streaming-toggle-btn"); + const modelSelectContainer = document.getElementById("model-select-container"); if (isImageGenEnabled) { if (docBtn) { @@ -312,17 +324,61 @@ if (imageGenBtn) { fileBtn.disabled = true; fileBtn.classList.remove("active"); } + // Hide streaming toggle and model selector for image generation + if (streamingBtn) { + streamingBtn.style.display = "none"; + } + if (modelSelectContainer) { + modelSelectContainer.style.display = "none"; + } } else { if (docBtn) docBtn.disabled = false; if (webBtn) webBtn.disabled = false; if (fileBtn) fileBtn.disabled = false; + // Show streaming toggle and model selector when not in image generation mode + if (streamingBtn) { + streamingBtn.style.display = "flex"; + } + if (modelSelectContainer) { + modelSelectContainer.style.display = "block"; + } } }); } if (webSearchBtn) { + const webSearchNoticeContainer = document.getElementById("web-search-notice-container"); + const webSearchNoticeDismiss = document.getElementById("web-search-notice-dismiss"); + const webSearchNoticeSessionKey = "webSearchNoticeDismissed"; + + // Check if notice was dismissed this session + const isNoticeDismissed = () => sessionStorage.getItem(webSearchNoticeSessionKey) === "true"; + + // Show/hide notice based on web search state + const updateWebSearchNotice = (isActive) => { + 
if (webSearchNoticeContainer && window.appSettings?.enable_web_search_user_notice) { + if (isActive && !isNoticeDismissed()) { + webSearchNoticeContainer.style.display = "block"; + } else { + webSearchNoticeContainer.style.display = "none"; + } + } + }; + + // Dismiss button handler + if (webSearchNoticeDismiss) { + webSearchNoticeDismiss.addEventListener("click", function() { + sessionStorage.setItem(webSearchNoticeSessionKey, "true"); + if (webSearchNoticeContainer) { + webSearchNoticeContainer.style.display = "none"; + } + }); + } + webSearchBtn.addEventListener("click", function () { this.classList.toggle("active"); + const isActive = this.classList.contains("active"); + updateWebSearchNotice(isActive); }); } @@ -348,13 +404,29 @@ if (fileInputEl) { // Hide the upload button since we're auto-uploading uploadBtn.style.display = "none"; - // Automatically upload the file - if (!currentConversationId) { - createNewConversation(() => { + // Check for user agreement before uploading + const doUpload = () => { + if (!currentConversationId) { + createNewConversation(() => { + uploadFileToConversation(file); + }); + } else { uploadFileToConversation(file); - }); + } + }; + + // Check if UserAgreementManager exists and check for agreement + if (window.UserAgreementManager) { + window.UserAgreementManager.checkBeforeUpload( + fileInputEl.files, + 'chat', + 'default', + function(files) { + doUpload(); + } + ); } else { - uploadFileToConversation(file); + doUpload(); } } else { resetFileButton(); @@ -381,12 +453,29 @@ if (uploadBtn) { return; } - if (!currentConversationId) { - createNewConversation(() => { + // Check for user agreement before uploading + const doUpload = () => { + if (!currentConversationId) { + createNewConversation(() => { + uploadFileToConversation(file); + }); + } else { uploadFileToConversation(file); - }); + } + }; + + // Check if UserAgreementManager exists and check for agreement + if (window.UserAgreementManager) { + window.UserAgreementManager.checkBeforeUpload( + fileInput.files, + 'chat', + 'default', + function(files) { + doUpload(); + } + ); } else { - uploadFileToConversation(file); + doUpload(); } }); } diff --git a/application/single_app/static/js/chat/chat-layout.js b/application/single_app/static/js/chat/chat-layout.js index 8b07e498..d2206c5c 100644 --- a/application/single_app/static/js/chat/chat-layout.js +++ b/application/single_app/static/js/chat/chat-layout.js @@ -70,7 +70,10 @@ export function saveUserSetting(settingUpdate) { if (!response.ok) { throw new Error(`HTTP error! 
status: ${response.status}`); } - console.log('User setting saved successfully:', settingUpdate); + return response.json(); + }) + .then(result => { + console.log('User setting saved successfully:', settingUpdate, 'Response:', result); }) .catch(error => { console.error('Failed to save user setting:', error); diff --git a/application/single_app/static/js/chat/chat-messages.js b/application/single_app/static/js/chat/chat-messages.js index b5419eee..45dbf6f3 100644 --- a/application/single_app/static/js/chat/chat-messages.js +++ b/application/single_app/static/js/chat/chat-messages.js @@ -15,7 +15,23 @@ import { import { updateSidebarConversationTitle } from "./chat-sidebar-conversations.js"; import { escapeHtml, isColorLight, addTargetBlankToExternalLinks } from "./chat-utils.js"; import { showToast } from "./chat-toast.js"; +import { autoplayTTSIfEnabled } from "./chat-tts.js"; import { saveUserSetting } from "./chat-layout.js"; +import { isStreamingEnabled, sendMessageWithStreaming } from "./chat-streaming.js"; +import { getCurrentReasoningEffort, isReasoningEffortEnabled } from './chat-reasoning.js'; +import { areAgentsEnabled } from './chat-agents.js'; + +// Conditionally import TTS if enabled +let ttsModule = null; +if (typeof window.appSettings !== 'undefined' && window.appSettings.enable_text_to_speech) { + import('./chat-tts.js').then(module => { + ttsModule = module; + console.log('TTS module loaded'); + module.initializeTTS(); + }).catch(error => { + console.error('Failed to load TTS module:', error); + }); +} /** * Unwraps markdown tables that are mistakenly wrapped in code blocks. @@ -359,12 +375,21 @@ function createCitationsHtml( const displayText = `${escapeHtml(cite.file_name)}, Page ${ cite.page_number || "N/A" }`; + + // Check if this is a metadata citation + const isMetadata = cite.metadata_type ? 
true : false; + const metadataType = cite.metadata_type || ''; + const metadataContent = cite.metadata_content || ''; + citationsHtml += ` - ${displayText} + ${displayText} `; }); } @@ -435,6 +460,9 @@ function createCitationsHtml( } export function loadMessages(conversationId) { + // Clear search highlights when loading a different conversation + clearSearchHighlight(); + fetch(`/conversation/${conversationId}/messages`) .then((response) => response.json()) .then((data) => { @@ -444,10 +472,15 @@ export function loadMessages(conversationId) { chatbox.innerHTML = ""; console.log(`--- Loading messages for ${conversationId} ---`); data.messages.forEach((msg) => { + // Skip deleted messages (when conversation archiving is enabled) + if (msg.metadata && msg.metadata.is_deleted === true) { + console.log(`Skipping deleted message: ${msg.id}`); + return; + } console.log(`[loadMessages Loop] -------- START Message ID: ${msg.id} --------`); console.log(`[loadMessages Loop] Role: ${msg.role}`); if (msg.role === "user") { - appendMessage("You", msg.content, null, msg.id); + appendMessage("You", msg.content, null, msg.id, false, [], [], [], null, null, msg); } else if (msg.role === "assistant") { console.log(` [loadMessages Loop] Full Assistant msg object:`, JSON.stringify(msg)); // Stringify to see exact keys console.log(` [loadMessages Loop] Checking keys: msg.id=${msg.id}, msg.augmented=${msg.augmented}, msg.hybrid_citations exists=${'hybrid_citations' in msg}, msg.web_search_citations exists=${'web_search_citations' in msg}, msg.agent_citations exists=${'agent_citations' in msg}`); @@ -467,15 +500,25 @@ export function loadMessages(conversationId) { const arg9 = msg.agent_display_name; // Get agent display name const arg10 = msg.agent_name; // Get agent name console.log(` [loadMessages Loop] Calling appendMessage with -> sender: ${senderType}, id: ${arg4}, augmented: ${arg5} (type: ${typeof arg5}), hybrid_len: ${arg6?.length}, web_len: ${arg7?.length}, agent_len: ${arg8?.length}, agent_display: ${arg9}`); + console.log(` [loadMessages Loop] Message metadata:`, msg.metadata); - appendMessage(senderType, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); + appendMessage(senderType, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, msg); console.log(`[loadMessages Loop] -------- END Message ID: ${msg.id} --------`); } else if (msg.role === "file") { - appendMessage("File", msg); + // Pass file message with proper parameters including message ID + appendMessage("File", msg, null, msg.id, false, [], [], [], null, null, msg); } else if (msg.role === "image") { // Validate image URL before calling appendMessage if (msg.content && msg.content !== 'null' && msg.content.trim() !== '') { - appendMessage("image", msg.content, msg.model_deployment_name, msg.id, false, [], [], [], msg.agent_display_name, msg.agent_name); + // Debug logging for image message metadata + console.log(`[loadMessages] Image message ${msg.id}:`, { + hasExtractedText: !!msg.extracted_text, + hasVisionAnalysis: !!msg.vision_analysis, + isUserUpload: msg.metadata?.is_user_upload, + filename: msg.filename + }); + // Pass the full message object for images that may have metadata (uploaded images) + appendMessage("image", msg.content, msg.model_deployment_name, msg.id, false, [], [], [], msg.agent_display_name, msg.agent_name, msg); } else { console.error(`[loadMessages] Invalid image URL for message ${msg.id}: "${msg.content}"`); // Show error message instead of broken image @@ -489,6 +532,18 @@ export function 
loadMessages(conversationId) { .catch((error) => { console.error("Error loading messages:", error); if (chatbox) chatbox.innerHTML = `
    Error loading messages.
    `; + }) + .finally(() => { + // Check if there's a search highlight to apply + if (window.searchHighlight && window.searchHighlight.term) { + const elapsed = Date.now() - window.searchHighlight.timestamp; + if (elapsed < 30000) { // Within 30 seconds + setTimeout(() => applySearchHighlight(window.searchHighlight.term), 100); + } else { + // Clear expired highlight + window.searchHighlight = null; + } + } }); } @@ -502,7 +557,9 @@ export function appendMessage( webCitations = [], agentCitations = [], agentDisplayName = null, - agentName = null + agentName = null, + fullMessageObject = null, + isNewMessage = false ) { if (!chatbox || sender === "System") return; @@ -567,15 +624,57 @@ export function appendMessage( // --- Footer Content (Copy, Feedback, Citations) --- const feedbackHtml = renderFeedbackIcons(messageId, currentConversationId); const hiddenTextId = `copy-md-${messageId || Date.now()}`; + + // Check if message is masked + const isMasked = fullMessageObject?.metadata?.masked || (fullMessageObject?.metadata?.masked_ranges && fullMessageObject.metadata.masked_ranges.length > 0); + const maskIcon = isMasked ? 'bi-front' : 'bi-back'; + const maskTitle = isMasked ? 'Unmask all masked content' : 'Mask entire message'; + + // TTS button (only for AI messages) + const ttsButtonHtml = (sender === 'AI' && typeof window.appSettings !== 'undefined' && window.appSettings.enable_text_to_speech) ? ` + + ` : ''; + const copyButtonHtml = ` - `; - const copyAndFeedbackHtml = `
    ${copyButtonHtml}${feedbackHtml}
    `; + + const maskButtonHtml = ` + + `; + const actionsDropdownHtml = ` + + `; + const carouselButtonsHtml = ` + + + `; + const copyAndFeedbackHtml = `
    ${actionsDropdownHtml}${ttsButtonHtml}${copyButtonHtml}${maskButtonHtml}${carouselButtonsHtml}
    `; const citationsButtonsHtml = createCitationsHtml( hybridCitations, @@ -634,13 +733,24 @@ export function appendMessage( if (shouldShowCitations) { console.log(">>> Will generate and include citation elements."); const citationsContainerId = `citations-${messageId || Date.now()}`; - citationToggleHtml = `
    `; - citationContentContainerHtml = ``; + citationToggleHtml = ``; + // citationsButtonsHtml already contains a
    wrapper + // Just add ID and display style by wrapping minimally + citationContentContainerHtml = ``; } else { console.log(">>> Will NOT generate citation elements."); } - const footerContentHtml = ``; + const metadataContainerId = `metadata-${messageId || Date.now()}`; + const metadataContainerHtml = ``; + + const footerContentHtml = ``; // Build AI message inner HTML messageDiv.innerHTML = ` @@ -650,6 +760,7 @@ export function appendMessage(
    ${senderLabel}
    ${mainMessageHtml} ${citationContentContainerHtml} + ${metadataContainerHtml} ${footerContentHtml}
    `; @@ -657,6 +768,11 @@ export function appendMessage( messageDiv.classList.add(messageClass); // Add AI message class chatbox.appendChild(messageDiv); // Append AI message + // Auto-play TTS if enabled (only for new messages, not when loading history) + if (isNewMessage && typeof autoplayTTSIfEnabled === 'function') { + autoplayTTSIfEnabled(messageId, messageContent); + } + // Highlight code blocks in the messages messageDiv.querySelectorAll('pre code[class^="language-"]').forEach((block) => { const match = block.className.match(/language-([a-zA-Z0-9]+)/); @@ -666,8 +782,121 @@ export function appendMessage( if (window.Prism) Prism.highlightElement(block); }); + // Apply masked state if message has masking + if (fullMessageObject?.metadata) { + console.log('Applying masked state for AI message:', messageId, fullMessageObject.metadata); + applyMaskedState(messageDiv, fullMessageObject.metadata); + } else { + console.log('No metadata found for AI message:', messageId, 'fullMessageObject:', fullMessageObject); + } + // --- Attach Event Listeners specifically for AI message --- attachCodeBlockCopyButtons(messageDiv.querySelector(".message-text")); + + const metadataBtn = messageDiv.querySelector(".metadata-info-btn"); + if (metadataBtn) { + metadataBtn.addEventListener("click", () => { + const metadataContainer = messageDiv.querySelector('.metadata-container'); + if (metadataContainer) { + const isVisible = metadataContainer.style.display !== 'none'; + metadataContainer.style.display = isVisible ? 'none' : 'block'; + metadataBtn.setAttribute('aria-expanded', !isVisible); + metadataBtn.title = isVisible ? 'Show metadata' : 'Hide metadata'; + + // Toggle icon + const icon = metadataBtn.querySelector('i'); + if (icon) { + icon.className = isVisible ? 
'bi bi-info-circle' : 'bi bi-chevron-up'; + } + + // Load metadata if container is empty (first open) + if (!isVisible && metadataContainer.innerHTML.includes('Loading metadata')) { + loadMessageMetadataForDisplay(messageId, metadataContainer); + } + } + }); + } + + const maskBtn = messageDiv.querySelector(".mask-btn"); + if (maskBtn) { + // Update tooltip dynamically on hover + maskBtn.addEventListener("mouseenter", () => { + updateMaskButtonTooltip(maskBtn, messageDiv); + }); + + // Handle mask button click + maskBtn.addEventListener("click", () => { + handleMaskButtonClick(messageDiv, messageId, messageContent); + }); + } + + const dropdownDeleteBtn = messageDiv.querySelector(".dropdown-delete-btn"); + if (dropdownDeleteBtn) { + dropdownDeleteBtn.addEventListener("click", (e) => { + e.preventDefault(); + // Always read the message ID from the DOM attribute dynamically + const currentMessageId = messageDiv.getAttribute('data-message-id'); + console.log(`🗑️ AI Delete button clicked - using message ID from DOM: ${currentMessageId}`); + handleDeleteButtonClick(messageDiv, currentMessageId, 'assistant'); + }); + } + + const dropdownRetryBtn = messageDiv.querySelector(".dropdown-retry-btn"); + if (dropdownRetryBtn) { + dropdownRetryBtn.addEventListener("click", (e) => { + e.preventDefault(); + // Always read the message ID from the DOM attribute dynamically + const currentMessageId = messageDiv.getAttribute('data-message-id'); + console.log(`🔄 AI Retry button clicked - using message ID from DOM: ${currentMessageId}`); + handleRetryButtonClick(messageDiv, currentMessageId, 'assistant'); + }); + } + + // Handle dropdown positioning manually - move to chatbox container + const dropdownToggle = messageDiv.querySelector(".message-actions .dropdown button[data-bs-toggle='dropdown']"); + const dropdownMenu = messageDiv.querySelector(".message-actions .dropdown-menu"); + if (dropdownToggle && dropdownMenu) { + dropdownToggle.addEventListener("show.bs.dropdown", () => { + // Move dropdown menu to chatbox to escape message bubble + const chatbox = document.getElementById('chatbox'); + if (chatbox) { + dropdownMenu.remove(); + chatbox.appendChild(dropdownMenu); + + // Position relative to button + const rect = dropdownToggle.getBoundingClientRect(); + const chatboxRect = chatbox.getBoundingClientRect(); + dropdownMenu.style.position = 'absolute'; + dropdownMenu.style.top = `${rect.bottom - chatboxRect.top + chatbox.scrollTop + 2}px`; + dropdownMenu.style.left = `${rect.left - chatboxRect.left}px`; + dropdownMenu.style.zIndex = '9999'; + } + }); + + // Return menu to original position when closed + dropdownToggle.addEventListener("hidden.bs.dropdown", () => { + const dropdown = messageDiv.querySelector(".message-actions .dropdown"); + if (dropdown && dropdownMenu.parentElement !== dropdown) { + dropdownMenu.remove(); + dropdown.appendChild(dropdownMenu); + } + }); + } + + const carouselPrevBtn = messageDiv.querySelector(".carousel-prev-btn"); + if (carouselPrevBtn) { + carouselPrevBtn.addEventListener("click", () => { + handleCarouselClick(messageId, 'prev'); + }); + } + + const carouselNextBtn = messageDiv.querySelector(".carousel-next-btn"); + if (carouselNextBtn) { + carouselNextBtn.addEventListener("click", () => { + handleCarouselClick(messageId, 'next'); + }); + } + const copyBtn = messageDiv.querySelector(".copy-btn"); copyBtn?.addEventListener("click", () => { /* ... copy logic ... 
*/ @@ -726,6 +955,11 @@ export function appendMessage( // --- Handle ALL OTHER message types --- } else { + // Declare variables for image metadata checks (needed for footer logic) + let isUserUpload = false; + let hasExtractedText = false; + let hasVisionAnalysis = false; + // Determine variables based on sender type if (sender === "You") { messageClass = "user-message"; @@ -764,15 +998,31 @@ export function appendMessage( } else { senderLabel = "Image"; } - - avatarImg = "/static/images/ai-avatar.png"; // Or a specific image icon - avatarAltText = "Generated Image"; + + // Check if this is a user-uploaded image with metadata + isUserUpload = fullMessageObject?.metadata?.is_user_upload || false; + hasExtractedText = fullMessageObject?.extracted_text || false; + hasVisionAnalysis = fullMessageObject?.vision_analysis || false; + + // Use agent display name if available, otherwise show AI with model + if (isUserUpload) { + senderLabel = "Uploaded Image"; + } else if (agentDisplayName) { + senderLabel = agentDisplayName; + } else if (modelName) { + senderLabel = `AI (${modelName})`; + } else { + senderLabel = "Image"; + } + + avatarImg = isUserUpload ? "/static/images/user-avatar.png" : "/static/images/ai-avatar.png"; + avatarAltText = isUserUpload ? "Uploaded Image" : "Generated Image"; // Validate image URL before creating img tag if (messageContent && messageContent !== 'null' && messageContent.trim() !== '') { - messageContentHtml = `Generated Image`; + messageContentHtml = `${isUserUpload ? 'Uploaded' : 'Generated'} Image`; } else { - messageContentHtml = `
    Failed to generate image - invalid response from image service
    `; + messageContentHtml = `
    Failed to ${isUserUpload ? 'load' : 'generate'} image - invalid response from image service
    `; } } else if (sender === "safety") { messageClass = "safety-message"; @@ -806,21 +1056,88 @@ export function appendMessage( // This runs for "You", "File", "image", "safety", "Error", and the fallback "unknown" messageDiv.classList.add(messageClass); // Add the determined class - // Create user message footer if this is a user message + // Create message footer for user, image, and file messages let messageFooterHtml = ""; let metadataContainerHtml = ""; if (sender === "You") { const metadataContainerId = `metadata-${messageId || Date.now()}`; + const isMasked = fullMessageObject?.metadata?.masked || (fullMessageObject?.metadata?.masked_ranges && fullMessageObject.metadata.masked_ranges.length > 0); + const maskIcon = isMasked ? 'bi-front' : 'bi-back'; + const maskTitle = isMasked ? 'Unmask all masked content' : 'Mask entire message'; + messageFooterHtml = ` `; metadataContainerHtml = ``; + } else if (sender === "image" || sender === "File") { + // Image and file messages get mask button on left, metadata button on right side + const metadataContainerId = `metadata-${messageId || Date.now()}`; + + // Check if message is masked + const isMasked = fullMessageObject?.metadata?.masked || (fullMessageObject?.metadata?.masked_ranges && fullMessageObject.metadata.masked_ranges.length > 0); + const maskIcon = isMasked ? 'bi-front' : 'bi-back'; + const maskTitle = isMasked ? 'Unmask all masked content' : 'Mask entire message'; + + // For images with extracted text or vision analysis, add View Text button like citation button + let imageInfoToggleHtml = ''; + let imageInfoContainerHtml = ''; + if (sender === "image" && isUserUpload && (hasExtractedText || hasVisionAnalysis)) { + const infoContainerId = `image-info-${messageId || Date.now()}`; + imageInfoToggleHtml = ``; + imageInfoContainerHtml = ``; + } + + messageFooterHtml = ` + `; + metadataContainerHtml = imageInfoContainerHtml + ``; } // Set innerHTML using the variables determined above @@ -834,7 +1151,11 @@ export function appendMessage( : "" }
    -
    ${senderLabel}
    +
    + ${senderLabel} + ${fullMessageObject?.metadata?.edited ? 'Edited' : ''} + ${fullMessageObject?.metadata?.retried ? 'Retried' : ''} +
    ${messageContentHtml}
    ${metadataContainerHtml} ${messageFooterHtml} @@ -857,7 +1178,115 @@ export function appendMessage( // Add event listeners for user message buttons if (sender === "You") { attachUserMessageEventListeners(messageDiv, messageId, messageContent); + + // Apply masked state if message has masking + if (fullMessageObject?.metadata) { + console.log('Applying masked state for user message:', messageId, fullMessageObject.metadata); + applyMaskedState(messageDiv, fullMessageObject.metadata); + } else { + console.log('No metadata found for user message:', messageId, 'fullMessageObject:', fullMessageObject); + } + } + + // Add event listener for image info button (uploaded images) + if (sender === "image" && fullMessageObject?.metadata?.is_user_upload) { + const imageInfoBtn = messageDiv.querySelector('.image-info-btn'); + if (imageInfoBtn) { + imageInfoBtn.addEventListener('click', () => { + toggleImageInfo(messageDiv, messageId, fullMessageObject); + }); + } + } + + // Add event listener for mask button (image and file messages) + if (sender === "image" || sender === "File") { + const maskBtn = messageDiv.querySelector('.mask-btn'); + if (maskBtn) { + // Update tooltip dynamically on hover + maskBtn.addEventListener("mouseenter", () => { + updateMaskButtonTooltip(maskBtn, messageDiv); + }); + + // Handle mask button click + maskBtn.addEventListener("click", () => { + handleMaskButtonClick(messageDiv, messageId, messageContent); + }); + } + + // Apply masked state if message has masking + if (fullMessageObject?.metadata) { + console.log('Applying masked state for image/file message:', messageId, fullMessageObject.metadata); + applyMaskedState(messageDiv, fullMessageObject.metadata); + } + } + + // Add event listener for metadata button (image and file messages) + if (sender === "image" || sender === "File") { + const metadataBtn = messageDiv.querySelector('.metadata-info-btn'); + if (metadataBtn) { + metadataBtn.addEventListener('click', () => { + const metadataContainer = messageDiv.querySelector('.metadata-container'); + if (metadataContainer) { + const isVisible = metadataContainer.style.display !== 'none'; + metadataContainer.style.display = isVisible ? 'none' : 'block'; + metadataBtn.setAttribute('aria-expanded', !isVisible); + metadataBtn.title = isVisible ? 'Show metadata' : 'Hide metadata'; + + // Toggle icon + const icon = metadataBtn.querySelector('i'); + if (icon) { + icon.className = isVisible ? 'bi bi-info-circle' : 'bi bi-chevron-up'; + } + + // Load metadata if container is empty (first open) + if (!isVisible && metadataContainer.innerHTML.includes('Loading metadata')) { + loadMessageMetadataForDisplay(messageId, metadataContainer); + } + } + }); + } + + // Add delete button event listener from dropdown + const dropdownDeleteBtn = messageDiv.querySelector('.dropdown-delete-btn'); + if (dropdownDeleteBtn) { + dropdownDeleteBtn.addEventListener('click', (e) => { + e.preventDefault(); + // Always read the message ID from the DOM attribute dynamically + const currentMessageId = messageDiv.getAttribute('data-message-id'); + console.log(`🗑️ Image/File Delete button clicked - using message ID from DOM: ${currentMessageId}`); + handleDeleteButtonClick(messageDiv, currentMessageId, sender === "image" ? 
'image' : 'file'); + }); + } + + // Handle dropdown positioning manually for image/file messages - move to chatbox + const dropdownToggle = messageDiv.querySelector(".message-footer .dropdown button[data-bs-toggle='dropdown']"); + const dropdownMenu = messageDiv.querySelector(".message-footer .dropdown-menu"); + if (dropdownToggle && dropdownMenu) { + dropdownToggle.addEventListener("show.bs.dropdown", () => { + const chatbox = document.getElementById('chatbox'); + if (chatbox) { + dropdownMenu.remove(); + chatbox.appendChild(dropdownMenu); + + const rect = dropdownToggle.getBoundingClientRect(); + const chatboxRect = chatbox.getBoundingClientRect(); + dropdownMenu.style.position = 'absolute'; + dropdownMenu.style.top = `${rect.bottom - chatboxRect.top + chatbox.scrollTop + 2}px`; + dropdownMenu.style.left = `${rect.left - chatboxRect.left}px`; + dropdownMenu.style.zIndex = '9999'; + } + }); + + dropdownToggle.addEventListener("hidden.bs.dropdown", () => { + const dropdown = messageDiv.querySelector(".message-footer .dropdown"); + if (dropdown && dropdownMenu.parentElement !== dropdown) { + dropdownMenu.remove(); + dropdown.appendChild(dropdownMenu); + } + }); + } } + scrollChatToBottom(); } // End of the large 'else' block for non-AI messages } @@ -921,7 +1350,11 @@ export function actuallySendMessage(finalMessageToSend) { userInput.style.height = ""; // Update send button visibility after clearing input updateSendButtonVisibility(); - showLoadingIndicatorInChatbox(); + + // Only show loading indicator if NOT using streaming (streaming creates its own placeholder) + if (!isStreamingEnabled()) { + showLoadingIndicatorInChatbox(); + } const modelDeployment = modelSelect?.value; @@ -993,11 +1426,15 @@ export function actuallySendMessage(finalMessageToSend) { const agentSelect = document.getElementById("agent-select"); if (agentSelectContainer && agentSelectContainer.style.display !== "none" && agentSelect) { const selectedAgentOption = agentSelect.options[agentSelect.selectedIndex]; - if (selectedAgentOption && selectedAgentOption.value) { + if (selectedAgentOption) { agentInfo = { - name: selectedAgentOption.value, - display_name: selectedAgentOption.textContent, - is_global: selectedAgentOption.textContent.includes("(Global)") + id: selectedAgentOption.dataset.agentId || null, + name: selectedAgentOption.dataset.name || selectedAgentOption.value || '', + display_name: selectedAgentOption.dataset.displayName || selectedAgentOption.textContent, + is_global: selectedAgentOption.dataset.isGlobal === 'true', + is_group: selectedAgentOption.dataset.isGroup === 'true', + group_id: selectedAgentOption.dataset.groupId || null, + group_name: selectedAgentOption.dataset.groupName || null }; } } @@ -1023,26 +1460,53 @@ export function actuallySendMessage(finalMessageToSend) { // Fallback: if group_id is null/empty, use window.activeGroupId const finalGroupId = group_id || window.activeGroupId || null; + const webSearchToggle = document.getElementById("search-web-btn"); + const webSearchEnabled = webSearchToggle ? 
webSearchToggle.classList.contains("active") : false; + + // Prepare message data object + // Get active public workspace ID from user settings (similar to active_group_id) + const finalPublicWorkspaceId = window.activePublicWorkspaceId || null; + + const messageData = { + message: finalMessageToSend, + conversation_id: currentConversationId, + hybrid_search: hybridSearchEnabled, + web_search_enabled: webSearchEnabled, + selected_document_id: selectedDocumentId, + classifications: classificationsToSend, + image_generation: imageGenEnabled, + doc_scope: effectiveDocScope, + chat_type: chat_type, + active_group_id: finalGroupId, + active_public_workspace_id: finalPublicWorkspaceId, + model_deployment: modelDeployment, + prompt_info: promptInfo, + agent_info: agentInfo, + reasoning_effort: getCurrentReasoningEffort() + }; + + // Check if streaming is enabled (but not for image generation) + const agentsEnabled = typeof areAgentsEnabled === 'function' && areAgentsEnabled(); + if (isStreamingEnabled() && !imageGenEnabled) { + const streamInitiated = sendMessageWithStreaming( + messageData, + tempUserMessageId, + currentConversationId + ); + if (streamInitiated) { + return; // Streaming handles the rest + } + // If streaming failed to initiate, fall through to regular fetch + } + + // Regular non-streaming fetch fetch("/api/chat", { method: "POST", headers: { "Content-Type": "application/json", }, credentials: "same-origin", - body: JSON.stringify({ - message: finalMessageToSend, - conversation_id: currentConversationId, - hybrid_search: hybridSearchEnabled, - selected_document_id: selectedDocumentId, - classifications: classificationsToSend, - image_generation: imageGenEnabled, - doc_scope: effectiveDocScope, - chat_type: chat_type, - active_group_id: finalGroupId, // for backward compatibility - model_deployment: modelDeployment, - prompt_info: promptInfo, - agent_info: agentInfo - }), + body: JSON.stringify(messageData), }) .then((response) => { if (!response.ok) { @@ -1078,10 +1542,15 @@ export function actuallySendMessage(finalMessageToSend) { console.log("data.web_search_citations:", data.web_search_citations); console.log("data.agent_citations:", data.agent_citations); console.log(`data.message_id: ${data.message_id}`); + console.log(`data.user_message_id: ${data.user_message_id}`); + console.log(`tempUserMessageId: ${tempUserMessageId}`); // Update the user message with the real message ID if (data.user_message_id) { + console.log(`🔄 Calling updateUserMessageId(${tempUserMessageId}, ${data.user_message_id})`); updateUserMessageId(tempUserMessageId, data.user_message_id); + } else { + console.warn(`⚠️ No user_message_id in response! 
User message will keep temporary ID: ${tempUserMessageId}`); } if (data.reply) { @@ -1096,7 +1565,9 @@ export function actuallySendMessage(finalMessageToSend) { data.web_search_citations, // Pass web citations data.agent_citations, // Pass agent citations data.agent_display_name, // Pass agent display name - data.agent_name // Pass agent name + data.agent_name, // Pass agent name + null, // fullMessageObject + true // isNewMessage - trigger autoplay for new responses ); } // Show kernel fallback notice if present @@ -1119,6 +1590,11 @@ export function actuallySendMessage(finalMessageToSend) { ); } + if (data.reload_messages && currentConversationId) { + console.log("Reload flag received from backend - refreshing messages."); + loadMessages(currentConversationId); + } + // Update conversation list item and header if needed if (data.conversation_id) { currentConversationId = data.conversation_id; // Update current ID @@ -1169,12 +1645,17 @@ export function actuallySendMessage(finalMessageToSend) { } } else { // New conversation case + console.log('[sendMessage] New conversation created, adding to list without reload'); addConversationToList( currentConversationId, data.conversation_title, data.classification || [] ); - selectConversation(currentConversationId); // Select the newly added one + // Don't call selectConversation here - messages are already displayed + // Just update the current conversation ID and title + window.currentConversationId = currentConversationId; + document.getElementById("current-conversation-title").textContent = data.conversation_title || "New Conversation"; + console.log('[sendMessage] New conversation setup complete, conversation ID:', currentConversationId); } } }) @@ -1335,7 +1816,7 @@ if (promptSelect) { } // Helper function to update user message ID after backend response -function updateUserMessageId(tempId, realId) { +export function updateUserMessageId(tempId, realId) { console.log(`🔄 Updating message ID: ${tempId} -> ${realId}`); // Find the message with the temporary ID @@ -1402,6 +1883,7 @@ function updateUserMessageId(tempId, realId) { function attachUserMessageEventListeners(messageDiv, messageId, messageContent) { const copyBtn = messageDiv.querySelector(".copy-user-btn"); const metadataToggleBtn = messageDiv.querySelector(".metadata-toggle-btn"); + const maskBtn = messageDiv.querySelector(".mask-btn"); if (copyBtn) { copyBtn.addEventListener("click", () => { @@ -1426,6 +1908,99 @@ function attachUserMessageEventListeners(messageDiv, messageId, messageContent) toggleUserMessageMetadata(messageDiv, messageId); }); } + + if (maskBtn) { + // Update tooltip dynamically on hover + maskBtn.addEventListener("mouseenter", () => { + updateMaskButtonTooltip(maskBtn, messageDiv); + }); + + // Handle mask button click + maskBtn.addEventListener("click", () => { + handleMaskButtonClick(messageDiv, messageId, messageContent); + }); + } + + const dropdownDeleteBtn = messageDiv.querySelector(".dropdown-delete-btn"); + if (dropdownDeleteBtn) { + dropdownDeleteBtn.addEventListener("click", (e) => { + e.preventDefault(); + // Always read the message ID from the DOM attribute dynamically + // This ensures we use the updated ID after updateUserMessageId is called + const currentMessageId = messageDiv.getAttribute('data-message-id'); + console.log(`🗑️ Delete button clicked - using message ID from DOM: ${currentMessageId}`); + handleDeleteButtonClick(messageDiv, currentMessageId, 'user'); + }); + } + + const dropdownRetryBtn = 
messageDiv.querySelector(".dropdown-retry-btn"); + if (dropdownRetryBtn) { + dropdownRetryBtn.addEventListener("click", (e) => { + e.preventDefault(); + // Always read the message ID from the DOM attribute dynamically + const currentMessageId = messageDiv.getAttribute('data-message-id'); + console.log(`🔄 Retry button clicked - using message ID from DOM: ${currentMessageId}`); + handleRetryButtonClick(messageDiv, currentMessageId, 'user'); + }); + } + + const dropdownEditBtn = messageDiv.querySelector(".dropdown-edit-btn"); + if (dropdownEditBtn) { + dropdownEditBtn.addEventListener("click", (e) => { + e.preventDefault(); + // Always read the message ID from the DOM attribute dynamically + const currentMessageId = messageDiv.getAttribute('data-message-id'); + console.log(`✏️ Edit button clicked - using message ID from DOM: ${currentMessageId}`); + // Import chat-edit module dynamically + import('./chat-edit.js').then(module => { + module.handleEditButtonClick(messageDiv, currentMessageId, 'user'); + }).catch(err => { + console.error('❌ Error loading chat-edit module:', err); + }); + }); + } + + // Handle dropdown positioning manually for user messages - move to chatbox + const dropdownToggle = messageDiv.querySelector(".message-footer .dropdown button[data-bs-toggle='dropdown']"); + const dropdownMenu = messageDiv.querySelector(".message-footer .dropdown-menu"); + if (dropdownToggle && dropdownMenu) { + dropdownToggle.addEventListener("show.bs.dropdown", () => { + const chatbox = document.getElementById('chatbox'); + if (chatbox) { + dropdownMenu.remove(); + chatbox.appendChild(dropdownMenu); + + const rect = dropdownToggle.getBoundingClientRect(); + const chatboxRect = chatbox.getBoundingClientRect(); + dropdownMenu.style.position = 'absolute'; + dropdownMenu.style.top = `${rect.bottom - chatboxRect.top + chatbox.scrollTop + 2}px`; + dropdownMenu.style.left = `${rect.left - chatboxRect.left}px`; + dropdownMenu.style.zIndex = '9999'; + } + }); + + dropdownToggle.addEventListener("hidden.bs.dropdown", () => { + const dropdown = messageDiv.querySelector(".message-footer .dropdown"); + if (dropdown && dropdownMenu.parentElement !== dropdown) { + dropdownMenu.remove(); + dropdown.appendChild(dropdownMenu); + } + }); + } + + const carouselPrevBtn = messageDiv.querySelector(".carousel-prev-btn"); + if (carouselPrevBtn) { + carouselPrevBtn.addEventListener("click", () => { + handleCarouselClick(messageId, 'prev'); + }); + } + + const carouselNextBtn = messageDiv.querySelector(".carousel-next-btn"); + if (carouselNextBtn) { + carouselNextBtn.addEventListener("click", () => { + handleCarouselClick(messageId, 'next'); + }); + } } // Function to toggle user message metadata drawer @@ -1547,6 +2122,28 @@ function loadUserMessageMetadata(messageId, container, retryCount = 0) { if (data) { console.log(`✅ Successfully loaded metadata for ${messageId}`); container.innerHTML = formatMetadataForDrawer(data); + + // Attach event listeners to View Text buttons + const viewTextButtons = container.querySelectorAll('.view-text-btn'); + viewTextButtons.forEach(btn => { + btn.addEventListener('click', function() { + const imageId = this.getAttribute('data-image-id'); + const collapseElement = document.getElementById(`${imageId}-info`); + + if (collapseElement) { + const bsCollapse = new bootstrap.Collapse(collapseElement, { + toggle: true + }); + + // Update button text + if (collapseElement.classList.contains('show')) { + this.innerHTML = 'View Text'; + } else { + this.innerHTML = 'Hide Text'; + } + } + }); + }); 
} }) .catch(error => { @@ -1600,217 +2197,255 @@ function formatMetadataForDrawer(metadata) { // User Information Section if (metadata.user_info) { - content += ''; + } + + // Thread Information Section (priority display) + if (metadata.thread_info) { + const ti = metadata.thread_info; + content += '
    '; + content += '
    Thread Information
    '; + content += '
    '; + + content += `
    Thread ID: ${escapeHtml(ti.thread_id || 'N/A')}
    `; + + content += `
    Previous Thread: ${escapeHtml(ti.previous_thread_id || 'None')}
    `; + + const activeThreadBadge = ti.active_thread ? + 'Yes' : + 'No'; + content += `
    Active: ${activeThreadBadge}
    `; + + content += `
    Attempt: ${ti.thread_attempt || 1}
    `; + + content += '
    '; } // Button States Section if (metadata.button_states) { - content += ''; } // Workspace Search Section if (metadata.workspace_search) { - content += ''; } // Prompt Selection Section if (metadata.prompt_selection) { - content += ''; } // Agent Selection Section if (metadata.agent_selection) { - content += ''; } // Model Selection Section if (metadata.model_selection) { - content += ''; + } + + // Uploaded Images Section + if (metadata.uploaded_images && metadata.uploaded_images.length > 0) { + content += '
    '; + content += '
    Uploaded Image
    '; + content += '
    '; + + metadata.uploaded_images.forEach((image, index) => { + const imageId = `image-${messageId || Date.now()}-${index}`; + content += ``; // End item wrapper + }); + + content += '
    '; // End ms-3 small and mb-3
    }
    // Chat Context Section
    if (metadata.chat_context) {
-        content += '';
    }
    if (!content) {
@@ -1842,3 +2477,843 @@ if (modelSelect) {
     saveUserSetting({ 'preferredModelDeployment': selectedModel });
   });
 }
+
+/**
+ * Toggle the image info drawer for uploaded images
+ * Shows extracted text (OCR) and vision analysis
+ */
+function toggleImageInfo(messageDiv, messageId, fullMessageObject) {
+    const toggleBtn = messageDiv.querySelector('.image-info-btn');
+    const targetId = toggleBtn.getAttribute('aria-controls');
+    const infoContainer = messageDiv.querySelector(`#${targetId}`);
+
+    if (!infoContainer) {
+        console.error(`Image info container not found for targetId: ${targetId}`);
+        return;
+    }
+
+    const isExpanded = infoContainer.style.display !== "none";
+
+    // Store current scroll position to maintain user's view
+    const currentScrollTop = document.getElementById('chat-messages-container')?.scrollTop || window.pageYOffset;
+
+    if (isExpanded) {
+        // Hide the info
+        infoContainer.style.display = "none";
+        toggleBtn.setAttribute("aria-expanded", false);
+        toggleBtn.title = "View extracted text";
+        toggleBtn.innerHTML = '';
+    } else {
+        // Show the info
+        infoContainer.style.display = "block";
+        toggleBtn.setAttribute("aria-expanded", true);
+        toggleBtn.title = "Hide extracted text";
+        toggleBtn.innerHTML = '';
+
+        // Load image info if not already loaded
+        const contentDiv = infoContainer.querySelector('.image-info-content');
+        if (contentDiv && (contentDiv.innerHTML.trim() === '' || contentDiv.innerHTML.includes('Loading image information...'))) {
+            loadImageInfo(fullMessageObject, contentDiv);
+        }
+    }
+
+    // Restore scroll position after DOM changes
+    setTimeout(() => {
+        if (document.getElementById('chat-messages-container')) {
+            document.getElementById('chat-messages-container').scrollTop = currentScrollTop;
+        } else {
+            window.scrollTo(0, currentScrollTop);
+        }
+    }, 10);
+}
+
+/**
+ * Toggle the metadata drawer for AI, image, and file messages
+ */
+function toggleMessageMetadata(messageDiv, messageId) {
+    const existingDrawer = messageDiv.querySelector('.message-metadata-drawer');
+
+    if (existingDrawer) {
+        // Drawer exists, remove it
+        existingDrawer.remove();
+        return;
+    }
+
+    // Create new drawer
+    const drawerDiv = document.createElement('div');
+    drawerDiv.className = 'message-metadata-drawer mt-2 p-3 border rounded bg-light';
+    drawerDiv.innerHTML = 'Loading...';
+
+    messageDiv.appendChild(drawerDiv);
+
+    // Load metadata
+    loadMessageMetadataForDisplay(messageId, drawerDiv);
+}
+
+/**
+ * Load message metadata into the drawer for AI/image/file messages
+ */
+function loadMessageMetadataForDisplay(messageId, container) {
+    fetch(`/api/message/${messageId}/metadata`)
+        .then(response => {
+            if (!response.ok) {
+                throw new Error('Failed to load metadata');
+            }
+            return response.json();
+        })
+        .then(data => {
+            if (!data) {
+                container.innerHTML = 'No metadata available';
+                return;
+            }
+
+            const metadata = data;
+            let html = '';
+            container.innerHTML = html;
+        })
+        .catch(error => {
+            console.error('Error loading message metadata:', error);
+            container.innerHTML = 'Failed to load metadata';
+        });
+}
+
+/**
+ * Load image extracted text and vision analysis into the info drawer
+ */
+function loadImageInfo(fullMessageObject, container) {
+    const extractedText = fullMessageObject?.extracted_text || '';
+    const visionAnalysis = fullMessageObject?.vision_analysis || null;
+    const filename = fullMessageObject?.filename || 'Image';
+
+    let content = '';
+
+    // Filename
+    content += `Filename: ${escapeHtml(filename)}`;
+
+    // Extracted Text (OCR from Document Intelligence)
+    if (extractedText && extractedText.trim()) {
+        content += 'Extracted Text (OCR):';
+        content += escapeHtml(extractedText);
+    }
+
+    // Vision Analysis (AI-generated description, objects, text)
+    if (visionAnalysis) {
+        content += 'AI Vision Analysis:';
+
+        // Model name can be either 'model' or 'model_name'
+        const modelName = visionAnalysis.model || visionAnalysis.model_name;
+        if (modelName) {
+            content += `Model: ${escapeHtml(modelName)}`;
+        }
+
+        if (visionAnalysis.description) {
+            content += 'Description:';
+            content += escapeHtml(visionAnalysis.description);
+        }
+
+        if (visionAnalysis.objects && Array.isArray(visionAnalysis.objects) && visionAnalysis.objects.length > 0) {
+            content += 'Objects Detected:';
+            content += visionAnalysis.objects.map(obj => `${escapeHtml(obj)}`).join('');
+        }
+
+        if (visionAnalysis.text && visionAnalysis.text.trim()) {
+            content += 'Text Visible in Image:';
+            content += escapeHtml(visionAnalysis.text);
+        }
+
+        // Contextual analysis can be either 'analysis' or 'contextual_analysis'
+        const analysis = visionAnalysis.analysis || visionAnalysis.contextual_analysis;
+        if (analysis && analysis.trim()) {
+            content += 'Contextual Analysis:';
+            content += escapeHtml(analysis);
+        }
+
+        content += '';
+    }
+
+    content += '';
+
+    if (!extractedText && !visionAnalysis) {
+        content = 'No extracted text or analysis available for this image.
    '; + } + + container.innerHTML = content; +} + +// Search highlight functions +export function applySearchHighlight(searchTerm) { + if (!searchTerm || searchTerm.trim() === '') return; + + // Clear any existing highlights first + clearSearchHighlight(); + + const chatbox = document.getElementById('chatbox'); + if (!chatbox) return; + + // Find all message content elements + const messageContents = chatbox.querySelectorAll('.message-content, .ai-response'); + + // Escape special regex characters in search term + const escapedTerm = searchTerm.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + const regex = new RegExp(`(${escapedTerm})`, 'gi'); + + messageContents.forEach(element => { + const walker = document.createTreeWalker( + element, + NodeFilter.SHOW_TEXT, + null, + false + ); + + const textNodes = []; + let node; + while (node = walker.nextNode()) { + if (node.nodeValue.trim() !== '') { + textNodes.push(node); + } + } + + textNodes.forEach(textNode => { + const text = textNode.nodeValue; + if (regex.test(text)) { + const span = document.createElement('span'); + span.innerHTML = text.replace(regex, '$1'); + textNode.parentNode.replaceChild(span, textNode); + } + }); + }); + + // Set timeout to clear highlights after 30 seconds + if (window.searchHighlight) { + if (window.searchHighlight.timeoutId) { + clearTimeout(window.searchHighlight.timeoutId); + } + window.searchHighlight.timeoutId = setTimeout(() => { + clearSearchHighlight(); + window.searchHighlight = null; + }, 30000); + } +} + +export function clearSearchHighlight() { + const chatbox = document.getElementById('chatbox'); + if (!chatbox) return; + + // Find all highlight marks + const highlights = chatbox.querySelectorAll('mark.search-highlight'); + highlights.forEach(mark => { + const text = document.createTextNode(mark.textContent); + mark.parentNode.replaceChild(text, mark); + }); + + // Clear timeout if exists + if (window.searchHighlight && window.searchHighlight.timeoutId) { + clearTimeout(window.searchHighlight.timeoutId); + window.searchHighlight.timeoutId = null; + } +} + +export function scrollToMessageSmooth(messageId) { + if (!messageId) return; + + const chatbox = document.getElementById('chatbox'); + if (!chatbox) return; + + // Find message by data-message-id attribute + const messageElement = chatbox.querySelector(`[data-message-id="${messageId}"]`); + if (!messageElement) { + console.warn(`Message with ID ${messageId} not found`); + return; + } + + // Scroll smoothly to message + messageElement.scrollIntoView({ + behavior: 'smooth', + block: 'center' + }); + + // Add pulse animation + messageElement.classList.add('message-pulse'); + + // Remove pulse after 2 seconds + setTimeout(() => { + messageElement.classList.remove('message-pulse'); + }, 2000); +} + +// ============= Message Masking Functions ============= + +/** + * Apply masked state to a message when loading from database + */ +function applyMaskedState(messageDiv, metadata) { + if (!metadata) return; + + const messageText = messageDiv.querySelector('.message-text'); + const messageFooter = messageDiv.querySelector('.message-footer'); + + if (!messageText) return; + + // Check if entire message is masked + if (metadata.masked) { + messageDiv.classList.add('fully-masked'); + + // Add exclusion badge to footer if not already present + if (messageFooter && !messageFooter.querySelector('.message-exclusion-badge')) { + const badge = document.createElement('div'); + badge.className = 'message-exclusion-badge text-warning small'; + badge.innerHTML = ''; + 
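+            // Hypothetical sketch of the metadata shape this function reads, inferred
+            // only from the fields checked below (not confirmed by the API):
+            //   { masked: true,
+            //     masked_ranges: [{ id: 'r1', start: 12, end: 40, timestamp: '2025-01-01T00:00:00Z' }] }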
messageFooter.appendChild(badge); + } + return; + } + + // Apply masked ranges if they exist + if (metadata.masked_ranges && metadata.masked_ranges.length > 0) { + const content = messageText.textContent; + let htmlContent = ''; + let lastIndex = 0; + + // Sort masked ranges by start position + const sortedRanges = [...metadata.masked_ranges].sort((a, b) => a.start - b.start); + + // Build HTML with masked spans + sortedRanges.forEach(range => { + // Add text before masked range + if (range.start > lastIndex) { + htmlContent += escapeHtml(content.substring(lastIndex, range.start)); + } + + // Add masked span + const maskedText = escapeHtml(content.substring(range.start, range.end)); + const timestamp = new Date(range.timestamp).toLocaleDateString(); + htmlContent += `${maskedText}`; + + lastIndex = range.end; + }); + + // Add remaining text after last masked range + if (lastIndex < content.length) { + htmlContent += escapeHtml(content.substring(lastIndex)); + } + + // Update message text with masked content + messageText.innerHTML = htmlContent; + } +} + +/** + * Update mask button tooltip based on current selection and mask state + */ +function updateMaskButtonTooltip(maskBtn, messageDiv) { + const messageBubble = messageDiv.querySelector('.message-bubble'); + if (!messageBubble) return; + + // Check if there's a text selection within this message + const selection = window.getSelection(); + const hasSelection = selection && selection.toString().trim().length > 0; + + // Verify selection is within this message bubble + let selectionInMessage = false; + if (hasSelection && selection.anchorNode) { + selectionInMessage = messageBubble.contains(selection.anchorNode); + } + + // Check current mask state + const isMasked = messageDiv.querySelector('.masked-content') || messageDiv.classList.contains('fully-masked'); + + // Update tooltip based on state + if (isMasked) { + maskBtn.title = 'Unmask all masked content'; + } else if (selectionInMessage) { + maskBtn.title = 'Mask selected content'; + } else { + maskBtn.title = 'Mask entire message'; + } +} + +/** + * Handle mask button click - masks entire message or selected content + */ +function handleMaskButtonClick(messageDiv, messageId, messageContent) { + const messageBubble = messageDiv.querySelector('.message-bubble'); + const messageText = messageDiv.querySelector('.message-text'); + const maskBtn = messageDiv.querySelector('.mask-btn'); + + if (!messageBubble || !messageText || !maskBtn) { + console.error('Required elements not found for masking'); + return; + } + + // Check if message is currently masked + const isMasked = messageDiv.querySelector('.masked-content') || messageDiv.classList.contains('fully-masked'); + + if (isMasked) { + // Unmask all + unmaskMessage(messageDiv, messageId, maskBtn); + return; + } + + // Check for text selection within message + const selection = window.getSelection(); + const hasSelection = selection && selection.toString().trim().length > 0; + + let selectionInMessage = false; + if (hasSelection && selection.anchorNode) { + selectionInMessage = messageBubble.contains(selection.anchorNode); + } + + if (selectionInMessage) { + // Mask selection + maskSelection(messageDiv, messageId, selection, messageText, maskBtn); + } else { + // Mask entire message + maskEntireMessage(messageDiv, messageId, maskBtn); + } +} + +/** + * Mask the entire message + */ +function maskEntireMessage(messageDiv, messageId, maskBtn) { + console.log(`Masking entire message: ${messageId}`); + + // Get user info + const userDisplayName 
= window.currentUser?.display_name || 'Unknown User'; + const userId = window.currentUser?.id || 'unknown'; + + console.log('Mask entire message - User info:', { userId, userDisplayName, windowCurrentUser: window.currentUser }); + + const payload = { + action: 'mask_all', + user_id: userId, + display_name: userDisplayName + }; + + console.log('Mask entire message - Sending payload:', payload); + + // Call API to mask message + fetch(`/api/message/${messageId}/mask`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }) + .then(response => { + console.log('Mask entire message - Response status:', response.status); + if (!response.ok) { + return response.json().then(err => { + console.error('Mask entire message - Error response:', err); + throw new Error(err.error || 'Failed to mask message'); + }); + } + return response.json(); + }) + .then(data => { + console.log('Mask entire message - Success response:', data); + if (data.success) { + // Add fully-masked class and exclusion badge + messageDiv.classList.add('fully-masked'); + + // Update mask button + const icon = maskBtn.querySelector('i'); + icon.className = 'bi bi-front'; + maskBtn.title = 'Unmask all masked content'; + + // Add exclusion badge to footer if not already present + const messageFooter = messageDiv.querySelector('.message-footer'); + if (messageFooter && !messageFooter.querySelector('.message-exclusion-badge')) { + const badge = document.createElement('div'); + badge.className = 'message-exclusion-badge text-warning small'; + badge.innerHTML = ''; + messageFooter.appendChild(badge); + } + + showToast('Message masked successfully', 'success'); + } else { + showToast('Failed to mask message', 'error'); + } + }) + .catch(error => { + console.error('Error masking message:', error); + showToast('Error masking message', 'error'); + }); +} + +/** + * Mask selected text content + */ +function maskSelection(messageDiv, messageId, selection, messageText, maskBtn) { + const selectedText = selection.toString().trim(); + console.log(`Masking selection in message: ${messageId}`); + + // Get the range and calculate character offsets + const range = selection.getRangeAt(0); + const preSelectionRange = range.cloneRange(); + preSelectionRange.selectNodeContents(messageText); + preSelectionRange.setEnd(range.startContainer, range.startOffset); + const start = preSelectionRange.toString().length; + const end = start + selectedText.length; + + // Get user info + const userDisplayName = window.currentUser?.display_name || 'Unknown User'; + const userId = window.currentUser?.id || 'unknown'; + + console.log('Mask selection - User info:', { userId, userDisplayName, windowCurrentUser: window.currentUser }); + console.log('Mask selection - Range:', { start, end, selectedText }); + + const payload = { + action: 'mask_selection', + selection: { + start: start, + end: end, + text: selectedText + }, + user_id: userId, + display_name: userDisplayName + }; + + console.log('Mask selection - Sending payload:', payload); + + // Call API to mask selection + fetch(`/api/message/${messageId}/mask`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }) + .then(response => { + console.log('Mask selection - Response status:', response.status); + if (!response.ok) { + return response.json().then(err => { + console.error('Mask selection - Error response:', err); + throw new Error(err.error || 'Failed to mask selection'); + }); + } + return 
response.json(); + }) + .then(data => { + console.log('Mask selection - Success response:', data); + if (data.success) { + // Wrap selected text with masked span + const maskId = data.masked_ranges[data.masked_ranges.length - 1].id; + const span = document.createElement('span'); + span.className = 'masked-content'; + span.setAttribute('data-mask-id', maskId); + span.setAttribute('data-user-id', userId); + span.setAttribute('data-display-name', userDisplayName); + span.title = `Masked by ${userDisplayName}`; + + // Use extractContents and insertNode to handle complex selections + try { + const contents = range.extractContents(); + span.appendChild(contents); + range.insertNode(span); + } catch (e) { + console.error('Error wrapping selection:', e); + // Fallback: reload the message to show the masked content + location.reload(); + return; + } + selection.removeAllRanges(); + + // Update mask button + const icon = maskBtn.querySelector('i'); + icon.className = 'bi bi-front'; + maskBtn.title = 'Unmask all masked content'; + + showToast('Selection masked successfully', 'success'); + } else { + showToast('Failed to mask selection', 'error'); + } + }) + .catch(error => { + console.error('Error masking selection:', error); + showToast('Error masking selection', 'error'); + }); +} + +/** + * Unmask all masked content in a message + */ +function unmaskMessage(messageDiv, messageId, maskBtn) { + console.log(`Unmasking message: ${messageId}`); + + // Call API to unmask + fetch(`/api/message/${messageId}/mask`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + action: 'unmask_all' + }) + }) + .then(response => response.json()) + .then(data => { + if (data.success) { + // Remove fully-masked class + messageDiv.classList.remove('fully-masked'); + + // Remove all masked-content spans + const maskedSpans = messageDiv.querySelectorAll('.masked-content'); + maskedSpans.forEach(span => { + const text = document.createTextNode(span.textContent); + span.parentNode.replaceChild(text, span); + }); + + // Remove exclusion badge + const badge = messageDiv.querySelector('.message-exclusion-badge'); + if (badge) { + badge.remove(); + } + + // Update mask button + const icon = maskBtn.querySelector('i'); + icon.className = 'bi bi-back'; + maskBtn.title = 'Mask entire message'; + + showToast('Message unmasked successfully', 'success'); + } else { + showToast('Failed to unmask message', 'error'); + } + }) + .catch(error => { + console.error('Error unmasking message:', error); + showToast('Error unmasking message', 'error'); + }); +} + +// ============= Message Deletion Functions ============= + +/** + * Handle delete button click - shows confirmation modal + */ +function handleDeleteButtonClick(messageDiv, messageId, messageType) { + console.log(`Delete button clicked for ${messageType} message: ${messageId}`); + + // Store message info for deletion confirmation + window.pendingMessageDeletion = { + messageDiv, + messageId, + messageType + }; + + // Show appropriate confirmation modal + if (messageType === 'user') { + // User message - offer thread deletion option + const modal = document.getElementById('delete-message-modal'); + if (modal) { + const bsModal = new bootstrap.Modal(modal); + bsModal.show(); + } + } else { + // AI, image, or file message - single confirmation + const modal = document.getElementById('delete-single-message-modal'); + if (modal) { + // Update modal text based on message type + const modalBody = modal.querySelector('.modal-body p'); + if 
(modalBody) { + if (messageType === 'assistant') { + modalBody.textContent = 'Are you sure you want to delete this AI response? This action cannot be undone.'; + } else if (messageType === 'image') { + modalBody.textContent = 'Are you sure you want to delete this image? This action cannot be undone.'; + } else if (messageType === 'file') { + modalBody.textContent = 'Are you sure you want to delete this file? This action cannot be undone.'; + } + } + const bsModal = new bootstrap.Modal(modal); + bsModal.show(); + } + } +} + +/** + * Execute message deletion via API + */ +function executeMessageDeletion(deleteThread = false) { + const pendingDeletion = window.pendingMessageDeletion; + if (!pendingDeletion) { + console.error('No pending message deletion'); + return; + } + + const { messageDiv, messageId, messageType } = pendingDeletion; + + console.log(`Executing deletion for message ${messageId}, deleteThread: ${deleteThread}`); + console.log(`Message div:`, messageDiv); + console.log(`Message ID from DOM:`, messageDiv ? messageDiv.getAttribute('data-message-id') : 'N/A'); + + // Call delete API + fetch(`/api/message/${messageId}`, { + method: 'DELETE', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + delete_thread: deleteThread + }) + }) + .then(response => { + if (!response.ok) { + return response.json().then(data => { + const errorMsg = data.error || 'Failed to delete message'; + console.error(`Delete API error (${response.status}):`, errorMsg); + console.error(`Failed message ID:`, messageId); + + // Add specific error message for 404 + if (response.status === 404) { + throw new Error(`Message not found in database. This may happen if the message was just created and hasn't fully synced yet. Try refreshing the page and deleting again.`); + } + throw new Error(errorMsg); + }).catch(jsonError => { + // If response.json() fails, throw a generic error + if (response.status === 404) { + throw new Error(`Message not found in database. Message ID: ${messageId}. Try refreshing the page.`); + } + throw new Error(`Failed to delete message (status ${response.status})`); + }); + } + return response.json(); + }) + .then(data => { + console.log('Delete API response:', data); + + if (data.success) { + // Remove message(s) from DOM + const deletedIds = data.deleted_message_ids || [messageId]; + deletedIds.forEach(id => { + const msgDiv = document.querySelector(`[data-message-id="${id}"]`); + if (msgDiv) { + msgDiv.remove(); + console.log(`Removed message ${id} from DOM`); + } + }); + + // Show success message + const archiveMsg = data.archived ? ' (archived)' : ''; + const countMsg = deletedIds.length > 1 ? 
`${deletedIds.length} messages` : 'Message'; + showToast(`${countMsg} deleted successfully${archiveMsg}`, 'success'); + + // Clean up pending deletion + delete window.pendingMessageDeletion; + + // Optionally reload conversation list to update preview + if (typeof loadConversations === 'function') { + loadConversations(); + } + } else { + showToast('Failed to delete message', 'error'); + } + }) + .catch(error => { + console.error('Error deleting message:', error); + + // If we got a 404, suggest reloading messages + if (error.message && error.message.includes('not found')) { + showToast(error.message + ' Click here to reload messages.', 'error', 8000, () => { + // Reload messages when toast is clicked + if (window.currentConversationId) { + loadMessages(window.currentConversationId); + } + }); + } else { + showToast(error.message || 'Failed to delete message', 'error'); + } + + // Clean up pending deletion + delete window.pendingMessageDeletion; + }); +} + +// Expose functions globally +window.chatMessages = { + applySearchHighlight, + clearSearchHighlight, + scrollToMessageSmooth +}; + +// Expose deletion function globally for modal buttons +window.executeMessageDeletion = executeMessageDeletion; diff --git a/application/single_app/static/js/chat/chat-onload.js b/application/single_app/static/js/chat/chat-onload.js index e4852f7d..e20f7240 100644 --- a/application/single_app/static/js/chat/chat-onload.js +++ b/application/single_app/static/js/chat/chat-onload.js @@ -1,6 +1,6 @@ // chat-onload.js -import { loadConversations } from "./chat-conversations.js"; +import { loadConversations, selectConversation, ensureConversationPresent } from "./chat-conversations.js"; // Import handleDocumentSelectChange import { loadAllDocs, populateDocumentSelectScope, handleDocumentSelectChange } from "./chat-documents.js"; import { getUrlParameter } from "./chat-utils.js"; // Assuming getUrlParameter is in chat-utils.js now @@ -8,14 +8,31 @@ import { loadUserPrompts, loadGroupPrompts, initializePromptInteractions } from import { loadUserSettings } from "./chat-layout.js"; import { showToast } from "./chat-toast.js"; import { initConversationInfoButton } from "./chat-conversation-info-button.js"; +import { initializeStreamingToggle } from "./chat-streaming.js"; +import { initializeReasoningToggle } from "./chat-reasoning.js"; +import { initializeSpeechInput } from "./chat-speech-input.js"; -window.addEventListener('DOMContentLoaded', () => { +window.addEventListener('DOMContentLoaded', async () => { console.log("DOM Content Loaded. 
Starting initializations."); // Log start - loadConversations(); // Load conversations immediately + // Load conversations immediately (awaitable so deep-link can run after) + await loadConversations(); // Initialize the conversation info button initConversationInfoButton(); + + // Initialize streaming toggle + initializeStreamingToggle(); + + // Initialize reasoning toggle + initializeReasoningToggle(); + + // Initialize speech input + try { + initializeSpeechInput(); + } catch (error) { + console.warn('Speech input initialization failed:', error); + } // Grab references to the relevant elements const userInput = document.getElementById("user-input"); @@ -62,13 +79,13 @@ window.addEventListener('DOMContentLoaded', () => { } // Load documents, prompts, and user settings - Promise.all([ - loadAllDocs(), - loadUserPrompts(), - loadGroupPrompts(), - loadUserSettings() - ]) - .then(([docsResult, userPromptsResult, groupPromptsResult, userSettings]) => { + try { + const [docsResult, userPromptsResult, groupPromptsResult, userSettings] = await Promise.all([ + loadAllDocs(), + loadUserPrompts(), + loadGroupPrompts(), + loadUserSettings() + ]); console.log("Initial data (Docs, Prompts, Settings) loaded successfully."); // Log success // Set the preferred model if available @@ -183,13 +200,24 @@ window.addEventListener('DOMContentLoaded', () => { initializePromptInteractions(); + // Deep-link: conversationId query param + const conversationId = getUrlParameter("conversationId") || getUrlParameter("conversation_id"); + if (conversationId) { + try { + await ensureConversationPresent(conversationId); + await selectConversation(conversationId); + } catch (err) { + console.error('Failed to load conversation from URL param:', err); + showToast('Could not open that conversation.', 'danger'); + } + } + console.log("All initializations complete."); // Log end - }) - .catch((err) => { + } catch (err) { console.error("Error during initial data loading or setup:", err); // Maybe try to initialize prompts even if doc loading fails? Depends on requirements. 
// console.log("Attempting to initialize prompts despite data load error..."); // initializePromptInteractions(); - }); + } }); diff --git a/application/single_app/static/js/chat/chat-reasoning.js b/application/single_app/static/js/chat/chat-reasoning.js new file mode 100644 index 00000000..252fba91 --- /dev/null +++ b/application/single_app/static/js/chat/chat-reasoning.js @@ -0,0 +1,384 @@ +// chat-reasoning.js +import { loadUserSettings, saveUserSetting } from './chat-layout.js'; +import { showToast } from './chat-toast.js'; + +let reasoningEffortSettings = {}; // Per-model settings: {modelName: 'low', ...} + +/** + * Initialize the reasoning effort toggle button + */ +export function initializeReasoningToggle() { + const reasoningToggleBtn = document.getElementById('reasoning-toggle-btn'); + if (!reasoningToggleBtn) { + console.warn('Reasoning toggle button not found'); + return; + } + + console.log('Initializing reasoning toggle...'); + + // Load initial state from user settings + loadUserSettings().then(settings => { + console.log('Loaded reasoning settings:', settings); + reasoningEffortSettings = settings.reasoningEffortSettings || {}; + console.log('Reasoning effort settings:', reasoningEffortSettings); + + // Update icon based on current model + updateReasoningIconForCurrentModel(); + }).catch(error => { + console.error('Error loading reasoning settings:', error); + }); + + // Handle toggle click - show slider modal + reasoningToggleBtn.addEventListener('click', () => { + showReasoningSlider(); + }); + + // Listen for model changes + const modelSelect = document.getElementById('model-select'); + if (modelSelect) { + modelSelect.addEventListener('change', () => { + updateReasoningIconForCurrentModel(); + updateReasoningButtonVisibility(); + }); + } + + // Listen for image generation toggle - hide reasoning button when image gen is active + const imageGenBtn = document.getElementById('image-generate-btn'); + if (imageGenBtn) { + const observer = new MutationObserver(() => { + updateReasoningButtonVisibility(); + }); + observer.observe(imageGenBtn, { attributes: true, attributeFilter: ['class'] }); + } + + // Listen for agents toggle - hide reasoning button when agents are active + const enableAgentsBtn = document.getElementById('enable-agents-btn'); + if (enableAgentsBtn) { + const observer = new MutationObserver(() => { + updateReasoningButtonVisibility(); + }); + observer.observe(enableAgentsBtn, { attributes: true, attributeFilter: ['class'] }); + } + + updateReasoningButtonVisibility(); +} + +/** + * Update reasoning button visibility based on image generation state, agent state, and model support + */ +function updateReasoningButtonVisibility() { + const reasoningToggleBtn = document.getElementById('reasoning-toggle-btn'); + const imageGenBtn = document.getElementById('image-generate-btn'); + const enableAgentsBtn = document.getElementById('enable-agents-btn'); + + if (!reasoningToggleBtn) return; + + // Hide reasoning button when image generation is active + if (imageGenBtn && imageGenBtn.classList.contains('active')) { + reasoningToggleBtn.style.display = 'none'; + return; + } + + // Hide reasoning button when agents are active + if (enableAgentsBtn && enableAgentsBtn.classList.contains('active')) { + reasoningToggleBtn.style.display = 'none'; + return; + } + + // Hide reasoning button if current model doesn't support reasoning + const modelName = getCurrentModelName(); + if (modelName) { + const supportedLevels = getModelSupportedLevels(modelName); + // If model only 
supports 'none', hide the button + if (supportedLevels.length === 1 && supportedLevels[0] === 'none') { + reasoningToggleBtn.style.display = 'none'; + return; + } + } + + // Otherwise show the button + reasoningToggleBtn.style.display = 'flex'; +} + +/** + * Get the current model name from the model selector + */ +function getCurrentModelName() { + const modelSelect = document.getElementById('model-select'); + if (!modelSelect || !modelSelect.value) { + return null; + } + return modelSelect.value; +} + +/** + * Determine which reasoning effort levels are supported by a given model + * @param {string} modelName - The name of the model + * @returns {Array} Array of supported effort levels + */ +export function getModelSupportedLevels(modelName) { + if (!modelName) { + return ['none', 'minimal', 'low', 'medium', 'high']; + } + + const lowerModelName = modelName.toLowerCase(); + + // Models without reasoning support: gpt-4o, gpt-4.1, gpt-4.1-mini, gpt-5-chat, gpt-5-codex + if (lowerModelName.includes('gpt-4o') || + lowerModelName.includes('gpt-4.1') || + lowerModelName.includes('gpt-5-chat') || + lowerModelName.includes('gpt-5-codex')) { + return ['none']; + } + + // gpt-5-pro: high only + if (lowerModelName.includes('gpt-5-pro')) { + return ['high']; + } + + // gpt-5.1 series: none, minimal, medium, high (skip low/2 bars) + if (lowerModelName.includes('gpt-5.1')) { + return ['none', 'minimal', 'medium', 'high']; + } + + // gpt-5 series (but not 5.1, 5-pro, 5-chat, or 5-codex): minimal, low, medium, high + // Includes: gpt-5, gpt-5-nano, gpt-5-mini + if (lowerModelName.includes('gpt-5')) { + return ['minimal', 'low', 'medium', 'high']; + } + + // o-series (o1, o3, etc): low, medium, high + if (lowerModelName.match(/\bo[0-9]/)) { + return ['low', 'medium', 'high']; + } + + // Default: all levels + return ['none', 'minimal', 'low', 'medium', 'high']; +} + +/** + * Get the reasoning effort level for the current model + * @returns {string} The effort level (none, minimal, low, medium, high) + */ +export function getCurrentModelReasoningEffort() { + const modelName = getCurrentModelName(); + if (!modelName) { + return 'low'; // Default + } + + const supportedLevels = getModelSupportedLevels(modelName); + const savedEffort = reasoningEffortSettings[modelName]; + + // If gpt-5-pro, always return high + if (modelName.toLowerCase().includes('gpt-5-pro')) { + return 'high'; + } + + // If saved effort exists and is supported, use it + if (savedEffort && supportedLevels.includes(savedEffort)) { + return savedEffort; + } + + // Default to 'low' if supported, otherwise first supported level + if (supportedLevels.includes('low')) { + return 'low'; + } + + return supportedLevels[0]; +} + +/** + * Update the reasoning icon based on the current model's saved effort + */ +function updateReasoningIconForCurrentModel() { + const effort = getCurrentModelReasoningEffort(); + updateReasoningIcon(effort); +} + +/** + * Update the reasoning toggle button icon based on effort level + * @param {string} level - The effort level (none, minimal, low, medium, high) + */ +export function updateReasoningIcon(level) { + const reasoningToggleBtn = document.getElementById('reasoning-toggle-btn'); + if (!reasoningToggleBtn) return; + + const iconElement = reasoningToggleBtn.querySelector('i'); + if (!iconElement) return; + + // Map effort levels to Bootstrap Icons signal strength + const iconMap = { + 'none': 'bi-reception-0', + 'minimal': 'bi-reception-1', + 'low': 'bi-reception-2', + 'medium': 'bi-reception-3', + 'high': 
'bi-reception-4' + }; + + // Remove all reception classes + iconElement.className = ''; + + // Add the appropriate icon class + const iconClass = iconMap[level] || 'bi-reception-2'; + iconElement.classList.add('bi', iconClass); + + // Update tooltip + const labelMap = { + 'none': 'No reasoning effort', + 'minimal': 'Minimal reasoning effort', + 'low': 'Low reasoning effort', + 'medium': 'Medium reasoning effort', + 'high': 'High reasoning effort' + }; + reasoningToggleBtn.title = labelMap[level] || 'Configure reasoning effort'; +} + +/** + * Show the reasoning effort slider modal + */ +export function showReasoningSlider() { + const modelName = getCurrentModelName(); + if (!modelName) { + showToast('Please select a model first', 'warning'); + return; + } + + const modal = new bootstrap.Modal(document.getElementById('reasoning-slider-modal')); + const modelNameElement = document.getElementById('reasoning-model-name'); + const levelsContainer = document.querySelector('.reasoning-levels'); + + if (!modelNameElement || !levelsContainer) { + console.error('Reasoning modal elements not found'); + return; + } + + // Set model name + modelNameElement.textContent = modelName; + + // Get supported levels and current effort + const supportedLevels = getModelSupportedLevels(modelName); + const currentEffort = getCurrentModelReasoningEffort(); + + // All possible levels in order (for display from bottom to top) + const allLevels = ['none', 'minimal', 'low', 'medium', 'high']; + const levelLabels = { + 'none': 'None', + 'minimal': 'Minimal', + 'low': 'Low', + 'medium': 'Medium', + 'high': 'High' + }; + const levelIcons = { + 'none': 'bi-reception-0', + 'minimal': 'bi-reception-1', + 'low': 'bi-reception-2', + 'medium': 'bi-reception-3', + 'high': 'bi-reception-4' + }; + const levelDescriptions = { + 'none': 'No additional reasoning - fastest responses, suitable for simple questions', + 'minimal': 'Light reasoning - quick responses with basic logical steps', + 'low': 'Moderate reasoning - balanced speed and thoughtfulness for everyday questions', + 'medium': 'Enhanced reasoning - more deliberate thinking for complex questions', + 'high': 'Maximum reasoning - deepest analysis for challenging problems and nuanced topics' + }; + + // Build level buttons (reversed for bottom-to-top display) + levelsContainer.innerHTML = ''; + allLevels.forEach(level => { + const isSupported = supportedLevels.includes(level); + const isActive = level === currentEffort; + + const levelDiv = document.createElement('div'); + levelDiv.className = `reasoning-level ${isActive ? 'active' : ''} ${!isSupported ? 'disabled' : ''}`; + levelDiv.dataset.level = level; + levelDiv.title = levelDescriptions[level]; + + levelDiv.innerHTML = ` +
+            ${levelLabels[level]}
    + `; + + if (isSupported) { + levelDiv.addEventListener('click', () => { + selectReasoningLevel(level, modelName); + }); + } + + levelsContainer.appendChild(levelDiv); + }); + + modal.show(); +} + +/** + * Handle selection of a reasoning level + * @param {string} level - The selected effort level + * @param {string} modelName - The model name + */ +function selectReasoningLevel(level, modelName) { + // Update the settings + reasoningEffortSettings[modelName] = level; + + // Save to user settings + saveReasoningEffort(modelName, level); + + // Update UI + updateReasoningIcon(level); + + // Update active state in modal + document.querySelectorAll('.reasoning-level').forEach(el => { + el.classList.remove('active'); + if (el.dataset.level === level) { + el.classList.add('active'); + } + }); + + // Show feedback + const levelLabels = { + 'none': 'None', + 'minimal': 'Minimal', + 'low': 'Low', + 'medium': 'Medium', + 'high': 'High' + }; + showToast(`Reasoning effort set to ${levelLabels[level]} for ${modelName}`, 'success'); + + // Close modal after a short delay + setTimeout(() => { + const modal = bootstrap.Modal.getInstance(document.getElementById('reasoning-slider-modal')); + if (modal) { + modal.hide(); + } + }, 500); +} + +/** + * Save the reasoning effort setting for a model + * @param {string} modelName - The model name + * @param {string} effort - The effort level + */ +export function saveReasoningEffort(modelName, effort) { + reasoningEffortSettings[modelName] = effort; + saveUserSetting({ reasoningEffortSettings }); +} + +/** + * Check if reasoning effort is enabled for the current model + * @returns {boolean} True if reasoning effort is enabled + */ +export function isReasoningEffortEnabled() { + const effort = getCurrentModelReasoningEffort(); + return effort && effort !== 'none'; +} + +/** + * Get the current reasoning effort to send to the backend + * @returns {string|null} The effort level or null if 'none' + */ +export function getCurrentReasoningEffort() { + const effort = getCurrentModelReasoningEffort(); + return effort === 'none' ? 
null : effort; +} diff --git a/application/single_app/static/js/chat/chat-retry.js b/application/single_app/static/js/chat/chat-retry.js new file mode 100644 index 00000000..55cfbf8e --- /dev/null +++ b/application/single_app/static/js/chat/chat-retry.js @@ -0,0 +1,393 @@ +// chat-retry.js +// Handles message retry/regenerate functionality + +import { showToast } from './chat-toast.js'; +import { showLoadingIndicatorInChatbox, hideLoadingIndicatorInChatbox } from './chat-loading-indicator.js'; + +/** + * Populate retry agent dropdown with available agents + */ +async function populateRetryAgentDropdown() { + const retryAgentSelect = document.getElementById('retry-agent-select'); + if (!retryAgentSelect) return; + + try { + // Import agent functions dynamically + const agentsModule = await import('../agents_common.js'); + const { fetchUserAgents, fetchGroupAgentsForActiveGroup, fetchSelectedAgent, populateAgentSelect } = agentsModule; + + // Fetch available agents + const [userAgents, selectedAgent] = await Promise.all([ + fetchUserAgents(), + fetchSelectedAgent() + ]); + const groupAgents = await fetchGroupAgentsForActiveGroup(); + + // Combine and order agents + const combinedAgents = [...userAgents, ...groupAgents]; + const personalAgents = combinedAgents.filter(agent => !agent.is_global && !agent.is_group); + const activeGroupAgents = combinedAgents.filter(agent => agent.is_group); + const globalAgents = combinedAgents.filter(agent => agent.is_global); + const orderedAgents = [...personalAgents, ...activeGroupAgents, ...globalAgents]; + + // Populate retry agent select using shared function + populateAgentSelect(retryAgentSelect, orderedAgents, selectedAgent); + + console.log(`✅ Populated retry agent dropdown with ${orderedAgents.length} agents`); + } catch (error) { + console.error('❌ Error populating retry agent dropdown:', error); + } +} + +/** + * Handle retry button click - opens retry modal + */ +export async function handleRetryButtonClick(messageDiv, messageId, messageType) { + console.log(`🔄 Retry button clicked for ${messageType} message: ${messageId}`); + + // Store message info for retry execution + window.pendingMessageRetry = { + messageDiv, + messageId, + messageType + }; + + // Populate retry modal with current model options + const modelSelect = document.getElementById('model-select'); + const retryModelSelect = document.getElementById('retry-model-select'); + + if (modelSelect && retryModelSelect) { + // Clone model options from main select + retryModelSelect.innerHTML = modelSelect.innerHTML; + retryModelSelect.value = modelSelect.value; // Set to currently selected model + } + + // Populate retry modal with agent options (always load fresh from API) + const retryAgentSelect = document.getElementById('retry-agent-select'); + if (retryAgentSelect) { + await populateRetryAgentDropdown(); + } + + // Determine if original message used agents or models + const enableAgentsBtn = document.getElementById('enable-agents-btn'); + const agentSelectContainer = document.getElementById('agent-select-container'); + const isAgentMode = enableAgentsBtn && enableAgentsBtn.classList.contains('active') && + agentSelectContainer && agentSelectContainer.style.display !== 'none'; + + // Set retry mode based on current state + const retryModeModel = document.getElementById('retry-mode-model'); + const retryModeAgent = document.getElementById('retry-mode-agent'); + const retryModelContainer = document.getElementById('retry-model-container'); + const retryAgentContainer = 
document.getElementById('retry-agent-container'); + + if (isAgentMode && retryModeAgent) { + retryModeAgent.checked = true; + if (retryModelContainer) retryModelContainer.style.display = 'none'; + if (retryAgentContainer) retryAgentContainer.style.display = 'block'; + } else if (retryModeModel) { + retryModeModel.checked = true; + if (retryModelContainer) retryModelContainer.style.display = 'block'; + if (retryAgentContainer) retryAgentContainer.style.display = 'none'; + } + + // Add event listeners for mode toggle + if (retryModeModel) { + retryModeModel.addEventListener('change', function() { + if (this.checked) { + if (retryModelContainer) retryModelContainer.style.display = 'block'; + if (retryAgentContainer) retryAgentContainer.style.display = 'none'; + updateReasoningVisibility(); + } + }); + } + + if (retryModeAgent) { + retryModeAgent.addEventListener('change', function() { + if (this.checked) { + if (retryModelContainer) retryModelContainer.style.display = 'none'; + if (retryAgentContainer) retryAgentContainer.style.display = 'block'; + updateReasoningVisibility(); + } + }); + } + + // Function to update reasoning visibility based on selected model or agent + function updateReasoningVisibility() { + const retryReasoningContainer = document.getElementById('retry-reasoning-container'); + const retryReasoningLevels = document.getElementById('retry-reasoning-levels'); + + let showReasoning = false; + + if (retryModeModel && retryModeModel.checked) { + const selectedModel = retryModelSelect ? retryModelSelect.value : null; + showReasoning = selectedModel && selectedModel.includes('o1'); + } else if (retryModeAgent && retryModeAgent.checked) { + // Check if agent uses o1 model (you could enhance this by checking agent config) + const selectedAgent = retryAgentSelect ? retryAgentSelect.value : null; + // For now, we'll show reasoning for agents too if they use o1 models + // This could be enhanced by fetching agent model info + showReasoning = false; // Default to false for agents unless we can determine model + } + + if (retryReasoningContainer) { + retryReasoningContainer.style.display = showReasoning ? 
'block' : 'none'; + + // Populate reasoning levels if empty and showing + if (showReasoning && retryReasoningLevels && !retryReasoningLevels.hasChildNodes()) { + const levels = [ + { value: 'low', label: 'Low', description: 'Faster responses' }, + { value: 'medium', label: 'Medium', description: 'Balanced' }, + { value: 'high', label: 'High', description: 'More thorough reasoning' } + ]; + + levels.forEach(level => { + const div = document.createElement('div'); + div.className = 'form-check'; + div.innerHTML = ` + + + `; + retryReasoningLevels.appendChild(div); + }); + } + } + } + + // Initial reasoning visibility + updateReasoningVisibility(); + + // Update reasoning visibility when model changes in retry modal + if (retryModelSelect) { + retryModelSelect.addEventListener('change', updateReasoningVisibility); + } + + // Update reasoning visibility when agent changes in retry modal + if (retryAgentSelect) { + retryAgentSelect.addEventListener('change', updateReasoningVisibility); + } + + // Show the retry modal + const retryModal = new bootstrap.Modal(document.getElementById('retry-message-modal')); + retryModal.show(); +} + +/** + * Execute message retry - called when user confirms retry in modal + */ +window.executeMessageRetry = function() { + const pendingRetry = window.pendingMessageRetry; + if (!pendingRetry) { + console.error('❌ No pending retry found'); + return; + } + + const { messageDiv, messageId, messageType } = pendingRetry; + + console.log(`🚀 Executing retry for ${messageType} message: ${messageId}`); + + // Determine retry mode (model or agent) + const retryModeModel = document.getElementById('retry-mode-model'); + const retryModeAgent = document.getElementById('retry-mode-agent'); + const isAgentMode = retryModeAgent && retryModeAgent.checked; + + // Prepare retry request body + const requestBody = {}; + + if (isAgentMode) { + // Agent mode - get agent info + const retryAgentSelect = document.getElementById('retry-agent-select'); + if (retryAgentSelect) { + const selectedOption = retryAgentSelect.options[retryAgentSelect.selectedIndex]; + if (selectedOption) { + requestBody.agent_info = { + id: selectedOption.dataset.agentId || null, + name: selectedOption.dataset.name || '', + display_name: selectedOption.dataset.displayName || selectedOption.textContent || '', + is_global: selectedOption.dataset.isGlobal === 'true', + is_group: selectedOption.dataset.isGroup === 'true', + group_id: selectedOption.dataset.groupId || null, + group_name: selectedOption.dataset.groupName || null + }; + console.log(`🤖 Retry with agent:`, requestBody.agent_info); + } + } + } else { + // Model mode - get model and reasoning effort + const retryModelSelect = document.getElementById('retry-model-select'); + const selectedModel = retryModelSelect ? retryModelSelect.value : null; + requestBody.model = selectedModel; + + let reasoningEffort = null; + const retryReasoningContainer = document.getElementById('retry-reasoning-container'); + if (retryReasoningContainer && retryReasoningContainer.style.display !== 'none') { + const selectedReasoning = document.querySelector('input[name="retry-reasoning-effort"]:checked'); + reasoningEffort = selectedReasoning ? 
selectedReasoning.value : null; + } + requestBody.reasoning_effort = reasoningEffort; + + console.log(`🧠 Retry with model: ${selectedModel}, Reasoning: ${reasoningEffort}`); + } + + // Close the modal explicitly + const modalElement = document.getElementById('retry-message-modal'); + if (modalElement) { + const modalInstance = bootstrap.Modal.getInstance(modalElement); + if (modalInstance) { + modalInstance.hide(); + } + } + + // Wait a bit for modal to close, then show loading indicator + setTimeout(() => { + console.log('⏰ Modal closed, showing AI typing indicator...'); + + // Show "AI is typing..." indicator + showLoadingIndicatorInChatbox(); + + // Call retry API endpoint + console.log('📡 Calling retry API endpoint...'); + fetch(`/api/message/${messageId}/retry`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(requestBody) + }) + .then(response => { + if (!response.ok) { + return response.json().then(data => { + throw new Error(data.error || 'Retry failed'); + }); + } + return response.json(); + }) + .then(data => { + console.log('✅ Retry API response:', data); + + if (data.success && data.chat_request) { + console.log('🔄 Retry initiated, calling chat API with:'); + console.log(' retry_user_message_id:', data.chat_request.retry_user_message_id); + console.log(' retry_thread_id:', data.chat_request.retry_thread_id); + console.log(' retry_thread_attempt:', data.chat_request.retry_thread_attempt); + console.log(' Full chat_request:', data.chat_request); + + // Call chat API with the retry parameters + return fetch('/api/chat', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + credentials: 'same-origin', + body: JSON.stringify(data.chat_request) + }); + } else { + throw new Error('Retry response missing chat_request'); + } + }) + .then(response => { + if (!response.ok) { + return response.json().then(data => { + throw new Error(data.error || 'Chat API failed'); + }); + } + return response.json(); + }) + .then(chatData => { + console.log('✅ Chat API response:', chatData); + + // Hide typing indicator + hideLoadingIndicatorInChatbox(); + console.log('🧹 Typing indicator removed'); + + // Get current conversation ID using the proper API + const conversationId = window.chatConversations?.getCurrentConversationId(); + + console.log(`🔍 Current conversation ID: ${conversationId}`); + + // Reload messages to show new attempt (which will automatically hide old attempts) + if (conversationId) { + console.log('🔄 Reloading messages for conversation:', conversationId); + + // Import loadMessages dynamically + import('./chat-messages.js').then(module => { + console.log('📦 chat-messages.js module loaded, calling loadMessages...'); + module.loadMessages(conversationId); + // No toast - the reloaded messages are enough feedback + }).catch(err => { + console.error('❌ Error loading chat-messages module:', err); + showToast('error', 'Failed to reload messages'); + }); + } else { + console.error('❌ No currentConversationId found!'); + + // Try to force a page refresh as fallback + console.log('🔄 Attempting page refresh as fallback...'); + setTimeout(() => { + window.location.reload(); + }, 1000); + } + }) + .catch(error => { + console.error('❌ Retry error:', error); + + // Hide typing indicator on error + hideLoadingIndicatorInChatbox(); + + showToast('error', `Retry failed: ${error.message}`); + }) + .finally(() => { + // Clean up pending retry + window.pendingMessageRetry = null; + }); + + }, 300); // End of setTimeout - wait 300ms 
for modal to close +}; + +/** + * Handle carousel navigation (switch between retry attempts) + */ +export function handleCarouselNavigation(messageDiv, messageId, direction) { + console.log(`🎠 Carousel ${direction} clicked for message: ${messageId}`); + + // Call switch-attempt API endpoint + fetch(`/api/message/${messageId}/switch-attempt`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + direction: direction // 'prev' or 'next' + }) + }) + .then(response => { + if (!response.ok) { + return response.json().then(data => { + throw new Error(data.error || 'Switch attempt failed'); + }); + } + return response.json(); + }) + .then(data => { + console.log(`✅ Switched to attempt ${data.new_active_attempt}:`, data); + + // Reload messages to show new active attempt + if (window.currentConversationId) { + import('./chat-messages.js').then(module => { + module.loadMessages(window.currentConversationId); + showToast('info', `Switched to attempt ${data.new_active_attempt}`); + }); + } + }) + .catch(error => { + console.error('❌ Carousel navigation error:', error); + showToast('error', `Failed to switch attempt: ${error.message}`); + }); +} + +// Make functions available globally for event handlers in chat-messages.js +window.handleRetryButtonClick = handleRetryButtonClick; +window.handleCarouselNavigation = handleCarouselNavigation; diff --git a/application/single_app/static/js/chat/chat-search-modal.js b/application/single_app/static/js/chat/chat-search-modal.js new file mode 100644 index 00000000..b935525f --- /dev/null +++ b/application/single_app/static/js/chat/chat-search-modal.js @@ -0,0 +1,518 @@ +// chat-search-modal.js +// Advanced search modal functionality + +import { showToast } from "./chat-toast.js"; + +let currentSearchParams = null; +let currentPage = 1; +let advancedSearchModal = null; + +// Initialize modal when DOM is ready +document.addEventListener('DOMContentLoaded', () => { + const modalElement = document.getElementById('advancedSearchModal'); + if (modalElement) { + advancedSearchModal = new bootstrap.Modal(modalElement); + + // Set up event listeners + setupEventListeners(); + } +}); + +function setupEventListeners() { + // Search button + const searchBtn = document.getElementById('performSearchBtn'); + if (searchBtn) { + searchBtn.addEventListener('click', () => { + performAdvancedSearch(1); + }); + } + + // Clear filters button + const clearBtn = document.getElementById('clearFiltersBtn'); + if (clearBtn) { + clearBtn.addEventListener('click', clearFilters); + } + + // Clear history button + const clearHistoryBtn = document.getElementById('clearHistoryBtn'); + if (clearHistoryBtn) { + clearHistoryBtn.addEventListener('click', clearSearchHistory); + } + + // Pagination buttons + const prevBtn = document.getElementById('searchPrevBtn'); + const nextBtn = document.getElementById('searchNextBtn'); + + if (prevBtn) { + prevBtn.addEventListener('click', () => { + if (currentPage > 1) { + performAdvancedSearch(currentPage - 1); + } + }); + } + + if (nextBtn) { + nextBtn.addEventListener('click', () => { + performAdvancedSearch(currentPage + 1); + }); + } + + // Enter key in search input + const searchInput = document.getElementById('searchMessageInput'); + if (searchInput) { + searchInput.addEventListener('keypress', (e) => { + if (e.key === 'Enter') { + e.preventDefault(); + performAdvancedSearch(1); + } + }); + } +} + +export function openAdvancedSearchModal() { + if (advancedSearchModal) { + advancedSearchModal.show(); + + 
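+        // Usage sketch (assumption): other modules can open this modal through the
+        // global exposed at the bottom of this file, e.g.
+        //   window.chatSearchModal.openAdvancedSearchModal();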
// Load classifications and history when modal opens + loadClassifications(); + loadSearchHistory(); + } +} + +async function loadClassifications() { + try { + const response = await fetch('/api/conversations/classifications', { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } + }); + + if (!response.ok) { + throw new Error('Failed to load classifications'); + } + + const data = await response.json(); + const select = document.getElementById('searchClassifications'); + + if (select && data.classifications) { + // Clear loading option + select.innerHTML = ''; + + if (data.classifications.length === 0) { + select.innerHTML = ''; + } else { + data.classifications.forEach(classification => { + const option = document.createElement('option'); + option.value = classification; + option.textContent = classification; + select.appendChild(option); + }); + } + } + } catch (error) { + console.error('Error loading classifications:', error); + const select = document.getElementById('searchClassifications'); + if (select) { + select.innerHTML = ''; + } + } +} + +async function loadSearchHistory() { + try { + const response = await fetch('/api/user-settings/search-history', { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } + }); + + if (!response.ok) { + throw new Error('Failed to load search history'); + } + + const data = await response.json(); + const historyList = document.getElementById('searchHistoryList'); + + if (historyList && data.history) { + if (data.history.length === 0) { + historyList.innerHTML = ` +
+                    No search history yet
    + `; + } else { + historyList.innerHTML = ''; + const listGroup = document.createElement('div'); + listGroup.className = 'list-group'; + + data.history.forEach(item => { + const listItem = document.createElement('a'); + listItem.href = '#'; + listItem.className = 'list-group-item list-group-item-action d-flex justify-content-between align-items-center'; + listItem.innerHTML = ` + ${escapeHtml(item.term)} + ${formatDate(item.timestamp)} + `; + + listItem.addEventListener('click', (e) => { + e.preventDefault(); + populateSearchFromHistory(item.term); + }); + + listGroup.appendChild(listItem); + }); + + historyList.appendChild(listGroup); + } + } + } catch (error) { + console.error('Error loading search history:', error); + } +} + +function populateSearchFromHistory(searchTerm) { + const searchInput = document.getElementById('searchMessageInput'); + if (searchInput) { + searchInput.value = searchTerm; + } + + // Switch to search tab + const searchTab = document.getElementById('search-tab'); + if (searchTab) { + searchTab.click(); + } + + // Perform search + performAdvancedSearch(1); +} + +async function performAdvancedSearch(page = 1) { + const searchTerm = document.getElementById('searchMessageInput').value.trim(); + + // Validate search term + if (!searchTerm || searchTerm.length < 3) { + showToast('Please enter at least 3 characters to search', 'warning'); + return; + } + + // Collect form values + const dateFrom = document.getElementById('searchDateFrom').value; + const dateTo = document.getElementById('searchDateTo').value; + + const chatTypes = []; + if (document.getElementById('chatTypePersonal').checked) chatTypes.push('personal'); + if (document.getElementById('chatTypeGroupSingle').checked) chatTypes.push('group-single-user'); + if (document.getElementById('chatTypeGroupMulti').checked) chatTypes.push('group-multi-user'); + if (document.getElementById('chatTypePublic').checked) chatTypes.push('public'); + + const classSelect = document.getElementById('searchClassifications'); + const classifications = Array.from(classSelect.selectedOptions).map(opt => opt.value); + + const hasFiles = document.getElementById('searchHasFiles').checked; + const hasImages = document.getElementById('searchHasImages').checked; + + currentSearchParams = { + search_term: searchTerm, + date_from: dateFrom, + date_to: dateTo, + chat_types: chatTypes, + classifications: classifications, + has_files: hasFiles, + has_images: hasImages, + page: page, + per_page: 20 + }; + + currentPage = page; + + // Show loading + const loadingDiv = document.getElementById('searchResultsLoading'); + const contentDiv = document.getElementById('searchResultsContent'); + const emptyDiv = document.getElementById('searchResultsEmpty'); + const paginationDiv = document.getElementById('searchPagination'); + + if (loadingDiv) loadingDiv.style.display = 'block'; + if (contentDiv) contentDiv.innerHTML = ''; + if (emptyDiv) emptyDiv.style.display = 'none'; + if (paginationDiv) paginationDiv.style.display = 'none'; + + try { + const response = await fetch('/api/search_conversations', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify(currentSearchParams) + }); + + if (!response.ok) { + const error = await response.json(); + throw new Error(error.error || 'Search failed'); + } + + const data = await response.json(); + + // Hide loading + if (loadingDiv) loadingDiv.style.display = 'none'; + + if (data.total_results === 0) { + if (emptyDiv) emptyDiv.style.display = 'block'; + } else { + // 
Render results + renderSearchResults(data); + + // Save to history (only on first page) + if (page === 1) { + saveSearchToHistory(searchTerm); + } + } + + } catch (error) { + console.error('Search error:', error); + if (loadingDiv) loadingDiv.style.display = 'none'; + showToast(error.message || 'Failed to search conversations', 'error'); + } +} + +function renderSearchResults(data) { + const contentDiv = document.getElementById('searchResultsContent'); + const paginationDiv = document.getElementById('searchPagination'); + + if (!contentDiv) return; + + contentDiv.innerHTML = ''; + + // Show result count + const resultHeader = document.createElement('div'); + resultHeader.className = 'mb-3'; + resultHeader.innerHTML = `
    Found ${data.total_results} result${data.total_results !== 1 ? 's' : ''}
    `; + contentDiv.appendChild(resultHeader); + + // Render each conversation result + data.results.forEach(result => { + const card = document.createElement('div'); + card.className = 'card mb-3'; + + const cardBody = document.createElement('div'); + cardBody.className = 'card-body'; + + // Conversation title and metadata + const titleDiv = document.createElement('div'); + titleDiv.className = 'd-flex justify-content-between align-items-start mb-2'; + + const titleText = document.createElement('h6'); + titleText.className = 'card-title mb-0'; + titleText.innerHTML = ` + ${result.conversation.is_pinned ? '' : ''} + ${escapeHtml(result.conversation.title)} + `; + + const metaText = document.createElement('small'); + metaText.className = 'text-muted'; + metaText.textContent = formatDate(result.conversation.last_updated); + + titleDiv.appendChild(titleText); + titleDiv.appendChild(metaText); + cardBody.appendChild(titleDiv); + + // Classifications and chat type + if (result.conversation.classification && result.conversation.classification.length > 0) { + const badgesDiv = document.createElement('div'); + badgesDiv.className = 'mb-2'; + result.conversation.classification.forEach(cls => { + const badge = document.createElement('span'); + badge.className = 'badge bg-secondary me-1'; + badge.textContent = cls; + badgesDiv.appendChild(badge); + }); + cardBody.appendChild(badgesDiv); + } + + // Message matches + const matchesDiv = document.createElement('div'); + matchesDiv.className = 'mt-2'; + matchesDiv.innerHTML = `${result.match_count} message${result.match_count !== 1 ? 's' : ''} matched:`; + + result.messages.forEach(msg => { + const msgDiv = document.createElement('div'); + msgDiv.className = 'border-start border-primary border-3 ps-2 py-1 mb-2 mt-2'; + msgDiv.style.cursor = 'pointer'; + msgDiv.innerHTML = highlightSearchTerm(escapeHtml(msg.content_snippet), currentSearchParams.search_term); + + msgDiv.addEventListener('click', () => { + navigateToMessageWithHighlight(result.conversation.id, msg.message_id, currentSearchParams.search_term); + }); + + msgDiv.addEventListener('mouseenter', () => { + msgDiv.classList.add('bg-light'); + }); + msgDiv.addEventListener('mouseleave', () => { + msgDiv.classList.remove('bg-light'); + }); + + matchesDiv.appendChild(msgDiv); + }); + + cardBody.appendChild(matchesDiv); + card.appendChild(cardBody); + contentDiv.appendChild(card); + }); + + // Update pagination + if (paginationDiv && data.total_pages > 1) { + paginationDiv.style.display = 'flex'; + + const prevBtn = document.getElementById('searchPrevBtn'); + const nextBtn = document.getElementById('searchNextBtn'); + const pageInfo = document.getElementById('searchPageInfo'); + + if (prevBtn) { + prevBtn.disabled = currentPage === 1; + } + + if (nextBtn) { + nextBtn.disabled = currentPage === data.total_pages; + } + + if (pageInfo) { + pageInfo.textContent = `Page ${currentPage} of ${data.total_pages}`; + } + } +} + +function highlightSearchTerm(text, searchTerm) { + const escaped = escapeHtml(searchTerm); + const regex = new RegExp(`(${escaped})`, 'gi'); + return text.replace(regex, '$1'); +} + +function navigateToMessageWithHighlight(convId, msgId, searchTerm) { + // Close the modal + if (advancedSearchModal) { + advancedSearchModal.hide(); + } + + // Set global search highlight state + window.searchHighlight = { + term: searchTerm, + timestamp: Date.now(), + timeoutId: null + }; + + // Load the conversation + if (window.chatConversations && window.chatConversations.selectConversation) { + 
window.chatConversations.selectConversation(convId); + + // Wait for messages to load, then scroll and highlight + setTimeout(() => { + if (window.chatMessages) { + if (window.chatMessages.scrollToMessageSmooth) { + window.chatMessages.scrollToMessageSmooth(msgId); + } + if (window.chatMessages.applySearchHighlight) { + window.chatMessages.applySearchHighlight(searchTerm); + } + } + }, 500); + } +} + +async function saveSearchToHistory(searchTerm) { + try { + await fetch('/api/user-settings/search-history', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ search_term: searchTerm }) + }); + + // Reload history in background + loadSearchHistory(); + } catch (error) { + console.error('Error saving search to history:', error); + } +} + +async function clearSearchHistory() { + if (!confirm('Are you sure you want to clear your search history?')) { + return; + } + + try { + const response = await fetch('/api/user-settings/search-history', { + method: 'DELETE', + headers: { + 'Content-Type': 'application/json' + } + }); + + if (!response.ok) { + throw new Error('Failed to clear history'); + } + + showToast('Search history cleared', 'success'); + loadSearchHistory(); + + } catch (error) { + console.error('Error clearing search history:', error); + showToast('Failed to clear search history', 'error'); + } +} + +function clearFilters() { + // Clear search input + const searchInput = document.getElementById('searchMessageInput'); + if (searchInput) searchInput.value = ''; + + // Clear dates + const dateFrom = document.getElementById('searchDateFrom'); + const dateTo = document.getElementById('searchDateTo'); + if (dateFrom) dateFrom.value = ''; + if (dateTo) dateTo.value = ''; + + // Check all chat types + document.getElementById('chatTypePersonal').checked = true; + document.getElementById('chatTypeGroupSingle').checked = true; + document.getElementById('chatTypeGroupMulti').checked = true; + document.getElementById('chatTypePublic').checked = true; + + // Clear classifications + const classSelect = document.getElementById('searchClassifications'); + if (classSelect) { + Array.from(classSelect.options).forEach(opt => opt.selected = false); + } + + // Uncheck filters + document.getElementById('searchHasFiles').checked = false; + document.getElementById('searchHasImages').checked = false; + + // Clear results + const contentDiv = document.getElementById('searchResultsContent'); + const emptyDiv = document.getElementById('searchResultsEmpty'); + const paginationDiv = document.getElementById('searchPagination'); + + if (contentDiv) contentDiv.innerHTML = ''; + if (emptyDiv) emptyDiv.style.display = 'none'; + if (paginationDiv) paginationDiv.style.display = 'none'; +} + +function escapeHtml(text) { + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; +} + +function formatDate(isoString) { + if (!isoString) return ''; + const date = new Date(isoString); + return date.toLocaleDateString() + ' ' + date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }); +} + +// Expose function globally +window.chatSearchModal = { + openAdvancedSearchModal +}; diff --git a/application/single_app/static/js/chat/chat-sidebar-conversations.js b/application/single_app/static/js/chat/chat-sidebar-conversations.js index bfbba5c6..c8bd3729 100644 --- a/application/single_app/static/js/chat/chat-sidebar-conversations.js +++ b/application/single_app/static/js/chat/chat-sidebar-conversations.js @@ -7,11 +7,23 @@ const 
sidebarConversationsList = document.getElementById("sidebar-conversations- const sidebarNewChatBtn = document.getElementById("sidebar-new-chat-btn"); let currentActiveConversationId = null; +let sidebarShowHiddenConversations = false; // Track if hidden conversations should be shown in sidebar +let isLoadingSidebarConversations = false; // Prevent concurrent sidebar loads +let pendingSidebarReload = false; // Track if a reload is pending // Load conversations for the sidebar export function loadSidebarConversations() { if (!sidebarConversationsList) return; + // If already loading, mark that we need to reload again after current load finishes + if (isLoadingSidebarConversations) { + console.log('Sidebar load already in progress, marking pending reload...'); + pendingSidebarReload = true; + return; + } + + isLoadingSidebarConversations = true; + pendingSidebarReload = false; // Clear any pending reload flag sidebarConversationsList.innerHTML = '
    Loading conversations...
    '; fetch("/api/get_conversations") @@ -20,9 +32,55 @@ export function loadSidebarConversations() { sidebarConversationsList.innerHTML = ""; if (!data.conversations || data.conversations.length === 0) { sidebarConversationsList.innerHTML = '
    No conversations yet.
    '; + + // Reset loading flag even when no conversations + isLoadingSidebarConversations = false; + + // Check for pending reload even when no conversations + if (pendingSidebarReload) { + console.log('Pending reload detected (no conversations), reloading sidebar...'); + setTimeout(() => loadSidebarConversations(), 100); + } return; } - data.conversations.forEach(convo => { + + // Sort conversations: pinned first (by last_updated), then unpinned (by last_updated) + const sortedConversations = [...data.conversations].sort((a, b) => { + const aPinned = a.is_pinned || false; + const bPinned = b.is_pinned || false; + + // If pin status differs, pinned comes first + if (aPinned !== bPinned) { + return bPinned ? 1 : -1; + } + + // If same pin status, sort by last_updated (most recent first) + const aDate = new Date(a.last_updated); + const bDate = new Date(b.last_updated); + return bDate - aDate; + }); + + // Filter conversations based on show/hide hidden setting + let visibleConversations = sortedConversations.filter(convo => { + const isHidden = convo.is_hidden || false; + // Show hidden conversations if toggle is on OR if we're in selection mode + const isSelectionMode = window.chatConversations && window.chatConversations.isSelectionModeActive && window.chatConversations.isSelectionModeActive(); + return !isHidden || sidebarShowHiddenConversations || isSelectionMode; + }); + + // Apply quick search filter if active + if (window.chatConversations && window.chatConversations.getQuickSearchTerm) { + const searchTerm = window.chatConversations.getQuickSearchTerm(); + if (searchTerm && searchTerm.trim() !== '') { + const searchLower = searchTerm.toLowerCase().trim(); + visibleConversations = visibleConversations.filter(convo => { + const titleLower = (convo.title || '').toLowerCase(); + return titleLower.includes(searchLower); + }); + } + } + + visibleConversations.forEach(convo => { sidebarConversationsList.appendChild(createSidebarConversationItem(convo)); }); @@ -38,10 +96,26 @@ export function loadSidebarConversations() { }); } } + + // Reset loading flag + isLoadingSidebarConversations = false; + + // If a reload was requested while we were loading, reload now + if (pendingSidebarReload) { + console.log('Pending reload detected, reloading sidebar conversations...'); + setTimeout(() => loadSidebarConversations(), 100); // Small delay to prevent rapid reloads + } }) .catch(error => { console.error("Error loading sidebar conversations:", error); sidebarConversationsList.innerHTML = `
    Error loading conversations: ${error.error || 'Unknown error'}
    `; + isLoadingSidebarConversations = false; // Reset flag on error too + + // If a reload was requested while we were loading, reload now even after error + if (pendingSidebarReload) { + console.log('Pending reload detected after error, retrying...'); + setTimeout(() => loadSidebarConversations(), 500); // Longer delay after error + } }); } @@ -50,16 +124,36 @@ function createSidebarConversationItem(convo) { const convoItem = document.createElement("div"); convoItem.classList.add("sidebar-conversation-item"); convoItem.setAttribute("data-conversation-id", convo.id); + if (convo.chat_type) { + convoItem.setAttribute("data-chat-type", convo.chat_type); + } + let groupName = null; + if (Array.isArray(convo.context)) { + const primaryGroupContext = convo.context.find(ctx => ctx.type === "primary" && ctx.scope === "group"); + if (primaryGroupContext) { + groupName = primaryGroupContext.name || null; + } + } + if (groupName) { + convoItem.setAttribute("data-group-name", groupName); + } + + const isPinned = convo.is_pinned || false; + const isHidden = convo.is_hidden || false; + const pinIcon = isPinned ? '' : ''; + const hiddenIcon = isHidden ? '' : ''; convoItem.innerHTML = `
    - +
    `; + + const headerRow = convoItem.querySelector(".d-flex.justify-content-between.align-items-center"); + const dropdownElement = headerRow ? headerRow.querySelector('.conversation-dropdown') : null; + const originalTitleElement = headerRow ? headerRow.querySelector('.sidebar-conversation-title') : null; + + if (headerRow && dropdownElement && originalTitleElement) { + // Verify the dropdown is actually a child of headerRow before attempting manipulation + if (!headerRow.contains(dropdownElement)) { + console.error('Dropdown element is not a child of headerRow', { headerRow, dropdownElement }); + return convoItem; + } + + const titleWrapper = document.createElement('div'); + titleWrapper.classList.add('sidebar-conversation-header', 'd-flex', 'align-items-center', 'flex-grow-1', 'overflow-hidden', 'gap-2'); + + // Remove the original title from headerRow + originalTitleElement.remove(); + + // Add styling to title + originalTitleElement.classList.add('flex-grow-1', 'text-truncate'); + originalTitleElement.style.minWidth = '0'; + + // Add title to wrapper + titleWrapper.appendChild(originalTitleElement); + + const isGroupConversation = (convo.chat_type && convo.chat_type.startsWith('group')) || groupName; + if (isGroupConversation) { + const badge = document.createElement('span'); + badge.classList.add('badge', 'bg-info', 'sidebar-conversation-group-badge'); + badge.textContent = 'group'; + badge.title = groupName ? `Group conversation: ${groupName}` : 'Group conversation'; + titleWrapper.appendChild(badge); + } + + // Verify dropdown is still a valid child right before insertion + try { + if (headerRow.contains(dropdownElement) && dropdownElement.parentNode === headerRow) { + // Insert the wrapper before the dropdown + headerRow.insertBefore(titleWrapper, dropdownElement); + } else { + // Fallback: just append to headerRow if dropdown reference is invalid + console.warn('Dropdown element became invalid, appending wrapper instead', { convo: convo.id }); + headerRow.appendChild(titleWrapper); + } + } catch (err) { + // Final fallback: append wrapper if insertBefore fails + console.error('Error inserting titleWrapper, using appendChild fallback:', err, { convo: convo.id }); + try { + headerRow.appendChild(titleWrapper); + } catch (appendErr) { + console.error('Critical error: Could not append titleWrapper:', appendErr, { convo: convo.id }); + } + } + } // Add double-click editing to title const titleElement = convoItem.querySelector('.sidebar-conversation-title'); @@ -111,19 +259,128 @@ function createSidebarConversationItem(convo) { return; } - // Normal mode: select the conversation - setActiveConversation(convo.id); - // Call selectConversation from chat-conversations.js through global reference - if (window.chatConversations && window.chatConversations.selectConversation) { - window.chatConversations.selectConversation(convo.id); + // If this conversation is hidden, ensure the main conversation list also shows hidden conversations + if (convo.is_hidden && window.chatConversations && window.chatConversations.setShowHiddenConversations) { + window.chatConversations.setShowHiddenConversations(true); + + // Wait a moment for the DOM to update before selecting + setTimeout(() => { + setActiveConversation(convo.id); + if (window.chatConversations && window.chatConversations.selectConversation) { + window.chatConversations.selectConversation(convo.id); + } + }, 50); + } else { + // Normal mode: select the conversation immediately + setActiveConversation(convo.id); + // Call selectConversation 
from chat-conversations.js through global reference + if (window.chatConversations && window.chatConversations.selectConversation) { + window.chatConversations.selectConversation(convo.id); + } } }); // Add dropdown menu event handlers + const detailsBtn = convoItem.querySelector('.details-btn'); + const pinBtn = convoItem.querySelector('.pin-btn'); + const hideBtn = convoItem.querySelector('.hide-btn'); const selectBtn = convoItem.querySelector('.select-btn'); const editBtn = convoItem.querySelector('.edit-btn'); const deleteBtn = convoItem.querySelector('.delete-btn'); + if (detailsBtn) { + detailsBtn.addEventListener('click', (e) => { + e.preventDefault(); + e.stopPropagation(); + // Close dropdown after action + const dropdownBtn = convoItem.querySelector('[data-bs-toggle="dropdown"]'); + if (dropdownBtn) { + const dropdownInstance = bootstrap.Dropdown.getInstance(dropdownBtn); + if (dropdownInstance) { + dropdownInstance.hide(); + } + } + // Show conversation details + if (window.showConversationDetails) { + window.showConversationDetails(convo.id); + } + }); + } + + if (pinBtn) { + pinBtn.addEventListener('click', async (e) => { + e.preventDefault(); + e.stopPropagation(); + // Close dropdown after action + const dropdownBtn = convoItem.querySelector('[data-bs-toggle="dropdown"]'); + if (dropdownBtn) { + const dropdownInstance = bootstrap.Dropdown.getInstance(dropdownBtn); + if (dropdownInstance) { + dropdownInstance.hide(); + } + } + // Toggle pin status + try { + const response = await fetch(`/api/conversations/${convo.id}/pin`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' } + }); + if (response.ok) { + const data = await response.json(); + loadSidebarConversations(); + if (window.chatConversations && window.chatConversations.loadConversations) { + window.chatConversations.loadConversations(); + } + if (window.showToast) { + showToast(data.is_pinned ? "Conversation pinned." : "Conversation unpinned.", "success"); + } + } + } catch (error) { + console.error("Error toggling pin:", error); + if (window.showToast) { + showToast("Error toggling pin status.", "danger"); + } + } + }); + } + + if (hideBtn) { + hideBtn.addEventListener('click', async (e) => { + e.preventDefault(); + e.stopPropagation(); + // Close dropdown after action + const dropdownBtn = convoItem.querySelector('[data-bs-toggle="dropdown"]'); + if (dropdownBtn) { + const dropdownInstance = bootstrap.Dropdown.getInstance(dropdownBtn); + if (dropdownInstance) { + dropdownInstance.hide(); + } + } + // Toggle hide status + try { + const response = await fetch(`/api/conversations/${convo.id}/hide`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' } + }); + if (response.ok) { + const data = await response.json(); + loadSidebarConversations(); + if (window.chatConversations && window.chatConversations.loadConversations) { + window.chatConversations.loadConversations(); + } + if (window.showToast) { + showToast(data.is_hidden ? "Conversation hidden." 
: "Conversation unhidden.", "success"); + } + } + } catch (error) { + console.error("Error toggling hide:", error); + if (window.showToast) { + showToast("Error toggling hide status.", "danger"); + } + } + }); + } + if (selectBtn) { selectBtn.addEventListener('click', (e) => { e.preventDefault(); @@ -248,6 +505,10 @@ export function setSidebarSelectionMode(isActive) { const conversationsToggle = document.getElementById('conversations-toggle'); const conversationsActions = document.getElementById('conversations-actions'); const sidebarDeleteBtn = document.getElementById('sidebar-delete-selected-btn'); + const sidebarPinBtn = document.getElementById('sidebar-pin-selected-btn'); + const sidebarHideBtn = document.getElementById('sidebar-hide-selected-btn'); + const sidebarSettingsBtn = document.getElementById('sidebar-conversations-settings-btn'); + const sidebarSearchBtn = document.getElementById('sidebar-search-btn'); sidebarItems.forEach(item => { if (isActive) { @@ -264,6 +525,13 @@ export function setSidebarSelectionMode(isActive) { conversationsToggle.style.fontWeight = '600'; conversationsActions.style.display = 'flex !important'; conversationsActions.style.setProperty('display', 'flex', 'important'); + // Hide the search and eye buttons in selection mode + if (sidebarSettingsBtn) { + sidebarSettingsBtn.style.display = 'none'; + } + if (sidebarSearchBtn) { + sidebarSearchBtn.style.display = 'none'; + } // Add a selection indicator button let indicator = conversationsToggle.querySelector('.selection-indicator'); if (!indicator) { @@ -301,6 +569,19 @@ export function setSidebarSelectionMode(isActive) { if (sidebarDeleteBtn) { sidebarDeleteBtn.style.display = 'none'; } + if (sidebarPinBtn) { + sidebarPinBtn.style.display = 'none'; + } + if (sidebarHideBtn) { + sidebarHideBtn.style.display = 'none'; + } + // Show the search and eye buttons again when exiting selection mode + if (sidebarSettingsBtn) { + sidebarSettingsBtn.style.display = 'inline-block'; + } + if (sidebarSearchBtn) { + sidebarSearchBtn.style.display = 'inline-block'; + } // Remove selection indicator const indicator = conversationsToggle.querySelector('.selection-indicator'); if (indicator) { @@ -310,16 +591,35 @@ export function setSidebarSelectionMode(isActive) { } } -// Update sidebar delete button visibility based on selection count +// Update sidebar action buttons visibility based on selection count export function updateSidebarDeleteButton(selectedCount) { const sidebarDeleteBtn = document.getElementById('sidebar-delete-selected-btn'); - if (sidebarDeleteBtn) { - if (selectedCount > 0) { + const sidebarPinBtn = document.getElementById('sidebar-pin-selected-btn'); + const sidebarHideBtn = document.getElementById('sidebar-hide-selected-btn'); + + if (selectedCount > 0) { + if (sidebarDeleteBtn) { sidebarDeleteBtn.style.display = 'inline-flex'; sidebarDeleteBtn.title = `Delete ${selectedCount} selected conversation${selectedCount > 1 ? 's' : ''}`; - } else { + } + if (sidebarPinBtn) { + sidebarPinBtn.style.display = 'inline-flex'; + sidebarPinBtn.title = `Pin ${selectedCount} selected conversation${selectedCount > 1 ? 's' : ''}`; + } + if (sidebarHideBtn) { + sidebarHideBtn.style.display = 'inline-flex'; + sidebarHideBtn.title = `Hide ${selectedCount} selected conversation${selectedCount > 1 ? 
's' : ''}`; + } + } else { + if (sidebarDeleteBtn) { sidebarDeleteBtn.style.display = 'none'; } + if (sidebarPinBtn) { + sidebarPinBtn.style.display = 'none'; + } + if (sidebarHideBtn) { + sidebarHideBtn.style.display = 'none'; + } } } @@ -482,6 +782,32 @@ document.addEventListener('DOMContentLoaded', () => { }); } + // Handle sidebar pin selected button click + const sidebarPinBtn = document.getElementById('sidebar-pin-selected-btn'); + if (sidebarPinBtn) { + sidebarPinBtn.addEventListener('click', (e) => { + e.preventDefault(); + e.stopPropagation(); + // Trigger the main pin selected functionality + if (window.chatConversations && window.chatConversations.bulkPinConversations) { + window.chatConversations.bulkPinConversations(); + } + }); + } + + // Handle sidebar hide selected button click + const sidebarHideBtn = document.getElementById('sidebar-hide-selected-btn'); + if (sidebarHideBtn) { + sidebarHideBtn.addEventListener('click', (e) => { + e.preventDefault(); + e.stopPropagation(); + // Trigger the main hide selected functionality + if (window.chatConversations && window.chatConversations.bulkHideConversations) { + window.chatConversations.bulkHideConversations(); + } + }); + } + // Handle sidebar delete selected button click const sidebarDeleteBtn = document.getElementById('sidebar-delete-selected-btn'); if (sidebarDeleteBtn) { @@ -494,6 +820,39 @@ document.addEventListener('DOMContentLoaded', () => { } }); } + + // Handle sidebar settings button click (toggle show/hide hidden conversations) + const sidebarSettingsBtn = document.getElementById('sidebar-conversations-settings-btn'); + if (sidebarSettingsBtn) { + sidebarSettingsBtn.addEventListener('click', (e) => { + e.preventDefault(); + e.stopPropagation(); + + // Toggle show hidden conversations + sidebarShowHiddenConversations = !sidebarShowHiddenConversations; + + // Update button appearance based on state + const icon = sidebarSettingsBtn.querySelector('i'); + if (icon) { + if (sidebarShowHiddenConversations) { + icon.classList.remove('bi-eye'); + icon.classList.add('bi-eye-fill'); + sidebarSettingsBtn.classList.remove('text-muted'); + sidebarSettingsBtn.classList.add('text-primary'); + sidebarSettingsBtn.title = 'Showing hidden conversations (click to hide)'; + } else { + icon.classList.remove('bi-eye-fill'); + icon.classList.add('bi-eye'); + sidebarSettingsBtn.classList.remove('text-primary'); + sidebarSettingsBtn.classList.add('text-muted'); + sidebarSettingsBtn.title = 'Show/Hide hidden conversations'; + } + } + + // Reload conversations to apply filter + loadSidebarConversations(); + }); + } } }); diff --git a/application/single_app/static/js/chat/chat-speech-input.js b/application/single_app/static/js/chat/chat-speech-input.js new file mode 100644 index 00000000..e9baab23 --- /dev/null +++ b/application/single_app/static/js/chat/chat-speech-input.js @@ -0,0 +1,980 @@ +// chat-speech-input.js +/** + * Speech-to-text chat input module + * Handles voice recording with visual waveform feedback and transcription + */ + +import { showToast } from './chat-toast.js'; +import { sendMessage } from './chat-messages.js'; +import { saveUserSetting } from './chat-layout.js'; + +let mediaRecorder = null; +let audioChunks = []; +let recordingStartTime = null; +let countdownInterval = null; +let autoSendTimeout = null; +let autoSendCountdown = null; +let audioContext = null; +let analyser = null; +let animationFrame = null; +let stream = null; +let waveformData = []; // Store waveform amplitudes over time +let isCanceling = false; // 
Flag to track if recording is being canceled +let microphonePermissionState = 'prompt'; // 'granted', 'denied', or 'prompt' +let userMicrophonePreference = 'ask-every-session'; // User's permission preference +let sessionPermissionRequested = false; // Track if permission was requested this session + +const MAX_RECORDING_DURATION = 90; // seconds +let remainingTime = MAX_RECORDING_DURATION; + +/** + * Check if browser supports required APIs + */ +function checkBrowserSupport() { + if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) { + return { supported: false, message: 'Your browser does not support audio recording' }; + } + + if (!window.MediaRecorder) { + return { supported: false, message: 'Your browser does not support MediaRecorder API' }; + } + + if (!window.AudioContext && !window.webkitAudioContext) { + return { supported: false, message: 'Your browser does not support Web Audio API' }; + } + + return { supported: true }; +} + +/** + * Initialize speech input functionality + */ +export function initializeSpeechInput() { + console.log('Initializing speech input...'); + + const speechBtn = document.getElementById('speech-input-btn'); + + if (!speechBtn) { + console.warn('Speech input button not found in DOM'); + return; // Speech input not enabled + } + + console.log('Speech input button found:', speechBtn); + + // Check browser support + const support = checkBrowserSupport(); + if (!support.supported) { + speechBtn.style.display = 'none'; + console.warn('Speech input disabled:', support.message); + return; + } + + console.log('Browser supports speech input'); + + // Load user microphone preferences + loadMicrophonePreference().then(() => { + // Check permission state and update icon + checkMicrophonePermissionState(); + }); + + // Attach event listener + speechBtn.addEventListener('click', handleSpeechButtonClick); + + // Attach recording control listeners + const cancelBtn = document.getElementById('cancel-recording-btn'); + const sendBtn = document.getElementById('send-recording-btn'); + + if (cancelBtn) { + cancelBtn.addEventListener('click', cancelRecording); + console.log('Cancel button listener attached'); + } + + if (sendBtn) { + sendBtn.addEventListener('click', stopAndSendRecording); + console.log('Send button listener attached'); + } + + console.log('Speech input initialization complete'); +} + +/** + * Handle speech button click - check permission state first + */ +async function handleSpeechButtonClick() { + console.log('Speech button clicked!'); + + // If permission is denied, navigate to profile settings + if (microphonePermissionState === 'denied') { + console.log('Microphone permission denied, redirecting to profile settings'); + window.location.href = '/profile#speech-settings'; + return; + } + + // Check if we should request permission based on user preference + if (shouldRequestPermission()) { + await checkMicrophonePermissionState(); + } + + // Start recording + startRecording(); +} + +/** + * Check if we should request permission based on user preference + */ +function shouldRequestPermission() { + switch (userMicrophonePreference) { + case 'remember': + // Only request once ever + return microphonePermissionState === 'prompt'; + case 'ask-every-session': + // Request once per browser session + return !sessionPermissionRequested; + case 'ask-every-page-load': + // Request on every page load + return true; + default: + return !sessionPermissionRequested; + } +} + +/** + * Load user's microphone permission preference from settings + */ +async 
function loadMicrophonePreference() { + try { + const response = await fetch('/api/user/settings'); + const data = await response.json(); + const settings = data.settings || {}; + + // Microphone permission preference removed - browser controls permission state + console.log('Loaded microphone preference:', userMicrophonePreference); + + return userMicrophonePreference; + } catch (error) { + console.error('Error loading microphone preference:', error); + userMicrophonePreference = 'ask-every-session'; + return userMicrophonePreference; + } +} + +/** + * Check microphone permission state and update UI + */ +async function checkMicrophonePermissionState() { + try { + // Try to get media to check permission state + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + + // Permission granted + stream.getTracks().forEach(track => track.stop()); + microphonePermissionState = 'granted'; + sessionPermissionRequested = true; + updateMicrophoneIconState('granted'); + + // Save state if preference is 'remember' + if (userMicrophonePreference === 'remember') { + await savePermissionState('granted'); + } + + } catch (error) { + if (error.name === 'NotAllowedError' || error.name === 'PermissionDeniedError') { + microphonePermissionState = 'denied'; + sessionPermissionRequested = true; + updateMicrophoneIconState('denied'); + + // Save state if preference is 'remember' + if (userMicrophonePreference === 'remember') { + await savePermissionState('denied'); + } + } else { + console.error('Error checking microphone permission:', error); + microphonePermissionState = 'prompt'; + updateMicrophoneIconState('prompt'); + } + } +} + +/** + * Update microphone icon state with color and tooltip + */ +function updateMicrophoneIconState(state) { + const speechBtn = document.getElementById('speech-input-btn'); + if (!speechBtn) return; + + const icon = speechBtn.querySelector('i'); + if (!icon) return; + + // Remove existing state classes + icon.classList.remove('text-success', 'text-danger', 'text-secondary'); + + switch(state) { + case 'granted': + icon.classList.add('text-success'); + speechBtn.title = 'Voice Input (Microphone access granted)'; + break; + case 'denied': + icon.classList.add('text-danger'); + speechBtn.title = 'Microphone access denied - Click to manage permissions'; + break; + case 'prompt': + default: + icon.classList.add('text-secondary'); + speechBtn.title = 'Voice Input (Click to enable microphone)'; + break; + } + + console.log('Updated microphone icon state:', state); +} + +/** + * Save permission state to user settings + */ +async function savePermissionState(state) { + try { + await saveUserSetting({ + microphonePermissionState: state + }); + console.log('Saved microphone permission state:', state); + } catch (error) { + console.error('Error saving microphone permission state:', error); + } +} + +/** + * Start recording audio + */ +async function startRecording() { + try { + // Request microphone permission + stream = await navigator.mediaDevices.getUserMedia({ + audio: { + sampleRate: 16000, // Azure Speech SDK works well with 16kHz + channelCount: 1, // Mono + echoCancellation: true, + noiseSuppression: true + } + }); + + // Set up MediaRecorder - try WAV first, fallback to WebM + let options = {}; + let fileExtension = 'webm'; + + // Try WAV format first (best for Azure Speech SDK, no conversion needed) + if (MediaRecorder.isTypeSupported('audio/wav')) { + options.mimeType = 'audio/wav'; + fileExtension = 'wav'; + } + // Try WebM with Opus codec + else if 
(MediaRecorder.isTypeSupported('audio/webm;codecs=opus')) { + options.mimeType = 'audio/webm;codecs=opus'; + fileExtension = 'webm'; + } + // Fallback to default WebM + else if (MediaRecorder.isTypeSupported('audio/webm')) { + options.mimeType = 'audio/webm'; + fileExtension = 'webm'; + } + + console.log('Using audio format:', options.mimeType || 'default'); + + mediaRecorder = new MediaRecorder(stream, options); + + // Store the file extension for later use + mediaRecorder.fileExtension = fileExtension; + audioChunks = []; + isCanceling = false; // Reset cancel flag when starting new recording + + mediaRecorder.addEventListener('dataavailable', (event) => { + if (event.data.size > 0) { + console.log('[Recording] Audio chunk received, size:', event.data.size); + audioChunks.push(event.data); + } + }); + + mediaRecorder.addEventListener('stop', handleRecordingStop); + + // Start recording - request data every second for better chunk collection + mediaRecorder.start(1000); // Timeslice: 1000ms + recordingStartTime = Date.now(); + remainingTime = MAX_RECORDING_DURATION; + + console.log('[Recording] Started with 1-second timeslice for better chunk collection'); + + // Reset waveform data + waveformData = []; + + // Show recording UI + showRecordingUI(); + + // Start waveform visualization + startWaveformVisualization(stream); + + // Start countdown timer + startCountdown(); + + // Update permission state to granted + microphonePermissionState = 'granted'; + sessionPermissionRequested = true; + updateMicrophoneIconState('granted'); + + // Save state if preference is 'remember' + if (userMicrophonePreference === 'remember') { + await savePermissionState('granted'); + } + + } catch (error) { + console.error('Error starting recording:', error); + + if (error.name === 'NotAllowedError' || error.name === 'PermissionDeniedError') { + microphonePermissionState = 'denied'; + sessionPermissionRequested = true; + updateMicrophoneIconState('denied'); + + // Save state if preference is 'remember' + if (userMicrophonePreference === 'remember') { + await savePermissionState('denied'); + } + + showToast('Microphone permission denied. 
Click the microphone icon to manage permissions.', 'warning'); + } else { + showToast('Error starting recording: ' + error.message, 'danger'); + } + } +} + +/** + * Stop recording and send for transcription + */ +function stopAndSendRecording() { + if (mediaRecorder && mediaRecorder.state === 'recording') { + const recordingDuration = (Date.now() - recordingStartTime) / 1000; + console.log('[Recording] Stopping recording after', recordingDuration.toFixed(2), 'seconds'); + console.log('[Recording] Total chunks collected so far:', audioChunks.length); + + mediaRecorder.stop(); + + // Stop all tracks + if (stream) { + stream.getTracks().forEach(track => track.stop()); + } + } +} + +/** + * Cancel recording + */ +function cancelRecording() { + // Set cancel flag BEFORE stopping the recorder + isCanceling = true; + + if (mediaRecorder && mediaRecorder.state === 'recording') { + mediaRecorder.stop(); + + // Stop all tracks + if (stream) { + stream.getTracks().forEach(track => track.stop()); + } + } + + // Clear waveform data + waveformData = []; + + // Clear audio chunks + audioChunks = []; + + // Reset UI + hideRecordingUI(); + stopWaveformVisualization(); + stopCountdown(); +} + +/** + * Convert audio blob to WAV format using Web Audio API + * @param {Blob} audioBlob - The audio blob to convert + * @returns {Promise<Blob>} WAV formatted audio blob + */ +async function convertToWav(audioBlob) { + console.log('Converting audio to WAV format...'); + + // Create audio context + const audioContext = new (window.AudioContext || window.webkitAudioContext)({ + sampleRate: 16000 // 16kHz for Azure Speech SDK + }); + + // Convert blob to array buffer + const arrayBuffer = await audioBlob.arrayBuffer(); + + // Decode audio data + const audioBuffer = await audioContext.decodeAudioData(arrayBuffer); + + console.log('Audio decoded:', { + sampleRate: audioBuffer.sampleRate, + duration: audioBuffer.duration, + channels: audioBuffer.numberOfChannels + }); + + // Get audio data (convert to mono if needed) + let audioData; + if (audioBuffer.numberOfChannels > 1) { + // Mix down to mono + const left = audioBuffer.getChannelData(0); + const right = audioBuffer.getChannelData(1); + audioData = new Float32Array(left.length); + for (let i = 0; i < left.length; i++) { + audioData[i] = (left[i] + right[i]) / 2; + } + } else { + audioData = audioBuffer.getChannelData(0); + } + + // Convert float32 to int16 (WAV PCM format) + const int16Data = new Int16Array(audioData.length); + for (let i = 0; i < audioData.length; i++) { + const s = Math.max(-1, Math.min(1, audioData[i])); + int16Data[i] = s < 0 ? 
s * 0x8000 : s * 0x7FFF; + } + + // Create WAV file + const wavBlob = createWavBlob(int16Data, audioBuffer.sampleRate); + + console.log('WAV conversion complete:', { + originalSize: audioBlob.size, + wavSize: wavBlob.size, + sampleRate: audioBuffer.sampleRate + }); + + // Close audio context + await audioContext.close(); + + return wavBlob; +} + +/** + * Create a WAV blob from PCM data + * @param {Int16Array} samples - PCM audio samples + * @param {number} sampleRate - Sample rate in Hz + * @returns {Blob} WAV formatted blob + */ +function createWavBlob(samples, sampleRate) { + const buffer = new ArrayBuffer(44 + samples.length * 2); + const view = new DataView(buffer); + + // Write WAV header + const writeString = (offset, string) => { + for (let i = 0; i < string.length; i++) { + view.setUint8(offset + i, string.charCodeAt(i)); + } + }; + + writeString(0, 'RIFF'); + view.setUint32(4, 36 + samples.length * 2, true); + writeString(8, 'WAVE'); + writeString(12, 'fmt '); + view.setUint32(16, 16, true); // fmt chunk size + view.setUint16(20, 1, true); // PCM format + view.setUint16(22, 1, true); // Mono channel + view.setUint32(24, sampleRate, true); + view.setUint32(28, sampleRate * 2, true); // byte rate + view.setUint16(32, 2, true); // block align + view.setUint16(34, 16, true); // bits per sample + writeString(36, 'data'); + view.setUint32(40, samples.length * 2, true); + + // Write PCM data + const offset = 44; + for (let i = 0; i < samples.length; i++) { + view.setInt16(offset + i * 2, samples[i], true); + } + + return new Blob([buffer], { type: 'audio/wav' }); +} + +/** + * Handle recording stop event + */ +async function handleRecordingStop() { + if (isCanceling) { + console.log('Recording canceled by user'); + hideRecordingUI(); + isCanceling = false; // Reset flag + return; + } + + // Check if recording was canceled + stopWaveformVisualization(); + stopCountdown(); + + // Check if recording was canceled (no chunks) + if (audioChunks.length === 0) { + hideRecordingUI(); + return; + } + + // Get the MIME type from the MediaRecorder + const mimeType = mediaRecorder && mediaRecorder.mimeType ? 
mediaRecorder.mimeType : 'audio/webm'; + + // Create blob from chunks with correct MIME type + const originalBlob = new Blob(audioChunks, { type: mimeType }); + + console.log('Original audio blob created:', { type: mimeType, size: originalBlob.size }); + + // Show processing state + const sendBtn = document.getElementById('send-recording-btn'); + const cancelBtn = document.getElementById('cancel-recording-btn'); + + if (sendBtn) { + sendBtn.disabled = true; + sendBtn.innerHTML = ''; + } + + if (cancelBtn) { + cancelBtn.disabled = true; + } + + try { + // Convert to WAV format for Azure Speech SDK compatibility + const wavBlob = await convertToWav(originalBlob); + + console.log('[Recording] WAV conversion complete, sending to backend'); + + // Update button text - keep same spinner + if (sendBtn) { + sendBtn.innerHTML = ''; + } + + // Send to backend for transcription + const formData = new FormData(); + formData.append('audio', wavBlob, 'recording.wav'); + + console.log('[Recording] Sending WAV audio to backend, size:', wavBlob.size); + + const response = await fetch('/api/speech/transcribe-chat', { + method: 'POST', + body: formData + }); + + const result = await response.json(); + + if (result.success && result.text) { + // Append transcribed text to existing input + const userInput = document.getElementById('user-input'); + if (userInput) { + console.log('[Speech Input] Transcription successful:', result.text); + + // Check if there's existing text + const existingText = userInput.value.trim(); + + if (existingText) { + // Append with newline separator + userInput.value = existingText + '\n' + result.text; + } else { + // No existing text, just set the transcription + userInput.value = result.text; + } + + console.log('[Speech Input] User input updated, value length:', userInput.value.length); + + // Adjust textarea height + userInput.style.height = ''; + userInput.style.height = Math.min(userInput.scrollHeight, 200) + 'px'; + + // Trigger input change to show send button + if (window.handleInputChange) { + window.handleInputChange(); + } + } + + showToast('Voice message transcribed successfully', 'success'); + + console.log('[Speech Input] Starting auto-send countdown...'); + // Start auto-send countdown + startAutoSendCountdown(); + } else { + showToast(result.error || 'Failed to transcribe audio', 'danger'); + } + + } catch (error) { + console.error('Error transcribing audio:', error); + showToast('Error transcribing audio: ' + error.message, 'danger'); + } finally { + // Reset UI + hideRecordingUI(); + + if (sendBtn) { + sendBtn.disabled = false; + sendBtn.innerHTML = ''; + } + + if (cancelBtn) { + cancelBtn.disabled = false; + } + } +} + +/** + * Show recording UI and hide normal input + */ +function showRecordingUI() { + const normalContainer = document.getElementById('normal-input-container'); + const recordingContainer = document.getElementById('recording-container'); + + if (normalContainer) { + normalContainer.style.display = 'none'; + } + + if (recordingContainer) { + recordingContainer.style.display = 'block'; + } +} + +/** + * Hide recording UI and show normal input + */ +function hideRecordingUI() { + const normalContainer = document.getElementById('normal-input-container'); + const recordingContainer = document.getElementById('recording-container'); + + if (normalContainer) { + normalContainer.style.display = 'block'; + } + + if (recordingContainer) { + recordingContainer.style.display = 'none'; + } +} + +/** + * Start waveform visualization + */ +function 
startWaveformVisualization(audioStream) { + const canvas = document.getElementById('waveform-canvas'); + if (!canvas) return; + + const canvasCtx = canvas.getContext('2d'); + + // Set canvas size - height is now 36px to match buttons + canvas.width = canvas.offsetWidth; + canvas.height = 36; + + // Create audio context and analyser + const AudioContext = window.AudioContext || window.webkitAudioContext; + audioContext = new AudioContext(); + analyser = audioContext.createAnalyser(); + analyser.fftSize = 256; + + const source = audioContext.createMediaStreamSource(audioStream); + source.connect(analyser); + + const bufferLength = analyser.frequencyBinCount; + const dataArray = new Uint8Array(bufferLength); + + // Draw function + function draw() { + animationFrame = requestAnimationFrame(draw); + + analyser.getByteFrequencyData(dataArray); + + // Calculate average amplitude for this frame + let sum = 0; + for (let i = 0; i < bufferLength; i++) { + sum += dataArray[i]; + } + const avgAmplitude = sum / bufferLength / 255; // Normalize to 0-1 + + // Store amplitude for this frame (keep as 0-1, we'll handle centering in drawing) + waveformData.push(avgAmplitude); + + // Calculate progress (how much of the recording time has elapsed) + const elapsed = Date.now() - recordingStartTime; + const elapsedSeconds = elapsed / 1000; + + // Check if we've hit the time limit FIRST (before clamping progress) + if (elapsedSeconds >= MAX_RECORDING_DURATION) { + console.log('[Recording] Time limit reached at', elapsedSeconds.toFixed(2), 'seconds, auto-stopping...'); + stopAndSendRecording(); + return; // Stop the animation loop + } + + const progress = Math.min(elapsed / (MAX_RECORDING_DURATION * 1000), 1); + const progressWidth = canvas.width * progress; + + // Check if dark mode is active + const isDarkMode = document.documentElement.getAttribute('data-bs-theme') === 'dark'; + + // Clear canvas with appropriate background color + canvasCtx.fillStyle = isDarkMode ? '#343a40' : '#f8f9fa'; + canvasCtx.fillRect(0, 0, canvas.width, canvas.height); + + // Draw unfilled area (dashed line at center) + canvasCtx.setLineDash([5, 5]); + canvasCtx.strokeStyle = isDarkMode ? 
'#495057' : '#dee2e6'; + canvasCtx.lineWidth = 1; + canvasCtx.beginPath(); + canvasCtx.moveTo(progressWidth, canvas.height / 2); + canvasCtx.lineTo(canvas.width, canvas.height / 2); + canvasCtx.stroke(); + canvasCtx.setLineDash([]); + + // Draw recorded waveform (filled area) - vertical bars + if (waveformData.length > 1) { + const centerY = canvas.height / 2; + const maxBarHeight = canvas.height * 1.95; // Bars can extend 48% of canvas height in each direction (96% total) + const barSpacing = 3; // Pixels between bars + const pointsToShow = Math.floor(progressWidth / barSpacing); + const step = waveformData.length / pointsToShow; + + // Determine waveform color based on progress + let waveformColor = '#0d6efd'; // Default blue + if (progress >= 0.95) { + waveformColor = '#dc3545'; // Red + } else if (progress >= 0.85) { + waveformColor = '#ffc107'; // Yellow + } + + canvasCtx.lineWidth = 2; + canvasCtx.strokeStyle = waveformColor; + + for (let i = 0; i < pointsToShow && i < waveformData.length; i++) { + const dataIndex = Math.floor(i * step); + const amplitude = waveformData[dataIndex]; + const x = i * barSpacing; + + // Draw vertical bar from center, extending both up and down + const barHeight = amplitude * maxBarHeight; + + canvasCtx.beginPath(); + canvasCtx.moveTo(x, centerY - barHeight); + canvasCtx.lineTo(x, centerY + barHeight); + canvasCtx.stroke(); + } + } + } + + draw(); +} + +/** + * Stop waveform visualization + */ +function stopWaveformVisualization() { + if (animationFrame) { + cancelAnimationFrame(animationFrame); + animationFrame = null; + } + + if (audioContext) { + audioContext.close(); + audioContext = null; + } + + analyser = null; +} + +/** + * Start countdown timer (progress bar) + */ +function startCountdown() { + const timerBar = document.getElementById('recording-timer-bar'); + if (!timerBar) return; + + const startTime = Date.now(); + const duration = MAX_RECORDING_DURATION * 1000; // Convert to milliseconds + + const updateProgress = () => { + const elapsed = Date.now() - startTime; + const remaining = duration - elapsed; + + if (remaining <= 0) { + // Time's up - auto stop recording + remainingTime = 0; + stopAndSendRecording(); + } else { + // Calculate percentage remaining based on actual elapsed time + const percentRemaining = (remaining / duration) * 100; + remainingTime = Math.ceil(remaining / 1000); + + // Update bar width using CSS variable + document.documentElement.style.setProperty('--recording-timer-width', percentRemaining + '%'); + + // Change color classes when time is running out + timerBar.classList.remove('warning', 'danger'); + if (percentRemaining <= 10) { + timerBar.classList.add('danger'); + } else if (percentRemaining <= 30) { + timerBar.classList.add('warning'); + } + + // Continue animation + countdownInterval = requestAnimationFrame(updateProgress); + } + }; + + // Start the animation loop + countdownInterval = requestAnimationFrame(updateProgress); +} + +/** + * Stop countdown timer + */ +function stopCountdown() { + if (countdownInterval) { + cancelAnimationFrame(countdownInterval); + countdownInterval = null; + } + + const timerBar = document.getElementById('recording-timer-bar'); + if (timerBar) { + document.documentElement.style.setProperty('--recording-timer-width', '100%'); + timerBar.classList.remove('warning', 'danger'); + } + + remainingTime = MAX_RECORDING_DURATION; +} + +/** + * Start auto-send countdown after transcription + */ +function startAutoSendCountdown() { + console.log('[Auto-Send] Starting countdown...'); + + 
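+ // Descriptive comment (added, based on the handlers below): the countdown gives the user a short window to review the transcription; clicking Send before it finishes cancels the auto-send (see cancelAutoSend).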
const totalCountdown = 5; // seconds + let countdown = totalCountdown; + const sendBtn = document.getElementById('send-btn'); + + if (!sendBtn) { + console.error('[Auto-Send] Send button not found!'); + return; + } + + console.log('[Auto-Send] Send button found, current conversation ID:', window.currentConversationId || 'NEW'); + + // Store original button state + const originalHTML = sendBtn.innerHTML; + const originalDisabled = sendBtn.disabled; + + // Add a progress background element + const progressBg = document.createElement('div'); + progressBg.style.cssText = ` + position: absolute; + top: 0; + left: 0; + height: 100%; + width: 0%; + background: linear-gradient(90deg, #0d6efd, #0dcaf0); + border-radius: 0.375rem; + transition: width 0.1s linear; + z-index: -1; + `; + sendBtn.style.position = 'relative'; + sendBtn.style.overflow = 'hidden'; + sendBtn.appendChild(progressBg); + + // Update button appearance for countdown mode + sendBtn.style.color = 'white'; + sendBtn.classList.add('btn-primary'); + sendBtn.classList.remove('btn-warning'); + + // Click handler to cancel auto-send + const cancelAutoSend = (event) => { + // Prevent default action and stop event propagation + event.preventDefault(); + event.stopPropagation(); + event.stopImmediatePropagation(); + + console.log('[Auto-Send] Cancelled by user'); + clearAutoSend(); + + // Remove progress background + if (progressBg.parentNode) { + progressBg.remove(); + } + + sendBtn.innerHTML = originalHTML; + sendBtn.disabled = originalDisabled; + sendBtn.style.color = ''; + sendBtn.classList.remove('btn-warning'); + sendBtn.classList.add('btn-primary'); + sendBtn.removeEventListener('click', cancelAutoSend, true); + showToast('Auto-send cancelled. Click Send when ready.', 'info'); + }; + + // Add event listener with capture phase to intercept before other handlers + sendBtn.addEventListener('click', cancelAutoSend, true); + + // Animation frame for smooth progress + const startTime = Date.now(); + const duration = totalCountdown * 1000; // milliseconds + + const updateProgress = () => { + const elapsed = Date.now() - startTime; + const progress = Math.min(elapsed / duration, 1); + const percentage = progress * 100; + + // Update progress background width + progressBg.style.width = percentage + '%'; + + if (progress < 1) { + autoSendCountdown = requestAnimationFrame(updateProgress); + } else { + // Countdown complete - send immediately + console.log('[Auto-Send] ===== COUNTDOWN COMPLETE ====='); + console.log('[Auto-Send] Current conversation ID:', window.currentConversationId || 'NEW'); + console.log('[Auto-Send] User input value:', document.getElementById('user-input')?.value); + console.log('[Auto-Send] Chatbox children count:', document.getElementById('chatbox')?.children.length); + + // Remove progress background + if (progressBg.parentNode) { + progressBg.remove(); + } + + // Restore button to original state + sendBtn.innerHTML = originalHTML; + sendBtn.disabled = originalDisabled; + sendBtn.style.color = ''; + sendBtn.classList.remove('btn-warning', 'auto-sending'); + sendBtn.classList.add('btn-primary'); + + // Remove the cancel listener + sendBtn.removeEventListener('click', cancelAutoSend, true); + + // Clear the auto-send state + autoSendCountdown = null; + autoSendTimeout = null; + + console.log('[Auto-Send] About to trigger click...'); + // Trigger the send by programmatically clicking the button + // This ensures all normal send handlers fire + requestAnimationFrame(() => { + console.log('[Auto-Send] Clicking send button 
NOW'); + sendBtn.click(); + console.log('[Auto-Send] Click triggered, conversation ID after:', window.currentConversationId || 'NEW'); + }); + } + }; + + // Start the animation + autoSendCountdown = requestAnimationFrame(updateProgress); + + // Also store timeout reference for cleanup + autoSendTimeout = autoSendCountdown; +} + +/** + * Clear auto-send countdown + */ +function clearAutoSend() { + if (autoSendCountdown) { + cancelAnimationFrame(autoSendCountdown); + autoSendCountdown = null; + } + + if (autoSendTimeout) { + clearTimeout(autoSendTimeout); + autoSendTimeout = null; + } +} + diff --git a/application/single_app/static/js/chat/chat-streaming.js b/application/single_app/static/js/chat/chat-streaming.js new file mode 100644 index 00000000..1519890a --- /dev/null +++ b/application/single_app/static/js/chat/chat-streaming.js @@ -0,0 +1,369 @@ +// chat-streaming.js +import { appendMessage, updateUserMessageId } from './chat-messages.js'; +import { hideLoadingIndicatorInChatbox, showLoadingIndicatorInChatbox } from './chat-loading-indicator.js'; +import { loadUserSettings, saveUserSetting } from './chat-layout.js'; +import { showToast } from './chat-toast.js'; +import { updateSidebarConversationTitle } from './chat-sidebar-conversations.js'; + +let streamingEnabled = false; +let currentEventSource = null; + +export function initializeStreamingToggle() { + const streamingToggleBtn = document.getElementById('streaming-toggle-btn'); + if (!streamingToggleBtn) { + console.warn('Streaming toggle button not found'); + return; + } + + console.log('Initializing streaming toggle...'); + + // Load initial state from user settings + loadUserSettings().then(settings => { + console.log('Loaded user settings:', settings); + streamingEnabled = settings.streamingEnabled === true; + console.log('Streaming enabled:', streamingEnabled); + updateStreamingButtonState(); + updateStreamingButtonVisibility(); + }).catch(error => { + console.error('Error loading streaming settings:', error); + }); + + // Handle toggle click + streamingToggleBtn.addEventListener('click', () => { + streamingEnabled = !streamingEnabled; + console.log('Streaming toggled to:', streamingEnabled); + + // Save the setting + console.log('Saving streaming setting...'); + saveUserSetting({ streamingEnabled }); + + updateStreamingButtonState(); + + const message = streamingEnabled + ? 
'Streaming enabled - responses will appear in real-time' + : 'Streaming disabled - responses will appear when complete'; + showToast(message, 'info'); + }); + + // Listen for agents toggle - hide streaming button when agents are active + const enableAgentsBtn = document.getElementById('enable-agents-btn'); + if (enableAgentsBtn) { + const observer = new MutationObserver(() => { + updateStreamingButtonVisibility(); + }); + observer.observe(enableAgentsBtn, { attributes: true, attributeFilter: ['class'] }); + } + + updateStreamingButtonVisibility(); +} + +function updateStreamingButtonState() { + const streamingToggleBtn = document.getElementById('streaming-toggle-btn'); + if (!streamingToggleBtn) return; + + // Check if TTS autoplay is enabled + let ttsAutoplayEnabled = false; + if (typeof window.appSettings !== 'undefined' && window.appSettings.enable_text_to_speech) { + const cachedSettings = JSON.parse(localStorage.getItem('userSettings') || '{}'); + ttsAutoplayEnabled = cachedSettings.settings?.ttsAutoplay === true; + } + + if (ttsAutoplayEnabled) { + // Disable streaming button when TTS autoplay is on + streamingToggleBtn.classList.remove('btn-primary'); + streamingToggleBtn.classList.add('btn-outline-secondary', 'disabled'); + streamingToggleBtn.disabled = true; + streamingToggleBtn.title = 'Streaming disabled - TTS autoplay is enabled. Disable TTS autoplay in your profile to enable streaming.'; + } else if (streamingEnabled) { + streamingToggleBtn.classList.remove('btn-outline-secondary', 'disabled'); + streamingToggleBtn.classList.add('btn-primary'); + streamingToggleBtn.disabled = false; + streamingToggleBtn.title = 'Streaming enabled - click to disable'; + } else { + streamingToggleBtn.classList.remove('btn-primary', 'disabled'); + streamingToggleBtn.classList.add('btn-outline-secondary'); + streamingToggleBtn.disabled = false; + streamingToggleBtn.title = 'Streaming disabled - click to enable'; + } +} + +/** + * Update streaming button visibility based on agent state + */ +function updateStreamingButtonVisibility() { + const streamingToggleBtn = document.getElementById('streaming-toggle-btn'); + const enableAgentsBtn = document.getElementById('enable-agents-btn'); + + if (!streamingToggleBtn) return; + + // Show streaming button even when agents are active (agents now support streaming) + streamingToggleBtn.style.display = 'flex'; +} + +export function isStreamingEnabled() { + // Check if TTS autoplay is enabled - streaming is incompatible with TTS autoplay + if (typeof window.appSettings !== 'undefined' && window.appSettings.enable_text_to_speech) { + // Dynamically check TTS settings + loadUserSettings().then(settings => { + if (settings.ttsAutoplay === true) { + console.log('TTS autoplay enabled - streaming disabled'); + } + }).catch(error => { + console.error('Error checking TTS settings:', error); + }); + + // Synchronous check using cached value if available + const cachedSettings = JSON.parse(localStorage.getItem('userSettings') || '{}'); + if (cachedSettings.settings?.ttsAutoplay === true) { + return false; // Disable streaming when TTS autoplay is active + } + } + + // Check if image generation is active - streaming is incompatible with image gen + const imageGenBtn = document.getElementById('image-generate-btn'); + if (imageGenBtn && imageGenBtn.classList.contains('active')) { + return false; // Disable streaming when image generation is active + } + return streamingEnabled; +} + +export function sendMessageWithStreaming(messageData, tempUserMessageId, 
currentConversationId) { + if (!streamingEnabled) { + return null; // Caller should use regular fetch + } + + // Double-check: never stream if image generation is active + const imageGenBtn = document.getElementById('image-generate-btn'); + if (imageGenBtn && imageGenBtn.classList.contains('active')) { + return null; // Force regular fetch for image generation + } + + // Close any existing connection + if (currentEventSource) { + currentEventSource.close(); + currentEventSource = null; + } + + // Create a unique message ID for the AI response + const tempAiMessageId = `temp_ai_${Date.now()}`; + let accumulatedContent = ''; + let streamError = false; + let streamErrorMessage = ''; + + // Create placeholder message with streaming indicator + appendMessage('AI', ' Streaming...', null, tempAiMessageId); + + // Create timeout (5 minutes) + const streamTimeout = setTimeout(() => { + if (currentEventSource) { + currentEventSource.close(); + currentEventSource = null; + streamError = true; + streamErrorMessage = 'Stream timeout (5 minutes exceeded)'; + handleStreamError(tempAiMessageId, accumulatedContent, streamErrorMessage); + } + }, 5 * 60 * 1000); // 5 minutes + + // Use fetch to POST, then read the streaming response + fetch('/api/chat/stream', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + credentials: 'same-origin', + body: JSON.stringify(messageData) + }).then(response => { + if (!response.ok) { + return response.json().then(errData => { + throw new Error(errData.error || `HTTP error! status: ${response.status}`); + }); + } + + // Read the streaming response + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + + function readStream() { + reader.read().then(({ done, value }) => { + if (done) { + clearTimeout(streamTimeout); + return; + } + + const chunk = decoder.decode(value, { stream: true }); + const lines = chunk.split('\n'); + + for (const line of lines) { + if (line.startsWith('data: ')) { + try { + const jsonStr = line.substring(6); // Remove 'data: ' + const data = JSON.parse(jsonStr); + + if (data.error) { + clearTimeout(streamTimeout); + streamError = true; + streamErrorMessage = data.error; + handleStreamError(tempAiMessageId, data.partial_content || accumulatedContent, data.error); + return; + } + + if (data.content) { + // Append chunk to accumulated content + accumulatedContent += data.content; + updateStreamingMessage(tempAiMessageId, accumulatedContent); + } + + if (data.done) { + clearTimeout(streamTimeout); + + // Update with final metadata + finalizeStreamingMessage( + tempAiMessageId, + tempUserMessageId, + data + ); + + currentEventSource = null; + return; + } + } catch (e) { + console.error('Error parsing SSE data:', e); + } + } + } + + readStream(); // Continue reading + }).catch(err => { + clearTimeout(streamTimeout); + console.error('Stream reading error:', err); + handleStreamError(tempAiMessageId, accumulatedContent, err.message); + }); + } + + readStream(); + + }).catch(error => { + clearTimeout(streamTimeout); + console.error('Streaming request error:', error); + showToast(`Error: ${error.message}`, 'error'); + + // Remove placeholder message + const msgElement = document.querySelector(`[data-message-id="${tempAiMessageId}"]`); + if (msgElement) { + msgElement.remove(); + } + }); + + return true; // Indicates streaming was initiated +} + +function updateStreamingMessage(messageId, content) { + const messageElement = document.querySelector(`[data-message-id="${messageId}"]`); + if (!messageElement) 
return; + + const contentElement = messageElement.querySelector('.message-text'); + if (contentElement) { + // Render markdown during streaming for proper formatting + if (typeof marked !== 'undefined' && typeof DOMPurify !== 'undefined') { + const renderedContent = DOMPurify.sanitize(marked.parse(content)); + contentElement.innerHTML = renderedContent; + } else { + contentElement.textContent = content; + } + + // Add subtle streaming cursor indicator + if (!messageElement.querySelector('.streaming-cursor')) { + const cursor = document.createElement('span'); + cursor.className = 'streaming-cursor'; + cursor.innerHTML = ' Streaming'; + contentElement.appendChild(cursor); + } + } +} + +function handleStreamError(messageId, partialContent, errorMessage) { + const messageElement = document.querySelector(`[data-message-id="${messageId}"]`); + if (!messageElement) return; + + const contentElement = messageElement.querySelector('.message-text'); + if (contentElement) { + // Remove streaming cursor + const cursor = contentElement.querySelector('.streaming-cursor'); + if (cursor) cursor.remove(); + + // Show partial content with error banner + let finalContent = partialContent || 'Stream interrupted before any content was received.'; + + // Parse markdown for partial content + if (typeof marked !== 'undefined' && typeof DOMPurify !== 'undefined') { + finalContent = DOMPurify.sanitize(marked.parse(finalContent)); + } + + contentElement.innerHTML = finalContent; + + // Add error banner + const errorBanner = document.createElement('div'); + errorBanner.className = 'alert alert-warning mt-2 mb-0'; + errorBanner.innerHTML = ` + + Stream interrupted: ${errorMessage} +
    + Response may be incomplete. The partial content above has been saved. + `; + contentElement.appendChild(errorBanner); + } + + showToast(`Stream error: ${errorMessage}`, 'error'); +} + +function finalizeStreamingMessage(messageId, userMessageId, finalData) { + const messageElement = document.querySelector(`[data-message-id="${messageId}"]`); + if (!messageElement) return; + + // Update user message ID first + if (finalData.user_message_id && userMessageId) { + updateUserMessageId(userMessageId, finalData.user_message_id); + } + + // Remove the temporary streaming message + messageElement.remove(); + + // Create proper message with all metadata using appendMessage + appendMessage( + 'AI', + finalData.full_content || '', + finalData.model_deployment_name, + finalData.message_id, + finalData.augmented, + finalData.hybrid_citations || [], + [], + finalData.agent_citations || [], + finalData.agent_display_name || null, + finalData.agent_name || null, + null, + true // isNewMessage - trigger autoplay for new streaming responses + ); + + // Update conversation if needed + if (finalData.conversation_id && window.currentConversationId !== finalData.conversation_id) { + window.currentConversationId = finalData.conversation_id; + } + + if (finalData.conversation_title) { + const titleElement = document.getElementById('current-conversation-title'); + if (titleElement && titleElement.textContent === 'New Conversation') { + titleElement.textContent = finalData.conversation_title; + } + + // Update sidebar conversation title in real-time + updateSidebarConversationTitle(finalData.conversation_id, finalData.conversation_title); + } +} + +export function cancelStreaming() { + if (currentEventSource) { + currentEventSource.close(); + currentEventSource = null; + showToast('Streaming cancelled', 'info'); + } +} diff --git a/application/single_app/static/js/chat/chat-tts.js b/application/single_app/static/js/chat/chat-tts.js new file mode 100644 index 00000000..23a48c70 --- /dev/null +++ b/application/single_app/static/js/chat/chat-tts.js @@ -0,0 +1,1056 @@ +// chat-tts.js - Text-to-Speech functionality for chat messages + +import { showToast } from './chat-toast.js'; + +// TTS State Management +let ttsEnabled = false; +let ttsAutoplay = false; +let ttsVoice = 'en-US-Andrew:DragonHDLatestNeural'; +let ttsSpeed = 1.0; +let currentPlayingAudio = null; +let currentPlayingMessageId = null; +let audioQueue = []; // Queue for chunked audio playback +let isQueueing = false; // Track if we're still loading chunks +let wordHighlightInterval = null; // Track word highlighting interval +let currentWordIndex = 0; // Current word being highlighted +let totalWords = 0; // Total words in current chunk +let wordOffset = 0; // Starting word index for current chunk +let highlightState = null; // Store state for pause/resume: { messageId, chunkText, duration, startWordIndex, msPerWord } + +// Audio visualization +let audioContext = null; +let analyser = null; +let volumeCheckInterval = null; +let currentAudioSource = null; + +/** + * Initialize TTS settings from user preferences + */ +export async function initializeTTS() { + try { + const response = await fetch('/api/user/settings'); + if (!response.ok) { + throw new Error('Failed to load user settings'); + } + + const data = await response.json(); + const settings = data.settings || {}; + + ttsEnabled = settings.ttsEnabled || false; + ttsAutoplay = settings.ttsAutoplay || false; + ttsVoice = settings.ttsVoice || 'en-US-Andrew:DragonHDLatestNeural'; + ttsSpeed = 
settings.ttsSpeed || 1.0; + + console.log('TTS initialized:', { ttsEnabled, ttsAutoplay, ttsVoice, ttsSpeed }); + + // Update button state after loading settings + updateAutoplayButton(); + + } catch (error) { + console.error('Error initializing TTS:', error); + } +} + +/** + * Check if TTS is enabled + */ +export function isTTSEnabled() { + return ttsEnabled; +} + +/** + * Check if TTS autoplay is enabled + */ +export function isTTSAutoplayEnabled() { + return ttsAutoplay; +} + +/** + * Play text-to-speech for a message with chunked delivery for faster start + */ +export async function playTTS(messageId, text) { + // Stop any currently playing audio + stopTTS(); + + if (!text || text.trim() === '') { + showToast('No text to read', 'warning'); + return; + } + + try { + // Update button to show loading state + updateTTSButton(messageId, 'loading'); + + // Strip HTML tags and get plain text + const tempDiv = document.createElement('div'); + tempDiv.innerHTML = text; + const plainText = tempDiv.textContent || tempDiv.innerText || ''; + + // Split text into word-based chunks + // Group 1: Progressive chunks (10, 15, 20, 25, 30 words) + // Group 2+: Remaining in 40-word chunks + const words = plainText.split(/\s+/); + const chunks = []; + + let index = 0; + + // Group 1: Progressive chunks + if (words.length > index) { + chunks.push(words.slice(index, index + 10).join(' ')); + index += 10; + } + if (words.length > index) { + chunks.push(words.slice(index, index + 15).join(' ')); + index += 15; + } + if (words.length > index) { + chunks.push(words.slice(index, index + 20).join(' ')); + index += 20; + } + if (words.length > index) { + chunks.push(words.slice(index, index + 25).join(' ')); + index += 25; + } + if (words.length > index) { + chunks.push(words.slice(index, index + 30).join(' ')); + index += 30; + } + + // Group 2+: Remaining words in 40-word chunks + while (index < words.length) { + chunks.push(words.slice(index, index + 40).join(' ')); + index += 40; + } + + console.log(`[TTS] Split into ${chunks.length} chunks:`, chunks.map(c => `${c.split(/\s+/).length} words`)); + + // Synthesize chunks 1 and 2 in parallel + const firstChunk = chunks.shift(); + const secondChunk = chunks.length > 0 ? 
chunks.shift() : null; + + console.log('[TTS] Synthesizing chunks 1 and 2 in parallel...'); + const parallelPromises = [synthesizeChunk(firstChunk, messageId)]; + if (secondChunk) { + parallelPromises.push(synthesizeChunk(secondChunk, messageId)); + } + + const [firstAudio, secondAudio] = await Promise.all(parallelPromises); + if (!firstAudio) return; + + // Track word offsets for each chunk + let currentWordOffset = 0; + const firstChunkWordCount = firstChunk.trim().split(/\s+/).length; + + // Queue chunk 2 immediately (it's already synthesized) + if (secondChunk && secondAudio) { + const secondChunkWordCount = secondChunk.trim().split(/\s+/).length; + audioQueue.push({ + audio: secondAudio, + url: secondAudio.src, + text: secondChunk, + wordOffset: firstChunkWordCount // Start after first chunk's words + }); + console.log('[TTS] Chunk 2 pre-queued, ready to play after chunk 1'); + } + + // Start playing first chunk + console.log('[TTS] Playing chunk 1 immediately'); + currentPlayingAudio = firstAudio; + currentPlayingMessageId = messageId; + + // Setup audio event handlers + currentPlayingAudio.onloadedmetadata = () => { + // Audio metadata loaded, duration is now available + const duration = currentPlayingAudio.duration; + startWordHighlighting(messageId, firstChunk, duration, 0); // Start at word 0 + }; + + // If metadata is already loaded, start highlighting immediately + if (currentPlayingAudio.duration && !isNaN(currentPlayingAudio.duration)) { + const duration = currentPlayingAudio.duration; + startWordHighlighting(messageId, firstChunk, duration, 0); + } + + currentPlayingAudio.onpause = () => { + updateTTSButton(messageId, 'paused'); + console.log('[TTS] Audio paused event fired'); + pauseWordHighlighting(); + }; + + currentPlayingAudio.onplay = () => { + console.log('[TTS] Audio play event fired, highlightState exists:', !!highlightState, 'interval is null:', wordHighlightInterval === null); + updateTTSButton(messageId, 'playing'); + highlightPlayingMessage(messageId, true); + // Resume word highlighting if we were paused (highlightState exists but no active interval) + if (highlightState && wordHighlightInterval === null) { + console.log('[TTS] Resuming from pause'); + resumeWordHighlighting(); + } + }; + + currentPlayingAudio.onended = () => { + // Play next chunk from queue if available + playNextChunk(messageId); + }; + + currentPlayingAudio.onerror = (error) => { + console.error('Audio playback error:', error); + showToast('Error playing audio', 'danger'); + updateTTSButton(messageId, 'stopped'); + highlightPlayingMessage(messageId, false); + currentPlayingAudio = null; + currentPlayingMessageId = null; + audioQueue = []; + }; + + // Start playback of first chunk + await currentPlayingAudio.play(); + + // Synthesize remaining chunks in groups while audio is playing + if (chunks.length > 0) { + isQueueing = true; + // Calculate starting word offset for remaining chunks (after chunks 1 and 2) + const firstChunkWords = firstChunk.trim().split(/\s+/).length; + const secondChunkWords = secondChunk ? 
secondChunk.trim().split(/\s+/).length : 0; + const startingOffset = firstChunkWords + secondChunkWords; + + queueChunksInGroups(chunks, messageId, startingOffset).then(() => { + isQueueing = false; + console.log(`[TTS] All chunks queued successfully`); + }).catch(error => { + console.error('[TTS] Error queueing chunks:', error); + isQueueing = false; + }); + } else { + console.log('[TTS] No remaining chunks - single chunk playback'); + } + + } catch (error) { + console.error('Error playing TTS:', error); + showToast(`TTS Error: ${error.message}`, 'danger'); + updateTTSButton(messageId, 'stopped'); + currentPlayingAudio = null; + currentPlayingMessageId = null; + audioQueue = []; + isQueueing = false; + } +} + +/** + * Synthesize a text chunk and return Audio element + */ +async function synthesizeChunk(text, messageId) { + try { + const response = await fetch('/api/chat/tts', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + text: text, + voice: ttsVoice, + speed: ttsSpeed + }) + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.error || 'Failed to generate speech'); + } + + // Get audio blob + const audioBlob = await response.blob(); + const audioUrl = URL.createObjectURL(audioBlob); + + return new Audio(audioUrl); + + } catch (error) { + console.error('Error synthesizing chunk:', error); + throw error; + } +} + +/** + * Queue chunks in groups with parallel synthesis: + * - Group 1: Chunks 3-7 all in parallel (5 chunks) + * - Group 2+: Remaining chunks in batches of 5, all parallel within each batch + */ +async function queueChunksInGroups(chunks, messageId, startingWordOffset = 0) { + console.log(`[TTS] Queueing ${chunks.length} remaining chunks in groups of 5 (parallel within each group)`); + + try { + let groupNum = 1; + let chunkNumOffset = 3; // Start at chunk 3 since chunks 1 and 2 are already handled + let currentWordOffset = startingWordOffset; + + while (chunks.length > 0) { + // Take up to 5 chunks for this group + const groupSize = Math.min(5, chunks.length); + const groupChunks = chunks.splice(0, groupSize); + + console.log(`[TTS] Group ${groupNum}: Synthesizing ${groupSize} chunks in parallel`); + + // Synthesize all chunks in this group in parallel + const synthesisPromises = groupChunks.map((text, index) => { + const chunkNum = chunkNumOffset + index; + const wordCount = text.split(/\s+/).length; + const thisChunkOffset = currentWordOffset; + + // Increment offset for next chunk + currentWordOffset += wordCount; + + console.log(`[TTS] Starting synthesis for chunk ${chunkNum} (${wordCount} words, offset: ${thisChunkOffset})`); + return synthesizeChunk(text, messageId).then(audio => ({ + chunkNum: chunkNum, + audio: audio, + url: audio ? 
audio.src : null, + text: text, + wordOffset: thisChunkOffset + })); + }); + + // Wait for all chunks in this group to complete + const results = await Promise.all(synthesisPromises); + + // Add to queue in order + results.forEach(result => { + if (result.audio) { + audioQueue.push({ + audio: result.audio, + url: result.url, + text: result.text, + wordOffset: result.wordOffset + }); + console.log(`[TTS] Chunk ${result.chunkNum} queued (${result.text.split(/\s+/).length} words, offset: ${result.wordOffset}), queue size: ${audioQueue.length}`); + } + }); + + console.log(`[TTS] Group ${groupNum} complete, ${chunks.length} chunks remaining`); + chunkNumOffset += groupSize; + groupNum++; + } + + console.log(`[TTS] All ${groupNum - 1} groups complete, total queue size: ${audioQueue.length}`); + + } catch (error) { + console.error('[TTS] Error in group queueing:', error); + throw error; + } +} + +/** + * Queue multiple text chunks for background synthesis (in parallel) + */ +async function queueMultipleChunks(chunks, messageId) { + console.log(`[TTS] Queueing ${chunks.length} chunks in parallel`); + + try { + // Start all syntheses in parallel + const synthesisPromises = chunks.map((text, index) => { + console.log(`[TTS] Starting synthesis for chunk ${index + 1}/${chunks.length}: ${text.split(/\s+/).length} words`); + return synthesizeChunk(text, messageId).then(audio => ({ + index: index, + audio: audio, + url: audio.src, + text: text + })); + }); + + // Wait for all to complete + const results = await Promise.all(synthesisPromises); + + // Sort by original order (in case they complete out of order) + results.sort((a, b) => a.index - b.index); + + // Add to queue in correct order + results.forEach((result, i) => { + audioQueue.push({ + audio: result.audio, + url: result.url, + text: result.text + }); + console.log(`[TTS] Queued chunk ${i + 1}: ${result.text.split(/\s+/).length} words, queue size: ${audioQueue.length}`); + }); + + console.log(`[TTS] All ${chunks.length} chunks synthesized and queued in parallel, final queue size: ${audioQueue.length}`); + + } catch (error) { + console.error('[TTS] Error during parallel synthesis:', error); + // Even if some fail, queue whatever succeeded + } +} + +/** + * Play next chunk from queue + */ +function playNextChunk(messageId) { + console.log(`[TTS] playNextChunk called - queue: ${audioQueue.length}, isQueueing: ${isQueueing}`); + + if (audioQueue.length === 0) { + // Check if we're still loading chunks + if (isQueueing) { + console.log('[TTS] Queue empty but still loading chunks, waiting...'); + // Wait a bit and try again + setTimeout(() => playNextChunk(messageId), 100); + return; + } + + // No more chunks, end playback + console.log('[TTS] Playback complete'); + updateTTSButton(messageId, 'stopped'); + highlightPlayingMessage(messageId, false); + currentPlayingAudio = null; + currentPlayingMessageId = null; + return; + } + + // Get next chunk + const nextChunk = audioQueue.shift(); + console.log(`[TTS] Playing next chunk, ${audioQueue.length} remaining in queue`); + + // Cleanup previous audio URL + if (currentPlayingAudio && currentPlayingAudio.src) { + URL.revokeObjectURL(currentPlayingAudio.src); + } + + currentPlayingAudio = nextChunk.audio; + + // Setup handlers for next chunk + currentPlayingAudio.onloadedmetadata = () => { + // Start word highlighting for this chunk when metadata is loaded + const duration = currentPlayingAudio.duration; + const chunkText = nextChunk.text || ''; + const wordOffset = nextChunk.wordOffset || 0; + 
startWordHighlighting(messageId, chunkText, duration, wordOffset); + }; + + // If metadata is already loaded, start highlighting immediately + if (currentPlayingAudio.duration && !isNaN(currentPlayingAudio.duration)) { + const duration = currentPlayingAudio.duration; + const chunkText = nextChunk.text || ''; + const wordOffset = nextChunk.wordOffset || 0; + startWordHighlighting(messageId, chunkText, duration, wordOffset); + } + + currentPlayingAudio.onpause = () => { + // Audio paused - pause word highlighting + console.log('[TTS] Chunk audio paused event fired'); + pauseWordHighlighting(); + }; + + currentPlayingAudio.onplay = () => { + // Audio playing/resumed - resume word highlighting and restart visualization + console.log('[TTS] Chunk audio play event fired, highlightState exists:', !!highlightState, 'interval is null:', wordHighlightInterval === null); + + // Restart audio visualization for new chunk + startAudioVisualization(messageId); + + if (highlightState && wordHighlightInterval === null) { + console.log('[TTS] Resuming from pause in chunk'); + resumeWordHighlighting(); + } + }; + + currentPlayingAudio.onended = () => { + URL.revokeObjectURL(nextChunk.url); + playNextChunk(messageId); + }; + + currentPlayingAudio.onerror = (error) => { + console.error('Error playing queued chunk:', error); + URL.revokeObjectURL(nextChunk.url); + playNextChunk(messageId); // Try next chunk + }; + + // Play next chunk + currentPlayingAudio.play().catch(error => { + console.error('Error starting next chunk:', error); + playNextChunk(messageId); // Try next chunk + }); +} + + +/** + * Stop currently playing TTS + */ +export function stopTTS() { + if (currentPlayingAudio) { + currentPlayingAudio.pause(); + currentPlayingAudio = null; + + if (currentPlayingMessageId) { + updateTTSButton(currentPlayingMessageId, 'stopped'); + highlightPlayingMessage(currentPlayingMessageId, false); + currentPlayingMessageId = null; + } + } + + // Clear audio queue and revoke URLs + audioQueue.forEach(chunk => { + if (chunk.url) { + URL.revokeObjectURL(chunk.url); + } + }); + audioQueue = []; + isQueueing = false; +} + +/** + * Pause currently playing TTS + */ +export function pauseTTS() { + if (currentPlayingAudio && !currentPlayingAudio.paused) { + currentPlayingAudio.pause(); + if (currentPlayingMessageId) { + updateTTSButton(currentPlayingMessageId, 'paused'); + } + } +} + +/** + * Resume paused TTS + */ +export function resumeTTS() { + if (currentPlayingAudio && currentPlayingAudio.paused) { + currentPlayingAudio.play(); + if (currentPlayingMessageId) { + updateTTSButton(currentPlayingMessageId, 'playing'); + } + } +} + +/** + * Update TTS button state + */ +function updateTTSButton(messageId, state) { + const button = document.querySelector(`[data-message-id="${messageId}"] .tts-play-btn`); + if (!button) { + console.log('[TTS] Button not found for message:', messageId); + return; + } + + const icon = button.querySelector('i'); + if (!icon) { + console.log('[TTS] Icon not found in button for message:', messageId); + return; + } + + // Remove all state classes + icon.classList.remove('bi-volume-up', 'bi-pause-fill', 'bi-stop-fill'); + button.classList.remove('btn-primary', 'btn-success', 'btn-warning'); + button.disabled = false; + + switch (state) { + case 'loading': + icon.className = 'bi bi-hourglass-split'; + button.disabled = true; + button.title = 'One moment, I’m taking a look'; + break; + + case 'playing': + icon.className = 'bi bi-pause-fill'; + button.classList.add('btn-success'); + button.title = 
'Hold on, pause what you are reading'; + break; + + case 'paused': + icon.className = 'bi bi-volume-up'; + button.classList.add('btn-warning'); + button.title = 'Go ahead, continue reading'; + break; + + case 'stopped': + default: + icon.className = 'bi bi-volume-up'; + button.title = 'Read this to me'; + break; + } +} + +/** + * Highlight message being read + */ +/** + * Prepare message text for word-by-word highlighting + */ +function prepareMessageForHighlighting(messageId) { + const messageElement = document.querySelector(`[data-message-id="${messageId}"]`); + if (!messageElement) return; + + const messageTextDiv = messageElement.querySelector('.message-text'); + if (!messageTextDiv || messageTextDiv.dataset.ttsWrapped === 'true') return; + + // Function to wrap words in text nodes only, not HTML + function wrapWordsInTextNodes(node) { + if (node.nodeType === Node.TEXT_NODE) { + // This is a text node - wrap its words + const text = node.textContent; + if (text.trim().length === 0) return; // Skip whitespace-only nodes + + const words = text.split(/(\s+)/); // Split but keep whitespace + const fragment = document.createDocumentFragment(); + + words.forEach(word => { + if (/\S/.test(word)) { + // Non-whitespace word - wrap it + const span = document.createElement('span'); + span.className = 'tts-word'; + span.textContent = word; + fragment.appendChild(span); + } else { + // Whitespace - keep as text + fragment.appendChild(document.createTextNode(word)); + } + }); + + node.parentNode.replaceChild(fragment, node); + } else if (node.nodeType === Node.ELEMENT_NODE) { + // This is an element - recurse into its children + // Convert to array to avoid live NodeList issues + Array.from(node.childNodes).forEach(child => wrapWordsInTextNodes(child)); + } + } + + wrapWordsInTextNodes(messageTextDiv); + messageTextDiv.dataset.ttsWrapped = 'true'; +} + +/** + * Start highlighting words progressively during playback + */ +function startWordHighlighting(messageId, chunkText, duration, startWordIndex = 0) { + // Clear any existing highlighting + stopWordHighlighting(); + + // Validate duration + if (!duration || duration === 0 || isNaN(duration)) { + console.log('[TTS] Invalid duration for word highlighting, skipping'); + return; + } + + // Prepare message for highlighting if not already done + prepareMessageForHighlighting(messageId); + + const messageElement = document.querySelector(`[data-message-id="${messageId}"]`); + if (!messageElement) return; + + const allWordElements = messageElement.querySelectorAll('.tts-word'); + if (allWordElements.length === 0) return; + + // Count words in this chunk + const chunkWords = chunkText.trim().split(/\s+/).length; + + // Calculate which words to highlight for this chunk + wordOffset = startWordIndex; + totalWords = Math.min(chunkWords, allWordElements.length - wordOffset); + currentWordIndex = 0; + + if (totalWords <= 0) { + console.log('[TTS] No words to highlight for this chunk'); + return; + } + + // Calculate time per word (in milliseconds) + const msPerWord = (duration * 1000) / totalWords; + + // Store state for pause/resume + highlightState = { + messageId: messageId, + chunkText: chunkText, + duration: duration, + startWordIndex: startWordIndex, + msPerWord: msPerWord, + allWordElements: allWordElements + }; + + console.log(`[TTS] Word highlighting: chunk has ${chunkWords} words, highlighting words ${wordOffset} to ${wordOffset + totalWords - 1}, ${duration.toFixed(2)}s duration, ${msPerWord.toFixed(0)}ms per word`); + + // Highlight first word 
immediately + const firstWordIndex = wordOffset; + if (firstWordIndex < allWordElements.length) { + allWordElements[firstWordIndex].classList.add('tts-current-word'); + } + + // Set interval to highlight next words + wordHighlightInterval = setInterval(() => { + // Check if audio is paused - if so, stop highlighting + if (currentPlayingAudio && currentPlayingAudio.paused) { + console.log('[TTS] Audio paused, stopping word highlight interval'); + pauseWordHighlighting(); + return; + } + + // Remove highlight from previous word + const prevIndex = wordOffset + currentWordIndex; + if (prevIndex < allWordElements.length) { + allWordElements[prevIndex].classList.remove('tts-current-word'); + } + + currentWordIndex++; + + // Add highlight to current word + const nextIndex = wordOffset + currentWordIndex; + if (currentWordIndex < totalWords && nextIndex < allWordElements.length) { + allWordElements[nextIndex].classList.add('tts-current-word'); + } else { + // Reached the end of this chunk, clear interval + stopWordHighlighting(); + } + }, msPerWord); +} + +/** + * Pause word highlighting (keep state for resume) + */ +function pauseWordHighlighting() { + console.log('[TTS] Pausing word highlighting, currentWordIndex:', currentWordIndex); + if (wordHighlightInterval) { + clearInterval(wordHighlightInterval); + wordHighlightInterval = null; + } + // Keep currentWordIndex, totalWords, wordOffset, and highlightState for resume +} + +/** + * Resume word highlighting from current audio position + */ +function resumeWordHighlighting() { + if (!highlightState || !currentPlayingAudio) return; + + const { messageId, msPerWord, allWordElements } = highlightState; + + // Calculate current word position based on audio time + const elapsedTime = currentPlayingAudio.currentTime * 1000; // Convert to ms + const calculatedWordIndex = Math.floor(elapsedTime / msPerWord); + + // Update currentWordIndex to match audio position + currentWordIndex = Math.min(calculatedWordIndex, totalWords - 1); + + console.log(`[TTS] Resuming word highlighting from word ${currentWordIndex} (audio time: ${currentPlayingAudio.currentTime.toFixed(2)}s)`); + + // Highlight current word + const currentIndex = wordOffset + currentWordIndex; + if (currentIndex < allWordElements.length) { + allWordElements[currentIndex].classList.add('tts-current-word'); + } + + // Continue highlighting from this point + wordHighlightInterval = setInterval(() => { + // Check if audio is paused - if so, stop highlighting + if (currentPlayingAudio && currentPlayingAudio.paused) { + console.log('[TTS] Audio paused during resume, stopping word highlight interval'); + pauseWordHighlighting(); + return; + } + + // Remove highlight from previous word + const prevIndex = wordOffset + currentWordIndex; + if (prevIndex < allWordElements.length) { + allWordElements[prevIndex].classList.remove('tts-current-word'); + } + + currentWordIndex++; + + // Add highlight to current word + const nextIndex = wordOffset + currentWordIndex; + if (currentWordIndex < totalWords && nextIndex < allWordElements.length) { + allWordElements[nextIndex].classList.add('tts-current-word'); + } else { + // Reached the end of this chunk, clear interval + stopWordHighlighting(); + } + }, msPerWord); +} + +/** + * Stop word highlighting + */ +function stopWordHighlighting() { + if (wordHighlightInterval) { + clearInterval(wordHighlightInterval); + wordHighlightInterval = null; + } + + // Remove all word highlights + if (currentPlayingMessageId) { + const messageElement = 
document.querySelector(`[data-message-id="${currentPlayingMessageId}"]`); + if (messageElement) { + const wordElements = messageElement.querySelectorAll('.tts-word'); + wordElements.forEach(word => word.classList.remove('tts-current-word')); + } + } + + currentWordIndex = 0; + totalWords = 0; + highlightState = null; +} + +/** + * Start audio visualization for avatar pulsing based on volume + */ +function startAudioVisualization(messageId) { + if (!currentPlayingAudio) return; + + try { + // Create AudioContext if not exists + if (!audioContext) { + audioContext = new (window.AudioContext || window.webkitAudioContext)(); + } + + // Create analyzer if not exists + if (!analyser) { + analyser = audioContext.createAnalyser(); + analyser.fftSize = 256; + } + + // Only create a new source if we don't have one or audio element changed + if (!currentAudioSource || currentAudioSource.mediaElement !== currentPlayingAudio) { + // Disconnect old source if exists + if (currentAudioSource) { + try { + currentAudioSource.disconnect(); + } catch (e) { + // Ignore disconnect errors + } + } + + // Create new source and connect + currentAudioSource = audioContext.createMediaElementSource(currentPlayingAudio); + currentAudioSource.connect(analyser); + analyser.connect(audioContext.destination); + } + + const dataArray = new Uint8Array(analyser.frequencyBinCount); + const avatar = document.querySelector(`[data-message-id="${messageId}"] .avatar`); + + if (!avatar) return; + + // Clear any existing interval + if (volumeCheckInterval) { + clearInterval(volumeCheckInterval); + } + + // Update avatar glow based on volume + volumeCheckInterval = setInterval(() => { + if (!currentPlayingAudio || currentPlayingAudio.paused || currentPlayingAudio.ended) { + return; // Don't stop completely, just pause updates + } + + analyser.getByteFrequencyData(dataArray); + + // Calculate average volume + const sum = dataArray.reduce((a, b) => a + b, 0); + const average = sum / dataArray.length; + + // Remove all volume classes + avatar.classList.remove('volume-low', 'volume-medium', 'volume-high', 'volume-peak'); + + // Add appropriate class based on volume level + if (average < 30) { + avatar.classList.add('volume-low'); + } else if (average < 60) { + avatar.classList.add('volume-medium'); + } else if (average < 90) { + avatar.classList.add('volume-high'); + } else { + avatar.classList.add('volume-peak'); + } + }, 50); // Update every 50ms for smooth visualization + + } catch (error) { + console.error('[TTS] Error setting up audio visualization:', error); + } +} + +/** + * Stop audio visualization + */ +function stopAudioVisualization(messageId) { + if (volumeCheckInterval) { + clearInterval(volumeCheckInterval); + volumeCheckInterval = null; + } + + // Remove volume classes from avatar + if (messageId) { + const avatar = document.querySelector(`[data-message-id="${messageId}"] .avatar`); + if (avatar) { + avatar.classList.remove('volume-low', 'volume-medium', 'volume-high', 'volume-peak'); + } + } +} + +function highlightPlayingMessage(messageId, highlight) { + const messageElement = document.querySelector(`[data-message-id="${messageId}"]`); + if (!messageElement) return; + + if (highlight) { + messageElement.classList.add('tts-playing'); + startAudioVisualization(messageId); + } else { + messageElement.classList.remove('tts-playing'); + stopAudioVisualization(messageId); + stopWordHighlighting(); + } +} + +/** + * Handle TTS button click + */ +export function handleTTSButtonClick(messageId, text) { + // If text is not 
provided, extract it from the DOM
+    if (!text || text.trim() === '') {
+        const messageElement = document.querySelector(`[data-message-id="${messageId}"]`);
+        if (messageElement) {
+            const messageTextDiv = messageElement.querySelector('.message-text');
+            if (messageTextDiv) {
+                text = messageTextDiv.innerText || messageTextDiv.textContent;
+            }
+        }
+
+        // If still no text, show error
+        if (!text || text.trim() === '') {
+            showToast('No text to read', 'warning');
+            return;
+        }
+    }
+
+    // If this message is currently playing, pause it
+    if (currentPlayingMessageId === messageId && currentPlayingAudio) {
+        if (currentPlayingAudio.paused) {
+            resumeTTS();
+        } else {
+            pauseTTS();
+        }
+    } else {
+        // Play this message
+        playTTS(messageId, text);
+    }
+}
+
+/**
+ * Create TTS button HTML
+ */
+export function createTTSButton(messageId) {
+    // NOTE: approximate markup – the class, icon and title mirror updateTTSButton();
+    // the onclick wiring through window.chatTTS is an assumption.
+    return `
+        <button type="button" class="btn btn-sm tts-play-btn" title="Read this to me"
+                onclick="window.chatTTS.handleButtonClick('${messageId}')">
+            <i class="bi bi-volume-up"></i>
+        </button>
+    `;
+}
+
+/**
+ * Auto-play TTS for new AI messages if enabled
+ */
+export function autoplayTTSIfEnabled(messageId, text) {
+    console.log('[TTS Autoplay] Check:', { ttsEnabled, ttsAutoplay, messageId, hasText: !!text });
+    if (ttsEnabled && ttsAutoplay) {
+        console.log('[TTS Autoplay] Playing message:', messageId);
+
+        // Wait for button to be rendered before playing
+        const waitForButton = (attempts = 0) => {
+            const button = document.querySelector(`[data-message-id="${messageId}"] .tts-play-btn`);
+
+            if (button) {
+                console.log('[TTS Autoplay] Button found, starting playback');
+                playTTS(messageId, text);
+            } else if (attempts < 10) {
+                // Retry up to 10 times (1 second total)
+                console.log(`[TTS Autoplay] Button not found, retry ${attempts + 1}/10`);
+                setTimeout(() => waitForButton(attempts + 1), 100);
+            } else {
+                console.warn('[TTS Autoplay] Button not found after 10 attempts, skipping autoplay');
+            }
+        };
+
+        // Start checking for button after small delay
+        setTimeout(() => waitForButton(), 100);
+    }
+}
+
+/**
+ * Toggle TTS autoplay on/off
+ */
+export async function toggleTTSAutoplay() {
+    ttsAutoplay = !ttsAutoplay;
+
+    console.log('[TTS Autoplay] Toggled to:', ttsAutoplay);
+
+    // Save to user settings
+    try {
+        const response = await fetch('/api/user/settings', {
+            method: 'POST',
+            headers: {
+                'Content-Type': 'application/json'
+            },
+            body: JSON.stringify({
+                settings: {
+                    ttsAutoplay: ttsAutoplay
+                }
+            })
+        });
+
+        if (!response.ok) {
+            throw new Error('Failed to save autoplay setting');
+        }
+
+        // Update button UI
+        updateAutoplayButton();
+
+        // Show toast notification
+        const message = ttsAutoplay ?
'AI Voice enabled' : 'AI Voice disabled'; + showToast(message, 'success'); + + } catch (error) { + console.error('Error saving AI Voice setting:', error); + showToast('Failed to save AI Voice setting', 'danger'); + // Revert the toggle + ttsAutoplay = !ttsAutoplay; + } +} + +/** + * Update the autoplay button UI based on current state + */ +export function updateAutoplayButton() { + const button = document.getElementById('tts-autoplay-toggle-btn'); + if (!button) return; + + const icon = button.querySelector('i'); + if (ttsAutoplay) { + icon.className = 'bi bi-volume-up-fill'; + button.title = 'Auto voice response enabled - click to disable'; + button.classList.remove('btn-outline-secondary'); + button.classList.add('btn-primary'); + } else { + icon.className = 'bi bi-volume-mute'; + button.title = 'Auto voice response disabled - click to enable'; + button.classList.remove('btn-primary'); + button.classList.add('btn-outline-secondary'); + } +} + +/** + * Initialize the autoplay button state and event listener + */ +export function initializeAutoplayButton() { + const button = document.getElementById('tts-autoplay-toggle-btn'); + if (!button) return; + + button.addEventListener('click', toggleTTSAutoplay); + updateAutoplayButton(); +} + +// Export functions for global access +window.chatTTS = { + handleButtonClick: handleTTSButtonClick, + stop: stopTTS, + pause: pauseTTS, + resume: resumeTTS, + toggleAutoplay: toggleTTSAutoplay +}; + +// Initialize autoplay button when module loads +initializeAutoplayButton(); diff --git a/application/single_app/static/js/control-center-sidebar-nav.js b/application/single_app/static/js/control-center-sidebar-nav.js new file mode 100644 index 00000000..1af23e43 --- /dev/null +++ b/application/single_app/static/js/control-center-sidebar-nav.js @@ -0,0 +1,267 @@ +// Control Center Sidebar Navigation +document.addEventListener('DOMContentLoaded', function() { + // Only initialize if we're on control center page with sidebar nav + if (!document.getElementById('control-center-toggle')) return; + + // Initialize control center sidebar + initControlCenterSidebarNav(); +}); + +function initControlCenterSidebarNav() { + // Set up collapsible control center section + const controlCenterToggle = document.getElementById('control-center-toggle'); + const controlCenterSection = document.getElementById('control-center-section'); + const controlCenterCaret = document.getElementById('control-center-caret'); + const controlCenterSearchBtn = document.getElementById('control-center-search-btn'); + const controlCenterSearchContainer = document.getElementById('control-center-search-container'); + const controlCenterSearchInput = document.getElementById('control-center-search-input'); + const controlCenterSearchClear = document.getElementById('control-center-search-clear'); + + if (controlCenterToggle) { + controlCenterToggle.addEventListener('click', function(e) { + // Don't toggle if clicking on search button + if (e.target.closest('#control-center-search-btn')) { + return; + } + + const isCollapsed = controlCenterSection.style.display === 'none'; + controlCenterSection.style.display = isCollapsed ? 
'block' : 'none'; + controlCenterCaret.classList.toggle('rotate-180', !isCollapsed); + }); + } + + // Set up control center search functionality + if (controlCenterSearchBtn) { + controlCenterSearchBtn.addEventListener('click', function(e) { + e.stopPropagation(); + const isVisible = controlCenterSearchContainer.style.display !== 'none'; + controlCenterSearchContainer.style.display = isVisible ? 'none' : 'block'; + + if (!isVisible) { + // Ensure control center section is expanded when search is opened + controlCenterSection.style.display = 'block'; + controlCenterCaret.classList.add('rotate-180'); + + // Focus on search input + setTimeout(() => controlCenterSearchInput.focus(), 100); + } else { + // Clear search when hiding + clearControlCenterSearch(); + } + }); + } + + // Set up search input functionality + if (controlCenterSearchInput) { + controlCenterSearchInput.addEventListener('input', function() { + filterControlCenterSections(this.value); + }); + + controlCenterSearchInput.addEventListener('keydown', function(e) { + if (e.key === 'Escape') { + clearControlCenterSearch(); + controlCenterSearchContainer.style.display = 'none'; + } + }); + } + + // Set up clear button + if (controlCenterSearchClear) { + controlCenterSearchClear.addEventListener('click', function() { + clearControlCenterSearch(); + }); + } + + // Set up tab navigation + document.querySelectorAll('.control-center-nav-tab').forEach(tabLink => { + tabLink.addEventListener('click', function(e) { + e.preventDefault(); + const tabId = this.getAttribute('data-tab'); + showControlCenterTab(tabId); + + // Update active state for main tabs + document.querySelectorAll('.control-center-nav-tab').forEach(link => { + link.classList.remove('active'); + }); + this.classList.add('active'); + + // Clear section active states + document.querySelectorAll('.control-center-nav-section').forEach(link => { + link.classList.remove('active'); + }); + + // Toggle submenu if it exists + const submenu = document.getElementById(tabId + '-submenu'); + if (submenu) { + const isVisible = submenu.style.display !== 'none'; + + // Close all other submenus first + document.querySelectorAll('[id$="-submenu"]').forEach(menu => { + if (menu !== submenu) { + menu.style.display = 'none'; + } + }); + + // Toggle the current submenu + submenu.style.display = isVisible ? 
'none' : 'block'; + } else { + // Close all submenus if this tab doesn't have one + document.querySelectorAll('[id$="-submenu"]').forEach(menu => { + menu.style.display = 'none'; + }); + } + }); + }); + + // Set up section navigation + document.querySelectorAll('.control-center-nav-section').forEach(sectionLink => { + sectionLink.addEventListener('click', function(e) { + e.preventDefault(); + const tabId = this.getAttribute('data-tab'); + const sectionId = this.getAttribute('data-section'); + showControlCenterTab(tabId); + scrollToSection(sectionId); + + // Update active state + document.querySelectorAll('.control-center-nav-section').forEach(link => { + link.classList.remove('active'); + }); + this.classList.add('active'); + }); + }); + + // Set the initial active tab (Dashboard) - but only if no tab is already active + const activeTab = document.querySelector('.control-center-nav-tab.active, .control-center-nav-section.active'); + if (!activeTab) { + const firstTab = document.querySelector('.control-center-nav-tab[data-tab="dashboard"]'); + if (firstTab) { + firstTab.classList.add('active'); + showControlCenterTab('dashboard'); + } + } else { + console.log('initControlCenterSidebarNav - Found existing active tab, preserving current state:', activeTab.getAttribute('data-tab')); + } +} + +function showControlCenterTab(tabId) { + // Hide all tab panes + document.querySelectorAll('.tab-pane').forEach(pane => { + pane.classList.remove('show', 'active'); + }); + + // Show the selected tab pane + const targetPane = document.getElementById(tabId); + if (targetPane) { + targetPane.classList.add('show', 'active'); + } + + // Load tab-specific data when Activity Logs tab is shown + if (tabId === 'activity-logs' && window.controlCenter) { + console.log('Activity Logs tab activated via sidebar, loading logs...'); + setTimeout(() => { + window.controlCenter.loadActivityLogs(); + }, 100); + } + + // Update Bootstrap tab buttons (if using top tabs instead of sidebar) + const targetTabBtn = document.querySelector(`[data-bs-target="#${tabId}"]`); + if (targetTabBtn) { + // Remove active from all tab buttons + document.querySelectorAll('[data-bs-toggle="tab"]').forEach(btn => { + btn.classList.remove('active'); + btn.setAttribute('aria-selected', 'false'); + }); + + // Activate the target tab button + targetTabBtn.classList.add('active'); + targetTabBtn.setAttribute('aria-selected', 'true'); + } + + // Initialize tab-specific functionality if needed + if (typeof window.initializeControlCenterTab === 'function') { + window.initializeControlCenterTab(tabId); + } +} + +function scrollToSection(sectionId) { + setTimeout(() => { + const element = document.getElementById(sectionId); + if (element) { + element.scrollIntoView({ + behavior: 'smooth', + block: 'start', + inline: 'nearest' + }); + + // Add highlight effect + element.classList.add('highlight-section'); + setTimeout(() => { + element.classList.remove('highlight-section'); + }, 2000); + } + }, 100); +} + +function clearControlCenterSearch() { + const searchInput = document.getElementById('control-center-search-input'); + if (searchInput) { + searchInput.value = ''; + filterControlCenterSections(''); + } +} + +function filterControlCenterSections(searchTerm) { + const term = searchTerm.toLowerCase().trim(); + const allNavItems = document.querySelectorAll('#control-center-section .nav-item'); + + allNavItems.forEach(item => { + const link = item.querySelector('a'); + if (link) { + const text = link.textContent.toLowerCase(); + const shouldShow = term === 
'' || text.includes(term); + item.style.display = shouldShow ? '' : 'none'; + } + }); + + // If searching, expand all submenus to show matches + if (term) { + document.querySelectorAll('[id$="-submenu"]').forEach(submenu => { + const hasVisibleItems = Array.from(submenu.querySelectorAll('.nav-item')) + .some(item => item.style.display !== 'none'); + + if (hasVisibleItems) { + submenu.style.display = 'block'; + } + }); + } +} + +// Add CSS for highlight effect +if (!document.getElementById('control-center-sidebar-styles')) { + const style = document.createElement('style'); + style.id = 'control-center-sidebar-styles'; + style.textContent = ` + .rotate-180 { + transform: rotate(180deg); + } + + .highlight-section { + background-color: rgba(13, 110, 253, 0.1) !important; + border-left: 4px solid #0d6efd !important; + transition: all 0.3s ease; + } + + .control-center-nav-tab.active, + .control-center-nav-section.active { + background-color: rgba(13, 110, 253, 0.1); + color: #0d6efd; + font-weight: 600; + } + + .control-center-nav-tab:hover, + .control-center-nav-section:hover { + background-color: rgba(0, 0, 0, 0.05); + } + `; + document.head.appendChild(style); +} \ No newline at end of file diff --git a/application/single_app/static/js/control-center.js b/application/single_app/static/js/control-center.js new file mode 100644 index 00000000..7e79d22a --- /dev/null +++ b/application/single_app/static/js/control-center.js @@ -0,0 +1,3999 @@ +// control-center.js +// Control Center JavaScript functionality +// Handles user management, pagination, modals, and API interactions + +import { showToast } from "./chat/chat-toast.js"; + +function parseDateKey(dateStr) { + if (!dateStr) { + return null; + } + + const parts = dateStr.split("-"); + if (parts.length === 3) { + const year = Number(parts[0]); + const month = Number(parts[1]); + const day = Number(parts[2]); + if (Number.isFinite(year) && Number.isFinite(month) && Number.isFinite(day)) { + return new Date(year, month - 1, day); + } + } + + const parsed = new Date(dateStr); + return Number.isNaN(parsed.getTime()) ? null : parsed; +} + +// Group Table Sorter - similar to user table but for groups +class GroupTableSorter { + constructor(tableId) { + this.table = document.getElementById(tableId); + this.currentSort = { column: null, direction: 'asc' }; + this.initializeSorting(); + } + + initializeSorting() { + if (!this.table) return; + + const headers = this.table.querySelectorAll('th.sortable'); + headers.forEach(header => { + header.addEventListener('click', () => { + const sortKey = header.getAttribute('data-sort'); + this.sortTable(sortKey, header); + }); + }); + } + + sortTable(sortKey, headerElement) { + const tbody = this.table.querySelector('tbody'); + const rows = Array.from(tbody.querySelectorAll('tr')).filter(row => + !row.querySelector('td[colspan]') // Exclude loading/empty rows + ); + + // Toggle sort direction + if (this.currentSort.column === sortKey) { + this.currentSort.direction = this.currentSort.direction === 'asc' ? 'desc' : 'asc'; + } else { + this.currentSort.direction = 'asc'; + } + this.currentSort.column = sortKey; + + // Remove sorting classes from all headers + this.table.querySelectorAll('th.sortable').forEach(th => { + th.classList.remove('sort-asc', 'sort-desc'); + }); + + // Add sorting class to current header + headerElement.classList.add(this.currentSort.direction === 'asc' ? 
'sort-asc' : 'sort-desc'); + + // Sort rows + const sortedRows = rows.sort((a, b) => { + let aValue = this.getCellValue(a, sortKey); + let bValue = this.getCellValue(b, sortKey); + + // Handle different data types + if (sortKey === 'members' || sortKey === 'documents') { + // Numeric sorting for numbers and dates + aValue = this.parseNumericValue(aValue); + bValue = this.parseNumericValue(bValue); + + if (this.currentSort.direction === 'asc') { + return aValue - bValue; + } else { + return bValue - aValue; + } + } else { + // String sorting for text values + const result = aValue.localeCompare(bValue, undefined, { numeric: true, sensitivity: 'base' }); + return this.currentSort.direction === 'asc' ? result : -result; + } + }); + + // Clear tbody and append sorted rows + tbody.innerHTML = ''; + sortedRows.forEach(row => tbody.appendChild(row)); + } + + getCellValue(row, sortKey) { + const cellIndex = this.getColumnIndex(sortKey); + if (cellIndex === -1) return ''; + + const cell = row.cells[cellIndex]; + if (!cell) return ''; + + // Extract text content, handling different cell structures + let value = ''; + + switch (sortKey) { + case 'name': + // Extract group name + const nameElement = cell.querySelector('.fw-bold') || cell; + value = nameElement.textContent.trim(); + break; + case 'owner': + // Extract owner name + value = cell.textContent.trim(); + break; + case 'members': + // Extract member count + const memberText = cell.textContent.trim(); + const memberMatch = memberText.match(/(\d+)/); + value = memberMatch ? memberMatch[1] : '0'; + break; + case 'status': + // Extract status from badge + const statusBadge = cell.querySelector('.group-status-badge, .badge'); + value = statusBadge ? statusBadge.textContent.trim() : cell.textContent.trim(); + break; + case 'documents': + // Extract document count + const docText = cell.textContent.trim(); + const docMatch = docText.match(/(\d+)/); + value = docMatch ? docMatch[1] : '0'; + break; + default: + value = cell.textContent.trim(); + } + + return value; + } + + getColumnIndex(sortKey) { + const headers = this.table.querySelectorAll('th'); + for (let i = 0; i < headers.length; i++) { + if (headers[i].getAttribute('data-sort') === sortKey) { + return i; + } + } + return -1; + } + + parseNumericValue(value) { + if (!value || value === '' || value.toLowerCase() === 'never') return 0; + + // Extract numeric value from string + const numMatch = value.match(/(\d+)/); + return numMatch ? 
parseInt(numMatch[1]) : 0; + } +} + +class ControlCenter { + constructor() { + this.currentPage = 1; + this.usersPerPage = 50; + this.searchTerm = ''; + this.accessFilter = 'all'; + this.selectedUsers = new Set(); + this.selectedGroups = new Set(); + this.selectedPublicWorkspaces = new Set(); + this.currentUser = null; + this.loginsChart = null; + this.chatsChart = null; + this.documentsChart = null; + this.tokensChart = null; + this.currentTrendDays = 30; + + // Activity Logs state + this.activityLogsPage = 1; + this.activityLogsPerPage = 50; + this.activityLogsSearch = ''; + this.activityTypeFilter = 'all'; + + this.init(); + } + + init() { + this.bindEvents(); + + // Check if user has admin role (passed from backend) + const hasAdminRole = window.hasControlCenterAdmin === true; + + // Only load admin features if user has ControlCenterAdmin role + if (hasAdminRole) { + this.loadUsers(); + + // Also load groups and public workspaces on initial page load + // This ensures they get their cached metrics on first load + setTimeout(() => { + this.loadGroups(); + this.loadPublicWorkspaces(); + }, 500); // Small delay to ensure DOM is ready + } + + // Always load activity trends (available to all Control Center users) + this.loadActivityTrends(); + } + + bindEvents() { + // Tab switching + document.getElementById('users-tab')?.addEventListener('click', () => { + setTimeout(() => this.loadUsers(), 100); + }); + + document.getElementById('groups-tab')?.addEventListener('click', () => { + setTimeout(() => this.loadGroups(), 100); + }); + + document.getElementById('workspaces-tab')?.addEventListener('click', () => { + setTimeout(() => this.loadPublicWorkspaces(), 100); + }); + + document.getElementById('activity-logs-tab')?.addEventListener('click', () => { + console.log('Activity Logs tab clicked!'); + setTimeout(() => { + console.log('Calling loadActivityLogs...'); + this.loadActivityLogs(); + }, 100); + }); + + // Also use shown.bs.tab as backup + document.getElementById('activity-logs-tab')?.addEventListener('shown.bs.tab', () => { + console.log('Activity Logs tab shown event fired'); + }); + + // Search and filter controls + document.getElementById('userSearchInput')?.addEventListener('input', + this.debounce(() => this.handleSearchChange(), 300)); + document.getElementById('accessFilterSelect')?.addEventListener('change', + () => this.handleFilterChange()); + + // Public workspace search and filter controls + document.getElementById('publicWorkspaceSearchInput')?.addEventListener('input', + this.debounce((e) => this.searchPublicWorkspaces(e.target.value), 300)); + document.getElementById('publicWorkspaceStatusFilterSelect')?.addEventListener('change', + (e) => this.filterPublicWorkspacesByStatus(e.target.value)); + + // Export buttons + document.getElementById('exportGroupsBtn')?.addEventListener('click', + () => this.exportGroupsToCSV()); + document.getElementById('exportPublicWorkspacesBtn')?.addEventListener('click', + () => this.exportPublicWorkspacesToCSV()); + + // Bulk action buttons + document.getElementById('bulkPublicWorkspaceActionBtn')?.addEventListener('click', + () => this.showPublicWorkspaceBulkActionModal()); + + // Select all checkboxes + document.getElementById('selectAllPublicWorkspaces')?.addEventListener('change', + (e) => this.handleSelectAllPublicWorkspaces(e)); + + // Additional refresh buttons + document.getElementById('refreshGroupsBtn')?.addEventListener('click', + () => this.loadGroups()); + 
document.getElementById('refreshPublicWorkspacesBtn')?.addEventListener('click', + () => this.refreshPublicWorkspaces()); + + // Refresh buttons - these reload cached data, don't recalculate metrics + document.getElementById('refreshUsersBtn')?.addEventListener('click', + () => this.loadUsers()); + document.getElementById('refreshStatsBtn')?.addEventListener('click', + () => this.refreshStats()); + + // Export button + document.getElementById('exportUsersBtn')?.addEventListener('click', + () => this.exportUsersToCSV()); + + // User selection + document.getElementById('selectAllUsers')?.addEventListener('change', + (e) => this.handleSelectAll(e)); + + // Bulk actions + document.getElementById('bulkActionBtn')?.addEventListener('click', + () => this.showBulkActionModal()); + document.getElementById('executeBulkActionBtn')?.addEventListener('click', + () => this.executeBulkAction()); + + // User management modal + document.getElementById('saveUserChangesBtn')?.addEventListener('click', + () => this.saveUserChanges()); + document.getElementById('deleteUserDocumentsBtn')?.addEventListener('click', + () => this.deleteUserDocuments()); + document.getElementById('confirmDeleteUserDocumentsBtn')?.addEventListener('click', + () => this.confirmDeleteUserDocuments()); + + // Modal controls + document.getElementById('accessStatusSelect')?.addEventListener('change', + () => this.toggleAccessDateTime()); + document.getElementById('fileUploadStatusSelect')?.addEventListener('change', + () => this.toggleFileUploadDateTime()); + document.getElementById('bulkActionType')?.addEventListener('change', + () => this.toggleBulkActionSettings()); + document.getElementById('bulkStatusSelect')?.addEventListener('change', + () => this.toggleBulkDateTime()); + + // Alert action buttons + document.querySelectorAll('[data-action]').forEach(btn => { + btn.addEventListener('click', (e) => { + const action = e.target.getAttribute('data-action'); + if (action === 'View Users') { + this.switchToUsersTab(); + } + }); + }); + + // Activity trends time period buttons + document.getElementById('trend-7days')?.addEventListener('click', + () => this.changeTrendPeriod(7)); + document.getElementById('trend-30days')?.addEventListener('click', + () => this.changeTrendPeriod(30)); + document.getElementById('trend-90days')?.addEventListener('click', + () => this.changeTrendPeriod(90)); + document.getElementById('trend-custom')?.addEventListener('click', + () => this.toggleCustomDateRange()); + + // Custom date range handlers + document.getElementById('applyCustomRange')?.addEventListener('click', + () => this.applyCustomDateRange()); + + // Export functionality + document.getElementById('executeExportBtn')?.addEventListener('click', + () => this.exportActivityTrends()); + + // Export modal - show/hide custom date range based on radio selection + document.querySelectorAll('input[name="exportTimeWindow"]').forEach(radio => { + radio.addEventListener('change', () => this.toggleExportCustomDateRange()); + }); + + // Chat functionality + document.getElementById('executeChatBtn')?.addEventListener('click', + () => this.chatActivityTrends()); + + // Chat modal - show/hide custom date range based on radio selection + document.querySelectorAll('input[name="chatTimeWindow"]').forEach(radio => { + radio.addEventListener('change', () => this.toggleChatCustomDateRange()); + }); + + // Activity Logs event handlers + document.getElementById('activityLogsSearchInput')?.addEventListener('input', + this.debounce(() => 
this.handleActivityLogsSearchChange(), 300)); + document.getElementById('activityTypeFilterSelect')?.addEventListener('change', + () => this.handleActivityLogsFilterChange()); + document.getElementById('activityLogsPerPageSelect')?.addEventListener('change', + (e) => this.handleActivityLogsPerPageChange(e)); + document.getElementById('exportActivityLogsBtn')?.addEventListener('click', + () => this.exportActivityLogsToCSV()); + document.getElementById('refreshActivityLogsBtn')?.addEventListener('click', + () => this.loadActivityLogs()); + } + + debounce(func, wait) { + let timeout; + return function executedFunction(...args) { + const later = () => { + clearTimeout(timeout); + func(...args); + }; + clearTimeout(timeout); + timeout = setTimeout(later, wait); + }; + } + + async loadUsers() { + this.showLoading(true); + + try { + const params = new URLSearchParams({ + page: this.currentPage, + per_page: this.usersPerPage, + search: this.searchTerm, + access_filter: this.accessFilter + }); + + const response = await fetch(`/api/admin/control-center/users?${params}`); + const data = await response.json(); + + if (response.ok) { + this.renderUsers(data.users); + this.renderPagination(data.pagination); + } else { + this.showError('Failed to load users: ' + (data.error || 'Unknown error')); + } + } catch (error) { + this.showError('Network error: ' + error.message); + } finally { + this.showLoading(false); + } + } + + renderUsers(users) { + const tbody = document.getElementById('usersTableBody'); + if (!tbody) return; + + if (users.length === 0) { + tbody.innerHTML = ` + + + +
    No users found
    + + + `; + return; + } + + tbody.innerHTML = users.map(user => { + const isSelected = this.selectedUsers.has(user.id); + return ` + + + + + +
    + ${this.renderUserAvatar(user)} +
    +
    ${this.escapeHtml(user.display_name || 'Unknown User')}
    +
    ${this.escapeHtml(user.email || '')}
    +
    ID: ${user.id}
    +
    +
    + + + ${this.renderAccessBadge(user.access_status)} + + + ${this.renderFileUploadBadge(user.file_upload_status)} + + + ${this.renderLoginActivity(user.activity.login_metrics)} + + + ${this.renderChatMetrics(user.activity.chat_metrics)} + + + ${this.renderDocumentMetrics(user.activity.document_metrics)} + + + + + + `; + }).join(''); + + // Bind checkbox events + tbody.querySelectorAll('.user-checkbox').forEach(checkbox => { + checkbox.addEventListener('change', (e) => this.handleUserSelection(e)); + }); + + this.updateBulkActionButton(); + } + + renderAccessBadge(status) { + if (status === 'allow') { + return 'Allowed'; + } else if (status === 'deny') { + return 'Denied'; + } else if (status.startsWith('deny_until_')) { + const dateStr = status.substring(11); + return `Temporary`; + } + return 'Unknown'; + } + + renderFileUploadBadge(status) { + if (status === 'allow') { + return 'Allowed'; + } else if (status === 'deny') { + return 'Denied'; + } else if (status.startsWith('deny_until_')) { + const dateStr = status.substring(11); + return `Temporary`; + } + return 'Unknown'; + } + + renderUserAvatar(user) { + if (user.profile_image) { + return ` + ${this.escapeHtml(user.display_name || user.email)} + `; + } else { + // Fallback to initials + const initials = (user.display_name || user.email || 'U').slice(0, 2).toUpperCase(); + return ` +
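// Illustrative sketch: renderAccessBadge() and renderFileUploadBadge() above treat
// the status string as a small encoding - 'allow', 'deny', or 'deny_until_<date>'.
// The date is recovered by slicing off the 11-character 'deny_until_' prefix.
// The value below is hypothetical:
const exampleStatus = 'deny_until_2025-01-31';
if (exampleStatus.startsWith('deny_until_')) {
    const exampleDateStr = exampleStatus.substring('deny_until_'.length); // same as substring(11)
    console.log(exampleDateStr); // "2025-01-31"
}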
    + + ${initials} + +
    + `; + } + } + + renderChatMetrics(chatMetrics) { + const totalConversations = chatMetrics?.total_conversations || 0; + const totalMessages = chatMetrics?.total_messages || 0; + const messageSize = chatMetrics?.total_message_size || 0; + + return ` +
    +
    Total: ${totalConversations} convos
    +
    Messages: ${totalMessages}
    +
    Size: ${this.formatBytes(messageSize)}
    +
    + `; + } + + renderDocumentMetrics(docMetrics) { + const totalDocs = docMetrics?.total_documents || 0; + const aiSearchSize = docMetrics?.ai_search_size || 0; + const storageSize = docMetrics?.storage_account_size || 0; + // Always get enhanced citation setting from app settings, not user data + const enhancedCitation = (typeof appSettings !== 'undefined' && appSettings.enable_enhanced_citations) || false; + const personalWorkspace = docMetrics?.personal_workspace_enabled; + + let html = ` +
    +
    Total Docs: ${totalDocs}
    +
    AI Search: ${this.formatBytes(aiSearchSize)}
    + `; + + if (enhancedCitation) { + html += `
    Storage: ${this.formatBytes(storageSize)}
    `; + } + + const features = []; + if (personalWorkspace) features.push('Personal'); + if (enhancedCitation) features.push('Enhanced'); + + if (features.length > 0) { + html += `
    (${features.join(' + ')})
    `; + } + + html += '
    '; + return html; + } + + renderGroupDocumentMetrics(docMetrics) { + const totalDocs = docMetrics?.total_documents || 0; + const aiSearchSize = docMetrics?.ai_search_size || 0; + const storageSize = docMetrics?.storage_account_size || 0; + // Always get enhanced citation setting from app settings, not user data + const enhancedCitation = (typeof appSettings !== 'undefined' && appSettings.enable_enhanced_citations) || false; + + let html = ` +
    +
    Total Docs: ${totalDocs}
    +
    AI Search: ${this.formatBytes(aiSearchSize)}
    + `; + + if (enhancedCitation) { + html += `
    Storage: ${this.formatBytes(storageSize)}
    `; + } + + html += '
    (Enhanced)
    '; + html += '
    '; + return html; + } + + renderLoginActivity(loginMetrics) { + const totalLogins = loginMetrics?.total_logins || 0; + const lastLogin = loginMetrics?.last_login; + + let lastLoginFormatted = 'None'; + if (lastLogin) { + try { + const date = new Date(lastLogin); + // Format as MM/DD/YYYY + lastLoginFormatted = date.toLocaleDateString('en-US', { + month: '2-digit', + day: '2-digit', + year: 'numeric' + }); + } catch { + lastLoginFormatted = 'None'; + } + } + + return ` +
    +
    Last Login: ${lastLoginFormatted}
    +
    Total Logins: ${totalLogins}
    +
    + `; + } + + renderPagination(pagination) { + const paginationInfo = document.getElementById('usersPaginationInfo'); + const paginationNav = document.getElementById('usersPagination'); + + if (paginationInfo) { + const start = (pagination.page - 1) * pagination.per_page + 1; + const end = Math.min(pagination.page * pagination.per_page, pagination.total_items); + paginationInfo.textContent = `Showing ${start}-${end} of ${pagination.total_items} users`; + } + + if (paginationNav) { + let paginationHtml = ''; + + // Previous button + paginationHtml += ` +
  + + + +
  + `; + + // Page numbers + const startPage = Math.max(1, pagination.page - 2); + const endPage = Math.min(pagination.total_pages, pagination.page + 2); + + if (startPage > 1) { + paginationHtml += ` +
  + 1 +
  + `; + if (startPage > 2) { + paginationHtml += '
  ...
  '; + } + } + + for (let i = startPage; i <= endPage; i++) { + paginationHtml += ` +
  + ${i} +
  + `; + } + + if (endPage < pagination.total_pages) { + if (endPage < pagination.total_pages - 1) { + paginationHtml += '
  ...
  '; + } + paginationHtml += ` +
  + ${pagination.total_pages} +
  + `; + } + + // Next button + paginationHtml += ` +
  + + + +
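// Illustrative sketch: the page-number window built above shows the current page
// plus/minus two, clamped to [1, total_pages], with '...' gaps and the first and
// last pages always reachable. Worked example with hypothetical values:
const examplePage = 7, exampleTotalPages = 20;
const exampleStart = Math.max(1, examplePage - 2);                  // 5
const exampleEnd = Math.min(exampleTotalPages, examplePage + 2);    // 9
// Rendered sequence: 1, ..., 5, 6, [7], 8, 9, ..., 20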
  • + `; + + paginationNav.innerHTML = paginationHtml; + } + } + + goToPage(page) { + this.currentPage = page; + this.loadUsers(); + } + + handleSearchChange() { + this.searchTerm = document.getElementById('userSearchInput')?.value || ''; + this.currentPage = 1; + this.loadUsers(); + } + + handleFilterChange() { + this.accessFilter = document.getElementById('accessFilterSelect')?.value || 'all'; + this.currentPage = 1; + this.loadUsers(); + } + + handleSelectAll(e) { + const checkboxes = document.querySelectorAll('.user-checkbox'); + checkboxes.forEach(checkbox => { + checkbox.checked = e.target.checked; + if (e.target.checked) { + this.selectedUsers.add(checkbox.value); + } else { + this.selectedUsers.delete(checkbox.value); + } + }); + this.updateBulkActionButton(); + } + + handleUserSelection(e) { + if (e.target.checked) { + this.selectedUsers.add(e.target.value); + } else { + this.selectedUsers.delete(e.target.value); + } + + // Update select all checkbox + const allCheckboxes = document.querySelectorAll('.user-checkbox'); + const checkedCheckboxes = document.querySelectorAll('.user-checkbox:checked'); + const selectAllCheckbox = document.getElementById('selectAllUsers'); + + if (selectAllCheckbox) { + if (checkedCheckboxes.length === 0) { + selectAllCheckbox.indeterminate = false; + selectAllCheckbox.checked = false; + } else if (checkedCheckboxes.length === allCheckboxes.length) { + selectAllCheckbox.indeterminate = false; + selectAllCheckbox.checked = true; + } else { + selectAllCheckbox.indeterminate = true; + } + } + + this.updateBulkActionButton(); + } + + updateBulkActionButton() { + const bulkActionBtn = document.getElementById('bulkActionBtn'); + if (bulkActionBtn) { + bulkActionBtn.disabled = this.selectedUsers.size === 0; + } + + const selectedCount = document.getElementById('selectedUserCount'); + if (selectedCount) { + selectedCount.textContent = this.selectedUsers.size; + } + } + + updatePublicWorkspaceBulkActionButton() { + const bulkActionBtn = document.getElementById('publicWorkspaceBulkActionBtn'); + if (bulkActionBtn) { + bulkActionBtn.disabled = this.selectedPublicWorkspaces.size === 0; + } + + const selectedCount = document.getElementById('selectedPublicWorkspaceCount'); + if (selectedCount) { + selectedCount.textContent = this.selectedPublicWorkspaces.size; + } + } + + updateGroupBulkActionButton() { + const bulkActionBtn = document.getElementById('groupBulkActionBtn'); + if (bulkActionBtn) { + bulkActionBtn.disabled = this.selectedGroups.size === 0; + } + + const selectedCount = document.getElementById('selectedGroupCount'); + if (selectedCount) { + selectedCount.textContent = this.selectedGroups.size; + } + } + + async showUserModal(userId) { + try { + // Find user data in current page + const userCheckbox = document.querySelector(`input[value="${userId}"]`); + if (!userCheckbox) return; + + const userRow = userCheckbox.closest('tr'); + const cells = userRow.querySelectorAll('td'); + + // Extract user info from table row + const nameCell = cells[1]; + const userName = nameCell.querySelector('.fw-semibold')?.textContent || 'Unknown User'; + const userEmail = nameCell.querySelectorAll('.text-muted')[0]?.textContent || ''; + + // Extract document count from cell 6 (Document Metrics column) + const docMetricsCell = cells[6]; + const totalDocsText = docMetricsCell?.querySelector('div > div:first-child')?.textContent || ''; + const docCount = totalDocsText.match(/Total Docs:\s*(\d+)/)?.[1] || '0'; + + // Extract last login from cell 4 (Login Activity column) + const 
loginActivityCell = cells[4]; + const lastLoginText = loginActivityCell?.querySelector('div > div:first-child')?.textContent || ''; + const lastLogin = lastLoginText.replace('Last Login:', '').trim() || 'None'; + + // Populate modal + document.getElementById('modalUserName').textContent = userName; + document.getElementById('modalUserEmail').textContent = userEmail; + document.getElementById('modalUserDocuments').textContent = `${docCount} docs`; + document.getElementById('modalUserLastActivity').textContent = lastLogin; + + // Set current user + this.currentUser = { id: userId, name: userName, email: userEmail }; + + // Reset form + document.getElementById('accessStatusSelect').value = 'allow'; + document.getElementById('fileUploadStatusSelect').value = 'allow'; + document.getElementById('accessDateTime').value = ''; + document.getElementById('fileUploadDateTime').value = ''; + this.toggleAccessDateTime(); + this.toggleFileUploadDateTime(); + + // Show modal + const modal = new bootstrap.Modal(document.getElementById('userManagementModal')); + modal.show(); + + } catch (error) { + this.showError('Failed to load user details'); + } + } + + toggleAccessDateTime() { + const select = document.getElementById('accessStatusSelect'); + const group = document.getElementById('accessDateTimeGroup'); + if (select && group) { + group.style.display = select.value === 'deny_until' ? 'block' : 'none'; + } + } + + toggleFileUploadDateTime() { + const select = document.getElementById('fileUploadStatusSelect'); + const group = document.getElementById('fileUploadDateTimeGroup'); + if (select && group) { + group.style.display = select.value === 'deny_until' ? 'block' : 'none'; + } + } + + deleteUserDocuments() { + if (!this.currentUser) { + this.showError('No user selected'); + return; + } + + // Clear previous reason and show confirmation modal + document.getElementById('deleteUserDocumentsReason').value = ''; + const deleteModal = new bootstrap.Modal(document.getElementById('deleteUserDocumentsModal')); + deleteModal.show(); + } + + async confirmDeleteUserDocuments() { + if (!this.currentUser) { + this.showError('No user selected'); + return; + } + + const reason = document.getElementById('deleteUserDocumentsReason').value.trim(); + const confirmBtn = document.getElementById('confirmDeleteUserDocumentsBtn'); + + if (!reason) { + this.showError('Please provide a reason for deleting this user\'s documents'); + return; + } + + // Disable button during request + confirmBtn.disabled = true; + confirmBtn.innerHTML = 'Submitting...'; + + try { + const response = await fetch(`/api/admin/control-center/users/${this.currentUser.id}/delete-documents`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ reason }) + }); + + const data = await response.json(); + + if (!response.ok) { + throw new Error(data.error || 'Failed to create document deletion request'); + } + + // Close both modals + bootstrap.Modal.getInstance(document.getElementById('deleteUserDocumentsModal')).hide(); + bootstrap.Modal.getInstance(document.getElementById('userManagementModal')).hide(); + + this.showSuccess('Document deletion request created successfully. 
It requires approval from another admin.'); + + // Refresh user list + this.loadUsers(); + + } catch (error) { + this.showError(error.message); + } finally { + confirmBtn.disabled = false; + confirmBtn.innerHTML = 'Submit Request'; + } + } + + async saveUserChanges() { + if (!this.currentUser) return; + + this.showLoading(true); + + try { + const accessStatus = document.getElementById('accessStatusSelect').value; + const fileUploadStatus = document.getElementById('fileUploadStatusSelect').value; + + // Update access control + let accessDateTime = null; + if (accessStatus === 'deny_until') { + const dateTimeInput = document.getElementById('accessDateTime').value; + if (dateTimeInput) { + accessDateTime = new Date(dateTimeInput).toISOString(); + } + } + + const accessResponse = await fetch(`/api/admin/control-center/users/${this.currentUser.id}/access`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + status: accessStatus === 'deny_until' ? 'deny' : accessStatus, + datetime_to_allow: accessDateTime + }) + }); + + if (!accessResponse.ok) { + const errorData = await accessResponse.json(); + throw new Error(errorData.error || 'Failed to update access'); + } + + // Update file upload permissions + let fileUploadDateTime = null; + if (fileUploadStatus === 'deny_until') { + const dateTimeInput = document.getElementById('fileUploadDateTime').value; + if (dateTimeInput) { + fileUploadDateTime = new Date(dateTimeInput).toISOString(); + } + } + + const fileUploadResponse = await fetch(`/api/admin/control-center/users/${this.currentUser.id}/file-uploads`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + status: fileUploadStatus === 'deny_until' ? 'deny' : fileUploadStatus, + datetime_to_allow: fileUploadDateTime + }) + }); + + if (!fileUploadResponse.ok) { + const errorData = await fileUploadResponse.json(); + throw new Error(errorData.error || 'Failed to update file upload permissions'); + } + + // Close modal and refresh + bootstrap.Modal.getInstance(document.getElementById('userManagementModal')).hide(); + this.showSuccess('User settings updated successfully'); + this.loadUsers(); + + } catch (error) { + this.showError(error.message); + } finally { + this.showLoading(false); + } + } + + showBulkActionModal() { + if (this.selectedUsers.size === 0) return; + + // Reset form + document.getElementById('bulkActionType').value = ''; + document.getElementById('bulkStatusSelect').value = 'allow'; + document.getElementById('bulkDateTime').value = ''; + this.toggleBulkActionSettings(); + this.toggleBulkDateTime(); + + const modal = new bootstrap.Modal(document.getElementById('bulkActionModal')); + modal.show(); + } + + toggleBulkActionSettings() { + const actionType = document.getElementById('bulkActionType').value; + const settingsGroup = document.getElementById('bulkActionSettings'); + if (settingsGroup) { + settingsGroup.style.display = actionType ? 'block' : 'none'; + } + } + + toggleBulkDateTime() { + const select = document.getElementById('bulkStatusSelect'); + const group = document.getElementById('bulkDateTimeGroup'); + if (select && group) { + group.style.display = select.value === 'deny_until' ? 
'block' : 'none'; + } + } + + async executeBulkAction() { + const actionType = document.getElementById('bulkActionType').value; + const status = document.getElementById('bulkStatusSelect').value; + + if (!actionType || this.selectedUsers.size === 0) return; + + this.showLoading(true); + + try { + let datetimeToAllow = null; + if (status === 'deny_until') { + const dateTimeInput = document.getElementById('bulkDateTime').value; + if (dateTimeInput) { + datetimeToAllow = new Date(dateTimeInput).toISOString(); + } + } + + const response = await fetch('/api/admin/control-center/users/bulk-action', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + user_ids: Array.from(this.selectedUsers), + action_type: actionType, + settings: { + status: status === 'deny_until' ? 'deny' : status, + datetime_to_allow: datetimeToAllow + } + }) + }); + + const data = await response.json(); + + if (response.ok) { + bootstrap.Modal.getInstance(document.getElementById('bulkActionModal')).hide(); + this.showSuccess(data.message); + this.selectedUsers.clear(); + this.loadUsers(); + } else { + throw new Error(data.error || 'Bulk action failed'); + } + + } catch (error) { + this.showError(error.message); + } finally { + this.showLoading(false); + } + } + + switchToUsersTab() { + const usersTab = document.getElementById('users-tab'); + if (usersTab) { + usersTab.click(); + } + } + + async refreshStats() { + // Reload the page to refresh statistics + window.location.reload(); + } + + async exportUsersToCSV() { + try { + // Show loading state + const exportBtn = document.getElementById('exportUsersBtn'); + const originalText = exportBtn.innerHTML; + exportBtn.disabled = true; + exportBtn.innerHTML = 'Exporting...'; + + // Get all users data (not just current page) + const response = await fetch('/api/admin/control-center/users?all=true'); + if (!response.ok) { + throw new Error(`Failed to fetch users: ${response.status}`); + } + + const data = await response.json(); + if (!data.success) { + throw new Error(data.message || 'Failed to fetch users'); + } + + // Convert users data to CSV + const csvContent = this.convertUsersToCSV(data.users); + + // Create and download CSV file + const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' }); + const link = document.createElement('a'); + const url = URL.createObjectURL(blob); + link.setAttribute('href', url); + + // Generate filename with current date + const now = new Date(); + const dateStr = now.toISOString().split('T')[0]; // YYYY-MM-DD format + link.setAttribute('download', `users_export_${dateStr}.csv`); + + // Trigger download + link.style.visibility = 'hidden'; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + this.showSuccess(`Successfully exported ${data.users.length} users to CSV`); + + } catch (error) { + console.error('Export error:', error); + this.showError(`Export failed: ${error.message}`); + } finally { + // Restore button state + const exportBtn = document.getElementById('exportUsersBtn'); + exportBtn.disabled = false; + exportBtn.innerHTML = 'Export'; + } + } + + convertUsersToCSV(users) { + if (!users || users.length === 0) { + return 'No users to export'; + } + + // Define CSV headers + const headers = [ + 'Name', + 'Email', + 'Access Status', + 'File Upload Status', + 'Last Login', + 'Total Logins', + 'Total Conversations', + 'Total Messages', + 'Total Documents', + 'AI Search Size (MB)' + ]; + + // Add enhanced citations column if enabled + const 
enhancedCitationsEnabled = (typeof appSettings !== 'undefined' && appSettings.enable_enhanced_citations) || false; + if (enhancedCitationsEnabled) { + headers.push('Storage Account Size (MB)'); + } + + // Convert users to CSV rows + const csvRows = [headers.join(',')]; + + users.forEach(user => { + const activity = user.activity || {}; + const loginMetrics = activity.login_metrics || {}; + const chatMetrics = activity.chat_metrics || {}; + const docMetrics = activity.document_metrics || {}; + + const row = [ + this.escapeCSVField(user.display_name || ''), + this.escapeCSVField(user.email || user.mail || ''), + this.escapeCSVField(user.access_status || ''), + this.escapeCSVField(user.file_upload_status || ''), + this.escapeCSVField(loginMetrics.last_login || 'Never'), + loginMetrics.total_logins || 0, + chatMetrics.total_conversations || 0, + chatMetrics.total_messages || 0, + docMetrics.total_documents || 0, + this.formatBytesForCSV(docMetrics.ai_search_size || 0) + ]; + + // Add storage account size if enhanced citations is enabled + if (enhancedCitationsEnabled) { + row.push(this.formatBytesForCSV(docMetrics.storage_account_size || 0)); + } + + csvRows.push(row.join(',')); + }); + + return csvRows.join('\n'); + } + + escapeCSVField(field) { + if (field === null || field === undefined) { + return ''; + } + + const stringField = String(field); + + // If field contains comma, quote, or newline, wrap in quotes and escape quotes + if (stringField.includes(',') || stringField.includes('"') || stringField.includes('\n')) { + return '"' + stringField.replace(/"/g, '""') + '"'; + } + + return stringField; + } + + formatBytesForCSV(bytes) { + if (!bytes || bytes === 0) return '0'; + + // Convert to MB and round to 2 decimal places + const mb = bytes / (1024 * 1024); + return Math.round(mb * 100) / 100; + } + + convertGroupsToCSV(groups) { + if (!groups || groups.length === 0) { + return 'No groups to export'; + } + + // Define CSV headers + const headers = [ + 'Group Name', + 'Description', + 'Owner Name', + 'Owner Email', + 'Member Count', + 'Status', + 'Total Documents', + 'AI Search Size (MB)', + 'Storage Account Size (MB)', + 'Group ID' + ]; + + // Convert groups to CSV rows + const csvRows = [headers.join(',')]; + + groups.forEach(group => { + const activity = group.activity || {}; + const docMetrics = activity.document_metrics || {}; + const ownerName = group.owner?.displayName || group.owner?.display_name || 'Unknown'; + const ownerEmail = group.owner?.email || ''; + + const row = [ + this.escapeCSVField(group.name || ''), + this.escapeCSVField(group.description || ''), + this.escapeCSVField(ownerName), + this.escapeCSVField(ownerEmail), + group.member_count || 0, + 'Active', + docMetrics.total_documents || 0, + this.formatBytesForCSV(docMetrics.ai_search_size || 0), + this.formatBytesForCSV(docMetrics.storage_account_size || 0), + this.escapeCSVField(group.id || '') + ]; + + csvRows.push(row.join(',')); + }); + + return csvRows.join('\n'); + } + + convertPublicWorkspacesToCSV(workspaces) { + if (!workspaces || workspaces.length === 0) { + return 'No public workspaces to export'; + } + + // Define CSV headers + const headers = [ + 'Workspace Name', + 'Description', + 'Owner Name', + 'Owner Email', + 'Member Count', + 'Status', + 'Total Documents', + 'AI Search Size (MB)', + 'Storage Account Size (MB)', + 'Workspace ID' + ]; + + // Convert workspaces to CSV rows + const csvRows = [headers.join(',')]; + + workspaces.forEach(workspace => { + const activity = workspace.activity || {}; + const 
docMetrics = activity.document_metrics || {}; + const ownerName = workspace.owner?.displayName || workspace.owner?.display_name || workspace.owner_name || 'Unknown'; + const ownerEmail = workspace.owner?.email || workspace.owner_email || ''; + + const row = [ + this.escapeCSVField(workspace.name || ''), + this.escapeCSVField(workspace.description || ''), + this.escapeCSVField(ownerName), + this.escapeCSVField(ownerEmail), + workspace.member_count || 0, + 'Active', + docMetrics.total_documents || 0, + this.formatBytesForCSV(docMetrics.ai_search_size || 0), + this.formatBytesForCSV(docMetrics.storage_account_size || 0), + this.escapeCSVField(workspace.id || '') + ]; + + csvRows.push(row.join(',')); + }); + + return csvRows.join('\n'); + } + + showLoading(show) { + const overlay = document.getElementById('loadingOverlay'); + if (overlay) { + overlay.classList.toggle('d-none', !show); + } + } + + showSuccess(message) { + this.showToast(message, 'success'); + } + + showError(message) { + this.showToast(message, 'danger'); + } + + showToast(message, type = 'info') { + // Create toast HTML + const toastHtml = ` + + `; + + // Get or create toast container + let toastContainer = document.getElementById('toastContainer'); + if (!toastContainer) { + toastContainer = document.createElement('div'); + toastContainer.id = 'toastContainer'; + toastContainer.className = 'toast-container position-fixed top-0 end-0 p-3'; + toastContainer.style.zIndex = '11000'; + document.body.appendChild(toastContainer); + } + + // Add toast to container + toastContainer.insertAdjacentHTML('beforeend', toastHtml); + + // Show toast + const toastElement = toastContainer.lastElementChild; + const toast = new bootstrap.Toast(toastElement); + toast.show(); + + // Remove toast element after it's hidden + toastElement.addEventListener('hidden.bs.toast', () => { + toastElement.remove(); + }); + } + + escapeHtml(text) { + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; + } + + formatDate(dateString) { + if (!dateString) return 'Never'; + try { + return new Date(dateString).toLocaleDateString(); + } catch { + return 'Invalid date'; + } + } + + formatBytes(bytes) { + if (bytes === 0) return '0 B'; + const k = 1024; + const sizes = ['B', 'KB', 'MB', 'GB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i]; + } + + // Activity Trends Methods + async loadActivityTrends() { + try { + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Loading activity trends for', this.currentTrendDays, 'days'); + } + + // Build API URL with custom date range if specified + let apiUrl = `/api/admin/control-center/activity-trends?days=${this.currentTrendDays}`; + if (this.customStartDate && this.customEndDate) { + apiUrl += `&start_date=${this.customStartDate}&end_date=${this.customEndDate}`; + } + + const response = await fetch(apiUrl); + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] API response status:', response.status); + } + + const data = await response.json(); + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] API response data:', data); + } + + if (response.ok) { + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Activity data received:', data.activity_data); + } + // Render all four charts + this.renderLoginsChart(data.activity_data); + this.renderChatsChart(data.activity_data); + 
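// Illustrative sketch: escapeCSVField() above applies the standard CSV quoting rule
// (wrap values containing a comma, quote, or newline in double quotes and double any
// embedded quotes), and formatBytesForCSV() converts bytes to megabytes rounded to
// two decimals. Expected behaviour on hypothetical inputs:
//   escapeCSVField('Smith, Jane')  -> '"Smith, Jane"'   // comma triggers quoting
//   escapeCSVField('say "hi"')     -> '"say ""hi"""'    // embedded quotes doubled
//   escapeCSVField('plain')        -> 'plain'           // returned unchanged
//   formatBytesForCSV(1572864)     -> 1.5               // 1,572,864 bytes = 1.5 MB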
this.renderDocumentsChart(data.activity_data); // Now renders both personal and group + this.renderTokensChart(data.activity_data); + // Ensure main loading overlay is hidden after all charts are created + this.showLoading(false); + } else { + console.error('❌ [Frontend Debug] API error:', data.error); + this.showAllChartsError(); + // Ensure main loading overlay is hidden on API error + this.showLoading(false); + } + } catch (error) { + console.error('❌ [Frontend Debug] Exception loading activity trends:', error); + this.showAllChartsError(); + // Ensure main loading overlay is hidden on error + this.showLoading(false); + } + } + + renderLoginsChart(activityData) { + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Rendering logins chart with data:', activityData.logins); + } + this.renderSingleChart('loginsChart', 'logins', activityData.logins, { + label: 'Logins', + backgroundColor: 'rgba(255, 193, 7, 0.2)', + borderColor: '#ffc107' + }); + } + + renderChatsChart(activityData) { + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Rendering chats chart with data:', activityData); + } + + // Check if Chart.js is available + if (typeof Chart === 'undefined') { + console.error(`❌ [Frontend Debug] Chart.js is not loaded. Cannot render chats chart.`); + this.showChartError('chatsChart', 'chats'); + return; + } + + const canvas = document.getElementById('chatsChart'); + if (!canvas) { + console.error(`❌ [Frontend Debug] Chart canvas element chatsChart not found`); + return; + } + + const ctx = canvas.getContext('2d'); + if (!ctx) { + console.error(`❌ [Frontend Debug] Could not get 2D context from chatsChart canvas`); + return; + } + + // Show canvas + canvas.style.display = 'block'; + + // Destroy existing chart if it exists + if (this.chatsChart) { + this.chatsChart.destroy(); + } + + // Get data for created and deleted chats + const createdData = activityData.chats_created || {}; + const deletedData = activityData.chats_deleted || {}; + const allDates = [...new Set([...Object.keys(createdData), ...Object.keys(deletedData)])].sort(); + + const labels = allDates.map(date => { + const dateObj = parseDateKey(date); + return dateObj + ? 
dateObj.toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) + : date; + }); + + const createdValues = allDates.map(date => createdData[date] || 0); + const deletedValues = allDates.map(date => deletedData[date] || 0); + + const datasets = [ + { + label: 'New Chats', + data: createdValues, + borderColor: '#0d6efd', + backgroundColor: 'rgba(13, 110, 253, 0.1)', + borderWidth: 2, + fill: true, + tension: 0.4, + type: 'line' + }, + { + label: 'Deleted Chats', + data: deletedValues, + backgroundColor: 'rgba(220, 53, 69, 0.7)', + borderColor: '#dc3545', + borderWidth: 1, + type: 'bar' + } + ]; + + this.chatsChart = new Chart(ctx, { + type: 'bar', + data: { + labels: labels, + datasets: datasets + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: true, + position: 'top', + labels: { + usePointStyle: true, + padding: 15 + } + } + }, + scales: { + x: { + display: true, + grid: { display: false } + }, + y: { + display: true, + beginAtZero: true, + grid: { color: 'rgba(0, 0, 0, 0.1)' }, + ticks: { precision: 0 } + } + } + } + }); + } + + renderDocumentsChart(activityData) { + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Rendering documents chart with creation/deletion data'); + console.log('🔍 [Frontend Debug] Personal created:', activityData.personal_documents_created); + console.log('🔍 [Frontend Debug] Personal deleted:', activityData.personal_documents_deleted); + console.log('🔍 [Frontend Debug] Group created:', activityData.group_documents_created); + console.log('🔍 [Frontend Debug] Group deleted:', activityData.group_documents_deleted); + console.log('🔍 [Frontend Debug] Public created:', activityData.public_documents_created); + console.log('🔍 [Frontend Debug] Public deleted:', activityData.public_documents_deleted); + } + + // Render combined chart with creations (lines) and deletions (bars) + this.renderCombinedDocumentsChart('documentsChart', { + personal_created: activityData.personal_documents_created || {}, + personal_deleted: activityData.personal_documents_deleted || {}, + group_created: activityData.group_documents_created || {}, + group_deleted: activityData.group_documents_deleted || {}, + public_created: activityData.public_documents_created || {}, + public_deleted: activityData.public_documents_deleted || {} + }); + } + + renderCombinedDocumentsChart(canvasId, documentsData) { + // Check if Chart.js is available + if (typeof Chart === 'undefined') { + console.error(`❌ [Frontend Debug] Chart.js is not loaded. 
Cannot render documents chart.`); + this.showChartError(canvasId, 'documents'); + return; + } + + const canvas = document.getElementById(canvasId); + if (!canvas) { + console.error(`❌ [Frontend Debug] Chart canvas element ${canvasId} not found`); + return; + } + + const ctx = canvas.getContext('2d'); + if (!ctx) { + console.error(`❌ [Frontend Debug] Could not get 2D context from ${canvasId} canvas`); + return; + } + + console.log(`✅ [Frontend Debug] Chart.js loaded, ${canvasId} canvas found, context ready`); + + // Show canvas + canvas.style.display = 'block'; + console.log(`🔍 [Frontend Debug] ${canvasId} canvas displayed`); + + // Destroy existing chart if it exists + if (this.documentsChart) { + console.log(`🔍 [Frontend Debug] Destroying existing documents chart`); + this.documentsChart.destroy(); + } + + // Prepare data for Chart.js - get all unique dates and sort them + const allDates = [...new Set([ + ...Object.keys(documentsData.personal_created || {}), + ...Object.keys(documentsData.personal_deleted || {}), + ...Object.keys(documentsData.group_created || {}), + ...Object.keys(documentsData.group_deleted || {}), + ...Object.keys(documentsData.public_created || {}), + ...Object.keys(documentsData.public_deleted || {}) + ])].sort(); + + console.log(`🔍 [Frontend Debug] Documents date range:`, allDates); + + const labels = allDates.map(date => { + const dateObj = parseDateKey(date); + return dateObj + ? dateObj.toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) + : date; + }); + + // Prepare datasets - lines for creations, bars for deletions + const personalCreated = allDates.map(date => (documentsData.personal_created || {})[date] || 0); + const personalDeleted = allDates.map(date => (documentsData.personal_deleted || {})[date] || 0); + const groupCreated = allDates.map(date => (documentsData.group_created || {})[date] || 0); + const groupDeleted = allDates.map(date => (documentsData.group_deleted || {})[date] || 0); + const publicCreated = allDates.map(date => (documentsData.public_created || {})[date] || 0); + const publicDeleted = allDates.map(date => (documentsData.public_deleted || {})[date] || 0); + + console.log(`🔍 [Frontend Debug] Personal created:`, personalCreated); + console.log(`🔍 [Frontend Debug] Personal deleted:`, personalDeleted); + console.log(`🔍 [Frontend Debug] Group created:`, groupCreated); + console.log(`🔍 [Frontend Debug] Group deleted:`, groupDeleted); + console.log(`🔍 [Frontend Debug] Public created:`, publicCreated); + console.log(`🔍 [Frontend Debug] Public deleted:`, publicDeleted); + + const datasets = [ + // Lines for new documents + { + label: 'Personal (New)', + data: personalCreated, + borderColor: '#90EE90', + backgroundColor: 'rgba(144, 238, 144, 0.1)', + borderWidth: 2, + fill: true, + tension: 0.4, + type: 'line' + }, + { + label: 'Group (New)', + data: groupCreated, + borderColor: '#228B22', + backgroundColor: 'rgba(34, 139, 34, 0.1)', + borderWidth: 2, + fill: true, + tension: 0.4, + type: 'line' + }, + { + label: 'Public (New)', + data: publicCreated, + borderColor: '#006400', + backgroundColor: 'rgba(0, 100, 0, 0.1)', + borderWidth: 2, + fill: true, + tension: 0.4, + type: 'line' + }, + // Bars for deleted documents + { + label: 'Personal (Deleted)', + data: personalDeleted, + backgroundColor: 'rgba(255, 182, 193, 0.7)', + borderColor: '#FFB6C1', + borderWidth: 1, + type: 'bar' + }, + { + label: 'Group (Deleted)', + data: groupDeleted, + backgroundColor: 'rgba(220, 53, 69, 0.7)', + borderColor: '#dc3545', + borderWidth: 1, + type: 
'bar' + }, + { + label: 'Public (Deleted)', + data: publicDeleted, + backgroundColor: 'rgba(139, 0, 0, 0.7)', + borderColor: '#8B0000', + borderWidth: 1, + type: 'bar' + } + ]; + + console.log(`🔍 [Frontend Debug] Documents datasets prepared:`, datasets); + + // Create new chart + try { + this.documentsChart = new Chart(ctx, { + type: 'bar', + data: { + labels: labels, + datasets: datasets + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: true, + position: 'top', + labels: { + usePointStyle: true, + padding: 10, + font: { size: 10 } + } + }, + tooltip: { + mode: 'index', + intersect: false, + callbacks: { + title: function(context) { + const dataIndex = context[0].dataIndex; + const dateStr = allDates[dataIndex]; + const date = parseDateKey(dateStr); + if (!date) { + return dateStr; + } + return date.toLocaleDateString('en-US', { + weekday: 'long', + year: 'numeric', + month: 'long', + day: 'numeric' + }); + } + } + } + }, + scales: { + x: { + display: true, + grid: { + display: false + } + }, + y: { + display: true, + beginAtZero: true, + grid: { + color: 'rgba(0, 0, 0, 0.1)' + }, + ticks: { + precision: 0 + } + } + }, + interaction: { + intersect: false, + mode: 'index' + } + } + }); + + console.log(`✅ [Frontend Debug] Documents chart created successfully`); + + } catch (error) { + console.error(`❌ [Frontend Debug] Error creating documents chart:`, error); + this.showChartError(canvasId, 'documents'); + } + } + + renderTokensChart(activityData) { + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Rendering tokens chart with data:', activityData.tokens); + } + + // Render combined chart with embedding, chat, and web search tokens + this.renderCombinedTokensChart('tokensChart', activityData.tokens || {}); + } + + renderCombinedTokensChart(canvasId, tokensData) { + // Check if Chart.js is available + if (typeof Chart === 'undefined') { + console.error(`❌ [Frontend Debug] Chart.js is not loaded. Cannot render tokens chart.`); + this.showChartError(canvasId, 'tokens'); + return; + } + + const canvas = document.getElementById(canvasId); + if (!canvas) { + console.error(`❌ [Frontend Debug] Chart canvas element ${canvasId} not found`); + return; + } + + const ctx = canvas.getContext('2d'); + if (!ctx) { + console.error(`❌ [Frontend Debug] Could not get 2D context from ${canvasId} canvas`); + return; + } + + // Show canvas + canvas.style.display = 'block'; + + // Destroy existing chart if it exists + if (this.tokensChart) { + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Destroying existing tokens chart'); + } + this.tokensChart.destroy(); + } + + // Prepare data from tokens object (format: { "YYYY-MM-DD": { "embedding": count, "chat": count, "web_search": count } }) + const allDates = Object.keys(tokensData).sort(); + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Token dates:', allDates); + } + + // Format labels for display + const labels = allDates.map(dateStr => { + const date = parseDateKey(dateStr); + return date + ? 
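// Illustrative sketch: the chats and documents charts above are Chart.js "mixed"
// charts - the chart-level type is 'bar' while individual datasets override it with
// type: 'line', so creations draw as filled lines and deletions as bars on shared
// axes. A minimal standalone version of the same technique; the canvas id and data
// below are hypothetical:
const demoMixedChart = new Chart(document.getElementById('demoChart'), {
    type: 'bar',
    data: {
        labels: ['Mon', 'Tue', 'Wed'],
        datasets: [
            { type: 'line', label: 'Created', data: [3, 5, 2], borderColor: '#228B22', fill: true },
            { type: 'bar', label: 'Deleted', data: [1, 0, 4], backgroundColor: 'rgba(220, 53, 69, 0.7)' }
        ]
    },
    options: { responsive: true, scales: { y: { beginAtZero: true } } }
});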
date.toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) + : dateStr; + }); + + // Extract embedding, chat, and web search token counts + const embeddingTokens = allDates.map(date => tokensData[date]?.embedding || 0); + const chatTokens = allDates.map(date => tokensData[date]?.chat || 0); + const webSearchTokens = allDates.map(date => tokensData[date]?.web_search || 0); + + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Embedding tokens:', embeddingTokens); + console.log('🔍 [Frontend Debug] Chat tokens:', chatTokens); + console.log('🔍 [Frontend Debug] Web search tokens:', webSearchTokens); + } + + // Create datasets + const datasets = [ + { + label: 'Embedding Tokens', + data: embeddingTokens, + backgroundColor: 'rgba(111, 66, 193, 0.2)', + borderColor: '#6f42c1', + borderWidth: 2, + fill: false, + tension: 0.4, + pointRadius: 3, + pointHoverRadius: 5, + pointBackgroundColor: '#6f42c1' + }, + { + label: 'Chat Tokens', + data: chatTokens, + backgroundColor: 'rgba(13, 202, 240, 0.2)', + borderColor: '#0dcaf0', + borderWidth: 2, + fill: false, + tension: 0.4, + pointRadius: 3, + pointHoverRadius: 5, + pointBackgroundColor: '#0dcaf0' + }, + { + label: 'Web Search Tokens', + data: webSearchTokens, + backgroundColor: 'rgba(32, 201, 151, 0.2)', + borderColor: '#20c997', + borderWidth: 2, + fill: false, + tension: 0.4, + pointRadius: 3, + pointHoverRadius: 5, + pointBackgroundColor: '#20c997' + } + ]; + + console.log(`🔍 [Frontend Debug] Token datasets prepared:`, datasets); + + // Create new chart + try { + this.tokensChart = new Chart(ctx, { + type: 'line', + data: { + labels: labels, + datasets: datasets + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: true, + position: 'top', + labels: { + usePointStyle: true, + padding: 15 + } + }, + tooltip: { + mode: 'index', + intersect: false, + callbacks: { + title: function(context) { + const dataIndex = context[0].dataIndex; + const dateStr = allDates[dataIndex]; + const date = parseDateKey(dateStr); + if (!date) { + return dateStr; + } + return date.toLocaleDateString('en-US', { + weekday: 'long', + year: 'numeric', + month: 'long', + day: 'numeric' + }); + }, + label: function(context) { + let label = context.dataset.label || ''; + if (label) { + label += ': '; + } + label += context.parsed.y.toLocaleString() + ' tokens'; + return label; + } + } + } + }, + scales: { + x: { + display: true, + grid: { + display: false + } + }, + y: { + display: true, + beginAtZero: true, + grid: { + color: 'rgba(0, 0, 0, 0.1)' + }, + ticks: { + precision: 0, + callback: function(value) { + return value.toLocaleString(); + } + } + } + }, + interaction: { + intersect: false, + mode: 'index' + } + } + }); + + console.log(`✅ [Frontend Debug] Tokens chart created successfully`); + + } catch (error) { + console.error(`❌ [Frontend Debug] Error creating tokens chart:`, error); + this.showChartError(canvasId, 'tokens'); + } + } + + renderSingleChart(canvasId, chartType, chartData, chartConfig) { + // Check if Chart.js is available + if (typeof Chart === 'undefined') { + console.error(`❌ [Frontend Debug] Chart.js is not loaded. 
Cannot render ${chartType} chart.`); + this.showChartError(canvasId, chartType); + return; + } + + const canvas = document.getElementById(canvasId); + if (!canvas) { + console.error(`❌ [Frontend Debug] Chart canvas element ${canvasId} not found`); + return; + } + + const ctx = canvas.getContext('2d'); + if (!ctx) { + console.error(`❌ [Frontend Debug] Could not get 2D context from ${canvasId} canvas`); + return; + } + + console.log(`✅ [Frontend Debug] Chart.js loaded, ${canvasId} canvas found, context ready`); + + // Show canvas + canvas.style.display = 'block'; + console.log(`🔍 [Frontend Debug] ${canvasId} canvas displayed`); + + // Destroy existing chart if it exists + const chartProperty = chartType + 'Chart'; + if (this[chartProperty]) { + console.log(`🔍 [Frontend Debug] Destroying existing ${chartType} chart`); + this[chartProperty].destroy(); + } + + // Prepare data for Chart.js - convert object format to arrays + console.log(`🔍 [Frontend Debug] Processing ${chartType} data structure...`); + + // Get dates and sort them + const dates = Object.keys(chartData || {}).sort(); + console.log(`🔍 [Frontend Debug] ${chartType} date range:`, dates); + + const labels = dates.map(date => { + const dateObj = parseDateKey(date); + return dateObj + ? dateObj.toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) + : date; + }); + + const data = dates.map(date => chartData[date] || 0); + + console.log(`🔍 [Frontend Debug] ${chartType} chart labels:`, labels); + console.log(`🔍 [Frontend Debug] ${chartType} chart data:`, data); + + const dataset = { + label: chartConfig.label, + data: data, + backgroundColor: chartConfig.backgroundColor, + borderColor: chartConfig.borderColor, + borderWidth: 2, + fill: false, + tension: 0.1 + }; + + console.log(`🔍 [Frontend Debug] ${chartType} dataset prepared:`, dataset); + + // Create new chart + try { + this[chartProperty] = new Chart(ctx, { + type: 'line', + data: { + labels: labels, + datasets: [dataset] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: false // Charts have headers instead + }, + tooltip: { + mode: 'index', + intersect: false, + callbacks: { + title: function(context) { + const dataIndex = context[0].dataIndex; + const dateStr = dates[dataIndex]; + const date = parseDateKey(dateStr); + if (!date) { + return dateStr; + } + return date.toLocaleDateString('en-US', { + weekday: 'long', + year: 'numeric', + month: 'long', + day: 'numeric' + }); + } + } + } + }, + scales: { + x: { + display: true, + grid: { + display: false + } + }, + y: { + display: true, + beginAtZero: true, + grid: { + color: 'rgba(0, 0, 0, 0.1)' + }, + ticks: { + precision: 0 + } + } + }, + interaction: { + intersect: false, + mode: 'index' + }, + elements: { + point: { + radius: 4, + hoverRadius: 6 + } + } + } + }); + + console.log(`✅ [Frontend Debug] ${chartType} chart created successfully`); + + } catch (error) { + console.error(`❌ [Frontend Debug] Error creating ${chartType} chart:`, error); + this.showChartError(canvasId, chartType); + } + } + + changeTrendPeriod(days) { + // Update button active states + document.querySelectorAll('[id^="trend-"]').forEach(btn => { + btn.classList.remove('active'); + }); + document.getElementById(`trend-${days}days`).classList.add('active'); + + // Clear custom date range when switching to preset periods + this.customStartDate = null; + this.customEndDate = null; + + // Collapse custom date range if it's open + const customDateRange = document.getElementById('customDateRange'); + if 
(customDateRange && customDateRange.classList.contains('show')) { + const collapse = new bootstrap.Collapse(customDateRange, {toggle: false}); + collapse.hide(); + } + + // Update current period and reload data + this.currentTrendDays = days; + this.loadActivityTrends(); + } + + toggleCustomDateRange() { + // Update button active states + document.querySelectorAll('[id^="trend-"]').forEach(btn => { + btn.classList.remove('active'); + }); + document.getElementById('trend-custom').classList.add('active'); + + // Set default dates (last 30 days) + const endDate = new Date(); + const startDate = new Date(); + startDate.setDate(startDate.getDate() - 29); + + document.getElementById('startDate').value = startDate.toISOString().split('T')[0]; + document.getElementById('endDate').value = endDate.toISOString().split('T')[0]; + } + + applyCustomDateRange() { + const startDate = document.getElementById('startDate').value; + const endDate = document.getElementById('endDate').value; + + if (!startDate || !endDate) { + showToast('Please select both start and end dates.', 'warning'); + return; + } + + if (new Date(startDate) > new Date(endDate)) { + showToast('Start date must be before end date.', 'warning'); + return; + } + + // Calculate days difference for API call + const start = new Date(startDate); + const end = new Date(endDate); + const diffTime = Math.abs(end - start); + const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24)) + 1; + + this.currentTrendDays = diffDays; + this.customStartDate = startDate; + this.customEndDate = endDate; + + this.loadActivityTrends(); + } + + toggleExportCustomDateRange() { + const customRadio = document.getElementById('exportCustom'); + const customDateRange = document.getElementById('exportCustomDateRange'); + + if (customRadio.checked) { + customDateRange.style.display = 'block'; + // Set default dates + const endDate = new Date(); + const startDate = new Date(); + startDate.setDate(startDate.getDate() - 29); + + document.getElementById('exportStartDate').value = startDate.toISOString().split('T')[0]; + document.getElementById('exportEndDate').value = endDate.toISOString().split('T')[0]; + } else { + customDateRange.style.display = 'none'; + } + } + + async exportActivityTrends() { + try { + // Get selected charts + const selectedCharts = []; + if (document.getElementById('exportLogins').checked) selectedCharts.push('logins'); + if (document.getElementById('exportChats').checked) selectedCharts.push('chats'); + if (document.getElementById('exportPersonalDocuments').checked) selectedCharts.push('personal_documents'); + if (document.getElementById('exportGroupDocuments').checked) selectedCharts.push('group_documents'); + if (document.getElementById('exportPublicDocuments').checked) selectedCharts.push('public_documents'); + if (document.getElementById('exportTokens').checked) selectedCharts.push('tokens'); + + if (selectedCharts.length === 0) { + showToast('Please select at least one chart to export.', 'warning'); + return; + } + + // Get selected time window + const timeWindowRadio = document.querySelector('input[name="exportTimeWindow"]:checked'); + const timeWindow = timeWindowRadio.value; + + let exportData = { + charts: selectedCharts, + time_window: timeWindow + }; + + // Add custom dates if selected + if (timeWindow === 'custom') { + const startDate = document.getElementById('exportStartDate').value; + const endDate = document.getElementById('exportEndDate').value; + + if (!startDate || !endDate) { + showToast('Please select both start and end dates 
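// Illustrative sketch: applyCustomDateRange() above converts the picked range into
// an inclusive day count before requesting the trends data. Worked example with
// hypothetical dates:
const exampleStartDate = new Date('2025-01-01');
const exampleEndDate = new Date('2025-01-31');
const exampleDiffDays = Math.ceil(Math.abs(exampleEndDate - exampleStartDate) / (1000 * 60 * 60 * 24)) + 1;
console.log(exampleDiffDays); // 31 - both endpoints are counted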
for custom range.', 'warning'); + return; + } + + if (new Date(startDate) > new Date(endDate)) { + showToast('Start date must be before end date.', 'warning'); + return; + } + + exportData.start_date = startDate; + exportData.end_date = endDate; + } + + // Show loading state + const exportBtn = document.getElementById('executeExportBtn'); + const originalText = exportBtn.innerHTML; + exportBtn.innerHTML = 'Exporting...'; + exportBtn.disabled = true; + + // Make API call + const response = await fetch('/api/admin/control-center/activity-trends/export', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(exportData) + }); + + if (response.ok) { + // Create download link + const blob = await response.blob(); + const url = window.URL.createObjectURL(blob); + const a = document.createElement('a'); + a.style.display = 'none'; + a.href = url; + + // Get filename from response headers or generate one + const contentDisposition = response.headers.get('Content-Disposition'); + const filename = contentDisposition + ? contentDisposition.split('filename=')[1].replace(/"/g, '') + : 'activity_trends_export.csv'; + + a.download = filename; + document.body.appendChild(a); + a.click(); + window.URL.revokeObjectURL(url); + document.body.removeChild(a); + + // Close modal + const modal = bootstrap.Modal.getInstance(document.getElementById('exportModal')); + modal.hide(); + + // Show success message + this.showAlert('success', 'Activity trends exported successfully!'); + + } else { + const errorData = await response.json(); + throw new Error(errorData.error || 'Export failed'); + } + + } catch (error) { + console.error('Export error:', error); + this.showAlert('danger', `Export failed: ${error.message}`); + } finally { + // Reset button state + const exportBtn = document.getElementById('executeExportBtn'); + exportBtn.innerHTML = 'Export CSV'; + exportBtn.disabled = false; + } + } + + // Activity Logs Methods + async loadActivityLogs() { + console.log('=== loadActivityLogs CALLED ==='); + console.log('this:', this); + console.log('State:', { + activityLogsPage: this.activityLogsPage, + activityLogsPerPage: this.activityLogsPerPage, + activityLogsSearch: this.activityLogsSearch, + activityTypeFilter: this.activityTypeFilter + }); + + try { + const params = new URLSearchParams({ + page: this.activityLogsPage, + per_page: this.activityLogsPerPage, + search: this.activityLogsSearch, + activity_type_filter: this.activityTypeFilter + }); + + const url = `/api/admin/control-center/activity-logs?${params}`; + console.log('Fetching from:', url); + + const response = await fetch(url); + console.log('Response received:', response.status); + + if (!response.ok) { + throw new Error('Failed to load activity logs'); + } + + const data = await response.json(); + console.log('Activity logs loaded:', data); + + this.renderActivityLogs(data.logs, data.user_map); + this.renderActivityLogsPagination(data.pagination); + + } catch (error) { + console.error('Error loading activity logs:', error); + this.showActivityLogsError('Failed to load activity logs. Please try again.'); + } + } + + renderActivityLogs(logs, userMap) { + // Store logs for modal access + this.currentActivityLogs = logs; + + const tbody = document.getElementById('activityLogsTableBody'); + if (!tbody) return; + + if (!logs || logs.length === 0) { + tbody.innerHTML = ` + + +
    No activity logs found
    + + + `; + return; + } + + tbody.innerHTML = logs.map(log => { + // Handle user identification - some activities may not have user_id (system activities) + let userName = 'System'; + if (log.user_id) { + const user = userMap[log.user_id] || {}; + userName = user.display_name || user.email || log.user_id || 'Unknown User'; + } else if (log.admin_email) { + userName = log.admin_email; + } else if (log.requester_email) { + userName = log.requester_email; + } else if (log.added_by_email) { + userName = log.added_by_email; + } + + const timestamp = new Date(log.timestamp).toLocaleString(); + const activityType = this.formatActivityType(log.activity_type); + const details = this.formatActivityDetails(log); + const workspaceType = log.workspace_type || 'N/A'; + + const logIndex = logs.indexOf(log); + return ` + + ${timestamp} + ${activityType} + ${this.escapeHtml(userName)} + ${details} + ${this.capitalizeFirst(workspaceType)} + + `; + }).join(''); + } + + formatActivityType(activityType) { + const typeMap = { + 'user_login': 'User Login', + 'conversation_creation': 'Conversation Created', + 'conversation_deletion': 'Conversation Deleted', + 'conversation_archival': 'Conversation Archived', + 'document_creation': 'Document Created', + 'document_deletion': 'Document Deleted', + 'document_metadata_update': 'Document Metadata Updated', + 'token_usage': 'Token Usage', + 'group_status_change': 'Group Status Change', + 'group_member_deleted': 'Group Member Deleted', + 'add_member_directly': 'Add Member Directly', + 'admin_take_ownership_approved': 'Admin Take Ownership (Approved)', + 'delete_group_approved': 'Delete Group (Approved)', + 'delete_all_documents_approved': 'Delete All Documents (Approved)' + }; + return typeMap[activityType] || activityType.replace(/_/g, ' ').replace(/\b\w/g, l => l.toUpperCase()); + } + + formatActivityDetails(log) { + const activityType = log.activity_type; + + switch (activityType) { + case 'user_login': + return `Login method: ${log.login_method || log.details?.login_method || 'N/A'}`; + + case 'conversation_creation': + const convTitle = log.conversation?.title || 'Untitled'; + const convId = log.conversation?.conversation_id || 'N/A'; + return `Title: ${this.escapeHtml(convTitle)}
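// Illustrative sketch: loadActivityLogs() and the formatters above expect the
// activity-logs endpoint to return { logs, user_map, pagination }, where each log
// entry carries an activity_type plus type-specific fields. The field names below
// are inferred from formatActivityDetails(), so treat the exact shape as an
// assumption:
const exampleActivityLogsResponse = {
    logs: [
        {
            timestamp: '2025-03-01T14:22:05Z',
            activity_type: 'user_login',
            user_id: 'user-guid',
            workspace_type: 'personal',
            login_method: 'sso'                  // hypothetical value
        },
        {
            timestamp: '2025-03-01T14:25:40Z',
            activity_type: 'document_creation',
            user_id: 'user-guid',
            workspace_type: 'group',
            document: { file_name: 'report.pdf', file_type: 'pdf' }
        }
    ],
    user_map: { 'user-guid': { display_name: 'Ada Lovelace', email: 'ada@example.com' } },
    pagination: { page: 1, per_page: 25, total_items: 2, total_pages: 1 }
};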
    ID: ${convId}`; + + case 'conversation_deletion': + const delTitle = log.conversation?.title || 'Untitled'; + const delId = log.conversation?.conversation_id || 'N/A'; + return `Deleted: ${this.escapeHtml(delTitle)}
    ID: ${delId}`; + + case 'conversation_archival': + const archTitle = log.conversation?.title || 'Untitled'; + const archId = log.conversation?.conversation_id || 'N/A'; + return `Archived: ${this.escapeHtml(archTitle)}
    ID: ${archId}`; + + case 'document_creation': + const fileName = log.document?.file_name || 'Unknown'; + const fileType = log.document?.file_type || ''; + return `File: ${this.escapeHtml(fileName)}
    Type: ${fileType}`; + + case 'document_deletion': + const delFileName = log.document?.file_name || 'Unknown'; + const delFileType = log.document?.file_type || ''; + return `Deleted: ${this.escapeHtml(delFileName)}
    Type: ${delFileType}`; + + case 'document_metadata_update': + const updatedFileName = log.document?.file_name || 'Unknown'; + const updatedFields = Object.keys(log.updated_fields || {}).join(', ') || 'N/A'; + return `File: ${this.escapeHtml(updatedFileName)}
    Updated: ${updatedFields}`; + + case 'token_usage': + const tokenType = log.token_type || 'unknown'; + const totalTokens = log.usage?.total_tokens || 0; + const model = log.usage?.model || 'N/A'; + return `Type: ${tokenType}
    Tokens: ${totalTokens.toLocaleString()}
    Model: ${model}`; + + case 'group_status_change': + const groupName = log.group?.group_name || 'Unknown Group'; + const oldStatus = log.status_change?.old_status || 'N/A'; + const newStatus = log.status_change?.new_status || 'N/A'; + return `Group: ${this.escapeHtml(groupName)}
    Status: ${oldStatus} → ${newStatus}`; + + case 'group_member_deleted': + const memberName = log.removed_member?.name || log.removed_member?.email || 'Unknown'; + const memberGroupName = log.group?.group_name || 'Unknown Group'; + return `Removed: ${this.escapeHtml(memberName)}
    From: ${this.escapeHtml(memberGroupName)}`; + + case 'add_member_directly': + const addedMemberName = log.member_name || log.member_email || 'Unknown'; + const addedToGroup = log.group_name || 'Unknown Group'; + const memberRole = log.member_role || 'user'; + return `Added: ${this.escapeHtml(addedMemberName)}
    To: ${this.escapeHtml(addedToGroup)} (${memberRole})`; + + case 'admin_take_ownership_approved': + const ownershipGroup = log.group_name || 'Unknown Group'; + const oldOwner = log.old_owner_email || 'Unknown'; + const newOwner = log.new_owner_email || 'Unknown'; + const approver = log.approver_email || 'N/A'; + return `Group: ${this.escapeHtml(ownershipGroup)}
    Old Owner: ${this.escapeHtml(oldOwner)}
    New Owner: ${this.escapeHtml(newOwner)}
    Approved by: ${this.escapeHtml(approver)}`; + + case 'delete_group_approved': + const deletedGroup = log.group_name || 'Unknown Group'; + const requester = log.requester_email || 'Unknown'; + const delApprover = log.approver_email || 'N/A'; + return `Group: ${this.escapeHtml(deletedGroup)}
    Requested by: ${this.escapeHtml(requester)}
    Approved by: ${this.escapeHtml(delApprover)}`; + + case 'delete_all_documents_approved': + const docsGroup = log.group_name || 'Unknown Group'; + const docsDeleted = log.documents_deleted !== undefined ? log.documents_deleted : 'N/A'; + const docsRequester = log.requester_email || 'Unknown'; + const docsApprover = log.approver_email || 'N/A'; + return `Group: ${this.escapeHtml(docsGroup)}
    Documents Deleted: ${docsDeleted}
    Requested by: ${this.escapeHtml(docsRequester)}
    Approved by: ${this.escapeHtml(docsApprover)}`; + + case 'public_workspace_status_change': + const workspaceName = log.public_workspace?.workspace_name || log.workspace_context?.public_workspace_name || log.public_workspace_name || 'Unknown Workspace'; + const wsOldStatus = log.status_change?.old_status || 'N/A'; + const wsNewStatus = log.status_change?.new_status || 'N/A'; + return `Workspace: ${this.escapeHtml(workspaceName)}
    Status: ${wsOldStatus} → ${wsNewStatus}`; + + case 'admin_take_workspace_ownership_approved': + const wsOwnershipName = log.workspace_name || log.public_workspace_name || 'Unknown Workspace'; + const wsOldOwner = log.old_owner_email || 'Unknown'; + const wsNewOwner = log.new_owner_email || 'Unknown'; + const wsApprover = log.approver_email || 'N/A'; + return `Workspace: ${this.escapeHtml(wsOwnershipName)}
    Old Owner: ${this.escapeHtml(wsOldOwner)}
    New Owner: ${this.escapeHtml(wsNewOwner)}
    Approved by: ${this.escapeHtml(wsApprover)}`; + + case 'transfer_workspace_ownership_approved': + const wsTransferName = log.workspace_name || log.public_workspace_name || 'Unknown Workspace'; + const wsTransferOldOwner = log.old_owner_email || 'Unknown'; + const wsTransferNewOwner = log.new_owner_email || 'Unknown'; + const wsTransferApprover = log.approver_email || 'N/A'; + return `Workspace: ${this.escapeHtml(wsTransferName)}
    Old Owner: ${this.escapeHtml(wsTransferOldOwner)}
    New Owner: ${this.escapeHtml(wsTransferNewOwner)}
    Approved by: ${this.escapeHtml(wsTransferApprover)}`; + + case 'transfer_ownership_approved': + const transferGroup = log.group_name || 'Unknown Group'; + const transferOldOwner = log.old_owner_email || 'Unknown'; + const transferNewOwner = log.new_owner_email || 'Unknown'; + const transferApprover = log.approver_email || 'N/A'; + return `Group: ${this.escapeHtml(transferGroup)}
    Old Owner: ${this.escapeHtml(transferOldOwner)}
    New Owner: ${this.escapeHtml(transferNewOwner)}
    Approved by: ${this.escapeHtml(transferApprover)}`; + + case 'add_workspace_member_directly': + const wsAddedMemberName = log.member_name || log.member_email || 'Unknown'; + const wsAddedTo = log.workspace_name || log.public_workspace_name || 'Unknown Workspace'; + const wsMemberRole = log.member_role || 'user'; + return `Added: ${this.escapeHtml(wsAddedMemberName)}
    To: ${this.escapeHtml(wsAddedTo)} (${wsMemberRole})`; + + case 'delete_workspace_documents_approved': + const wsDocsName = log.workspace_name || log.public_workspace_name || 'Unknown Workspace'; + const wsDocsDeleted = log.documents_deleted !== undefined ? log.documents_deleted : 'N/A'; + const wsDocsRequester = log.requester_email || 'Unknown'; + const wsDocsApprover = log.approver_email || 'N/A'; + return `Workspace: ${this.escapeHtml(wsDocsName)}
    Documents Deleted: ${wsDocsDeleted}
    Requested by: ${this.escapeHtml(wsDocsRequester)}
    Approved by: ${this.escapeHtml(wsDocsApprover)}`; + + case 'delete_workspace_approved': + const deletedWorkspace = log.workspace_name || log.public_workspace_name || 'Unknown Workspace'; + const wsDelRequester = log.requester_email || 'Unknown'; + const wsDelApprover = log.approver_email || 'N/A'; + return `Workspace: ${this.escapeHtml(deletedWorkspace)}
    Requested by: ${this.escapeHtml(wsDelRequester)}
    Approved by: ${this.escapeHtml(wsDelApprover)}`; + + default: + return 'N/A'; + } + } + + renderActivityLogsPagination(pagination) { + const paginationInfo = document.getElementById('activityLogsPaginationInfo'); + const paginationNav = document.getElementById('activityLogsPagination'); + + if (paginationInfo) { + const start = (pagination.page - 1) * pagination.per_page + 1; + const end = Math.min(pagination.page * pagination.per_page, pagination.total_items); + paginationInfo.textContent = `Showing ${start}-${end} of ${pagination.total_items} logs`; + } + + if (paginationNav) { + let paginationHtml = ''; + + // Previous button + paginationHtml += ` +
  • + + + +
  • + `; + + // Page numbers + const startPage = Math.max(1, pagination.page - 2); + const endPage = Math.min(pagination.total_pages, pagination.page + 2); + + if (startPage > 1) { + paginationHtml += ` +
  • + 1 +
  • + `; + if (startPage > 2) { + paginationHtml += '
  • ...
  • '; + } + } + + for (let i = startPage; i <= endPage; i++) { + paginationHtml += ` +
  • + ${i} +
  • + `; + } + + if (endPage < pagination.total_pages) { + if (endPage < pagination.total_pages - 1) { + paginationHtml += '
  • ...
  • '; + } + paginationHtml += ` +
  • + ${pagination.total_pages} +
  • + `; + } + + // Next button + paginationHtml += ` +
  • + + + +
  • + `; + + paginationNav.innerHTML = paginationHtml; + } + } + + goToActivityLogsPage(page) { + this.activityLogsPage = page; + this.loadActivityLogs(); + } + + handleActivityLogsSearchChange() { + const searchInput = document.getElementById('activityLogsSearchInput'); + this.activityLogsSearch = searchInput ? searchInput.value : ''; + this.activityLogsPage = 1; + this.loadActivityLogs(); + } + + handleActivityLogsFilterChange() { + const filterSelect = document.getElementById('activityTypeFilterSelect'); + this.activityTypeFilter = filterSelect ? filterSelect.value : 'all'; + this.activityLogsPage = 1; + this.loadActivityLogs(); + } + + handleActivityLogsPerPageChange(event) { + this.activityLogsPerPage = parseInt(event.target.value); + this.activityLogsPage = 1; + this.loadActivityLogs(); + } + + async exportActivityLogsToCSV() { + try { + // Get current filtered data + const params = new URLSearchParams({ + page: 1, + per_page: 10000, // Get all for export + search: this.activityLogsSearch, + activity_type_filter: this.activityTypeFilter + }); + + const response = await fetch(`/api/admin/control-center/activity-logs?${params}`); + + if (!response.ok) { + throw new Error('Failed to load activity logs for export'); + } + + const data = await response.json(); + + // Convert to CSV + const headers = ['Timestamp', 'Activity Type', 'User ID', 'User Email', 'User Name', 'Details', 'Workspace Type']; + const csvRows = [headers.join(',')]; + + data.logs.forEach(log => { + const user = data.user_map[log.user_id] || {}; + const timestamp = new Date(log.timestamp).toISOString(); + const activityType = log.activity_type; + const userId = log.user_id; + const userEmail = user.email || ''; + const userName = user.display_name || ''; + const details = this.getActivityDetailsForCSV(log); + const workspaceType = log.workspace_type || ''; + + const row = [ + timestamp, + activityType, + userId, + userEmail, + userName, + details, + workspaceType + ].map(field => `"${String(field).replace(/"/g, '""')}"`); + + csvRows.push(row.join(',')); + }); + + // Download CSV + const csvContent = csvRows.join('\n'); + const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' }); + const link = document.createElement('a'); + const url = URL.createObjectURL(blob); + link.setAttribute('href', url); + link.setAttribute('download', `activity_logs_${new Date().toISOString().split('T')[0]}.csv`); + link.style.visibility = 'hidden'; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + } catch (error) { + console.error('Error exporting activity logs:', error); + showToast('Failed to export activity logs. 
Please try again.', 'danger'); + } + } + + getActivityDetailsForCSV(log) { + const activityType = log.activity_type; + + switch (activityType) { + case 'user_login': + return `Login method: ${log.login_method || log.details?.login_method || 'N/A'}`; + + case 'conversation_creation': + return `Title: ${log.conversation?.title || 'Untitled'}, ID: ${log.conversation?.conversation_id || 'N/A'}`; + + case 'document_creation': + return `File: ${log.document?.file_name || 'Unknown'}, Type: ${log.document?.file_type || ''}`; + + case 'token_usage': + return `Type: ${log.token_type || 'unknown'}, Tokens: ${log.usage?.total_tokens || 0}, Model: ${log.usage?.model || 'N/A'}`; + + case 'conversation_deletion': + return `Deleted: ${log.conversation?.title || 'Untitled'}, ID: ${log.conversation?.conversation_id || 'N/A'}`; + + case 'conversation_archival': + return `Archived: ${log.conversation?.title || 'Untitled'}, ID: ${log.conversation?.conversation_id || 'N/A'}`; + + default: + return 'N/A'; + } + } + + showActivityLogsError(message) { + const tbody = document.getElementById('activityLogsTableBody'); + if (tbody) { + tbody.innerHTML = ` + + +
    + ${this.escapeHtml(message)} +
    + + + `; + } + } + + capitalizeFirst(str) { + if (!str) return ''; + return str.charAt(0).toUpperCase() + str.slice(1); + } + + showRawLogModal(logIndex) { + if (!this.currentActivityLogs || !this.currentActivityLogs[logIndex]) { + showToast('Log data not available', 'warning'); + return; + } + + const log = this.currentActivityLogs[logIndex]; + const modalBody = document.getElementById('rawLogModalBody'); + const modalTitle = document.getElementById('rawLogModalTitle'); + + if (!modalBody || !modalTitle) { + showToast('Modal elements not found', 'danger'); + return; + } + + // Set title + const activityType = this.formatActivityType(log.activity_type); + const timestamp = new Date(log.timestamp).toLocaleString(); + modalTitle.textContent = `${activityType} - ${timestamp}`; + + // Display JSON with pretty formatting + modalBody.innerHTML = `
    ${this.escapeHtml(JSON.stringify(log, null, 2))}
    `; + + // Show modal + const modal = new bootstrap.Modal(document.getElementById('rawLogModal')); + modal.show(); + } + + copyRawLogToClipboard() { + const rawLogText = document.getElementById('rawLogModalBody')?.textContent; + if (!rawLogText) { + showToast('No log data to copy', 'warning'); + return; + } + + navigator.clipboard.writeText(rawLogText).then(() => { + this.showToast('Log data copied to clipboard', 'success'); + }).catch(err => { + console.error('Failed to copy:', err); + showToast('Failed to copy to clipboard', 'danger'); + }); + } + + escapeHtml(text) { + // Handle undefined, null, or non-string values + if (text === undefined || text === null) { + return ''; + } + // Convert to string if not already + text = String(text); + + const map = { + '&': '&', + '<': '<', + '>': '>', + '"': '"', + "'": ''' + }; + return text.replace(/[&<>"']/g, m => map[m]); + } + + toggleChatCustomDateRange() { + const customRadio = document.getElementById('chatCustom'); + const customDateRange = document.getElementById('chatCustomDateRange'); + + if (customRadio.checked) { + customDateRange.style.display = 'block'; + // Set default dates + const endDate = new Date(); + const startDate = new Date(); + startDate.setDate(startDate.getDate() - 29); + + document.getElementById('chatStartDate').value = startDate.toISOString().split('T')[0]; + document.getElementById('chatEndDate').value = endDate.toISOString().split('T')[0]; + } else { + customDateRange.style.display = 'none'; + } + } + + async chatActivityTrends() { + try { + // Get selected charts + const selectedCharts = []; + if (document.getElementById('chatLogins').checked) selectedCharts.push('logins'); + if (document.getElementById('chatChats').checked) selectedCharts.push('chats'); + if (document.getElementById('chatDocuments').checked) selectedCharts.push('documents'); + + if (selectedCharts.length === 0) { + showToast('Please select at least one chart to include in the chat.', 'warning'); + return; + } + + // Get selected time window + const timeWindowRadio = document.querySelector('input[name="chatTimeWindow"]:checked'); + const timeWindow = timeWindowRadio.value; + + let chatData = { + charts: selectedCharts, + time_window: timeWindow + }; + + // Add custom dates if selected + if (timeWindow === 'custom') { + const startDate = document.getElementById('chatStartDate').value; + const endDate = document.getElementById('chatEndDate').value; + + if (!startDate || !endDate) { + showToast('Please select both start and end dates for custom range.', 'warning'); + return; + } + + if (new Date(startDate) > new Date(endDate)) { + showToast('Start date must be before end date.', 'warning'); + return; + } + + chatData.start_date = startDate; + chatData.end_date = endDate; + } + + // Show loading state + const chatBtn = document.getElementById('executeChatBtn'); + const originalText = chatBtn.innerHTML; + chatBtn.innerHTML = 'Creating Chat...'; + chatBtn.disabled = true; + + // Make API call + const response = await fetch('/api/admin/control-center/activity-trends/chat', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(chatData) + }); + + const result = await response.json(); + + if (response.ok && result.success) { + // Close modal + const modal = bootstrap.Modal.getInstance(document.getElementById('chatModal')); + modal.hide(); + + // Show success message + this.showAlert('success', 'Chat conversation created successfully! 
Redirecting...'); + + // Redirect to the new conversation + setTimeout(() => { + window.location.href = result.redirect_url; + }, 1500); + + } else { + throw new Error(result.error || 'Failed to create chat conversation'); + } + + } catch (error) { + console.error('Chat creation error:', error); + this.showAlert('danger', `Failed to create chat: ${error.message}`); + } finally { + // Reset button state + const chatBtn = document.getElementById('executeChatBtn'); + chatBtn.innerHTML = 'Start Chat'; + chatBtn.disabled = false; + } + } + + destroyAllCharts() { + // Destroy all chart instances + if (this.loginsChart) { + this.loginsChart.destroy(); + this.loginsChart = null; + } + if (this.chatsChart) { + this.chatsChart.destroy(); + this.chatsChart = null; + } + if (this.documentsChart) { + this.documentsChart.destroy(); + this.documentsChart = null; + } + if (this.tokensChart) { + this.tokensChart.destroy(); + this.tokensChart = null; + } + if (this.personalDocumentsChart) { + this.personalDocumentsChart.destroy(); + this.personalDocumentsChart = null; + } + if (this.groupDocumentsChart) { + this.groupDocumentsChart.destroy(); + this.groupDocumentsChart = null; + } + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] All charts destroyed'); + } + } + + showAllChartsError() { + // Show error for all four charts + this.showChartError('loginsChart', 'logins'); + this.showChartError('chatsChart', 'chats'); + this.showChartError('documentsChart', 'documents'); + this.showChartError('tokensChart', 'tokens'); + + // Ensure main loading overlay is hidden when showing error + this.showLoading(false); + if (appSettings?.enable_debug_logging) { + console.log('🔍 [Frontend Debug] Main loading overlay hidden after all charts error'); + } + } + + showChartError(canvasId, chartType) { + const canvas = document.getElementById(canvasId); + const chartProperty = chartType + 'Chart'; + + if (canvas) { + // Hide canvas + canvas.style.display = 'none'; + + // Destroy existing chart if it exists + if (this[chartProperty]) { + this[chartProperty].destroy(); + this[chartProperty] = null; + } + + // Find the chart container (parent of canvas) + const chartContainer = canvas.parentElement; + if (chartContainer) { + chartContainer.innerHTML = ` + +
    + +

    Unable to load ${chartType}

    + +
    + `; + } + } + } + + async loadGroups() { + console.log('🔄 ControlCenter.loadGroups() called - using direct API approach like loadUsers()'); + + const tbody = document.getElementById('groupsTableBody'); + if (!tbody) { + console.warn('⚠️ Groups table body not found'); + return; + } + + // Show loading state like users do + tbody.innerHTML = ` + + +
    + Loading... +
    +
    Loading groups...
    + + + `; + + try { + // Get current filter values like users do + const searchTerm = document.getElementById('groupSearchInput')?.value || ''; + const statusFilter = document.getElementById('groupStatusFilterSelect')?.value || 'all'; + + // Build API URL with filters - same pattern as loadUsers + // Use cached metrics by default (force_refresh=false) to get pre-calculated data + const params = new URLSearchParams({ + page: 1, + per_page: 100, + search: searchTerm, + status_filter: statusFilter, + force_refresh: 'false' // Use cached metrics for performance + }); + + console.log('📡 Fetching groups from API:', `/api/admin/control-center/groups?${params}`); + + const response = await fetch(`/api/admin/control-center/groups?${params}`); + + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + const data = await response.json(); + console.log('📊 Groups data received:', { + groupCount: data.groups ? data.groups.length : 0, + sampleGroup: data.groups && data.groups[0] ? { + id: data.groups[0].id, + name: data.groups[0].name, + hasCachedMetrics: !!data.groups[0].activity, + storageSize: data.groups[0].activity?.document_metrics?.storage_account_size + } : null + }); + + // Render groups data directly like users + this.renderGroups(data.groups || []); + + console.log('✅ Groups loaded and rendered successfully'); + + } catch (error) { + console.error('❌ Error loading groups:', error); + + // Show error state like users do + tbody.innerHTML = ` + + + +
    Error loading groups: ${this.escapeHtml(error.message)}
    + + + + `; + } + } + + renderGroups(groups) { + const tbody = document.getElementById('groupsTableBody'); + if (!tbody) return; + + console.log('🎨 Rendering', groups.length, 'groups'); + + if (groups.length === 0) { + tbody.innerHTML = ` + + + +
    No groups found
    + + + `; + return; + } + + // Render groups using the same pattern as users + tbody.innerHTML = groups.map(group => this.createGroupRow(group)).join(''); + + // Add event listeners to checkboxes + const checkboxes = tbody.querySelectorAll('.group-checkbox'); + checkboxes.forEach(checkbox => { + checkbox.addEventListener('change', (e) => { + if (e.target.checked) { + this.selectedGroups.add(e.target.value); + } else { + this.selectedGroups.delete(e.target.value); + } + this.updateGroupBulkActionButton(); + }); + }); + + // Update bulk action button state + this.updateGroupBulkActionButton(); + + // Initialize sorting after data is loaded + if (!window.groupTableSorter) { + window.groupTableSorter = new GroupTableSorter('groupsTable'); + } + } + + createGroupRow(group) { + // Format storage size + const storageSize = group.metrics?.document_metrics?.storage_account_size || group.activity?.document_metrics?.storage_account_size || 0; + const storageSizeFormatted = storageSize > 0 ? this.formatBytes(storageSize) : '0 B'; + + // Format AI search size + const aiSearchSize = group.metrics?.document_metrics?.ai_search_size || group.activity?.document_metrics?.ai_search_size || 0; + const aiSearchSizeFormatted = aiSearchSize > 0 ? this.formatBytes(aiSearchSize) : '0 B'; + + // Get document metrics + const totalDocs = group.metrics?.document_metrics?.total_documents || group.activity?.document_metrics?.total_documents || 0; + + // Get group info + const memberCount = group.member_count || (group.users ? group.users.length : 0); + const ownerName = group.owner?.displayName || group.owner?.display_name || 'Unknown'; + const ownerEmail = group.owner?.email || ''; + + // Get status and format badge + const status = group.status || 'active'; + const statusConfig = { + 'active': { class: 'bg-success', text: 'Active' }, + 'locked': { class: 'bg-warning text-dark', text: 'Locked' }, + 'upload_disabled': { class: 'bg-info text-dark', text: 'Upload Disabled' }, + 'inactive': { class: 'bg-secondary', text: 'Inactive' } + }; + const statusInfo = statusConfig[status] || statusConfig['active']; + + return ` + + + + + +
    ${this.escapeHtml(group.name || 'Unnamed Group')}
    +
    ${this.escapeHtml(group.description || 'No description')}
    +
    ID: ${group.id}
    + + +
    ${this.escapeHtml(ownerName)}
    +
    ${this.escapeHtml(ownerEmail)}
    + + +
    ${memberCount} member${memberCount === 1 ? '' : 's'}
    + + + ${statusInfo.text} + + +
    +
    Total Docs: ${totalDocs}
    +
    AI Search: ${aiSearchSizeFormatted}
    +
    Storage: ${storageSizeFormatted}
    + ${storageSize > 0 ? '
    (Enhanced)
    ' : ''} +
    + + + + + + `; + } + + formatBytes(bytes) { + if (bytes === 0) return '0 B'; + const k = 1024; + const sizes = ['B', 'KB', 'MB', 'GB', 'TB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; + } + + manageGroup(groupId) { + // Call the GroupManager's manageGroup function directly + console.log('ControlCenter.manageGroup() redirecting to GroupManager.manageGroup()'); + if (typeof GroupManager !== 'undefined' && GroupManager.manageGroup) { + GroupManager.manageGroup(groupId); + } else { + console.error('GroupManager not found or manageGroup method not available'); + showToast('Group management functionality is not available', 'danger'); + } + } + + // Public Workspaces Management Methods + async loadPublicWorkspaces() { + console.log('🌐 ControlCenter.loadPublicWorkspaces() called - using same approach as loadGroups()'); + + const tbody = document.getElementById('publicWorkspacesTableBody'); + if (!tbody) { + console.warn('⚠️ Public workspaces table body not found'); + return; + } + + // Show loading state like groups do + tbody.innerHTML = ` + + +
    + Loading... +
    +
    Loading public workspaces...
    + + + `; + + try { + // Get current filter values like groups do + const searchTerm = document.getElementById('publicWorkspaceSearchInput')?.value || ''; + const statusFilter = document.getElementById('publicWorkspaceStatusFilterSelect')?.value || 'all'; + + // Build API URL with filters - same pattern as loadGroups + // Use cached metrics by default (force_refresh=false) to get pre-calculated data + const params = new URLSearchParams({ + page: 1, + per_page: 100, + search: searchTerm, + status_filter: statusFilter, + force_refresh: 'false' // Use cached metrics for performance + }); + + console.log('📡 Fetching public workspaces from API:', `/api/admin/control-center/public-workspaces?${params}`); + + const response = await fetch(`/api/admin/control-center/public-workspaces?${params}`); + + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + const data = await response.json(); + console.log('📊 Public workspaces data received:', { + workspaceCount: data.workspaces ? data.workspaces.length : 0, + sampleWorkspace: data.workspaces && data.workspaces[0] ? { + id: data.workspaces[0].id, + name: data.workspaces[0].name, + hasCachedMetrics: !!data.workspaces[0].activity, + storageSize: data.workspaces[0].activity?.document_metrics?.storage_account_size + } : null + }); + + // Render workspaces data directly like groups + this.renderPublicWorkspaces(data.workspaces || []); + + console.log('✅ Public workspaces loaded and rendered successfully'); + + } catch (error) { + console.error('❌ Error loading public workspaces:', error); + + // Show error state like groups do + tbody.innerHTML = ` + + + +
    Error loading public workspaces: ${this.escapeHtml(error.message)}
    + + + + `; + } + } + + renderPublicWorkspaces(workspaces) { + const tbody = document.getElementById('publicWorkspacesTableBody'); + if (!tbody) return; + + console.log('🎨 Rendering', workspaces.length, 'public workspaces'); + + if (workspaces.length === 0) { + tbody.innerHTML = ` + + + +
    No public workspaces found
    + + + `; + return; + } + + // Render workspaces using the same pattern as groups + tbody.innerHTML = workspaces.map(workspace => this.createPublicWorkspaceRow(workspace)).join(''); + + // Add event listeners to checkboxes + const checkboxes = tbody.querySelectorAll('.public-workspace-checkbox'); + checkboxes.forEach(checkbox => { + checkbox.addEventListener('change', (e) => { + if (e.target.checked) { + this.selectedPublicWorkspaces.add(e.target.value); + } else { + this.selectedPublicWorkspaces.delete(e.target.value); + } + this.updatePublicWorkspaceBulkActionButton(); + }); + }); + + // Update bulk action button state + this.updatePublicWorkspaceBulkActionButton(); + } + + createPublicWorkspaceRow(workspace) { + // Format storage size + const storageSize = workspace.activity?.document_metrics?.storage_account_size || 0; + const storageSizeFormatted = storageSize > 0 ? this.formatBytes(storageSize) : '0 B'; + + // Format AI search size + const aiSearchSize = workspace.activity?.document_metrics?.ai_search_size || 0; + const aiSearchSizeFormatted = aiSearchSize > 0 ? this.formatBytes(aiSearchSize) : '0 B'; + + // Get document metrics + const totalDocs = workspace.activity?.document_metrics?.total_documents || 0; + + // Get workspace info + const memberCount = workspace.member_count || 0; + const ownerName = workspace.owner?.displayName || workspace.owner?.display_name || workspace.owner_name || 'Unknown'; + const ownerEmail = workspace.owner?.email || workspace.owner_email || ''; + + // Get status and format badge + const status = workspace.status || 'active'; + const statusConfig = { + 'active': { class: 'bg-success', text: 'Active' }, + 'locked': { class: 'bg-warning text-dark', text: 'Locked' }, + 'upload_disabled': { class: 'bg-info text-dark', text: 'Upload Disabled' }, + 'inactive': { class: 'bg-secondary', text: 'Inactive' } + }; + const statusInfo = statusConfig[status] || statusConfig['active']; + + return ` + + + + + +
    ${this.escapeHtml(workspace.name || 'Unnamed Workspace')}
    +
    ${this.escapeHtml(workspace.description || 'No description')}
    +
    ID: ${workspace.id}
    + + +
    ${this.escapeHtml(ownerName)}
    +
    ${this.escapeHtml(ownerEmail)}
    + + +
    ${memberCount} member${memberCount !== 1 ? 's' : ''}
    + + + ${statusInfo.text} + + +
    +
    Total Docs: ${totalDocs}
    +
    AI Search: ${aiSearchSizeFormatted}
    +
    Storage: ${storageSizeFormatted}
    + ${storageSize > 0 ? '
    (Enhanced)
    ' : ''} +
    + + + + + + `; + } + + managePublicWorkspace(workspaceId) { + console.log('Managing workspace:', workspaceId); + if (window.WorkspaceManager) { + WorkspaceManager.manageWorkspace(workspaceId); + } else { + showToast('Workspace manager not loaded', 'danger'); + } + } + + searchPublicWorkspaces(searchTerm) { + // Debounce search like groups + clearTimeout(this.publicWorkspaceSearchTimeout); + this.publicWorkspaceSearchTimeout = setTimeout(() => { + this.loadPublicWorkspaces(); + }, 300); + } + + filterPublicWorkspacesByStatus(status) { + // Reload with new filter + this.loadPublicWorkspaces(); + } + + refreshPublicWorkspaces() { + console.log('🌐 Refreshing public workspaces with fresh data...'); + + // Get current search and filter values + const searchInput = document.getElementById('publicWorkspaceSearchInput'); + const statusSelect = document.getElementById('publicWorkspaceStatusFilterSelect'); + + const searchTerm = searchInput ? searchInput.value.trim() : ''; + const statusFilter = statusSelect ? statusSelect.value : 'all'; + + // Build API URL with force_refresh=true + const params = new URLSearchParams({ + page: 1, + per_page: 100, + search: searchTerm, + status_filter: statusFilter, + force_refresh: 'true' // Force fresh calculation + }); + + fetch(`/api/admin/control-center/public-workspaces?${params}`) + .then(response => { + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + return response.json(); + }) + .then(data => { + console.log('🌍 Refreshed public workspaces data received:', data); + this.renderPublicWorkspaces(data.workspaces || []); + + // Show success message + this.showAlert('success', 'Public workspaces refreshed successfully'); + }) + .catch(error => { + console.error('Error refreshing public workspaces:', error); + this.showAlert('danger', `Error refreshing public workspaces: ${error.message}`); + }); + } + + async exportGroupsToCSV() { + try { + // Show loading state + const exportBtn = document.getElementById('exportGroupsBtn'); + const originalText = exportBtn.innerHTML; + exportBtn.disabled = true; + exportBtn.innerHTML = 'Exporting...'; + + // Get all groups data + const response = await fetch('/api/admin/control-center/groups?all=true&force_refresh=false'); + if (!response.ok) { + throw new Error(`Failed to fetch groups: ${response.status}`); + } + + const data = await response.json(); + if (!data.success) { + throw new Error(data.message || 'Failed to fetch groups'); + } + + // Convert groups data to CSV + const csvContent = this.convertGroupsToCSV(data.groups || []); + + // Create and download CSV file + const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' }); + const link = document.createElement('a'); + const url = URL.createObjectURL(blob); + link.setAttribute('href', url); + + // Generate filename with current date + const now = new Date(); + const dateStr = now.toISOString().split('T')[0]; // YYYY-MM-DD format + link.setAttribute('download', `groups_export_${dateStr}.csv`); + + // Trigger download + link.style.visibility = 'hidden'; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + this.showAlert('success', `Successfully exported ${data.groups?.length || 0} groups to CSV`); + + } catch (error) { + console.error('Export error:', error); + this.showAlert('danger', `Export failed: ${error.message}`); + } finally { + // Restore button state + const exportBtn = document.getElementById('exportGroupsBtn'); + if (exportBtn) { + exportBtn.disabled = false; + 
exportBtn.innerHTML = 'Export'; + } + } + } + + async exportPublicWorkspacesToCSV() { + try { + // Show loading state + const exportBtn = document.getElementById('exportPublicWorkspacesBtn'); + const originalText = exportBtn.innerHTML; + exportBtn.disabled = true; + exportBtn.innerHTML = 'Exporting...'; + + // Get all public workspaces data + const response = await fetch('/api/admin/control-center/public-workspaces?all=true&force_refresh=false'); + if (!response.ok) { + throw new Error(`Failed to fetch public workspaces: ${response.status}`); + } + + const data = await response.json(); + if (!data.success) { + throw new Error(data.message || 'Failed to fetch public workspaces'); + } + + // Convert workspaces data to CSV + const csvContent = this.convertPublicWorkspacesToCSV(data.workspaces || []); + + // Create and download CSV file + const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' }); + const link = document.createElement('a'); + const url = URL.createObjectURL(blob); + link.setAttribute('href', url); + + // Generate filename with current date + const now = new Date(); + const dateStr = now.toISOString().split('T')[0]; // YYYY-MM-DD format + link.setAttribute('download', `public_workspaces_export_${dateStr}.csv`); + + // Trigger download + link.style.visibility = 'hidden'; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + this.showAlert('success', `Successfully exported ${data.workspaces?.length || 0} public workspaces to CSV`); + + } catch (error) { + console.error('Export error:', error); + this.showAlert('danger', `Export failed: ${error.message}`); + } finally { + // Restore button state + const exportBtn = document.getElementById('exportPublicWorkspacesBtn'); + if (exportBtn) { + exportBtn.disabled = false; + exportBtn.innerHTML = 'Export'; + } + } + } + + showAlert(type, message) { + // Create alert element + const alertDiv = document.createElement('div'); + alertDiv.className = `alert alert-${type} alert-dismissible fade show position-fixed`; + alertDiv.style.cssText = 'top: 20px; right: 20px; z-index: 9999; min-width: 300px;'; + alertDiv.innerHTML = ` + ${message} + + `; + + // Add to page + document.body.appendChild(alertDiv); + + // Auto-remove after 5 seconds + setTimeout(() => { + if (alertDiv.parentNode) { + alertDiv.remove(); + } + }, 5000); + } +} + +// Global functions for refresh functionality +async function refreshControlCenterData() { + console.log('Refresh function called'); + + const refreshBtn = document.getElementById('refreshDataBtn'); + const refreshBtnText = document.getElementById('refreshBtnText'); + + console.log('Elements found:', { + refreshBtn: !!refreshBtn, + refreshBtnText: !!refreshBtnText + }); + + // Check if elements exist + if (!refreshBtn || !refreshBtnText) { + console.error('Refresh button elements not found'); + console.log('Available elements:', { + refreshDataBtn: document.getElementById('refreshDataBtn'), + refreshBtnText: document.getElementById('refreshBtnText'), + allButtons: document.querySelectorAll('button'), + elementsWithRefresh: document.querySelectorAll('[id*="refresh"]') + }); + showAlert('Refresh button not found. Please reload the page.', 'danger'); + return; + } + + const originalText = refreshBtnText ? refreshBtnText.textContent : 'Refresh Data'; + const iconElement = refreshBtn ? 
refreshBtn.querySelector('i') : null; + + try { + // Update button state + refreshBtn.disabled = true; + refreshBtnText.textContent = 'Refreshing...'; + if (iconElement) { + iconElement.className = 'bi bi-arrow-repeat me-1 fa-spin'; + } + + // Call refresh API + const response = await fetch('/api/admin/control-center/refresh', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + } + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + const result = await response.json(); + + if (result.success) { + // Show success message with both users and groups + const usersMsg = `${result.refreshed_users} users`; + const groupsMsg = `${result.refreshed_groups || 0} groups`; + showAlert(`Data refreshed successfully! Updated ${usersMsg} and ${groupsMsg}.`, 'success'); + + console.log('🎉 Data refresh completed:', { + refreshed_users: result.refreshed_users, + refreshed_groups: result.refreshed_groups, + timestamp: new Date().toISOString() + }); + + // Update last refresh timestamp + await loadRefreshStatus(); + + console.log('🔄 Starting UI refresh...'); + // Refresh the currently active tab content + await refreshActiveTabContent(); + + console.log('✅ Data refresh and view refresh completed successfully'); + } else { + throw new Error(result.message || 'Failed to refresh data'); + } + + } catch (error) { + console.error('Error refreshing data:', error); + showAlert(`Failed to refresh data: ${error.message}`, 'danger'); + } finally { + // Restore button state + if (refreshBtn) { + refreshBtn.disabled = false; + } + if (refreshBtnText) { + refreshBtnText.textContent = originalText; + } + if (iconElement) { + iconElement.className = 'bi bi-arrow-clockwise me-1'; + } + } +} + +async function loadRefreshStatus() { + // Only admins can see refresh status + if (window.hasControlCenterAdmin !== true) { + return; + } + + try { + const response = await fetch('/api/admin/control-center/refresh-status'); + if (response.ok) { + const result = await response.json(); + const lastRefreshElement = document.getElementById('lastRefreshTime'); + + if (lastRefreshElement) { + if (result.last_refresh_formatted) { + lastRefreshElement.textContent = result.last_refresh_formatted; + if (lastRefreshElement.parentElement) { + lastRefreshElement.parentElement.style.display = ''; + } + } else { + lastRefreshElement.textContent = 'Never'; + if (lastRefreshElement.parentElement) { + lastRefreshElement.parentElement.style.display = ''; + } + } + } else { + console.warn('lastRefreshTime element not found'); + } + } else { + console.error('Failed to load refresh status:', response.status); + } + } catch (error) { + console.error('Error loading refresh status:', error); + const lastRefreshElement = document.getElementById('lastRefreshTime'); + if (lastRefreshElement) { + lastRefreshElement.textContent = 'Error loading'; + } + } + + // Load and display auto-refresh schedule info + try { + const response = await fetch('/api/admin/control-center/refresh-status'); + if (response.ok) { + const result = await response.json(); + const autoRefreshInfoElement = document.getElementById('autoRefreshInfo'); + const autoRefreshStatusElement = document.getElementById('autoRefreshStatus'); + + if (autoRefreshInfoElement && autoRefreshStatusElement) { + if (result.auto_refresh_enabled) { + // Build status text + let statusText = `Auto-refresh: daily at ${result.auto_refresh_hour_formatted || result.auto_refresh_hour + ':00 UTC'}`; + if (result.auto_refresh_next_run_formatted) { + statusText 
+= ` (next: ${result.auto_refresh_next_run_formatted})`; + } + autoRefreshStatusElement.textContent = statusText; + autoRefreshInfoElement.classList.remove('d-none'); + } else { + autoRefreshInfoElement.classList.add('d-none'); + } + } + } + } catch (autoRefreshError) { + console.error('Error loading auto-refresh status:', autoRefreshError); + } +} + +async function refreshActiveTabContent() { + try { + console.log('🔄 Refreshing active tab content...'); + + // Check which tab is currently active + const activeTab = document.querySelector('.nav-link.active'); + const activeTabContent = document.querySelector('.tab-pane.active'); + + console.log('🔍 Tab detection:', { + activeTab: activeTab ? activeTab.id : 'none', + activeTabContent: activeTabContent ? activeTabContent.id : 'none', + allTabs: Array.from(document.querySelectorAll('.nav-link')).map(t => ({id: t.id, active: t.classList.contains('active')})), + windowControlCenter: !!window.controlCenter, + groupManager: typeof GroupManager !== 'undefined' + }); + + if (!activeTab) { + console.log('📝 No active tab found, checking for direct content...'); + // If no tabs (sidebar navigation), refresh both users and groups + if (window.controlCenter && window.controlCenter.loadUsers) { + console.log('👤 Refreshing users in sidebar mode...'); + await window.controlCenter.loadUsers(); + } + + if (window.controlCenter && window.controlCenter.loadGroups) { + console.log('👥 Refreshing groups in sidebar mode...'); + await window.controlCenter.loadGroups(); + } + return; + } + + const tabId = activeTab ? activeTab.id : null; + console.log('🎯 Active tab detected:', tabId); + + // Refresh content based on active tab + switch (tabId) { + case 'dashboard-tab': + console.log('Refreshing dashboard content...'); + // Refresh dashboard stats if available + if (window.controlCenter && window.controlCenter.refreshStats) { + await window.controlCenter.refreshStats(); + } + break; + + case 'users-tab': + console.log('Refreshing users table...'); + // Refresh users table + if (window.controlCenter && window.controlCenter.loadUsers) { + await window.controlCenter.loadUsers(); + } + break; + + case 'groups-tab': + console.log('👥 Refreshing groups content...'); + // Refresh groups using ControlCenter method (same pattern as users) + if (window.controlCenter && window.controlCenter.loadGroups) { + await window.controlCenter.loadGroups(); + } + break; + + case 'workspaces-tab': + console.log('Refreshing workspaces content...'); + // Refresh public workspaces if available + if (window.controlCenter && window.controlCenter.loadPublicWorkspaces) { + await window.controlCenter.loadPublicWorkspaces(); + } + break; + + case 'activity-tab': + console.log('Refreshing activity trends...'); + // Refresh activity trends if available + if (window.controlCenter && window.controlCenter.loadActivityTrends) { + await window.controlCenter.loadActivityTrends(); + } + break; + + default: + console.log('🤔 Unknown or no active tab, attempting to refresh all content...'); + // Default fallback - refresh both users and groups using ControlCenter methods + if (window.controlCenter && window.controlCenter.loadUsers) { + console.log('👤 Fallback: Refreshing users'); + await window.controlCenter.loadUsers(); + } + + if (window.controlCenter && window.controlCenter.loadGroups) { + console.log('👥 Fallback: Refreshing groups'); + await window.controlCenter.loadGroups(); + } + break; + } + + console.log('Active tab content refresh completed'); + + } catch (error) { + console.error('Error refreshing 
active tab content:', error); + // Don't throw the error to avoid breaking the main refresh flow + } +} + +function showAlert(message, type = 'info') { + // Create alert element + const alertDiv = document.createElement('div'); + alertDiv.className = `alert alert-${type} alert-dismissible fade show position-fixed`; + alertDiv.style.cssText = 'top: 20px; right: 20px; z-index: 9999; min-width: 300px;'; + alertDiv.innerHTML = ` + ${message} + + `; + + // Add to page + document.body.appendChild(alertDiv); + + // Auto-remove after 5 seconds + setTimeout(() => { + if (alertDiv.parentNode) { + alertDiv.remove(); + } + }, 5000); +} + +// Activity Log Migration Functions +async function checkMigrationStatus() { + // Only admins can see migration status + if (window.hasControlCenterAdmin !== true) { + return; + } + + try { + const response = await fetch('/api/admin/control-center/migrate/status'); + if (!response.ok) { + throw new Error('Failed to fetch migration status'); + } + + const data = await response.json(); + + if (data.migration_needed) { + // Update banner with counts + document.getElementById('migrationConversationCount').textContent = data.conversations_without_logs.toLocaleString(); + document.getElementById('migrationDocumentCount').textContent = data.total_documents_without_logs.toLocaleString(); + + // Show the banner + const banner = document.getElementById('migrationBanner'); + if (banner) { + banner.style.display = 'block'; + } + } else { + // Hide banner if no migration needed + const banner = document.getElementById('migrationBanner'); + if (banner) { + banner.style.display = 'none'; + } + } + + return data; + } catch (error) { + console.error('Error checking migration status:', error); + return null; + } +} + +function showMigrationProgress() { + const progressDiv = document.getElementById('migrationProgress'); + const migrateBtn = document.getElementById('migrateBannerBtn'); + + if (progressDiv) { + progressDiv.style.display = 'block'; + } + + if (migrateBtn) { + migrateBtn.disabled = true; + migrateBtn.innerHTML = ' Migrating...'; + } +} + +function hideMigrationProgress() { + const progressDiv = document.getElementById('migrationProgress'); + const migrateBtn = document.getElementById('migrateBannerBtn'); + + if (progressDiv) { + progressDiv.style.display = 'none'; + } + + if (migrateBtn) { + migrateBtn.disabled = false; + migrateBtn.innerHTML = ' Migrate Now'; + } +} + +function updateMigrationProgress(percent, statusText) { + const progressBar = document.getElementById('migrationProgressBar'); + const progressText = document.getElementById('migrationProgressText'); + const statusTextEl = document.getElementById('migrationStatusText'); + + if (progressBar) { + progressBar.style.width = percent + '%'; + progressBar.setAttribute('aria-valuenow', percent); + } + + if (progressText) { + progressText.textContent = percent + '%'; + } + + if (statusTextEl && statusText) { + statusTextEl.textContent = statusText; + } +} + +function hideMigrationBanner() { + const banner = document.getElementById('migrationBanner'); + if (banner) { + banner.style.display = 'none'; + } +} + +async function performMigration() { + // Show confirmation modal + const modal = new bootstrap.Modal(document.getElementById('migrationConfirmModal')); + modal.show(); +} + +async function executeMigration() { + // Close the confirmation modal + const modal = bootstrap.Modal.getInstance(document.getElementById('migrationConfirmModal')); + if (modal) { + modal.hide(); + } + + try { + showMigrationProgress(); + 
updateMigrationProgress(10, 'Starting migration...'); + + const response = await fetch('/api/admin/control-center/migrate/all', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + } + }); + + updateMigrationProgress(50, 'Processing records...'); + + if (!response.ok) { + throw new Error('Migration request failed'); + } + + const result = await response.json(); + + updateMigrationProgress(90, 'Finalizing...'); + + // Show results + setTimeout(() => { + updateMigrationProgress(100, 'Migration completed!'); + + setTimeout(() => { + hideMigrationProgress(); + hideMigrationBanner(); + + // Show detailed results + const totalMigrated = result.total_migrated || 0; + const totalFailed = result.total_failed || 0; + + let message = `Migration completed! Conversations: ${result.conversations_migrated || 0}, Personal docs: ${result.personal_documents_migrated || 0}, Group docs: ${result.group_documents_migrated || 0}, Public docs: ${result.public_documents_migrated || 0}. Total: ${totalMigrated} records migrated`; + + if (totalFailed > 0) { + message += `. Warning: ${totalFailed} records failed (check logs)`; + } + + showToast(message, 'success'); + + // Refresh activity trends to show new data + if (window.controlCenter) { + window.controlCenter.loadActivityTrends(); + } + }, 1500); + }, 500); + + } catch (error) { + console.error('Migration error:', error); + hideMigrationProgress(); + showToast(`Migration failed: ${error.message}. Check console and server logs for details.`, 'danger'); + } +} + +// Make migration functions globally accessible +window.checkMigrationStatus = checkMigrationStatus; +window.performMigration = performMigration; + +// Make refresh function globally accessible for debugging +window.refreshControlCenterData = refreshControlCenterData; +window.loadRefreshStatus = loadRefreshStatus; +window.refreshActiveTabContent = refreshActiveTabContent; +window.debugControlCenterElements = function() { + console.log('=== Control Center Elements Debug ==='); + console.log('refreshDataBtn:', document.getElementById('refreshDataBtn')); + console.log('refreshBtnText:', document.getElementById('refreshBtnText')); + console.log('lastRefreshTime:', document.getElementById('lastRefreshTime')); + console.log('lastRefreshInfo:', document.getElementById('lastRefreshInfo')); + console.log('All buttons:', Array.from(document.querySelectorAll('button')).map(b => ({id: b.id, text: b.textContent.trim()}))); + console.log('All spans:', Array.from(document.querySelectorAll('span')).map(s => ({id: s.id, text: s.textContent.trim()}))); +}; + +// Initialize Control Center when DOM is loaded +document.addEventListener('DOMContentLoaded', function() { + window.controlCenter = new ControlCenter(); + + // Export GroupTableSorter to window for global access + window.GroupTableSorter = GroupTableSorter; + + // Wire up migration confirmation button + const confirmMigrationBtn = document.getElementById('confirmMigrationBtn'); + if (confirmMigrationBtn) { + confirmMigrationBtn.addEventListener('click', executeMigration); + } + + // Debug: Log element availability + console.log('Control Center Elements Check on DOM Ready:'); + window.debugControlCenterElements(); + + // Only load admin features if user has ControlCenterAdmin role + const hasAdminRole = window.hasControlCenterAdmin === true; + + if (hasAdminRole) { + // Load initial refresh status with a slight delay to ensure elements are rendered + setTimeout(() => { + loadRefreshStatus(); + + // Check migration status + checkMigrationStatus(); + 
}, 100); + } +}); \ No newline at end of file diff --git a/application/single_app/static/js/group/manage_group.js b/application/single_app/static/js/group/manage_group.js index 3575297e..d6372838 100644 --- a/application/single_app/static/js/group/manage_group.js +++ b/application/single_app/static/js/group/manage_group.js @@ -1,4 +1,5 @@ // manage_group.js +import { showToast } from "../chat/chat-toast.js"; let currentUserRole = null; @@ -58,6 +59,121 @@ $(document).ready(function () { } }); + // Add event delegation for select user button in search results + $(document).on("click", ".select-user-btn", function () { + const id = $(this).data("user-id"); + const name = $(this).data("user-name"); + const email = $(this).data("user-email"); + selectUserForAdd(id, name, email); + }); + + // Add event delegation for remove member button + $(document).on("click", ".remove-member-btn", function () { + const userId = $(this).data("user-id"); + removeMember(userId); + }); + + // Add event delegation for change role button + $(document).on("click", ".change-role-btn", function () { + const userId = $(this).data("user-id"); + const currentRole = $(this).data("user-role"); + openChangeRoleModal(userId, currentRole); + $("#changeRoleModal").modal("show"); + }); + + // Add event delegation for approve/reject request buttons + $(document).on("click", ".approve-request-btn", function () { + const requestId = $(this).data("request-id"); + approveRequest(requestId); + }); + + $(document).on("click", ".reject-request-btn", function () { + const requestId = $(this).data("request-id"); + rejectRequest(requestId); + }); + + // CSV Bulk Upload Events + $("#addBulkMemberBtn").on("click", function () { + $("#csvBulkUploadModal").modal("show"); + }); + + $("#csvExampleBtn").on("click", downloadCsvExample); + $("#csvConfigBtn").on("click", showCsvConfig); + $("#csvFileInput").on("change", handleCsvFileSelect); + $("#csvNextBtn").on("click", startCsvUpload); + $("#csvDoneBtn").on("click", function () { + resetCsvModal(); + loadMembers(); + }); + + // Reset CSV modal when closed + $("#csvBulkUploadModal").on("hidden.bs.modal", function () { + resetCsvModal(); + }); + + // Activity timeline pagination + $('input[name="activityLimit"]').on('change', function() { + const limit = parseInt($(this).val()); + loadActivityTimeline(limit); + }); + + // Bulk Actions Events + $("#selectAllMembers").on("change", function () { + const isChecked = $(this).prop("checked"); + $(".member-checkbox").prop("checked", isChecked); + updateBulkActionsBar(); + }); + + $(document).on("change", ".member-checkbox", function () { + updateBulkActionsBar(); + updateSelectAllCheckbox(); + }); + + $("#clearSelectionBtn").on("click", function () { + $(".member-checkbox").prop("checked", false); + $("#selectAllMembers").prop("checked", false); + updateBulkActionsBar(); + }); + + $("#bulkAssignRoleBtn").on("click", function () { + const selectedMembers = getSelectedMembers(); + if (selectedMembers.length === 0) { + alert("Please select at least one member"); + return; + } + $("#bulkRoleCount").text(selectedMembers.length); + $("#bulkAssignRoleModal").modal("show"); + }); + + $("#bulkAssignRoleForm").on("submit", function (e) { + e.preventDefault(); + bulkAssignRole(); + }); + + $("#bulkRemoveMembersBtn").on("click", function () { + const selectedMembers = getSelectedMembers(); + if (selectedMembers.length === 0) { + alert("Please select at least one member"); + return; + } + + // Populate the list of members to be removed + let membersList = "
      "; + selectedMembers.forEach(member => { + membersList += `
    • • ${member.name} (${member.email})
    • `; + }); + membersList += "
    "; + + $("#bulkRemoveCount").text(selectedMembers.length); + $("#bulkRemoveMembersList").html(membersList); + $("#bulkRemoveMembersModal").modal("show"); + }); + + $("#bulkRemoveMembersForm").on("submit", function (e) { + e.preventDefault(); + bulkRemoveMembers(); + }); + $("#transferOwnershipBtn").on("click", function () { $.get(`/api/groups/${groupId}/members`, function (members) { let options = ""; @@ -76,7 +192,7 @@ $(document).ready(function () { e.preventDefault(); const newOwnerId = $("#newOwnerSelect").val(); if (!newOwnerId) { - alert("Please select a member."); + showToast("Please select a member.", "warning"); return; } @@ -86,15 +202,19 @@ $(document).ready(function () { contentType: "application/json", data: JSON.stringify({ newOwnerId }), success: function (resp) { - alert("Ownership transferred successfully."); - window.location.reload(); + $("#transferOwnershipModal").modal("hide"); + showToast("Ownership transferred successfully.", "success"); + setTimeout(function() { + window.location.reload(); + }, 1000); }, error: function (err) { console.error(err); + $("#transferOwnershipModal").modal("hide"); if (err.responseJSON && err.responseJSON.error) { - alert("Error: " + err.responseJSON.error); + showToast("Error: " + err.responseJSON.error, "danger"); } else { - alert("Failed to transfer ownership."); + showToast("Failed to transfer ownership.", "danger"); } }, }); @@ -145,17 +265,22 @@ function loadGroupInfo(doneCallback) { const ownerName = group.owner?.displayName || "N/A"; const ownerEmail = group.owner?.email || "N/A"; - $("#groupInfoContainer").html(` -

    ${group.name}

    -

    ${group.description || ""}

    -

    - Owner Name: ${ownerName}
    - Owner Email: ${ownerEmail} -

    - `); + // Update hero section + const initial = group.name ? group.name.charAt(0).toUpperCase() : 'G'; + $('#groupInitial').text(initial); + $('#groupHeroName').text(group.name); + $('#groupOwnerName').text(ownerName); + $('#groupOwnerEmail').text(ownerEmail); + $('#groupHeroDescription').text(group.description || 'No description provided'); + + // Update group status alert if not active + updateGroupStatusAlert(group.status || 'active'); const admins = group.admins || []; const docManagers = group.documentManagers || []; + const groupStatus = group.status || 'active'; + const isGroupEditable = (groupStatus === 'active' || groupStatus === 'upload_disabled'); + const isGroupLocked = (groupStatus === 'locked' || groupStatus === 'inactive'); if (userId === group.owner?.id) { currentUserRole = "Owner"; @@ -172,15 +297,38 @@ function loadGroupInfo(doneCallback) { $("#editGroupName").val(group.name); $("#editGroupDescription").val(group.description); $("#ownerActionsContainer").show(); + + // Disable editing for locked/inactive groups + if (isGroupLocked) { + $("#editGroupName").prop('readonly', true); + $("#editGroupDescription").prop('readonly', true); + $("#editGroupForm button[type='submit']").hide(); + } else { + $("#editGroupName").prop('readonly', false); + $("#editGroupDescription").prop('readonly', false); + $("#editGroupForm button[type='submit']").show(); + } } else { $("#leaveGroupContainer").show(); } if (currentUserRole === "Admin" || currentUserRole === "Owner") { - $("#addMemberBtn").show(); + // Show/hide member management buttons based on group status + if (isGroupLocked) { + $("#addMemberBtn").hide(); + $("#addBulkMemberBtn").hide(); + } else { + $("#addMemberBtn").show(); + $("#addBulkMemberBtn").show(); + } + $("#pendingRequestsSection").show(); + $("#activityTimelineSection").show(); + $("#stats-tab-item").show(); loadPendingRequests(); + loadGroupStats(); + loadActivityTimeline(50); } if (typeof doneCallback === "function") { @@ -251,8 +399,18 @@ function loadMembers(searchTerm, roleFilter) { $.get(url, function (members) { let rows = ""; members.forEach((m) => { + const isOwner = m.role === "Owner"; + const checkboxHtml = isOwner || (currentUserRole !== "Owner" && currentUserRole !== "Admin") + ? '' + : ``; + rows += ` + ${checkboxHtml} ${m.displayName || "(no name)"}
    ${m.email || ""} @@ -263,10 +421,14 @@ function loadMembers(searchTerm, roleFilter) { `; }); $("#membersTable tbody").html(rows); + + // Reset selection UI + $("#selectAllMembers").prop("checked", false); + updateBulkActionsBar(); }).fail(function (err) { console.error(err); $("#membersTable tbody").html( - "Failed to load members" + "Failed to load members" ); }); } @@ -278,17 +440,15 @@ function renderMemberActions(member) { } else { return ` `; @@ -311,11 +471,21 @@ function setRole(userId, newRole) { data: JSON.stringify({ role: newRole }), success: function () { $("#changeRoleModal").modal("hide"); + showToast("success", "Role updated successfully"); loadMembers(); }, error: function (err) { - console.error(err); - alert("Failed to update role."); + console.error("Error updating role:", err); + let errorMsg = "Failed to update role."; + if (err.status === 404) { + errorMsg = "Member not found. They may have been removed."; + loadMembers(); // Refresh the member list + } else if (err.status === 403) { + errorMsg = "You don't have permission to change this member's role."; + } else if (err.responseJSON && err.responseJSON.message) { + errorMsg = err.responseJSON.message; + } + showToast("error", errorMsg); }, }); } @@ -326,11 +496,21 @@ function removeMember(userId) { url: `/api/groups/${groupId}/members/${userId}`, method: "DELETE", success: function () { + showToast("success", "Member removed successfully"); loadMembers(); }, error: function (err) { - console.error(err); - alert("Failed to remove member."); + console.error("Error removing member:", err); + let errorMsg = "Failed to remove member."; + if (err.status === 404) { + errorMsg = "Member not found. They may have already been removed."; + loadMembers(); // Refresh the member list + } else if (err.status === 403) { + errorMsg = "You don't have permission to remove this member."; + } else if (err.responseJSON && err.responseJSON.message) { + errorMsg = err.responseJSON.message; + } + showToast("error", errorMsg); }, }); } @@ -344,8 +524,10 @@ function loadPendingRequests() { ${u.displayName} ${u.email} - - + + `; @@ -393,66 +575,65 @@ function rejectRequest(requestId) { }); } +// Search users for manual add +// Search users for manual add function searchUsers() { const term = $("#userSearchTerm").val().trim(); if (!term) { - alert("Please enter a search term."); + // Show inline validation error + $("#searchStatus").text("⚠️ Please enter a name or email to search"); + $("#searchStatus").removeClass("text-muted text-success").addClass("text-warning"); + $("#userSearchTerm").addClass("is-invalid"); return; } - - // UI state + + // Clear any previous validation states + $("#userSearchTerm").removeClass("is-invalid"); + $("#searchStatus").removeClass("text-warning text-danger text-success").addClass("text-muted"); $("#searchStatus").text("Searching..."); $("#searchUsersBtn").prop("disabled", true); - $.ajax({ - url: "/api/userSearch", - method: "GET", - data: { query: term }, - dataType: "json", - }) - .done(function (results) { - renderUserSearchResults(results); - }) - .fail(function (jqXHR, textStatus, errorThrown) { - console.error("User search error:", textStatus, errorThrown); - - if (jqXHR.status === 401) { - // Session expired or no token → force re-login - window.location.href = "/login"; + $.get("/api/userSearch", { query: term }) + .done(function(users) { + renderUserSearchResults(users); + // Show success status + if (users && users.length > 0) { + $("#searchStatus").text(`✓ Found ${users.length} user(s)`); + 
$("#searchStatus").removeClass("text-muted text-warning text-danger").addClass("text-success"); } else { - const msg = jqXHR.responseJSON?.error - ? jqXHR.responseJSON.error - : "User search failed."; - alert(msg); + $("#searchStatus").text("No users found"); + $("#searchStatus").removeClass("text-muted text-warning text-success").addClass("text-muted"); } }) + .fail(function (jq) { + const err = jq.responseJSON?.error || jq.statusText; + // Show inline error + $("#searchStatus").text(`❌ Search failed: ${err}`); + $("#searchStatus").removeClass("text-muted text-warning text-success").addClass("text-danger"); + // Also show toast for critical errors + showToast("User search failed: " + err, "danger"); + }) .always(function () { - // Restore UI state - $("#searchStatus").text(""); $("#searchUsersBtn").prop("disabled", false); }); } +// Render user-search results in add-member modal function renderUserSearchResults(users) { let html = ""; - if (!users || users.length === 0) { - html = ` - - No results found - - `; + if (!users || !users.length) { + html = `No results.`; } else { - users.forEach((u) => { + users.forEach(u => { html += ` ${u.displayName || "(no name)"} ${u.email || ""} - @@ -463,9 +644,10 @@ function renderUserSearchResults(users) { $("#userSearchResultsTable tbody").html(html); } -function selectUserForAdd(uid, displayName, email) { - $("#newUserId").val(uid); - $("#newUserDisplayName").val(displayName); +// Populate manual-add fields from search result +function selectUserForAdd(id, name, email) { + $("#newUserId").val(id); + $("#newUserDisplayName").val(name); $("#newUserEmail").val(email); } @@ -494,3 +676,798 @@ function addMemberDirectly() { }, }); } + +// Function to update group status alert box +function updateGroupStatusAlert(status) { + const alertBox = $("#group-status-alert"); + const contentDiv = $("#group-status-content"); + + if (!status || status === 'active') { + alertBox.addClass('d-none'); + alertBox.removeClass('alert-warning alert-info alert-danger'); + return; + } + + const statusMessages = { + 'locked': { + type: 'warning', + icon: 'bi-lock-fill', + title: '🔒 Locked (Read-Only)', + message: 'Group is in read-only mode', + details: [ + '❌ New document uploads', + '❌ Document deletions', + '❌ Creating, editing, or deleting prompts', + '❌ Creating, editing, or deleting agents', + '❌ Creating, editing, or deleting actions', + '✅ Viewing existing documents', + '✅ Chat and search with existing documents', + '✅ Using existing prompts, agents, and actions' + ] + }, + 'upload_disabled': { + type: 'info', + icon: 'bi-cloud-slash-fill', + title: '📁 Upload Disabled', + message: 'Restrict new content but allow other operations', + details: [ + '❌ New document uploads', + '✅ Document deletions (cleanup)', + '✅ Full chat and search functionality', + '✅ Creating, editing, and deleting prompts', + '✅ Creating, editing, and deleting agents', + '✅ Creating, editing, and deleting actions' + ] + }, + 'inactive': { + type: 'danger', + icon: 'bi-exclamation-triangle-fill', + title: '⭕ Inactive', + message: 'Group is disabled', + details: [ + '❌ ALL operations (uploads, chat, document access)', + '❌ Creating, editing, or deleting prompts, agents, and actions', + '✅ Only admin viewing of group information', + 'Use case: Decommissioned projects, suspended groups, compliance holds' + ] + } + }; + + const config = statusMessages[status]; + if (config) { + alertBox.removeClass('d-none alert-warning alert-info alert-danger'); + alertBox.addClass(`alert-${config.type}`); + + const 
detailsList = config.details.map(d => `<li>${d}</li>`).join('');
+
+        contentDiv.html(`
+            ${config.title} - ${config.message}
+            ${detailsList}
    + `); + } else { + alertBox.addClass('d-none'); + } +} + +// ============================================================================ +// CSV Bulk Member Upload Functions +// ============================================================================ + +let csvParsedData = []; + +function downloadCsvExample() { + const csvContent = `userId,displayName,email,role +00000000-0000-0000-0000-000000000001,John Smith,john.smith@contoso.com,user +00000000-0000-0000-0000-000000000002,Jane Doe,jane.doe@contoso.com,admin +00000000-0000-0000-0000-000000000003,Bob Johnson,bob.johnson@contoso.com,document_manager`; + + const blob = new Blob([csvContent], { type: 'text/csv' }); + const url = window.URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = 'bulk_members_example.csv'; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + window.URL.revokeObjectURL(url); +} + +function showCsvConfig() { + const modal = new bootstrap.Modal(document.getElementById('csvFormatInfoModal')); + modal.show(); +} + +function validateGuid(guid) { + return ValidationUtils.validateGuid(guid); +} + +function validateEmail(email) { + const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/; + return emailRegex.test(email); +} + +function handleCsvFileSelect(event) { + const file = event.target.files[0]; + if (!file) { + $("#csvNextBtn").prop("disabled", true); + $("#csvValidationResults").hide(); + $("#csvErrorDetails").hide(); + return; + } + + const reader = new FileReader(); + reader.onload = function (e) { + const text = e.target.result; + const lines = text.split(/\r?\n/).filter(line => line.trim()); + + $("#csvErrorDetails").hide(); + $("#csvValidationResults").hide(); + + // Validate header + if (lines.length < 2) { + showCsvError("CSV must contain at least a header row and one data row"); + return; + } + + const header = lines[0].toLowerCase().trim(); + if (header !== "userid,displayname,email,role") { + showCsvError("Invalid header. Expected: userId,displayName,email,role"); + return; + } + + // Validate row count + const dataRows = lines.slice(1); + if (dataRows.length > 1000) { + showCsvError(`Too many rows. Maximum 1,000 members allowed (found ${dataRows.length})`); + return; + } + + // Parse and validate rows + csvParsedData = []; + const errors = []; + const validRoles = ['user', 'admin', 'document_manager']; + + for (let i = 0; i < dataRows.length; i++) { + const rowNum = i + 2; // +2 because header is row 1 + const row = dataRows[i].split(','); + + if (row.length !== 4) { + errors.push(`Row ${rowNum}: Expected 4 columns, found ${row.length}`); + continue; + } + + const userId = row[0].trim(); + const displayName = row[1].trim(); + const email = row[2].trim(); + const role = row[3].trim().toLowerCase(); + + if (!userId || !displayName || !email || !role) { + errors.push(`Row ${rowNum}: All fields are required`); + continue; + } + + if (!validateGuid(userId)) { + errors.push(`Row ${rowNum}: Invalid GUID format for userId`); + continue; + } + + if (!validateEmail(email)) { + errors.push(`Row ${rowNum}: Invalid email format`); + continue; + } + + if (!validRoles.includes(role)) { + errors.push(`Row ${rowNum}: Invalid role '${role}'. Must be: user, admin, or document_manager`); + continue; + } + + csvParsedData.push({ userId, displayName, email, role }); + } + + if (errors.length > 0) { + showCsvError(`Found ${errors.length} validation error(s):\n` + errors.slice(0, 10).join('\n') + + (errors.length > 10 ? `\n... 
and ${errors.length - 10} more` : '')); + return; + } + + // Show validation success + const sampleRows = csvParsedData.slice(0, 3); + $("#csvValidationDetails").html(` +

+            ✓ Valid CSV file detected
+            Total members to add: ${csvParsedData.length}
+            Sample data (first 3):
+            ${sampleRows.map(row => `<li>${row.displayName} (${row.email})</li>`).join('')}
    + `); + $("#csvValidationResults").show(); + $("#csvNextBtn").prop("disabled", false); + }; + + reader.readAsText(file); +} + +// Stats and Charts Functions +let documentChart, storageChart, tokenChart; + +function loadGroupStats() { + $.get(`/api/groups/${groupId}/stats`) + .done(function(data) { + // Update stat cards + $('#stat-documents').text(data.totalDocuments || 0); + + // Format storage + const storageMB = Math.round(data.storageUsed / (1024 * 1024)); + $('#stat-storage').text(storageMB + ' MB'); + + // Format tokens + const tokensK = Math.round(data.totalTokens / 1000); + $('#stat-tokens').text(tokensK + 'K'); + + $('#stat-members').text(data.totalMembers || 0); + + // Create charts + createDocumentChart(data.documentActivity); + createStorageChart(data.storage); + createTokenChart(data.tokenUsage); + }) + .fail(function(xhr) { + console.error('Failed to load group stats:', xhr); + $('#stat-documents').text('Error'); + $('#stat-storage').text('Error'); + $('#stat-tokens').text('Error'); + $('#stat-members').text('Error'); + }); +} + +function createDocumentChart(activityData) { + const ctx = document.getElementById('documentChart'); + if (!ctx) return; + + if (documentChart) { + documentChart.destroy(); + } + + documentChart = new Chart(ctx, { + type: 'bar', + data: { + labels: activityData.labels, + datasets: [ + { + label: 'Uploads', + data: activityData.uploads, + backgroundColor: 'rgba(13, 202, 240, 0.8)', + borderColor: 'rgba(13, 202, 240, 1)', + borderWidth: 1 + }, + { + label: 'Deletes', + data: activityData.deletes, + backgroundColor: 'rgba(220, 53, 69, 0.8)', + borderColor: 'rgba(220, 53, 69, 1)', + borderWidth: 1 + } + ] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: true, + position: 'top' + } + }, + scales: { + y: { + beginAtZero: true, + ticks: { + stepSize: 1 + } + } + } + } + }); +} + +function createStorageChart(storageData) { + const ctx = document.getElementById('storageChart'); + if (!ctx) return; + + if (storageChart) { + storageChart.destroy(); + } + + const aiSearchMB = Math.round(storageData.ai_search_size / (1024 * 1024)); + const blobStorageMB = Math.round(storageData.storage_account_size / (1024 * 1024)); + + storageChart = new Chart(ctx, { + type: 'doughnut', + data: { + labels: ['AI Search', 'Blob Storage'], + datasets: [{ + data: [aiSearchMB, blobStorageMB], + backgroundColor: [ + 'rgba(13, 110, 253, 0.8)', + 'rgba(13, 202, 240, 0.8)' + ], + borderColor: [ + 'rgba(13, 110, 253, 1)', + 'rgba(13, 202, 240, 1)' + ], + borderWidth: 1 + }] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: true, + position: 'bottom' + }, + tooltip: { + callbacks: { + label: function(context) { + return context.label + ': ' + context.parsed + ' MB'; + } + } + } + } + } + }); +} + +function createTokenChart(tokenData) { + const ctx = document.getElementById('tokenChart'); + if (!ctx) return; + + if (tokenChart) { + tokenChart.destroy(); + } + + tokenChart = new Chart(ctx, { + type: 'bar', + data: { + labels: tokenData.labels, + datasets: [{ + label: 'Tokens', + data: tokenData.data, + backgroundColor: 'rgba(255, 193, 7, 0.8)', + borderColor: 'rgba(255, 193, 7, 1)', + borderWidth: 1 + }] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: false + } + }, + scales: { + y: { + beginAtZero: true + } + } + } + }); +} + +// Activity Timeline Functions +function loadActivityTimeline(limit = 50) { + 
$.get(`/api/groups/${groupId}/activity?limit=${limit}`) + .done(function(activities) { + if (!activities || activities.length === 0) { + $('#activityTimeline').html('

    No recent activity

    '); + return; + } + + const html = activities.map(activity => renderActivityItem(activity)).join(''); + $('#activityTimeline').html(html); + }) + .fail(function(xhr) { + if (xhr.status === 403) { + $('#activityTimeline').html('

    Access denied - Only group owners and admins can view activity timeline

    '); + } else { + $('#activityTimeline').html('

    Failed to load activity

    '); + } + }); +} + +function renderActivityItem(activity) { + const icons = { + 'document_creation': 'file-earmark-arrow-up', + 'document_deletion': 'file-earmark-x', + 'token_usage': 'cpu', + 'user_login': 'box-arrow-in-right', + 'conversation_creation': 'chat-dots', + 'conversation_deletion': 'chat-dots-fill' + }; + + const colors = { + 'document_creation': 'success', + 'document_deletion': 'danger', + 'token_usage': 'primary', + 'user_login': 'info', + 'conversation_creation': 'primary', + 'conversation_deletion': 'danger' + }; + + const activityType = activity.activity_type || 'unknown'; + const icon = icons[activityType] || 'circle'; + const color = colors[activityType] || 'secondary'; + const time = formatRelativeTime(activity.timestamp || activity.created_at); + + // Generate description based on activity type + let description = ''; + let title = activityType.replace(/_/g, ' ').replace(/\b\w/g, l => l.toUpperCase()); + + if (activityType === 'document_creation' && activity.document) { + description = `File: ${activity.document.file_name || 'Unknown'}`; + } else if (activityType === 'document_deletion' && activity.document_metadata) { + description = `File: ${activity.document_metadata.file_name || 'Unknown'}`; + } else if (activityType === 'token_usage' && activity.usage) { + description = `Tokens: ${formatNumber(activity.usage.total_tokens || 0)}`; + } else if (activityType === 'user_login') { + description = 'User logged in'; + } else if (activityType === 'conversation_creation') { + description = 'New conversation started'; + } else if (activityType === 'conversation_deletion') { + description = 'Conversation deleted'; + } + + const activityJson = JSON.stringify(activity); + + return ` +
+                ${title}
+                ${time}
+                ${description}
    + `; +} + +function formatRelativeTime(timestamp) { + if (!timestamp) return 'Unknown'; + + const date = new Date(timestamp); + const now = new Date(); + const diffMs = now - date; + const diffMins = Math.floor(diffMs / 60000); + const diffHours = Math.floor(diffMs / 3600000); + const diffDays = Math.floor(diffMs / 86400000); + + if (diffMins < 1) return 'Just now'; + if (diffMins < 60) return `${diffMins}m ago`; + if (diffHours < 24) return `${diffHours}h ago`; + if (diffDays < 7) return `${diffDays}d ago`; + if (diffDays < 30) return `${Math.floor(diffDays / 7)}w ago`; + if (diffDays < 365) return `${Math.floor(diffDays / 30)}mo ago`; + return `${Math.floor(diffDays / 365)}y ago`; +} + +function formatNumber(num) { + return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ','); +} + +function showRawActivity(element) { + try { + const activityJson = element.getAttribute('data-activity'); + const activity = JSON.parse(activityJson); + const modalBody = document.getElementById('rawActivityModalBody'); + modalBody.innerHTML = `
    ${JSON.stringify(activity, null, 2)}
    `; + $('#rawActivityModal').modal('show'); + } catch (error) { + console.error('Error showing raw activity:', error); + } +} + +function copyRawActivityToClipboard() { + const modalBody = document.getElementById('rawActivityModalBody'); + const text = modalBody.textContent; + + navigator.clipboard.writeText(text).then(() => { + showToast('Activity data copied to clipboard', 'success'); + }).catch(err => { + console.error('Failed to copy:', err); + showToast('Failed to copy to clipboard', 'danger'); + }); +} + +// Make functions globally available for onclick handlers +window.showRawActivity = showRawActivity; +window.copyRawActivityToClipboard = copyRawActivityToClipboard; + +function showCsvError(message) { + $("#csvErrorList").html(`
    ${escapeHtml(message)}
    `); + $("#csvErrorDetails").show(); + $("#csvNextBtn").prop("disabled", true); + csvParsedData = []; +} + +function startCsvUpload() { + if (csvParsedData.length === 0) { + alert("No valid data to upload"); + return; + } + + // Switch to stage 2 + $("#csvStage1").hide(); + $("#csvStage2").show(); + $("#csvNextBtn").hide(); + $("#csvCancelBtn").hide(); + $("#csvModalClose").hide(); + + // Upload members + uploadCsvMembers(); +} + +async function uploadCsvMembers() { + let successCount = 0; + let failedCount = 0; + let skippedCount = 0; + const failures = []; + + for (let i = 0; i < csvParsedData.length; i++) { + const member = csvParsedData[i]; + const progress = Math.round(((i + 1) / csvParsedData.length) * 100); + + updateCsvProgress(progress, `Processing ${i + 1} of ${csvParsedData.length}: ${member.displayName}`); + + try { + const response = await fetch(`/api/groups/${groupId}/members`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + userId: member.userId, + displayName: member.displayName, + email: member.email, + role: member.role + }) + }); + + const data = await response.json(); + + if (response.ok && data.success) { + successCount++; + } else if (data.error && data.error.includes('already a member')) { + skippedCount++; + } else { + failedCount++; + failures.push(`${member.displayName}: ${data.error || 'Unknown error'}`); + } + } catch (error) { + failedCount++; + failures.push(`${member.displayName}: ${error.message}`); + } + } + + // Show summary + showCsvSummary(successCount, failedCount, skippedCount, failures); +} + +function updateCsvProgress(percentage, statusText) { + $("#csvProgressBar").css("width", percentage + "%"); + $("#csvProgressBar").attr("aria-valuenow", percentage); + $("#csvProgressText").text(percentage + "%"); + $("#csvStatusText").text(statusText); +} + +function showCsvSummary(successCount, failedCount, skippedCount, failures) { + $("#csvStage2").hide(); + $("#csvStage3").show(); + $("#csvDoneBtn").show(); + + let summaryHtml = ` +

    Upload Summary:

    +
      +
+            <li>✅ Successfully added: ${successCount}</li>
+            <li>⏭️ Skipped (already members): ${skippedCount}</li>
+            <li>❌ Failed: ${failedCount}</li>
    + `; + + if (failures.length > 0) { + summaryHtml += ` +
    +

    Failed Members:

    +
+                ${failures.slice(0, 10).map(f => `<li>${escapeHtml(f)}</li>`).join('')}
+                ${failures.length > 10 ? `<li>... and ${failures.length - 10} more</li>` : ''}
    + `; + } + + $("#csvSummary").html(summaryHtml); +} + +function resetCsvModal() { + // Reset to stage 1 + $("#csvStage1").show(); + $("#csvStage2").hide(); + $("#csvStage3").hide(); + $("#csvNextBtn").show(); + $("#csvNextBtn").prop("disabled", true); + $("#csvCancelBtn").show(); + $("#csvDoneBtn").hide(); + $("#csvModalClose").show(); + $("#csvValidationResults").hide(); + $("#csvErrorDetails").hide(); + $("#csvFileInput").val(''); + csvParsedData = []; + + // Reset progress + updateCsvProgress(0, 'Ready'); +} + +function escapeHtml(text) { + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; +} + +// ============================================================================ +// Bulk Actions Functions +// ============================================================================ + +function getSelectedMembers() { + const selected = []; + $(".member-checkbox:checked").each(function () { + selected.push({ + userId: $(this).data("user-id"), + name: $(this).data("user-name"), + email: $(this).data("user-email"), + role: $(this).data("user-role") + }); + }); + return selected; +} + +function updateBulkActionsBar() { + const selectedCount = $(".member-checkbox:checked").length; + if (selectedCount > 0) { + $("#selectedCount").text(selectedCount); + $("#bulkActionsBar").show(); + } else { + $("#bulkActionsBar").hide(); + } +} + +function updateSelectAllCheckbox() { + const totalCheckboxes = $(".member-checkbox").length; + const checkedCheckboxes = $(".member-checkbox:checked").length; + + if (totalCheckboxes > 0 && checkedCheckboxes === totalCheckboxes) { + $("#selectAllMembers").prop("checked", true); + $("#selectAllMembers").prop("indeterminate", false); + } else if (checkedCheckboxes > 0) { + $("#selectAllMembers").prop("checked", false); + $("#selectAllMembers").prop("indeterminate", true); + } else { + $("#selectAllMembers").prop("checked", false); + $("#selectAllMembers").prop("indeterminate", false); + } +} + +async function bulkAssignRole() { + const selectedMembers = getSelectedMembers(); + const newRole = $("#bulkRoleSelect").val(); + + if (selectedMembers.length === 0) { + alert("No members selected"); + return; + } + + // Close modal and show progress + $("#bulkAssignRoleModal").modal("hide"); + + let successCount = 0; + let failedCount = 0; + const failures = []; + + for (let i = 0; i < selectedMembers.length; i++) { + const member = selectedMembers[i]; + + try { + const response = await fetch(`/api/groups/${groupId}/members/${member.userId}`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ role: newRole }) + }); + + const data = await response.json(); + + if (response.ok) { + successCount++; + } else { + failedCount++; + failures.push(`${member.name}: ${data.error || 'Unknown error'}`); + } + } catch (error) { + failedCount++; + failures.push(`${member.name}: ${error.message}`); + } + } + + // Show summary + let message = `Role assignment complete:\n✅ Success: ${successCount}\n❌ Failed: ${failedCount}`; + if (failures.length > 0) { + message += "\n\nFailed members:\n" + failures.slice(0, 5).join("\n"); + if (failures.length > 5) { + message += `\n... 
and ${failures.length - 5} more`; + } + } + alert(message); + + // Reload members and clear selection + loadMembers(); +} + +async function bulkRemoveMembers() { + const selectedMembers = getSelectedMembers(); + + if (selectedMembers.length === 0) { + alert("No members selected"); + return; + } + + // Close modal + $("#bulkRemoveMembersModal").modal("hide"); + + let successCount = 0; + let failedCount = 0; + const failures = []; + + for (let i = 0; i < selectedMembers.length; i++) { + const member = selectedMembers[i]; + + try { + const response = await fetch(`/api/groups/${groupId}/members/${member.userId}`, { + method: 'DELETE' + }); + + const data = await response.json(); + + if (response.ok && data.success) { + successCount++; + } else { + failedCount++; + failures.push(`${member.name}: ${data.error || 'Unknown error'}`); + } + } catch (error) { + failedCount++; + failures.push(`${member.name}: ${error.message}`); + } + } + + // Show summary + let message = `Member removal complete:\n✅ Success: ${successCount}\n❌ Failed: ${failedCount}`; + if (failures.length > 0) { + message += "\n\nFailed removals:\n" + failures.slice(0, 5).join("\n"); + if (failures.length > 5) { + message += `\n... and ${failures.length - 5} more`; + } + } + alert(message); + + // Reload members and clear selection + loadMembers(); +} diff --git a/application/single_app/static/js/notifications.js b/application/single_app/static/js/notifications.js new file mode 100644 index 00000000..3728f7a2 --- /dev/null +++ b/application/single_app/static/js/notifications.js @@ -0,0 +1,569 @@ +// notifications.js + +/** + * Notifications UI Management + * + * Handles notification polling, rendering, interactions, and badge updates. + * Polling interval is randomized between 20-40 seconds to prevent thundering herd. + */ + +(function() { + 'use strict'; + + // Configuration + const MIN_POLL_INTERVAL = 20000; // 20 seconds + const MAX_POLL_INTERVAL = 40000; // 40 seconds + + // State + let currentPage = 1; + let currentPerPage = 20; + let currentFilter = 'all'; + let currentSearch = ''; + let pollTimeout = null; + let isPolling = false; + + /** + * Get randomized poll interval + */ + function getRandomPollInterval() { + return Math.floor(Math.random() * (MAX_POLL_INTERVAL - MIN_POLL_INTERVAL + 1)) + MIN_POLL_INTERVAL; + } + + /** + * Update notification badge in top nav and sidebar + */ + function updateNotificationBadge(count) { + // Top nav badges + const badge = document.getElementById('notification-badge'); + const countBadge = document.getElementById('notification-count-badge'); + + // Sidebar badges + const sidebarBadge = document.getElementById('sidebar-notification-badge'); + const sidebarCountBadge = document.getElementById('sidebar-notification-count-badge'); + + if (count > 0) { + const displayCount = count > 9 ? 
'+' : count.toString(); + + // Update top nav avatar badge + if (badge) { + badge.textContent = displayCount; + badge.classList.add('d-flex'); + badge.style.display = 'flex'; + } + + // Update top nav menu badge + if (countBadge) { + countBadge.textContent = displayCount; + countBadge.style.display = 'inline-block'; + } + + // Update sidebar avatar badge + if (sidebarBadge) { + sidebarBadge.textContent = displayCount; + sidebarBadge.classList.add('d-flex'); + sidebarBadge.style.display = 'flex'; + } + + // Update sidebar menu badge + if (sidebarCountBadge) { + sidebarCountBadge.textContent = displayCount; + sidebarCountBadge.style.display = 'inline-block'; + } + } else { + // Hide all badges + if (badge) { + badge.classList.remove('d-flex'); + badge.style.display = 'none'; + } + + if (countBadge) { + countBadge.style.display = 'none'; + } + + if (sidebarBadge) { + sidebarBadge.classList.remove('d-flex'); + sidebarBadge.style.display = 'none'; + } + + if (sidebarCountBadge) { + sidebarCountBadge.style.display = 'none'; + } + } + } + + /** + * Poll for notification count + */ + function pollNotificationCount() { + if (isPolling) return; + + isPolling = true; + + fetch('/api/notifications/count') + .then(response => response.json()) + .then(data => { + if (data.success) { + updateNotificationBadge(data.count); + } + }) + .catch(error => { + console.error('Error polling notification count:', error); + }) + .finally(() => { + isPolling = false; + + // Schedule next poll with randomized interval + pollTimeout = setTimeout(pollNotificationCount, getRandomPollInterval()); + }); + } + + /** + * Format relative time + */ + function formatRelativeTime(isoString) { + const date = new Date(isoString); + const now = new Date(); + const diffMs = now - date; + const diffMins = Math.floor(diffMs / 60000); + const diffHours = Math.floor(diffMs / 3600000); + const diffDays = Math.floor(diffMs / 86400000); + + if (diffMins < 1) return 'Just now'; + if (diffMins < 60) return `${diffMins} minute${diffMins !== 1 ? 's' : ''} ago`; + if (diffHours < 24) return `${diffHours} hour${diffHours !== 1 ? 's' : ''} ago`; + if (diffDays < 7) return `${diffDays} day${diffDays !== 1 ? 's' : ''} ago`; + + return date.toLocaleDateString(); + } + + /** + * Render notification item + */ + function renderNotification(notification) { + const isUnread = !notification.is_read; + const typeConfig = notification.type_config || { icon: 'bi-bell', color: 'secondary' }; + + return ` +
+                    ${escapeHtml(notification.title)}
+                    ${escapeHtml(notification.message)}
+                    ${formatRelativeTime(notification.created_at)}
    + ${!isUnread ? '' : ` + + `} + +
    + `; + } + + /** + * Escape HTML to prevent XSS + */ + function escapeHtml(text) { + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; + } + + /** + * Render pagination + */ + function renderPagination(page, totalPages, hasMore) { + if (totalPages <= 1) return ''; + + let html = ''; + return html; + } + + /** + * Load and render notifications + */ + function loadNotifications() { + const container = document.getElementById('notifications-container'); + const paginationContainer = document.getElementById('pagination-container'); + const loadingIndicator = document.getElementById('loading-indicator'); + + if (!container) return; + + // Show loading + loadingIndicator.style.display = 'block'; + container.innerHTML = ''; + + // Build query parameters + const params = new URLSearchParams({ + page: currentPage, + per_page: currentPerPage, + include_read: currentFilter !== 'unread', + include_dismissed: false + }); + + fetch(`/api/notifications?${params}`) + .then(response => response.json()) + .then(data => { + loadingIndicator.style.display = 'none'; + + if (!data.success) { + container.innerHTML = '
    Failed to load notifications
    '; + return; + } + + // Filter by search if needed + let notifications = data.notifications; + + if (currentSearch) { + const searchLower = currentSearch.toLowerCase(); + notifications = notifications.filter(n => + n.title.toLowerCase().includes(searchLower) || + n.message.toLowerCase().includes(searchLower) + ); + } + + // Filter by read status + if (currentFilter === 'read') { + notifications = notifications.filter(n => n.is_read); + } else if (currentFilter === 'unread') { + notifications = notifications.filter(n => !n.is_read); + } + + // Cache notifications for click handlers + cachedNotifications = notifications; + + // Render notifications + if (notifications.length === 0) { + container.innerHTML = ` +
+                    No notifications
+                    You're all caught up!
    + `; + } else { + container.innerHTML = notifications.map(renderNotification).join(''); + + // Attach event listeners + attachNotificationListeners(); + } + + // Render pagination + const totalPages = Math.ceil(data.total / currentPerPage); + paginationContainer.innerHTML = renderPagination(currentPage, totalPages, data.has_more); + + // Attach pagination listeners + attachPaginationListeners(); + + // Update badge + pollNotificationCount(); + }) + .catch(error => { + console.error('Error loading notifications:', error); + loadingIndicator.style.display = 'none'; + container.innerHTML = '
    Failed to load notifications
    '; + }); + } + + /** + * Attach event listeners to notification items + */ + function attachNotificationListeners() { + // Click on notification to view/navigate + document.querySelectorAll('.notification-item').forEach(item => { + item.addEventListener('click', function(e) { + // Don't navigate if clicking action buttons + if (e.target.closest('.mark-read-btn') || e.target.closest('.dismiss-btn')) { + return; + } + + const notificationId = this.dataset.notificationId; + const notification = getNotificationById(notificationId); + + if (notification) { + handleNotificationClick(notification); + } + }); + }); + + // Mark as read buttons + document.querySelectorAll('.mark-read-btn').forEach(btn => { + btn.addEventListener('click', function(e) { + e.stopPropagation(); + const notificationId = this.dataset.notificationId; + markNotificationRead(notificationId); + }); + }); + + // Dismiss buttons + document.querySelectorAll('.dismiss-btn').forEach(btn => { + btn.addEventListener('click', function(e) { + e.stopPropagation(); + const notificationId = this.dataset.notificationId; + dismissNotification(notificationId); + }); + }); + } + + /** + * Attach pagination listeners + */ + function attachPaginationListeners() { + document.querySelectorAll('.page-link').forEach(link => { + link.addEventListener('click', function(e) { + e.preventDefault(); + const page = parseInt(this.dataset.page); + if (page && !isNaN(page)) { + currentPage = page; + loadNotifications(); + } + }); + }); + } + + /** + * Get notification data by ID (stored during render) + */ + let cachedNotifications = []; + + function getNotificationById(id) { + return cachedNotifications.find(n => n.id === id); + } + + /** + * Handle notification click + */ + async function handleNotificationClick(notification) { + // Mark as read + if (!notification.is_read) { + markNotificationRead(notification.id); + } + + // Check if this is a group notification - set active group before navigating + const groupId = notification.metadata?.group_id; + if (groupId && notification.link_url === '/group_workspaces') { + try { + const response = await fetch('/api/groups/setActive', { + method: 'PATCH', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ groupId: groupId }) + }); + + if (!response.ok) { + console.error('Failed to set active group:', await response.text()); + } + } catch (error) { + console.error('Error setting active group:', error); + } + } + + // Navigate if link exists + if (notification.link_url) { + window.location.href = notification.link_url; + } + } + + /** + * Mark notification as read + */ + function markNotificationRead(notificationId) { + fetch(`/api/notifications/${notificationId}/read`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + } + }) + .then(response => response.json()) + .then(data => { + if (data.success) { + loadNotifications(); + } + }) + .catch(error => { + console.error('Error marking notification as read:', error); + }); + } + + /** + * Dismiss notification + */ + function dismissNotification(notificationId) { + fetch(`/api/notifications/${notificationId}/dismiss`, { + method: 'DELETE' + }) + .then(response => response.json()) + .then(data => { + if (data.success) { + loadNotifications(); + } + }) + .catch(error => { + console.error('Error dismissing notification:', error); + }); + } + + /** + * Initialize page + */ + function initNotificationsPage() { + // Only run on notifications page + if (!window.location.pathname.includes('/notifications')) { + return; + 
} + + // Get user's per-page preference + const perPageSelect = document.getElementById('per-page-select'); + if (perPageSelect) { + currentPerPage = parseInt(perPageSelect.value); + + perPageSelect.addEventListener('change', function() { + currentPerPage = parseInt(this.value); + currentPage = 1; + + // Save preference + fetch('/api/notifications/settings', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + notifications_per_page: currentPerPage + }) + }); + + loadNotifications(); + }); + } + + // Filter buttons + document.querySelectorAll('.filter-btn').forEach(btn => { + btn.addEventListener('click', function() { + document.querySelectorAll('.filter-btn').forEach(b => b.classList.remove('active')); + this.classList.add('active'); + currentFilter = this.dataset.filter; + currentPage = 1; + loadNotifications(); + }); + }); + + // Search input + const searchInput = document.getElementById('search-input'); + if (searchInput) { + let searchTimeout; + searchInput.addEventListener('input', function() { + clearTimeout(searchTimeout); + searchTimeout = setTimeout(() => { + currentSearch = this.value; + currentPage = 1; + loadNotifications(); + }, 500); + }); + } + + // Mark all as read button + const markAllReadBtn = document.getElementById('mark-all-read-btn'); + if (markAllReadBtn) { + markAllReadBtn.addEventListener('click', function() { + fetch('/api/notifications/mark-all-read', { + method: 'POST' + }) + .then(response => response.json()) + .then(data => { + if (data.success) { + loadNotifications(); + } + }) + .catch(error => { + console.error('Error marking all as read:', error); + }); + }); + } + + // Refresh button + const refreshBtn = document.getElementById('refresh-btn'); + if (refreshBtn) { + refreshBtn.addEventListener('click', function() { + loadNotifications(); + }); + } + + // Initial load + loadNotifications(); + } + + // Start polling when page loads (for badge updates) + if (document.readyState === 'loading') { + document.addEventListener('DOMContentLoaded', function() { + pollNotificationCount(); + initNotificationsPage(); + }); + } else { + pollNotificationCount(); + initNotificationsPage(); + } + + // Clean up on page unload + window.addEventListener('beforeunload', function() { + if (pollTimeout) { + clearTimeout(pollTimeout); + } + }); + +})(); diff --git a/application/single_app/static/js/plugin_common.js b/application/single_app/static/js/plugin_common.js index 3e399d31..e40158b9 100644 --- a/application/single_app/static/js/plugin_common.js +++ b/application/single_app/static/js/plugin_common.js @@ -291,30 +291,9 @@ export async function showPluginModal({ } } -// Validate plugin manifest with server-side validation +// Validate plugin manifest using server-side validation only export async function validatePluginManifest(pluginManifest) { - try { - // Try client-side validation first if available - if (!window.validatePlugin) { - try { - window.validatePlugin = (await import('/static/js/validatePlugin.mjs')).default; - } catch (importError) { - console.warn('Client-side validation module failed to load, falling back to server-side validation:', importError); - // Fallback to server-side validation - return await validatePluginManifestServerSide(pluginManifest); - } - } - - const result = window.validatePlugin(pluginManifest); - if (result === true) { - return { valid: true, errors: [] }; - } else { - return { valid: false, errors: result.errors || ['Validation failed'] }; - } - } catch (error) { - 
console.warn('Client-side validation failed, falling back to server-side validation:', error); - return await validatePluginManifestServerSide(pluginManifest); - } + return await validatePluginManifestServerSide(pluginManifest); } // Server-side validation fallback diff --git a/application/single_app/static/js/plugin_modal_stepper.js b/application/single_app/static/js/plugin_modal_stepper.js index 955af056..89076076 100644 --- a/application/single_app/static/js/plugin_modal_stepper.js +++ b/application/single_app/static/js/plugin_modal_stepper.js @@ -3,6 +3,8 @@ import { showToast } from "./chat/chat-toast.js"; export class PluginModalStepper { + + constructor() { this.currentStep = 1; this.maxSteps = 5; @@ -13,10 +15,89 @@ export class PluginModalStepper { this.itemsPerPage = 12; this.filteredTypes = []; this.originalPlugin = null; // Store original state for change tracking - + this.pluginSchemaCache = null; // Will hold plugin.schema.json + this.pluginDefinitionCache = {}; // Cache for per-type definition schemas + this.additionalSettingsSchemaCache = {}; // Cache for additional settings schemas + this.lastAdditionalFieldsType = null; // Track last type to avoid unnecessary redraws + this.defaultAuthTypes = ["NoAuth", "key", "identity", "user", "servicePrincipal", "connection_string", "basic", "username_password"]; + this.currentAllowedAuthTypes = null; // Active allowed auth types derived from definition + + this._loadPluginSchema().then(() => { // Load schema on initialization + this._populateGenericAuthTypeDropdown(); // Dynamically populate generic auth type dropdown after schema loads (will be called again after schema loads) + }); this.bindEvents(); } + async _loadPluginSchema() { + try { + const res = await fetch('/static/json/schemas/plugin.schema.json'); + if (!res.ok) throw new Error('Failed to load plugin.schema.json'); + this.pluginSchemaCache = await res.json(); + } catch (err) { + console.error('Error loading plugin.schema.json:', err); + this.pluginSchemaCache = null; + } + } + + getAuthTypeEnumFromSchema() { + const authEnum = this.pluginSchemaCache?.definitions?.AuthType?.enum; + return Array.isArray(authEnum) && authEnum.length ? 
authEnum : null; + } + + async loadPluginDefinition(type) { + const safeType = this.getSafeType(type); + if (!safeType) return null; + + if (Object.prototype.hasOwnProperty.call(this.pluginDefinitionCache, safeType)) { + return this.pluginDefinitionCache[safeType]; + } + + try { + const res = await fetch(`/api/plugins/${encodeURIComponent(type)}/auth-types`); + if (!res.ok) throw new Error(`Auth types fetch failed with status ${res.status}`); + const json = await res.json(); + this.pluginDefinitionCache[safeType] = json; + return json; + } catch (err) { + console.warn(`Failed to load auth types for type '${safeType}':`, err.message || err); + this.pluginDefinitionCache[safeType] = null; + return null; + } + } + + async applyDefinitionForSelectedType(type = this.selectedType) { + this.currentAllowedAuthTypes = null; + + if (type) { + const definition = await this.loadPluginDefinition(type); + const allowed = definition?.allowedAuthTypes; + if (Array.isArray(allowed) && allowed.length) { + this.currentAllowedAuthTypes = allowed; + } + } + + this._populateGenericAuthTypeDropdown(); + } + + _populateGenericAuthTypeDropdown() { + // Only run if dropdown exists + const dropdown = document.getElementById('plugin-auth-type-generic'); + if (!dropdown) return; + const fullAuthEnum = this.getAuthTypeEnumFromSchema() || this.defaultAuthTypes; + const allowedList = this.currentAllowedAuthTypes && this.currentAllowedAuthTypes.length + ? this.currentAllowedAuthTypes + : fullAuthEnum; + + // Clear existing options + dropdown.innerHTML = ''; + allowedList.forEach(type => { + const option = document.createElement('option'); + option.value = type; + option.textContent = this.formatAuthType(type); + dropdown.appendChild(option); + }); + } + bindEvents() { // Step navigation buttons document.getElementById('plugin-modal-next').addEventListener('click', () => this.nextStep()); @@ -83,6 +164,7 @@ export class PluginModalStepper { // Load available types and populate await this.loadAvailableTypes(); + await this.applyDefinitionForSelectedType(this.selectedType); if (this.isEditMode) { this.populateFormFromPlugin(plugin); @@ -247,6 +329,9 @@ export class PluginModalStepper { document.getElementById('plugin-description').value = typeData.description; } + // Apply auth definition overrides for this type + this.applyDefinitionForSelectedType(typeName).catch(err => console.error('Definition apply failed:', err)); + // Pre-configure for step 3 if needed this.showConfigSectionForType(); } @@ -368,12 +453,12 @@ export class PluginModalStepper { goToStep(stepNumber) { if (stepNumber < 1 || stepNumber > this.maxSteps) return; - + this.currentStep = stepNumber; this.showStep(stepNumber); this.updateStepIndicator(); this.updateNavigationButtons(); - + // Handle step-specific logic if (stepNumber === 3) { this.showConfigSectionForType(); @@ -426,6 +511,10 @@ export class PluginModalStepper { if (currentStepEl) { currentStepEl.classList.remove('d-none'); } + + if (stepNumber === 2) { + + } // Update step 3 title based on plugin type if (stepNumber === 3) { @@ -449,7 +538,45 @@ export class PluginModalStepper { } if (stepNumber === 4) { - // Only run for new plugins (not editing) + // Load additional settings schema for selected type + let options = {forceReload: true}; + this.getAdditionalSettingsSchema(this.selectedType, options); + const additionalFieldsDiv = document.getElementById('plugin-additional-fields-div'); + if (additionalFieldsDiv) { + // Only clear and rebuild if type changes + if (this.selectedType !== 
this.lastAdditionalFieldsType) { + additionalFieldsDiv.innerHTML = ''; + additionalFieldsDiv.classList.remove('d-none'); + if (this.selectedType) { + this.getAdditionalSettingsSchema(this.selectedType) + .then(schema => { + if (schema) { + this.buildAdditionalFieldsUI(schema, additionalFieldsDiv); + try { + if (this.isEditMode && this.originalPlugin && this.originalPlugin.additionalFields) { + this.populateDynamicAdditionalFields(this.originalPlugin.additionalFields); + } + } catch (error) { + console.error('Error populating dynamic additional fields:', error); + } + } else { + console.log('No additional settings schema found'); + additionalFieldsDiv.classList.add('d-none'); + } + }) + .catch(error => { + console.error(`Error fetching additional settings schema for type: ${this.selectedType} -- ${error}`); + additionalFieldsDiv.classList.add('d-none'); + }); + } else { + console.warn('No plugin type selected'); + additionalFieldsDiv.classList.add('d-none'); + } + this.lastAdditionalFieldsType = this.selectedType; + } + // Otherwise, preserve user data and do not redraw + } + if (!this.isEditMode) { const typeField = document.getElementById('plugin-type'); const selectedType = typeField && typeField.value ? typeField.value : null; @@ -458,13 +585,13 @@ export class PluginModalStepper { import('./plugin_common.js').then(module => { module.fetchAndMergePluginSettings(selectedType, {}).then(merged => { document.getElementById('plugin-metadata').value = merged.metadata ? JSON.stringify(merged.metadata, null, 2) : '{}'; - document.getElementById('plugin-additional-fields').value = merged.additionalFields ? JSON.stringify(merged.additionalFields, null, 2) : '{}'; + //document.getElementById('plugin-additional-fields').value = merged.additionalFields ? JSON.stringify(merged.additionalFields, null, 2) : '{}'; }); }); } else { // Fallback to empty objects if no type selected document.getElementById('plugin-metadata').value = '{}'; - document.getElementById('plugin-additional-fields').value = '{}'; + //document.getElementById('plugin-additional-fields').value = '{}'; } } } @@ -695,7 +822,7 @@ export class PluginModalStepper { case 4: // Validate JSON fields if (!this.validateJSONField('plugin-metadata', 'Metadata')) return false; - if (!this.validateJSONField('plugin-additional-fields', 'Additional Fields')) return false; + //if (!this.validateJSONField('plugin-additional-fields', 'Additional Fields')) return false; break; } @@ -885,33 +1012,63 @@ export class PluginModalStepper { } toggleGenericAuthFields() { - const authType = document.getElementById('plugin-auth-type-generic').value; - const identityGroup = document.getElementById('auth-identity-group'); - const keyGroup = document.getElementById('auth-key-group'); - const tenantIdGroup = document.getElementById('auth-tenantid-group'); - - // Hide all groups first - [identityGroup, keyGroup, tenantIdGroup].forEach(group => { - if (group) group.style.display = 'none'; - }); - - // Show relevant groups based on auth type - switch (authType) { - case 'key': - if (keyGroup) keyGroup.style.display = 'flex'; - break; - case 'identity': - if (identityGroup) identityGroup.style.display = 'flex'; - break; - case 'servicePrincipal': - if (identityGroup) identityGroup.style.display = 'flex'; - if (keyGroup) keyGroup.style.display = 'flex'; - if (tenantIdGroup) tenantIdGroup.style.display = 'flex'; - break; - case 'user': - // No additional fields needed - break; + const dropdown = document.getElementById('plugin-auth-type-generic'); + if (!dropdown) return; 
+ const authType = dropdown.value; + + // Get required fields for selected auth type from schema + let requiredFields = []; + // Defensive: find the correct schema location + const pluginDef = this.pluginSchemaCache?.definitions?.Plugin; + const authSchema = pluginDef?.properties?.auth; + if (authSchema && Array.isArray(authSchema.allOf)) { + for (const cond of authSchema.allOf) { + // Check if this allOf block matches the selected type + if (cond.if && cond.if.properties && cond.if.properties.type && cond.if.properties.type.const === authType) { + // Use the required array from then + if (cond.then && Array.isArray(cond.then.required)) { + requiredFields = cond.then.required.filter(f => f !== 'type'); + } + break; + } + } } + + // Map field keys to DOM groups + const fieldMap = { + identity: document.getElementById('auth-identity-group'), + key: document.getElementById('auth-key-group'), + tenantId: document.getElementById('auth-tenantid-group') + }; + + // Hide all groups first using d-none + Object.values(fieldMap).forEach(group => { if (group) group.classList.add('d-none'); }); + + // Show only required fields for selected auth type using d-none + requiredFields.forEach(field => { + if (fieldMap[field]) { + fieldMap[field].classList.remove('d-none'); + // Update label using mapping or schema description + const label = fieldMap[field].querySelector('span.input-group-text'); + console.log('Updating label for field:', field, 'Auth type:', authType, 'label:', label); + if (label) { + if (authType === 'username_password') { + if (field === 'key') label.textContent = 'Password'; + else if (field === 'identity') label.textContent = 'Username'; + } else if (authType === 'connection_string') { + if (field === 'key') label.textContent = 'Connection String'; + } else if (authType === 'servicePrincipal') { + if (field === 'key') label.textContent = 'Client Secret'; + else if (field === 'identity') label.textContent = 'Client ID'; + else if (field === 'tenantId') label.textContent = 'Tenant ID'; + } else { + if (field === 'key') label.textContent = 'Key'; + else if (field === 'identity') label.textContent = 'Identity'; + else if (field === 'tenantId') label.textContent = 'Tenant ID'; + } + } + } + }); } // SQL Plugin Configuration Methods @@ -1354,7 +1511,11 @@ export class PluginModalStepper { JSON.stringify(plugin.additionalFields, null, 2) : '{}'; document.getElementById('plugin-metadata').value = metadata; - document.getElementById('plugin-additional-fields').value = additionalFields; + try { + document.getElementById('plugin-additional-fields').value = additionalFields; + } catch (e) { + console.warn('Legacy additional fields accessed:', e); + } } getFormData() { @@ -1383,6 +1544,7 @@ export class PluginModalStepper { } // Store the OpenAPI spec content directly in the plugin config + // IMPORTANT: Set these BEFORE collecting additional fields so they don't get overwritten additionalFields.openapi_spec_content = JSON.parse(specContent); additionalFields.openapi_source_type = 'content'; // Changed from 'file' additionalFields.base_url = endpoint; @@ -1556,13 +1718,14 @@ export class PluginModalStepper { } } - // Parse existing additional fields and merge + // Collect additional fields from the dynamic UI and MERGE with existing additionalFields + // This preserves OpenAPI spec content and other auto-populated fields try { - const additionalFieldsValue = document.getElementById('plugin-additional-fields').value.trim(); - const existingAdditionalFields = additionalFieldsValue ? 
JSON.parse(additionalFieldsValue) : {}; - additionalFields = { ...existingAdditionalFields, ...additionalFields }; + const dynamicFields = this.collectAdditionalFields(); + // Merge dynamicFields into additionalFields (preserving existing values) + additionalFields = { ...additionalFields, ...dynamicFields }; } catch (e) { - throw new Error('Invalid additional fields JSON'); + throw new Error('Invalid additional fields input'); } let metadata = {}; @@ -1700,10 +1863,17 @@ export class PluginModalStepper { 'none': 'No Authentication', 'api_key': 'API Key', 'bearer': 'Bearer Token', - 'basic': 'Basic Authentication', 'oauth2': 'OAuth2', 'windows': 'Windows Authentication', - 'sql': 'SQL Authentication' + 'sql': 'SQL Authentication', + 'username_password': 'Username/Password', + 'key': 'Key', + 'identity': 'Identity', + 'user': 'User', + 'servicePrincipal': 'Service Principal', + 'connection_string': 'Connection String', + 'basic': 'Basic', + 'NoAuth': 'No Authentication' }; return authTypeMap[authType] || authType; } @@ -1939,7 +2109,7 @@ export class PluginModalStepper { // Check if there's any metadata or additional fields const metadata = document.getElementById('plugin-metadata').value.trim(); - const additionalFields = document.getElementById('plugin-additional-fields').value.trim(); + //const additionalFields = document.getElementById('plugin-additional-fields').value.trim(); // Check if metadata/additional fields actually contain meaningful data (not just empty objects) let hasMetadata = false; @@ -1953,13 +2123,9 @@ export class PluginModalStepper { hasMetadata = metadata.length > 0 && metadata !== '{}'; } - try { - const additionalFieldsObj = JSON.parse(additionalFields || '{}'); - hasAdditionalFields = Object.keys(additionalFieldsObj).length > 0; - } catch (e) { - // If it's not valid JSON, consider it as having content if it's not empty - hasAdditionalFields = additionalFields.length > 0 && additionalFields !== '{}'; - } + // DRY: Use private helper to collect additional fields + let additionalFieldsObj = this.collectAdditionalFields(); + hasAdditionalFields = Object.keys(additionalFieldsObj).length > 0; // Update has metadata/additional fields indicators document.getElementById('summary-has-metadata').textContent = hasMetadata ? 
'Yes' : 'No'; @@ -1977,7 +2143,13 @@ export class PluginModalStepper { // Show/hide additional fields preview const additionalFieldsPreview = document.getElementById('summary-additional-fields-preview'); if (hasAdditionalFields) { - document.getElementById('summary-additional-fields-content').textContent = additionalFields; + let previewContent = ''; + if (typeof additionalFieldsObj === 'object' && additionalFieldsObj !== null) { + previewContent = JSON.stringify(additionalFieldsObj, null, 2); + } else { + previewContent = ''; + } + document.getElementById('summary-additional-fields-content').textContent = previewContent; additionalFieldsPreview.style.display = ''; } else { additionalFieldsPreview.style.display = 'none'; @@ -2126,6 +2298,7 @@ export class PluginModalStepper { // Clear any type selection this.selectedType = null; + this.currentAllowedAuthTypes = null; // Hide all auth field sections (with safe calls) try { @@ -2148,6 +2321,434 @@ export class PluginModalStepper { div.textContent = str; return div.innerHTML; } + + formatLabel(str) { + // Convert snake_case, camelCase, PascalCase to spaced words + return str + .replace(/([a-z])([A-Z])/g, '$1 $2') // camelCase, PascalCase + .replace(/_/g, ' ') // snake_case + .replace(/\b([A-Z]+)\b/g, match => match.charAt(0) + match.slice(1).toLowerCase()) // ALLCAPS to Capitalized + .replace(/^\w/, c => c.toUpperCase()); + } + + // Build the additional fields UI from a JSON schema + buildAdditionalFieldsUI(schema, parentDiv) { + // Utility to create a labeled field + const self = this; + // Render title and description + const title = document.createElement('h6'); + title.textContent = schema.title || 'Additional Settings'; + parentDiv.appendChild(title); + if (schema.description) { + const desc = document.createElement('p'); + desc.className = 'text-muted'; + desc.textContent = schema.description; + parentDiv.appendChild(desc); + } + // Render all top-level properties + if (schema.properties) { + Object.entries(schema.properties).forEach(([key, prop]) => { + if (prop.type === 'array') { + this.addArrayFieldUI(prop, key, parentDiv, prop.default || []); + } else if (prop.type === 'object') { + const wrapper = document.createElement('div'); + wrapper.className = 'additional-field-object'; + // Create a fieldset for the object + const fieldset = document.createElement('fieldset'); + fieldset.dataset.schemaKey = key; + // Optionally add a legend for the object + const legend = document.createElement('legend'); + legend.textContent = this.formatLabel(key); + fieldset.appendChild(legend); + // Render all sub-properties inside the fieldset + if (prop.properties) { + Object.entries(prop.properties).forEach(([subKey, subProp]) => { + this.createField(subKey, subProp, fieldset); + }); + } + wrapper.appendChild(fieldset); + parentDiv.appendChild(wrapper); + } else { + const wrapper = document.createElement('div'); + wrapper.className = 'additional-field-primitive'; + this.createField(key, prop, wrapper); + parentDiv.appendChild(wrapper); + } + }); + } + } + + // Recursively populate dynamic additional fields UI + populateDynamicAdditionalFields(fields, parentKey = '') { + if (!fields || typeof fields !== 'object') return; + if (this.additionalSettingsSchemaCache && this.selectedType && !this.additionalSettingsSchemaCache[this.getSafeType(this.selectedType)]) { + this.getAdditionalSettingsSchema(this.selectedType); + } + const schema = this.additionalSettingsSchemaCache && this.selectedType ? 
this.additionalSettingsSchemaCache[this.getSafeType(this.selectedType)] : null; + Object.entries(fields).forEach(([key, value]) => { + console.log('Processing field:', key, 'with value:', value, 'under parentKey:', parentKey); + let fieldName = key; + if (Array.isArray(value)) { + // Find array wrapper, add items if needed + let arrayWrapper = document.querySelector(`#plugin-additional-fields-div [data-schema-key="${fieldName}"]`); + if (!arrayWrapper) { + // Try to find schema for this array (assume you have access to schema) + if (this.additionalSettingsSchemaCache && this.selectedType) { + if (schema && schema.properties && schema.properties[fieldName] && schema.properties[fieldName].type === 'array') { + this.addArrayFieldUI(schema.properties[fieldName], fieldName, document.getElementById('plugin-additional-fields-div'), value); + arrayWrapper = document.querySelector(`#plugin-additional-fields-div [data-schema-key="${fieldName}"]`); + } + } + } + // Now populate each item + if (arrayWrapper) { + const itemsContainer = arrayWrapper.querySelector('.array-group'); + // Remove existing items + while (itemsContainer && itemsContainer.firstChild) itemsContainer.removeChild(itemsContainer.firstChild); + value.forEach(item => { + this.addArrayItemUI( + (schema && schema.properties && schema.properties[fieldName] && schema.properties[fieldName].items) || {}, + fieldName, + itemsContainer, + item + ); + }); + } + } else if (value && typeof value === 'object') { + this.populateDynamicAdditionalFields(value, fieldName); + } else { + let query = parentKey ? `#plugin-additional-fields-div [data-schema-key="${parentKey}"] [name="${fieldName}"]` : `#plugin-additional-fields-div [name="${fieldName}"]`; + console.log('Querying elements with:', query); + const elements = document.querySelectorAll(query); + console.log('Found elements for field', fieldName, ':', elements); + elements.forEach(el => { + console.log('Setting field:', fieldName, 'with value:', value, 'on element:', el); + if (el.type === 'checkbox') { + el.checked = !!value; + } else if (el.type === 'radio') { + el.checked = el.value == value; + } else if (el.tagName === 'SELECT') { + el.value = value; + } else if (el.tagName === 'TEXTAREA') { + el.value = value; + } else if (el.type === 'number') { + el.value = value !== undefined && value !== null ? Number(value) : ''; + } else { + el.value = value; + } + }); + } + }); + } + + // Private deep merge utility + deepMerge(target, source) { + for (const key in source) { + if (source[key] && typeof source[key] === 'object' && + !Array.isArray(source[key]) && target[key] && typeof target[key] === 'object' && + !Array.isArray(target[key]) + ) { + target[key] = this.deepMerge(target[key], source[key]); + } else { + target[key] = source[key]; + } + } + return target; + } + + // Private method to collect additional fields from both legacy textarea and dynamic UI + collectAdditionalFields() { + // 1. Get from textarea (legacy) + const additionalFieldsValue = document.getElementById('plugin-additional-fields')?.value?.trim() || ''; + let legacyFields = {}; + if (additionalFieldsValue && additionalFieldsValue !== '{}') { + try { + legacyFields = JSON.parse(additionalFieldsValue); + } catch { + // If not valid JSON, skip + } + } + + // 2. 
Get from dynamic UI + let uiFields = {}; + const additionalFieldsDiv = document.getElementById('plugin-additional-fields-div'); + if (additionalFieldsDiv) { + // Arrays + const arrayWrappers = additionalFieldsDiv.querySelectorAll('.additional-field-array'); + arrayWrappers.forEach(wrapper => { + const arrayGroup = wrapper.querySelector('.array-group'); + if (arrayGroup) { + const arrayKey = arrayGroup.dataset.schemaKey; + const items = []; + // Loop over each .array-item inside .array-group + const arrayItems = arrayGroup.querySelectorAll('.array-item'); + arrayItems.forEach(itemDiv => { + // Check for array of objects (fieldset present) + const fieldset = itemDiv.querySelector('fieldset'); + if (fieldset) { + let obj = {}; + const subInputs = fieldset.querySelectorAll('input, select, textarea'); + subInputs.forEach(subEl => { + let subKey = subEl.name || subEl.id; + if (!subKey) return; + let subValue = subEl.type === 'checkbox' ? subEl.checked : (subEl.type === 'number' ? (subEl.value !== '' ? Number(subEl.value) : '') : subEl.value); + obj[subKey] = subValue; + }); + items.push(obj); + } else { + // Primitive array: find first input/select/textarea directly inside .array-item (not in fieldset or button) + const possibleInputs = Array.from(itemDiv.querySelectorAll('input, select, textarea')); + // Exclude those inside a fieldset or button + const input = possibleInputs.find(el => { + // Not inside a fieldset or button + return !el.closest('fieldset') && !el.closest('button'); + }); + if (input) { + let subValue = input.type === 'checkbox' ? input.checked : (input.type === 'number' ? (input.value !== '' ? Number(input.value) : '') : input.value); + items.push(subValue); + } + } + }); + if (arrayKey) { + uiFields[arrayKey] = items; + } + } + }); + // Objects + const objectWrappers = additionalFieldsDiv.querySelectorAll('.additional-field-object'); + objectWrappers.forEach(wrapper => { + const objFieldset = wrapper.querySelector('fieldset'); + const objKey = objFieldset.dataset.schemaKey; + let obj = {}; + const subInputs = objFieldset.querySelectorAll('input, select, textarea'); + subInputs.forEach(subEl => { + let subKey = subEl.name || subEl.id; + if (!subKey) return; + let subValue = subEl.type === 'checkbox' ? subEl.checked : (subEl.type === 'number' ? (subEl.value !== '' ? Number(subEl.value) : '') : subEl.value); + obj[subKey] = subValue; + }); + if (objKey) { + uiFields[objKey] = obj; + } + }); + // Primitives + const primitiveWrappers = additionalFieldsDiv.querySelectorAll('.additional-field-primitive'); + primitiveWrappers.forEach(wrapper => { + const inputs = wrapper.querySelectorAll('input, select, textarea'); + inputs.forEach(input => { + let key = input.name || input.id; + let value = input.type === 'checkbox' ? input.checked : (input.type === 'number' ? (input.value !== '' ? Number(input.value) : '') : input.value); + uiFields[key] = value; + }); + }); + } + + // 3. Deep merge (UI fields take precedence) + return this.deepMerge(legacyFields, uiFields); + } + + getSafeType(type) { + return type ? 
type.replace(/[^a-zA-Z0-9_]/g, '_').toLowerCase() : null; + } + + async getAdditionalSettingsSchema(type, options = {}) { + if (!type) return null; + const { useLegacyPattern = false, forceReload = false } = options; + // Normalize type for filename + const safeType = this.getSafeType(type); + // Choose filename pattern + const schemaFile = `${safeType}_plugin.additional_settings.schema.json`; + + const schemaPath = `/static/json/schemas/${schemaFile}`; + + // Use cache unless forceReload + if (!forceReload && this.additionalSettingsSchemaCache[safeType]) { + return this.additionalSettingsSchemaCache[safeType]; + } + try { + console.log(`Fetching additional settings schema for type: ${safeType} (pattern: ${safeType})`); + const res = await fetch(schemaPath); + if (res.status === 404) { + console.log(`No additional settings schema found for type: ${type} (404)`); + this.additionalSettingsSchemaCache[safeType] = null; + return null; + } + if (!res.ok) throw new Error(`Failed to load additional settings schema for type: ${type}`); + const schema = await res.json(); + this.additionalSettingsSchemaCache[safeType] = schema; + return schema; + } catch (err) { + console.error(`Error loading additional settings schema for type ${type}:`, err); + this.additionalSettingsSchemaCache[safeType] = null; + return null; + } + } + + // Utility to create a labeled field (refactored from buildAdditionalFieldsUI) + createField(key, prop, parent, prefix = '') { + // If prefix is a number, treat as array index for uniqueness + let fieldId; + if (typeof prefix === 'number') { + fieldId = `${key}_${prefix}`; + } else { + fieldId = `${prefix}${key}`; + } + const wrapper = document.createElement('div'); + wrapper.className = 'mb-3'; + // Label with tooltip if description exists + const label = document.createElement('label'); + label.className = 'form-label'; + label.htmlFor = fieldId; + label.textContent = this.formatLabel(key); + if (prop.description) { + label.title = prop.description; + // Add help icon + const helpIcon = document.createElement('span'); + helpIcon.className = 'ms-1 bi bi-question-circle-fill text-info'; + helpIcon.setAttribute('tabindex', '0'); + helpIcon.setAttribute('data-bs-toggle', 'tooltip'); + helpIcon.setAttribute('title', prop.description); + label.appendChild(helpIcon); + } + wrapper.appendChild(label); + + let input; + if (prop.enum) { + input = document.createElement('select'); + input.className = 'form-select'; + input.id = fieldId; + input.name = key; + prop.enum.forEach(opt => { + const option = document.createElement('option'); + option.value = opt; + option.textContent = this.formatLabel(opt); + option.title = opt; + input.appendChild(option); + }); + if (prop.default) input.value = prop.default; + } else if (prop.type === 'boolean') { + input = document.createElement('input'); + input.type = 'checkbox'; + input.className = 'form-check-input'; + input.id = fieldId; + input.name = key; + input.checked = !!prop.default; + wrapper.className += ' form-check'; + } else if (prop.type === 'number' || prop.type === 'integer') { + input = document.createElement('input'); + input.type = 'number'; + input.className = 'form-control'; + input.id = fieldId; + input.name = key; + if (prop.minimum !== undefined) input.min = prop.minimum; + if (prop.maximum !== undefined) input.max = prop.maximum; + if (prop.default !== undefined) input.value = prop.default; + if (prop.pattern) input.pattern = prop.pattern; + } else if (prop.type === 'string' && prop.format === 'email') { + input = 
document.createElement('input'); + input.type = 'email'; + input.className = 'form-control'; + input.id = fieldId; + input.name = key; + if (prop.default) input.value = prop.default; + } else if (prop.type === 'string') { + input = document.createElement('input'); + input.type = 'text'; + input.className = 'form-control'; + input.id = fieldId; + input.name = key; + if (prop.minLength !== undefined) input.minLength = prop.minLength; + if (prop.maxLength !== undefined) input.maxLength = prop.maxLength; + if (prop.default) input.value = prop.default; + if (prop.pattern) input.pattern = prop.pattern; + } + if (input) wrapper.appendChild(input); + parent.appendChild(wrapper); + } + + // New: Array field builder for both initial render and dynamic population + addArrayFieldUI(arraySchema, arrayKey, parentDiv, initialValues = []) { + // Create array wrapper + const wrapper = document.createElement('div'); + wrapper.className = 'additional-field-array'; + wrapper.dataset.schemaKey = arrayKey; + + // Title + const label = document.createElement('label'); + label.className = 'form-label'; + label.textContent = this.formatLabel(arrayKey); + wrapper.appendChild(label); + + // Items container + const itemsContainer = document.createElement('div'); + itemsContainer.className = 'array-group'; + itemsContainer.dataset.schemaKey = arrayKey; + wrapper.appendChild(itemsContainer); + + // Add button + const addBtn = document.createElement('button'); + addBtn.type = 'button'; + addBtn.className = 'btn btn-sm btn-outline-primary mb-2'; + addBtn.textContent = 'Add Item'; + addBtn.onclick = () => { + this.addArrayItemUI(arraySchema.items, arrayKey, itemsContainer); + }; + wrapper.appendChild(addBtn); + + // Initial values + if (Array.isArray(initialValues)) { + initialValues.forEach(val => { + this.addArrayItemUI(arraySchema.items, arrayKey, itemsContainer, val); + }); + } + + parentDiv.appendChild(wrapper); + return wrapper; + } + + // Helper to add a single array item + addArrayItemUI(itemSchema, arrayKey, itemsContainer, initialValue = undefined) { + const itemDiv = document.createElement('div'); + itemDiv.className = 'array-item mb-2 p-2 border rounded'; + // Remove button + const removeBtn = document.createElement('button'); + removeBtn.type = 'button'; + removeBtn.className = 'btn btn-sm btn-outline-danger float-end'; + removeBtn.textContent = 'Remove'; + removeBtn.onclick = () => { + itemsContainer.removeChild(itemDiv); + }; + itemDiv.appendChild(removeBtn); + // Determine index for uniqueness + let index = itemsContainer.childNodes.length; + // Render item fields + if (itemSchema.type === 'object' && itemSchema.properties) { + // Create a fieldset for the object item + const fieldset = document.createElement('fieldset'); + fieldset.dataset.schemaKey = arrayKey; + // Optionally add a legend for the object item + const legend = document.createElement('legend'); + legend.textContent = this.formatLabel(arrayKey); + fieldset.appendChild(legend); + Object.entries(itemSchema.properties).forEach(([subKey, subProp]) => { + this.createField(subKey, subProp, fieldset, index); + // Set initial value if provided + if (initialValue && initialValue[subKey] !== undefined) { + const input = fieldset.querySelector(`[name="${subKey}"]`); + if (input) input.value = initialValue[subKey]; + } + }); + itemDiv.appendChild(fieldset); + } else { + // Primitive array + this.createField(arrayKey, itemSchema, itemDiv, index); + if (initialValue !== undefined) { + const input = itemDiv.querySelector(`[name="${arrayKey}"]`); + if 
(input) input.value = initialValue; + } + } + itemsContainer.appendChild(itemDiv); + } } // Create global instance diff --git a/application/single_app/static/js/profile-image.js b/application/single_app/static/js/profile-image.js index 37f1f168..7ada361c 100644 --- a/application/single_app/static/js/profile-image.js +++ b/application/single_app/static/js/profile-image.js @@ -17,21 +17,37 @@ let isLoading = false; const sidebar = document.getElementById('sidebar-profile-avatar'); if (topNav && userProfileImage) { + // Preserve notification badge if it exists + const existingBadge = topNav.querySelector('#notification-badge'); + const img = document.createElement('img'); img.src = userProfileImage; img.alt = 'Profile'; img.style.cssText = 'width: 28px; height: 28px; border-radius: 50%; object-fit: cover;'; topNav.innerHTML = ''; topNav.appendChild(img); + + // Re-append badge if it existed + if (existingBadge) { + topNav.appendChild(existingBadge); + } } if (sidebar && userProfileImage) { + // Preserve notification badge if it exists + const existingBadge = sidebar.querySelector('#sidebar-notification-badge'); + const img = document.createElement('img'); img.src = userProfileImage; img.alt = 'Profile'; img.style.cssText = 'width: 32px; height: 32px; border-radius: 50%; object-fit: cover;'; sidebar.innerHTML = ''; sidebar.appendChild(img); + + // Re-append badge if it existed + if (existingBadge) { + sidebar.appendChild(existingBadge); + } } } @@ -151,6 +167,9 @@ function updateTopNavAvatar() { const avatarElement = document.getElementById('top-nav-profile-avatar'); if (!avatarElement) return; + // Preserve notification badge if it exists + const existingBadge = avatarElement.querySelector('#notification-badge'); + if (userProfileImage) { const img = document.createElement('img'); img.src = userProfileImage; @@ -165,6 +184,11 @@ function updateTopNavAvatar() { avatarElement.innerHTML = ''; avatarElement.appendChild(img); avatarElement.style.backgroundColor = 'transparent'; + + // Re-append badge if it existed + if (existingBadge) { + avatarElement.appendChild(existingBadge); + } } else { // Keep the existing initials display, but use cached name if possible const nameElement = avatarElement.parentElement.querySelector('.fw-semibold'); @@ -176,6 +200,11 @@ function updateTopNavAvatar() { avatarElement.style.width = '28px'; avatarElement.style.height = '28px'; avatarElement.style.backgroundColor = '#6c757d'; + + // Re-append badge if it existed + if (existingBadge) { + avatarElement.appendChild(existingBadge); + } } } } @@ -187,6 +216,9 @@ function updateSidebarAvatar() { const sidebarAvatar = document.getElementById('sidebar-profile-avatar'); if (!sidebarAvatar) return; + // Preserve notification badge if it exists + const existingBadge = sidebarAvatar.querySelector('#sidebar-notification-badge'); + if (userProfileImage) { const img = document.createElement('img'); img.src = userProfileImage; @@ -201,6 +233,11 @@ function updateSidebarAvatar() { sidebarAvatar.innerHTML = ''; sidebarAvatar.appendChild(img); sidebarAvatar.style.backgroundColor = 'transparent'; + + // Re-append badge if it existed + if (existingBadge) { + sidebarAvatar.appendChild(existingBadge); + } } else { // Get initials for sidebar const nameElement = document.querySelector('#sidebar-user-account .fw-semibold'); @@ -212,6 +249,11 @@ function updateSidebarAvatar() { sidebarAvatar.style.width = '28px'; sidebarAvatar.style.height = '28px'; sidebarAvatar.style.backgroundColor = '#6c757d'; + + // Re-append badge if it existed + 
if (existingBadge) { + sidebarAvatar.appendChild(existingBadge); + } } } } diff --git a/application/single_app/static/js/public/manage_public_workspace.js b/application/single_app/static/js/public/manage_public_workspace.js index 402a55b8..3b31ce9b 100644 --- a/application/single_app/static/js/public/manage_public_workspace.js +++ b/application/single_app/static/js/public/manage_public_workspace.js @@ -12,6 +12,20 @@ $(document).ready(function () { loadMembers(); }); + // Initialize color picker + initializeColorPicker(); + + // Load stats when stats tab is shown + $('#stats-tab').on('shown.bs.tab', function () { + loadWorkspaceStats(); + }); + + // Activity timeline pagination + $('input[name="activityLimit"]').on('change', function() { + const limit = parseInt($(this).val()); + loadActivityTimeline(limit); + }); + // Edit workspace form (Owner only) $("#editWorkspaceForm").on("submit", function (e) { e.preventDefault(); @@ -139,6 +153,82 @@ $(document).ready(function () { $("#pendingRequestsTable").on("click", ".reject-request-btn", function () { rejectRequest($(this).data("id")); }); + + // CSV Bulk Upload Events + $("#addBulkMemberBtn").on("click", function () { + $("#csvBulkUploadModal").modal("show"); + }); + + $("#csvExampleBtn").on("click", downloadCsvExample); + $("#csvConfigBtn").on("click", showCsvConfig); + $("#csvFileInput").on("change", handleCsvFileSelect); + $("#csvNextBtn").on("click", startCsvUpload); + $("#csvDoneBtn").on("click", function () { + resetCsvModal(); + loadMembers(); + }); + + // Reset CSV modal when closed + $("#csvBulkUploadModal").on("hidden.bs.modal", function () { + resetCsvModal(); + }); + + // Bulk Actions Events + $("#selectAllMembers").on("change", function () { + const isChecked = $(this).prop("checked"); + $(".member-checkbox").prop("checked", isChecked); + updateBulkActionsBar(); + }); + + $(document).on("change", ".member-checkbox", function () { + updateBulkActionsBar(); + updateSelectAllCheckbox(); + }); + + $("#clearSelectionBtn").on("click", function () { + $(".member-checkbox").prop("checked", false); + $("#selectAllMembers").prop("checked", false); + updateBulkActionsBar(); + }); + + $("#bulkAssignRoleBtn").on("click", function () { + const selectedMembers = getSelectedMembers(); + if (selectedMembers.length === 0) { + alert("Please select at least one member"); + return; + } + $("#bulkRoleCount").text(selectedMembers.length); + $("#bulkAssignRoleModal").modal("show"); + }); + + $("#bulkAssignRoleForm").on("submit", function (e) { + e.preventDefault(); + bulkAssignRole(); + }); + + $("#bulkRemoveMembersBtn").on("click", function () { + const selectedMembers = getSelectedMembers(); + if (selectedMembers.length === 0) { + alert("Please select at least one member"); + return; + } + + // Populate the list of members to be removed + let membersList = "
      "; + selectedMembers.forEach(member => { + membersList += `
    • • ${member.name} (${member.email})
    • `; + }); + membersList += "
    "; + + $("#bulkRemoveCount").text(selectedMembers.length); + $("#bulkRemoveMembersList").html(membersList); + $("#bulkRemoveMembersModal").modal("show"); + }); + + $("#bulkRemoveMembersForm").on("submit", function (e) { + e.preventDefault(); + bulkRemoveMembers(); + }); }); @@ -148,16 +238,14 @@ $(document).ready(function () { function loadWorkspaceInfo(callback) { $.get(`/api/public_workspaces/${workspaceId}`) .done(function (ws) { + // Update status alert + updateWorkspaceStatusAlert(ws); const owner = ws.owner || {}; const admins = ws.admins || []; const docMgrs = ws.documentManagers || []; - // Header info - $("#workspaceInfoContainer").html(` -

    ${ws.name}
    - ${ws.description || ""}
    - Owner: ${owner.displayName} (${owner.email})
    - `); + // Update profile hero + updateProfileHero(ws, owner); // Determine role if (userId === owner.userId) { @@ -174,12 +262,25 @@ function loadWorkspaceInfo(callback) { $("#editWorkspaceContainer").show(); $("#editWorkspaceName").val(ws.name); $("#editWorkspaceDescription").val(ws.description); + + // Set selected color + const color = ws.heroColor || '#0078d4'; + $("#selectedColor").val(color); + updateHeroColor(color); + $(`.color-option[data-color="${color}"]`).addClass('selected'); + } + + // Show member actions for non-owners + if (currentUserRole !== "Owner" && currentUserRole) { + $("#memberActionsContainer").show(); } // Admin & Owner UI if (currentUserRole === "Owner" || currentUserRole === "Admin") { $("#addMemberBtn").show(); + $("#addBulkMemberBtn").show(); $("#pendingRequestsSection").show(); + $("#activityTimelineSection").show(); loadPendingRequests(); } @@ -194,7 +295,8 @@ function loadWorkspaceInfo(callback) { function updateWorkspaceInfo() { const data = { name: $("#editWorkspaceName").val().trim(), - description: $("#editWorkspaceDescription").val().trim() + description: $("#editWorkspaceDescription").val().trim(), + heroColor: $("#selectedColor").val() }; $.ajax({ url: `/api/public_workspaces/${workspaceId}`, @@ -223,8 +325,18 @@ function loadMembers(searchTerm = "", roleFilter = "") { $.get(url) .done(function (members) { const rows = members.map(m => { + const isOwner = m.role === "Owner"; + const checkboxHtml = isOwner || (currentUserRole !== "Owner" && currentUserRole !== "Admin") + ? '' + : ``; + return ` + ${checkboxHtml} ${m.displayName || "(no name)"}
    ${m.email || ""} @@ -235,10 +347,14 @@ function loadMembers(searchTerm = "", roleFilter = "") { `; }).join(""); $("#membersTable tbody").html(rows); + + // Reset selection UI + $("#selectAllMembers").prop("checked", false); + updateBulkActionsBar(); }) .fail(function () { $("#membersTable tbody").html( - `Failed to load members.` + `Failed to load members.` ); }); } @@ -436,3 +552,744 @@ function addMemberDirectly() { } }); } + +// --- New Functions for Profile Hero and Stats --- + +// Update profile hero section +function updateProfileHero(workspace, owner) { + const initial = workspace.name ? workspace.name.charAt(0).toUpperCase() : 'W'; + $('#workspaceInitial').text(initial); + $('#workspaceHeroName').text(workspace.name || 'Unnamed Workspace'); + $('#workspaceOwnerName').text(owner.displayName || 'Unknown'); + $('#workspaceOwnerEmail').text(owner.email || 'N/A'); + $('#workspaceHeroDescription').text(workspace.description || 'No description provided'); + + // Apply hero color + const color = workspace.heroColor || '#0078d4'; + updateHeroColor(color); +} + +// Update hero color +function updateHeroColor(color) { + const darker = adjustColorBrightness(color, -30); + document.documentElement.style.setProperty('--hero-color', color); + document.documentElement.style.setProperty('--hero-color-dark', darker); +} + +// Adjust color brightness +function adjustColorBrightness(color, percent) { + const num = parseInt(color.replace('#', ''), 16); + const amt = Math.round(2.55 * percent); + const R = (num >> 16) + amt; + const G = (num >> 8 & 0x00FF) + amt; + const B = (num & 0x0000FF) + amt; + return '#' + (0x1000000 + (R < 255 ? R < 1 ? 0 : R : 255) * 0x10000 + + (G < 255 ? G < 1 ? 0 : G : 255) * 0x100 + + (B < 255 ? B < 1 ? 0 : B : 255)) + .toString(16).slice(1); +} + +// Initialize color picker +function initializeColorPicker() { + $('.color-option').on('click', function() { + $('.color-option').removeClass('selected'); + $(this).addClass('selected'); + const color = $(this).data('color'); + $('#selectedColor').val(color); + updateHeroColor(color); + }); +} + +// Load workspace stats +let documentChart, storageChart, tokenChart; + +function loadWorkspaceStats() { + // Load stats data + $.get(`/api/public_workspaces/${workspaceId}/stats`) + .done(function(stats) { + updateStatCards(stats); + updateCharts(stats); + // Load activity timeline if user has permission + if (currentUserRole === "Owner" || currentUserRole === "Admin") { + loadActivityTimeline(50); + } + }) + .fail(function() { + console.error('Failed to load workspace stats'); + $('#stat-documents').text('N/A'); + $('#stat-storage').text('N/A'); + $('#stat-tokens').text('N/A'); + $('#stat-members').text('N/A'); + }); +} + +// Update stat cards +function updateStatCards(stats) { + $('#stat-documents').text(stats.totalDocuments || 0); + $('#stat-storage').text(formatBytes(stats.storageUsed || 0)); + $('#stat-tokens').text(formatNumber(stats.totalTokens || 0)); + $('#stat-members').text(stats.totalMembers || 0); +} + +// Update charts +function updateCharts(stats) { + // Document Activity Chart - Two bars for uploads and deletes + const docCtx = document.getElementById('documentChart'); + if (docCtx) { + if (documentChart) documentChart.destroy(); + documentChart = new Chart(docCtx, { + type: 'bar', + data: { + labels: stats.documentActivity?.labels || [], + datasets: [ + { + label: 'Uploads', + data: stats.documentActivity?.uploads || [], + backgroundColor: 'rgba(13, 202, 240, 0.8)', + borderColor: 'rgb(13, 202, 240)', + borderWidth: 
1 + }, + { + label: 'Deletes', + data: stats.documentActivity?.deletes || [], + backgroundColor: 'rgba(220, 53, 69, 0.8)', + borderColor: 'rgb(220, 53, 69)', + borderWidth: 1 + } + ] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { + display: true, + position: 'top' + } + }, + scales: { + y: { + beginAtZero: true, + ticks: { precision: 0 } + } + } + } + }); + } + + // Storage Usage Chart (Doughnut) - AI Search and Blob Storage + const storageCtx = document.getElementById('storageChart'); + if (storageCtx) { + if (storageChart) storageChart.destroy(); + const aiSearch = stats.storage?.ai_search_size || 0; + const blobStorage = stats.storage?.storage_account_size || 0; + + storageChart = new Chart(storageCtx, { + type: 'doughnut', + data: { + labels: ['AI Search', 'Blob Storage'], + datasets: [{ + data: [aiSearch, blobStorage], + backgroundColor: [ + 'rgb(13, 110, 253)', + 'rgb(23, 162, 184)' + ], + borderWidth: 2 + }] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { position: 'bottom' }, + tooltip: { + callbacks: { + label: function(context) { + return context.label + ': ' + formatBytes(context.parsed); + } + } + } + } + } + }); + } + + // Token Usage Chart + const tokenCtx = document.getElementById('tokenChart'); + if (tokenCtx) { + if (tokenChart) tokenChart.destroy(); + tokenChart = new Chart(tokenCtx, { + type: 'bar', + data: { + labels: stats.tokenUsage?.labels || [], + datasets: [{ + label: 'Tokens Used', + data: stats.tokenUsage?.data || [], + backgroundColor: 'rgba(255, 193, 7, 0.7)', + borderColor: 'rgb(255, 193, 7)', + borderWidth: 1 + }] + }, + options: { + responsive: true, + maintainAspectRatio: false, + plugins: { + legend: { display: false } + }, + scales: { + y: { + beginAtZero: true, + ticks: { + callback: function(value) { + return formatNumber(value); + } + } + } + } + } + }); + } +} + +// Load activity timeline +function loadActivityTimeline(limit = 50) { + $.get(`/api/public_workspaces/${workspaceId}/activity?limit=${limit}`) + .done(function(activities) { + if (!activities || activities.length === 0) { + $('#activityTimeline').html('

    No recent activity

    '); + return; + } + + const html = activities.map(activity => renderActivityItem(activity)).join(''); + $('#activityTimeline').html(html); + }) + .fail(function(xhr) { + if (xhr.status === 403) { + $('#activityTimeline').html('

    Access denied - Only workspace owners and admins can view activity timeline

    '); + } else { + $('#activityTimeline').html('

    Failed to load activity

    '); + } + }); +} + +// Render activity item +function renderActivityItem(activity) { + const icons = { + 'document_creation': 'file-earmark-arrow-up', + 'document_deletion': 'file-earmark-x', + 'token_usage': 'cpu', + 'user_login': 'box-arrow-in-right' + }; + + const colors = { + 'document_creation': 'success', + 'document_deletion': 'danger', + 'token_usage': 'primary', + 'user_login': 'info' + }; + + const activityType = activity.activity_type || 'unknown'; + const icon = icons[activityType] || 'circle'; + const color = colors[activityType] || 'secondary'; + const time = formatRelativeTime(activity.timestamp || activity.created_at); + + // Generate description based on activity type + let description = ''; + let title = activityType.replace(/_/g, ' ').replace(/\b\w/g, l => l.toUpperCase()); + + if (activityType === 'document_creation' && activity.document) { + description = `File: ${activity.document.file_name || 'Unknown'}`; + } else if (activityType === 'document_deletion' && activity.document_metadata) { + description = `File: ${activity.document_metadata.file_name || 'Unknown'}`; + } else if (activityType === 'token_usage' && activity.usage) { + description = `Tokens: ${formatNumber(activity.usage.total_tokens || 0)}`; + } else if (activityType === 'user_login') { + description = 'User logged in'; + } + + const activityJson = JSON.stringify(activity); + + return ` +
    ${title}
    ${time}
    ${description}
    + `; +} + +// Format bytes +function formatBytes(bytes) { + if (bytes === 0) return '0 B'; + const k = 1024; + const sizes = ['B', 'KB', 'MB', 'GB', 'TB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + return Math.round(bytes / Math.pow(k, i) * 100) / 100 + ' ' + sizes[i]; +} + +// Format number with commas +function formatNumber(num) { + return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ','); +} + +// Show raw activity in modal +function showRawActivity(element) { + try { + const activityJson = element.getAttribute('data-activity'); + const activity = JSON.parse(activityJson); + const modalBody = document.getElementById('rawActivityModalBody'); + modalBody.innerHTML = `
    ${JSON.stringify(activity, null, 2)}
    `; + $('#rawActivityModal').modal('show'); + } catch (error) { + console.error('Error showing raw activity:', error); + } +} + +// Copy raw activity to clipboard +function copyRawActivityToClipboard() { + const modalBody = document.getElementById('rawActivityModalBody'); + const text = modalBody.textContent; + navigator.clipboard.writeText(text).then(() => { + alert('Activity data copied to clipboard!'); + }).catch(err => { + console.error('Failed to copy:', err); + }); +} + +// Make functions globally available +window.showRawActivity = showRawActivity; +window.copyRawActivityToClipboard = copyRawActivityToClipboard; + +// Format relative time +function formatRelativeTime(timestamp) { + const now = new Date(); + const date = new Date(timestamp); + const diffMs = now - date; + const diffMins = Math.floor(diffMs / 60000); + const diffHours = Math.floor(diffMs / 3600000); + const diffDays = Math.floor(diffMs / 86400000); + + if (diffMins < 1) return 'Just now'; + if (diffMins < 60) return `${diffMins}m ago`; + if (diffHours < 24) return `${diffHours}h ago`; + if (diffDays < 7) return `${diffDays}d ago`; + return date.toLocaleDateString(); +} + +// ============================================================================ +// CSV Bulk Member Upload Functions +// ============================================================================ + +let csvParsedData = []; + +function downloadCsvExample() { + const csvContent = `userId,displayName,email,role +00000000-0000-0000-0000-000000000001,John Smith,john.smith@contoso.com,user +00000000-0000-0000-0000-000000000002,Jane Doe,jane.doe@contoso.com,admin +00000000-0000-0000-0000-000000000003,Bob Johnson,bob.johnson@contoso.com,document_manager`; + + const blob = new Blob([csvContent], { type: 'text/csv' }); + const url = window.URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = 'bulk_members_example.csv'; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + window.URL.revokeObjectURL(url); +} + +function showCsvConfig() { + const modal = new bootstrap.Modal(document.getElementById('csvFormatInfoModal')); + modal.show(); +} + +function validateGuid(guid) { + return ValidationUtils.validateGuid(guid); +} + +function validateEmail(email) { + const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/; + return emailRegex.test(email); +} + +function handleCsvFileSelect(event) { + const file = event.target.files[0]; + if (!file) { + $("#csvNextBtn").prop("disabled", true); + $("#csvValidationResults").hide(); + $("#csvErrorDetails").hide(); + return; + } + + const reader = new FileReader(); + reader.onload = function (e) { + const text = e.target.result; + const lines = text.split(/\r?\n/).filter(line => line.trim()); + + $("#csvErrorDetails").hide(); + $("#csvValidationResults").hide(); + + // Validate header + if (lines.length < 2) { + showCsvError("CSV must contain at least a header row and one data row"); + return; + } + + const header = lines[0].toLowerCase().trim(); + if (header !== "userid,displayname,email,role") { + showCsvError("Invalid header. Expected: userId,displayName,email,role"); + return; + } + + // Validate row count + const dataRows = lines.slice(1); + if (dataRows.length > 1000) { + showCsvError(`Too many rows. 
Maximum 1,000 members allowed (found ${dataRows.length})`); + return; + } + + // Parse and validate rows + csvParsedData = []; + const errors = []; + const validRoles = ['user', 'admin', 'document_manager']; + + for (let i = 0; i < dataRows.length; i++) { + const rowNum = i + 2; // +2 because header is row 1 + const row = dataRows[i].split(','); + + if (row.length !== 4) { + errors.push(`Row ${rowNum}: Expected 4 columns, found ${row.length}`); + continue; + } + + const userId = row[0].trim(); + const displayName = row[1].trim(); + const email = row[2].trim(); + const role = row[3].trim().toLowerCase(); + + if (!userId || !displayName || !email || !role) { + errors.push(`Row ${rowNum}: All fields are required`); + continue; + } + + if (!validateGuid(userId)) { + errors.push(`Row ${rowNum}: Invalid GUID format for userId`); + continue; + } + + if (!validateEmail(email)) { + errors.push(`Row ${rowNum}: Invalid email format`); + continue; + } + + if (!validRoles.includes(role)) { + errors.push(`Row ${rowNum}: Invalid role '${role}'. Must be: user, admin, or document_manager`); + continue; + } + + csvParsedData.push({ userId, displayName, email, role }); + } + + if (errors.length > 0) { + showCsvError(`Found ${errors.length} validation error(s):\n` + errors.slice(0, 10).join('\n') + + (errors.length > 10 ? `\n... and ${errors.length - 10} more` : '')); + return; + } + + // Show validation success + const sampleRows = csvParsedData.slice(0, 3); + $("#csvValidationDetails").html(` +

    ✓ Valid CSV file detected
    Total members to add: ${csvParsedData.length}
    Sample data (first 3):
    ${sampleRows.map(row => `• ${row.displayName} (${row.email})`).join('')}
    + `); + $("#csvValidationResults").show(); + $("#csvNextBtn").prop("disabled", false); + }; + + reader.readAsText(file); +} + +function showCsvError(message) { + $("#csvErrorList").html(`
    ${escapeHtml(message)}
    `); + $("#csvErrorDetails").show(); + $("#csvNextBtn").prop("disabled", true); + csvParsedData = []; +} + +function startCsvUpload() { + if (csvParsedData.length === 0) { + alert("No valid data to upload"); + return; + } + + // Switch to stage 2 + $("#csvStage1").hide(); + $("#csvStage2").show(); + $("#csvNextBtn").hide(); + $("#csvCancelBtn").hide(); + $("#csvModalClose").hide(); + + // Upload members + uploadCsvMembers(); +} + +async function uploadCsvMembers() { + let successCount = 0; + let failedCount = 0; + let skippedCount = 0; + const failures = []; + + for (let i = 0; i < csvParsedData.length; i++) { + const member = csvParsedData[i]; + const progress = Math.round(((i + 1) / csvParsedData.length) * 100); + + updateCsvProgress(progress, `Processing ${i + 1} of ${csvParsedData.length}: ${member.displayName}`); + + try { + const response = await fetch(`/api/public_workspaces/${workspaceId}/members`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + userId: member.userId, + displayName: member.displayName, + email: member.email, + role: member.role + }) + }); + + const data = await response.json(); + + if (response.ok && data.success) { + successCount++; + } else if (data.error && data.error.includes('already a member')) { + skippedCount++; + } else { + failedCount++; + failures.push(`${member.displayName}: ${data.error || 'Unknown error'}`); + } + } catch (error) { + failedCount++; + failures.push(`${member.displayName}: ${error.message}`); + } + } + + // Show summary + showCsvSummary(successCount, failedCount, skippedCount, failures); +} + +function updateCsvProgress(percentage, statusText) { + $("#csvProgressBar").css("width", percentage + "%"); + $("#csvProgressBar").attr("aria-valuenow", percentage); + $("#csvProgressText").text(percentage + "%"); + $("#csvStatusText").text(statusText); +} + +function showCsvSummary(successCount, failedCount, skippedCount, failures) { + $("#csvStage2").hide(); + $("#csvStage3").show(); + $("#csvDoneBtn").show(); + + let summaryHtml = ` +

    Upload Summary:
    • ✅ Successfully added: ${successCount}
    • ⏭️ Skipped (already members): ${skippedCount}
    • ❌ Failed: ${failedCount}
    + `; + + if (failures.length > 0) { + summaryHtml += ` +
    Failed Members:
    ${failures.slice(0, 10).map(f => `• ${escapeHtml(f)}`).join('')}
    ${failures.length > 10 ? `• ... and ${failures.length - 10} more` : ''}
    + `; + } + + $("#csvSummary").html(summaryHtml); +} + +function resetCsvModal() { + // Reset to stage 1 + $("#csvStage1").show(); + $("#csvStage2").hide(); + $("#csvStage3").hide(); + $("#csvNextBtn").show(); + $("#csvNextBtn").prop("disabled", true); + $("#csvCancelBtn").show(); + $("#csvDoneBtn").hide(); + $("#csvModalClose").show(); + $("#csvValidationResults").hide(); + $("#csvErrorDetails").hide(); + $("#csvFileInput").val(''); + csvParsedData = []; + + // Reset progress + updateCsvProgress(0, 'Ready'); +} + +// ============================================================================ +// Bulk Member Actions Functions +// ============================================================================ + +function getSelectedMembers() { + const selected = []; + $(".member-checkbox:checked").each(function () { + selected.push({ + userId: $(this).data("user-id"), + name: $(this).data("user-name"), + email: $(this).data("user-email"), + role: $(this).data("user-role") + }); + }); + return selected; +} + +function updateBulkActionsBar() { + const selectedCount = $(".member-checkbox:checked").length; + if (selectedCount > 0) { + $("#selectedCount").text(selectedCount); + $("#bulkActionsBar").show(); + } else { + $("#bulkActionsBar").hide(); + } +} + +function updateSelectAllCheckbox() { + const totalCheckboxes = $(".member-checkbox").length; + const checkedCheckboxes = $(".member-checkbox:checked").length; + + if (totalCheckboxes > 0 && checkedCheckboxes === totalCheckboxes) { + $("#selectAllMembers").prop("checked", true); + $("#selectAllMembers").prop("indeterminate", false); + } else if (checkedCheckboxes > 0) { + $("#selectAllMembers").prop("checked", false); + $("#selectAllMembers").prop("indeterminate", true); + } else { + $("#selectAllMembers").prop("checked", false); + $("#selectAllMembers").prop("indeterminate", false); + } +} + +async function bulkAssignRole() { + const selectedMembers = getSelectedMembers(); + const newRole = $("#bulkRoleSelect").val(); + + if (selectedMembers.length === 0) { + alert("No members selected"); + return; + } + + // Close modal and show progress + $("#bulkAssignRoleModal").modal("hide"); + + let successCount = 0; + let failedCount = 0; + const failures = []; + + for (let i = 0; i < selectedMembers.length; i++) { + const member = selectedMembers[i]; + + try { + const response = await fetch(`/api/public_workspaces/${workspaceId}/members/${member.userId}`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ role: newRole }) + }); + + const data = await response.json(); + + if (response.ok && data.success) { + successCount++; + } else { + failedCount++; + failures.push(`${member.name}: ${data.error || 'Unknown error'}`); + } + } catch (error) { + failedCount++; + failures.push(`${member.name}: ${error.message}`); + } + } + + // Show summary + let message = `Role assignment complete:\n✅ Success: ${successCount}\n❌ Failed: ${failedCount}`; + if (failures.length > 0) { + message += "\n\nFailed members:\n" + failures.slice(0, 5).join("\n"); + if (failures.length > 5) { + message += `\n... 
and ${failures.length - 5} more`; + } + } + alert(message); + + // Reload members and clear selection + loadMembers(); +} + +async function bulkRemoveMembers() { + const selectedMembers = getSelectedMembers(); + + if (selectedMembers.length === 0) { + alert("No members selected"); + return; + } + + // Close modal + $("#bulkRemoveMembersModal").modal("hide"); + + let successCount = 0; + let failedCount = 0; + const failures = []; + + for (let i = 0; i < selectedMembers.length; i++) { + const member = selectedMembers[i]; + + try { + const response = await fetch(`/api/public_workspaces/${workspaceId}/members/${member.userId}`, { + method: 'DELETE' + }); + + const data = await response.json(); + + if (response.ok && data.success) { + successCount++; + } else { + failedCount++; + failures.push(`${member.name}: ${data.error || 'Unknown error'}`); + } + } catch (error) { + failedCount++; + failures.push(`${member.name}: ${error.message}`); + } + } + + // Show summary + let message = `Member removal complete:\n✅ Success: ${successCount}\n❌ Failed: ${failedCount}`; + if (failures.length > 0) { + message += "\n\nFailed removals:\n" + failures.slice(0, 5).join("\n"); + if (failures.length > 5) { + message += `\n... and ${failures.length - 5} more`; + } + } + alert(message); + + // Reload members and clear selection + loadMembers(); +} + diff --git a/application/single_app/static/js/public/my_public_workspaces.js b/application/single_app/static/js/public/my_public_workspaces.js index 7123d4b7..21e6d0ef 100644 --- a/application/single_app/static/js/public/my_public_workspaces.js +++ b/application/single_app/static/js/public/my_public_workspaces.js @@ -2,7 +2,7 @@ $(document).ready(function () { // Grab global active workspace ID (set via inline @@ -641,12 +2050,157 @@ def swagger_ui(): return swagger_html @app.route('/swagger.json') + @swagger_route( + summary="OpenAPI Specification", + description="Serve the OpenAPI 3.0 specification as JSON with caching and rate limiting.", + tags=["Documentation"], + responses={ + 200: { + "description": "OpenAPI specification", + "content": { + "application/json": { + "schema": {"type": "object"} + } + } + }, + 429: { + "description": "Rate limit exceeded", + "content": { + "application/json": { + "schema": COMMON_SCHEMAS["error_response"] + } + } + } + }, + security=get_auth_security() + ) + @login_required def swagger_json(): - """Serve OpenAPI specification as JSON.""" - spec = extract_route_info(app) - return jsonify(spec) + """Serve OpenAPI specification as JSON with caching and rate limiting.""" + # Check for cache refresh parameter (admin use) + force_refresh = request.args.get('refresh') == 'true' + + # Get spec from cache + spec, status_code, content_type = _swagger_cache.get_spec(app, force_refresh=force_refresh, format='json') + + if status_code == 429: + return jsonify({ + "error": "Rate limit exceeded", + "message": "Too many requests for swagger.json. 
Please wait before trying again.", + "retry_after": 60 + }), 429 + elif status_code == 500: + return jsonify(spec), 500 + + # Create response with cache headers + response = make_response(jsonify(spec)) + + # Add cache control headers (5 minutes client cache) + response.headers['Cache-Control'] = 'public, max-age=300' + response.headers['ETag'] = hashlib.md5(json.dumps(spec, sort_keys=True).encode()).hexdigest()[:16] + + # Add generation timestamp for monitoring + response.headers['X-Generated-At'] = datetime.utcnow().isoformat() + 'Z' + response.headers['X-Spec-Paths'] = str(len(spec.get('paths', {}))) + + return response + + @app.route('/swagger.yaml') + @swagger_route( + summary="OpenAPI Specification (YAML)", + description="Serve the OpenAPI 3.0 specification as YAML with caching and rate limiting.", + tags=["Documentation"], + responses={ + 200: { + "description": "OpenAPI specification in YAML format", + "content": { + "application/x-yaml": { + "schema": {"type": "string"} + } + } + }, + 429: { + "description": "Rate limit exceeded", + "content": { + "application/json": { + "schema": COMMON_SCHEMAS["error_response"] + } + } + } + }, + security=get_auth_security() + ) + @login_required + def swagger_yaml(): + """Serve OpenAPI specification as YAML with caching and rate limiting.""" + # Check for cache refresh parameter (admin use) + force_refresh = request.args.get('refresh') == 'true' + + # Get spec from cache in YAML format + spec, status_code, content_type = _swagger_cache.get_spec(app, force_refresh=force_refresh, format='yaml') + + if status_code == 429: + return jsonify({ + "error": "Rate limit exceeded", + "message": "Too many requests for swagger.yaml. Please wait before trying again.", + "retry_after": 60 + }), 429 + elif status_code == 500: + return jsonify(spec), 500 + + # Create response with cache headers + response = make_response(spec) # spec is already YAML string + response.headers['Content-Type'] = content_type + + # Add cache control headers (5 minutes client cache) + response.headers['Cache-Control'] = 'public, max-age=300' + response.headers['ETag'] = hashlib.md5(spec.encode()).hexdigest()[:16] + + # Add generation timestamp for monitoring + response.headers['X-Generated-At'] = datetime.utcnow().isoformat() + 'Z' + + return response @app.route('/api/swagger/routes') + @swagger_route( + summary="List Documented Routes", + description="List all routes and their documentation status with cache statistics.", + tags=["Documentation", "Admin"], + responses={ + 200: { + "description": "Routes documentation status", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "routes": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": {"type": "string"}, + "methods": {"type": "array", "items": {"type": "string"}}, + "endpoint": {"type": "string"}, + "documented": {"type": "boolean"}, + "summary": {"type": "string"}, + "tags": {"type": "array", "items": {"type": "string"}} + } + } + }, + "total_routes": {"type": "integer"}, + "documented_routes": {"type": "integer"}, + "undocumented_routes": {"type": "integer"}, + "cache_stats": {"type": "object"} + } + } + } + } + } + }, + security=get_auth_security() + ) + @login_required def list_documented_routes(): """List all routes and their documentation status.""" routes = [] @@ -675,8 +2229,51 @@ def list_documented_routes(): 'routes': routes, 'total_routes': len(routes), 'documented_routes': len([r for r in routes if r['documented']]), - 'undocumented_routes': 
len([r for r in routes if not r['documented']]) + 'undocumented_routes': len([r for r in routes if not r['documented']]), + 'cache_stats': _swagger_cache.get_cache_stats() }) + + @app.route('/api/swagger/cache', methods=['GET', 'DELETE']) + @swagger_route( + summary="Swagger Cache Management", + description="Manage swagger specification cache - get cache statistics or clear cache.", + tags=["Documentation", "Admin"], + responses={ + 200: { + "description": "Cache operation successful", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "cache_stats": {"type": "object"}, + "message": {"type": "string"}, + "timestamp": {"type": "string"} + } + } + } + } + } + }, + security=get_auth_security() + ) + @login_required + def swagger_cache_management(): + """Manage swagger spec cache.""" + if request.method == 'DELETE': + # Clear cache (useful for development) + _swagger_cache.clear_cache() + return jsonify({ + 'message': 'Swagger cache cleared successfully', + 'timestamp': datetime.utcnow().isoformat() + 'Z' + }) + else: + # Get cache stats + stats = _swagger_cache.get_cache_stats() + return jsonify({ + 'cache_stats': stats, + 'message': 'Use DELETE method to clear cache' + }) # Utility function to create common response schemas def create_response_schema(success_schema: Optional[Dict[str, Any]] = None, error_schema: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: @@ -809,4 +2406,4 @@ def create_parameter(name: str, location: str, param_type: str = "string", requi "schema": { "type": param_type } - } \ No newline at end of file + } diff --git a/application/single_app/templates/_agent_config_info.html b/application/single_app/templates/_agent_config_info.html new file mode 100644 index 00000000..a088e8f5 --- /dev/null +++ b/application/single_app/templates/_agent_config_info.html @@ -0,0 +1,299 @@ +{% from '_agent_examples.html' import agent_examples %} + + + + diff --git a/application/single_app/templates/_agent_examples.html b/application/single_app/templates/_agent_examples.html new file mode 100644 index 00000000..7e523704 --- /dev/null +++ b/application/single_app/templates/_agent_examples.html @@ -0,0 +1,45 @@ +{% macro agent_examples(accordion_id='agentExamples', show_copy_buttons=True, show_create_buttons=False) %} + +{% endmacro %} diff --git a/application/single_app/templates/_agent_examples_modal.html b/application/single_app/templates/_agent_examples_modal.html new file mode 100644 index 00000000..52f95cdc --- /dev/null +++ b/application/single_app/templates/_agent_examples_modal.html @@ -0,0 +1,629 @@ + + + + + diff --git a/application/single_app/templates/_agent_modal.html b/application/single_app/templates/_agent_modal.html index a6bbe785..80f068ca 100644 --- a/application/single_app/templates/_agent_modal.html +++ b/application/single_app/templates/_agent_modal.html @@ -2,13 +2,37 @@