From aa060c44a42b2af823722fcdb9505fd2b895db5f Mon Sep 17 00:00:00 2001
From: James Broadhead
Date: Tue, 12 May 2026 22:11:29 +0000
Subject: [PATCH] fix: address ACE multi-model code review findings

Subset of fixes from the experimental-branch batch that apply to files
present on main. (mas_manager.py and compute.py don't exist under
databricks-skills/ on main; those fixes ship only on the experimental
companion PR.)

databricks-app-python/examples/llm_config.py:
- OAuth error no longer interpolates the full token-endpoint payload
  (which can contain `id_token` / refresh material). Logs the present
  key names instead.
- DATABRICKS_MODEL validation error drops the `response.text[:300]` echo
  so server bodies don't end up in operator-visible error text.

databricks-app-python/examples/fm-minimal-chat.py:
- Docstring + `app.yaml` examples reference the actual filename
  (`fm-minimal-chat.py`), not `2-minimal-chat-app.py`.

databricks-app-python/examples/fm-parallel-calls.py:
- Guard `Speedup` division on `total_time > 0` to avoid ZeroDivisionError
  on fast paths.
- Convert the trailing standalone triple-quoted string (dead code) to
  real `#` comments.

databricks-python-sdk/examples/5-serving-and-vector-search.py:
- Replace the `[0.1, 0.2, 0.3, ...]` literal-Ellipsis vector with a named
  placeholder + comment explaining it's a stand-in.

This pull request was AI-assisted by Isaac.

Co-authored-by: Isaac
---
 .../examples/fm-minimal-chat.py             |  4 +-
 .../examples/fm-parallel-calls.py           | 66 +++++++++----------
 .../examples/llm_config.py                  |  7 +-
 .../examples/5-serving-and-vector-search.py |  3 +-
 4 files changed, 41 insertions(+), 39 deletions(-)

diff --git a/databricks-skills/databricks-app-python/examples/fm-minimal-chat.py b/databricks-skills/databricks-app-python/examples/fm-minimal-chat.py
index db9a35d6..920a4051 100644
--- a/databricks-skills/databricks-app-python/examples/fm-minimal-chat.py
+++ b/databricks-skills/databricks-app-python/examples/fm-minimal-chat.py
@@ -16,11 +16,11 @@
     export DATABRICKS_TOKEN="dapi..."
     export DATABRICKS_SERVING_BASE_URL="https:///serving-endpoints"
     export DATABRICKS_MODEL=""  # See databricks-model-serving
-    streamlit run 2-minimal-chat-app.py
+    streamlit run fm-minimal-chat.py
 
 Databricks Apps Deployment:
 1. Create app.yaml:
-   command: ["streamlit", "run", "2-minimal-chat-app.py"]
+   command: ["streamlit", "run", "fm-minimal-chat.py"]
    env:
      - name: DATABRICKS_SERVING_BASE_URL
        value: "https:///serving-endpoints"
diff --git a/databricks-skills/databricks-app-python/examples/fm-parallel-calls.py b/databricks-skills/databricks-app-python/examples/fm-parallel-calls.py
index 53cc6a23..41226196 100644
--- a/databricks-skills/databricks-app-python/examples/fm-parallel-calls.py
+++ b/databricks-skills/databricks-app-python/examples/fm-parallel-calls.py
@@ -224,42 +224,42 @@ def check_audience_fit(client: OpenAI, text: str) -> Dict[str, Any]:
 time_saved = (total_latency / 1000) - total_time
 print(f"\n{'='*60}")
 print(f"Time saved vs serial execution: {time_saved:.2f}s")
-print(f"Speedup: {(total_latency/1000) / total_time:.1f}×")
+if total_time > 0:
+    print(f"Speedup: {(total_latency/1000) / total_time:.1f}×")
 print(f"{'='*60}")
 
 
 # =============================================================================
 # Production Best Practices
 # =============================================================================
-"""
-Best practices from databricksters-check-and-pub:
-
-1. Configurable concurrency
-   - Use LLM_MAX_CONCURRENCY env var (default: 5 in the production app)
-   - Balance throughput vs rate limits
-   - Too high = rate limit errors
-   - Too low = underutilized resources
-
-2. Error handling
-   - Capture exceptions per job
-   - Return None for failed jobs
-   - Collect error messages for debugging
-   - Continue execution even if some jobs fail
-
-3. Bounded execution
-   - Only parallelize independent checks
-   - Cap concurrency with an env var rather than firing unlimited requests
-   - Keep the job contract simple: name -> (callable, args, kwargs)
-
-4. When to use parallel calls
-   - Multiple independent evaluations of same content
-   - Batch processing multiple documents
-   - A/B testing different prompts
-   - Multi-aspect analysis
-
-5. When NOT to use parallel calls
-   - Dependent/sequential operations
-   - Single evaluation needed
-   - Rate limits are very strict
-   - Debugging (use serial for easier troubleshooting)
-"""
+#
+# Best practices from databricksters-check-and-pub:
+#
+# 1. Configurable concurrency
+#    - Use LLM_MAX_CONCURRENCY env var (default: 5 in the production app)
+#    - Balance throughput vs rate limits
+#    - Too high = rate limit errors
+#    - Too low = underutilized resources
+#
+# 2. Error handling
+#    - Capture exceptions per job
+#    - Return None for failed jobs
+#    - Collect error messages for debugging
+#    - Continue execution even if some jobs fail
+#
+# 3. Bounded execution
+#    - Only parallelize independent checks
+#    - Cap concurrency with an env var rather than firing unlimited requests
+#    - Keep the job contract simple: name -> (callable, args, kwargs)
+#
+# 4. When to use parallel calls
+#    - Multiple independent evaluations of same content
+#    - Batch processing multiple documents
+#    - A/B testing different prompts
+#    - Multi-aspect analysis
+#
+# 5. When NOT to use parallel calls
+#    - Dependent/sequential operations
+#    - Single evaluation needed
+#    - Rate limits are very strict
+#    - Debugging (use serial for easier troubleshooting)
diff --git a/databricks-skills/databricks-app-python/examples/llm_config.py b/databricks-skills/databricks-app-python/examples/llm_config.py
index 6c4d5509..200aaef9 100644
--- a/databricks-skills/databricks-app-python/examples/llm_config.py
+++ b/databricks-skills/databricks-app-python/examples/llm_config.py
@@ -218,8 +218,10 @@ def get_databricks_bearer_token(
     access_token = payload.get("access_token")
     expires_in = int(payload.get("expires_in", 300))
     if not access_token:
+        payload_keys = sorted(payload.keys()) if isinstance(payload, dict) else []
         raise DatabricksLLMConfigError(
-            f"Token endpoint response is missing access_token: {payload}"
+            "Token endpoint response is missing access_token "
+            f"(keys present: {payload_keys})"
         )
 
     expires_at = int(time.time()) + expires_in
@@ -278,8 +280,7 @@ def validate_databricks_llm_config(
     if response.status_code >= 400:
         raise DatabricksLLMConfigError(
             f"Failed to validate DATABRICKS_MODEL={config.model!r} in workspace "
-            f"{config.workspace_host} (HTTP {response.status_code}). "
-            f"Response: {response.text[:300]}"
+            f"{config.workspace_host} (HTTP {response.status_code})."
) _validation_cache[cache_key] = int(time.time()) + VALIDATION_TTL_SECONDS diff --git a/databricks-skills/databricks-python-sdk/examples/5-serving-and-vector-search.py b/databricks-skills/databricks-python-sdk/examples/5-serving-and-vector-search.py index 2a47c2b0..70b87116 100644 --- a/databricks-skills/databricks-python-sdk/examples/5-serving-and-vector-search.py +++ b/databricks-skills/databricks-python-sdk/examples/5-serving-and-vector-search.py @@ -168,10 +168,11 @@ # Query with embedding vector directly +query_vector = [0.1, 0.2, 0.3] # Replace with your real embedding (list of floats matching the index's dimension) results = w.vector_search_indexes.query_index( index_name="main.default.my_index", columns=["id", "text"], - query_vector=[0.1, 0.2, 0.3, ...], # Your embedding vector + query_vector=query_vector, num_results=10 )