Skip to content

Commit d83d7b6

Browse files
committed
fix comments
1 parent 0d2efc6 commit d83d7b6

4 files changed

Lines changed: 11 additions & 56 deletions

File tree

agents/src/metrics/usage_collector.ts

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,14 +4,9 @@
44
import { log } from '../log.js';
55
import type { AgentMetrics } from './base.js';
66

7-
// Ref: python livekit-agents/livekit/agents/metrics/usage_collector.py - lines 10-14 (diff)
8-
// NOTE: Python uses warnings.warn() for deprecation at runtime.
9-
// TypeScript uses JSDoc @deprecated which shows in IDE.
10-
// We also add optional console.warn() in constructor for runtime parity.
117
/**
12-
* @deprecated Use LLMModelUsage, TTSModelUsage, or STTModelUsage from './model_usage.js' instead.
8+
* @deprecated Use LLMModelUsage, TTSModelUsage, or STTModelUsage instead.
139
* These new types provide per-model/provider usage aggregation for more detailed tracking.
14-
* Ref: python livekit-agents/livekit/agents/metrics/usage_collector.py - lines 10-14 (diff)
1510
*/
1611
export interface UsageSummary {
1712
llmPromptTokens: number;
@@ -22,7 +17,7 @@ export interface UsageSummary {
2217
}
2318

2419
/**
25-
* @deprecated Use ModelUsageCollector from './model_usage.js' instead.
20+
* @deprecated Use ModelUsageCollector instead.
2621
* ModelUsageCollector provides per-model/provider usage aggregation for more detailed tracking.
2722
*/
2823
export class UsageCollector {

agents/src/telemetry/trace_types.ts

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -30,10 +30,9 @@ export const ATTR_FUNCTION_TOOLS = 'lk.function_tools';
3030
export const ATTR_RESPONSE_TEXT = 'lk.response.text';
3131
export const ATTR_RESPONSE_FUNCTION_CALLS = 'lk.response.function_calls';
3232

33-
// Ref: python livekit-agents/livekit/agents/telemetry/trace_types.py - lines 5-6 (diff)
3433
// New latency attributes for response timing
3534
/** Time to first token in seconds. */
36-
export const ATTR_RESPONSE_TTFT = 'lk.response.ttft'; // Ref: line 5 (ATTR_RESPONSE_TTFT)
35+
export const ATTR_RESPONSE_TTFT = 'lk.response.ttft';
3736

3837
// function tool
3938
export const ATTR_FUNCTION_TOOL_NAME = 'lk.function_tool.name';
@@ -46,9 +45,8 @@ export const ATTR_TTS_INPUT_TEXT = 'lk.input_text';
4645
export const ATTR_TTS_STREAMING = 'lk.tts.streaming';
4746
export const ATTR_TTS_LABEL = 'lk.tts.label';
4847

49-
// Ref: python livekit-agents/livekit/agents/telemetry/trace_types.py - lines 10-11 (diff)
5048
/** Time to first byte in seconds. */
51-
export const ATTR_RESPONSE_TTFB = 'lk.response.ttfb'; // Ref: line 10 (ATTR_RESPONSE_TTFB)
49+
export const ATTR_RESPONSE_TTFB = 'lk.response.ttfb';
5250

5351
// eou detection
5452
export const ATTR_EOU_PROBABILITY = 'lk.eou.probability';
@@ -72,18 +70,15 @@ export const ATTR_LLM_METRICS = 'lk.llm_metrics';
7270
export const ATTR_TTS_METRICS = 'lk.tts_metrics';
7371
export const ATTR_REALTIME_MODEL_METRICS = 'lk.realtime_model_metrics';
7472

75-
// Ref: python livekit-agents/livekit/agents/telemetry/trace_types.py - lines 16-17 (diff)
76-
// latency span attributes
7773
/** End-to-end latency in seconds. */
78-
export const ATTR_E2E_LATENCY = 'lk.e2e_latency'; // Ref: line 17 (ATTR_E2E_LATENCY)
74+
export const ATTR_E2E_LATENCY = 'lk.e2e_latency';
7975

8076
// OpenTelemetry GenAI attributes
8177
// OpenTelemetry specification: https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/
8278
export const ATTR_GEN_AI_OPERATION_NAME = 'gen_ai.operation.name';
8379
export const ATTR_GEN_AI_REQUEST_MODEL = 'gen_ai.request.model';
84-
// Ref: python livekit-agents/livekit/agents/telemetry/trace_types.py - lines 22-23 (diff)
8580
/** The provider name (e.g., 'openai', 'anthropic'). */
86-
export const ATTR_GEN_AI_PROVIDER_NAME = 'gen_ai.provider.name'; // Ref: line 23
81+
export const ATTR_GEN_AI_PROVIDER_NAME = 'gen_ai.provider.name';
8782
export const ATTR_GEN_AI_USAGE_INPUT_TOKENS = 'gen_ai.usage.input_tokens';
8883
export const ATTR_GEN_AI_USAGE_OUTPUT_TOKENS = 'gen_ai.usage.output_tokens';
8984

agents/src/voice/agent_session.ts

Lines changed: 1 addition & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@ import {
1717
} from '../inference/index.js';
1818
import type { InterruptionEvent } from '../inference/interruption/types.js';
1919
import { type JobContext, getJobContext } from '../job.js';
20-
// Ref: python livekit-agents/livekit/agents/voice/agent_session.py - lines 5-6 (diff)
2120
import type { FunctionCall, FunctionCallOutput } from '../llm/chat_context.js';
2221
import { AgentHandoffItem, ChatContext, ChatMessage } from '../llm/chat_context.js';
2322
import type { LLM, RealtimeModel, RealtimeModelError, ToolChoice } from '../llm/index.js';
@@ -72,15 +71,9 @@ import type {
7271
} from './turn_config/turn_handling.js';
7372
import { migrateLegacyOptions } from './turn_config/utils.js';
7473

75-
// Ref: python livekit-agents/livekit/agents/voice/agent_session.py - lines 17-19 (diff)
76-
// NOTE: Python uses @dataclass. TypeScript uses interface.
77-
/**
78-
* Usage summary for an AgentSession, aggregated per model/provider combination.
79-
* Ref: python livekit-agents/livekit/agents/voice/agent_session.py - lines 17-19 (diff)
80-
*/
8174
export interface AgentSessionUsage {
8275
/** List of usage summaries, one per model/provider combination. */
83-
modelUsage: ModelUsage[]; // Ref: line 18 (model_usage: list[ModelUsage])
76+
modelUsage: ModelUsage[];
8477
}
8578

8679
export interface SessionOptions {
@@ -206,8 +199,6 @@ export class AgentSession<
206199

207200
private _interruptionDetection?: InterruptionConfig['mode'];
208201

209-
// Ref: python livekit-agents/livekit/agents/voice/agent_session.py - line 34 (diff)
210-
// Collects and aggregates usage metrics per model/provider combination
211202
private _usageCollector: ModelUsageCollector = new ModelUsageCollector();
212203

213204
/** @internal */
@@ -290,9 +281,6 @@ export class AgentSession<
290281
): boolean {
291282
const eventData = args[0] as AgentEvent;
292283
this._recordedEvents.push(eventData);
293-
// Ref: python livekit-agents/livekit/agents/voice/agent_session.py - lines 39-40 (diff)
294-
// if isinstance(arg, MetricsCollectedEvent):
295-
// self._usage_collector.collect(arg.metrics)
296284
if (event === AgentSessionEventTypes.MetricsCollected) {
297285
this._usageCollector.collect((eventData as MetricsCollectedEvent).metrics);
298286
}
@@ -328,10 +316,8 @@ export class AgentSession<
328316
return this._interruptionDetection;
329317
}
330318

331-
// Ref: python livekit-agents/livekit/agents/voice/agent_session.py - lines 45-48 (diff)
332319
/**
333320
* Returns usage summaries for this session, one per model/provider combination.
334-
* Ref: python livekit-agents/livekit/agents/voice/agent_session.py - lines 45-48 (diff)
335321
*/
336322
get usage(): AgentSessionUsage {
337323
return { modelUsage: this._usageCollector.flatten() };
@@ -959,8 +945,6 @@ export class AgentSession<
959945
this.rootSpanContext = undefined;
960946
this.llmErrorCounts = 0;
961947
this.ttsErrorCounts = 0;
962-
// Ref: python livekit-agents/livekit/agents/voice/agent_session.py - lines 53-54 (diff)
963-
// Reset collector on session reset
964948
this._usageCollector = new ModelUsageCollector();
965949

966950
this.logger.info({ reason, error }, 'AgentSession closed');

agents/src/voice/generation.ts

Lines changed: 4 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,6 @@ export class _LLMGenerationData {
3636
generatedText: string = '';
3737
generatedToolCalls: FunctionCall[];
3838
id: string;
39-
// Time to first token in seconds (for TTFT span attribute)
4039
ttft?: number;
4140

4241
constructor(
@@ -382,16 +381,14 @@ export function updateInstructions(options: {
382381
}
383382
}
384383

385-
// Ref: python livekit-agents/livekit/agents/voice/generation.py - lines 3-7 (diff)
386-
// Added model and provider parameters to generation functions
387384
export function performLLMInference(
388385
node: LLMNode,
389386
chatCtx: ChatContext,
390387
toolCtx: ToolContext,
391388
modelSettings: ModelSettings,
392389
controller: AbortController,
393-
model?: string, // Ref: line 5 (model: str | None = None)
394-
provider?: string, // Ref: line 6 (provider: str | None = None)
390+
model?: string,
391+
provider?: string,
395392
): [Task<void>, _LLMGenerationData] {
396393
const textStream = new IdentityTransform<string>();
397394
const toolCallStream = new IdentityTransform<FunctionCall>();
@@ -407,20 +404,15 @@ export function performLLMInference(
407404
);
408405
span.setAttribute(traceTypes.ATTR_FUNCTION_TOOLS, JSON.stringify(Object.keys(toolCtx)));
409406

410-
// Ref: python livekit-agents/livekit/agents/voice/generation.py - lines 36-48 (diff)
411-
// Set model/provider attributes on the span
412407
if (model) {
413-
// Ref: lines 44-45
414408
span.setAttribute(traceTypes.ATTR_GEN_AI_REQUEST_MODEL, model);
415409
}
416410
if (provider) {
417-
// Ref: lines 46-47
418411
span.setAttribute(traceTypes.ATTR_GEN_AI_PROVIDER_NAME, provider);
419412
}
420413

421414
let llmStreamReader: ReadableStreamDefaultReader<string | ChatChunk> | null = null;
422415
let llmStream: ReadableStream<string | ChatChunk> | null = null;
423-
// Track start time for TTFT calculation
424416
const startTime = performance.now() / 1000; // Convert to seconds
425417
let firstTokenReceived = false;
426418

@@ -445,7 +437,6 @@ export function performLLMInference(
445437
const { done, value: chunk } = result;
446438
if (done) break;
447439

448-
// Track time to first token
449440
if (!firstTokenReceived) {
450441
firstTokenReceived = true;
451442
data.ttft = performance.now() / 1000 - startTime;
@@ -521,35 +512,28 @@ export function performLLMInference(
521512
];
522513
}
523514

524-
// Ref: python livekit-agents/livekit/agents/voice/generation.py - lines 77-82 (diff)
525-
// Added model and provider parameters for TTS generation
526515
export function performTTSInference(
527516
node: TTSNode,
528517
text: ReadableStream<string>,
529518
modelSettings: ModelSettings,
530519
controller: AbortController,
531-
model?: string, // Ref: line 79 (model: str | None = None)
532-
provider?: string, // Ref: line 80 (provider: str | None = None)
520+
model?: string,
521+
provider?: string,
533522
): [Task<void>, ReadableStream<AudioFrame>] {
534523
const audioStream = new IdentityTransform<AudioFrame>();
535524
const outputWriter = audioStream.writable.getWriter();
536525
const audioOutputStream = audioStream.readable;
537526

538527
const _performTTSInferenceImpl = async (signal: AbortSignal, span: Span) => {
539-
// Ref: python livekit-agents/livekit/agents/voice/generation.py - lines 77-82 (diff)
540-
// Set model/provider attributes on the span
541528
if (model) {
542-
// Ref: lines 79-80
543529
span.setAttribute(traceTypes.ATTR_GEN_AI_REQUEST_MODEL, model);
544530
}
545531
if (provider) {
546-
// Ref: lines 81-82
547532
span.setAttribute(traceTypes.ATTR_GEN_AI_PROVIDER_NAME, provider);
548533
}
549534

550535
let ttsStreamReader: ReadableStreamDefaultReader<AudioFrame> | null = null;
551536
let ttsStream: ReadableStream<AudioFrame> | null = null;
552-
// Track start time for TTFB calculation
553537
const startTime = performance.now() / 1000; // Convert to seconds
554538
let firstByteReceived = false;
555539

@@ -570,7 +554,6 @@ export function performTTSInference(
570554
break;
571555
}
572556

573-
// Track time to first byte and set span attribute
574557
if (!firstByteReceived) {
575558
firstByteReceived = true;
576559
const ttfb = performance.now() / 1000 - startTime;
@@ -663,7 +646,6 @@ export function performTextForwarding(
663646

664647
export interface _AudioOut {
665648
audio: Array<AudioFrame>;
666-
/** Future that will be set with the timestamp of the first frame's capture */
667649
firstFrameFut: Future<number>;
668650
}
669651

@@ -751,7 +733,6 @@ export function performAudioForwarding(
751733
];
752734
}
753735

754-
// function_tool span is already implemented in tracableToolExecution below (line ~796)
755736
export function performToolExecutions({
756737
session,
757738
speechHandle,

0 commit comments

Comments (0)