Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,7 @@ describe('Anthropic integration', () => {
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
}),
description: 'chat claude-3-haiku-20240307 stream-response',
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
origin: 'auto.ai.anthropic',
status: 'ok',
Expand Down Expand Up @@ -296,7 +296,7 @@ describe('Anthropic integration', () => {
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
[GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
}),
description: 'chat claude-3-haiku-20240307 stream-response',
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
origin: 'auto.ai.anthropic',
status: 'ok',
Expand Down Expand Up @@ -401,7 +401,7 @@ describe('Anthropic integration', () => {
spans: expect.arrayContaining([
// messages.create with stream: true
expect.objectContaining({
description: 'chat claude-3-haiku-20240307 stream-response',
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
data: expect.objectContaining({
[GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
Expand All @@ -419,7 +419,7 @@ describe('Anthropic integration', () => {
}),
// messages.stream
expect.objectContaining({
description: 'chat claude-3-haiku-20240307 stream-response',
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
data: expect.objectContaining({
[GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
Expand All @@ -435,7 +435,7 @@ describe('Anthropic integration', () => {
}),
// messages.stream with redundant stream: true param
expect.objectContaining({
description: 'chat claude-3-haiku-20240307 stream-response',
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
data: expect.objectContaining({
[GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
Expand All @@ -457,7 +457,7 @@ describe('Anthropic integration', () => {
transaction: 'main',
spans: expect.arrayContaining([
expect.objectContaining({
description: 'chat claude-3-haiku-20240307 stream-response',
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
data: expect.objectContaining({
[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
Expand All @@ -466,15 +466,15 @@ describe('Anthropic integration', () => {
}),
}),
expect.objectContaining({
description: 'chat claude-3-haiku-20240307 stream-response',
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
data: expect.objectContaining({
[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
}),
}),
expect.objectContaining({
description: 'chat claude-3-haiku-20240307 stream-response',
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
data: expect.objectContaining({
[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
Expand Down Expand Up @@ -536,7 +536,7 @@ describe('Anthropic integration', () => {
transaction: {
spans: expect.arrayContaining([
expect.objectContaining({
description: expect.stringContaining('stream-response'),
description: 'chat claude-3-haiku-20240307',
op: 'gen_ai.chat',
data: expect.objectContaining({
[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_TOOLS_JSON,
Expand All @@ -557,7 +557,7 @@ describe('Anthropic integration', () => {
spans: expect.arrayContaining([
// Error with messages.create on stream initialization
expect.objectContaining({
description: 'chat error-stream-init stream-response',
description: 'chat error-stream-init',
op: 'gen_ai.chat',
status: 'internal_error', // Actual status coming from the instrumentation
data: expect.objectContaining({
Expand All @@ -567,7 +567,7 @@ describe('Anthropic integration', () => {
}),
// Error with messages.stream on stream initialization
expect.objectContaining({
description: 'chat error-stream-init stream-response',
description: 'chat error-stream-init',
op: 'gen_ai.chat',
status: 'internal_error', // Actual status coming from the instrumentation
data: expect.objectContaining({
Expand All @@ -577,7 +577,7 @@ describe('Anthropic integration', () => {
// Error midway with messages.create on streaming - note: The stream is started successfully
// so we get a successful span with the content that was streamed before the error
expect.objectContaining({
description: 'chat error-stream-midway stream-response',
description: 'chat error-stream-midway',
op: 'gen_ai.chat',
status: 'ok',
data: expect.objectContaining({
Expand All @@ -589,7 +589,7 @@ describe('Anthropic integration', () => {
}),
// Error midway with messages.stream - same behavior, we get a span with the streamed data
expect.objectContaining({
description: 'chat error-stream-midway stream-response',
description: 'chat error-stream-midway',
op: 'gen_ai.chat',
status: 'ok',
data: expect.objectContaining({
Expand Down Expand Up @@ -731,7 +731,7 @@ describe('Anthropic integration', () => {
source: {
type: 'base64',
media_type: 'image/png',
data: '[Filtered]',
data: '[Blob substitute]',
},
},
],
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -272,7 +272,7 @@ describe('Google GenAI integration', () => {
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
}),
description: 'generate_content gemini-2.0-flash-001 stream-response',
description: 'generate_content gemini-2.0-flash-001',
op: 'gen_ai.generate_content',
origin: 'auto.ai.google_genai',
status: 'ok',
Expand Down Expand Up @@ -327,7 +327,7 @@ describe('Google GenAI integration', () => {
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
}),
description: 'generate_content gemini-1.5-flash stream-response',
description: 'generate_content gemini-1.5-flash',
op: 'gen_ai.generate_content',
origin: 'auto.ai.google_genai',
status: 'ok',
Expand Down Expand Up @@ -361,7 +361,7 @@ describe('Google GenAI integration', () => {
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
}),
description: 'chat gemini-1.5-pro stream-response',
description: 'chat gemini-1.5-pro',
op: 'gen_ai.chat',
origin: 'auto.ai.google_genai',
status: 'ok',
Expand All @@ -373,7 +373,7 @@ describe('Google GenAI integration', () => {
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
}),
description: 'generate_content blocked-model stream-response',
description: 'generate_content blocked-model',
op: 'gen_ai.generate_content',
origin: 'auto.ai.google_genai',
status: 'internal_error',
Expand All @@ -385,7 +385,7 @@ describe('Google GenAI integration', () => {
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
}),
description: 'generate_content error-model stream-response',
description: 'generate_content error-model',
op: 'gen_ai.generate_content',
origin: 'auto.ai.google_genai',
status: 'internal_error',
Expand Down Expand Up @@ -416,7 +416,7 @@ describe('Google GenAI integration', () => {
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
}),
description: 'generate_content gemini-1.5-flash stream-response',
description: 'generate_content gemini-1.5-flash',
op: 'gen_ai.generate_content',
origin: 'auto.ai.google_genai',
status: 'ok',
Expand Down Expand Up @@ -455,7 +455,7 @@ describe('Google GenAI integration', () => {
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
}),
description: 'chat gemini-1.5-pro stream-response',
description: 'chat gemini-1.5-pro',
op: 'gen_ai.chat',
origin: 'auto.ai.google_genai',
status: 'ok',
Expand All @@ -472,7 +472,7 @@ describe('Google GenAI integration', () => {
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
}),
description: 'generate_content blocked-model stream-response',
description: 'generate_content blocked-model',
op: 'gen_ai.generate_content',
origin: 'auto.ai.google_genai',
status: 'internal_error',
Expand All @@ -488,7 +488,7 @@ describe('Google GenAI integration', () => {
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
}),
description: 'generate_content error-model stream-response',
description: 'generate_content error-model',
op: 'gen_ai.generate_content',
origin: 'auto.ai.google_genai',
status: 'internal_error',
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ describe('OpenAI Tool Calls integration', () => {
[OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25,
[OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15,
},
description: 'chat gpt-4 stream-response',
description: 'chat gpt-4',
op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
Expand Down Expand Up @@ -187,7 +187,7 @@ describe('OpenAI Tool Calls integration', () => {
[OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12,
[OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8,
},
description: 'chat gpt-4 stream-response',
description: 'chat gpt-4',
op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
Expand Down Expand Up @@ -254,7 +254,7 @@ describe('OpenAI Tool Calls integration', () => {
[OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25,
[OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15,
},
description: 'chat gpt-4 stream-response',
description: 'chat gpt-4',
op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
Expand Down Expand Up @@ -314,7 +314,7 @@ describe('OpenAI Tool Calls integration', () => {
[OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12,
[OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8,
},
description: 'chat gpt-4 stream-response',
description: 'chat gpt-4',
op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

/**
 * Boots an Express app on an ephemeral port that fakes the OpenAI
 * chat-completions endpoint with a canned vision reply.
 * Resolves with the underlying http.Server once it is listening.
 */
function startMockServer() {
  const app = express();
  app.use(express.json({ limit: '10mb' }));

  app.post('/openai/chat/completions', (request, response) => {
    // Echo the requested model back so assertions can match on it.
    const payload = {
      id: 'chatcmpl-vision-123',
      object: 'chat.completion',
      created: 1677652288,
      model: request.body.model,
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'I see a red square in the image.',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 50,
        completion_tokens: 10,
        total_tokens: 60,
      },
    };
    response.send(payload);
  });

  // Port 0 lets the OS pick a free port; resolve once the socket is bound.
  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}

// Small 10x10 red PNG image encoded as base64; embedded below as an inline
// `data:image/png;base64,...` URL payload for the vision requests.
const RED_PNG_BASE64 =
  'iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8z8BQDwADhQGAWjR9awAAAABJRU5ErkJggg==';

/**
 * Exercises the OpenAI vision code path against the local mock server:
 * one chat-completions request with a single inline base64 image, and one
 * mixing an inline image with a remote image URL, both inside a Sentry
 * `main` span so the instrumentation can attach gen_ai spans to it.
 */
async function run() {
  const server = await startMockServer();

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const client = new OpenAI({
        baseURL: `http://localhost:${server.address().port}/openai`,
        apiKey: 'mock-api-key',
      });

      // Vision request with inline base64 image
      await client.chat.completions.create({
        model: 'gpt-4o',
        messages: [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'What is in this image?' },
              {
                type: 'image_url',
                image_url: {
                  url: `data:image/png;base64,${RED_PNG_BASE64}`,
                },
              },
            ],
          },
        ],
      });

      // Vision request with multiple images (one inline, one URL)
      await client.chat.completions.create({
        model: 'gpt-4o',
        messages: [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'Compare these images' },
              {
                type: 'image_url',
                image_url: {
                  url: `data:image/png;base64,${RED_PNG_BASE64}`,
                },
              },
              {
                type: 'image_url',
                image_url: {
                  url: 'https://example.com/image.png',
                },
              },
            ],
          },
        ],
      });
    });
  } finally {
    // Tear the mock server down even when a request throws; otherwise the
    // open listener keeps the process alive after a failure.
    server.close();
  }
}

run();
Loading
Loading