Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

/**
 * Start an Express mock of the OpenAI chat-completions endpoint on an
 * ephemeral port.
 *
 * The handler echoes the requested model back inside a canned
 * `chat.completion` payload and sets the OpenAI response headers the
 * scenario asserts on (`x-request-id` in particular).
 *
 * @returns {Promise<import('http').Server>} resolves once the server is listening;
 *   callers read the bound port via `server.address().port`.
 */
function startMockServer() {
  const app = express();
  app.use(express.json());

  app.post('/openai/chat/completions', (req, res) => {
    const { model } = req.body;

    res.set({
      'x-request-id': 'req_withresponse_test',
      'openai-organization': 'test-org',
      'openai-processing-ms': '150',
      'openai-version': '2020-10-01',
    });

    // res.json() makes the JSON serialization explicit (res.send() relied on
    // express inferring it from the object argument).
    res.json({
      id: 'chatcmpl-withresponse',
      object: 'chat.completion',
      created: 1677652288,
      model,
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Testing .withResponse() method!',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 8,
        completion_tokens: 12,
        total_tokens: 20,
      },
    });
  });

  // Port 0 lets the OS pick a free port; resolve only once listening so the
  // caller can safely read server.address().
  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}

/**
 * Exercise the instrumented OpenAI client's `.withResponse()` and
 * `.asResponse()` APIPromise helpers against the local mock server, inside a
 * `main` span so the integration test can assert on the resulting gen_ai
 * child spans.
 *
 * Throws on any structural mismatch so the test runner reports a failure.
 *
 * NOTE(review): only non-streaming calls are covered here; a streaming
 * `.withResponse()` case (stream: true) would pin the original regression
 * more directly — TODO consider adding one.
 */
async function run() {
  const server = await startMockServer();

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const client = new OpenAI({
        baseURL: `http://localhost:${server.address().port}/openai`,
        apiKey: 'mock-api-key',
      });

      // Deliberately NOT awaited: .withResponse() lives on the APIPromise
      // returned by create(), so we must keep the promise itself.
      const result = client.chat.completions.create({
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'Test withResponse' }],
      });

      // Verify method exists
      if (typeof result.withResponse !== 'function') {
        throw new Error('.withResponse() method does not exist');
      }

      // Call .withResponse() and verify structure
      const withResponseResult = await result.withResponse();

      // Verify all three properties exist
      if (!withResponseResult.data) {
        throw new Error('.withResponse() did not return data');
      }
      if (!withResponseResult.response) {
        throw new Error('.withResponse() did not return response');
      }
      if (withResponseResult.request_id === undefined) {
        throw new Error('.withResponse() did not return request_id');
      }

      // Verify data structure matches expected OpenAI response
      const { data } = withResponseResult;
      if (data.id !== 'chatcmpl-withresponse') {
        throw new Error(`Expected data.id to be 'chatcmpl-withresponse', got '${data.id}'`);
      }
      if (data.choices[0].message.content !== 'Testing .withResponse() method!') {
        throw new Error(`Expected specific content, got '${data.choices[0].message.content}'`);
      }
      if (data.usage.total_tokens !== 20) {
        throw new Error(`Expected 20 total tokens, got ${data.usage.total_tokens}`);
      }

      // Verify response is a Response object with correct headers
      if (!(withResponseResult.response instanceof Response)) {
        throw new Error('response is not a Response object');
      }
      if (withResponseResult.response.headers.get('x-request-id') !== 'req_withresponse_test') {
        throw new Error(
          `Expected x-request-id header 'req_withresponse_test', got '${withResponseResult.response.headers.get('x-request-id')}'`,
        );
      }

      // Verify request_id matches the header
      if (withResponseResult.request_id !== 'req_withresponse_test') {
        throw new Error(`Expected request_id 'req_withresponse_test', got '${withResponseResult.request_id}'`);
      }

      // Test 2: Verify .asResponse() method works
      const result2 = client.chat.completions.create({
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'Test asResponse' }],
      });

      // Verify method exists
      if (typeof result2.asResponse !== 'function') {
        throw new Error('.asResponse() method does not exist');
      }

      // Call .asResponse() and verify it returns raw Response
      const rawResponse = await result2.asResponse();

      if (!(rawResponse instanceof Response)) {
        throw new Error('.asResponse() did not return a Response object');
      }

      // Verify response has correct status
      if (rawResponse.status !== 200) {
        throw new Error(`Expected status 200, got ${rawResponse.status}`);
      }

      // Verify response headers
      if (rawResponse.headers.get('x-request-id') !== 'req_withresponse_test') {
        throw new Error(
          `Expected x-request-id header 'req_withresponse_test', got '${rawResponse.headers.get('x-request-id')}'`,
        );
      }

      // Verify we can manually parse the body
      const body = await rawResponse.json();
      if (body.id !== 'chatcmpl-withresponse') {
        throw new Error(`Expected body.id 'chatcmpl-withresponse', got '${body.id}'`);
      }
      if (body.choices[0].message.content !== 'Testing .withResponse() method!') {
        throw new Error(`Expected specific content in body, got '${body.choices[0].message.content}'`);
      }
    });
  } finally {
    // Close even when an assertion above throws, so the process can exit
    // instead of hanging on the open listener.
    server.close();
  }
}

run().catch(err => {
  // Surface failures with a non-zero exit code instead of an unhandled rejection.
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});
38 changes: 38 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/openai/test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -945,4 +945,42 @@ describe('OpenAI integration', () => {
});
},
);

// Regression coverage: the OpenAI instrumentation must preserve the SDK's
// APIPromise helpers (.withResponse() / .asResponse()) rather than replace
// them with a bare Promise. The scenario file throws if either helper is
// missing or returns the wrong shape, so both spans reaching `status: 'ok'`
// proves the helpers survived instrumentation.
createEsmAndCjsTests(__dirname, 'scenario-with-response.mjs', 'instrument.mjs', (createRunner, test) => {
  test('preserves .withResponse() method and works correctly', async () => {
    await createRunner()
      // Only the transaction envelope matters here; drop error events.
      .ignore('event')
      .expect({
        transaction: {
          transaction: 'main',
          spans: expect.arrayContaining([
            // First call using .withResponse()
            expect.objectContaining({
              data: expect.objectContaining({
                [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
                [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
                [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-withresponse',
              }),
              description: 'chat gpt-4',
              op: 'gen_ai.chat',
              status: 'ok',
            }),
            // Second call using .asResponse()
            expect.objectContaining({
              data: expect.objectContaining({
                [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
                [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
                [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-withresponse',
              }),
              description: 'chat gpt-4',
              op: 'gen_ai.chat',
              status: 'ok',
            }),
          ]),
        },
      })
      .start()
      .completed();
  });
});
});
Loading
Loading