Commit 530595a

Author: Neo Kusanagi (committed)
feat(js/plugins/compat-oai): Add reasoning_content support to OpenAI conversions
This commit adds support for handling reasoning_content in both streaming and non-streaming OpenAI-compatible Chat Completions responses, allowing models that support reasoning_content to properly return their reasoning traces. ref: https://docs.x.ai/docs/guides/reasoning

CHANGELOG:
- [x] Add reasoning_content handling in fromOpenAIChoice
- [x] Add reasoning_content handling in fromOpenAIChunkChoice
- [x] Add test cases for reasoning_content in both functions
1 parent 0095beb commit 530595a
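
For context, a hedged sketch of the conversion this commit enables. The choice object below follows the shapes used in the new tests (and the xAI reasoning guide referenced above); calling fromOpenAIChoice with just the choice is an assumption about the helper's signature, shown only to illustrate the expected mapping.

// A non-streaming Chat Completions choice that carries reasoning_content.
const choice = {
  index: 0,
  finish_reason: 'stop',
  logprobs: null,
  message: {
    role: 'assistant',
    content: 'Final answer',
    reasoning_content: 'Let me think...',
    refusal: null,
  },
};

// With this commit, the converted Genkit message keeps the reasoning trace
// as its own part ahead of the regular text part:
// fromOpenAIChoice(choice) ~> {
//   finishReason: 'stop',
//   message: {
//     role: 'model',
//     content: [{ reasoning: 'Let me think...' }, { text: 'Final answer' }],
//   },
// }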

2 files changed
+123 -18 lines changed


js/plugins/compat-oai/src/model.ts

Lines changed: 44 additions & 18 deletions
@@ -277,19 +277,31 @@ export function fromOpenAIChoice(
   const toolRequestParts = choice.message.tool_calls?.map((toolCall) =>
     fromOpenAIToolCall(toolCall, choice)
   );
+
+  // Build content array based on what's present in the message
+  let content: Part[] = [];
+
+  if (toolRequestParts) {
+    content = toolRequestParts as ToolRequestPart[];
+  } else {
+    // Handle reasoning_content if present
+    if ('reasoning_content' in choice.message && choice.message.reasoning_content) {
+      content.push({ reasoning: choice.message.reasoning_content as string });
+    }
+
+    // Handle regular content
+    content.push(
+      jsonMode
+        ? { data: JSON.parse(choice.message.content!) }
+        : { text: choice.message.content! }
+    );
+  }
+
   return {
     finishReason: finishReasonMap[choice.finish_reason] || 'other',
     message: {
       role: 'model',
-      content: toolRequestParts
-        ? // Note: Not sure why I have to cast here exactly.
-          // Otherwise it thinks toolRequest must be 'undefined' if provided
-          (toolRequestParts as ToolRequestPart[])
-        : [
-            jsonMode
-              ? { data: JSON.parse(choice.message.content!) }
-              : { text: choice.message.content! },
-          ],
+      content,
     },
   };
 }

@@ -308,21 +320,35 @@ export function fromOpenAIChunkChoice(
   const toolRequestParts = choice.delta.tool_calls?.map((toolCall) =>
     fromOpenAIToolCall(toolCall, choice)
   );
+
+  // Build content array based on what's present in the delta
+  let content: Part[] = [];
+
+  if (toolRequestParts) {
+    content = toolRequestParts as ToolRequestPart[];
+  } else {
+    // Handle reasoning_content if present
+    if ('reasoning_content' in choice.delta && choice.delta.reasoning_content) {
+      content.push({ reasoning: choice.delta.reasoning_content as string });
+    }
+
+    // Handle regular content if present
+    if (choice.delta.content) {
+      content.push(
+        jsonMode
+          ? { data: JSON.parse(choice.delta.content) }
+          : { text: choice.delta.content }
+      );
+    }
+  }
+
   return {
     finishReason: choice.finish_reason
       ? finishReasonMap[choice.finish_reason] || 'other'
       : 'unknown',
     message: {
       role: 'model',
-      content: toolRequestParts
-        ? // Note: Not sure why I have to cast here exactly.
-          // Otherwise it thinks toolRequest must be 'undefined' if provided
-          (toolRequestParts as ToolRequestPart[])
-        : [
-            jsonMode
-              ? { data: JSON.parse(choice.delta.content!) }
-              : { text: choice.delta.content! },
-          ],
+      content,
     },
   };
 }
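
To illustrate the streaming path above, a hedged sketch mirroring the new chunk test cases; calling fromOpenAIChunkChoice with only the chunk choice (jsonMode left at its default) is an assumption here.

// A mid-stream delta that carries only reasoning_content; finish_reason is
// still null, so the converted chunk reports finishReason 'unknown'.
const chunkChoice = {
  index: 0,
  finish_reason: null,
  delta: {
    role: 'assistant',
    reasoning_content: 'Let me think about this step by step...',
  },
};

// fromOpenAIChunkChoice(chunkChoice) ~> {
//   finishReason: 'unknown',
//   message: {
//     role: 'model',
//     content: [{ reasoning: 'Let me think about this step by step...' }],
//   },
// }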

js/plugins/compat-oai/tests/compat_oai_test.ts

Lines changed: 79 additions & 0 deletions
@@ -412,6 +412,48 @@ describe('fromOpenAiChoice', () => {
         finishReason: 'stop',
       },
     },
+    {
+      should: 'should work with reasoning_content',
+      choice: {
+        index: 0,
+        message: {
+          role: 'assistant',
+          content: null,
+          reasoning_content: 'Let me think about this step by step...',
+          refusal: null,
+        } as any,
+        finish_reason: 'stop',
+        logprobs: null,
+      },
+      expectedOutput: {
+        finishReason: 'stop',
+        message: {
+          role: 'model',
+          content: [{ reasoning: 'Let me think about this step by step...' }],
+        },
+      },
+    },
+    {
+      should: 'should work with both reasoning_content and content',
+      choice: {
+        index: 0,
+        message: {
+          role: 'assistant',
+          content: 'Final answer',
+          reasoning_content: 'Let me think...',
+          refusal: null,
+        } as any,
+        finish_reason: 'stop',
+        logprobs: null,
+      },
+      expectedOutput: {
+        finishReason: 'stop',
+        message: {
+          role: 'model',
+          content: [{ reasoning: 'Let me think...' }, { text: 'Final answer' }],
+        },
+      },
+    },
   ];

   for (const test of testCases) {

@@ -503,6 +545,43 @@ describe('fromOpenAiChunkChoice', () => {
         finishReason: 'stop',
       },
     },
+    {
+      should: 'should work with reasoning_content',
+      chunkChoice: {
+        index: 0,
+        delta: {
+          role: 'assistant',
+          reasoning_content: 'Let me think about this step by step...',
+        } as any,
+        finish_reason: null,
+      },
+      expectedOutput: {
+        finishReason: 'unknown',
+        message: {
+          role: 'model',
+          content: [{ reasoning: 'Let me think about this step by step...' }],
+        },
+      },
+    },
+    {
+      should: 'should work with both reasoning_content and content',
+      chunkChoice: {
+        index: 0,
+        delta: {
+          role: 'assistant',
+          reasoning_content: 'Let me think...',
+          content: 'Final answer',
+        } as any,
+        finish_reason: 'stop',
+      },
+      expectedOutput: {
+        finishReason: 'stop',
+        message: {
+          role: 'model',
+          content: [{ reasoning: 'Let me think...' }, { text: 'Final answer' }],
+        },
+      },
+    },
   ];

   for (const test of testCases) {
