Skip to content

Commit a457dc3

Browse files
feat: add initial working version of the OpenAI tracer
1 parent 84480cf commit a457dc3

File tree

2 files changed

+105
-11
lines changed

2 files changed

+105
-11
lines changed
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
import OpenAI from 'openai';
2+
import { Stream } from 'openai/src/streaming';
3+
import { addChatCompletionStepToTrace } from '../tracing/tracer';
4+
5+
export function traceOpenAI(openai: OpenAI): OpenAI {
6+
const createFunction = openai.chat.completions.create;
7+
8+
openai.chat.completions.create = async function (
9+
this: typeof openai.chat.completions,
10+
...args: Parameters<typeof createFunction>
11+
): Promise<Stream<OpenAI.Chat.Completions.ChatCompletionChunk> | OpenAI.Chat.Completions.ChatCompletion> {
12+
const [params, options = { stream: false }] = args;
13+
try {
14+
const startTime = performance.now();
15+
if (options.stream) {
16+
console.log('streaming not implemented yet');
17+
return createFunction.apply(this, args) as unknown as Promise<
18+
Stream<OpenAI.Chat.Completions.ChatCompletionChunk>
19+
>;
20+
} else {
21+
const response = (await createFunction.apply(this, args)) as OpenAI.Chat.Completions.ChatCompletion;
22+
const completion = response.choices[0];
23+
const endTime = performance.now();
24+
const traceData = {
25+
name: 'OpenAI Chat Completion',
26+
inputs: { prompt: params.messages },
27+
output: completion?.message.content,
28+
latency: endTime - startTime,
29+
tokens: response?.usage?.total_tokens ?? null,
30+
promptTokens: response?.usage?.prompt_tokens ?? null,
31+
completionTokens: response?.usage?.completion_tokens ?? null,
32+
model: response?.model,
33+
modelParameters: getModelParameters(args),
34+
rawOutput: JSON.stringify(response, null, 2),
35+
metadata: {},
36+
provider: 'OpenAI',
37+
};
38+
addChatCompletionStepToTrace(traceData);
39+
return response;
40+
}
41+
} catch (error) {
42+
console.error('Failed to trace the create chat completion request with Openlayer', error);
43+
throw error;
44+
}
45+
} as typeof createFunction;
46+
47+
return openai;
48+
}
49+
50+
function getModelParameters(args: any): Record<string, any> {
51+
const params = args[0];
52+
return {
53+
frequency_penalty: params?.frequencyPenalty ?? 0,
54+
logit_bias: params?.logitBias ?? null,
55+
logprobs: params?.logprobs ?? false,
56+
top_logprobs: params?.topLogprobs ?? null,
57+
max_tokens: params?.maxTokens ?? null,
58+
n: params?.n ?? 1,
59+
presence_penalty: params?.presencePenalty ?? 0,
60+
seed: params?.seed ?? null,
61+
stop: params?.stop ?? null,
62+
temperature: params?.temperature ?? 1,
63+
top_p: params?.topP ?? 1,
64+
};
65+
}

src/lib/tracing/tracer.ts

Lines changed: 40 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,6 @@ function createStep(
6969
const traceData = getCurrentTrace();
7070
// Post process trace and get the input variable names
7171
const { traceData: processedTraceData, inputVariableNames } = postProcessTrace(traceData!);
72-
console.log('Processed trace data:', JSON.stringify(processedTraceData, null, 2));
73-
console.log('Input variable names:', inputVariableNames);
7472

7573
if (publish && process.env['OPENLAYER_INFERENCE_PIPELINE_ID']) {
7674
client!.inferencePipelines.data.stream(process.env['OPENLAYER_INFERENCE_PIPELINE_ID'], {
@@ -87,7 +85,6 @@ function createStep(
8785
rows: [processedTraceData],
8886
});
8987
}
90-
console.log('Trace data ready for upload:', JSON.stringify(traceData, null, 2));
9188

9289
// Reset the entire trace state
9390
setCurrentTrace(null);
@@ -134,15 +131,47 @@ function trace(fn: Function, stepType: StepType = StepType.USER_CALL, stepName?:
134131
};
135132
}
136133

137-
// Example usage of specialized function to add a chat completion step
138-
export function addChatCompletionStepToTrace(
139-
name: string,
140-
inputs: any,
141-
output: any,
142-
metadata?: Record<string, any>,
143-
) {
/**
 * Adds a chat-completion step to the current trace.
 *
 * Accepts a single options object so tracers (e.g. the OpenAI tracer) can
 * pass whichever LLM-specific fields they know; everything beyond
 * `name`/`inputs`/`output`/`latency` defaults to null or a sensible fallback.
 *
 * @param name - Display name of the step.
 * @param inputs - The inputs to the completion call (e.g. the prompt messages).
 * @param output - The completion output to record.
 * @param latency - Wall-clock latency of the call, in milliseconds.
 * @param tokens - Total token count, if known.
 * @param promptTokens - Prompt token count, if known.
 * @param completionTokens - Completion token count, if known.
 * @param model - Model identifier reported by the provider, if known.
 * @param modelParameters - Request parameters (temperature, top_p, ...), if known.
 * @param rawOutput - Raw serialized provider response, if available.
 * @param metadata - Arbitrary extra metadata to log with the step.
 * @param provider - Provider name; defaults to 'OpenAI'.
 */
export function addChatCompletionStepToTrace({
  name,
  inputs,
  output,
  latency,
  tokens = null,
  promptTokens = null,
  completionTokens = null,
  model = null,
  modelParameters = null,
  rawOutput = null,
  metadata = {},
  provider = 'OpenAI',
}: {
  name: string;
  inputs: any;
  output: any;
  latency: number;
  tokens?: number | null;
  promptTokens?: number | null;
  completionTokens?: number | null;
  model?: string | null;
  modelParameters?: Record<string, any> | null;
  rawOutput?: string | null;
  metadata?: Record<string, any>;
  provider?: string;
}) {
  const [step, endStep] = createStep(name, StepType.CHAT_COMPLETION, inputs, output, metadata);

  // The LLM-specific fields only exist on ChatCompletionStep; presumably
  // createStep's declared return is a base Step type, so narrow before
  // assigning — NOTE(review): confirm createStep returns a ChatCompletionStep
  // for StepType.CHAT_COMPLETION.
  if (step instanceof ChatCompletionStep) {
    step.provider = provider;
    step.promptTokens = promptTokens;
    step.completionTokens = completionTokens;
    step.tokens = tokens;
    step.model = model;
    step.modelParameters = modelParameters;
    step.rawOutput = rawOutput;
    step.latency = latency;
  }

  step.log({ inputs, output, metadata });
  endStep();
}
148177

0 commit comments

Comments
 (0)