
Commit 16af5f9

feat: introduce the OpenlayerHandler, which implements the LangChain callback handler interface
1 parent 4645d45 commit 16af5f9

File tree

1 file changed: 141 additions (+), 0 deletions (−)
@@ -0,0 +1,141 @@
import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
import { LLMResult } from '@langchain/core/outputs';
import type { Serialized } from '@langchain/core/load/serializable';
import { AIMessage, BaseMessage, SystemMessage } from '@langchain/core/messages';
import { addChatCompletionStepToTrace } from '../tracing/tracer';

const LANGCHAIN_TO_OPENLAYER_PROVIDER_MAP: Record<string, string> = {
  openai: 'OpenAI',
  'openai-chat': 'OpenAI',
  'chat-ollama': 'Ollama',
  vertexai: 'Google',
};

const PROVIDER_TO_STEP_NAME: Record<string, string> = {
  OpenAI: 'OpenAI Chat Completion',
  Ollama: 'Ollama Chat Completion',
  Google: 'Google Vertex AI Chat Completion',
};

export class OpenlayerHandler extends BaseCallbackHandler {
  name = 'OpenlayerHandler';
  startTime: number | null = null;
  endTime: number | null = null;
  prompt: Array<{ role: string; content: string }> | null = null;
  latency: number = 0;
  provider: string | undefined;
  model: string | null = null;
  modelParameters: Record<string, any> | null = null;
  promptTokens: number | null = 0;
  completionTokens: number | null = 0;
  totalTokens: number | null = 0;
  output: string = '';
  metadata: Record<string, any>;

  constructor(kwargs: Record<string, any> = {}) {
    super();
    this.metadata = kwargs;
  }

  override async handleChatModelStart(
    llm: Serialized,
    messages: BaseMessage[][],
    runId: string,
    parentRunId?: string | undefined,
    extraParams?: Record<string, unknown> | undefined,
    tags?: string[] | undefined,
    metadata?: Record<string, unknown> | undefined,
    name?: string,
  ): Promise<void> {
    this.initializeRun(extraParams || {}, metadata || {});
    this.prompt = this.langchainMessagesToPrompt(messages);
    this.startTime = performance.now();
  }

  private initializeRun(extraParams: Record<string, any>, metadata: Record<string, unknown>): void {
    this.modelParameters = extraParams['invocation_params'] || {};

    const provider = metadata?.['ls_provider'] as string;
    if (provider && LANGCHAIN_TO_OPENLAYER_PROVIDER_MAP[provider]) {
      this.provider = LANGCHAIN_TO_OPENLAYER_PROVIDER_MAP[provider];
    }
    this.model = (this.modelParameters?.['model'] as string) || (metadata['ls_model_name'] as string) || null;
    this.output = '';
  }

  private langchainMessagesToPrompt(messages: BaseMessage[][]): Array<{ role: string; content: string }> {
    const prompt: Array<{ role: string; content: string }> = [];
    for (const message of messages) {
      for (const m of message) {
        if (m instanceof AIMessage) {
          prompt.push({ role: 'assistant', content: m.content as string });
        } else if (m instanceof SystemMessage) {
          prompt.push({ role: 'system', content: m.content as string });
        } else {
          prompt.push({ role: 'user', content: m.content as string });
        }
      }
    }
    return prompt;
  }
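  // Illustrative mapping (HumanMessage assumed from '@langchain/core/messages'):
  //   [[new SystemMessage('Be terse'), new HumanMessage('Hi')]]
  //   -> [{ role: 'system', content: 'Be terse' }, { role: 'user', content: 'Hi' }]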

  override async handleLLMStart(
    llm: Serialized,
    prompts: string[],
    runId: string,
    parentRunId?: string,
    extraParams?: Record<string, unknown>,
    tags?: string[],
    metadata?: Record<string, unknown>,
    runName?: string,
  ) {
    this.initializeRun(extraParams || {}, metadata || {});
    this.prompt = prompts.map((p) => ({ role: 'user', content: p }));
    this.startTime = performance.now();
  }

  override async handleLLMEnd(output: LLMResult, runId: string, parentRunId?: string, tags?: string[]) {
    this.endTime = performance.now();
    this.latency = this.endTime - this.startTime!;
    this.extractTokenInformation(output);
    this.extractOutput(output);
    this.addToTrace();
  }

  private extractTokenInformation(output: LLMResult) {
    if (this.provider === 'OpenAI') {
      this.openaiTokenInformation(output);
    }
  }

  private openaiTokenInformation(output: LLMResult) {
    if (output.llmOutput && 'tokenUsage' in output.llmOutput) {
      this.promptTokens = output.llmOutput?.['tokenUsage']?.promptTokens ?? 0;
      this.completionTokens = output.llmOutput?.['tokenUsage']?.completionTokens ?? 0;
      this.totalTokens = output.llmOutput?.['tokenUsage']?.totalTokens ?? 0;
    }
  }
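  // Illustrative llmOutput for an OpenAI chat call (values are made up):
  //   { tokenUsage: { promptTokens: 12, completionTokens: 34, totalTokens: 46 } }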

  private extractOutput(output: LLMResult) {
    const lastResponse = output?.generations?.at(-1)?.at(-1) ?? undefined;
    this.output += lastResponse?.text ?? '';
  }

  private addToTrace() {
    let name = 'Chat Completion Model';
    if (this.provider && this.provider in PROVIDER_TO_STEP_NAME) {
      name = PROVIDER_TO_STEP_NAME[this.provider] ?? 'Chat Completion Model';
    }
    addChatCompletionStepToTrace({
      name: name,
      inputs: { prompt: this.prompt },
      output: this.output,
      latency: this.latency,
      tokens: this.totalTokens,
      promptTokens: this.promptTokens,
      completionTokens: this.completionTokens,
      model: this.model,
      modelParameters: this.modelParameters,
      metadata: this.metadata,
      provider: this.provider ?? '',
    });
  }
}
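
For reference, a handler like this is attached through LangChain's callbacks option. Below is a minimal usage sketch, assuming @langchain/openai is installed and OPENAI_API_KEY is set; the model name, prompt, and the import path for the file added in this commit are illustrative:

import { ChatOpenAI } from '@langchain/openai';
import { OpenlayerHandler } from './langchain-callback'; // hypothetical path to this file

async function main() {
  // Constructor kwargs are stored as `metadata` and attached to the trace step.
  const handler = new OpenlayerHandler({ environment: 'development' });
  const model = new ChatOpenAI({ model: 'gpt-4o-mini' }); // illustrative model name

  // Passing the handler via `callbacks` scopes it to this invocation:
  // handleChatModelStart fires before the request, handleLLMEnd after it.
  const response = await model.invoke('What is the capital of France?', {
    callbacks: [handler],
  });
  console.log(response.content);
}

main();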
