
Commit f0083a7

feat: add Bedrock agent tracer
1 parent 476bea6 commit f0083a7

2 files changed: +373 additions, −0 deletions

examples/bedrock-agent-tracing.ts

Lines changed: 83 additions & 0 deletions
@@ -0,0 +1,83 @@
import { BedrockAgentRuntimeClient, InvokeAgentCommand } from '@aws-sdk/client-bedrock-agent-runtime';
import { traceBedrockAgent } from '../src/lib/integrations/bedrockAgentTracer';

/**
 * Simple example to test the Bedrock agent tracer.
 * Replace the agent IDs with your actual Bedrock agent configuration.
 */
export const simpleBedrockExample = async () => {
  // Step 0: Make sure OPENLAYER_API_KEY and OPENLAYER_INFERENCE_PIPELINE_ID are set as environment variables

  // Step 1: Create the Bedrock client
  const client = new BedrockAgentRuntimeClient({
    region: 'us-west-2', // Update this to match your agent's region
  });

  // Step 2: Apply Openlayer tracing
  const tracedClient = traceBedrockAgent(client);

  // Step 3: Configure your agent details
  const agentId = 'YOUR_AGENT_ID'; // Your actual agent ID
  const agentAliasId = 'YOUR_ALIAS_ID'; // The alias ID you created for your agent
  const sessionId = `test-session-${Date.now()}`;

  // Step 4: Create the command
  const command = new InvokeAgentCommand({
    agentId,
    agentAliasId,
    sessionId,
    inputText: 'What is the capital of the moon?',
    enableTrace: true,
  });

  try {
    console.log('🤖 Sending request to Bedrock agent...');

    // Step 5: Send the command and process the response
    const response = await tracedClient.send(command);

    if (!response.completion) {
      throw new Error('No completion received');
    }

    let finalOutput = '';

    // Step 6: Process the streaming response
    for await (const event of response.completion) {
      if (event.chunk?.bytes) {
        const text = new TextDecoder('utf-8').decode(event.chunk.bytes);
        finalOutput += text;
        process.stdout.write(text); // Real-time output
      }
    }

    console.log('\n\n✅ Request completed successfully!');
    console.log('📊 Check your Openlayer dashboard for trace data');

    return {
      sessionId,
      output: finalOutput,
      success: true,
    };
  } catch (error) {
    console.error('❌ Error:', error);
    return {
      sessionId,
      output: '',
      success: false,
      error: error instanceof Error ? error.message : String(error),
    };
  }
};

// Run the example if this file is executed directly
if (require.main === module) {
  simpleBedrockExample()
    .then((result) => {
      console.log('\n📋 Final result:', result);
    })
    .catch((error) => {
      console.error('💥 Failed to run example:', error);
    });
}
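
The example fails only at request time if the Openlayer environment variables from Step 0 are missing. A minimal pre-flight check, not part of this commit, that could sit at the top of simpleBedrockExample (only the two variable names above are assumed):

const requiredEnv = ['OPENLAYER_API_KEY', 'OPENLAYER_INFERENCE_PIPELINE_ID'];
const missing = requiredEnv.filter((name) => !process.env[name]);
if (missing.length > 0) {
  // Fail fast with a clear message instead of a mid-request error
  throw new Error(`Missing environment variables: ${missing.join(', ')}`);
}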

src/lib/integrations/bedrockAgentTracer.ts

Lines changed: 290 additions & 0 deletions
@@ -0,0 +1,290 @@
import {
  BedrockAgentRuntimeClient,
  InvokeAgentCommand,
  InvokeAgentCommandInput,
  InvokeAgentCommandOutput,
} from '@aws-sdk/client-bedrock-agent-runtime';
import { addChatCompletionStepToTrace } from '../tracing/tracer';

export function traceBedrockAgent(client: BedrockAgentRuntimeClient): BedrockAgentRuntimeClient {
  const originalSend = client.send.bind(client);

  client.send = async function (this: BedrockAgentRuntimeClient, command: any, options?: any): Promise<any> {
    // Only trace InvokeAgentCommand
    if (!(command instanceof InvokeAgentCommand)) {
      return originalSend(command, options);
    }

    const startTime = performance.now();
    const input = command.input as InvokeAgentCommandInput;

    try {
      // Call the original send method
      const response = (await originalSend(command, options)) as InvokeAgentCommandOutput;

      if (!response.completion) {
        throw new Error('Completion is undefined');
      }

      // Create a traced async iterator that preserves the original
      const tracedCompletion = createTracedCompletion(response.completion, input, startTime);

      // Return the response with the traced completion
      return {
        ...response,
        completion: tracedCompletion,
      };
    } catch (error) {
      console.error('Failed to trace the Bedrock agent invocation with Openlayer', error);
      throw error;
    }
  };

  return client;
}

// Create a traced completion that collects data while yielding original events
function createTracedCompletion(
  originalCompletion: AsyncIterable<any>,
  input: InvokeAgentCommandInput,
  startTime: number,
): AsyncIterable<any> {
  return {
    async *[Symbol.asyncIterator]() {
      let firstTokenTime: number | undefined;
      let totalTokens = 0;
      let promptTokens = 0;
      let completionTokens = 0;
      let collectedOutput = '';
      const rawOutputChunks: any[] = [];
      let agentModel: string | null = null;
      const citations: any[] = [];
      const traceData: any[] = [];
      let chunkCount = 0;

      try {
        for await (const chunkEvent of originalCompletion) {
          // Yield first - ensure the user gets data immediately
          yield chunkEvent;

          // Then collect tracing data
          if (chunkCount === 0) {
            firstTokenTime = performance.now();
          }
          chunkCount++;

          // Handle chunk events
          if (chunkEvent.chunk) {
            const chunk = chunkEvent.chunk;
            rawOutputChunks.push(chunk);

            if (chunk.bytes) {
              const decodedResponse = new TextDecoder('utf-8').decode(chunk.bytes);
              collectedOutput += decodedResponse;
              completionTokens += 1;
            }

            if (chunk.attribution && chunk.attribution.citations) {
              citations.push(...chunk.attribution.citations);
            }
          }

          // Handle trace events
          if (chunkEvent.trace) {
            traceData.push(chunkEvent.trace);

            if (chunkEvent.trace.trace) {
              const trace = chunkEvent.trace.trace;

              // Extract tokens and model info
              if (
                'orchestrationTrace' in trace &&
                trace.orchestrationTrace?.modelInvocationOutput?.metadata?.usage
              ) {
                const usage = trace.orchestrationTrace.modelInvocationOutput.metadata.usage;
                promptTokens += usage.inputTokens || 0;
                completionTokens += usage.outputTokens || 0;
              }

              if (
                'orchestrationTrace' in trace &&
                trace.orchestrationTrace?.modelInvocationInput?.foundationModel
              ) {
                agentModel = trace.orchestrationTrace.modelInvocationInput.foundationModel;
              }
            }
          }
        }

        // After the stream is complete, send trace data
        const endTime = performance.now();
        totalTokens = promptTokens + completionTokens;

        // Send trace data to Openlayer
        const inputs = extractInputs(input, traceData);
        const metadata: Record<string, any> = {
          agentId: input.agentId,
          agentAliasId: input.agentAliasId,
          sessionId: input.sessionId,
          timeToFirstToken: firstTokenTime ? firstTokenTime - startTime : null,
        };

        if (citations.length > 0) {
          metadata['citations'] = citations;
        }

        const reasoning = extractReasoning(traceData);
        if (reasoning && reasoning.length > 0) {
          metadata['reasoning'] = reasoning;
        }

        if (input.sessionState) {
          metadata['sessionState'] = {
            hasSessionAttributes: !!input.sessionState.sessionAttributes,
            hasPromptSessionAttributes: !!input.sessionState.promptSessionAttributes,
            hasFiles: !!input.sessionState.files && input.sessionState.files.length > 0,
            hasKnowledgeBaseConfigurations:
              !!input.sessionState.knowledgeBaseConfigurations &&
              input.sessionState.knowledgeBaseConfigurations.length > 0,
          };
        }

        const traceStepData = {
          name: 'AWS Bedrock Agent Invocation',
          inputs: inputs,
          output: collectedOutput,
          latency: endTime - startTime,
          tokens: totalTokens > 0 ? totalTokens : null,
          promptTokens: promptTokens > 0 ? promptTokens : null,
          completionTokens: completionTokens > 0 ? completionTokens : null,
          model: agentModel || `${input.agentId}:${input.agentAliasId}`,
          modelParameters: extractModelParameters(input),
          rawOutput: JSON.stringify(rawOutputChunks, null, 2),
          metadata: metadata,
          provider: 'Bedrock',
        };

        addChatCompletionStepToTrace(traceStepData);
      } catch (error) {
        console.error('Error in traced completion:', error);
        // Don't rethrow - we don't want tracing errors to break the user's stream
      }
    },
  };
}

function extractInputs(input: InvokeAgentCommandInput, traceData: any[]): Record<string, any> {
  const inputs: Record<string, any> = {};

  // Build the prompt in OpenAI-compatible format
  const prompt: Array<{ role: string; content: string }> = [];

  // Add the main user message
  if (input.inputText) {
    prompt.push({
      role: 'user',
      content: input.inputText,
    });
  }

  // Add conversation history if present
  if (input.sessionState?.conversationHistory?.messages) {
    for (const message of input.sessionState.conversationHistory.messages) {
      const content =
        message.content ?
          message.content.map((block) => ('text' in block ? block.text || '' : '')).join('')
        : '';

      const role = message.role || 'user';

      prompt.unshift({
        role: role,
        content: content,
      });
    }
  }

  // Extract system prompt from trace data if available
  const systemPrompt = extractSystemPrompt(traceData);
  if (systemPrompt) {
    prompt.unshift({
      role: 'system',
      content: systemPrompt,
    });
  }

  inputs['prompt'] = prompt;

  // Add additional context as separate fields
  if (input.sessionState?.sessionAttributes) {
    inputs['sessionAttributes'] = input.sessionState.sessionAttributes;
  }

  if (input.sessionState?.promptSessionAttributes) {
    inputs['promptSessionAttributes'] = input.sessionState.promptSessionAttributes;
  }

  if (input.sessionState?.files && input.sessionState.files.length > 0) {
    inputs['files'] = input.sessionState.files.map((file) => ({
      name: file.name,
      useCase: file.useCase,
      sourceType: file.source?.sourceType,
    }));
  }

  return inputs;
}

function extractSystemPrompt(traceData: any[]): string | null {
  for (const trace of traceData) {
    if (trace.trace?.orchestrationTrace?.modelInvocationInput?.text) {
      try {
        const parsed = JSON.parse(trace.trace.orchestrationTrace.modelInvocationInput.text);
        if (parsed.system) {
          return parsed.system;
        }
      } catch (e) {
        // If parsing fails, continue
      }
    }
  }
  return null;
}

function extractReasoning(traceData: any[]): string[] | undefined {
  const reasoning: string[] = [];

  for (const trace of traceData) {
    if (trace.trace?.orchestrationTrace?.rationale?.text) {
      reasoning.push(trace.trace.orchestrationTrace.rationale.text);
    }
  }

  return reasoning.length > 0 ? reasoning : undefined;
}

function extractModelParameters(input: InvokeAgentCommandInput): Record<string, any> {
  const params: Record<string, any> = {};

  if (input.enableTrace !== undefined) {
    params['enableTrace'] = input.enableTrace;
  }

  if (input.endSession !== undefined) {
    params['endSession'] = input.endSession;
  }

  if (input.bedrockModelConfigurations) {
    params['bedrockModelConfigurations'] = input.bedrockModelConfigurations;
  }

  if (input.streamingConfigurations) {
    params['streamingConfigurations'] = input.streamingConfigurations;
  }

  if (input.promptCreationConfigurations) {
    params['promptCreationConfigurations'] = input.promptCreationConfigurations;
  }

  return params;
}
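
A behavioral note on the implementation above: addChatCompletionStepToTrace runs only after the wrapped async iterator has been exhausted, so callers must consume the completion stream to the end for a trace step to be recorded. A minimal sketch of that contract (the relative import path, region, and agent IDs are placeholders, not part of this commit):

import { BedrockAgentRuntimeClient, InvokeAgentCommand } from '@aws-sdk/client-bedrock-agent-runtime';
import { traceBedrockAgent } from './bedrockAgentTracer';

const tracedClient = traceBedrockAgent(new BedrockAgentRuntimeClient({ region: 'us-west-2' }));

export const runTracedInvocation = async (): Promise<string> => {
  const response = await tracedClient.send(
    new InvokeAgentCommand({
      agentId: 'YOUR_AGENT_ID', // placeholder
      agentAliasId: 'YOUR_ALIAS_ID', // placeholder
      sessionId: `session-${Date.now()}`,
      inputText: 'Hello',
      enableTrace: true, // lets the tracer read model and token usage from orchestration traces
    }),
  );

  let output = '';
  for await (const event of response.completion ?? []) {
    if (event.chunk?.bytes) {
      output += new TextDecoder('utf-8').decode(event.chunk.bytes);
    }
  }
  // The trace step is emitted only here, once the stream has been fully consumed.
  return output;
};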
