@@ -62,7 +62,13 @@ def _normalize_langgraph_message(message):
 
     parsed = {"role": getattr(message, "type", None), "content": message.content}
 
-    for attr in ["name", "tool_calls", "function_call", "tool_call_id"]:
+    for attr in [
+        "name",
+        "tool_calls",
+        "function_call",
+        "tool_call_id",
+        "response_metadata",
+    ]:
         if hasattr(message, attr):
             value = getattr(message, attr)
             if value is not None:
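
Note: `response_metadata` is where LangChain chat messages carry provider metadata such as `token_usage`; copying it through normalization is what makes the usage accounting in the second hunk possible. A minimal, hypothetical sketch of the shape of a normalized message after this change (the stub class and token counts below are illustrative, not taken from the SDK):

```python
# Hypothetical stand-in for a LangChain AI message; only the attributes that
# _normalize_langgraph_message reads are modeled, and the values are made up.
class StubAIMessage:
    type = "ai"
    content = "Hello!"
    response_metadata = {
        "token_usage": {"prompt_tokens": 12, "completion_tokens": 5, "total_tokens": 17}
    }

# With "response_metadata" in the attribute list, the normalized dict becomes:
# {
#     "role": "ai",
#     "content": "Hello!",
#     "response_metadata": {"token_usage": {"prompt_tokens": 12, ...}},
# }
```
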
@@ -311,14 +317,51 @@ def _extract_tool_calls(messages):
     return tool_calls if tool_calls else None
 
 
+def _set_usage_data(span, messages):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    input_tokens = 0
+    output_tokens = 0
+    total_tokens = 0
+
+    for message in messages:
+        response_metadata = message.get("response_metadata")
+        if response_metadata is None:
+            continue
+
+        token_usage = response_metadata.get("token_usage")
+        if not token_usage:
+            continue
+
+        input_tokens += int(token_usage.get("prompt_tokens", 0))
+        output_tokens += int(token_usage.get("completion_tokens", 0))
+        total_tokens += int(token_usage.get("total_tokens", 0))
+
+    if input_tokens > 0:
+        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
+
+    if output_tokens > 0:
+        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
+
+    if total_tokens > 0:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS,
+            total_tokens,
+        )
+
+
 def _set_response_attributes(span, input_messages, result, integration):
     # type: (Any, Optional[List[Any]], Any, LanggraphIntegration) -> None
-    if not (should_send_default_pii() and integration.include_prompts):
-        return
-
     parsed_response_messages = _parse_langgraph_messages(result)
     new_messages = _get_new_messages(input_messages, parsed_response_messages)
 
+    if new_messages is None:
+        return
+
+    _set_usage_data(span, new_messages)
+
+    if not (should_send_default_pii() and integration.include_prompts):
+        return
+
     llm_response_text = _extract_llm_response_text(new_messages)
     if llm_response_text:
         set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, llm_response_text)
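
For context, `_set_usage_data` sums the `token_usage` counters found in each new message's `response_metadata` and writes the non-zero totals onto the span, which is why usage is now recorded even when prompts themselves are not sent. A minimal sketch of that aggregation, assuming the dict-shaped messages produced by `_normalize_langgraph_message` (the `sum_token_usage` helper and the sample data are illustrative, not part of the SDK):

```python
# Illustrative replica of the aggregation loop in _set_usage_data; the helper
# name and the sample messages are hypothetical.
def sum_token_usage(messages):
    totals = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
    for message in messages:
        token_usage = (message.get("response_metadata") or {}).get("token_usage")
        if not token_usage:
            continue  # e.g. tool results carry no usage info
        for key in totals:
            totals[key] += int(token_usage.get(key, 0))
    return totals


messages = [
    {
        "role": "ai",
        "content": "Hello!",
        "response_metadata": {
            "token_usage": {"prompt_tokens": 12, "completion_tokens": 5, "total_tokens": 17}
        },
    },
    {"role": "tool", "content": "42"},  # skipped: no response_metadata
]

print(sum_token_usage(messages))
# {'prompt_tokens': 12, 'completion_tokens': 5, 'total_tokens': 17}
```
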