From 45908430bf92c6399f91ff5ff12df402e4a88ec8 Mon Sep 17 00:00:00 2001
From: Lam Nguyen
Date: Tue, 22 Jul 2025 21:09:31 +0100
Subject: [PATCH] fix: Use candidates_token_count for output tokens in
 telemetry

The gen_ai.usage.output_tokens span attribute was populated from
usage_metadata.total_token_count, which also counts prompt tokens, so
output token usage was over-reported. usage_metadata.candidates_token_count
covers only the tokens generated in the response, which is what the
OpenTelemetry GenAI semantic convention expects for output_tokens.
---
 src/google/adk/telemetry.py       | 2 +-
 tests/unittests/test_telemetry.py | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/google/adk/telemetry.py b/src/google/adk/telemetry.py
index a09c2f55b..10ac58399 100644
--- a/src/google/adk/telemetry.py
+++ b/src/google/adk/telemetry.py
@@ -202,7 +202,7 @@ def trace_call_llm(
   )
   span.set_attribute(
       'gen_ai.usage.output_tokens',
-      llm_response.usage_metadata.total_token_count,
+      llm_response.usage_metadata.candidates_token_count,
   )

diff --git a/tests/unittests/test_telemetry.py b/tests/unittests/test_telemetry.py
index cf115d5f0..8a3964b21 100644
--- a/tests/unittests/test_telemetry.py
+++ b/tests/unittests/test_telemetry.py
@@ -155,7 +155,9 @@ async def test_trace_call_llm_usage_metadata(monkeypatch, mock_span_fixture):
   llm_response = LlmResponse(
       turn_complete=True,
       usage_metadata=types.GenerateContentResponseUsageMetadata(
-          total_token_count=100, prompt_token_count=50
+          total_token_count=100,
+          prompt_token_count=50,
+          candidates_token_count=50,
       ),
   )
   trace_call_llm(invocation_context, 'test_event_id', llm_request, llm_response)
@@ -163,7 +165,7 @@
   expected_calls = [
       mock.call('gen_ai.system', 'gcp.vertex.agent'),
       mock.call('gen_ai.usage.input_tokens', 50),
-      mock.call('gen_ai.usage.output_tokens', 100),
+      mock.call('gen_ai.usage.output_tokens', 50),
   ]
   assert mock_span_fixture.set_attribute.call_count == 9
   mock_span_fixture.set_attribute.assert_has_calls(
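
Note (not part of the patch): a minimal sketch of the over-counting this
change fixes, assuming the google-genai convention that total_token_count
= prompt_token_count + candidates_token_count (cached-content and thought
tokens left out for simplicity):

    from google.genai import types

    # Same usage metadata the updated test constructs.
    usage = types.GenerateContentResponseUsageMetadata(
        total_token_count=100,
        prompt_token_count=50,
        candidates_token_count=50,
    )

    # Before the patch, gen_ai.usage.output_tokens was set from
    # total_token_count (100), double-counting the 50 prompt tokens.
    # After the patch it comes from candidates_token_count (50).
    output_tokens = usage.candidates_token_count
    assert output_tokens == usage.total_token_count - usage.prompt_token_count
    print('gen_ai.usage.output_tokens =', output_tokens)  # 50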