From 11687ce19844b00853cdb9a6c60206883576a3fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Fri, 14 Nov 2025 18:49:32 +0100 Subject: [PATCH 1/9] Add metadata to the Agent class. Metadata is attached to the logfire.agent.metadata span attribute. It can either be a string, dict, or a callable taking the RunContext and returning a string or a dict. --- docs/logfire.md | 17 ++++++ .../pydantic_ai/agent/__init__.py | 52 +++++++++++++++++-- tests/test_logfire.py | 45 ++++++++++++++++ 3 files changed, 111 insertions(+), 3 deletions(-) diff --git a/docs/logfire.md b/docs/logfire.md index 94fe349340..5c3a285454 100644 --- a/docs/logfire.md +++ b/docs/logfire.md @@ -92,6 +92,8 @@ including how to instrument other libraries like [HTTPX](https://logfire.pydanti Since Logfire is built on [OpenTelemetry](https://opentelemetry.io/), you can use the Logfire Python SDK to send data to any OpenTelemetry collector, see [below](#using-opentelemetry). +When instrumentation is enabled, the resolved metadata is recorded (JSON encoded) on the run span under the `logfire.agent.metadata` attribute. + ### Debugging To demonstrate how Logfire can let you visualise the flow of a Pydantic AI run, here's the view you get from Logfire while running the [chat app examples](examples/chat-app.md): @@ -356,3 +358,18 @@ Agent.instrument_all(instrumentation_settings) ``` This setting is particularly useful in production environments where compliance requirements or data sensitivity concerns make it necessary to limit what content is sent to your observability platform. + +### Adding Custom Metadata + +Use the agent's `metadata` parameter to attach additional data to the agent's span. +Metadata can be provided as a string, a dictionary, or a callable that reads the [`RunContext`][pydantic_ai.tools.RunContext] to compute values on each run. 
+ +```python {hl_lines="4-5"} +from pydantic_ai import Agent + +agent = Agent( + 'openai:gpt-5', + instrument=True, + metadata=lambda ctx: {'deployment': 'staging', 'tenant': ctx.deps.tenant}, +) +``` diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 4cd353b44a..5fd681cb70 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -32,6 +32,7 @@ HistoryProcessor, ModelRequestNode, UserPromptNode, + build_run_context, capture_run_messages, ) from .._output import OutputToolset @@ -89,6 +90,8 @@ S = TypeVar('S') NoneType = type(None) +AgentMetadataValue = str | dict[str, str] | Callable[[RunContext[AgentDepsT]], str | dict[str, str]] + @dataclasses.dataclass(init=False) class Agent(AbstractAgent[AgentDepsT, OutputDataT]): @@ -130,6 +133,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]): """Options to automatically instrument with OpenTelemetry.""" _instrument_default: ClassVar[InstrumentationSettings | bool] = False + _metadata: AgentMetadataValue[AgentDepsT] | None = dataclasses.field(repr=False) _deps_type: type[AgentDepsT] = dataclasses.field(repr=False) _output_schema: _output.OutputSchema[OutputDataT] = dataclasses.field(repr=False) @@ -175,6 +179,7 @@ def __init__( defer_model_check: bool = False, end_strategy: EndStrategy = 'early', instrument: InstrumentationSettings | bool | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None, event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, ) -> None: ... 
@@ -201,6 +206,7 @@ def __init__( defer_model_check: bool = False, end_strategy: EndStrategy = 'early', instrument: InstrumentationSettings | bool | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None, event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, ) -> None: ... @@ -225,6 +231,7 @@ def __init__( defer_model_check: bool = False, end_strategy: EndStrategy = 'early', instrument: InstrumentationSettings | bool | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None, event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, **_deprecated_kwargs: Any, @@ -276,6 +283,10 @@ def __init__( [`Agent.instrument_all()`][pydantic_ai.Agent.instrument_all] will be used, which defaults to False. See the [Debugging and Monitoring guide](https://ai.pydantic.dev/logfire/) for more info. + metadata: Optional metadata to attach to telemetry for this agent. + Provide a string literal, a dict of string keys and values, or a callable returning one of those values + computed from the [`RunContext`][pydantic_ai.tools.RunContext] on each run. + Metadata is only recorded when instrumentation is enabled. history_processors: Optional list of callables to process the message history before sending it to the model. Each processor takes a list of messages and returns a modified list of messages. Processors can be sync or async and are applied in sequence. 
@@ -292,6 +303,7 @@ def __init__( self._output_type = output_type self.instrument = instrument + self._metadata = metadata self._deps_type = deps_type if mcp_servers := _deprecated_kwargs.pop('mcp_servers', None): @@ -349,6 +361,9 @@ def __init__( self._override_instructions: ContextVar[ _utils.Option[list[str | _system_prompt.SystemPromptFunc[AgentDepsT]]] ] = ContextVar('_override_instructions', default=None) + self._override_metadata: ContextVar[_utils.Option[AgentMetadataValue[AgentDepsT]]] = ContextVar( + '_override_metadata', default=None + ) self._enter_lock = Lock() self._entered_count = 0 @@ -645,6 +660,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: }, ) + run_metadata: str | dict[str, str] | None = None try: async with graph.iter( inputs=user_prompt_node, @@ -656,8 +672,9 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: async with toolset: agent_run = AgentRun(graph_run) yield agent_run - if (final_result := agent_run.result) is not None and run_span.is_recording(): - if instrumentation_settings and instrumentation_settings.include_content: + if instrumentation_settings and run_span.is_recording(): + run_metadata = self._compute_agent_metadata(build_run_context(agent_run.ctx)) + if instrumentation_settings.include_content and (final_result := agent_run.result) is not None: run_span.set_attribute( 'final_result', ( @@ -671,18 +688,32 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: if instrumentation_settings and run_span.is_recording(): run_span.set_attributes( self._run_span_end_attributes( - instrumentation_settings, usage, state.message_history, graph_deps.new_message_index + instrumentation_settings, + usage, + state.message_history, + graph_deps.new_message_index, + run_metadata, ) ) finally: run_span.end() + def _compute_agent_metadata(self, ctx: RunContext[AgentDepsT]) -> str | dict[str, str] | None: + metadata_override = 
self._override_metadata.get() + metadata_config = metadata_override.value if metadata_override is not None else self._metadata + if metadata_config is None: + return None + + metadata = metadata_config(ctx) if callable(metadata_config) else metadata_config + return metadata + def _run_span_end_attributes( self, settings: InstrumentationSettings, usage: _usage.RunUsage, message_history: list[_messages.ModelMessage], new_message_index: int, + metadata: str | dict[str, str] | None = None, ): if settings.version == 1: attrs = { @@ -716,6 +747,12 @@ def _run_span_end_attributes( ): attrs['pydantic_ai.variable_instructions'] = True + if metadata is not None: + if isinstance(metadata, dict): + attrs['logfire.agent.metadata'] = json.dumps(metadata) + else: + attrs['logfire.agent.metadata'] = metadata + return { **usage.opentelemetry_attributes(), **attrs, @@ -740,6 +777,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, + metadata: AgentMetadataValue[AgentDepsT] | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -753,6 +791,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. + metadata: The metadata to use instead of the metadata passed to the agent constructor. 
""" if _utils.is_set(name): name_token = self._override_name.set(_utils.Some(name)) @@ -785,6 +824,11 @@ def override( else: instructions_token = None + if _utils.is_set(metadata): + metadata_token = self._override_metadata.set(_utils.Some(metadata)) + else: + metadata_token = None + try: yield finally: @@ -800,6 +844,8 @@ def override( self._override_tools.reset(tools_token) if instructions_token is not None: self._override_instructions.reset(instructions_token) + if metadata_token is not None: + self._override_metadata.reset(metadata_token) @overload def instructions( diff --git a/tests/test_logfire.py b/tests/test_logfire.py index dadb930dd0..514d5c4aec 100644 --- a/tests/test_logfire.py +++ b/tests/test_logfire.py @@ -120,6 +120,7 @@ async def my_ret(x: int) -> str: model=TestModel(), toolsets=[toolset], instrument=instrument, + metadata={'env': 'test'}, ) result = my_agent.run_sync('Hello') @@ -314,12 +315,14 @@ async def my_ret(x: int) -> str: ] ) ), + 'logfire.agent.metadata': '{"env": "test"}', 'logfire.json_schema': IsJson( snapshot( { 'type': 'object', 'properties': { 'pydantic_ai.all_messages': {'type': 'array'}, + 'logfire.agent.metadata': {'type': 'array'}, 'final_result': {'type': 'object'}, }, } @@ -379,12 +382,14 @@ async def my_ret(x: int) -> str: ) ), 'final_result': '{"my_ret":"1"}', + 'logfire.agent.metadata': '{"env": "test"}', 'logfire.json_schema': IsJson( snapshot( { 'type': 'object', 'properties': { 'all_messages_events': {'type': 'array'}, + 'logfire.agent.metadata': {'type': 'array'}, 'final_result': {'type': 'object'}, }, } @@ -569,6 +574,46 @@ async def my_ret(x: int) -> str: ) +def _test_logfire_metadata_values_callable_dict(ctx: RunContext[Any]) -> dict[str, str]: + return {'model_name': ctx.model.model_name} + + +def _test_logfire_metadata_values_callable_string(_ctx: RunContext[Any]) -> str: + return 'callable-str' + + +@pytest.mark.skipif(not logfire_installed, reason='logfire not installed') +@pytest.mark.parametrize( + 
('metadata', 'expected'), + [ + pytest.param({'env': 'test'}, '{"env": "test"}', id='dict'), + pytest.param('staging', 'staging', id='literal-string'), + pytest.param(_test_logfire_metadata_values_callable_dict, '{"model_name": "test"}', id='callable-dict'), + pytest.param(_test_logfire_metadata_values_callable_string, 'callable-str', id='callable-string'), + ], +) +def test_logfire_metadata_values( + get_logfire_summary: Callable[[], LogfireSummary], + metadata: str | dict[str, str] | Callable[[RunContext[Any]], str | dict[str, str]], + expected: str | dict[str, str], +) -> None: + agent = Agent(model=TestModel(), instrument=InstrumentationSettings(version=2), metadata=metadata) + agent.run_sync('Hello') + + summary = get_logfire_summary() + assert summary.attributes[0]['logfire.agent.metadata'] == expected + + +@pytest.mark.skipif(not logfire_installed, reason='logfire not installed') +def test_logfire_metadata_override(get_logfire_summary: Callable[[], LogfireSummary]) -> None: + agent = Agent(model=TestModel(), instrument=InstrumentationSettings(version=2), metadata='base') + with agent.override(metadata={'env': 'override'}): + agent.run_sync('Hello') + + summary = get_logfire_summary() + assert summary.attributes[0]['logfire.agent.metadata'] == '{"env": "override"}' + + @pytest.mark.skipif(not logfire_installed, reason='logfire not installed') @pytest.mark.parametrize( 'instrument', From cf001b58f2b6173215cf8a0b0a0d57cd2c960bde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Fri, 14 Nov 2025 21:27:32 +0100 Subject: [PATCH 2/9] Misplaced doc line. 
--- docs/logfire.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/logfire.md b/docs/logfire.md index 5c3a285454..ea15b058c0 100644 --- a/docs/logfire.md +++ b/docs/logfire.md @@ -92,8 +92,6 @@ including how to instrument other libraries like [HTTPX](https://logfire.pydanti Since Logfire is built on [OpenTelemetry](https://opentelemetry.io/), you can use the Logfire Python SDK to send data to any OpenTelemetry collector, see [below](#using-opentelemetry). -When instrumentation is enabled, the resolved metadata is recorded (JSON encoded) on the run span under the `logfire.agent.metadata` attribute. - ### Debugging To demonstrate how Logfire can let you visualise the flow of a Pydantic AI run, here's the view you get from Logfire while running the [chat app examples](examples/chat-app.md): @@ -373,3 +371,5 @@ agent = Agent( metadata=lambda ctx: {'deployment': 'staging', 'tenant': ctx.deps.tenant}, ) ``` + +When instrumentation is enabled, the resolved metadata is recorded on the agent span under the `logfire.agent.metadata` attribute. From 150777a65705fadcadf2912cd1dc4c3ccd115b0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Fri, 14 Nov 2025 21:40:46 +0100 Subject: [PATCH 3/9] Coverage. 
--- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 5fd681cb70..b44785e372 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -672,9 +672,10 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: async with toolset: agent_run = AgentRun(graph_run) yield agent_run + final_result = agent_run.result if instrumentation_settings and run_span.is_recording(): run_metadata = self._compute_agent_metadata(build_run_context(agent_run.ctx)) - if instrumentation_settings.include_content and (final_result := agent_run.result) is not None: + if instrumentation_settings.include_content and final_result is not None: run_span.set_attribute( 'final_result', ( From 2faab1ec01da75f70a8074acc7744b1bf4e12905 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Thu, 27 Nov 2025 09:26:56 +0100 Subject: [PATCH 4/9] Remove string, add dict-str-any. --- docs/logfire.md | 2 +- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 15 ++++++--------- tests/test_logfire.py | 12 +++--------- 3 files changed, 10 insertions(+), 19 deletions(-) diff --git a/docs/logfire.md b/docs/logfire.md index 4ef0ef8811..99650f8861 100644 --- a/docs/logfire.md +++ b/docs/logfire.md @@ -341,7 +341,7 @@ This setting is particularly useful in production environments where compliance ### Adding Custom Metadata Use the agent's `metadata` parameter to attach additional data to the agent's span. -Metadata can be provided as a string, a dictionary, or a callable that reads the [`RunContext`][pydantic_ai.tools.RunContext] to compute values on each run. +Metadata can be provided as a dictionary, or a callable that reads the [`RunContext`][pydantic_ai.tools.RunContext] to compute values on each run, returning a dictionary. 
```python {hl_lines="4-5"} from pydantic_ai import Agent diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 5379678d39..a12403814a 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -90,7 +90,7 @@ S = TypeVar('S') NoneType = type(None) -AgentMetadataValue = str | dict[str, str] | Callable[[RunContext[AgentDepsT]], str | dict[str, str]] +AgentMetadataValue = dict[str, Any] | Callable[[RunContext[AgentDepsT]], dict[str, Any]] @dataclasses.dataclass(init=False) @@ -289,7 +289,7 @@ def __init__( will be used, which defaults to False. See the [Debugging and Monitoring guide](https://ai.pydantic.dev/logfire/) for more info. metadata: Optional metadata to attach to telemetry for this agent. - Provide a string literal, a dict of string keys and values, or a callable returning one of those values + Provide a dictionary of primitives, or a callable returning one computed from the [`RunContext`][pydantic_ai.tools.RunContext] on each run. Metadata is only recorded when instrumentation is enabled. history_processors: Optional list of callables to process the message history before sending it to the model. 
@@ -668,7 +668,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: }, ) - run_metadata: str | dict[str, str] | None = None + run_metadata: dict[str, Any] | None = None try: async with graph.iter( inputs=user_prompt_node, @@ -707,7 +707,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: finally: run_span.end() - def _compute_agent_metadata(self, ctx: RunContext[AgentDepsT]) -> str | dict[str, str] | None: + def _compute_agent_metadata(self, ctx: RunContext[AgentDepsT]) -> dict[str, Any] | None: metadata_override = self._override_metadata.get() metadata_config = metadata_override.value if metadata_override is not None else self._metadata if metadata_config is None: @@ -722,7 +722,7 @@ def _run_span_end_attributes( usage: _usage.RunUsage, message_history: list[_messages.ModelMessage], new_message_index: int, - metadata: str | dict[str, str] | None = None, + metadata: dict[str, Any] | None = None, ): if settings.version == 1: attrs = { @@ -757,10 +757,7 @@ def _run_span_end_attributes( attrs['pydantic_ai.variable_instructions'] = True if metadata is not None: - if isinstance(metadata, dict): - attrs['logfire.agent.metadata'] = json.dumps(metadata) - else: - attrs['logfire.agent.metadata'] = metadata + attrs['logfire.agent.metadata'] = json.dumps(metadata) return { **usage.opentelemetry_attributes(), diff --git a/tests/test_logfire.py b/tests/test_logfire.py index 514d5c4aec..a0cac74456 100644 --- a/tests/test_logfire.py +++ b/tests/test_logfire.py @@ -578,24 +578,18 @@ def _test_logfire_metadata_values_callable_dict(ctx: RunContext[Any]) -> dict[st return {'model_name': ctx.model.model_name} -def _test_logfire_metadata_values_callable_string(_ctx: RunContext[Any]) -> str: - return 'callable-str' - - @pytest.mark.skipif(not logfire_installed, reason='logfire not installed') @pytest.mark.parametrize( ('metadata', 'expected'), [ pytest.param({'env': 'test'}, '{"env": "test"}', id='dict'), - 
pytest.param('staging', 'staging', id='literal-string'), pytest.param(_test_logfire_metadata_values_callable_dict, '{"model_name": "test"}', id='callable-dict'), - pytest.param(_test_logfire_metadata_values_callable_string, 'callable-str', id='callable-string'), ], ) def test_logfire_metadata_values( get_logfire_summary: Callable[[], LogfireSummary], - metadata: str | dict[str, str] | Callable[[RunContext[Any]], str | dict[str, str]], - expected: str | dict[str, str], + metadata: dict[str, Any] | Callable[[RunContext[Any]], dict[str, Any]], + expected: dict[str, Any], ) -> None: agent = Agent(model=TestModel(), instrument=InstrumentationSettings(version=2), metadata=metadata) agent.run_sync('Hello') @@ -606,7 +600,7 @@ def test_logfire_metadata_values( @pytest.mark.skipif(not logfire_installed, reason='logfire not installed') def test_logfire_metadata_override(get_logfire_summary: Callable[[], LogfireSummary]) -> None: - agent = Agent(model=TestModel(), instrument=InstrumentationSettings(version=2), metadata='base') + agent = Agent(model=TestModel(), instrument=InstrumentationSettings(version=2), metadata={'env': 'base'}) with agent.override(metadata={'env': 'override'}): agent.run_sync('Hello') From 6fe93df9a3af13292ea908825e7adc9e881301a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Thu, 27 Nov 2025 11:08:59 +0100 Subject: [PATCH 5/9] Remove logfire prefix from agent metadata. --- docs/logfire.md | 2 +- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 2 +- tests/test_logfire.py | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/logfire.md b/docs/logfire.md index 99650f8861..bfbb9656f7 100644 --- a/docs/logfire.md +++ b/docs/logfire.md @@ -353,4 +353,4 @@ agent = Agent( ) ``` -When instrumentation is enabled, the resolved metadata is recorded on the agent span under the `logfire.agent.metadata` attribute. 
+When instrumentation is enabled, the resolved metadata is recorded on the agent span under the `agent.metadata` attribute. diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index a12403814a..03c16483d6 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -757,7 +757,7 @@ def _run_span_end_attributes( attrs['pydantic_ai.variable_instructions'] = True if metadata is not None: - attrs['logfire.agent.metadata'] = json.dumps(metadata) + attrs['agent.metadata'] = json.dumps(metadata) return { **usage.opentelemetry_attributes(), diff --git a/tests/test_logfire.py b/tests/test_logfire.py index a0cac74456..235aadd5d3 100644 --- a/tests/test_logfire.py +++ b/tests/test_logfire.py @@ -315,14 +315,14 @@ async def my_ret(x: int) -> str: ] ) ), - 'logfire.agent.metadata': '{"env": "test"}', + 'agent.metadata': '{"env": "test"}', 'logfire.json_schema': IsJson( snapshot( { 'type': 'object', 'properties': { 'pydantic_ai.all_messages': {'type': 'array'}, - 'logfire.agent.metadata': {'type': 'array'}, + 'agent.metadata': {'type': 'array'}, 'final_result': {'type': 'object'}, }, } @@ -382,14 +382,14 @@ async def my_ret(x: int) -> str: ) ), 'final_result': '{"my_ret":"1"}', - 'logfire.agent.metadata': '{"env": "test"}', + 'agent.metadata': '{"env": "test"}', 'logfire.json_schema': IsJson( snapshot( { 'type': 'object', 'properties': { 'all_messages_events': {'type': 'array'}, - 'logfire.agent.metadata': {'type': 'array'}, + 'agent.metadata': {'type': 'array'}, 'final_result': {'type': 'object'}, }, } @@ -595,7 +595,7 @@ def test_logfire_metadata_values( agent.run_sync('Hello') summary = get_logfire_summary() - assert summary.attributes[0]['logfire.agent.metadata'] == expected + assert summary.attributes[0]['agent.metadata'] == expected @pytest.mark.skipif(not logfire_installed, reason='logfire not installed') @@ -605,7 +605,7 @@ def 
test_logfire_metadata_override(get_logfire_summary: Callable[[], LogfireSumm agent.run_sync('Hello') summary = get_logfire_summary() - assert summary.attributes[0]['logfire.agent.metadata'] == '{"env": "override"}' + assert summary.attributes[0]['agent.metadata'] == '{"env": "override"}' @pytest.mark.skipif(not logfire_installed, reason='logfire not installed') From 44553cadb884a4ddea1b2c08710dd216d473badc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Thu, 27 Nov 2025 11:42:05 +0100 Subject: [PATCH 6/9] Add metadata to run contexts and results. --- docs/agents.md | 1 + docs/logfire.md | 3 ++ pydantic_ai_slim/pydantic_ai/_agent_graph.py | 3 ++ pydantic_ai_slim/pydantic_ai/_run_context.py | 2 ++ .../pydantic_ai/agent/__init__.py | 12 ++++++-- pydantic_ai_slim/pydantic_ai/result.py | 25 ++++++++++++++++- pydantic_ai_slim/pydantic_ai/run.py | 10 +++++++ tests/test_agent.py | 23 +++++++++++++++ tests/test_streaming.py | 28 +++++++++++++++++++ 9 files changed, 103 insertions(+), 4 deletions(-) diff --git a/docs/agents.md b/docs/agents.md index 0633fb88ba..3da6ac475b 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -414,6 +414,7 @@ _(This example is complete, it can be run "as is" — you'll need to add `asynci You can retrieve usage statistics (tokens, requests, etc.) at any time from the [`AgentRun`][pydantic_ai.agent.AgentRun] object via `agent_run.usage()`. This method returns a [`RunUsage`][pydantic_ai.usage.RunUsage] object containing the usage data. Once the run finishes, `agent_run.result` becomes a [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] object containing the final output (and related metadata). +You can inspect [`agent_run.metadata`][pydantic_ai.agent.AgentRun] or [`agent_run.result.metadata`][pydantic_ai.agent.AgentRunResult] after the run completes to read any metadata configured on the agent. 
#### Streaming All Events and Output diff --git a/docs/logfire.md b/docs/logfire.md index bfbb9656f7..3bc5be6660 100644 --- a/docs/logfire.md +++ b/docs/logfire.md @@ -353,4 +353,7 @@ agent = Agent( ) ``` +Resolved metadata is available on [`RunContext.metadata`][pydantic_ai.tools.RunContext], +[`AgentRun.metadata`][pydantic_ai.agent.AgentRun], [`AgentRunResult.metadata`][pydantic_ai.agent.AgentRunResult], +and [`StreamedRunResult.metadata`][pydantic_ai.result.StreamedRunResult], so you can persist it alongside outputs. When instrumentation is enabled, the resolved metadata is recorded on the agent span under the `agent.metadata` attribute. diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 6a14f8b350..29892c5d90 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -94,6 +94,7 @@ class GraphAgentState: retries: int = 0 run_step: int = 0 run_id: str = dataclasses.field(default_factory=lambda: str(uuid.uuid4())) + metadata: dict[str, Any] | None = None def increment_retries( self, @@ -450,6 +451,7 @@ async def stream( _run_ctx=build_run_context(ctx), _usage_limits=ctx.deps.usage_limits, _tool_manager=ctx.deps.tool_manager, + _metadata_getter=lambda: ctx.state.metadata, ) yield agent_stream # In case the user didn't manually consume the full stream, ensure it is fully consumed here, @@ -806,6 +808,7 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT else DEFAULT_INSTRUMENTATION_VERSION, run_step=ctx.state.run_step, run_id=ctx.state.run_id, + metadata=ctx.state.metadata, ) validation_context = build_validation_context(ctx.deps.validation_context, run_context) run_context = replace(run_context, validation_context=validation_context) diff --git a/pydantic_ai_slim/pydantic_ai/_run_context.py b/pydantic_ai_slim/pydantic_ai/_run_context.py index b605bd8b54..85b19d2462 100644 --- a/pydantic_ai_slim/pydantic_ai/_run_context.py 
+++ b/pydantic_ai_slim/pydantic_ai/_run_context.py @@ -64,6 +64,8 @@ class RunContext(Generic[RunContextAgentDepsT]): """Whether the output passed to an output validator is partial.""" run_id: str | None = None """"Unique identifier for the agent run.""" + metadata: dict[str, Any] | None = None + """Metadata associated with this agent run, if configured.""" @property def last_attempt(self) -> bool: diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 03c16483d6..ca0d455676 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -288,10 +288,13 @@ def __init__( [`Agent.instrument_all()`][pydantic_ai.Agent.instrument_all] will be used, which defaults to False. See the [Debugging and Monitoring guide](https://ai.pydantic.dev/logfire/) for more info. - metadata: Optional metadata to attach to telemetry for this agent. + metadata: Optional metadata to store with each run. Provide a dictionary of primitives, or a callable returning one computed from the [`RunContext`][pydantic_ai.tools.RunContext] on each run. - Metadata is only recorded when instrumentation is enabled. + Resolved metadata is exposed on [`RunContext.metadata`][pydantic_ai.tools.RunContext], + [`AgentRun.metadata`][pydantic_ai.agent.AgentRun], and + [`AgentRunResult.metadata`][pydantic_ai.agent.AgentRunResult], + and is attached to telemetry when instrumentation is enabled. history_processors: Optional list of callables to process the message history before sending it to the model. Each processor takes a list of messages and returns a modified list of messages. Processors can be sync or async and are applied in sequence. 
@@ -681,8 +684,11 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: agent_run = AgentRun(graph_run) yield agent_run final_result = agent_run.result + run_context = build_run_context(agent_run.ctx) + run_metadata = self._compute_agent_metadata(run_context) + run_context.metadata = run_metadata + graph_run.state.metadata = run_metadata if instrumentation_settings and run_span.is_recording(): - run_metadata = self._compute_agent_metadata(build_run_context(agent_run.ctx)) if instrumentation_settings.include_content and final_result is not None: run_span.set_attribute( 'final_result', diff --git a/pydantic_ai_slim/pydantic_ai/result.py b/pydantic_ai_slim/pydantic_ai/result.py index 88bfe407fa..a046ce6854 100644 --- a/pydantic_ai_slim/pydantic_ai/result.py +++ b/pydantic_ai_slim/pydantic_ai/result.py @@ -4,7 +4,7 @@ from copy import deepcopy from dataclasses import dataclass, field, replace from datetime import datetime -from typing import TYPE_CHECKING, Generic, cast, overload +from typing import TYPE_CHECKING, Any, Generic, cast, overload from pydantic import ValidationError from typing_extensions import TypeVar, deprecated @@ -52,6 +52,7 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]): _run_ctx: RunContext[AgentDepsT] _usage_limits: UsageLimits | None _tool_manager: ToolManager[AgentDepsT] + _metadata_getter: Callable[[], dict[str, Any] | None] | None = field(default=None, repr=False) _agent_stream_iterator: AsyncIterator[ModelResponseStreamEvent] | None = field(default=None, init=False) _initial_run_ctx_usage: RunUsage = field(init=False) @@ -126,6 +127,13 @@ def run_id(self) -> str: assert self._run_ctx.run_id is not None return self._run_ctx.run_id + @property + def metadata(self) -> dict[str, Any] | None: + """Metadata associated with this agent run, if configured.""" + if self._metadata_getter is not None: + return self._metadata_getter() + return self._run_ctx.metadata + # TODO (v2): Drop in favor of `response` property def 
get(self) -> _messages.ModelResponse: """Get the current state of the response.""" @@ -518,6 +526,16 @@ def response(self) -> _messages.ModelResponse: else: raise ValueError('No stream response or run result provided') # pragma: no cover + @property + def metadata(self) -> dict[str, Any] | None: + """Metadata associated with this agent run, if configured.""" + if self._run_result is not None: + return self._run_result.metadata + elif self._stream_response is not None: + return self._stream_response.metadata + else: + return None + # TODO (v2): Make this a property def usage(self) -> RunUsage: """Return the usage of the whole run. @@ -717,6 +735,11 @@ def run_id(self) -> str: """The unique identifier for the agent run.""" return self._streamed_run_result.run_id + @property + def metadata(self) -> dict[str, Any] | None: + """Metadata associated with this agent run, if configured.""" + return self._streamed_run_result.metadata + def validate_response_output(self, message: _messages.ModelResponse, *, allow_partial: bool = False) -> OutputDataT: """Validate a structured result message.""" return _utils.get_event_loop().run_until_complete( diff --git a/pydantic_ai_slim/pydantic_ai/run.py b/pydantic_ai_slim/pydantic_ai/run.py index 0ed3e2455d..4ca5046d2a 100644 --- a/pydantic_ai_slim/pydantic_ai/run.py +++ b/pydantic_ai_slim/pydantic_ai/run.py @@ -283,6 +283,11 @@ def usage(self) -> _usage.RunUsage: """Get usage statistics for the run so far, including token usage, model requests, and so on.""" return self._graph_run.state.usage + @property + def metadata(self) -> dict[str, Any] | None: + """Metadata associated with this agent run, if configured.""" + return self._graph_run.state.metadata + @property def run_id(self) -> str: """The unique identifier for the agent run.""" @@ -422,6 +427,11 @@ def timestamp(self) -> datetime: """Return the timestamp of last response.""" return self.response.timestamp + @property + def metadata(self) -> dict[str, Any] | None: + """Metadata 
associated with this agent run, if configured.""" + return self._state.metadata + @property def run_id(self) -> str: """The unique identifier for the agent run.""" diff --git a/tests/test_agent.py b/tests/test_agent.py index c912334434..627cd4d906 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -2668,6 +2668,29 @@ def test_agent_message_history_includes_run_id() -> None: assert len({*run_ids}) == snapshot(1) +async def test_agent_run_result_metadata_available() -> None: + agent = Agent( + TestModel(custom_output_text='metadata output'), + metadata=lambda ctx: {'prompt': ctx.prompt}, + ) + + result = await agent.run('metadata prompt') + assert result.output == 'metadata output' + assert result.metadata == {'prompt': 'metadata prompt'} + + +async def test_agent_iter_metadata_surfaces_on_result() -> None: + agent = Agent(TestModel(custom_output_text='iter metadata output'), metadata={'env': 'tests'}) + + async with agent.iter('iter metadata prompt') as agent_run: + async for _ in agent_run: + pass + + assert agent_run.metadata == {'env': 'tests'} + assert agent_run.result is not None + assert agent_run.result.metadata == {'env': 'tests'} + + def test_unknown_tool(): def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: return ModelResponse(parts=[ToolCallPart('foobar', '{}')]) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index 0c6a46f3c0..b76d3ad43b 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -1414,6 +1414,34 @@ def output_validator_simple(data: str) -> str: ) +async def test_streamed_run_result_metadata_available() -> None: + agent = Agent(TestModel(custom_output_text='stream metadata'), metadata={'env': 'stream'}) + + async with agent.run_stream('stream metadata prompt') as result: + assert await result.get_output() == 'stream metadata' + + assert result.metadata == {'env': 'stream'} + + +async def test_agent_stream_metadata_available() -> None: + agent = Agent( + 
TestModel(custom_output_text='agent stream metadata'), + metadata=lambda ctx: {'prompt': ctx.prompt}, + ) + + captured_stream: AgentStream | None = None + async with agent.iter('agent stream prompt') as run: + async for node in run: + if agent.is_model_request_node(node): + async with node.stream(run.ctx) as stream: + captured_stream = stream + async for _ in stream.stream_text(debounce_by=None): + pass + + assert captured_stream is not None + assert captured_stream.metadata == {'prompt': 'agent stream prompt'} + + async def test_iter_stream_responses(): m = TestModel(custom_output_text='The cat sat on the mat.') From 6db0f1f20a1794786dec1452298cb80e20540d49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Thu, 27 Nov 2025 12:37:59 +0100 Subject: [PATCH 7/9] Compute run metadata even on failure. --- .../pydantic_ai/agent/__init__.py | 20 +++-- tests/test_agent.py | 42 ++++++++++ tests/test_streaming.py | 76 ++++++++++++++++++- 3 files changed, 131 insertions(+), 7 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index ca0d455676..ec3134c15d 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -291,6 +291,8 @@ def __init__( metadata: Optional metadata to store with each run. Provide a dictionary of primitives, or a callable returning one computed from the [`RunContext`][pydantic_ai.tools.RunContext] on each run. + Callables are invoked after the run finishes (whether it succeeded or raised) so they can + inspect the final run state. 
Resolved metadata is exposed on [`RunContext.metadata`][pydantic_ai.tools.RunContext], [`AgentRun.metadata`][pydantic_ai.agent.AgentRun], and [`AgentRunResult.metadata`][pydantic_ai.agent.AgentRunResult], @@ -682,12 +684,20 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: ) as graph_run: async with toolset: agent_run = AgentRun(graph_run) - yield agent_run + + def resolve_run_metadata() -> None: + nonlocal run_metadata + run_context = build_run_context(agent_run.ctx) + run_metadata = self._compute_agent_metadata(run_context) + run_context.metadata = run_metadata + graph_run.state.metadata = run_metadata + + try: + yield agent_run + finally: + resolve_run_metadata() + final_result = agent_run.result - run_context = build_run_context(agent_run.ctx) - run_metadata = self._compute_agent_metadata(run_context) - run_context.metadata = run_metadata - graph_run.state.metadata = run_metadata if instrumentation_settings and run_span.is_recording(): if instrumentation_settings.include_content and final_result is not None: run_span.set_attribute( diff --git a/tests/test_agent.py b/tests/test_agent.py index 627cd4d906..f41122a7b0 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -2691,6 +2691,48 @@ async def test_agent_iter_metadata_surfaces_on_result() -> None: assert agent_run.result.metadata == {'env': 'tests'} +async def test_agent_metadata_persisted_when_run_fails() -> None: + agent = Agent( + TestModel(), + metadata=lambda ctx: {'prompt': ctx.prompt}, + ) + + @agent.tool + def explode(_: RunContext) -> str: + raise RuntimeError('explode') + + failing_prompt = 'metadata failure prompt' + captured_run = None + + with pytest.raises(RuntimeError, match='explode'): + async with agent.iter(failing_prompt) as agent_run: + captured_run = agent_run + async for _ in agent_run: + pass + + assert captured_run is not None + assert captured_run.metadata == {'prompt': failing_prompt} + assert captured_run.result is None + + +async def 
test_agent_metadata_override_with_dict() -> None: + agent = Agent(TestModel(custom_output_text='override dict base'), metadata={'env': 'base'}) + + with agent.override(metadata={'env': 'override'}): + result = await agent.run('override dict prompt') + + assert result.metadata == {'env': 'override'} + + +async def test_agent_metadata_override_with_callable() -> None: + agent = Agent(TestModel(custom_output_text='override callable base'), metadata={'env': 'base'}) + + with agent.override(metadata=lambda ctx: {'computed': ctx.prompt}): + result = await agent.run('callable override prompt') + + assert result.metadata == {'computed': 'callable override prompt'} + + def test_unknown_tool(): def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: return ModelResponse(parts=[ToolCallPart('foobar', '{}')]) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index b76d3ad43b..ca302a477d 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -8,6 +8,7 @@ from dataclasses import replace from datetime import timezone from typing import Any +from unittest.mock import MagicMock import pytest from inline_snapshot import snapshot @@ -39,13 +40,17 @@ UserError, UserPromptPart, capture_run_messages, + models, ) +from pydantic_ai._agent_graph import GraphAgentState +from pydantic_ai._output import TextOutputProcessor, TextOutputSchema +from pydantic_ai._tool_manager import ToolManager from pydantic_ai.agent import AgentRun from pydantic_ai.exceptions import ApprovalRequired, CallDeferred from pydantic_ai.models.function import AgentInfo, DeltaToolCall, DeltaToolCalls, FunctionModel -from pydantic_ai.models.test import TestModel +from pydantic_ai.models.test import TestModel, TestStreamedResponse as ModelTestStreamedResponse from pydantic_ai.output import PromptedOutput, TextOutput -from pydantic_ai.result import AgentStream, FinalResult, RunUsage +from pydantic_ai.result import AgentStream, FinalResult, RunUsage, StreamedRunResult, 
StreamedRunResultSync from pydantic_ai.tools import DeferredToolRequests, DeferredToolResults, ToolApproved, ToolDefinition from pydantic_ai.usage import RequestUsage from pydantic_graph import End @@ -1442,6 +1447,73 @@ async def test_agent_stream_metadata_available() -> None: assert captured_stream.metadata == {'prompt': 'agent stream prompt'} +def test_agent_stream_metadata_falls_back_to_run_context() -> None: + response_message = ModelResponse(parts=[TextPart('fallback metadata')], model_name='test') + stream_response = ModelTestStreamedResponse( + model_request_parameters=models.ModelRequestParameters(), + _model_name='test', + _structured_response=response_message, + _messages=[], + _provider_name='test', + ) + run_ctx = RunContext( + deps=None, + model=TestModel(), + usage=RunUsage(), + metadata={'source': 'run-context'}, + ) + output_schema = TextOutputSchema[str]( + text_processor=TextOutputProcessor(), + allows_deferred_tools=False, + allows_image=False, + ) + stream = AgentStream( + _raw_stream_response=stream_response, + _output_schema=output_schema, + _model_request_parameters=models.ModelRequestParameters(), + _output_validators=[], + _run_ctx=run_ctx, + _usage_limits=None, + _tool_manager=ToolManager(toolset=MagicMock()), + ) + + assert stream.metadata == {'source': 'run-context'} + + +def _make_run_result(*, metadata: dict[str, Any] | None) -> AgentRunResult[str]: + state = GraphAgentState(metadata=metadata) + response_message = ModelResponse(parts=[TextPart('final')], model_name='test') + state.message_history.append(response_message) + return AgentRunResult('final', _state=state) + + +def test_streamed_run_result_metadata_prefers_run_result_state() -> None: + run_result = _make_run_result(metadata={'from': 'run-result'}) + streamed = StreamedRunResult( + all_messages=run_result.all_messages(), + new_message_index=0, + run_result=run_result, + ) + assert streamed.metadata == {'from': 'run-result'} + + +def 
test_streamed_run_result_metadata_none_without_sources() -> None: + run_result = _make_run_result(metadata=None) + streamed = StreamedRunResult(all_messages=[], new_message_index=0, run_result=run_result) + assert streamed.metadata is None + + +def test_streamed_run_result_sync_exposes_metadata() -> None: + run_result = _make_run_result(metadata={'sync': 'metadata'}) + streamed = StreamedRunResult( + all_messages=run_result.all_messages(), + new_message_index=0, + run_result=run_result, + ) + sync_result = StreamedRunResultSync(streamed) + assert sync_result.metadata == {'sync': 'metadata'} + + async def test_iter_stream_responses(): m = TestModel(custom_output_text='The cat sat on the mat.') From d05887cc46ea10c1291d08232d4361eae944637e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Thu, 27 Nov 2025 14:20:26 +0100 Subject: [PATCH 8/9] Add metadata to run and iter. --- .../pydantic_ai/agent/__init__.py | 42 +++++++++++++---- .../pydantic_ai/agent/abstract.py | 39 ++++++++++++++++ pydantic_ai_slim/pydantic_ai/agent/wrapper.py | 7 ++- .../pydantic_ai/durable_exec/dbos/_agent.py | 45 ++++++++++++++++++- .../durable_exec/prefect/_agent.py | 42 ++++++++++++++++- .../durable_exec/temporal/_agent.py | 42 ++++++++++++++++- tests/test_agent.py | 36 +++++++++++++++ tests/test_streaming.py | 5 +++ 8 files changed, 245 insertions(+), 13 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index ec3134c15d..9d120fabf8 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -64,7 +64,7 @@ from ..toolsets.combined import CombinedToolset from ..toolsets.function import FunctionToolset from ..toolsets.prepared import PreparedToolset -from .abstract import AbstractAgent, EventStreamHandler, Instructions, RunOutputDataT +from .abstract import AbstractAgent, AgentMetadataValue, EventStreamHandler, Instructions, RunOutputDataT from 
.wrapper import WrapperAgent if TYPE_CHECKING: @@ -90,8 +90,6 @@ S = TypeVar('S') NoneType = type(None) -AgentMetadataValue = dict[str, Any] | Callable[[RunContext[AgentDepsT]], dict[str, Any]] - @dataclasses.dataclass(init=False) class Agent(AbstractAgent[AgentDepsT, OutputDataT]): @@ -445,6 +443,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -464,6 +463,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -483,6 +483,7 @@ async def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -559,6 +560,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a callable taking + [`RunContext`][pydantic_ai.tools.RunContext]. The resolved dictionary is shallow merged into the + agent's metadata (or any [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys + overwriting duplicates. 
infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -688,7 +693,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: def resolve_run_metadata() -> None: nonlocal run_metadata run_context = build_run_context(agent_run.ctx) - run_metadata = self._compute_agent_metadata(run_context) + run_metadata = self._compute_agent_metadata(run_context, metadata) run_context.metadata = run_metadata graph_run.state.metadata = run_metadata @@ -723,13 +728,32 @@ def resolve_run_metadata() -> None: finally: run_span.end() - def _compute_agent_metadata(self, ctx: RunContext[AgentDepsT]) -> dict[str, Any] | None: + def _compute_agent_metadata( + self, + ctx: RunContext[AgentDepsT], + run_metadata_config: AgentMetadataValue[AgentDepsT] | None = None, + ) -> dict[str, Any] | None: metadata_override = self._override_metadata.get() - metadata_config = metadata_override.value if metadata_override is not None else self._metadata - if metadata_config is None: - return None + if metadata_override is not None: + base_config = metadata_override.value + else: + base_config = self._metadata + + base_metadata = self._resolve_metadata_config(base_config, ctx) + run_metadata = self._resolve_metadata_config(run_metadata_config, ctx) + + if base_metadata and run_metadata: + return {**base_metadata, **run_metadata} + return run_metadata or base_metadata - metadata = metadata_config(ctx) if callable(metadata_config) else metadata_config + def _resolve_metadata_config( + self, + config: AgentMetadataValue[AgentDepsT] | None, + ctx: RunContext[AgentDepsT], + ) -> dict[str, Any] | None: + if config is None: + return None + metadata = config(ctx) if callable(config) else config return metadata def _run_span_end_attributes( diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py 
b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 567b61dff6..b5031ce273 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -70,6 +70,8 @@ | None ) +AgentMetadataValue = dict[str, Any] | Callable[[RunContext[AgentDepsT]], dict[str, Any]] + class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC): """Abstract superclass for [`Agent`][pydantic_ai.agent.Agent], [`WrapperAgent`][pydantic_ai.agent.WrapperAgent], and your own custom agent implementations.""" @@ -136,6 +138,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -156,6 +159,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -175,6 +179,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -209,6 +214,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. 
Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]), with per-run keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. event_stream_handler: Optional handler for events from the model's streaming response and the agent's execution of tools to use for this run. @@ -233,6 +242,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, toolsets=toolsets, builtin_tools=builtin_tools, ) as agent_run: @@ -260,6 +270,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -280,6 +291,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -299,6 +311,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -332,6 +345,7 @@ def run_sync( model_settings: Optional settings to use for this model's request. 
usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run, merged with the agent's configured metadata. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. event_stream_handler: Optional handler for events from the model's streaming response and the agent's execution of tools to use for this run. @@ -355,6 +369,7 @@ def run_sync( model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=False, toolsets=toolsets, builtin_tools=builtin_tools, @@ -376,6 +391,7 @@ def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -396,6 +412,7 @@ def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -416,6 +433,7 @@ async def run_stream( # noqa: C901 model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -458,6 +476,7 @@ async def main(): usage_limits: Optional limits on model request count or token usage. 
usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. + metadata: Optional metadata to attach to this run, merged with the agent's configured metadata. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. event_stream_handler: Optional handler for events from the model's streaming response and the agent's execution of tools to use for this run. @@ -486,6 +505,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=False, toolsets=toolsets, builtin_tools=builtin_tools, @@ -608,6 +628,7 @@ def run_stream_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -627,6 +648,7 @@ def run_stream_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -645,6 +667,7 @@ def run_stream_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -689,6 +712,7 @@ def main(): usage_limits: Optional limits on model request count or token usage. 
usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. + metadata: Optional metadata to attach to this run, merged with the agent's configured metadata. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. event_stream_handler: Optional handler for events from the model's streaming response and the agent's execution of tools to use for this run. @@ -712,6 +736,7 @@ async def _consume_stream(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -736,6 +761,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -755,6 +781,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -773,6 +800,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -823,6 +851,7 @@ async def main(): model_settings: Optional settings to use for this model's request. 
usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run, merged with the agent's configured metadata. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -848,6 +877,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, toolsets=toolsets, builtin_tools=builtin_tools, ) @@ -865,6 +895,7 @@ async def _run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]: @@ -891,6 +922,7 @@ async def run_agent() -> AgentRunResult[Any]: model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=False, toolsets=toolsets, builtin_tools=builtin_tools, @@ -920,6 +952,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -939,6 +972,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: 
Sequence[AbstractBuiltinTool] | None = None, @@ -959,6 +993,7 @@ async def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -1035,6 +1070,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a callable taking + [`RunContext`][pydantic_ai.tools.RunContext]. The resolved dictionary is shallow merged into the + agent's metadata (or any [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys + overwriting duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. 
diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index 38e832fa2b..be8272ca8d 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -21,7 +21,7 @@ ToolFuncEither, ) from ..toolsets import AbstractToolset -from .abstract import AbstractAgent, EventStreamHandler, Instructions, RunOutputDataT +from .abstract import AbstractAgent, AgentMetadataValue, EventStreamHandler, Instructions, RunOutputDataT class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]): @@ -81,6 +81,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -100,6 +101,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -119,6 +121,7 @@ async def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -195,6 +198,7 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
+ metadata: Optional metadata to attach to this run. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -213,6 +217,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py index 9e1c8ee3c0..f7519418ef 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py @@ -16,7 +16,7 @@ usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent -from pydantic_ai.agent.abstract import Instructions, RunOutputDataT +from pydantic_ai.agent.abstract import AgentMetadataValue, Instructions, RunOutputDataT from pydantic_ai.builtin_tools import AbstractBuiltinTool from pydantic_ai.exceptions import UserError from pydantic_ai.models import Model @@ -121,6 +121,7 @@ async def wrapped_run_workflow( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -139,6 +140,7 @@ async def wrapped_run_workflow( model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -162,6 +164,7 @@ def wrapped_run_sync_workflow( instructions: Instructions[AgentDepsT] = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: 
AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -180,6 +183,7 @@ def wrapped_run_sync_workflow( model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -254,6 +258,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -274,6 +279,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -293,6 +299,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -328,6 +335,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. 
+ The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -347,6 +358,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -368,6 +380,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -388,6 +401,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -407,6 +421,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -441,6 +456,10 @@ def run_sync( model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
+ metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -460,6 +479,7 @@ def run_sync( model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -481,6 +501,7 @@ def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -501,6 +522,7 @@ def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -521,6 +543,7 @@ async def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -553,6 +576,10 @@ async def main(): model_settings: Optional settings to use for this model's request. 
usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -578,6 +605,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -600,6 +628,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -619,6 +648,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -637,6 +667,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, 
builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -687,6 +718,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -714,6 +749,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -734,6 +770,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -754,6 +791,7 @@ async def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -831,6 +869,10 @@ 
async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -855,6 +897,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py index 8b1b6af44a..06f60eda83 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py @@ -19,7 +19,7 @@ usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent -from pydantic_ai.agent.abstract import Instructions, RunOutputDataT +from pydantic_ai.agent.abstract import AgentMetadataValue, Instructions, RunOutputDataT from pydantic_ai.builtin_tools import AbstractBuiltinTool from pydantic_ai.exceptions import UserError from pydantic_ai.models import Model @@ -185,6 +185,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: 
Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -205,6 +206,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -224,6 +226,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -259,6 +262,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. event_stream_handler: Optional event stream handler to use for this run. 
@@ -285,6 +292,7 @@ async def wrapped_run_flow() -> AgentRunResult[Any]: model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, event_stream_handler=event_stream_handler, @@ -309,6 +317,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -329,6 +338,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -348,6 +358,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -382,6 +393,10 @@ def run_sync( model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. 
infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. event_stream_handler: Optional event stream handler to use for this run. @@ -410,6 +425,7 @@ def wrapped_run_sync_flow() -> AgentRunResult[Any]: model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, event_stream_handler=event_stream_handler, @@ -435,6 +451,7 @@ def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -455,6 +472,7 @@ def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -475,6 +493,7 @@ async def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -507,6 +526,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. 
Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -532,6 +555,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, event_stream_handler=event_stream_handler, @@ -554,6 +578,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -573,6 +598,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -591,6 +617,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -641,6 +668,10 @@ async def main(): model_settings: Optional settings to use for this model's request. 
usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -666,6 +697,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -685,6 +717,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -704,6 +737,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -723,6 +757,7 @@ async def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | 
None = None, @@ -799,6 +834,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -823,6 +862,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, ) as run: diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index 6e964c8d08..92597388e8 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -24,7 +24,7 @@ usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent -from pydantic_ai.agent.abstract import Instructions, RunOutputDataT +from pydantic_ai.agent.abstract import AgentMetadataValue, Instructions, RunOutputDataT from pydantic_ai.builtin_tools import AbstractBuiltinTool from pydantic_ai.exceptions import UserError from pydantic_ai.models import Model @@ -268,6 +268,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = 
True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -288,6 +289,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -307,6 +309,7 @@ async def run( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -342,6 +345,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a dictionary or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. event_stream_handler: Optional event stream handler to use for this run. 
@@ -367,6 +374,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -388,6 +396,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -408,6 +417,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -427,6 +437,7 @@ def run_sync( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -461,6 +472,10 @@ def run_sync( model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a string, a dictionary, or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. 
infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. event_stream_handler: Optional event stream handler to use for this run. @@ -485,6 +500,7 @@ def run_sync( model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -506,6 +522,7 @@ def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -526,6 +543,7 @@ def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -546,6 +564,7 @@ async def run_stream( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -578,6 +597,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a string, a dictionary, or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. 
+ The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -603,6 +626,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, event_stream_handler=event_stream_handler, @@ -625,6 +649,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -644,6 +669,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -662,6 +688,7 @@ def run_stream_events( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -712,6 +739,10 @@ async def main(): model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. 
usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a string, a dictionary, or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -737,6 +768,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, @@ -756,6 +788,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -776,6 +809,7 @@ def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -796,6 +830,7 @@ async def iter( model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, + metadata: AgentMetadataValue[AgentDepsT] | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, @@ -873,6 +908,10 @@ async def main(): model_settings: 
Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + metadata: Optional metadata to attach to this run. Accepts a string, a dictionary, or a + callable taking [`RunContext`][pydantic_ai.tools.RunContext]. + The resolved dictionary is shallow merged into the metadata configured on the agent (or + via [`Agent.override`][pydantic_ai.agent.Agent.override]) with run-level keys replacing duplicates. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools for this run. @@ -907,6 +946,7 @@ async def main(): model_settings=model_settings, usage_limits=usage_limits, usage=usage, + metadata=metadata, infer_name=infer_name, toolsets=toolsets, builtin_tools=builtin_tools, diff --git a/tests/test_agent.py b/tests/test_agent.py index f41122a7b0..23235365a6 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -2733,6 +2733,42 @@ async def test_agent_metadata_override_with_callable() -> None: assert result.metadata == {'computed': 'callable override prompt'} + +async def test_agent_run_metadata_kwarg_dict() -> None: + agent = Agent(TestModel(custom_output_text='kwarg dict output')) + + result = await agent.run('kwarg dict prompt', metadata={'env': 'run'}) + + assert result.metadata == {'env': 'run'} + + +async def test_agent_run_metadata_kwarg_callable() -> None: + agent = Agent(TestModel(custom_output_text='kwarg callable output')) + + def run_meta(ctx: RunContext[None]) -> dict[str, Any]: + return {'prompt': ctx.prompt} + + result = await agent.run('kwarg callable prompt', metadata=run_meta) + + assert result.metadata == {'prompt': 'kwarg callable prompt'} + + +async def test_agent_run_metadata_kwarg_merges_agent_metadata() -> None: + agent = Agent(TestModel(custom_output_text='kwarg merge 
output'), metadata={'env': 'base', 'shared': 'agent'}) + + result = await agent.run('kwarg merge prompt', metadata={'run': 'value', 'shared': 'run'}) + + assert result.metadata == {'env': 'base', 'run': 'value', 'shared': 'run'} + + +async def test_agent_run_metadata_kwarg_merges_override() -> None: + agent = Agent(TestModel(custom_output_text='kwarg override output'), metadata={'env': 'base'}) + + with agent.override(metadata={'env': 'override', 'override_only': True}): + result = await agent.run('kwarg override prompt', metadata={'run_only': True}) + + assert result.metadata == {'env': 'override', 'override_only': True, 'run_only': True} + + def test_unknown_tool(): def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: return ModelResponse(parts=[ToolCallPart('foobar', '{}')]) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index ca302a477d..62432a58b1 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -1503,6 +1503,11 @@ def test_streamed_run_result_metadata_none_without_sources() -> None: assert streamed.metadata is None +def test_streamed_run_result_metadata_none_without_run_or_stream() -> None: + streamed = StreamedRunResult(all_messages=[], new_message_index=0, stream_response=None, on_complete=None) + assert streamed.metadata is None + + def test_streamed_run_result_sync_exposes_metadata() -> None: run_result = _make_run_result(metadata={'sync': 'metadata'}) streamed = StreamedRunResult( From 0c5b782ceca07c0adf58fe759c70133c6e8a6679 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1o=20Stanovnik?= Date: Thu, 27 Nov 2025 15:02:06 +0100 Subject: [PATCH 9/9] Doc fixes. 
--- docs/logfire.md | 2 +- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/logfire.md b/docs/logfire.md index 3bc5be6660..043b4f7df9 100644 --- a/docs/logfire.md +++ b/docs/logfire.md @@ -353,7 +353,7 @@ agent = Agent( ) ``` -Resolved metadata is available on [`RunContext.metadata`][pydantic_ai.tools.RunContext], +Resolved metadata is available after the run completes on [`AgentRun.metadata`][pydantic_ai.agent.AgentRun], [`AgentRunResult.metadata`][pydantic_ai.agent.AgentRunResult], and [`StreamedRunResult.metadata`][pydantic_ai.result.StreamedRunResult], so you can persist it alongside outputs. When instrumentation is enabled, the resolved metadata is recorded on the agent span under the `agent.metadata` attribute. diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 9d120fabf8..297b45fc51 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -291,9 +291,10 @@ def __init__( computed from the [`RunContext`][pydantic_ai.tools.RunContext] on each run. Callables are invoked after the run finishes (whether it succeeded or raised) so they can inspect the final run state. - Resolved metadata is exposed on [`RunContext.metadata`][pydantic_ai.tools.RunContext], - [`AgentRun.metadata`][pydantic_ai.agent.AgentRun], and - [`AgentRunResult.metadata`][pydantic_ai.agent.AgentRunResult], + Resolved metadata can be read after the run completes via + [`AgentRun.metadata`][pydantic_ai.agent.AgentRun], + [`AgentRunResult.metadata`][pydantic_ai.agent.AgentRunResult], and + [`StreamedRunResult.metadata`][pydantic_ai.result.StreamedRunResult], and is attached to telemetry when instrumentation is enabled. history_processors: Optional list of callables to process the message history before sending it to the model. 
Each processor takes a list of messages and returns a modified list of messages.