From d12b5715ae066740885ee7fb6a37947f4a6a4ebd Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Sun, 16 Nov 2025 17:28:14 +0100 Subject: [PATCH 01/15] Pass Pydantic validation context to agents (#3381) --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 14 ++++++--- pydantic_ai_slim/pydantic_ai/_output.py | 31 +++++++++++++++---- pydantic_ai_slim/pydantic_ai/_tool_manager.py | 28 ++++++++++++++--- .../pydantic_ai/agent/__init__.py | 9 +++++- pydantic_ai_slim/pydantic_ai/result.py | 6 ++-- 5 files changed, 71 insertions(+), 17 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index c973befc70..de72b1da2b 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -18,7 +18,7 @@ from pydantic_ai._function_schema import _takes_ctx as is_takes_ctx # type: ignore from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION -from pydantic_ai._tool_manager import ToolManager +from pydantic_ai._tool_manager import ToolManager, build_validation_context from pydantic_ai._utils import dataclasses_no_defaults_repr, get_union_args, is_async_callable, run_in_executor from pydantic_ai.builtin_tools import AbstractBuiltinTool from pydantic_graph import BaseNode, GraphRunContext @@ -605,7 +605,9 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa text = '' # pragma: no cover if text: try: - self._next_node = await self._handle_text_response(ctx, text, text_processor) + self._next_node = await self._handle_text_response( + ctx, ctx.deps.tool_manager.validation_ctx, text, text_processor + ) return except ToolRetryError: # pragma: no cover # If the text from the previous response was invalid, ignore it. @@ -669,7 +671,9 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa if text_processor := output_schema.text_processor: if text: - self._next_node = await self._handle_text_response(ctx, text, text_processor) + self._next_node = await self._handle_text_response( + ctx, ctx.deps.tool_manager.validation_ctx, text, text_processor + ) return alternatives.insert(0, 'return text') @@ -731,12 +735,14 @@ async def _handle_tool_calls( async def _handle_text_response( self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]], + validation_ctx: Any | Callable[[RunContext[DepsT]], Any], text: str, text_processor: _output.BaseOutputProcessor[NodeRunEndT], ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]: run_context = build_run_context(ctx) + validation_context = build_validation_context(validation_ctx, run_context) - result_data = await text_processor.process(text, run_context) + result_data = await text_processor.process(text, run_context, validation_context) for validator in ctx.deps.output_validators: result_data = await validator.validate(result_data, run_context) diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py index 053d3a71a8..f3b77e2ae4 100644 --- a/pydantic_ai_slim/pydantic_ai/_output.py +++ b/pydantic_ai_slim/pydantic_ai/_output.py @@ -523,6 +523,7 @@ async def process( self, data: str, run_context: RunContext[AgentDepsT], + validation_context: Any | None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: @@ -610,6 +611,7 @@ async def process( self, data: str | dict[str, Any] | None, run_context: RunContext[AgentDepsT], + validation_context: Any | None, allow_partial: bool = False, 
wrap_validation_errors: bool = True, ) -> OutputDataT: @@ -618,6 +620,7 @@ async def process( Args: data: The output data to validate. run_context: The current run context. + validation_context: Additional Pydantic validation context for the current run. allow_partial: If true, allow partial validation. wrap_validation_errors: If true, wrap the validation errors in a retry message. @@ -628,7 +631,7 @@ async def process( data = _utils.strip_markdown_fences(data) try: - output = self.validate(data, allow_partial) + output = self.validate(data, allow_partial, validation_context) except ValidationError as e: if wrap_validation_errors: m = _messages.RetryPromptPart( @@ -646,12 +649,17 @@ def validate( self, data: str | dict[str, Any] | None, allow_partial: bool = False, + validation_context: Any | None = None, ) -> dict[str, Any]: pyd_allow_partial: Literal['off', 'trailing-strings'] = 'trailing-strings' if allow_partial else 'off' if isinstance(data, str): - return self.validator.validate_json(data or '{}', allow_partial=pyd_allow_partial) + return self.validator.validate_json( + data or '{}', allow_partial=pyd_allow_partial, context=validation_context + ) else: - return self.validator.validate_python(data or {}, allow_partial=pyd_allow_partial) + return self.validator.validate_python( + data or {}, allow_partial=pyd_allow_partial, context=validation_context + ) async def call( self, @@ -771,11 +779,16 @@ async def process( self, data: str, run_context: RunContext[AgentDepsT], + validation_context: Any | None = None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: union_object = await self._union_processor.process( - data, run_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors + data, + run_context, + validation_context, + allow_partial=allow_partial, + wrap_validation_errors=wrap_validation_errors, ) result = union_object.result @@ -791,7 +804,11 @@ async def process( raise return await processor.process( - inner_data, run_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors + inner_data, + run_context, + validation_context, + allow_partial=allow_partial, + wrap_validation_errors=wrap_validation_errors, ) @@ -800,6 +817,7 @@ async def process( self, data: str, run_context: RunContext[AgentDepsT], + validation_context: Any | None = None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: @@ -831,13 +849,14 @@ async def process( self, data: str, run_context: RunContext[AgentDepsT], + validation_context: Any | None = None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: args = {self._str_argument_name: data} data = await execute_traced_output_function(self._function_schema, run_context, args, wrap_validation_errors) - return await super().process(data, run_context, allow_partial, wrap_validation_errors) + return await super().process(data, run_context, validation_context, allow_partial, wrap_validation_errors) @dataclass(init=False) diff --git a/pydantic_ai_slim/pydantic_ai/_tool_manager.py b/pydantic_ai_slim/pydantic_ai/_tool_manager.py index fb7039e2cc..5dede17d6e 100644 --- a/pydantic_ai_slim/pydantic_ai/_tool_manager.py +++ b/pydantic_ai_slim/pydantic_ai/_tool_manager.py @@ -1,11 +1,11 @@ from __future__ import annotations import json -from collections.abc import Iterator +from collections.abc import Callable, Iterator from contextlib import contextmanager from contextvars import ContextVar from dataclasses import dataclass, 
field, replace -from typing import Any, Generic +from typing import Any, Generic, cast from opentelemetry.trace import Tracer from pydantic import ValidationError @@ -31,6 +31,8 @@ class ToolManager(Generic[AgentDepsT]): """The toolset that provides the tools for this run step.""" ctx: RunContext[AgentDepsT] | None = None """The agent run context for a specific run step.""" + validation_ctx: Any | Callable[[RunContext[AgentDepsT]], Any] = None + """Additional Pydantic validation context for the run.""" tools: dict[str, ToolsetTool[AgentDepsT]] | None = None """The cached tools for this run step.""" failed_tools: set[str] = field(default_factory=set) @@ -61,6 +63,7 @@ async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> ToolManager[AgentDe return self.__class__( toolset=self.toolset, ctx=ctx, + validation_ctx=self.validation_ctx, tools=await self.toolset.get_tools(ctx), ) @@ -161,12 +164,18 @@ async def _call_tool( partial_output=allow_partial, ) + validation_ctx = build_validation_context(self.validation_ctx, self.ctx) + pyd_allow_partial = 'trailing-strings' if allow_partial else 'off' validator = tool.args_validator if isinstance(call.args, str): - args_dict = validator.validate_json(call.args or '{}', allow_partial=pyd_allow_partial) + args_dict = validator.validate_json( + call.args or '{}', allow_partial=pyd_allow_partial, context=validation_ctx + ) else: - args_dict = validator.validate_python(call.args or {}, allow_partial=pyd_allow_partial) + args_dict = validator.validate_python( + call.args or {}, allow_partial=pyd_allow_partial, context=validation_ctx + ) result = await self.toolset.call_tool(name, args_dict, ctx, tool) @@ -270,3 +279,14 @@ async def _call_function_tool( ) return tool_result + + +def build_validation_context( + validation_ctx: Any | Callable[[RunContext[AgentDepsT]], Any], run_context: RunContext[AgentDepsT] +) -> Any: + """Build a Pydantic validation context, potentially from the current agent run context.""" + if callable(validation_ctx): + fn = cast(Callable[[RunContext[AgentDepsT]], Any], validation_ctx) + return fn(run_context) + else: + return validation_ctx diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 4cd353b44a..3d16a64488 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -147,6 +147,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]): _prepare_output_tools: ToolsPrepareFunc[AgentDepsT] | None = dataclasses.field(repr=False) _max_result_retries: int = dataclasses.field(repr=False) _max_tool_retries: int = dataclasses.field(repr=False) + _validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = dataclasses.field(repr=False) _event_stream_handler: EventStreamHandler[AgentDepsT] | None = dataclasses.field(repr=False) @@ -166,6 +167,7 @@ def __init__( name: str | None = None, model_settings: ModelSettings | None = None, retries: int = 1, + validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (), builtin_tools: Sequence[AbstractBuiltinTool] = (), @@ -192,6 +194,7 @@ def __init__( name: str | None = None, model_settings: ModelSettings | None = None, retries: int = 1, + validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (), builtin_tools: 
Sequence[AbstractBuiltinTool] = (), @@ -216,6 +219,7 @@ def __init__( name: str | None = None, model_settings: ModelSettings | None = None, retries: int = 1, + validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (), builtin_tools: Sequence[AbstractBuiltinTool] = (), @@ -249,6 +253,7 @@ def __init__( model_settings: Optional model request settings to use for this agent's runs, by default. retries: The default number of retries to allow for tool calls and output validation, before raising an error. For model request retries, see the [HTTP Request Retries](../retries.md) documentation. + validation_context: Additional validation context used to validate all outputs. output_retries: The maximum number of retries to allow for output validation, defaults to `retries`. tools: Tools to register with the agent, you can also register tools via the decorators [`@agent.tool`][pydantic_ai.Agent.tool] and [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain]. @@ -314,6 +319,8 @@ def __init__( self._max_result_retries = output_retries if output_retries is not None else retries self._max_tool_retries = retries + self._validation_context = validation_context + self._builtin_tools = builtin_tools self._prepare_tools = prepare_tools @@ -562,7 +569,7 @@ async def main(): output_toolset.max_retries = self._max_result_retries output_toolset.output_validators = output_validators toolset = self._get_toolset(output_toolset=output_toolset, additional_toolsets=toolsets) - tool_manager = ToolManager[AgentDepsT](toolset) + tool_manager = ToolManager[AgentDepsT](toolset, validation_ctx=self._validation_context) # Build the graph graph = _agent_graph.build_agent_graph(self.name, self._deps_type, output_type_) diff --git a/pydantic_ai_slim/pydantic_ai/result.py b/pydantic_ai_slim/pydantic_ai/result.py index c6b59ec796..140c8304b0 100644 --- a/pydantic_ai_slim/pydantic_ai/result.py +++ b/pydantic_ai_slim/pydantic_ai/result.py @@ -18,7 +18,7 @@ TextOutputSchema, ) from ._run_context import AgentDepsT, RunContext -from ._tool_manager import ToolManager +from ._tool_manager import ToolManager, build_validation_context from .messages import ModelResponseStreamEvent from .output import ( DeferredToolRequests, @@ -197,8 +197,10 @@ async def validate_response_output( # not part of the final result output, so we reset the accumulated text text = '' + validation_context = build_validation_context(self._tool_manager.validation_ctx, self._run_ctx) + result_data = await text_processor.process( - text, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False + text, self._run_ctx, validation_context, allow_partial=allow_partial, wrap_validation_errors=False ) for validator in self._output_validators: result_data = await validator.validate( From af7b1937895765022dde214e17b2934c275b11c3 Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Sun, 16 Nov 2025 17:28:51 +0100 Subject: [PATCH 02/15] Test agent validation context Add tests involving the new 'validation context' for: - Pydantic model as the output type - Tool, native and prompted output - Tool calling - Output function - Output validator --- tests/test_validation_context.py | 122 +++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 tests/test_validation_context.py diff --git a/tests/test_validation_context.py b/tests/test_validation_context.py new file mode 100644 index 0000000000..abd5de8012 --- /dev/null +++ 
b/tests/test_validation_context.py @@ -0,0 +1,122 @@ +from dataclasses import dataclass + +import pytest +from inline_snapshot import snapshot +from pydantic import BaseModel, ValidationInfo, field_validator + +from pydantic_ai import ( + Agent, + ModelMessage, + ModelResponse, + NativeOutput, + PromptedOutput, + RunContext, + TextPart, + ToolCallPart, + ToolOutput, +) +from pydantic_ai._output import OutputSpec +from pydantic_ai.models.function import AgentInfo, FunctionModel + + +class Value(BaseModel): + x: int + + @field_validator('x') + def increment_value(cls, value: int, info: ValidationInfo): + return value + (info.context or 0) + + +@dataclass +class Deps: + increment: int + + +@pytest.mark.parametrize( + 'output_type', + [ + Value, + ToolOutput(Value), + NativeOutput(Value), + PromptedOutput(Value), + ], + ids=[ + 'Value', + 'ToolOutput(Value)', + 'NativeOutput(Value)', + 'PromptedOutput(Value)', + ], +) +def test_agent_output_with_validation_context(output_type: OutputSpec[Value]): + """Test that the output is validated using the validation context.""" + + def mock_llm(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: + if isinstance(output_type, ToolOutput): + return ModelResponse(parts=[ToolCallPart(tool_name='final_result', args={'x': 0})]) + else: + text = Value(x=0).model_dump_json() + return ModelResponse(parts=[TextPart(content=text)]) + + agent = Agent( + FunctionModel(mock_llm), + output_type=output_type, + deps_type=Deps, + validation_context=lambda ctx: ctx.deps.increment, + ) + + result = agent.run_sync('', deps=Deps(increment=10)) + assert result.output.x == snapshot(10) + + +def test_agent_tool_call_with_validation_context(): + """Test that the argument passed to the tool call is validated using the validation context.""" + + agent = Agent( + 'test', + deps_type=Deps, + validation_context=lambda ctx: ctx.deps.increment, + ) + + @agent.tool + def get_value(ctx: RunContext[Deps], v: Value) -> int: + # NOTE: The test agent calls this tool with Value(x=0); the `increment_value` field validator should then have applied the increment from the validation context + assert v.x == ctx.deps.increment + return v.x + + result = agent.run_sync('', deps=Deps(increment=10)) + assert result.output == snapshot('{"get_value":10}') + + +def test_agent_output_function_with_validation_context(): + """Test that the argument passed to the output function is validated using the validation context.""" + + def get_value(v: Value) -> int: + return v.x + + agent = Agent( + 'test', + output_type=get_value, + deps_type=Deps, + validation_context=lambda ctx: ctx.deps.increment, + ) + + result = agent.run_sync('', deps=Deps(increment=10)) + assert result.output == snapshot(10) + + +def test_agent_output_validator_with_validation_context(): + """Test that the argument passed to the output validator is validated using the validation context.""" + + agent = Agent( + 'test', + output_type=Value, + deps_type=Deps, + validation_context=lambda ctx: ctx.deps.increment, + ) + + @agent.output_validator + def identity(ctx: RunContext[Deps], v: Value) -> Value: + return v + + result = agent.run_sync('', deps=Deps(increment=10)) + assert result.output.x == snapshot(10) From 516e30cfc8c23387761e95f658bbdbbd8461354a Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Tue, 18 Nov 2025 22:02:41 +0100 Subject: [PATCH 03/15] feedback: Link Pydantic doc --- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 3d16a64488..e813860b5c 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -253,7 +253,7 @@ def __init__( model_settings: Optional model request settings to use for this agent's runs, by default. retries: The default number of retries to allow for tool calls and output validation, before raising an error. For model request retries, see the [HTTP Request Retries](../retries.md) documentation. - validation_context: Additional validation context used to validate all outputs. + validation_context: Additional [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) used to validate all outputs. output_retries: The maximum number of retries to allow for output validation, defaults to `retries`. tools: Tools to register with the agent, you can also register tools via the decorators [`@agent.tool`][pydantic_ai.Agent.tool] and [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain]. From a74d95d115be3898932a9d6ff22249e8dd3955fe Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Tue, 18 Nov 2025 22:03:49 +0100 Subject: [PATCH 04/15] feedback: Remove unnecessary arg in _handle_text_response --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index de72b1da2b..1a3d9ac62a 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -605,9 +605,7 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa text = '' # pragma: no cover if text: try: - self._next_node = await self._handle_text_response( - ctx, ctx.deps.tool_manager.validation_ctx, text, text_processor - ) + self._next_node = await self._handle_text_response(ctx, text, text_processor) return except ToolRetryError: # pragma: no cover # If the text from the previous response was invalid, ignore it. 
@@ -671,9 +669,7 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa if text_processor := output_schema.text_processor: if text: - self._next_node = await self._handle_text_response( - ctx, ctx.deps.tool_manager.validation_ctx, text, text_processor - ) + self._next_node = await self._handle_text_response(ctx, text, text_processor) return alternatives.insert(0, 'return text') @@ -735,14 +731,13 @@ async def _handle_tool_calls( async def _handle_text_response( self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]], - validation_ctx: Any | Callable[[RunContext[DepsT]], Any], text: str, text_processor: _output.BaseOutputProcessor[NodeRunEndT], ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]: run_context = build_run_context(ctx) - validation_context = build_validation_context(validation_ctx, run_context) + validation_context = build_validation_context(ctx.deps.tool_manager.validation_ctx, run_context) - result_data = await text_processor.process(text, run_context, validation_context) + result_data = await text_processor.process(text, run_context=run_context, validation_context=validation_context) for validator in ctx.deps.output_validators: result_data = await validator.validate(result_data, run_context) From b80c4728701904a2f36fa365235a49acf7cd0e65 Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Tue, 18 Nov 2025 22:05:16 +0100 Subject: [PATCH 05/15] feedback: `None` default for the val ctx and require kwargs after `data` in process functions --- pydantic_ai_slim/pydantic_ai/_output.py | 25 ++++++++++++++++++------- pydantic_ai_slim/pydantic_ai/result.py | 6 +++++- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py index f3b77e2ae4..b2de2e3a88 100644 --- a/pydantic_ai_slim/pydantic_ai/_output.py +++ b/pydantic_ai_slim/pydantic_ai/_output.py @@ -522,8 +522,9 @@ class BaseOutputProcessor(ABC, Generic[OutputDataT]): async def process( self, data: str, + *, run_context: RunContext[AgentDepsT], - validation_context: Any | None, + validation_context: Any | None = None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: @@ -610,8 +611,9 @@ def __init__( async def process( self, data: str | dict[str, Any] | None, + *, run_context: RunContext[AgentDepsT], - validation_context: Any | None, + validation_context: Any | None = None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: @@ -778,6 +780,7 @@ def __init__( async def process( self, data: str, + *, run_context: RunContext[AgentDepsT], validation_context: Any | None = None, allow_partial: bool = False, @@ -785,8 +788,8 @@ async def process( ) -> OutputDataT: union_object = await self._union_processor.process( data, - run_context, - validation_context, + run_context=run_context, + validation_context=validation_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors, ) @@ -805,8 +808,8 @@ async def process( return await processor.process( inner_data, - run_context, - validation_context, + run_context=run_context, + validation_context=validation_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors, ) @@ -816,6 +819,7 @@ class TextOutputProcessor(BaseOutputProcessor[OutputDataT]): async def process( self, data: str, + *, run_context: RunContext[AgentDepsT], validation_context: Any | None = None, allow_partial: bool = False, @@ -848,6 +852,7 @@ def 
__init__( async def process( self, data: str, + *, run_context: RunContext[AgentDepsT], validation_context: Any | None = None, allow_partial: bool = False, @@ -856,7 +861,13 @@ async def process( args = {self._str_argument_name: data} data = await execute_traced_output_function(self._function_schema, run_context, args, wrap_validation_errors) - return await super().process(data, run_context, validation_context, allow_partial, wrap_validation_errors) + return await super().process( + data, + run_context=run_context, + validation_context=validation_context, + allow_partial=allow_partial, + wrap_validation_errors=wrap_validation_errors, + ) @dataclass(init=False) diff --git a/pydantic_ai_slim/pydantic_ai/result.py b/pydantic_ai_slim/pydantic_ai/result.py index 140c8304b0..6d91d1bc39 100644 --- a/pydantic_ai_slim/pydantic_ai/result.py +++ b/pydantic_ai_slim/pydantic_ai/result.py @@ -200,7 +200,11 @@ async def validate_response_output( validation_context = build_validation_context(self._tool_manager.validation_ctx, self._run_ctx) result_data = await text_processor.process( - text, self._run_ctx, validation_context, allow_partial=allow_partial, wrap_validation_errors=False + text, + run_context=self._run_ctx, + validation_context=validation_context, + allow_partial=allow_partial, + wrap_validation_errors=False, ) for validator in self._output_validators: result_data = await validator.validate( From d032feeb81b0271b62e6bbd5dc4ee19b20c72c76 Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Wed, 19 Nov 2025 20:19:03 +0100 Subject: [PATCH 06/15] feedback: Shove the validation context inside the RunContext --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 24 ++++++++++++++++--- pydantic_ai_slim/pydantic_ai/_output.py | 7 +----- pydantic_ai_slim/pydantic_ai/_run_context.py | 4 +++- pydantic_ai_slim/pydantic_ai/_tool_manager.py | 24 ++++--------------- .../pydantic_ai/agent/__init__.py | 3 ++- pydantic_ai_slim/pydantic_ai/result.py | 5 +--- 6 files changed, 32 insertions(+), 35 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 1a3d9ac62a..e78bfb5962 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -18,7 +18,7 @@ from pydantic_ai._function_schema import _takes_ctx as is_takes_ctx # type: ignore from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION -from pydantic_ai._tool_manager import ToolManager, build_validation_context +from pydantic_ai._tool_manager import ToolManager from pydantic_ai._utils import dataclasses_no_defaults_repr, get_union_args, is_async_callable, run_in_executor from pydantic_ai.builtin_tools import AbstractBuiltinTool from pydantic_graph import BaseNode, GraphRunContext @@ -144,6 +144,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]): output_schema: _output.OutputSchema[OutputDataT] output_validators: list[_output.OutputValidator[DepsT, OutputDataT]] + validation_context: Any | Callable[[RunContext[DepsT]], Any] history_processors: Sequence[HistoryProcessor[DepsT]] @@ -484,6 +485,8 @@ async def _prepare_request( ctx.state.run_step += 1 run_context = build_run_context(ctx) + validation_context = build_validation_context(ctx.deps.validation_context, run_context) + run_context = replace(run_context, validation_context=validation_context) # This will raise errors for any tool name conflicts ctx.deps.tool_manager = await ctx.deps.tool_manager.for_run_step(run_context) @@ -735,9 +738,11 @@ async def _handle_text_response( 
text_processor: _output.BaseOutputProcessor[NodeRunEndT], ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]: run_context = build_run_context(ctx) - validation_context = build_validation_context(ctx.deps.tool_manager.validation_ctx, run_context) + validation_context = build_validation_context(ctx.deps.validation_context, run_context) - result_data = await text_processor.process(text, run_context=run_context, validation_context=validation_context) + run_context = replace(run_context, validation_context=validation_context) + + result_data = await text_processor.process(text, run_context=run_context) for validator in ctx.deps.output_validators: result_data = await validator.validate(result_data, run_context) @@ -788,6 +793,7 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT usage=ctx.state.usage, prompt=ctx.deps.prompt, messages=ctx.state.message_history, + validation_context=None, tracer=ctx.deps.tracer, trace_include_content=ctx.deps.instrumentation_settings is not None and ctx.deps.instrumentation_settings.include_content, @@ -799,6 +805,18 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT ) +def build_validation_context( + validation_ctx: Any | Callable[[RunContext[DepsT]], Any], + run_context: RunContext[DepsT], +) -> Any: + """Build a Pydantic validation context, potentially from the current agent run context.""" + if callable(validation_ctx): + fn = cast(Callable[[RunContext[DepsT]], Any], validation_ctx) + return fn(run_context) + else: + return validation_ctx + + async def process_tool_calls( # noqa: C901 tool_manager: ToolManager[DepsT], tool_calls: list[_messages.ToolCallPart], diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py index b2de2e3a88..b53255b6d9 100644 --- a/pydantic_ai_slim/pydantic_ai/_output.py +++ b/pydantic_ai_slim/pydantic_ai/_output.py @@ -524,7 +524,6 @@ async def process( data: str, *, run_context: RunContext[AgentDepsT], - validation_context: Any | None = None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: @@ -613,7 +612,6 @@ async def process( data: str | dict[str, Any] | None, *, run_context: RunContext[AgentDepsT], - validation_context: Any | None = None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: @@ -633,7 +631,7 @@ async def process( data = _utils.strip_markdown_fences(data) try: - output = self.validate(data, allow_partial, validation_context) + output = self.validate(data, allow_partial, run_context.validation_context) except ValidationError as e: if wrap_validation_errors: m = _messages.RetryPromptPart( @@ -782,14 +780,12 @@ async def process( data: str, *, run_context: RunContext[AgentDepsT], - validation_context: Any | None = None, allow_partial: bool = False, wrap_validation_errors: bool = True, ) -> OutputDataT: union_object = await self._union_processor.process( data, run_context=run_context, - validation_context=validation_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors, ) @@ -809,7 +805,6 @@ async def process( return await processor.process( inner_data, run_context=run_context, - validation_context=validation_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors, ) diff --git a/pydantic_ai_slim/pydantic_ai/_run_context.py b/pydantic_ai_slim/pydantic_ai/_run_context.py index 4f9b253767..edee68f975 100644 --- a/pydantic_ai_slim/pydantic_ai/_run_context.py +++ 
b/pydantic_ai_slim/pydantic_ai/_run_context.py @@ -3,7 +3,7 @@ import dataclasses from collections.abc import Sequence from dataclasses import field -from typing import TYPE_CHECKING, Generic +from typing import TYPE_CHECKING, Any, Generic from opentelemetry.trace import NoOpTracer, Tracer from typing_extensions import TypeVar @@ -38,6 +38,8 @@ class RunContext(Generic[RunContextAgentDepsT]): """The original user prompt passed to the run.""" messages: list[_messages.ModelMessage] = field(default_factory=list) """Messages exchanged in the conversation so far.""" + validation_context: Any = None + """Additional Pydantic validation context for the run outputs.""" tracer: Tracer = field(default_factory=NoOpTracer) """The tracer to use for tracing the run.""" trace_include_content: bool = False diff --git a/pydantic_ai_slim/pydantic_ai/_tool_manager.py b/pydantic_ai_slim/pydantic_ai/_tool_manager.py index 5dede17d6e..9a9f93e1ff 100644 --- a/pydantic_ai_slim/pydantic_ai/_tool_manager.py +++ b/pydantic_ai_slim/pydantic_ai/_tool_manager.py @@ -1,11 +1,11 @@ from __future__ import annotations import json -from collections.abc import Callable, Iterator +from collections.abc import Iterator from contextlib import contextmanager from contextvars import ContextVar from dataclasses import dataclass, field, replace -from typing import Any, Generic, cast +from typing import Any, Generic from opentelemetry.trace import Tracer from pydantic import ValidationError @@ -31,8 +31,6 @@ class ToolManager(Generic[AgentDepsT]): """The toolset that provides the tools for this run step.""" ctx: RunContext[AgentDepsT] | None = None """The agent run context for a specific run step.""" - validation_ctx: Any | Callable[[RunContext[AgentDepsT]], Any] = None - """Additional Pydantic validation context for the run.""" tools: dict[str, ToolsetTool[AgentDepsT]] | None = None """The cached tools for this run step.""" failed_tools: set[str] = field(default_factory=set) @@ -63,7 +61,6 @@ async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> ToolManager[AgentDe return self.__class__( toolset=self.toolset, ctx=ctx, - validation_ctx=self.validation_ctx, tools=await self.toolset.get_tools(ctx), ) @@ -164,17 +161,15 @@ async def _call_tool( partial_output=allow_partial, ) - validation_ctx = build_validation_context(self.validation_ctx, self.ctx) - pyd_allow_partial = 'trailing-strings' if allow_partial else 'off' validator = tool.args_validator if isinstance(call.args, str): args_dict = validator.validate_json( - call.args or '{}', allow_partial=pyd_allow_partial, context=validation_ctx + call.args or '{}', allow_partial=pyd_allow_partial, context=ctx.validation_context ) else: args_dict = validator.validate_python( - call.args or {}, allow_partial=pyd_allow_partial, context=validation_ctx + call.args or {}, allow_partial=pyd_allow_partial, context=ctx.validation_context ) result = await self.toolset.call_tool(name, args_dict, ctx, tool) @@ -279,14 +274,3 @@ async def _call_function_tool( ) return tool_result - - -def build_validation_context( - validation_ctx: Any | Callable[[RunContext[AgentDepsT]], Any], run_context: RunContext[AgentDepsT] -) -> Any: - """Build a Pydantic validation context, potentially from the current agent run context.""" - if callable(validation_ctx): - fn = cast(Callable[[RunContext[AgentDepsT]], Any], validation_ctx) - return fn(run_context) - else: - return validation_ctx diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 
e813860b5c..80bd223b1b 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -569,7 +569,7 @@ async def main(): output_toolset.max_retries = self._max_result_retries output_toolset.output_validators = output_validators toolset = self._get_toolset(output_toolset=output_toolset, additional_toolsets=toolsets) - tool_manager = ToolManager[AgentDepsT](toolset, validation_ctx=self._validation_context) + tool_manager = ToolManager[AgentDepsT](toolset) # Build the graph graph = _agent_graph.build_agent_graph(self.name, self._deps_type, output_type_) @@ -619,6 +619,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: end_strategy=self.end_strategy, output_schema=output_schema, output_validators=output_validators, + validation_context=self._validation_context, history_processors=self.history_processors, builtin_tools=[*self._builtin_tools, *(builtin_tools or [])], tool_manager=tool_manager, diff --git a/pydantic_ai_slim/pydantic_ai/result.py b/pydantic_ai_slim/pydantic_ai/result.py index 6d91d1bc39..88bfe407fa 100644 --- a/pydantic_ai_slim/pydantic_ai/result.py +++ b/pydantic_ai_slim/pydantic_ai/result.py @@ -18,7 +18,7 @@ TextOutputSchema, ) from ._run_context import AgentDepsT, RunContext -from ._tool_manager import ToolManager, build_validation_context +from ._tool_manager import ToolManager from .messages import ModelResponseStreamEvent from .output import ( DeferredToolRequests, @@ -197,12 +197,9 @@ async def validate_response_output( # not part of the final result output, so we reset the accumulated text text = '' - validation_context = build_validation_context(self._tool_manager.validation_ctx, self._run_ctx) - result_data = await text_processor.process( text, run_context=self._run_ctx, - validation_context=validation_context, allow_partial=allow_partial, wrap_validation_errors=False, ) From 2e5ea2b6d3ecb59b5d938dd9ae0ac2c6847e589c Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Wed, 19 Nov 2025 21:51:33 +0100 Subject: [PATCH 07/15] Test that the validation context is updated as the deps are mutated --- tests/test_validation_context.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/test_validation_context.py b/tests/test_validation_context.py index abd5de8012..ae475859ee 100644 --- a/tests/test_validation_context.py +++ b/tests/test_validation_context.py @@ -120,3 +120,28 @@ def identity(ctx: RunContext[Deps], v: Value) -> Value: result = agent.run_sync('', deps=Deps(increment=10)) assert result.output.x == snapshot(10) + + +def test_agent_output_validator_with_intermediary_deps_change_and_validation_context(): + """Test that the validation context is updated as run dependencies are mutated.""" + + agent = Agent( + 'test', + output_type=Value, + deps_type=Deps, + validation_context=lambda ctx: ctx.deps.increment, + ) + + @agent.tool + def bump_increment(ctx: RunContext[Deps]): + assert ctx.validation_context == snapshot(10) # validation ctx was first computed using the original deps + ctx.deps.increment += 5 # update the deps + + @agent.output_validator + def identity(ctx: RunContext[Deps], v: Value) -> Value: + assert ctx.validation_context == snapshot(15) # validation ctx was re-computed after deps update from tool call + + return v + + result = agent.run_sync('', deps=Deps(increment=10)) + assert result.output.x == snapshot(15) From 973cb3438b0dfbffef2afc3a103ca7ec9baa98c4 Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Fri, 21 Nov 2025 16:51:37 +0100 
Subject: [PATCH 08/15] feedback: fix docstring --- pydantic_ai_slim/pydantic_ai/_output.py | 1 - pydantic_ai_slim/pydantic_ai/_run_context.py | 2 +- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py index b53255b6d9..24df1025bd 100644 --- a/pydantic_ai_slim/pydantic_ai/_output.py +++ b/pydantic_ai_slim/pydantic_ai/_output.py @@ -620,7 +620,6 @@ async def process( Args: data: The output data to validate. run_context: The current run context. - validation_context: Additional Pydantic validation context for the current run. allow_partial: If true, allow partial validation. wrap_validation_errors: If true, wrap the validation errors in a retry message. diff --git a/pydantic_ai_slim/pydantic_ai/_run_context.py b/pydantic_ai_slim/pydantic_ai/_run_context.py index edee68f975..6acf05ba18 100644 --- a/pydantic_ai_slim/pydantic_ai/_run_context.py +++ b/pydantic_ai_slim/pydantic_ai/_run_context.py @@ -39,7 +39,7 @@ class RunContext(Generic[RunContextAgentDepsT]): messages: list[_messages.ModelMessage] = field(default_factory=list) """Messages exchanged in the conversation so far.""" validation_context: Any = None - """Additional Pydantic validation context for the run outputs.""" + """Pydantic [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) for the run outputs.""" tracer: Tracer = field(default_factory=NoOpTracer) """The tracer to use for tracing the run.""" trace_include_content: bool = False diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 80bd223b1b..b1ef5a71d0 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -253,7 +253,7 @@ def __init__( model_settings: Optional model request settings to use for this agent's runs, by default. retries: The default number of retries to allow for tool calls and output validation, before raising an error. For model request retries, see the [HTTP Request Retries](../retries.md) documentation. - validation_context: Additional [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) used to validate all outputs. + validation_context: Pydantic [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) used to validate all outputs. output_retries: The maximum number of retries to allow for output validation, defaults to `retries`. tools: Tools to register with the agent, you can also register tools via the decorators [`@agent.tool`][pydantic_ai.Agent.tool] and [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain]. 
From 492bd2b58f58f9d5dca7a3c03124cab2adb54fe9 Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Fri, 21 Nov 2025 16:52:57 +0100 Subject: [PATCH 09/15] feedback: move validation context 'build + replace' inside build_run_context --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index e78bfb5962..a6d493679b 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -485,8 +485,6 @@ async def _prepare_request( ctx.state.run_step += 1 run_context = build_run_context(ctx) - validation_context = build_validation_context(ctx.deps.validation_context, run_context) - run_context = replace(run_context, validation_context=validation_context) # This will raise errors for any tool name conflicts ctx.deps.tool_manager = await ctx.deps.tool_manager.for_run_step(run_context) @@ -738,9 +736,6 @@ async def _handle_text_response( text_processor: _output.BaseOutputProcessor[NodeRunEndT], ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]: run_context = build_run_context(ctx) - validation_context = build_validation_context(ctx.deps.validation_context, run_context) - - run_context = replace(run_context, validation_context=validation_context) result_data = await text_processor.process(text, run_context=run_context) @@ -787,7 +782,7 @@ async def run( def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, Any]]) -> RunContext[DepsT]: """Build a `RunContext` object from the current agent graph run context.""" - return RunContext[DepsT]( + run_context = RunContext[DepsT]( deps=ctx.deps.user_deps, model=ctx.deps.model, usage=ctx.state.usage, @@ -803,6 +798,9 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT run_step=ctx.state.run_step, run_id=ctx.state.run_id, ) + validation_context = build_validation_context(ctx.deps.validation_context, run_context) + run_context = replace(run_context, validation_context=validation_context) + return run_context def build_validation_context( From 04e209603e94b5523e97172976475341be455648 Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Tue, 25 Nov 2025 09:00:24 +0100 Subject: [PATCH 10/15] Add documentation for the agent's validation context --- docs/output.md | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/docs/output.md b/docs/output.md index 182a753944..0b61334e1c 100644 --- a/docs/output.md +++ b/docs/output.md @@ -385,6 +385,62 @@ print(repr(result.output)) _(This example is complete, it can be run "as is")_ +### Validation context {#validation-context} + +Some validation relies on an extra Pydantic [context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) object. You can pass such an object to an `Agent` at definition-time via its [`validation_context`][pydantic_ai.Agent.__init__] parameter. + +This validation context is used for the validation of _all_ structured outputs. It can be either: + +- the context object itself (`Any`), used as-is to validate outputs, or +- a function that takes the [`RunContext`][pydantic_ai.tools.RunContext] and returns a context object (`Any`). This function will be called automatically before each validation, allowing you to build a dynamic validation context. + +!!! 
warning "Don't confuse this _validation_ context with the _LLM_ context" + This Pydantic [context](https://docs.pydantic.dev/latest/concepts/validators/#validation-data) object is only used internally by Pydantic AI for output validation. In particular, it is **not** included in the prompts or messages sent to the language model. + +```python {title="validation_context.py"} +from dataclasses import dataclass + +from pydantic import BaseModel, ValidationInfo, field_validator + +from pydantic_ai import Agent + + +class Value(BaseModel): + x: int + + @field_validator('x') + def increment_value(cls, value: int, info: ValidationInfo): + return value + (info.context or 0) + + +agent = Agent( + 'google-gla:gemini-2.5-flash', + output_type=Value, + validation_context=10, +) +result = agent.run_sync('Give me a value of 5.') +print(repr(result.output)) # 5 from the model + 10 from the validation context +#> Value(x=15) + + +@dataclass +class Deps: + increment: int + + +agent = Agent( + 'google-gla:gemini-2.5-flash', + output_type=Value, + deps_type=Deps, + validation_context=lambda ctx: ctx.deps.increment, +) +result = agent.run_sync('Give me a value of 5.', deps=Deps(increment=10)) +print(repr(result.output)) # 5 from the model + 10 from the validation context +#> Value(x=15) +``` + +_(This example is complete, it can be run "as is")_ + ### Custom JSON schema {#structured-dict} If it's not feasible to define your desired structured output object using a Pydantic `BaseModel`, dataclass, or `TypedDict`, for example when you get a JSON schema from an external source or generate it dynamically, you can use the [`StructuredDict()`][pydantic_ai.output.StructuredDict] helper function to generate a `dict[str, Any]` subclass with a JSON schema attached that Pydantic AI will pass to the model. From 348b411efcb637f66e790abcd4fae7cd8d93835d Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Tue, 25 Nov 2025 12:07:10 +0100 Subject: [PATCH 11/15] Fix the new doc-example test --- tests/test_examples.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_examples.py b/tests/test_examples.py index 407816b60a..b4208a045c 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -506,6 +506,7 @@ async def call_tool( 'What is a banana?': ToolCallPart(tool_name='return_fruit', args={'name': 'banana', 'color': 'yellow'}), 'What is a Ford Explorer?': '{"result": {"kind": "Vehicle", "data": {"name": "Ford Explorer", "wheels": 4}}}', 'What is a MacBook?': '{"result": {"kind": "Device", "data": {"name": "MacBook", "kind": "laptop"}}}', + 'Give me a value of 5.': ToolCallPart(tool_name='final_result', args={'x': 5}), 'Write a creative story about space exploration': 'In the year 2157, Captain Maya Chen piloted her spacecraft through the vast expanse of the Andromeda Galaxy. 
As she discovered a planet with crystalline mountains that sang in harmony with the cosmic winds, she realized that space exploration was not just about finding new worlds, but about finding new ways to understand the universe and our place within it.', 'Create a person': ToolCallPart( tool_name='final_result', From 754154331bee0004a17f256d69c54d3987bbd630 Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Wed, 26 Nov 2025 07:58:36 +0100 Subject: [PATCH 12/15] feedback: requires kwargs for self.validate calls --- pydantic_ai_slim/pydantic_ai/_output.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py index 24df1025bd..e3adfbd190 100644 --- a/pydantic_ai_slim/pydantic_ai/_output.py +++ b/pydantic_ai_slim/pydantic_ai/_output.py @@ -630,7 +630,7 @@ async def process( data = _utils.strip_markdown_fences(data) try: - output = self.validate(data, allow_partial, run_context.validation_context) + output = self.validate(data, allow_partial=allow_partial, validation_context=run_context.validation_context) except ValidationError as e: if wrap_validation_errors: m = _messages.RetryPromptPart( @@ -647,6 +647,7 @@ async def process( def validate( self, data: str | dict[str, Any] | None, + *, allow_partial: bool = False, validation_context: Any | None = None, ) -> dict[str, Any]: From cb8bb55aabcbd9f6d030e79d6c935ec983ccddbd Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Wed, 26 Nov 2025 07:54:15 +0100 Subject: [PATCH 13/15] feedback(docstrings): mention tool args --- pydantic_ai_slim/pydantic_ai/_run_context.py | 2 +- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_run_context.py b/pydantic_ai_slim/pydantic_ai/_run_context.py index 6acf05ba18..b605bd8b54 100644 --- a/pydantic_ai_slim/pydantic_ai/_run_context.py +++ b/pydantic_ai_slim/pydantic_ai/_run_context.py @@ -39,7 +39,7 @@ class RunContext(Generic[RunContextAgentDepsT]): messages: list[_messages.ModelMessage] = field(default_factory=list) """Messages exchanged in the conversation so far.""" validation_context: Any = None - """Pydantic [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) for the run outputs.""" + """Pydantic [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) for tool args and run outputs.""" tracer: Tracer = field(default_factory=NoOpTracer) """The tracer to use for tracing the run.""" trace_include_content: bool = False diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index b1ef5a71d0..c8208ac9e6 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -253,7 +253,7 @@ def __init__( model_settings: Optional model request settings to use for this agent's runs, by default. retries: The default number of retries to allow for tool calls and output validation, before raising an error. For model request retries, see the [HTTP Request Retries](../retries.md) documentation. - validation_context: Pydantic [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) used to validate all outputs. + validation_context: Pydantic [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) used to validate tool arguments and outputs. 
output_retries: The maximum number of retries to allow for output validation, defaults to `retries`. tools: Tools to register with the agent, you can also register tools via the decorators [`@agent.tool`][pydantic_ai.Agent.tool] and [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain]. From da9e3e83fe9efd17a74c82e0090105698d9a20df Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Wed, 26 Nov 2025 07:51:48 +0100 Subject: [PATCH 14/15] feedback(doc): move 'validation context' below 'custom json' --- docs/output.md | 62 +++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/docs/output.md b/docs/output.md index 0b61334e1c..f6b1f8e745 100644 --- a/docs/output.md +++ b/docs/output.md @@ -385,6 +385,37 @@ print(repr(result.output)) _(This example is complete, it can be run "as is")_ +### Custom JSON schema {#structured-dict} + +If it's not feasible to define your desired structured output object using a Pydantic `BaseModel`, dataclass, or `TypedDict`, for example when you get a JSON schema from an external source or generate it dynamically, you can use the [`StructuredDict()`][pydantic_ai.output.StructuredDict] helper function to generate a `dict[str, Any]` subclass with a JSON schema attached that Pydantic AI will pass to the model. + +Note that Pydantic AI will not perform any validation of the received JSON object and it's up to the model to correctly interpret the schema and any constraints expressed in it, like required fields or integer value ranges. + +The output type will be a `dict[str, Any]` and it's up to your code to defensively read from it in case the model made a mistake. You can use an [output validator](#output-validator-functions) to reflect validation errors back to the model and get it to try again. + +Along with the JSON schema, you can optionally pass `name` and `description` arguments to provide additional context to the model: + +```python +from pydantic_ai import Agent, StructuredDict + +HumanDict = StructuredDict( + { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'age': {'type': 'integer'} + }, + 'required': ['name', 'age'] + }, + name='Human', + description='A human with a name and age', +) + +agent = Agent('openai:gpt-5', output_type=HumanDict) +result = agent.run_sync('Create a person') +print(result.output) +#> {'name': 'John Doe', 'age': 30} +``` + ### Validation context {#validation-context} Some validation relies on an extra Pydantic [context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) object. You can pass such an object to an `Agent` at definition-time via its [`validation_context`][pydantic_ai.Agent.__init__] parameter. @@ -441,37 +472,6 @@ print(repr(result.output)) # 5 from the model + 10 from the validation context _(This example is complete, it can be run "as is")_ -### Custom JSON schema {#structured-dict} - -If it's not feasible to define your desired structured output object using a Pydantic `BaseModel`, dataclass, or `TypedDict`, for example when you get a JSON schema from an external source or generate it dynamically, you can use the [`StructuredDict()`][pydantic_ai.output.StructuredDict] helper function to generate a `dict[str, Any]` subclass with a JSON schema attached that Pydantic AI will pass to the model. - -Note that Pydantic AI will not perform any validation of the received JSON object and it's up to the model to correctly interpret the schema and any constraints expressed in it, like required fields or integer value ranges.
- -The output type will be a `dict[str, Any]` and it's up to your code to defensively read from it in case the model made a mistake. You can use an [output validator](#output-validator-functions) to reflect validation errors back to the model and get it to try again. - -Along with the JSON schema, you can optionally pass `name` and `description` arguments to provide additional context to the model: - -```python -from pydantic_ai import Agent, StructuredDict - -HumanDict = StructuredDict( - { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'age': {'type': 'integer'} - }, - 'required': ['name', 'age'] - }, - name='Human', - description='A human with a name and age', -) - -agent = Agent('openai:gpt-5', output_type=HumanDict) -result = agent.run_sync('Create a person') -print(result.output) -#> {'name': 'John Doe', 'age': 30} -``` - ### Output validators {#output-validator-functions} Some validation is inconvenient or impossible to do in Pydantic validators, in particular when the validation requires IO and is asynchronous. Pydantic AI provides a way to add validation functions via the [`agent.output_validator`][pydantic_ai.Agent.output_validator] decorator. From a01aea150eddbcce132a3d1f85b7137786691793 Mon Sep 17 00:00:00 2001 From: NicolasPllr1 Date: Wed, 26 Nov 2025 08:28:40 +0100 Subject: [PATCH 15/15] feedback(doc): validation context can influence tool args and output function args --- docs/output.md | 6 +++--- docs/tools-advanced.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/output.md b/docs/output.md index f6b1f8e745..d6d2627faa 100644 --- a/docs/output.md +++ b/docs/output.md @@ -121,7 +121,7 @@ Instead of plain text or structured data, you may want the output of your agent Output functions are similar to [function tools](tools.md), but the model is forced to call one of them, the call ends the agent run, and the result is not passed back to the model. -As with tool functions, output function arguments provided by the model are validated using Pydantic, they can optionally take [`RunContext`][pydantic_ai.tools.RunContext] as the first argument, and they can raise [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] to ask the model to try again with modified arguments (or with a different output type). +As with tool functions, output function arguments provided by the model are validated using Pydantic (validation that can be influenced by a [validation context](#validation-context)), they can optionally take [`RunContext`][pydantic_ai.tools.RunContext] as the first argument, and they can raise [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] to ask the model to try again with modified arguments (or with a different output type). To specify output functions, you set the agent's `output_type` to either a single function (or bound instance method), or a list of functions. The list can also contain other output types like simple scalars or entire Pydantic models. You typically do not want to also register your output function as a tool (using the `@agent.tool` decorator or `tools` argument), as this could confuse the model about which it should be calling. @@ -385,6 +385,37 @@ print(repr(result.output)) _(This example is complete, it can be run "as is")_ @@ -418,9 +418,9 @@ result = agent.run_sync('Create a person') ### Validation context {#validation-context} -Some validation relies on an extra Pydantic [context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) object. You can pass such an object to an `Agent` at definition-time via its [`validation_context`][pydantic_ai.Agent.__init__] parameter. +Some validation relies on an extra Pydantic [context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) object. You can pass such an object to an `Agent` at definition-time via its [`validation_context`][pydantic_ai.Agent.__init__] parameter. It will be used in the validation of both structured outputs and [tool arguments](tools-advanced.md#tool-retries). -This validation context is used for the validation of _all_ structured outputs. It can be either: +This validation context can be either: - the context object itself (`Any`), used as-is to validate outputs, or - a function that takes the [`RunContext`][pydantic_ai.tools.RunContext] and returns a context object (`Any`). This function will be called automatically before each validation, allowing you to build a dynamic validation context. diff --git a/docs/tools-advanced.md b/docs/tools-advanced.md index f01a243c11..6b919c2a52 100644 --- a/docs/tools-advanced.md +++ b/docs/tools-advanced.md @@ -353,7 +353,7 @@ If both per-tool `prepare` and agent-wide `prepare_tools` are used, the per-tool ## Tool Execution and Retries {#tool-retries} -When a tool is executed, its arguments (provided by the LLM) are first validated against the function's signature using Pydantic. If validation fails (e.g., due to incorrect types or missing required arguments), a `ValidationError` is raised, and the framework automatically generates a [`RetryPromptPart`][pydantic_ai.messages.RetryPromptPart] containing the validation details. This prompt is sent back to the LLM, informing it of the error and allowing it to correct the parameters and retry the tool call. +When a tool is executed, its arguments (provided by the LLM) are first validated against the function's signature using Pydantic. Note that the [validation context](output.md#validation-context), if provided, will be used during this validation. If validation fails (e.g., due to incorrect types or missing required arguments), a `ValidationError` is raised, and the framework automatically generates a [`RetryPromptPart`][pydantic_ai.messages.RetryPromptPart] containing the validation details. This prompt is sent back to the LLM, informing it of the error and allowing it to correct the parameters and retry the tool call. Beyond automatic validation errors, the tool's own internal logic can also explicitly request a retry by raising the [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] exception. This is useful for situations where the parameters were technically valid, but an issue occurred during execution (like a transient network error, or the tool determining the initial attempt needs modification).