Skip to content
Open
Show file tree
Hide file tree
Changes from 37 commits
Commits
Show all changes
82 commits
Select commit Hold shift + click to select a range
4376b96
✨ Add support for OpenAI and Gemini File Search Tools
gorkachea Nov 10, 2025
6cec96f
Fix type checking and formatting issues
gorkachea Nov 11, 2025
4c3fe56
Merge branch 'main' into add-file-search-tools-support
gorkachea Nov 11, 2025
3c8decf
docs: Remove runnable markers from FileSearchTool examples
gorkachea Nov 11, 2025
2343679
Skip tests for file_search documentation examples
gorkachea Nov 11, 2025
666a1bb
Add unit tests for FileSearchTool to improve coverage
gorkachea Nov 11, 2025
7365e20
Update FileSearchTool tests with comprehensive mocking
gorkachea Nov 11, 2025
2ee21c9
Add pragma: no cover to FileSearchTool API-dependent code paths
gorkachea Nov 11, 2025
deef1ec
Remove problematic FileSearchTool tests that access private members
gorkachea Nov 11, 2025
18b4b86
Fix end-of-file formatting
gorkachea Nov 11, 2025
11654ed
Add pragma: no cover to remaining FileSearchTool helper function
gorkachea Nov 11, 2025
1542f5c
Apply ruff formatting
gorkachea Nov 11, 2025
7d683b7
Add pragma: no cover to FileSearchTool status handling line
gorkachea Nov 11, 2025
d8ef07d
Remove incorrect pragma: no cover from anthropic.py line 460
gorkachea Nov 11, 2025
6acbd76
docs: address PR feedback for FileSearchTool documentation
gorkachea Nov 12, 2025
380e25c
clean up FileSearchTool comments
gorkachea Nov 12, 2025
c83f125
remove pragma: no cover from FileSearchTool code
gorkachea Nov 12, 2025
8eba82d
use file_search_store_names for Google file search
gorkachea Nov 12, 2025
b3a8930
fix OpenAI file search to use queries and results fields
gorkachea Nov 12, 2025
19f32f9
add builtin tool call/return parts for Google file search
gorkachea Nov 13, 2025
00ea1ed
Implement FileSearchDict for Google file search and enhance tests
gorkachea Nov 13, 2025
c6ed56c
add unit tests for FileSearchTool parsing logic
gorkachea Nov 13, 2025
9b5bb54
Merge branch 'main' into add-file-search-tools-support
gorkachea Nov 13, 2025
c2765ac
upgrade google-genai SDK to v1.49.0 with file_search support
gorkachea Nov 13, 2025
8286cd7
add integration tests for FileSearchTool
gorkachea Nov 13, 2025
3011e05
add VCR decorators to FileSearchTool integration tests
gorkachea Nov 13, 2025
bc278e8
fix Google FileSearchTool SDK parameters and add VCR decorators
gorkachea Nov 14, 2025
5f694c9
fix type errors in FileSearchTool integration tests
gorkachea Nov 14, 2025
8dc7c17
Regenerate uv.lock with uv 0.9.9 to reduce diff size
gorkachea Nov 15, 2025
8216f31
Remove unit tests for Google FileSearchTool parsing
gorkachea Nov 15, 2025
ffcb21f
Remove unit tests for OpenAI FileSearchTool parsing
gorkachea Nov 15, 2025
bc3ac7a
Refactor Google FileSearchTool tests to match built-in tool pattern
gorkachea Nov 15, 2025
977ab53
Refactor OpenAI FileSearchTool tests to match built-in tool pattern
gorkachea Nov 15, 2025
68bafb6
Merge main into add-file-search-tools-support
gorkachea Nov 15, 2025
29f8da0
Merge main into add-file-search-tools-support
gorkachea Nov 15, 2025
eef4526
Add cassettes
DouweM Nov 20, 2025
8cc3d60
Merge branch 'main' into add-file-search-tools-support
DouweM Nov 20, 2025
b77d857
Rename vector_store_ids to file_store_ids and add Vertex AI to provid…
gorkachea Nov 22, 2025
db475c6
Add file upload examples for OpenAI and Google FileSearchTool
gorkachea Nov 23, 2025
fd62e29
Add openai_include_file_search_results setting
gorkachea Nov 23, 2025
13abf31
Fix Google non-streaming file search to extract retrievedContext
gorkachea Nov 23, 2025
129dacd
Fix Google streaming file search parsing
gorkachea Nov 23, 2025
8d3f359
Merge latest from main
gorkachea Nov 23, 2025
065c711
Revert boto3 version bump to match main
gorkachea Nov 23, 2025
50ad873
Fix CI typecheck and formatting errors
gorkachea Nov 23, 2025
ff22a6d
Fix FileSearchToolParam typecheck error with cast
gorkachea Nov 23, 2025
1f249aa
Fix FileSearchTool documentation examples and update test snapshots
gorkachea Nov 23, 2025
c7798d1
Fix deprecated event types in FileSearchTool test snapshots
gorkachea Nov 23, 2025
9696c01
Fix import order in FileSearchTool documentation examples
gorkachea Nov 23, 2025
2fe8791
Fix import order: put submodule imports before top-level imports
gorkachea Nov 23, 2025
1c0589b
Revert import order to match working example pattern
gorkachea Nov 23, 2025
122be13
Add workaround for pydantic_ai import sorting issue in test framework
gorkachea Nov 23, 2025
925a909
Suppress complexity warning for test_docs_examples function
gorkachea Nov 23, 2025
5cc9fb0
Add pyright ignore for private usage in tests
gorkachea Nov 23, 2025
8d6a6e7
Fix test failures: wrap mock responses in lists for multiple API calls
gorkachea Nov 24, 2025
68738fd
Fix test failures: add allow_model_requests fixture to mock tests
gorkachea Nov 24, 2025
99af405
Fix coverage: add tests for missing openai.py branches and cleanup code
gorkachea Nov 24, 2025
0c7c582
Refactor cleanup code into helper functions for 100% coverage
gorkachea Nov 24, 2025
b2d95e5
docs: Fix OpenAI built-in tools documentation
gorkachea Nov 27, 2025
1f6f4c9
refactor: Change file_store_ids from list to set
gorkachea Nov 27, 2025
810fc25
refactor: Move file search grounding metadata handling out of loop
gorkachea Nov 27, 2025
7610c5a
refactor: Check file search tool inclusion and move executableCode ha…
gorkachea Nov 27, 2025
10960b6
refactor: Extract duplicated retrieved_contexts extraction into helpe…
gorkachea Nov 27, 2025
74f8078
test: Remove unnecessary cleanup helper tests
gorkachea Nov 27, 2025
2faa581
Revert "test: Remove unnecessary cleanup helper tests"
gorkachea Nov 27, 2025
a5402fc
fix: Improve file_search query extraction regex
gorkachea Nov 27, 2025
8c8c949
refactor: Remove unnecessary comments from regex code
gorkachea Nov 27, 2025
057b0dd
refactor: Drop dict wrapper for file_search content
gorkachea Nov 27, 2025
fe1cde9
refactor: Exclude None fields from file_search retrieved_contexts
gorkachea Nov 28, 2025
5984c0d
feat: Add file_search_store field to Google file search retrieved con…
gorkachea Nov 28, 2025
17eb139
fix: Prevent duplicate BuiltinToolCallPart and BuiltinToolReturnPart …
gorkachea Nov 28, 2025
3f757d6
test: Remove unclear file search tests
gorkachea Nov 28, 2025
188085d
test: Remove unnecessary cleanup helper tests
gorkachea Nov 28, 2025
9e1c707
test: Remove unnecessary file search tests in test_openai_responses.py
gorkachea Nov 28, 2025
2d86b0d
test: Add snapshot test for file search tool call mapping
gorkachea Nov 28, 2025
2c3fcd5
Merge main into add-file-search-tools-support
gorkachea Nov 28, 2025
1c8ccc6
refactor: Remove unnecessary comment in file search code
gorkachea Nov 28, 2025
fa338fc
fix: Add encoding parameter to tempfile.NamedTemporaryFile calls
gorkachea Nov 28, 2025
87caa1d
fix: Update snapshots and remove unnecessary comments
gorkachea Nov 28, 2025
b199b8b
Skip computer_use example test in documentation
gorkachea Nov 28, 2025
250b3df
Remove unnecessary I001 ignore for pydantic_ai imports
gorkachea Nov 28, 2025
608b977
Re-record cassettes
DouweM Nov 28, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
60 changes: 60 additions & 0 deletions docs/builtin-tools.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ Pydantic AI supports the following built-in tools:
- **[`UrlContextTool`][pydantic_ai.builtin_tools.UrlContextTool]**: Enables agents to pull URL contents into their context
- **[`MemoryTool`][pydantic_ai.builtin_tools.MemoryTool]**: Enables agents to use memory
- **[`MCPServerTool`][pydantic_ai.builtin_tools.MCPServerTool]**: Enables agents to use remote MCP servers with communication handled by the model provider
- **[`FileSearchTool`][pydantic_ai.builtin_tools.FileSearchTool]**: Enables agents to search through uploaded files using vector search (RAG)

These tools are passed to the agent via the `builtin_tools` parameter and are executed by the model provider's infrastructure.

Expand Down Expand Up @@ -566,6 +567,65 @@ _(This example is complete, it can be run "as is")_
| `description` | ✅ | ❌ |
| `headers` | ✅ | ❌ |

## File Search Tool

The [`FileSearchTool`][pydantic_ai.builtin_tools.FileSearchTool] enables your agent to search through uploaded files using vector search, providing a fully managed Retrieval-Augmented Generation (RAG) system. This tool handles file storage, chunking, embedding generation, and context injection into prompts.

### Provider Support

| Provider | Supported | Notes |
|----------|-----------|-------|
| OpenAI Responses | ✅ | Full feature support. Requires files to be uploaded to vector stores via the [OpenAI Files API](https://platform.openai.com/docs/api-reference/files). Vector stores must be created and file IDs added before using the tool. |
| Google (Gemini) | ✅ | Requires files to be uploaded via the [Gemini Files API](https://ai.google.dev/gemini-api/docs/files). Files are automatically deleted after 48 hours. Supports up to 2 GB per file and 20 GB per project. Using built-in tools and function tools (including [output tools](output.md#tool-output)) at the same time is not supported; to use structured output, use [`PromptedOutput`](output.md#prompted-output) instead. |
| Anthropic | ❌ | Not supported |
| Groq | ❌ | Not supported |
| OpenAI Chat Completions | ❌ | Not supported |
| Bedrock | ❌ | Not supported |
| Mistral | ❌ | Not supported |
| Cohere | ❌ | Not supported |
| HuggingFace | ❌ | Not supported |
| Outlines | ❌ | Not supported |

### Usage

#### OpenAI Responses

With OpenAI, you need to first [upload files to a vector store](https://platform.openai.com/docs/assistants/tools/file-search), then reference the vector store IDs when using the `FileSearchTool`:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It would be awesome if you could have example code for the upload step as well, using the OpenAIResponsesModel.client

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added! The example now shows the complete workflow using model.client


```py {title="file_search_openai.py" test="skip"}
from pydantic_ai import Agent, FileSearchTool

agent = Agent(
'openai-responses:gpt-5',
builtin_tools=[FileSearchTool(vector_store_ids=['vs_abc123'])] # (1)
)

result = agent.run_sync('What information is in my documents about pydantic?')
print(result.output)
#> Based on your documents, Pydantic is a data validation library for Python...
```

1. Replace `vs_abc123` with your actual vector store ID from the OpenAI API.

#### Google (Gemini)

With Gemini, you need to first [create a file search store via the Files API](https://ai.google.dev/gemini-api/docs/files), then reference the file search store names:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same as up.

FYI Another Google file upload example is being created in #3492

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added a complete example using model.client.aio.file_search_stores.


```py {title="file_search_google.py" test="skip"}
from pydantic_ai import Agent, FileSearchTool

agent = Agent(
'google-gla:gemini-2.5-flash',
builtin_tools=[FileSearchTool(vector_store_ids=['files/abc123'])] # (1)
)

result = agent.run_sync('Summarize the key points from my uploaded documents.')
print(result.output)
#> The documents discuss the following key points: ...
```

1. Replace `files/abc123` with your actual file search store name from the Gemini Files API.

## API Reference

For complete API documentation, see the [API Reference](api/builtin_tools.md).
10 changes: 6 additions & 4 deletions pydantic_ai_slim/pydantic_ai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
)
from .builtin_tools import (
CodeExecutionTool,
FileSearchTool,
ImageGenerationTool,
MCPServerTool,
MemoryTool,
Expand Down Expand Up @@ -212,13 +213,14 @@
'ToolsetTool',
'WrapperToolset',
# builtin_tools
'WebSearchTool',
'WebSearchUserLocation',
'UrlContextTool',
'CodeExecutionTool',
'FileSearchTool',
'ImageGenerationTool',
'MemoryTool',
'MCPServerTool',
'MemoryTool',
'UrlContextTool',
'WebSearchTool',
'WebSearchUserLocation',
# output
'ToolOutput',
'NativeOutput',
Expand Down
25 changes: 25 additions & 0 deletions pydantic_ai_slim/pydantic_ai/builtin_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
'ImageGenerationTool',
'MemoryTool',
'MCPServerTool',
'FileSearchTool',
)

_BUILTIN_TOOL_TYPES: dict[str, type[AbstractBuiltinTool]] = {}
Expand Down Expand Up @@ -334,6 +335,30 @@ def unique_id(self) -> str:
return ':'.join([self.kind, self.id])


@dataclass(kw_only=True)
class FileSearchTool(AbstractBuiltinTool):
    """A builtin tool that allows your agent to search through uploaded files using vector search.

    This tool provides a fully managed Retrieval-Augmented Generation (RAG) system that handles
    file storage, chunking, embedding generation, and context injection into prompts.

    Supported by:

    * OpenAI Responses
    * Google (Gemini)
    """

    vector_store_ids: list[str]
    """List of vector store IDs to search through.

    For OpenAI, these are the IDs of vector stores created via the OpenAI API.
    For Google, these are file resource names that have been uploaded and processed.
    """

    kind: str = 'file_search'
    """The kind of tool."""


def _tool_discriminator(tool_data: dict[str, Any] | AbstractBuiltinTool) -> str:
if isinstance(tool_data, dict):
return tool_data.get('kind', AbstractBuiltinTool.kind)
Expand Down
2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/models/anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -529,7 +529,7 @@ def _add_builtin_tools(
mcp_server_url_definition_param['authorization_token'] = tool.authorization_token
mcp_servers.append(mcp_server_url_definition_param)
beta_features.append('mcp-client-2025-04-04')
else: # pragma: no cover
else:
raise UserError(
f'`{tool.__class__.__name__}` is not supported by `AnthropicModel`. If it should be, please file an issue.'
)
Expand Down
41 changes: 40 additions & 1 deletion pydantic_ai_slim/pydantic_ai/models/google.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from .. import UnexpectedModelBehavior, _utils, usage
from .._output import OutputObjectDefinition
from .._run_context import RunContext
from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, UrlContextTool, WebSearchTool
from ..builtin_tools import CodeExecutionTool, FileSearchTool, ImageGenerationTool, UrlContextTool, WebSearchTool
from ..exceptions import ModelHTTPError, UserError
from ..messages import (
BinaryContent,
Expand Down Expand Up @@ -63,6 +63,7 @@
ExecutableCode,
ExecutableCodeDict,
FileDataDict,
FileSearchDict,
FinishReason as GoogleFinishReason,
FunctionCallDict,
FunctionCallingConfigDict,
Expand Down Expand Up @@ -93,6 +94,7 @@
'you can use the `google` optional group — `pip install "pydantic-ai-slim[google]"`'
) from _import_error


LatestGoogleModelNames = Literal[
'gemini-flash-latest',
'gemini-flash-lite-latest',
Expand Down Expand Up @@ -350,6 +352,9 @@ def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[T
tools.append(ToolDict(url_context=UrlContextDict()))
elif isinstance(tool, CodeExecutionTool):
tools.append(ToolDict(code_execution=ToolCodeExecutionDict()))
elif isinstance(tool, FileSearchTool):
file_search_config = FileSearchDict(file_search_store_names=tool.vector_store_ids)
tools.append(ToolDict(file_search=file_search_config))
elif isinstance(tool, ImageGenerationTool): # pragma: no branch
if not self.profile.supports_image_output:
raise UserError(
Expand Down Expand Up @@ -844,6 +849,11 @@ def _process_response_from_parts(
items.append(web_search_call)
items.append(web_search_return)

file_search_call, file_search_return = _map_file_search_grounding_metadata(grounding_metadata, provider_name)
if file_search_call and file_search_return:
items.append(file_search_call)
items.append(file_search_return)

item: ModelResponsePart | None = None
code_execution_tool_call_id: str | None = None
for part in parts:
Expand Down Expand Up @@ -991,3 +1001,32 @@ def _map_grounding_metadata(
)
else:
return None, None


def _map_file_search_grounding_metadata(
grounding_metadata: GroundingMetadata | None, provider_name: str
) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart] | tuple[None, None]:
if grounding_metadata and (retrieval_queries := grounding_metadata.retrieval_queries):
tool_call_id = _utils.generate_tool_call_id()
return (
BuiltinToolCallPart(
provider_name=provider_name,
tool_name=FileSearchTool.kind,
tool_call_id=tool_call_id,
args={'queries': retrieval_queries},
),
BuiltinToolReturnPart(
provider_name=provider_name,
tool_name=FileSearchTool.kind,
tool_call_id=tool_call_id,
content=[
chunk.retrieved_context.model_dump(mode='json')
for chunk in grounding_chunks
if chunk.retrieved_context
]
if (grounding_chunks := grounding_metadata.grounding_chunks)
else None,
),
)
else:
return None, None
91 changes: 87 additions & 4 deletions pydantic_ai_slim/pydantic_ai/models/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
from .._run_context import RunContext
from .._thinking_part import split_content_into_text_and_thinking
from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc, number_to_datetime
from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, MCPServerTool, WebSearchTool
from ..builtin_tools import CodeExecutionTool, FileSearchTool, ImageGenerationTool, MCPServerTool, WebSearchTool
from ..exceptions import UserError
from ..messages import (
AudioUrl,
Expand Down Expand Up @@ -1091,9 +1091,10 @@ def _process_response( # noqa: C901
elif isinstance(item, responses.response_output_item.LocalShellCall): # pragma: no cover
# Pydantic AI doesn't yet support the `codex-mini-latest` LocalShell built-in tool
pass
elif isinstance(item, responses.ResponseFileSearchToolCall): # pragma: no cover
# Pydantic AI doesn't yet support the FileSearch built-in tool
pass
elif isinstance(item, responses.ResponseFileSearchToolCall):
call_part, return_part = _map_file_search_tool_call(item, self.system)
items.append(call_part)
items.append(return_part)
elif isinstance(item, responses.response_output_item.McpCall):
call_part, return_part = _map_mcp_call(item, self.system)
items.append(call_part)
Expand Down Expand Up @@ -1288,6 +1289,11 @@ def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) -
type='approximate', **tool.user_location
)
tools.append(web_search_tool)
elif isinstance(tool, FileSearchTool):
file_search_tool = responses.FileSearchToolParam(
type='file_search', vector_store_ids=tool.vector_store_ids
)
tools.append(file_search_tool)
elif isinstance(tool, CodeExecutionTool):
has_image_generating_tool = True
tools.append({'type': 'code_interpreter', 'container': {'type': 'auto'}})
Expand Down Expand Up @@ -1425,6 +1431,7 @@ async def _map_messages( # noqa: C901
message_item: responses.ResponseOutputMessageParam | None = None
reasoning_item: responses.ResponseReasoningItemParam | None = None
web_search_item: responses.ResponseFunctionWebSearchParam | None = None
file_search_item: responses.ResponseFileSearchToolCallParam | None = None
code_interpreter_item: responses.ResponseCodeInterpreterToolCallParam | None = None
for item in message.parts:
if isinstance(item, TextPart):
Expand Down Expand Up @@ -1494,6 +1501,21 @@ async def _map_messages( # noqa: C901
type='web_search_call',
)
openai_messages.append(web_search_item)
elif (
item.tool_name == FileSearchTool.kind
and item.tool_call_id
and (args := item.args_as_dict())
):
file_search_item = cast(
responses.ResponseFileSearchToolCallParam,
{
'id': item.tool_call_id,
'queries': args.get('queries', []),
'status': 'completed',
'type': 'file_search_call',
},
)
openai_messages.append(file_search_item)
elif item.tool_name == ImageGenerationTool.kind and item.tool_call_id:
# The cast is necessary because of https://github.com/openai/openai-python/issues/2648
image_generation_item = cast(
Expand Down Expand Up @@ -1553,6 +1575,14 @@ async def _map_messages( # noqa: C901
and (status := content.get('status'))
):
web_search_item['status'] = status
elif (
item.tool_name == FileSearchTool.kind
and file_search_item is not None
and isinstance(item.content, dict) # pyright: ignore[reportUnknownMemberType]
and (content := cast(dict[str, Any], item.content)) # pyright: ignore[reportUnknownMemberType]
and (status := content.get('status'))
):
file_search_item['status'] = status
elif item.tool_name == ImageGenerationTool.kind:
# Image generation result does not need to be sent back, just the `id` off of `BuiltinToolCallPart`.
pass
Expand Down Expand Up @@ -1869,6 +1899,11 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
yield self._parts_manager.handle_part(
vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
)
elif isinstance(chunk.item, responses.ResponseFileSearchToolCall):
call_part, _ = _map_file_search_tool_call(chunk.item, self.provider_name)
yield self._parts_manager.handle_part(
vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
)
elif isinstance(chunk.item, responses.ResponseCodeInterpreterToolCall):
call_part, _, _ = _map_code_interpreter_tool_call(chunk.item, self.provider_name)

Expand Down Expand Up @@ -1937,6 +1972,17 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
elif isinstance(chunk.item, responses.ResponseFunctionWebSearch):
call_part, return_part = _map_web_search_tool_call(chunk.item, self.provider_name)

maybe_event = self._parts_manager.handle_tool_call_delta(
vendor_part_id=f'{chunk.item.id}-call',
args=call_part.args,
)
if maybe_event is not None: # pragma: no branch
yield maybe_event

yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part)
elif isinstance(chunk.item, responses.ResponseFileSearchToolCall):
call_part, return_part = _map_file_search_tool_call(chunk.item, self.provider_name)

maybe_event = self._parts_manager.handle_tool_call_delta(
vendor_part_id=f'{chunk.item.id}-call',
args=call_part.args,
Expand Down Expand Up @@ -2086,6 +2132,15 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
elif isinstance(chunk, responses.ResponseMcpCallCompletedEvent):
pass # there's nothing we need to do here

elif isinstance(chunk, responses.ResponseFileSearchCallCompletedEvent):
pass # there's nothing we need to do here

elif isinstance(chunk, responses.ResponseFileSearchCallSearchingEvent):
pass # there's nothing we need to do here

elif isinstance(chunk, responses.ResponseFileSearchCallInProgressEvent):
pass # there's nothing we need to do here

else: # pragma: no cover
warnings.warn(
f'Handling of this event type is not yet implemented. Please report on our GitHub: {chunk}',
Expand Down Expand Up @@ -2240,6 +2295,34 @@ def _map_web_search_tool_call(
)


def _map_file_search_tool_call(
    item: responses.ResponseFileSearchToolCall,
    provider_name: str,
) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
    """Map an OpenAI Responses file-search tool call onto builtin call/return parts.

    The call part carries the search queries; the return part carries the call status
    and, when present, the JSON-dumped search results.
    """
    content: dict[str, Any] = {'status': item.status}
    search_results = item.results
    if search_results is not None:
        content['results'] = [entry.model_dump(mode='json') for entry in search_results]

    call_part = BuiltinToolCallPart(
        tool_name=FileSearchTool.kind,
        tool_call_id=item.id,
        args={'queries': item.queries},
        provider_name=provider_name,
    )
    return_part = BuiltinToolReturnPart(
        tool_name=FileSearchTool.kind,
        tool_call_id=item.id,
        content=content,
        provider_name=provider_name,
    )
    return call_part, return_part

def _map_image_generation_tool_call(
item: responses.response_output_item.ImageGenerationCall, provider_name: str
) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart, FilePart | None]:
Expand Down
Loading
Loading