Skip to content

Commit bf5e75c

Browse files
authored
Merge branch 'main' into patch-3323
2 parents 2674863 + 5afc2d6 commit bf5e75c

File tree

3 files changed

+133
-1
lines changed

3 files changed

+133
-1
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -126,7 +126,7 @@ docs = [
126126
docs-upload = ["algoliasearch>=4.12.0", "pydantic>=2.10.1"]
127127

128128
[tool.hatch.build.targets.wheel]
129-
only-include = ["/README.md"]
129+
bypass-selection = true
130130

131131
[tool.hatch.build.targets.sdist]
132132
include = ["/README.md", "/Makefile", "/tests"]
Lines changed: 81 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,81 @@
1+
interactions:
2+
- request:
3+
headers:
4+
accept:
5+
- application/json
6+
accept-encoding:
7+
- gzip, deflate
8+
connection:
9+
- keep-alive
10+
content-length:
11+
- '180'
12+
content-type:
13+
- application/json
14+
host:
15+
- api.openai.com
16+
method: POST
17+
parsed_body:
18+
messages:
19+
- content: Where do you want to go today?
20+
role: assistant
21+
- content: Answer in 5 words only. Who is Tux?
22+
role: user
23+
model: gpt-4.1-mini
24+
stream: false
25+
uri: https://api.openai.com/v1/chat/completions
26+
response:
27+
headers:
28+
access-control-expose-headers:
29+
- X-Request-ID
30+
alt-svc:
31+
- h3=":443"; ma=86400
32+
connection:
33+
- keep-alive
34+
content-length:
35+
- '841'
36+
content-type:
37+
- application/json
38+
openai-organization:
39+
- deeplytalented
40+
openai-processing-ms:
41+
- '423'
42+
openai-project:
43+
- proj_1aziXuKoVAC897wPxnvH0q7Z
44+
openai-version:
45+
- '2020-10-01'
46+
strict-transport-security:
47+
- max-age=31536000; includeSubDomains; preload
48+
transfer-encoding:
49+
- chunked
50+
parsed_body:
51+
choices:
52+
- finish_reason: stop
53+
index: 0
54+
logprobs: null
55+
message:
56+
annotations: []
57+
content: Linux mascot, a penguin character.
58+
refusal: null
59+
role: assistant
60+
created: 1763805700
61+
id: chatcmpl-Ceeiy4ivEE0hcL1EX5ZfLuW5xNUXB
62+
model: gpt-4.1-mini-2025-04-14
63+
object: chat.completion
64+
service_tier: default
65+
system_fingerprint: fp_9766e549b2
66+
usage:
67+
completion_tokens: 8
68+
completion_tokens_details:
69+
accepted_prediction_tokens: 0
70+
audio_tokens: 0
71+
reasoning_tokens: 0
72+
rejected_prediction_tokens: 0
73+
prompt_tokens: 31
74+
prompt_tokens_details:
75+
audio_tokens: 0
76+
cached_tokens: 0
77+
total_tokens: 39
78+
status:
79+
code: 200
80+
message: OK
81+
version: 1

tests/models/test_openai.py

Lines changed: 51 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1215,6 +1215,57 @@ async def get_capital(country: str) -> str:
12151215
assert result.output == snapshot('The capital of England is London.')
12161216

12171217

1218+
async def test_message_history_can_start_with_model_response(allow_model_requests: None, openai_api_key: str):
1219+
"""Test that an agent run with message_history starting with ModelResponse is executed correctly."""
1220+
1221+
openai_model = OpenAIChatModel('gpt-4.1-mini', provider=OpenAIProvider(api_key=openai_api_key))
1222+
1223+
message_history = [ModelResponse(parts=[TextPart('Where do you want to go today?')])]
1224+
1225+
agent = Agent(model=openai_model)
1226+
1227+
result = await agent.run('Answer in 5 words only. Who is Tux?', message_history=message_history)
1228+
1229+
assert result.output == snapshot('Linux mascot, a penguin character.')
1230+
assert result.all_messages() == snapshot(
1231+
[
1232+
ModelResponse(
1233+
parts=[TextPart(content='Where do you want to go today?')],
1234+
timestamp=IsDatetime(),
1235+
),
1236+
ModelRequest(
1237+
parts=[
1238+
UserPromptPart(
1239+
content='Answer in 5 words only. Who is Tux?',
1240+
timestamp=IsDatetime(),
1241+
)
1242+
],
1243+
run_id=IsStr(),
1244+
),
1245+
ModelResponse(
1246+
parts=[TextPart(content='Linux mascot, a penguin character.')],
1247+
usage=RequestUsage(
1248+
input_tokens=31,
1249+
output_tokens=8,
1250+
details={
1251+
'accepted_prediction_tokens': 0,
1252+
'audio_tokens': 0,
1253+
'reasoning_tokens': 0,
1254+
'rejected_prediction_tokens': 0,
1255+
},
1256+
),
1257+
model_name='gpt-4.1-mini-2025-04-14',
1258+
timestamp=IsDatetime(),
1259+
provider_name='openai',
1260+
provider_details={'finish_reason': 'stop'},
1261+
provider_response_id='chatcmpl-Ceeiy4ivEE0hcL1EX5ZfLuW5xNUXB',
1262+
finish_reason='stop',
1263+
run_id=IsStr(),
1264+
),
1265+
]
1266+
)
1267+
1268+
12181269
async def test_extra_headers(allow_model_requests: None, openai_api_key: str):
12191270
# This test doesn't do anything, it's just here to ensure that calls with `extra_headers` don't cause errors, including type.
12201271
m = OpenAIChatModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

0 commit comments

Comments (0)