4 changes: 2 additions & 2 deletions README.md
@@ -112,7 +112,7 @@ client = instructor.from_openai(OpenAI())
agent = AtomicAgent[BasicChatInputSchema, CustomOutputSchema](
config=AgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
system_prompt_generator=system_prompt_generator,
history=ChatHistory(),
)
@@ -219,7 +219,7 @@ class QueryAgentInputSchema(BaseIOSchema):
query_agent = AtomicAgent[QueryAgentInputSchema, SearXNGSearchTool.input_schema](
config=AgentConfig(
client=instructor.from_openai(openai.OpenAI()),
model="gpt-4o-mini",
model="gpt-5-mini",
system_prompt_generator=SystemPromptGenerator(
background=[
"You are an intelligent query generation expert.",
24 changes: 12 additions & 12 deletions UPGRADE_DOC.md
@@ -182,7 +182,7 @@ client = instructor.from_openai(OpenAI())
agent = BaseAgent(
BaseAgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
memory=AgentMemory()
# No schema parameters = uses BaseAgentInputSchema and BaseAgentOutputSchema
)
@@ -192,7 +192,7 @@ agent = BaseAgent(
agent = BaseAgent(
BaseAgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
memory=AgentMemory(),
input_schema=CustomInputSchema, # Passed in config
output_schema=CustomOutputSchema # Passed in config
@@ -226,7 +226,7 @@ client = instructor.from_openai(OpenAI())
agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](
AgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
history=ChatHistory()
)
)
@@ -235,7 +235,7 @@ agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](
agent = AtomicAgent[CustomInputSchema, CustomOutputSchema](
AgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
history=ChatHistory()
# No schema parameters in config!
)
@@ -262,7 +262,7 @@ agent = AtomicAgent[CustomInputSchema, CustomOutputSchema](
agent = BaseAgent(
BaseAgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
memory=AgentMemory(),
input_schema=TranslationInput, # Was here
output_schema=TranslationOutput # Was here
@@ -273,7 +273,7 @@ agent = AtomicAgent[CustomInputSchema, CustomOutputSchema](
agent = AtomicAgent[TranslationInput, TranslationOutput](
AgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
history=ChatHistory() # Note: memory → history
# Schemas no longer in config!
)
@@ -285,7 +285,7 @@ agent = AtomicAgent[CustomInputSchema, CustomOutputSchema](
# OLD - Direct parameters
config = BaseAgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
memory=AgentMemory(), # Old field name
temperature=0.7, # Direct parameter
max_tokens=1000 # Direct parameter
@@ -294,7 +294,7 @@ agent = AtomicAgent[CustomInputSchema, CustomOutputSchema](
# NEW - Grouped parameters
config = AgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
history=ChatHistory(), # New field name
model_api_parameters={ # Temperature and max_tokens moved here
"temperature": 0.7,
@@ -347,7 +347,7 @@ from atomic_agents.context import ChatHistory

client = instructor.from_openai(OpenAI())
agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](
-AgentConfig(client=client, model="gpt-4o-mini", history=ChatHistory())
+AgentConfig(client=client, model="gpt-5-mini", history=ChatHistory())
)

# Non-streaming (same as v1.x) - Wait for complete response
@@ -372,7 +372,7 @@ import asyncio

client = instructor.from_openai(AsyncOpenAI())
agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](
-AgentConfig(client=client, model="gpt-4o-mini", history=ChatHistory())
+AgentConfig(client=client, model="gpt-5-mini", history=ChatHistory())
)

# OLD (v1.x) - run_async was a streaming generator
@@ -605,7 +605,7 @@ system_prompt = SystemPromptGenerator(
agent = BaseAgent(
BaseAgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
memory=memory,
system_prompt_generator=system_prompt,
input_schema=SupportTicketInput,
@@ -663,7 +663,7 @@ system_prompt = SystemPromptGenerator(
agent = AtomicAgent[SupportTicketInput, SupportTicketOutput]( # Schemas as type parameters
AgentConfig(
client=client,
model="gpt-4o-mini",
model="gpt-5-mini",
history=history, # Changed from memory
system_prompt_generator=system_prompt,
# No input_schema or output_schema in config
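Taken together, the UPGRADE_DOC.md hunks above describe one migration pattern: schemas move from config fields to type parameters, `memory`/`AgentMemory` becomes `history`/`ChatHistory`, and sampling parameters move into `model_api_parameters`. The following is a minimal sketch of the v2-style setup these hunks converge on, assuming the module paths shown in this diff and the `chat_message` fields of the basic chat schemas; it is an illustration, not part of the PR:

```python
import instructor
from openai import OpenAI

# Import paths assumed from the file layout shown in this diff.
from atomic_agents.agents.atomic_agent import (
    AgentConfig,
    AtomicAgent,
    BasicChatInputSchema,
    BasicChatOutputSchema,
)
from atomic_agents.context import ChatHistory

# Requires OPENAI_API_KEY in the environment.
client = instructor.from_openai(OpenAI())

# v2 style: schemas are type parameters, `history` replaces `memory`,
# and sampling knobs are grouped under `model_api_parameters`.
agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](
    AgentConfig(
        client=client,
        model="gpt-5-mini",
        history=ChatHistory(),
        model_api_parameters={"temperature": 0.7, "max_tokens": 1000},
    )
)

response = agent.run(BasicChatInputSchema(chat_message="Hello!"))
print(response.chat_message)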
2 changes: 1 addition & 1 deletion agents.md
@@ -333,7 +333,7 @@ agent = AtomicAgent(
system_prompt="You are a helpful assistant. Provide accurate answers.",
input_schema=InputSchema,
output_schema=OutputSchema,
model="gpt-4o-mini",
model="gpt-5-mini",
)

# Run agent
6 changes: 3 additions & 3 deletions atomic-agents/atomic_agents/agents/atomic_agent.py
@@ -60,7 +60,7 @@ class BasicChatOutputSchema(BaseIOSchema):

class AgentConfig(BaseModel):
client: instructor.client.Instructor = Field(..., description="Client for interacting with the language model.")
-model: str = Field(default="gpt-4o-mini", description="The model to use for generating responses.")
+model: str = Field(default="gpt-5-mini", description="The model to use for generating responses.")
history: Optional[ChatHistory] = Field(default=None, description="History component for storing chat history.")
system_prompt_generator: Optional[SystemPromptGenerator] = Field(
default=None, description="Component for generating system prompts."
@@ -579,11 +579,11 @@ async def chat_loop(streaming: bool = False):
"""
if streaming:
client = instructor.from_openai(AsyncOpenAI())
-config = AgentConfig(client=client, model="gpt-4o-mini")
+config = AgentConfig(client=client, model="gpt-5-mini")
agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](config)
else:
client = instructor.from_openai(OpenAI())
-config = AgentConfig(client=client, model="gpt-4o-mini")
+config = AgentConfig(client=client, model="gpt-5-mini")
agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](config)

# Display agent information before starting the chat
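Because the `AgentConfig.model` field default changes in the hunk above, any config that omits `model` now picks up the new value automatically. A quick illustration, assuming the same import path as shown in this diff:

```python
import instructor
from openai import OpenAI

from atomic_agents.agents.atomic_agent import AgentConfig

# Requires OPENAI_API_KEY in the environment to construct the client.
config = AgentConfig(client=instructor.from_openai(OpenAI()))
assert config.model == "gpt-5-mini"  # new default introduced by this PR
```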
32 changes: 16 additions & 16 deletions atomic-agents/tests/agents/test_atomic_agent.py
@@ -76,7 +76,7 @@ def mock_system_prompt_generator():
def agent_config(mock_instructor, mock_history, mock_system_prompt_generator):
return AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
)
@@ -91,7 +91,7 @@ def agent(agent_config):
def agent_config_async(mock_instructor_async, mock_history, mock_system_prompt_generator):
return AgentConfig(
client=mock_instructor_async,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
)
@@ -104,7 +104,7 @@ def agent_async(agent_config_async):

def test_initialization(agent, mock_instructor, mock_history, mock_system_prompt_generator):
assert agent.client == mock_instructor
-assert agent.model == "gpt-4o-mini"
+assert agent.model == "gpt-5-mini"
assert agent.history == mock_history
assert agent.system_prompt_generator == mock_system_prompt_generator
assert "max_tokens" not in agent.model_api_parameters
@@ -114,7 +114,7 @@ def test_initialization(agent, mock_instructor, mock_history, mock_system_prompt
def test_initialization_temperature_priority(mock_instructor, mock_history, mock_system_prompt_generator):
config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
model_api_parameters={"temperature": 1.0},
@@ -126,7 +126,7 @@ def test_initialization_temperature_priority(mock_instructor, mock_history, mock
def test_initialization_without_temperature(mock_instructor, mock_history, mock_system_prompt_generator):
config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
model_api_parameters={"temperature": 0.5},
@@ -138,7 +138,7 @@ def test_initialization_without_temperature(mock_instructor, mock_history, mock_
def test_initialization_without_max_tokens(mock_instructor, mock_history, mock_system_prompt_generator):
config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
model_api_parameters={"max_tokens": 1024},
@@ -150,7 +150,7 @@ def test_initialization_without_max_tokens(mock_instructor, mock_history, mock_s
def test_initialization_system_role_equals_developer(mock_instructor, mock_history, mock_system_prompt_generator):
config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
system_role="developer",
@@ -164,7 +164,7 @@ def test_initialization_system_role_equals_developer(mock_instructor, mock_histo
def test_initialization_system_role_equals_None(mock_instructor, mock_history, mock_system_prompt_generator):
config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
system_role=None,
@@ -215,7 +215,7 @@ def test_unregister_context_provider(agent, mock_system_prompt_generator):
def test_no_type_parameters(mock_instructor):
custom_config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
)

custom_agent = AtomicAgent(custom_config)
@@ -233,7 +233,7 @@ class CustomOutputSchema(BaseModel):

custom_config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
)

custom_agent = AtomicAgent[CustomInputSchema, CustomOutputSchema](custom_config)
@@ -256,7 +256,7 @@ def __init__(self, extra_param: str):
self.extra_param = extra_param
config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
)
super().__init__(config)

@@ -319,7 +319,7 @@ def test_run_stream(mock_instructor, mock_history):
# Create an AgentConfig without a system prompt generator
config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=None, # No system prompt generator
)
@@ -457,7 +457,7 @@ def test_hook_registration_with_instructor_client(mock_instructor):
mock_instructor.off = Mock()
mock_instructor.clear = Mock()

-config = AgentConfig(client=mock_instructor, model="gpt-4o-mini")
+config = AgentConfig(client=mock_instructor, model="gpt-5-mini")
agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](config)

def test_handler(error):
@@ -629,7 +629,7 @@ def test_agent_initialization_includes_hooks(mock_instructor, mock_history, mock
"""Test that agent initialization properly sets up hook system."""
config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
)
@@ -665,7 +665,7 @@ def test_backward_compatibility_no_breaking_changes(mock_instructor, mock_histor

config = AgentConfig(
client=mock_instructor,
model="gpt-4o-mini",
model="gpt-5-mini",
history=mock_history,
system_prompt_generator=mock_system_prompt_generator,
)
@@ -674,7 +674,7 @@ def test_backward_compatibility_no_breaking_changes(mock_instructor, mock_histor

# Test that all existing attributes still exist and work
assert agent.client == mock_instructor
-assert agent.model == "gpt-4o-mini"
+assert agent.model == "gpt-5-mini"
assert agent.history == mock_history
assert agent.system_prompt_generator == mock_system_prompt_generator

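One gap the updated tests leave open is the default itself: every fixture pins `model="gpt-5-mini"` explicitly, so a future drift in the `AgentConfig` default would go unnoticed. A hedged sketch of an additional test, reusing the `mock_instructor` fixture and schema names shown in the hunks above:

```python
def test_default_model_is_gpt_5_mini(mock_instructor):
    # No explicit model: exercises the AgentConfig default changed in this PR.
    config = AgentConfig(client=mock_instructor)
    agent = AtomicAgent[BasicChatInputSchema, BasicChatOutputSchema](config)
    assert agent.model == "gpt-5-mini"
```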