diff --git a/src/praisonai-agents/praisonaiagents/__init__.py b/src/praisonai-agents/praisonaiagents/__init__.py
index 04330d6c..829610fd 100644
--- a/src/praisonai-agents/praisonaiagents/__init__.py
+++ b/src/praisonai-agents/praisonaiagents/__init__.py
@@ -28,6 +28,8 @@
 from .agent.agent import Agent
 from .agent.image_agent import ImageAgent
+from .agent.reasoning_agent import ReasoningAgent
+from .agent.dual_brain_agent import DualBrainAgent
 from .agents.agents import PraisonAIAgents
 from .task.task import Task
 from .tools.tools import Tools
@@ -39,6 +41,7 @@
 from .memory.memory import Memory
 from .guardrails import GuardrailResult, LLMGuardrail
 from .agent.handoff import Handoff, handoff, handoff_filters, RECOMMENDED_PROMPT_PREFIX, prompt_with_handoff_instructions
+from .reasoning import ReasoningConfig, ActionState, ReasoningStep, ReasoningTrace, ReasoningFlow, reason_step
 from .main import (
     TaskOutput,
     ReflectionOutput,
@@ -102,6 +105,8 @@ def disable_telemetry():
 __all__ = [
     'Agent',
     'ImageAgent',
+    'ReasoningAgent',
+    'DualBrainAgent',
     'PraisonAIAgents',
     'Agents',
     'Tools',
@@ -132,6 +137,12 @@ def disable_telemetry():
     'handoff_filters',
     'RECOMMENDED_PROMPT_PREFIX',
     'prompt_with_handoff_instructions',
+    'ReasoningConfig',
+    'ActionState',
+    'ReasoningStep',
+    'ReasoningTrace',
+    'ReasoningFlow',
+    'reason_step',
     'get_telemetry',
     'enable_telemetry',
     'disable_telemetry',
diff --git a/src/praisonai-agents/praisonaiagents/agent/__init__.py b/src/praisonai-agents/praisonaiagents/agent/__init__.py
index b14ff51f..1a151838 100644
--- a/src/praisonai-agents/praisonaiagents/agent/__init__.py
+++ b/src/praisonai-agents/praisonaiagents/agent/__init__.py
@@ -3,5 +3,18 @@
 from .image_agent import ImageAgent
 from .handoff import Handoff, handoff, handoff_filters, RECOMMENDED_PROMPT_PREFIX, prompt_with_handoff_instructions
 from .router_agent import RouterAgent
+from .reasoning_agent import ReasoningAgent
+from .dual_brain_agent import DualBrainAgent
 
-__all__ = ['Agent', 'ImageAgent', 'Handoff', 'handoff', 'handoff_filters', 'RECOMMENDED_PROMPT_PREFIX', 'prompt_with_handoff_instructions', 'RouterAgent']
\ No newline at end of file
+__all__ = [
+    'Agent',
+    'ImageAgent',
+    'Handoff',
+    'handoff',
+    'handoff_filters',
+    'RECOMMENDED_PROMPT_PREFIX',
+    'prompt_with_handoff_instructions',
+    'RouterAgent',
+    'ReasoningAgent',
+    'DualBrainAgent'
+]
\ No newline at end of file
diff --git a/src/praisonai-agents/praisonaiagents/agent/dual_brain_agent.py b/src/praisonai-agents/praisonaiagents/agent/dual_brain_agent.py
new file mode 100644
index 00000000..4f6e4c94
--- /dev/null
+++ b/src/praisonai-agents/praisonaiagents/agent/dual_brain_agent.py
@@ -0,0 +1,373 @@
+"""
+DualBrainAgent - An agent with separate LLMs for conversation and reasoning.
+
+This agent extends the base Agent class with dual LLM capabilities:
+- Main LLM for conversational responses
+- Separate reasoning LLM for analytical thinking
+- Configurable models for different purposes
+- Reasoning coordination between the two models
+"""
+
+from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING
+from ..agent import Agent
+from ..reasoning import (
+    ReasoningConfig,
+    ReasoningTrace,
+    ReasoningStep,
+    ActionState,
+    ReasoningFlow
+)
+import time
+
+if TYPE_CHECKING:
+    from ..task.task import Task
+
+
+class DualBrainAgent(Agent):
+    """
+    Agent with separate LLMs for conversation and reasoning.
+
+    This agent uses two different language models:
+    - Main LLM for conversational interaction and response generation
+    - Reasoning LLM for analytical thinking and problem decomposition
+    """
+
+    def __init__(
+        self,
+        name: Optional[str] = None,
+        role: Optional[str] = None,
+        goal: Optional[str] = None,
+        backstory: Optional[str] = None,
+        instructions: Optional[str] = None,
+        llm: Optional[Union[str, Any]] = None,
+        reasoning_llm: Optional[Union[str, Any]] = None,
+        reasoning: bool = True,
+        reasoning_config: Optional[Union[ReasoningConfig, Dict[str, Any]]] = None,
+        llm_config: Optional[Dict[str, Any]] = None,
+        **kwargs
+    ):
+        """
+        Initialize a DualBrainAgent.
+
+        Args:
+            name: Agent name
+            role: Agent role
+            goal: Agent goal
+            backstory: Agent backstory
+            instructions: Direct instructions
+            llm: Main conversational model (e.g., "gpt-4-turbo")
+            reasoning_llm: Analytical reasoning model (e.g., "o1-preview")
+            reasoning: Enable reasoning capabilities
+            reasoning_config: Reasoning configuration or dict
+            llm_config: Configuration for main LLM
+            **kwargs: Additional Agent parameters
+        """
+        # Set up the main LLM, falling back to the default model when neither
+        # llm_config nor llm specifies one
+        if llm_config and isinstance(llm_config, dict):
+            main_llm = llm_config.get('model', llm) or "gpt-4o"
+        else:
+            main_llm = llm or "gpt-4o"
+
+        # Force reasoning to be enabled and set reflect_llm
+        kwargs['reasoning_steps'] = True
+        kwargs['self_reflect'] = kwargs.get('self_reflect', True)
+        kwargs['reflect_llm'] = reasoning_llm or "o1-preview"
+
+        super().__init__(
+            name=name,
+            role=role,
+            goal=goal,
+            backstory=backstory,
+            instructions=instructions,
+            llm=main_llm,
+            **kwargs
+        )
+
+        # Set up reasoning LLM
+        self.reasoning_llm = reasoning_llm or "o1-preview"
+        self.main_llm = main_llm
+
+        # Initialize reasoning configuration
+        if isinstance(reasoning_config, dict):
+            self.reasoning_config = ReasoningConfig(**reasoning_config)
+        elif reasoning_config is None:
+            self.reasoning_config = ReasoningConfig()
+        else:
+            self.reasoning_config = reasoning_config
+
+        # Store LLM configurations
+        self.llm_config = llm_config or {}
+        self.reasoning_llm_config = {
+            "model": self.reasoning_llm,
+            "temperature": 0.1,
+            "system_prompt": "You are a step-by-step analytical reasoner"
+        }
+
+        # Update reasoning LLM config if provided in reasoning_config
+        if isinstance(reasoning_config, dict) and 'model' in reasoning_config:
+            self.reasoning_llm_config.update(reasoning_config)
+
+        self.reasoning_trace: Optional[ReasoningTrace] = None
+        self.last_reasoning_steps: List[ReasoningStep] = []
+
+        # Update instructions to include dual-brain guidance
+        self._enhance_instructions_for_dual_brain()
+
+    def _enhance_instructions_for_dual_brain(self):
+        """Enhance agent instructions with dual-brain guidance."""
+        dual_brain_guidance = f"""
+
+DUAL-BRAIN INSTRUCTIONS:
+- You have access to two specialized models:
+  * Main LLM ({self.main_llm}): For conversational responses and final output
+  * Reasoning LLM ({self.reasoning_llm}): For analytical reasoning and problem decomposition
+- Use the reasoning LLM for complex analysis, then generate responses with the main LLM
+- Coordinate between both models for optimal problem-solving
+- Show the reasoning process from the analytical model in your final response
+        """
+
+        if self.instructions:
+            self.instructions += dual_brain_guidance
+        else:
+            base_instructions = f"You are {self.role or 'an assistant'}"
+            if self.goal:
+                base_instructions += f" with the goal: {self.goal}"
+            self.instructions = base_instructions + dual_brain_guidance
+
+    def _reason_with_analytical_brain(self, problem: str) -> str:
+        """
+        Use the reasoning LLM for analytical thinking.
+
+        Args:
+            problem: Problem to analyze
+
+        Returns:
+            Analytical reasoning result
+        """
+        reasoning_prompt = f"""
+You are an analytical reasoning specialist. Break down this problem step-by-step:
+
+Problem: {problem}
+
+Please provide:
+1. Problem decomposition
+2. Step-by-step analysis
+3. Key insights and conclusions
+4. Confidence assessment for each step
+5. Reasoning strategy used
+
+Format your response clearly with numbered steps and confidence scores.
+        """
+
+        # Store original LLM temporarily
+        original_llm = self.llm
+
+        try:
+            # Switch to reasoning LLM
+            self.llm = self.reasoning_llm
+
+            # Use parent chat method with reasoning LLM
+            reasoning_result = super().chat(reasoning_prompt)
+
+            return reasoning_result
+
+        finally:
+            # Restore original LLM
+            self.llm = original_llm
+
+    def _generate_response_with_main_brain(
+        self,
+        original_query: str,
+        reasoning_analysis: str
+    ) -> str:
+        """
+        Use the main LLM to generate the final conversational response.
+
+        Args:
+            original_query: Original user query
+            reasoning_analysis: Analysis from reasoning LLM
+
+        Returns:
+            Final conversational response
+        """
+        response_prompt = f"""
+Based on the analytical reasoning provided, generate a clear and helpful response to the user's query.
+
+Original Query: {original_query}
+
+Analytical Reasoning:
+{reasoning_analysis}
+
+Please provide a comprehensive response that:
+1. Addresses the user's query directly
+2. Incorporates insights from the analytical reasoning
+3. Is clear and conversational
+4. Shows confidence in the conclusions
+5. Acknowledges any reasoning steps taken
+
+Format your response naturally while incorporating the analytical insights.
+        """
+
+        # Use main LLM for response generation
+        return super().chat(response_prompt)
+
+    def chat(
+        self,
+        message: str,
+        **kwargs
+    ) -> str:
+        """
+        Enhanced chat method using dual-brain approach.
+
+        Args:
+            message: Input message
+            **kwargs: Additional chat parameters
+
+        Returns:
+            Response coordinated between both LLMs
+        """
+        # Start a fresh reasoning trace (and step history) for this message
+        self.reasoning_trace = ReasoningTrace(problem=message, started_at=time.time())
+        self.last_reasoning_steps = []
+
+        # Step 1: Use reasoning LLM for analysis
+        reasoning_analysis = self._reason_with_analytical_brain(message)
+
+        # Add reasoning step
+        reasoning_step = ReasoningStep(
+            step_number=1,
+            title="Analytical Brain Reasoning",
+            thought=f"Using {self.reasoning_llm} for analytical thinking",
+            action=reasoning_analysis,
+            confidence=0.9  # High confidence in reasoning LLM analysis
+        )
+        self.reasoning_trace.steps.append(reasoning_step)
+        self.last_reasoning_steps.append(reasoning_step)
+
+        # Step 2: Use main LLM for response generation
+        final_response = self._generate_response_with_main_brain(message, reasoning_analysis)
+
+        # Add response generation step
+        response_step = ReasoningStep(
+            step_number=2,
+            title="Main Brain Response Generation",
+            thought=f"Using {self.main_llm} for conversational response",
+            action=final_response,
+            confidence=0.85
+        )
+        self.reasoning_trace.steps.append(response_step)
+        self.last_reasoning_steps.append(response_step)
+
+        # Complete reasoning trace
+        self.reasoning_trace.final_answer = final_response
+        self.reasoning_trace.completed_at = time.time()
+        self.reasoning_trace.total_time = (
+            self.reasoning_trace.completed_at - self.reasoning_trace.started_at
+        )
+        self.reasoning_trace.overall_confidence = sum(
+            step.confidence for step in self.reasoning_trace.steps
+        ) / len(self.reasoning_trace.steps)
+
+        return final_response
+
+    def execute(self, task: 'Task', **kwargs) -> Any:
+        """
+        Enhanced execute method using dual-brain approach.
+
+        Args:
+            task: Task to execute
+            **kwargs: Additional execution parameters
+
+        Returns:
+            Task result with dual-brain processing
+        """
+        # Start reasoning trace for the task
+        self.reasoning_trace = ReasoningTrace(
+            problem=task.description,
+            started_at=time.time()
+        )
+
+        # Use reasoning LLM for task analysis
+        task_analysis = self._reason_with_analytical_brain(task.description)
+
+        # Create enhanced task description
+        enhanced_description = f"""
+{task.description}
+
+Analytical Insights:
+{task_analysis}
+
+Please use these insights to complete the task effectively.
+ """ + + # Store original task description + original_description = task.description + task.description = enhanced_description + + try: + # Execute the task with main LLM + result = super().execute(task, **kwargs) + + # Complete reasoning trace + if hasattr(result, 'raw'): + self.reasoning_trace.final_answer = result.raw + else: + self.reasoning_trace.final_answer = str(result) + + self.reasoning_trace.completed_at = time.time() + self.reasoning_trace.total_time = ( + self.reasoning_trace.completed_at - self.reasoning_trace.started_at + ) + + return result + + finally: + # Restore original task description + task.description = original_description + + def get_brain_status(self) -> Dict[str, Any]: + """Get status of both brain models.""" + return { + "main_llm": { + "model": self.main_llm, + "config": self.llm_config, + "purpose": "Conversational responses and final output generation" + }, + "reasoning_llm": { + "model": self.reasoning_llm, + "config": self.reasoning_llm_config, + "purpose": "Analytical reasoning and problem decomposition" + }, + "last_reasoning_steps": len(self.last_reasoning_steps), + "reasoning_config": self.reasoning_config.model_dump() if self.reasoning_config else None + } + + def switch_reasoning_llm(self, new_reasoning_llm: str, config: Optional[Dict[str, Any]] = None): + """ + Switch the reasoning LLM to a different model. + + Args: + new_reasoning_llm: New reasoning model name + config: Optional configuration for the new model + """ + self.reasoning_llm = new_reasoning_llm + self.reflect_llm = new_reasoning_llm # Update reflect_llm as well + + if config: + self.reasoning_llm_config.update(config) + else: + self.reasoning_llm_config["model"] = new_reasoning_llm + + def switch_main_llm(self, new_main_llm: str, config: Optional[Dict[str, Any]] = None): + """ + Switch the main LLM to a different model. + + Args: + new_main_llm: New main model name + config: Optional configuration for the new model + """ + self.main_llm = new_main_llm + self.llm = new_main_llm + + if config: + self.llm_config.update(config) \ No newline at end of file diff --git a/src/praisonai-agents/praisonaiagents/agent/reasoning_agent.py b/src/praisonai-agents/praisonaiagents/agent/reasoning_agent.py new file mode 100644 index 00000000..3e53ac83 --- /dev/null +++ b/src/praisonai-agents/praisonaiagents/agent/reasoning_agent.py @@ -0,0 +1,256 @@ +""" +ReasoningAgent - An enhanced agent with built-in reasoning capabilities. + +This agent extends the base Agent class with advanced reasoning features including: +- Configurable reasoning parameters +- Step-by-step reasoning with confidence scoring +- Flow control with action states +- Reasoning trace tracking +""" + +from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING +from ..agent import Agent +from ..reasoning import ( + ReasoningConfig, + ReasoningTrace, + ReasoningStep, + ActionState, + ReasoningFlow, + reason_step +) +import time + +if TYPE_CHECKING: + from ..task.task import Task + + +class ReasoningAgent(Agent): + """ + Enhanced agent with built-in reasoning capabilities. + + This agent provides step-by-step reasoning with configurable parameters, + confidence scoring, and reasoning trace tracking. 
+ """ + + def __init__( + self, + name: Optional[str] = None, + role: Optional[str] = None, + goal: Optional[str] = None, + backstory: Optional[str] = None, + instructions: Optional[str] = None, + reasoning: bool = True, + reasoning_config: Optional[Union[ReasoningConfig, Dict[str, Any]]] = None, + min_confidence: float = 0.7, + reasoning_flow: Optional[ReasoningFlow] = None, + **kwargs + ): + """ + Initialize a ReasoningAgent. + + Args: + name: Agent name + role: Agent role + goal: Agent goal + backstory: Agent backstory + instructions: Direct instructions + reasoning: Enable reasoning (always True for ReasoningAgent) + reasoning_config: Reasoning configuration + min_confidence: Minimum confidence threshold + reasoning_flow: Flow control configuration + **kwargs: Additional Agent parameters + """ + # Force reasoning to be enabled + kwargs['reasoning_steps'] = True + kwargs['self_reflect'] = kwargs.get('self_reflect', True) + + super().__init__( + name=name, + role=role, + goal=goal, + backstory=backstory, + instructions=instructions, + **kwargs + ) + + # Initialize reasoning configuration + if isinstance(reasoning_config, dict): + self.reasoning_config = ReasoningConfig(**reasoning_config) + elif reasoning_config is None: + self.reasoning_config = ReasoningConfig() + else: + self.reasoning_config = reasoning_config + + self.min_confidence = min_confidence + self.reasoning_flow = reasoning_flow or ReasoningFlow() + self.reasoning_trace: Optional[ReasoningTrace] = None + self.last_reasoning_steps: List[ReasoningStep] = [] + + # Update instructions to include reasoning guidance + self._enhance_instructions_for_reasoning() + + def _enhance_instructions_for_reasoning(self): + """Enhance agent instructions with reasoning guidance.""" + reasoning_guidance = f""" + +REASONING INSTRUCTIONS: +- Use step-by-step reasoning for all complex problems +- Show your thinking process explicitly +- Assess confidence for each reasoning step (0.0-1.0) +- Minimum {self.reasoning_config.min_steps} steps, maximum {self.reasoning_config.max_steps} steps +- Reasoning style: {self.reasoning_config.style} +- Minimum confidence threshold: {self.min_confidence} + """ + + if self.instructions: + self.instructions += reasoning_guidance + else: + base_instructions = f"You are {self.role or 'an assistant'}" + if self.goal: + base_instructions += f" with the goal: {self.goal}" + self.instructions = base_instructions + reasoning_guidance + + def start_reasoning_trace(self, problem: str) -> ReasoningTrace: + """Start a new reasoning trace for a problem.""" + self.reasoning_trace = ReasoningTrace(problem=problem) + self.last_reasoning_steps = [] + return self.reasoning_trace + + def add_reasoning_step( + self, + thought: str, + action: str, + confidence: Optional[float] = None + ) -> ReasoningStep: + """Add a reasoning step to the current trace.""" + if not self.reasoning_trace: + self.start_reasoning_trace("Current problem") + + # Calculate confidence if not provided + if confidence is None: + confidence = min(0.95, len(action) / 100.0 + 0.6) + + step = ReasoningStep( + step_number=len(self.reasoning_trace.steps) + 1, + title=f"Step {len(self.reasoning_trace.steps) + 1}", + thought=thought, + action=action, + confidence=confidence + ) + + # Apply flow control + if confidence < self.min_confidence: + step.action_state = ActionState.RESET + elif self.reasoning_flow.should_validate(step): + step.action_state = ActionState.VALIDATE + + self.reasoning_trace.steps.append(step) + self.last_reasoning_steps.append(step) + + return 
+
+    def complete_reasoning_trace(self, final_answer: str) -> Optional[ReasoningTrace]:
+        """Complete the current reasoning trace; returns None if no trace is active."""
+        if not self.reasoning_trace:
+            return None
+
+        self.reasoning_trace.final_answer = final_answer
+        self.reasoning_trace.completed_at = time.time()
+        self.reasoning_trace.total_time = (
+            self.reasoning_trace.completed_at - self.reasoning_trace.started_at
+        )
+
+        # Calculate overall confidence as average of step confidences
+        if self.reasoning_trace.steps:
+            self.reasoning_trace.overall_confidence = sum(
+                step.confidence for step in self.reasoning_trace.steps
+            ) / len(self.reasoning_trace.steps)
+
+        return self.reasoning_trace
+
+    def chat(
+        self,
+        message: str,
+        **kwargs
+    ) -> str:
+        """
+        Enhanced chat method with reasoning capabilities.
+
+        Args:
+            message: Input message
+            **kwargs: Additional chat parameters
+
+        Returns:
+            Response with reasoning trace
+        """
+        # Start reasoning trace
+        self.start_reasoning_trace(message)
+
+        # Enhance message with reasoning instructions
+        enhanced_message = f"""
+{message}
+
+Please solve this step-by-step using the following reasoning process:
+1. Break down the problem into logical steps
+2. For each step, show your thought process
+3. State your confidence level (0.0-1.0) for each step
+4. Ensure minimum {self.reasoning_config.min_steps} reasoning steps
+5. Use {self.reasoning_config.style} reasoning style
+6. Provide a clear final answer
+
+Format your response to show each reasoning step clearly.
+        """
+
+        # Call parent chat method
+        response = super().chat(enhanced_message, **kwargs)
+
+        # Complete reasoning trace
+        self.complete_reasoning_trace(response)
+
+        return response
+
+    def execute(self, task: 'Task', **kwargs) -> Any:
+        """
+        Enhanced execute method with reasoning capabilities.
+
+        Args:
+            task: Task to execute
+            **kwargs: Additional execution parameters
+
+        Returns:
+            Task result with reasoning trace
+        """
+        # Start reasoning trace for the task
+        self.start_reasoning_trace(task.description)
+
+        # Execute the task
+        result = super().execute(task, **kwargs)
+
+        # Complete reasoning trace
+        if hasattr(result, 'raw'):
+            self.complete_reasoning_trace(result.raw)
+        else:
+            self.complete_reasoning_trace(str(result))
+
+        return result
+
+    def get_reasoning_summary(self) -> Dict[str, Any]:
+        """Get a summary of the last reasoning process."""
+        if not self.reasoning_trace:
+            return {"status": "No reasoning trace available"}
+
+        return {
+            "problem": self.reasoning_trace.problem,
+            "total_steps": len(self.reasoning_trace.steps),
+            "overall_confidence": self.reasoning_trace.overall_confidence,
+            "total_time": self.reasoning_trace.total_time,
+            "final_answer": self.reasoning_trace.final_answer,
+            "steps_summary": [
+                {
+                    "step": step.step_number,
+                    "confidence": step.confidence,
+                    "action_state": step.action_state.value
+                }
+                for step in self.reasoning_trace.steps
+            ]
+        }
\ No newline at end of file
diff --git a/src/praisonai-agents/praisonaiagents/reasoning.py b/src/praisonai-agents/praisonaiagents/reasoning.py
new file mode 100644
index 00000000..0c448f5c
--- /dev/null
+++ b/src/praisonai-agents/praisonaiagents/reasoning.py
@@ -0,0 +1,129 @@
+"""
+Reasoning module for advanced reasoning capabilities in PraisonAI Agents.
+
+This module provides enhanced reasoning features including:
+- ReasoningConfig for configurable reasoning parameters
+- ActionState enum for flow control
+- ReasoningStep and ReasoningTrace models for step-by-step reasoning
+- ReasoningFlow for validation and reset control
+- Confidence scoring and validation
+
+These primitives are consumed by ReasoningAgent and DualBrainAgent in the
+agent package.
+"""
+
+from typing import List, Optional, Any, Literal, Callable
+from enum import Enum
+from pydantic import BaseModel, Field
+import time
+import uuid
+
+
+class ActionState(Enum):
+    """Action states for reasoning flow control."""
+    CONTINUE = "continue"
+    VALIDATE = "validate"
+    FINAL_ANSWER = "final_answer"
+    RESET = "reset"
+
+
+class ReasoningConfig(BaseModel):
+    """Configuration for reasoning behavior."""
+    min_steps: int = Field(default=2, description="Minimum number of reasoning steps")
+    max_steps: int = Field(default=10, description="Maximum number of reasoning steps")
+    style: Literal["analytical", "creative", "systematic"] = Field(
+        default="analytical",
+        description="Reasoning style: analytical, creative, or systematic"
+    )
+    confidence_threshold: float = Field(
+        default=0.8,
+        description="Minimum confidence threshold for proceeding"
+    )
+    show_internal_thoughts: bool = Field(
+        default=True,
+        description="Whether to display internal reasoning thoughts"
+    )
+    auto_validate_critical: bool = Field(
+        default=True,
+        description="Automatically validate critical reasoning steps"
+    )
+
+
+class ReasoningStep(BaseModel):
+    """Individual reasoning step with metadata."""
+    step_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    step_number: int
+    title: str
+    thought: str
+    action: str
+    confidence: float = Field(ge=0.0, le=1.0)
+    timestamp: float = Field(default_factory=time.time)
+    action_state: ActionState = ActionState.CONTINUE
+    retries: int = 0
+
+
+class ReasoningTrace(BaseModel):
+    """Complete reasoning trace for a problem."""
+    problem: str
+    steps: List[ReasoningStep] = Field(default_factory=list)
+    final_answer: str = ""
+    overall_confidence: float = 0.0
+    total_time: float = 0.0
+    started_at: float = Field(default_factory=time.time)
+    completed_at: Optional[float] = None
+
+
+class ReasoningFlow:
+    """Flow control for reasoning processes."""
+
+    def __init__(
+        self,
+        on_validate: Optional[Callable[[ReasoningStep], bool]] = None,
+        on_reset: Optional[Callable[[ReasoningStep], bool]] = None,
+        auto_validate_critical: bool = True
+    ):
+        self.on_validate = on_validate or (lambda step: step.confidence > 0.9)
+        self.on_reset = on_reset or (lambda step: step.retries < 3)
+        self.auto_validate_critical = auto_validate_critical
+
+    def should_validate(self, step: ReasoningStep) -> bool:
+        """Determine if a step should be validated."""
+        return self.on_validate(step)
+
+    def should_reset(self, step: ReasoningStep) -> bool:
+        """Determine if a step should be reset/retried."""
+        return self.on_reset(step)
+
+
+def reason_step(
+    agent: Any,
+    thought: str,
+    action: str,
+    min_confidence: float = 0.7
+) -> ReasoningStep:
+    """
+    Create a reasoning step with confidence validation.
+
+    Args:
+        agent: The agent performing the reasoning
+        thought: The reasoning thought/analysis
+        action: The action or conclusion from the thought
+        min_confidence: Minimum confidence required
+
+    Returns:
+        ReasoningStep with confidence scoring
+    """
+    # Simulate confidence calculation (in a real implementation, this could use an LLM)
+    confidence = min(0.95, len(action) / 100.0 + 0.5)  # Simple heuristic
+
+    # Number the step after any steps already on the agent's reasoning trace.
+    # The trace is a ReasoningTrace model (or None), so read its .steps attribute
+    # rather than treating it as a dict.
+    trace = getattr(agent, 'reasoning_trace', None)
+    existing_steps = getattr(trace, 'steps', None) or []
+    step_number = len(existing_steps) + 1
+
+    step = ReasoningStep(
+        step_number=step_number,
+        title=f"Step {step_number}",
+        thought=thought,
+        action=action,
+        confidence=confidence
+    )
+
+    # Validate confidence
+    if confidence < min_confidence:
+        step.action_state = ActionState.RESET
+
+    return step
\ No newline at end of file
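
For reviewers, here is a minimal usage sketch of the new ReasoningAgent API using only the names this diff exports from `praisonaiagents`. It is not part of the change; it assumes the base Agent accepts these constructor arguments at runtime and that a default OpenAI-compatible model is configured, and the prompt text is illustrative only.

```python
# Hypothetical usage sketch for ReasoningAgent (not part of the diff)
from praisonaiagents import ReasoningAgent

agent = ReasoningAgent(
    name="Analyst",
    role="Research analyst",
    goal="Answer questions with explicit step-by-step reasoning",
    # Dict form is accepted and converted to ReasoningConfig internally
    reasoning_config={"min_steps": 3, "max_steps": 8, "style": "systematic"},
    min_confidence=0.75,
)

# chat() wraps the message with step-by-step instructions and records a trace
response = agent.chat("Why might caching reduce p99 latency?")
print(response)

# Inspect the recorded trace: step count, confidences, and action states
summary = agent.get_reasoning_summary()
print(summary["total_steps"], summary["overall_confidence"])
```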
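Similarly, a sketch of DualBrainAgent and the standalone `reason_step` helper, under the same assumptions. The model names are simply the defaults from this diff ("gpt-4o" and "o1-preview"), and valid credentials for both models would be required.

```python
# Hypothetical usage sketch for DualBrainAgent and reason_step (not part of the diff)
from praisonaiagents import DualBrainAgent, reason_step, ActionState

agent = DualBrainAgent(
    name="Strategist",
    role="Planning assistant",
    llm="gpt-4o",                # main, conversational brain (diff default)
    reasoning_llm="o1-preview",  # analytical brain (diff default)
)

# chat() first calls the reasoning LLM, then the main LLM for the final answer
answer = agent.chat("Plan a migration from REST to gRPC.")
print(agent.get_brain_status()["reasoning_llm"]["model"])

# reason_step builds a standalone step; its length-based heuristic flags
# low-confidence steps for a reset/retry
step = reason_step(agent, thought="Check rollout risk", action="Stage by service")
if step.action_state is ActionState.RESET:
    print(f"Step {step.step_number} fell below the confidence floor; retry")
```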
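Finally, a sketch of how the ReasoningFlow hooks compose with ReasoningAgent. The lambdas below are illustrative; the defaults in the diff validate at confidence > 0.9 and allow up to 3 retries.

```python
# Hypothetical sketch: customizing ReasoningFlow (not part of the diff)
from praisonaiagents import ReasoningAgent, ReasoningFlow

# Validate only steps that claim very high confidence; allow up to 2 retries
flow = ReasoningFlow(
    on_validate=lambda step: step.confidence >= 0.95,
    on_reset=lambda step: step.retries < 2,
)

agent = ReasoningAgent(name="Checker", role="Verifier", reasoning_flow=flow)

# add_reasoning_step derives a confidence from the action text when none is
# given, then applies the flow: RESET below min_confidence, else VALIDATE
# when on_validate fires, else CONTINUE
step = agent.add_reasoning_step(
    thought="Cross-check the totals",
    action="Recompute the sum from the raw table",
)
print(step.action_state)
```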