diff --git a/adalflow/adalflow/components/agent/__init__.py b/adalflow/adalflow/components/agent/__init__.py index 6afd1875e..b24a9f070 100644 --- a/adalflow/adalflow/components/agent/__init__.py +++ b/adalflow/adalflow/components/agent/__init__.py @@ -1,10 +1,23 @@ -from .react import DEFAULT_REACT_AGENT_SYSTEM_PROMPT, ReActAgent -from adalflow.utils.registry import EntityMapping +"""Agent components for AdalFlow.""" + +from .react import ReActAgent as LegacyReActAgent +from .react_agent import ReActAgent as NewReActAgent +from .base_agent import ( + BaseAgent, + BasePlanner, + BaseToolManager, + BaseMemory, + Step, + AgentOutput, +) __all__ = [ - "ReActAgent", - "DEFAULT_REACT_AGENT_SYSTEM_PROMPT", + "LegacyReActAgent", # Old implementation for backward compatibility + "NewReActAgent", # New implementation using base agent + "BaseAgent", # Base agent class + "BasePlanner", # Base planner interface + "BaseToolManager", # Base tool manager interface + "BaseMemory", # Base memory interface + "Step", # Step data class + "AgentOutput", # Output data class ] - -for name in __all__: - EntityMapping.register(name, globals()[name]) diff --git a/adalflow/adalflow/components/agent/base_agent.py b/adalflow/adalflow/components/agent/base_agent.py new file mode 100644 index 000000000..ed37cef1e --- /dev/null +++ b/adalflow/adalflow/components/agent/base_agent.py @@ -0,0 +1,231 @@ +"""Base agent implementation with standardized interfaces.""" + +from typing import List, Union, Callable, Optional, Any, Dict +from dataclasses import dataclass, field +from adalflow.core.base_data_class import DataClass +import logging + +from adalflow.core.func_tool import FunctionTool, AsyncCallable +from adalflow.core.component import Component +from adalflow.core.types import ( + Function, +) +from adalflow.core.model_client import ModelClient +from adalflow.utils.logger import printc + +log = logging.getLogger(__name__) + + +@dataclass +class Step(DataClass): + """Standardized step structure for all agents.""" + + step_number: int = field(metadata={"desc": "The step number"}) + action: Optional[Function] = field( + default=None, metadata={"desc": "The action taken in this step"} + ) + observation: Any = field( + default=None, metadata={"desc": "The observation from this step"} + ) + metadata: Dict = field( + default_factory=dict, metadata={"desc": "Additional metadata for this step"} + ) + + +@dataclass +class AgentOutput(DataClass): + """Standardized output structure for all agents.""" + + id: Optional[str] = field( + default=None, metadata={"desc": "The unique id of the output"} + ) + step_history: List[Step] = field( + metadata={"desc": "The history of steps."}, default_factory=list + ) + answer: Any = field(metadata={"desc": "The final answer."}, default=None) + metadata: Dict = field( + default_factory=dict, metadata={"desc": "Additional metadata"} + ) + + def validate(self) -> bool: + """Validate the output structure.""" + if not isinstance(self.step_history, list): + return False + if not all(isinstance(step, Step) for step in self.step_history): + return False + return True + + +class BasePlanner(Component): + """Base interface for planning strategies.""" + + def __init__(self, model_client: ModelClient, model_kwargs: Dict = {}): + super().__init__() + self.model_client = model_client + self.model_kwargs = model_kwargs + + def plan(self, input: str, context: Dict) -> Function: + """Plan the next action based on input and context.""" + raise NotImplementedError + + +class BaseToolManager(Component): + """Base 
interface for tool management.""" + + def __init__(self, tools: List[Union[Callable, AsyncCallable, FunctionTool]]): + super().__init__() + self.tools = tools + + def execute(self, action: Function) -> Any: + """Execute an action using the appropriate tool.""" + raise NotImplementedError + + +class BaseMemory(Component): + """Base interface for memory management.""" + + def __init__(self): + super().__init__() + self.steps: List[Step] = [] + + def store(self, step: Step) -> None: + """Store a step in memory.""" + self.steps.append(step) + + def retrieve(self, query: str) -> List[Step]: + """Retrieve relevant steps from memory.""" + raise NotImplementedError + + +class BaseAgent(Component): + """Base agent class with standardized interfaces.""" + + def __init__( + self, + planner: BasePlanner, + tool_manager: BaseToolManager, + memory: Optional[BaseMemory] = None, + max_steps: int = 10, + context_variables: Optional[Dict] = None, + use_cache: bool = True, + debug: bool = False, + ): + super().__init__() + self.planner = planner + self.tool_manager = tool_manager + self.memory = memory + self.max_steps = max_steps + self.context_variables = context_variables + self.use_cache = use_cache + self.debug = debug + + def _handle_training(self, step: Step) -> Step: + """Handle training mode specific logic.""" + if not self.training: + return step + # Add training specific logic here + return step + + def _handle_evaluation(self, step: Step) -> Step: + """Handle evaluation mode specific logic.""" + return step + + def _format_output(self, step_history: List[Step], answer: Any) -> AgentOutput: + """Format the final output.""" + return AgentOutput( + step_history=step_history, answer=answer, metadata=self._get_metadata() + ) + + def _get_metadata(self) -> Dict: + """Get metadata for the output.""" + return { + "max_steps": self.max_steps, + "use_cache": self.use_cache, + "context_variables": self.context_variables, + } + + def _run_one_step( + self, + step_number: int, + input: str, + context: Dict, + step_history: List[Step] = [], + ) -> Step: + """Run one step of the agent.""" + if self.debug: + printc(f"Running step {step_number}", color="yellow") + + # Plan the next action + action = self.planner.plan(input, context) + + # Execute the action + observation = self.tool_manager.execute(action) + + # Create step + step = Step( + step_number=step_number, + action=action, + observation=observation, + metadata={"context": context}, + ) + + # Handle training/evaluation mode + if self.training: + step = self._handle_training(step) + else: + step = self._handle_evaluation(step) + + # Store in memory if available + if self.memory: + self.memory.store(step) + + return step + + def call(self, input: str, **kwargs) -> AgentOutput: + """Main entry point for the agent.""" + step_history: List[Step] = [] + context = { + "input": input, + "step_history": step_history, + **(self.context_variables or {}), + **kwargs, + } + + for step_number in range(1, self.max_steps + 1): + step = self._run_one_step( + step_number=step_number, + input=input, + context=context, + step_history=step_history, + ) + step_history.append(step) + + # Check if we should stop + if self._should_stop(step): + break + + # Get final answer + answer = self._get_answer(step_history) + + # Format and return output + output = self._format_output(step_history, answer) + if not output.validate(): + raise ValueError("Invalid output format") + + return output + + def _should_stop(self, step: Step) -> bool: + """Determine if the agent should stop.""" + 
raise NotImplementedError + + def _get_answer(self, step_history: List[Step]) -> Any: + """Get the final answer from step history.""" + raise NotImplementedError + + def train_step(self, input: str, target: Any) -> Dict: + """Standard training step interface.""" + raise NotImplementedError + + def eval_step(self, input: str) -> AgentOutput: + """Standard evaluation step interface.""" + raise NotImplementedError diff --git a/adalflow/adalflow/components/agent/examples/__init__.py b/adalflow/adalflow/components/agent/examples/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/adalflow/adalflow/components/agent/examples/react_memory_example.py b/adalflow/adalflow/components/agent/examples/react_memory_example.py new file mode 100644 index 000000000..eae9ec2d6 --- /dev/null +++ b/adalflow/adalflow/components/agent/examples/react_memory_example.py @@ -0,0 +1,134 @@ +"""Example demonstrating ReAct agent with vector memory support.""" + +from adalflow.components.agent.react_agent import ReActAgent +from adalflow.core.func_tool import FunctionTool +from adalflow.components.model_client import OpenAIClient +from adalflow.core.types import Function +import logging + +# from adalflow.components.memory import Memory +from adalflow.components.memory.memory import Memory + +from dotenv import load_dotenv + +load_dotenv() +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def main(): + # Initialize model client + model_client = OpenAIClient() + model_kwargs = { + "model": "gpt-3.5-turbo", + "temperature": 0.7, + } + memory = Memory() + + # Define some example tools + def calculate(expression: str, **kwargs) -> str: + """Calculate the result of a mathematical expression.""" + try: + return str(eval(expression)) + except Exception as e: + return f"Error calculating: {str(e)}" + + def get_factorial(n: int, **kwargs) -> str: + """Calculate factorial of a number.""" + try: + result = 1 + for i in range(1, n + 1): + result *= i + return str(result) + except Exception as e: + return f"Error calculating factorial: {str(e)}" + + def finish(answer: str, **kwargs) -> str: + """Finish the conversation with a final answer.""" + return answer + + def extract_result(history: str, **kwargs) -> str: + """Get the previous context.""" + return memory.call() + + def square(n: int, **kwargs) -> str: + """Square a number.""" + return str(n * n) + + # Create example functions for the agent + examples = [ + Function( + thought="I need to calculate a simple arithmetic expression.", + name="calculate", + kwargs={"expression": "2 + 2"}, + ), + Function( + thought="I need to calculate the factorial of a number 5.", + name="get_factorial", + kwargs={"n": 5}, + ), + Function( + thought="I need to context data of previous conversation.", + name="extract_result", + kwargs={"history": "history"}, + ), + Function( + thought="I need to square a number 3.", + name="square", + kwargs={"n": 3}, + ), + ] + # Create function tools + calc_tool = FunctionTool(calculate) + factorial_tool = FunctionTool(get_factorial) + extract_result_tool = FunctionTool(extract_result) + square_tool = FunctionTool(square) + + # Create ReAct agent with vector memory + agent = ReActAgent( + tools=[ + calc_tool, + factorial_tool, + extract_result_tool, + square_tool, + ], + model_client=model_client, + model_kwargs=model_kwargs, + add_llm_as_fallback=True, + max_steps=5, + examples=examples, + debug=True, # Enable debug output + ) + + # Example 1: Simple calculation + logger.info("Example 1: 
Simple calculation") + print("MEMORY_CALL", memory.call()) + agent.context_variables = {"context_variables": {"history": memory.call()}} + result1 = agent("What is 2 + 1?") + logger.info(f"Result 1: {result1.answer}") + memory.add_dialog_turn("What is 2 + 1?", result1.step_history) + + # Example 2: Using previous context + print("MEMORY_CALL", memory.call()) + logger.info("\nExample 2: Using previous context") + agent.context_variables = {"context_variables": {"history": memory.call()}} + result2 = agent("what is the result of the previous question?") + logger.info(f"Result 2: {result2.answer}") + memory.add_dialog_turn( + "what is the result of the previous question?", result2.step_history + ) + + # Example 2: Using previous context + print("MEMORY_CALL", memory.call()) + logger.info("\nExample 3: Factorial of previous final result") + agent.context_variables = {"context_variables": {"history": memory.call()}} + result3 = agent("what is the square of the previous final result?") + logger.info(f"Result 3: {result3.answer}") + memory.add_dialog_turn( + "what is the square of the previous final result?", result3.step_history + ) + + +if __name__ == "__main__": + main() diff --git a/adalflow/adalflow/components/agent/examples/react_memory_example_result.txt b/adalflow/adalflow/components/agent/examples/react_memory_example_result.txt new file mode 100644 index 000000000..03390908e --- /dev/null +++ b/adalflow/adalflow/components/agent/examples/react_memory_example_result.txt @@ -0,0 +1,65 @@ +INFO:adalflow.core.prompt_builder:Prompt has variables: ['example', 'schema'] +INFO:adalflow.optim.grad_component:EvalFnToTextLoss: No backward engine provided. Creating one using model_client and model_kwargs. +INFO:adalflow.core.prompt_builder:Prompt has variables: ['tools', 'context_variables', 'step_history', 'input_str', 'react_agent_task_desc', 'max_steps', 'output_format_str', 'examples'] +INFO:adalflow.core.generator:Generator Generator initialized. +INFO:adalflow.optim.grad_component:EvalFnToTextLoss: No backward engine provided. Creating one using model_client and model_kwargs. +INFO:adalflow.core.prompt_builder:Prompt has variables: ['tools_str', 'task_desc_str', 'input_format_str', 'chat_history_str', 'steps_str', 'examples_str', 'context_str', 'input_str', 'output_format_str'] +INFO:adalflow.core.generator:Generator Generator initialized. +INFO:adalflow.core.tool_manager:Initialized ToolManager with 6 tools and additional context {} +INFO:adalflow.components.agent.examples.react_memory_example:Example 1: Simple calculation +MEMORY_CALL +2025-05-16 12:35:36 - [base_agent.py:156:_run_one_step] - Running step 1 +Reaching for fun **************** +CONTEXT_VARIABLES {'input': 'What is 2 + 1?', 'step_history': [], 'context_variables': {'history': ''}} +INFO:adalflow.components.model_client.openai_client:api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'messages': [{'role': 'system', 'content': '\n\n\nYou are an excellent task planner.\nAnswer the input query using the tools provided below with maximum accuracy.\n\nEach step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action.\n\nFollow function docstring to best call the tool.\n- For simple queries: Directly call the ``finish`` action and provide the answer.\n- For complex queries:\n - Step 1: Read the user query and divide it into multisteps. 
Start with the first tool/subquery.\n - Call one tool at a time to solve each subquery/subquestion. \\\n - At step \'finish\', give the final answer based on all previous steps.\nREMEMBER:\n- Action MUST call one of the tools. It CANNOT be empty.\n- You will ALWAYS END WITH \'finish\' tool to finish the task directly with answer or failure message.\n- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME)\n\n\n- You cant use more than 10 steps. At the 10th current step, must finish with answer.\n\n\nYou have access to context_variables with the following keys:\nhistory\n------------------------\nYou can either pass context_variables or context_variables[\'key\'] to the tools depending on the tool\'s requirements.\n\n\nYour output should be formatted as a standard JSON instance with the following schema:\n```\n{\n "thought": "Why the function is called (Optional[str]) (optional)",\n "name": "The name of the function (str) (optional)",\n "kwargs": "The keyword arguments of the function (Optional[Dict[str, object]]) (optional)"\n}\n```\nExamples:\n```\n{\n "thought": "I need to calculate a simple arithmetic expression.",\n "name": "calculate",\n "kwargs": {\n "expression": "2 + 2"\n }\n}\n________\n{\n "thought": "I need to calculate the factorial of a number 5.",\n "name": "get_factorial",\n "kwargs": {\n "n": 5\n }\n}\n________\n{\n "thought": "Based on the extracted information, I can now provide a final answer.",\n "name": "finish",\n "kwargs": {\n "answer": "The square of 3 is 9"\n }\n}\n________\n{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}\n________\n{\n "thought": "I need to square a number 3.",\n "name": "square",\n "kwargs": {\n "n": 3\n }\n}\n________\n```\n-Make sure to always enclose the JSON output in triple backticks (```). 
Please do not add anything other than valid JSON output!\n-Use double quotes for the keys and string values.\n-DO NOT mistaken the "properties" and "type" in the schema as the actual fields in the JSON output.\n-Follow the JSON formatting conventions.\n\n\nExamples:\nFunction(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 2\'})\n------------------------\nFunction(thought=\'I need to calculate the factorial of a number 5.\', name=\'get_factorial\', args=[], kwargs={\'n\': 5})\n------------------------\nFunction(thought=\'Based on the extracted information, I can now provide a final answer.\', name=\'finish\', args=[], kwargs={\'answer\': \'The square of 3 is 9\'})\n------------------------\nFunction(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'})\n------------------------\nFunction(thought=\'I need to square a number 3.\', name=\'square\', args=[], kwargs={\'n\': 3})\n------------------------\n\n\n-----------------\n\nInput query:\nWhat is 2 + 1?\n_____________________\nCurrent Step/Max Step: 1 / 10\n'}]} +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:adalflow.core.generator:output: GeneratorOutput(id=None, data=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), error=None, usage=CompletionUsage(completion_tokens=41, prompt_tokens=918, total_tokens=959), raw_response='{\n "thought": "I need to calculate a simple arithmetic expression.",\n "name": "calculate",\n "kwargs": {\n "expression": "2 + 1"\n }\n}', metadata=None) +2025-05-16 12:35:38 - [base_agent.py:156:_run_one_step] - Running step 2 +Reaching for fun **************** +CONTEXT_VARIABLES {'input': 'What is 2 + 1?', 'step_history': [Step(step_number=1, action=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {...}})], 'context_variables': {'history': ''}} +INFO:adalflow.components.model_client.openai_client:api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'messages': [{'role': 'system', 'content': '\n\n\nYou are an excellent task planner.\nAnswer the input query using the tools provided below with maximum accuracy.\n\nEach step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action.\n\nFollow function docstring to best call the tool.\n- For simple queries: Directly call the ``finish`` action and provide the answer.\n- For complex queries:\n - Step 1: Read the user query and divide it into multisteps. Start with the first tool/subquery.\n - Call one tool at a time to solve each subquery/subquestion. \\\n - At step \'finish\', give the final answer based on all previous steps.\nREMEMBER:\n- Action MUST call one of the tools. It CANNOT be empty.\n- You will ALWAYS END WITH \'finish\' tool to finish the task directly with answer or failure message.\n- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME)\n\n\n- You cant use more than 10 steps. 
At the 10th current step, must finish with answer.\n\n\nYou have access to context_variables with the following keys:\nhistory\n------------------------\nYou can either pass context_variables or context_variables[\'key\'] to the tools depending on the tool\'s requirements.\n\n\nYour output should be formatted as a standard JSON instance with the following schema:\n```\n{\n "thought": "Why the function is called (Optional[str]) (optional)",\n "name": "The name of the function (str) (optional)",\n "kwargs": "The keyword arguments of the function (Optional[Dict[str, object]]) (optional)"\n}\n```\nExamples:\n```\n{\n "thought": "I need to calculate a simple arithmetic expression.",\n "name": "calculate",\n "kwargs": {\n "expression": "2 + 2"\n }\n}\n________\n{\n "thought": "I need to calculate the factorial of a number 5.",\n "name": "get_factorial",\n "kwargs": {\n "n": 5\n }\n}\n________\n{\n "thought": "Based on the extracted information, I can now provide a final answer.",\n "name": "finish",\n "kwargs": {\n "answer": "The square of 3 is 9"\n }\n}\n________\n{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}\n________\n{\n "thought": "I need to square a number 3.",\n "name": "square",\n "kwargs": {\n "n": 3\n }\n}\n________\n```\n-Make sure to always enclose the JSON output in triple backticks (```). Please do not add anything other than valid JSON output!\n-Use double quotes for the keys and string values.\n-DO NOT mistaken the "properties" and "type" in the schema as the actual fields in the JSON output.\n-Follow the JSON formatting conventions.\n\n\nExamples:\nFunction(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 2\'})\n------------------------\nFunction(thought=\'I need to calculate the factorial of a number 5.\', name=\'get_factorial\', args=[], kwargs={\'n\': 5})\n------------------------\nFunction(thought=\'Based on the extracted information, I can now provide a final answer.\', name=\'finish\', args=[], kwargs={\'answer\': \'The square of 3 is 9\'})\n------------------------\nFunction(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'})\n------------------------\nFunction(thought=\'I need to square a number 3.\', name=\'square\', args=[], kwargs={\'n\': 3})\n------------------------\n\n\n-----------------\n\nInput query:\nWhat is 2 + 1?\n_____________________\nCurrent Step/Max Step: 2 / 10\n\nYour previous steps:\nStep 1.\n"thought": "I need to calculate a simple arithmetic expression.",\n"name": "calculate,\n"kwargs": {\'expression\': \'2 + 1\'}",\n"Observation": "3"\n------------------------\n\n'}]} +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:adalflow.core.generator:output: GeneratorOutput(id=None, data=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), error=None, usage=CompletionUsage(completion_tokens=51, prompt_tokens=974, total_tokens=1025), raw_response='{\n "thought": "I need to provide the final answer based on the calculation result.",\n "name": "finish",\n "kwargs": {\n "answer": "The result of 2 + 1 is 3."\n }\n}', metadata=None) +INFO:adalflow.components.agent.examples.react_memory_example:Result 1: The result of 2 + 1 is 3. +MEMORY_CALL User: What is 2 + 1? 
+Assistant: [Step(step_number=1, action=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}})] +INFO:adalflow.components.agent.examples.react_memory_example: +Example 2: Using previous context +2025-05-16 12:35:39 - [base_agent.py:156:_run_one_step] - Running step 1 +Reaching for fun **************** +CONTEXT_VARIABLES {'input': 'what is the result of the previous question?', 'step_history': [], 'context_variables': {'history': "User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}})]"}} +INFO:adalflow.components.model_client.openai_client:api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'messages': [{'role': 'system', 'content': '\n\n\nYou are an excellent task planner.\nAnswer the input query using the tools provided below with maximum accuracy.\n\nEach step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action.\n\nFollow function docstring to best call the tool.\n- For simple queries: Directly call the ``finish`` action and provide the answer.\n- For complex queries:\n - Step 1: Read the user query and divide it into multisteps. Start with the first tool/subquery.\n - Call one tool at a time to solve each subquery/subquestion. \\\n - At step \'finish\', give the final answer based on all previous steps.\nREMEMBER:\n- Action MUST call one of the tools. It CANNOT be empty.\n- You will ALWAYS END WITH \'finish\' tool to finish the task directly with answer or failure message.\n- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME)\n\n\n- You cant use more than 10 steps. 
At the 10th current step, must finish with answer.\n\n\nYou have access to context_variables with the following keys:\nhistory\n------------------------\nYou can either pass context_variables or context_variables[\'key\'] to the tools depending on the tool\'s requirements.\n\n\nYour output should be formatted as a standard JSON instance with the following schema:\n```\n{\n "thought": "Why the function is called (Optional[str]) (optional)",\n "name": "The name of the function (str) (optional)",\n "kwargs": "The keyword arguments of the function (Optional[Dict[str, object]]) (optional)"\n}\n```\nExamples:\n```\n{\n "thought": "I need to calculate a simple arithmetic expression.",\n "name": "calculate",\n "kwargs": {\n "expression": "2 + 2"\n }\n}\n________\n{\n "thought": "I need to calculate the factorial of a number 5.",\n "name": "get_factorial",\n "kwargs": {\n "n": 5\n }\n}\n________\n{\n "thought": "Based on the extracted information, I can now provide a final answer.",\n "name": "finish",\n "kwargs": {\n "answer": "The square of 3 is 9"\n }\n}\n________\n{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}\n________\n{\n "thought": "I need to square a number 3.",\n "name": "square",\n "kwargs": {\n "n": 3\n }\n}\n________\n```\n-Make sure to always enclose the JSON output in triple backticks (```). Please do not add anything other than valid JSON output!\n-Use double quotes for the keys and string values.\n-DO NOT mistaken the "properties" and "type" in the schema as the actual fields in the JSON output.\n-Follow the JSON formatting conventions.\n\n\nExamples:\nFunction(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 2\'})\n------------------------\nFunction(thought=\'I need to calculate the factorial of a number 5.\', name=\'get_factorial\', args=[], kwargs={\'n\': 5})\n------------------------\nFunction(thought=\'Based on the extracted information, I can now provide a final answer.\', name=\'finish\', args=[], kwargs={\'answer\': \'The square of 3 is 9\'})\n------------------------\nFunction(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'})\n------------------------\nFunction(thought=\'I need to square a number 3.\', name=\'square\', args=[], kwargs={\'n\': 3})\n------------------------\n\n\n-----------------\n\nInput query:\nwhat is the result of the previous question?\n_____________________\nCurrent Step/Max Step: 1 / 10\n'}]} +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:adalflow.core.generator:output: GeneratorOutput(id=None, data=Function(thought='I need to context data of previous conversation.', name='extract_result', args=[], kwargs={'history': 'history'}), error=None, usage=CompletionUsage(completion_tokens=39, prompt_tokens=919, total_tokens=958), raw_response='{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}', metadata=None) +2025-05-16 12:35:39 - [base_agent.py:156:_run_one_step] - Running step 2 +Reaching for fun **************** +CONTEXT_VARIABLES {'input': 'what is the result of the previous question?', 'step_history': [Step(step_number=1, action=Function(thought='I need to context data of previous conversation.', name='extract_result', args=[], kwargs={'history': 'history'}), 
observation="User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}})]", metadata={'context': {...}})], 'context_variables': {'history': "User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}})]"}} +INFO:adalflow.components.model_client.openai_client:api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'messages': [{'role': 'system', 'content': '\n\n\nYou are an excellent task planner.\nAnswer the input query using the tools provided below with maximum accuracy.\n\nEach step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action.\n\nFollow function docstring to best call the tool.\n- For simple queries: Directly call the ``finish`` action and provide the answer.\n- For complex queries:\n - Step 1: Read the user query and divide it into multisteps. Start with the first tool/subquery.\n - Call one tool at a time to solve each subquery/subquestion. \\\n - At step \'finish\', give the final answer based on all previous steps.\nREMEMBER:\n- Action MUST call one of the tools. It CANNOT be empty.\n- You will ALWAYS END WITH \'finish\' tool to finish the task directly with answer or failure message.\n- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME)\n\n\n- You cant use more than 10 steps. 
At the 10th current step, must finish with answer.\n\n\nYou have access to context_variables with the following keys:\nhistory\n------------------------\nYou can either pass context_variables or context_variables[\'key\'] to the tools depending on the tool\'s requirements.\n\n\nYour output should be formatted as a standard JSON instance with the following schema:\n```\n{\n "thought": "Why the function is called (Optional[str]) (optional)",\n "name": "The name of the function (str) (optional)",\n "kwargs": "The keyword arguments of the function (Optional[Dict[str, object]]) (optional)"\n}\n```\nExamples:\n```\n{\n "thought": "I need to calculate a simple arithmetic expression.",\n "name": "calculate",\n "kwargs": {\n "expression": "2 + 2"\n }\n}\n________\n{\n "thought": "I need to calculate the factorial of a number 5.",\n "name": "get_factorial",\n "kwargs": {\n "n": 5\n }\n}\n________\n{\n "thought": "Based on the extracted information, I can now provide a final answer.",\n "name": "finish",\n "kwargs": {\n "answer": "The square of 3 is 9"\n }\n}\n________\n{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}\n________\n{\n "thought": "I need to square a number 3.",\n "name": "square",\n "kwargs": {\n "n": 3\n }\n}\n________\n```\n-Make sure to always enclose the JSON output in triple backticks (```). Please do not add anything other than valid JSON output!\n-Use double quotes for the keys and string values.\n-DO NOT mistaken the "properties" and "type" in the schema as the actual fields in the JSON output.\n-Follow the JSON formatting conventions.\n\n\nExamples:\nFunction(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 2\'})\n------------------------\nFunction(thought=\'I need to calculate the factorial of a number 5.\', name=\'get_factorial\', args=[], kwargs={\'n\': 5})\n------------------------\nFunction(thought=\'Based on the extracted information, I can now provide a final answer.\', name=\'finish\', args=[], kwargs={\'answer\': \'The square of 3 is 9\'})\n------------------------\nFunction(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'})\n------------------------\nFunction(thought=\'I need to square a number 3.\', name=\'square\', args=[], kwargs={\'n\': 3})\n------------------------\n\n\n-----------------\n\nInput query:\nwhat is the result of the previous question?\n_____________________\nCurrent Step/Max Step: 2 / 10\n\nYour previous steps:\nStep 1.\n"thought": "I need to context data of previous conversation.",\n"name": "extract_result,\n"kwargs": {\'history\': \'history\'}",\n"Observation": "User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"\n------------------------\n\n'}]} 
+INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:adalflow.core.generator:output: GeneratorOutput(id=None, data=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), error=None, usage=CompletionUsage(completion_tokens=51, prompt_tokens=1154, total_tokens=1205), raw_response='{\n "thought": "I need to provide the final answer based on the calculation result.",\n "name": "finish",\n "kwargs": {\n "answer": "The result of 2 + 1 is 3."\n }\n}', metadata=None) +INFO:adalflow.components.agent.examples.react_memory_example:Result 2: The result of 2 + 1 is 3. +MEMORY_CALL User: What is 2 + 1? +Assistant: [Step(step_number=1, action=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}})] +User: what is the result of the previous question? +Assistant: [Step(step_number=1, action=Function(thought='I need to context data of previous conversation.', name='extract_result', args=[], kwargs={'history': 'history'}), observation="User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}})]", metadata={'context': {'input': 'what is the result of the previous question?', 'step_history': [...], 'context_variables': {'history': "User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought='I need to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}})]"}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'what is the result of the previous question?', 'step_history': [...], 'context_variables': {'history': "User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought='I need 
to calculate a simple arithmetic expression.', name='calculate', args=[], kwargs={'expression': '2 + 1'}), observation='3', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}}), Step(step_number=2, action=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The result of 2 + 1 is 3.'}), observation='The result of 2 + 1 is 3.', metadata={'context': {'input': 'What is 2 + 1?', 'step_history': [...], 'context_variables': {'history': ''}}})]"}}})] +INFO:adalflow.components.agent.examples.react_memory_example: +Example 3: Factorial of previous final result +2025-05-16 12:35:40 - [base_agent.py:156:_run_one_step] - Running step 1 +Reaching for fun **************** +CONTEXT_VARIABLES {'input': 'what is the square of the previous final result?', 'step_history': [], 'context_variables': {'history': 'User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]\nUser: what is the result of the previous question?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'}), observation="User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]", metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}}), Step(step_number=2, action=Function(thought=\'I need to provide the 
final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}})]'}} +INFO:adalflow.components.model_client.openai_client:api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'messages': [{'role': 'system', 'content': '\n\n\nYou are an excellent task planner.\nAnswer the input query using the tools provided below with maximum accuracy.\n\nEach step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action.\n\nFollow function docstring to best call the tool.\n- For simple queries: Directly call the ``finish`` action and provide the answer.\n- For complex queries:\n - Step 1: Read the user query and divide it into multisteps. Start with the first tool/subquery.\n - Call one tool at a time to solve each subquery/subquestion. \\\n - At step \'finish\', give the final answer based on all previous steps.\nREMEMBER:\n- Action MUST call one of the tools. It CANNOT be empty.\n- You will ALWAYS END WITH \'finish\' tool to finish the task directly with answer or failure message.\n- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME)\n\n\n- You cant use more than 10 steps. 
At the 10th current step, must finish with answer.\n\n\nYou have access to context_variables with the following keys:\nhistory\n------------------------\nYou can either pass context_variables or context_variables[\'key\'] to the tools depending on the tool\'s requirements.\n\n\nYour output should be formatted as a standard JSON instance with the following schema:\n```\n{\n "thought": "Why the function is called (Optional[str]) (optional)",\n "name": "The name of the function (str) (optional)",\n "kwargs": "The keyword arguments of the function (Optional[Dict[str, object]]) (optional)"\n}\n```\nExamples:\n```\n{\n "thought": "I need to calculate a simple arithmetic expression.",\n "name": "calculate",\n "kwargs": {\n "expression": "2 + 2"\n }\n}\n________\n{\n "thought": "I need to calculate the factorial of a number 5.",\n "name": "get_factorial",\n "kwargs": {\n "n": 5\n }\n}\n________\n{\n "thought": "Based on the extracted information, I can now provide a final answer.",\n "name": "finish",\n "kwargs": {\n "answer": "The square of 3 is 9"\n }\n}\n________\n{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}\n________\n{\n "thought": "I need to square a number 3.",\n "name": "square",\n "kwargs": {\n "n": 3\n }\n}\n________\n```\n-Make sure to always enclose the JSON output in triple backticks (```). Please do not add anything other than valid JSON output!\n-Use double quotes for the keys and string values.\n-DO NOT mistaken the "properties" and "type" in the schema as the actual fields in the JSON output.\n-Follow the JSON formatting conventions.\n\n\nExamples:\nFunction(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 2\'})\n------------------------\nFunction(thought=\'I need to calculate the factorial of a number 5.\', name=\'get_factorial\', args=[], kwargs={\'n\': 5})\n------------------------\nFunction(thought=\'Based on the extracted information, I can now provide a final answer.\', name=\'finish\', args=[], kwargs={\'answer\': \'The square of 3 is 9\'})\n------------------------\nFunction(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'})\n------------------------\nFunction(thought=\'I need to square a number 3.\', name=\'square\', args=[], kwargs={\'n\': 3})\n------------------------\n\n\n-----------------\n\nInput query:\nwhat is the square of the previous final result?\n_____________________\nCurrent Step/Max Step: 1 / 10\n'}]} +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:adalflow.core.generator:output: GeneratorOutput(id=None, data=Function(thought='I need to context data of previous conversation.', name='extract_result', args=[], kwargs={'history': 'history'}), error=None, usage=CompletionUsage(completion_tokens=39, prompt_tokens=920, total_tokens=959), raw_response='{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}', metadata=None) +2025-05-16 12:35:41 - [base_agent.py:156:_run_one_step] - Running step 2 +Reaching for fun **************** +CONTEXT_VARIABLES {'input': 'what is the square of the previous final result?', 'step_history': [Step(step_number=1, action=Function(thought='I need to context data of previous conversation.', name='extract_result', args=[], kwargs={'history': 'history'}), 
observation='User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]\nUser: what is the result of the previous question?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'}), observation="User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]", metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': 
{\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}})]', metadata={'context': {...}})], 'context_variables': {'history': 'User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]\nUser: what is the result of the previous question?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'}), observation="User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]", metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the 
calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}})]'}} +INFO:adalflow.components.model_client.openai_client:api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'messages': [{'role': 'system', 'content': '\n\n\nYou are an excellent task planner.\nAnswer the input query using the tools provided below with maximum accuracy.\n\nEach step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action.\n\nFollow function docstring to best call the tool.\n- For simple queries: Directly call the ``finish`` action and provide the answer.\n- For complex queries:\n - Step 1: Read the user query and divide it into multisteps. Start with the first tool/subquery.\n - Call one tool at a time to solve each subquery/subquestion. \\\n - At step \'finish\', give the final answer based on all previous steps.\nREMEMBER:\n- Action MUST call one of the tools. It CANNOT be empty.\n- You will ALWAYS END WITH \'finish\' tool to finish the task directly with answer or failure message.\n- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME)\n\n\n- You cant use more than 10 steps. At the 10th current step, must finish with answer.\n\n\nYou have access to context_variables with the following keys:\nhistory\n------------------------\nYou can either pass context_variables or context_variables[\'key\'] to the tools depending on the tool\'s requirements.\n\n\nYour output should be formatted as a standard JSON instance with the following schema:\n```\n{\n "thought": "Why the function is called (Optional[str]) (optional)",\n "name": "The name of the function (str) (optional)",\n "kwargs": "The keyword arguments of the function (Optional[Dict[str, object]]) (optional)"\n}\n```\nExamples:\n```\n{\n "thought": "I need to calculate a simple arithmetic expression.",\n "name": "calculate",\n "kwargs": {\n "expression": "2 + 2"\n }\n}\n________\n{\n "thought": "I need to calculate the factorial of a number 5.",\n "name": "get_factorial",\n "kwargs": {\n "n": 5\n }\n}\n________\n{\n "thought": "Based on the extracted information, I can now provide a final answer.",\n "name": "finish",\n "kwargs": {\n "answer": "The square of 3 is 9"\n }\n}\n________\n{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}\n________\n{\n "thought": "I need to square a number 3.",\n "name": "square",\n "kwargs": {\n "n": 3\n }\n}\n________\n```\n-Make sure to always enclose the JSON output in triple backticks (```). 
Please do not add anything other than valid JSON output!\n-Use double quotes for the keys and string values.\n-DO NOT mistaken the "properties" and "type" in the schema as the actual fields in the JSON output.\n-Follow the JSON formatting conventions.\n\n\nExamples:\nFunction(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 2\'})\n------------------------\nFunction(thought=\'I need to calculate the factorial of a number 5.\', name=\'get_factorial\', args=[], kwargs={\'n\': 5})\n------------------------\nFunction(thought=\'Based on the extracted information, I can now provide a final answer.\', name=\'finish\', args=[], kwargs={\'answer\': \'The square of 3 is 9\'})\n------------------------\nFunction(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'})\n------------------------\nFunction(thought=\'I need to square a number 3.\', name=\'square\', args=[], kwargs={\'n\': 3})\n------------------------\n\n\n-----------------\n\nInput query:\nwhat is the square of the previous final result?\n_____________________\nCurrent Step/Max Step: 2 / 10\n\nYour previous steps:\nStep 1.\n"thought": "I need to context data of previous conversation.",\n"name": "extract_result,\n"kwargs": {\'history\': \'history\'}",\n"Observation": "User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]\nUser: what is the result of the previous question?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'}), observation="User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]", metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': 
\'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}})]"\n------------------------\n\n'}]} +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:adalflow.core.generator:output: GeneratorOutput(id=None, data=Function(thought='I need to calculate the square of the previous final result.', name='calculate', args=[], kwargs={'expression': '3 ** 2'}), error=None, usage=CompletionUsage(completion_tokens=43, prompt_tokens=1889, total_tokens=1932), raw_response='{\n "thought": "I need to calculate the square of the previous final result.",\n "name": "calculate",\n "kwargs": {\n "expression": "3 ** 2"\n }\n}', metadata=None) +2025-05-16 12:35:42 - [base_agent.py:156:_run_one_step] - Running step 3 +Reaching for fun **************** +CONTEXT_VARIABLES {'input': 'what is the square of the previous final result?', 'step_history': [Step(step_number=1, action=Function(thought='I need to context data of previous conversation.', name='extract_result', args=[], kwargs={'history': 'history'}), observation='User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]\nUser: what is the result of the previous question?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'}), observation="User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple 
arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]", metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}})]', metadata={'context': {...}}), Step(step_number=2, action=Function(thought='I need to calculate the square of the previous final result.', name='calculate', args=[], kwargs={'expression': '3 ** 2'}), observation='9', metadata={'context': {...}})], 'context_variables': {'history': 'User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': 
{\'history\': \'\'}}})]\nUser: what is the result of the previous question?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'}), observation="User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]", metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}})]'}} +INFO:adalflow.components.model_client.openai_client:api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'messages': [{'role': 'system', 'content': '\n\n\nYou are an excellent task planner.\nAnswer the input query using the tools provided below with maximum accuracy.\n\nEach step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action.\n\nFollow function docstring to best call the tool.\n- For simple queries: Directly call the ``finish`` action and provide the answer.\n- For complex 
queries:\n - Step 1: Read the user query and divide it into multisteps. Start with the first tool/subquery.\n - Call one tool at a time to solve each subquery/subquestion. \\\n - At step \'finish\', give the final answer based on all previous steps.\nREMEMBER:\n- Action MUST call one of the tools. It CANNOT be empty.\n- You will ALWAYS END WITH \'finish\' tool to finish the task directly with answer or failure message.\n- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME)\n\n\n- You cant use more than 10 steps. At the 10th current step, must finish with answer.\n\n\nYou have access to context_variables with the following keys:\nhistory\n------------------------\nYou can either pass context_variables or context_variables[\'key\'] to the tools depending on the tool\'s requirements.\n\n\nYour output should be formatted as a standard JSON instance with the following schema:\n```\n{\n "thought": "Why the function is called (Optional[str]) (optional)",\n "name": "The name of the function (str) (optional)",\n "kwargs": "The keyword arguments of the function (Optional[Dict[str, object]]) (optional)"\n}\n```\nExamples:\n```\n{\n "thought": "I need to calculate a simple arithmetic expression.",\n "name": "calculate",\n "kwargs": {\n "expression": "2 + 2"\n }\n}\n________\n{\n "thought": "I need to calculate the factorial of a number 5.",\n "name": "get_factorial",\n "kwargs": {\n "n": 5\n }\n}\n________\n{\n "thought": "Based on the extracted information, I can now provide a final answer.",\n "name": "finish",\n "kwargs": {\n "answer": "The square of 3 is 9"\n }\n}\n________\n{\n "thought": "I need to context data of previous conversation.",\n "name": "extract_result",\n "kwargs": {\n "history": "history"\n }\n}\n________\n{\n "thought": "I need to square a number 3.",\n "name": "square",\n "kwargs": {\n "n": 3\n }\n}\n________\n```\n-Make sure to always enclose the JSON output in triple backticks (```). 
Please do not add anything other than valid JSON output!\n-Use double quotes for the keys and string values.\n-DO NOT mistaken the "properties" and "type" in the schema as the actual fields in the JSON output.\n-Follow the JSON formatting conventions.\n\n\nExamples:\nFunction(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 2\'})\n------------------------\nFunction(thought=\'I need to calculate the factorial of a number 5.\', name=\'get_factorial\', args=[], kwargs={\'n\': 5})\n------------------------\nFunction(thought=\'Based on the extracted information, I can now provide a final answer.\', name=\'finish\', args=[], kwargs={\'answer\': \'The square of 3 is 9\'})\n------------------------\nFunction(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'})\n------------------------\nFunction(thought=\'I need to square a number 3.\', name=\'square\', args=[], kwargs={\'n\': 3})\n------------------------\n\n\n-----------------\n\nInput query:\nwhat is the square of the previous final result?\n_____________________\nCurrent Step/Max Step: 3 / 10\n\nYour previous steps:\nStep 1.\n"thought": "I need to context data of previous conversation.",\n"name": "extract_result,\n"kwargs": {\'history\': \'history\'}",\n"Observation": "User: What is 2 + 1?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]\nUser: what is the result of the previous question?\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to context data of previous conversation.\', name=\'extract_result\', args=[], kwargs={\'history\': \'history\'}), observation="User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]", metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': 
\'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'what is the result of the previous question?\', \'step_history\': [...], \'context_variables\': {\'history\': "User: What is 2 + 1?\\nAssistant: [Step(step_number=1, action=Function(thought=\'I need to calculate a simple arithmetic expression.\', name=\'calculate\', args=[], kwargs={\'expression\': \'2 + 1\'}), observation=\'3\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}}), Step(step_number=2, action=Function(thought=\'I need to provide the final answer based on the calculation result.\', name=\'finish\', args=[], kwargs={\'answer\': \'The result of 2 + 1 is 3.\'}), observation=\'The result of 2 + 1 is 3.\', metadata={\'context\': {\'input\': \'What is 2 + 1?\', \'step_history\': [...], \'context_variables\': {\'history\': \'\'}}})]"}}})]"\n------------------------\nStep 2.\n"thought": "I need to calculate the square of the previous final result.",\n"name": "calculate,\n"kwargs": {\'expression\': \'3 ** 2\'}",\n"Observation": "9"\n------------------------\n\n'}]} +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:adalflow.core.generator:output: GeneratorOutput(id=None, data=Function(thought='I need to provide the final answer based on the calculation result.', name='finish', args=[], kwargs={'answer': 'The square of the previous final result is 9.'}), error=None, usage=CompletionUsage(completion_tokens=50, prompt_tokens=1936, total_tokens=1986), raw_response='{\n "thought": "I need to provide the final answer based on the calculation result.",\n "name": "finish",\n "kwargs": {\n "answer": "The square of the previous final result is 9."\n }\n}', metadata=None) +INFO:adalflow.components.agent.examples.react_memory_example:Result 3: The square of the previous final result is 9 diff --git a/adalflow/adalflow/components/agent/react_agent.py b/adalflow/adalflow/components/agent/react_agent.py new file mode 100644 index 000000000..e6f545634 --- /dev/null +++ b/adalflow/adalflow/components/agent/react_agent.py @@ -0,0 +1,344 @@ +"""ReAct agent implementation using the base agent.""" + +from typing import List, Union, Callable, Optional, Any, Dict +import logging +from adalflow.components.output_parsers import JsonOutputParser + +from adalflow.core.generator import Generator +from adalflow.core.func_tool import FunctionTool, AsyncCallable +from adalflow.core.tool_manager import ToolManager +from adalflow.core.types import Function +from adalflow.optim.parameter import Parameter, ParameterType +from adalflow.core.model_client import ModelClient + +from .base_agent import ( + BaseAgent, + BasePlanner, + BaseToolManager, + BaseMemory, + Step, + AgentOutput, +) + +log = logging.getLogger(__name__) +react_agent_task_desc = r""" + +You are an excellent task planner. 
+Answer the input query using the tools provided below with maximum accuracy. + +At each step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action. + +Follow function docstring to best call the tool. +- For simple queries: Directly call the ``finish`` action and provide the answer. +- For complex queries: + - Step 1: Read the user query and divide it into multisteps. Start with the first tool/subquery. + - Call one tool at a time to solve each subquery/subquestion. \ + - At step 'finish', give the final answer based on all previous steps. +REMEMBER: +- Action MUST call one of the tools. It CANNOT be empty. +- You will ALWAYS END WITH 'finish' tool to finish the task directly with answer or failure message. +- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME) + +""" + +DEFAULT_REACT_AGENT_SYSTEM_PROMPT = r""" +{{react_agent_task_desc}} +- You cannot use more than {{max_steps}} steps. At step {{max_steps}}, you must finish with an answer. + +{# Tools #} +{% if tools %} + +Tools and instructions: +{% for tool in tools %} +{{ loop.index }}. +{{tool}} +------------------------ +{% endfor %} + +{% endif %} +{# Context Variables #} +{% if context_variables is not none %} + +You have access to context_variables with the following keys: +{% for key, value in context_variables.items() %} +{{ key }} +------------------------ +{% endfor %} +You can either pass context_variables or context_variables['key'] to the tools depending on the tool's requirements. + +{% endif %} +{# output format and examples for output format #} + +{{output_format_str}} + +{% if examples %} + +Examples: +{% for example in examples %} +{{example}} +------------------------ +{% endfor %} + +{% endif %} + +----------------- + +Input query: +{{ input_str }} +_____________________ +Current Step/Max Step: {{step_history|length + 1}} / {{max_steps}} +{# Step History #} +{% if step_history %} + +Your previous steps: +{% for history in step_history %} +Step {{ loop.index }}.
+{% if history.action %} +"thought": "{{history.action.thought}}", +"name": "{{history.action.name}}, +"kwargs": {{history.action.kwargs}}", +{% endif %} +"Observation": "{{history.observation}}" +------------------------ +{% endfor %} + +{% endif %} + +""" + + +class ReActPlanner(BasePlanner): + """ReAct-specific planner implementation.""" + + def __init__( + self, + model_client: ModelClient, + model_kwargs: Dict = {}, + template: Optional[str] = None, + examples: List[Function] = [], + ): + super().__init__(model_client, model_kwargs) + self.template = template or DEFAULT_REACT_AGENT_SYSTEM_PROMPT + self.examples = examples + + # Initialize output parser with strict JSON requirements + self.output_parser = JsonOutputParser( + data_class=Function, + examples=self.examples, + return_data_class=True, + include_fields=["thought", "name", "kwargs"], + ) + + # Initialize generator with proper output format + self.generator = Generator( + template=self.template, + model_client=model_client, + model_kwargs=model_kwargs, + output_processors=self.output_parser, + ) + + def plan(self, input: str, context: Dict) -> Function: + """Plan the next action using ReAct format.""" + print("Reaching for fun ****************") + print("CONTEXT_VARIABLES", context) + prompt_kwargs = { + "input_str": input, + "step_history": context.get("step_history", []), + "tools": context.get("tools", []), + "max_steps": context.get("max_steps", 10), + "context_variables": context.get("context_variables", {}), + "react_agent_task_desc": Parameter( + name="react_agent_task_desc", + data=react_agent_task_desc, + # data="You are an excellent task planner. Answer the input query using the tools provided below with maximum accuracy.\n\nEach step you will read the previous thought, Action(name, kwargs), and Observation(execution result of the action) and then provide the next Thought and Action.\n\n\nFollow function docstring to best call the tool.\n- For simple queries: Directly call the 'finish' action and answer with a concise 'yes' or 'no' when it fits.\n- For complex queries:\n - Step 1: Understand the main subject(s) and context of the user query accurately.\n - Step 2: Break down the query into multisteps, starting with the first tool/subquery.\n - Ensure each step accurately reflects the subjects under consideration.\n - Continuously verify your extracted information and logic for factual accuracy using concise comparisons.\n - At step 'finish', conclude with a precise final answer.\nREMEMBER:\n- Action MUST call one of the tools. It CANNOT be empty.\n- You will ALWAYS END WITH 'finish' tool to conclude the task directly with an answer or failure message.\n- When the tool is a class method and when class_instance exists, use . to call instead (NOT the CLASS NAME).\n", + role_desc="Task instruction for the agent to plan steps to solve a question in sequential and multi-steps to get the final answer. 
\ + For optimizer: you need to adapt this to the current specific task.", + param_type=ParameterType.PROMPT, + requires_opt=True, + ), + "examples": Parameter( + name="examples", + data=self.examples, + role_desc="Examples for the ReAct agent.", + param_type=ParameterType.DEMOS, + requires_opt=True, + ), + "output_format_str": self.output_parser.format_instructions(), + } + + try: + response = self.generator(prompt_kwargs=prompt_kwargs) + if not response or not response.data: + raise ValueError("No valid response generated") + + return response.data + + except Exception as e: + log.error(f"Error generating plan: {str(e)}") + # Provide a fallback response + return Function( + thought="Failed to generate plan, falling back to finish", + name="finish", + kwargs={"answer": f"Sorry, I encountered an error: {str(e)}"}, + ) + + +class ReActToolManager(BaseToolManager): + """ReAct-specific tool manager implementation.""" + + def __init__( + self, + tools: List[Union[Callable, AsyncCallable, FunctionTool]], + add_llm_as_fallback: bool = True, + model_client: Optional[ModelClient] = None, + model_kwargs: Dict = {}, + ): + super().__init__(tools) + self.add_llm_as_fallback = add_llm_as_fallback + + # Add default finish tool if not already present + self._add_default_finish_tool() + + # Add LLM fallback if requested + if add_llm_as_fallback and model_client: + self._add_llm_fallback(model_client, model_kwargs) + + # Initialize tool manager + self.tool_manager = ToolManager(tools=self.tools) + + def _add_default_finish_tool(self): + """Add a default finish tool if one doesn't already exist.""" + # Check if a finish tool already exists + finish_exists = any( + ( + tool.name == "finish" + if isinstance(tool, FunctionTool) + else getattr(tool, "__name__", "") == "finish" + ) + for tool in self.tools + ) + + if not finish_exists: + + def finish(answer: str, **kwargs) -> str: + """Finish the conversation with a final answer.""" + return answer + + self.tools.append(FunctionTool(finish)) + log.info("Added default 'finish' tool to ReActAgent") + + def _add_llm_fallback(self, model_client: ModelClient, model_kwargs: Dict): + """Add LLM as a fallback tool.""" + llm_tool = Generator(model_client=model_client, model_kwargs=model_kwargs) + + def llm_fallback(input: str, **kwargs) -> str: + """Fallback tool that uses LLM to answer queries.""" + try: + output = llm_tool(prompt_kwargs={"input_str": input}) + return output.data if output else None + except Exception as e: + log.error(f"Error using llm_fallback: {e}") + return None + + self.tools.append(FunctionTool(llm_fallback)) + + def execute(self, action: Function) -> Any: + """Execute an action using the tool manager.""" + if not action or not action.name: + raise ValueError("Invalid action") + + result = self.tool_manager(expr_or_fun=action, step="execute") + + if not result: + raise ValueError(f"Failed to execute action: {action}") + + return result.output + + +class ReActMemory(BaseMemory): + """ReAct-specific memory implementation.""" + + def retrieve(self, query: str) -> List[Step]: + """Retrieve relevant steps based on query.""" + # Simple implementation - can be enhanced with vector search etc. + return [step for step in self.steps if query.lower() in str(step).lower()] + + +class ReActAgent(BaseAgent): + """ReAct agent implementation using the base agent. + + The agent automatically adds a default 'finish' tool that can be used to complete + the conversation with a final answer. Users do not need to manually add this tool. 
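For reviewers, a minimal usage sketch of the new ReActAgent defined in this file. It is illustrative only and not part of the diff: it assumes OpenAIClient is importable from adalflow.components.model_client and that an OPENAI_API_KEY is configured; the calculate tool mirrors the one exercised in the example logs above.

# Hypothetical usage sketch; the OpenAIClient import path and API-key setup are assumptions.
from adalflow.components.model_client import OpenAIClient
from adalflow.components.agent.react_agent import ReActAgent


def calculate(expression: str) -> str:
    """Evaluate a simple arithmetic expression and return the result as a string."""
    # eval() is for illustration only; never use it on untrusted input.
    return str(eval(expression))


agent = ReActAgent(
    tools=[calculate],  # List[Union[Callable, AsyncCallable, FunctionTool]] per the constructor
    max_steps=10,
    add_llm_as_fallback=True,  # registers an LLM fallback tool when a model_client is provided
    model_client=OpenAIClient(),
    model_kwargs={"model": "gpt-3.5-turbo"},
)

output = agent.call("What is 2 + 1?")  # returns an AgentOutput
print(output.answer)  # final answer produced by the 'finish' step
print(len(output.step_history))  # number of Step records taken

Because the agent injects the default 'finish' tool itself, the tools list above does not need to declare one.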
+ """ + + def __init__( + self, + tools: List[Union[Callable, AsyncCallable, FunctionTool]], + max_steps: int = 10, + add_llm_as_fallback: bool = True, + model_client: ModelClient = None, + model_kwargs: Dict = {}, + template: Optional[str] = None, + examples: List[Function] = [], + context_variables: Optional[Dict] = None, + use_cache: bool = True, + debug: bool = False, + ): + # Initialize components + planner = ReActPlanner( + model_client=model_client, + model_kwargs=model_kwargs, + template=template, + examples=examples, + ) + + tool_manager = ReActToolManager( + tools=tools, + add_llm_as_fallback=add_llm_as_fallback, + model_client=model_client, + model_kwargs=model_kwargs, + ) + + memory = ReActMemory() + + super().__init__( + planner=planner, + tool_manager=tool_manager, + memory=memory, + max_steps=max_steps, + context_variables=context_variables, + use_cache=use_cache, + debug=debug, + ) + + def _should_stop(self, step: Step) -> bool: + """Check if we should stop based on ReAct rules.""" + if not step.action: + return True + + return step.action.name == "finish" + + def _get_answer(self, step_history: List[Step]) -> Any: + """Get the final answer from step history.""" + if not step_history: + return None + + last_step = step_history[-1] + return last_step.observation + + def train_step(self, input: str, target: Any) -> Dict: + """Training step implementation.""" + self.train() + output = self.call(input) + loss = self._compute_loss(output, target) + return {"loss": loss} + + def eval_step(self, input: str) -> AgentOutput: + """Evaluation step implementation.""" + self.eval() + return self.call(input) + + def _compute_loss(self, output: AgentOutput, target: Any) -> float: + """Compute loss for training.""" + # Implement loss computation based on your needs + raise NotImplementedError diff --git a/adalflow/adalflow/components/model_client/openai_client.py b/adalflow/adalflow/components/model_client/openai_client.py index bf77ea477..a69763147 100644 --- a/adalflow/adalflow/components/model_client/openai_client.py +++ b/adalflow/adalflow/components/model_client/openai_client.py @@ -588,4 +588,3 @@ def _prepare_image_content( ) resopnse = openai_llm(prompt_kwargs={"input_str": "What is LLM?"}) print(resopnse) - diff --git a/adalflow/adalflow/core/base_data_class.py b/adalflow/adalflow/core/base_data_class.py index 245b345a8..74677cd28 100644 --- a/adalflow/adalflow/core/base_data_class.py +++ b/adalflow/adalflow/core/base_data_class.py @@ -464,7 +464,7 @@ def from_yaml(cls, yaml_str: str) -> "DataClass": return cls.from_dict(data) except yaml.YAMLError as e: raise ValueError(f"Failed to load YAML string: {e}") - + @classmethod def to_pydantic(cls, instance: "DataClass"): """ @@ -495,7 +495,7 @@ def to_pydantic(cls, instance: "DataClass"): pydantic_model = create_model(f"{cls.__name__}Pydantic", **field_definitions) data = instance.to_dict() return pydantic_model(**data) - + @classmethod def pydantic_to_dataclass(cls, pydantic_obj): """ @@ -521,7 +521,7 @@ def pydantic_to_dataclass(cls, pydantic_obj): return cls.from_dict(data) except Exception as e: raise ValueError(f"Failed to convert pydantic model to DataClass: {e}") - + def to_yaml_obj( self, exclude: ExcludeType = None, diff --git a/adalflow/adalflow/core/db.py b/adalflow/adalflow/core/db.py index 1ec214d67..6e74cb383 100644 --- a/adalflow/adalflow/core/db.py +++ b/adalflow/adalflow/core/db.py @@ -206,7 +206,9 @@ def transform( """ key_to_use = key if transformer: - key = self.register_transformer(transformer=transformer, 
key=key, map_fn=map_fn) + key = self.register_transformer( + transformer=transformer, key=key, map_fn=map_fn + ) key_to_use = key if key_to_use is None: raise ValueError("Key must be provided.") diff --git a/adalflow/tests/test_base_data_class.py b/adalflow/tests/test_base_data_class.py index b5bbcb442..5dee459b7 100644 --- a/adalflow/tests/test_base_data_class.py +++ b/adalflow/tests/test_base_data_class.py @@ -2,14 +2,15 @@ from dataclasses import field, MISSING, dataclass from typing import List, Dict, Optional, Set import enum -from dataclasses import field, MISSING, dataclass, asdict +from dataclasses import asdict # Assume these imports come from the adalflow package -from adalflow.core.base_data_class import DataClass, required_field, check_adal_dataclass +from adalflow.core.base_data_class import ( + DataClass, + required_field, +) from adalflow.core.functional import get_type_schema -import json -import yaml # Simple dataclass for testing @dataclass @@ -21,20 +22,24 @@ class MyOutputs(DataClass): metadata={"desc": "The name of the person", "prefix": "Name:"}, ) + @dataclass class Address: street: str city: str zipcode: str + class Label(str, enum.Enum): SPAM = "spam" NOT_SPAM = "not_spam" + @dataclass class ClassificationOutput(DataClass): label: Label = field(metadata={"desc": "Label of the category."}) + @dataclass class Person(DataClass): name: Optional[str] = field( @@ -55,6 +60,7 @@ class Person(DataClass): metadata={"desc": "The set of hobbies"}, default_factory=required_field() ) + class TestBaseDataClass(unittest.TestCase): # setup def setUp(self): @@ -365,6 +371,7 @@ class SetDataclass(DataClass): restored_instance = SetDataclass.from_dict(result) self.assertEqual(restored_instance, instance) + class TestPydanticConversionExtended(unittest.TestCase): def test_missing_required_field(self): """ @@ -376,13 +383,13 @@ def test_missing_required_field(self): # Override to_dict to simulate missing 'age' original_to_dict = instance.to_dict instance.to_dict = lambda exclude=None, include=None: {"name": "Alice"} - + with self.assertRaises(Exception): MyOutputs.to_pydantic(instance) - + # Restore the original to_dict instance.to_dict = original_to_dict - + def test_invalid_type_conversion(self): """ Test that providing an invalid type raises a validation error. @@ -391,19 +398,21 @@ def test_invalid_type_conversion(self): instance = MyOutputs(age="not_an_int", name="Alice") with self.assertRaises(Exception): MyOutputs.to_pydantic(instance) - + def test_default_value_usage(self): """ Test that fields with default values are correctly used when not provided. """ + @dataclass class WithDefault(DataClass): value: int = field(default=100, metadata={"desc": "A default value"}) + # Here, we pass an instance without modifying the default. instance = WithDefault() p_instance = WithDefault.to_pydantic(instance) self.assertEqual(p_instance.value, 100) - + def test_extra_fields_behavior(self): """ Test how extra fields are handled. Extra fields in the input dict are ignored. @@ -414,19 +423,25 @@ def test_extra_fields_behavior(self): # Although we cannot directly pass extra fields via to_pydantic (since it builds from to_dict()), # we simulate the behavior by creating a Pydantic model instance manually. ModelClass = type(p_instance) - p_manual = ModelClass(**{**instance.to_dict(), "extra_field": "should_be_ignored"}) + p_manual = ModelClass( + **{**instance.to_dict(), "extra_field": "should_be_ignored"} + ) # Check that the extra field is not set. 
self.assertEqual(p_manual.age, 30) self.assertEqual(p_manual.name, "Bob") self.assertFalse(hasattr(p_manual, "extra_field")) - + def test_union_optional_handling(self): """ Test a dataclass field with an Optional type to ensure that None is accepted. """ + @dataclass class WithOptional(DataClass): - optional_value: Optional[int] = field(metadata={"desc": "An optional integer"}, default=None) + optional_value: Optional[int] = field( + metadata={"desc": "An optional integer"}, default=None + ) + # Create an instance without providing a value. instance = WithOptional() p_instance = WithOptional.to_pydantic(instance) @@ -435,7 +450,7 @@ class WithOptional(DataClass): instance2 = WithOptional(optional_value=42) p_instance2 = WithOptional.to_pydantic(instance2) self.assertEqual(p_instance2.optional_value, 42) - + def test_nested_model_conversion_errors(self): """ Test nested dataclass conversion where nested dict has an invalid type. @@ -447,11 +462,11 @@ def test_nested_model_conversion_errors(self): addresses=[123], # invalid: should be a dict for Address single_address={"street": "X", "city": "Y", "zipcode": "Z"}, dict_addresses={"home": {"street": "X", "city": "Y", "zipcode": "Z"}}, - set_hobbies={1, 2} + set_hobbies={1, 2}, ) with self.assertRaises(Exception): Person.to_pydantic(instance) - + def test_pydantic_model_repr(self): """ Test that the __repr__ of the Pydantic model includes the expected field values. @@ -461,7 +476,7 @@ def test_pydantic_model_repr(self): repr_str = repr(p_instance) self.assertIn("age=55", repr_str) self.assertIn("name='Charlie'", repr_str) - + def test_round_trip_conversion(self): """ Full round-trip test: convert DataClass instance -> Pydantic model instance -> back to DataClass. @@ -471,22 +486,26 @@ def test_round_trip_conversion(self): p_instance = MyOutputs.to_pydantic(original) base_instance = MyOutputs.pydantic_to_dataclass(p_instance) self.assertEqual(base_instance, original) - + # For Person with nested data: original_person = Person( name="Dana", age=45, - addresses=[Address(street="100 Main St", city="Cityville", zipcode="00000")], - single_address=Address(street="100 Main St", city="Cityville", zipcode="00000"), - dict_addresses={"home": Address(street="100 Main St", city="Cityville", zipcode="00000")}, - set_hobbies={9, 10} + addresses=[ + Address(street="100 Main St", city="Cityville", zipcode="00000") + ], + single_address=Address( + street="100 Main St", city="Cityville", zipcode="00000" + ), + dict_addresses={ + "home": Address(street="100 Main St", city="Cityville", zipcode="00000") + }, + set_hobbies={9, 10}, ) p_person = Person.to_pydantic(original_person) base_person = Person.pydantic_to_dataclass(p_person) self.assertEqual(base_person, original_person) + if __name__ == "__main__": unittest.main() - - - diff --git a/poetry.lock b/poetry.lock index 6a9381040..3cb005bdf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] name = "absl-py" @@ -6,6 +6,7 @@ version = "2.1.0" description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." 
optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "absl-py-2.1.0.tar.gz", hash = "sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff"}, {file = "absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308"}, @@ -17,6 +18,7 @@ version = "0.33.0" description = "Accelerate" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "accelerate-0.33.0-py3-none-any.whl", hash = "sha256:0a7f33d60ba09afabd028d4f0856dd19c5a734b7a596d637d9dd6e3d0eadbaf3"}, {file = "accelerate-0.33.0.tar.gz", hash = "sha256:11ba481ed6ea09191775df55ce464aeeba67a024bd0261a44b77b30fb439e26a"}, @@ -44,41 +46,45 @@ testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", [[package]] name = "adalflow" -version = "0.2.6" +version = "1.0.4" description = "The Library to Build and Auto-optimize LLM Applications" optional = false python-versions = ">=3.9, <4.0" +groups = ["main"] files = [] develop = true [package.dependencies] backoff = "^2.2.1" -botocore = "^1.34.149" colorama = "^0.4.6" diskcache = "^5.6.3" jinja2 = "^3.1.3" jsonlines = "^4.0.0" nest-asyncio = "^1.6.0" -numpy = "^1.26.4" +numpy = {version = "*", markers = "python_version >= \"3.10\""} python-dotenv = "^1.0.1" -pyyaml = "^6.0.1" -tiktoken = "^0.7.0" +PyYAML = ">=6.0.1" +tiktoken = ">=0.3.3" tqdm = "^4.66.4" [package.extras] -anthropic = ["anthropic (>=0.31.1,<0.32.0)"] -azure = ["azure-core (>=1.24.0,<2.0.0)", "azure-identity (>=1.12.0,<2.0.0)"] +anthropic = ["anthropic (>=0.31.1)"] +azure = ["azure-core (>=1.24.0)", "azure-identity (>=1.12.0)"] bedrock = ["boto3 (>=1.35.19,<2.0.0)"] -cohere = ["cohere (>=5.5.8,<6.0.0)"] +cohere = ["cohere (>=5.5.8)"] datasets = [] -faiss-cpu = ["faiss-cpu (>=1.8.0,<2.0.0)"] -google-generativeai = ["google-generativeai (>=0.7.2,<0.8.0)"] -groq = ["groq (>=0.9.0,<0.10.0)"] -ollama = ["ollama (>=0.2.1,<0.3.0)"] -openai = ["openai (>=1.12.0,<2.0.0)"] -pgvector = ["pgvector (>=0.3.1,<0.4.0)"] -sqlalchemy = ["sqlalchemy (>=2.0.30,<3.0.0)"] -torch = ["torch (>=2.3.1,<3.0.0)"] +faiss-cpu = ["faiss-cpu (>=1.8.0)"] +fireworks-ai = [] +google-generativeai = ["google-generativeai (>=0.7.2)"] +groq = ["groq (>=0.9.0)"] +lancedb = ["lancedb (>=0.5.2)"] +mistralai = [] +ollama = ["ollama (>=0.2.1)"] +openai = ["openai (>=1.12.0)"] +pgvector = ["pgvector (>=0.3.1)"] +sqlalchemy = ["sqlalchemy (>=2.0.30)"] +together = ["together (>=1.3.14)"] +torch = ["torch (>=2.3.1)"] [package.source] type = "directory" @@ -90,6 +96,7 @@ version = "2.3.5" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "aiohappyeyeballs-2.3.5-py3-none-any.whl", hash = "sha256:4d6dea59215537dbc746e93e779caea8178c866856a721c9c660d7a5a7b8be03"}, {file = "aiohappyeyeballs-2.3.5.tar.gz", hash = "sha256:6fa48b9f1317254f122a07a131a86b71ca6946ca989ce6326fff54a99a920105"}, @@ -101,6 +108,7 @@ version = "3.10.2" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "aiohttp-3.10.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:95213b3d79c7e387144e9cb7b9d2809092d6ff2c044cb59033aedc612f38fb6d"}, {file = "aiohttp-3.10.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1aa005f060aff7124cfadaa2493f00a4e28ed41b232add5869e129a2e395935a"}, @@ -189,7 +197,7 @@ multidict = ">=4.5,<7.0" yarl = ">=1.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns (>=3.2.0)", 
"brotlicffi"] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""] [[package]] name = "aiosignal" @@ -197,6 +205,7 @@ version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, @@ -211,6 +220,7 @@ version = "1.13.2" description = "A database migration tool for SQLAlchemy." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "alembic-1.13.2-py3-none-any.whl", hash = "sha256:6b8733129a6224a9a711e17c99b08462dbf7cc9670ba8f2e2ae9af860ceb1953"}, {file = "alembic-1.13.2.tar.gz", hash = "sha256:1ff0ae32975f4fd96028c39ed9bb3c867fe3af956bd7bb37343b54c9fe7445ef"}, @@ -222,7 +232,7 @@ SQLAlchemy = ">=1.3.0" typing-extensions = ">=4" [package.extras] -tz = ["backports.zoneinfo"] +tz = ["backports.zoneinfo ; python_version < \"3.9\""] [[package]] name = "annotated-types" @@ -230,6 +240,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -241,6 +252,7 @@ version = "0.26.1" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "anthropic-0.26.1-py3-none-any.whl", hash = "sha256:2812b9b250b551ed8a1f0a7e6ae3f005654098994f45ebca5b5808bd154c9628"}, {file = "anthropic-0.26.1.tar.gz", hash = "sha256:26680ff781a6f678a30a1dccd0743631e602b23a47719439ffdef5335fa167d8"}, @@ -266,6 +278,7 @@ version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, @@ -277,7 +290,7 @@ sniffio = ">=1.1" [package.extras] doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (>=0.23)"] [[package]] @@ -286,6 +299,7 @@ version = "1.4.4" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, @@ -297,6 +311,8 @@ version = "0.1.4" description = "Disable App Nap on macOS >= 10.9" optional = false python-versions = ">=3.6" +groups = ["dev"] +markers = "platform_system == \"Darwin\"" files = [ {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, @@ -308,6 +324,7 @@ version = "23.1.0" description = "Argon2 for Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"}, {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"}, @@ -328,6 +345,7 @@ version = "21.2.0" description = "Low-level CFFI bindings for Argon2" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, @@ -365,6 +383,7 @@ version = "1.3.0" description = "Better dates & times for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, @@ -384,6 +403,7 @@ version = "2.4.1" description = "Annotate AST trees with source code positions" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, @@ -393,8 +413,8 @@ files = [ six = ">=1.12.0" [package.extras] -astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] -test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] +astroid = ["astroid (>=1,<2) ; python_version < \"3\"", "astroid (>=2,<4) ; python_version >= \"3\""] +test = ["astroid (>=1,<2) ; python_version < \"3\"", "astroid (>=2,<4) ; python_version >= \"3\"", "pytest"] [[package]] name = "async-lru" @@ -402,6 +422,7 @@ version = "2.0.4" description = "Simple LRU cache for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "async-lru-2.0.4.tar.gz", hash = "sha256:b8a59a5df60805ff63220b2a0c5b5393da5521b113cd5465a44eb037d81a5627"}, {file = "async_lru-2.0.4-py3-none-any.whl", hash = "sha256:ff02944ce3c288c5be660c42dbcca0742b32c3b279d6dceda655190240b99224"}, @@ -413,18 +434,19 @@ version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, {file = "attrs-24.2.0.tar.gz", hash = 
"sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\""] [[package]] name = "babel" @@ -432,6 +454,7 @@ version = "2.16.0" description = "Internationalization utilities" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, @@ -446,6 +469,7 @@ version = "2.2.1" description = "Function decoration for backoff and retry" optional = false python-versions = ">=3.7,<4.0" +groups = ["main", "dev"] files = [ {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, @@ -457,6 +481,7 @@ version = "4.12.3" description = 
"Screen-scraping library" optional = false python-versions = ">=3.6.0" +groups = ["dev"] files = [ {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, @@ -478,6 +503,7 @@ version = "24.10.0" description = "The uncompromising code formatter." optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, @@ -524,6 +550,7 @@ version = "6.1.0" description = "An easy safelist-based HTML-sanitizing tool." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, @@ -542,6 +569,7 @@ version = "1.34.157" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "boto3-1.34.157-py3-none-any.whl", hash = "sha256:3cc357156df5482154a016f138d1953061a181b4c594f8b6302c9d6c024bd950"}, {file = "boto3-1.34.157.tar.gz", hash = "sha256:7ef19ed38cba9863b58430fb4a66a72a5c250304f234bd1c16b860f9bf25677b"}, @@ -561,6 +589,7 @@ version = "1.34.157" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "botocore-1.34.157-py3-none-any.whl", hash = "sha256:c6cba6de8eb86ca4d2f934e009b37adbe1e7fdcfa52fbab74783f4c30676e07d"}, {file = "botocore-1.34.157.tar.gz", hash = "sha256:5628a36cec123cdc8c1158d05a7b06aa5e53649ad73796c50ef3fb51199785fb"}, @@ -580,6 +609,7 @@ version = "5.5.0" description = "Extensible memoizing collections and decorators" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, @@ -591,6 +621,7 @@ version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, @@ -602,6 +633,7 @@ version = "1.17.0" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "cffi-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb"}, {file = "cffi-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a"}, @@ -681,6 +713,7 @@ version = "3.4.0" description = "Validate configuration and produce human readable error messages." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, @@ -692,6 +725,7 @@ version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" +groups = ["main", "dev"] files = [ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, @@ -791,6 +825,7 @@ version = "8.1.7" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, @@ -805,6 +840,7 @@ version = "5.8.0" description = "" optional = false python-versions = "<4.0,>=3.8" +groups = ["dev"] files = [ {file = "cohere-5.8.0-py3-none-any.whl", hash = "sha256:f87f709be6dfe3dce57bef0dd5e90924e8828fb8d334c96fc27663b6a7298c6b"}, {file = "cohere-5.8.0.tar.gz", hash = "sha256:c4e1ab064d66cc0170091f614b4ea22f55e079f2c7fe9e0de8752fd46f8d2a70"}, @@ -829,6 +865,7 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -840,6 +877,7 @@ version = "6.8.2" description = "Add colours to the output of Python's logging module." optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "colorlog-6.8.2-py3-none-any.whl", hash = "sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33"}, {file = "colorlog-6.8.2.tar.gz", hash = "sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44"}, @@ -857,6 +895,7 @@ version = "0.2.2" description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"}, {file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"}, @@ -874,6 +913,7 @@ version = "1.2.1" description = "Python library for calculating contours of 2D quadrilateral grids" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "contourpy-1.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bd7c23df857d488f418439686d3b10ae2fbf9bc256cd045b37a8c16575ea1040"}, {file = "contourpy-1.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5b9eb0ca724a241683c9685a484da9d35c872fd42756574a7cfbf58af26677fd"}, @@ -937,6 +977,7 @@ version = "0.12.1" description = "Composable style cycles" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, @@ -952,6 +993,7 @@ version = "0.6.7" description = "Easily serialize dataclasses to and from JSON." optional = false python-versions = "<4.0,>=3.7" +groups = ["dev"] files = [ {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, @@ -967,6 +1009,7 @@ version = "2.16.0" description = "JSON schema generation from dataclasses" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "dataclasses-jsonschema-2.16.0.tar.gz", hash = "sha256:effb0c73db30a537a962da75c4f35b94a5b1b7c1b17806b1ef74aed8e0aa2768"}, {file = "dataclasses_jsonschema-2.16.0-py3-none-any.whl", hash = "sha256:d203d6a16c990f7d09eae58c97ffaaea1e45ecb7a033d312e61e4c7836a741bf"}, @@ -989,6 +1032,7 @@ version = "2.20.0" description = "HuggingFace community-driven open-source library of datasets" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "datasets-2.20.0-py3-none-any.whl", hash = "sha256:76ac02e3bdfff824492e20678f0b6b1b6d080515957fe834b00c2ba8d6b18e5e"}, {file = "datasets-2.20.0.tar.gz", hash = "sha256:3c4dbcd27e0f642b9d41d20ff2efa721a5e04b32b2ca4009e0fc9139e324553f"}, @@ -1015,7 +1059,7 @@ xxhash = "*" apache-beam = ["apache-beam (>=2.26.0)"] audio = ["librosa", "soundfile (>=0.12.1)"] benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] -dev = ["Pillow (>=9.4.0)", "absl-py", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.6.0)", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +dev = ["Pillow (>=9.4.0)", "absl-py", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14) ; sys_platform != \"win32\"", "jaxlib (>=0.3.14) ; sys_platform != \"win32\"", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", 
"pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.6.0)", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] docs = ["s3fs", "tensorflow (>=2.6.0)", "torch", "transformers"] jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] @@ -1023,7 +1067,7 @@ quality = ["ruff (>=0.3.0)"] s3 = ["s3fs"] tensorflow = ["tensorflow (>=2.6.0)"] tensorflow-gpu = ["tensorflow (>=2.6.0)"] -tests = ["Pillow (>=9.4.0)", "absl-py", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.6.0)", "tiktoken", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +tests = ["Pillow (>=9.4.0)", "absl-py", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14) ; sys_platform != \"win32\"", "jaxlib (>=0.3.14) ; sys_platform != \"win32\"", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.6.0)", "tiktoken", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] torch = ["torch"] vision = ["Pillow (>=9.4.0)"] @@ -1033,6 +1077,7 @@ version = "1.8.5" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "debugpy-1.8.5-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:7e4d594367d6407a120b76bdaa03886e9eb652c05ba7f87e37418426ad2079f7"}, {file = "debugpy-1.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4413b7a3ede757dc33a273a17d685ea2b0c09dbd312cc03f5534a0fd4d40750a"}, @@ -1064,6 +1109,7 @@ version = "5.1.1" description = "Decorators for Humans" optional = false python-versions = ">=3.5" +groups = ["main", "dev"] files = [ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, @@ -1075,6 +1121,7 @@ version = "0.7.1" description = "XML bomb protection for Python stdlib modules" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["dev"] files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -1086,6 +1133,7 @@ version = "0.3.8" description = "serialize all of Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = 
"dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, @@ -1101,6 +1149,7 @@ version = "5.6.3" description = "Disk Cache -- Disk and file backed persistent cache." optional = false python-versions = ">=3" +groups = ["main", "dev"] files = [ {file = "diskcache-5.6.3-py3-none-any.whl", hash = "sha256:5e31b2d5fbad117cc363ebaf6b689474db18a1f6438bc82358b024abd4c2ca19"}, {file = "diskcache-5.6.3.tar.gz", hash = "sha256:2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc"}, @@ -1112,6 +1161,7 @@ version = "0.3.8" description = "Distribution utilities" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, @@ -1123,6 +1173,7 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -1134,6 +1185,7 @@ version = "2.4.13" description = "DSPy" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "dspy-ai-2.4.13.tar.gz", hash = "sha256:0ed5648d8267b6a4ebe5b72ec5dbcca9fa194d800885a0182cad93c312cd3166"}, {file = "dspy_ai-2.4.13-py3-none-any.whl", hash = "sha256:b43aa117b4b6fcb009274f61adcfb0a1dbe1cbb4a370da3bd14cd4d230f17665"}, @@ -1174,6 +1226,7 @@ version = "0.2.2" description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "eval_type_backport-0.2.2-py3-none-any.whl", hash = "sha256:cb6ad7c393517f476f96d456d0412ea80f0a8cf96f6892834cd9340149111b0a"}, {file = "eval_type_backport-0.2.2.tar.gz", hash = "sha256:f0576b4cf01ebb5bd358d02314d31846af5e07678387486e2c798af0e7d849c1"}, @@ -1188,13 +1241,14 @@ version = "2.0.1" description = "Get the currently executing AST node of a frame, and other information" optional = false python-versions = ">=3.5" +groups = ["main", "dev"] files = [ {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"}, {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"}, ] [package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] [[package]] name = "faiss-cpu" @@ -1202,6 +1256,7 @@ version = "1.8.0.post1" description = "A library for efficient similarity search and clustering of dense vectors." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "faiss_cpu-1.8.0.post1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:fd84721eb599aa1da19b1b36345bb8705a60bb1d2887bbbc395a29e3d36a1a62"}, {file = "faiss_cpu-1.8.0.post1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b78ff9079d15fd0f156bf5dd8a2975a8abffac1854a86ece263eec1500a2e836"}, @@ -1241,6 +1296,7 @@ version = "1.9.5" description = "Fast read/write of AVRO files" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "fastavro-1.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:61253148e95dd2b6457247b441b7555074a55de17aef85f5165bfd5facf600fc"}, {file = "fastavro-1.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b604935d671ad47d888efc92a106f98e9440874108b444ac10e28d643109c937"}, @@ -1287,6 +1343,7 @@ version = "2.20.0" description = "Fastest Python implementation of JSON schema" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a"}, {file = "fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23"}, @@ -1301,6 +1358,7 @@ version = "3.15.4" description = "A platform independent file lock." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, @@ -1309,7 +1367,29 @@ files = [ [package.extras] docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8)"] +typing = ["typing-extensions (>=4.8) ; python_version < \"3.11\""] + +[[package]] +name = "fireworks-ai" +version = "0.15.12" +description = "Python client library for the Fireworks.ai Generative AI Platform" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "fireworks_ai-0.15.12-py3-none-any.whl", hash = "sha256:3fbf3f89e65ccfc46c88b71246b9d4fdf3301955ac4050193d8f4b4058cb193a"}, + {file = "fireworks_ai-0.15.12.tar.gz", hash = "sha256:2380a53d92244c608fd398f8d97b97380d899f3ff710091f4b50917b75119ec2"}, +] + +[package.dependencies] +httpx = "*" +httpx_sse = "*" +httpx-ws = "*" +Pillow = "*" +pydantic = "*" + +[package.extras] +flumina = ["fastapi", "gitignore-parser", "openapi-spec-validator", "prance", "safetensors", "tabulate", "torch", "tqdm"] [[package]] name = "fonttools" @@ -1317,6 +1397,7 @@ version = "4.53.1" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "fonttools-4.53.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0679a30b59d74b6242909945429dbddb08496935b82f91ea9bf6ad240ec23397"}, {file = "fonttools-4.53.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8bf06b94694251861ba7fdeea15c8ec0967f84c3d4143ae9daf42bbc7717fe3"}, @@ -1363,18 +1444,18 @@ files = [ ] [package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", 
"scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0) ; python_version <= \"3.12\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"] graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "pycairo", "scipy"] +interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""] lxml = ["lxml (>=4.0)"] pathops = ["skia-pathops (>=0.5.0)"] plot = ["matplotlib"] repacker = ["uharfbuzz (>=0.23.0)"] symfont = ["sympy"] -type1 = ["xattr"] +type1 = ["xattr ; sys_platform == \"darwin\""] ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.1.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] +unicode = ["unicodedata2 (>=15.1.0) ; python_version <= \"3.12\""] +woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"] [[package]] name = "fqdn" @@ -1382,6 +1463,7 @@ version = "1.5.1" description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" optional = false python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" +groups = ["dev"] files = [ {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, @@ -1393,6 +1475,7 @@ version = "1.4.1" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, @@ -1479,6 +1562,7 @@ version = "2024.5.0" description = "File-system specification" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "fsspec-2024.5.0-py3-none-any.whl", hash = "sha256:e0fdbc446d67e182f49a70b82cf7889028a63588fde6b222521f10937b2b670c"}, {file = "fsspec-2024.5.0.tar.gz", hash = "sha256:1d021b0b0f933e3b3029ed808eb400c08ba101ca2de4b3483fbc9ca23fcee94a"}, @@ -1520,6 +1604,7 @@ version = "5.2.0" description = "Google Drive Public File/Folder Downloader" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "gdown-5.2.0-py3-none-any.whl", hash = "sha256:33083832d82b1101bdd0e9df3edd0fbc0e1c5f14c9d8c38d2a35bf1683b526d6"}, {file = "gdown-5.2.0.tar.gz", hash = "sha256:2145165062d85520a3cd98b356c9ed522c5e7984d408535409fd46f94defc787"}, @@ -1540,6 +1625,7 @@ version = "0.6.10" description = "Google Ai Generativelanguage API client library" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "google_ai_generativelanguage-0.6.10-py3-none-any.whl", hash = 
"sha256:854a2bf833d18be05ad5ef13c755567b66a4f4a870f099b62c61fe11bddabcf4"}, {file = "google_ai_generativelanguage-0.6.10.tar.gz", hash = "sha256:6fa642c964d8728006fe7e8771026fc0b599ae0ebeaf83caf550941e8e693455"}, @@ -1557,6 +1643,7 @@ version = "2.23.0" description = "Google API client core library" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "google_api_core-2.23.0-py3-none-any.whl", hash = "sha256:c20100d4c4c41070cf365f1d8ddf5365915291b5eb11b83829fbd1c999b5122f"}, {file = "google_api_core-2.23.0.tar.gz", hash = "sha256:2ceb087315e6af43f256704b871d99326b1f12a9d6ce99beaedec99ba26a0ace"}, @@ -1568,7 +1655,7 @@ googleapis-common-protos = ">=1.56.2,<2.0.dev0" grpcio = {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""} grpcio-status = {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""} proto-plus = [ - {version = ">=1.22.3,<2.0.0dev", markers = "python_version < \"3.13\""}, + {version = ">=1.22.3,<2.0.0dev"}, {version = ">=1.25.0,<2.0.0dev", markers = "python_version >= \"3.13\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" @@ -1576,7 +1663,7 @@ requests = ">=2.18.0,<3.0.0.dev0" [package.extras] async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.dev0)"] -grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev) ; python_version >= \"3.11\"", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0) ; python_version >= \"3.11\""] grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] @@ -1586,6 +1673,7 @@ version = "2.154.0" description = "Google API Client Library for Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "google_api_python_client-2.154.0-py2.py3-none-any.whl", hash = "sha256:a521bbbb2ec0ba9d6f307cdd64ed6e21eeac372d1bd7493a4ab5022941f784ad"}, {file = "google_api_python_client-2.154.0.tar.gz", hash = "sha256:1b420062e03bfcaa1c79e2e00a612d29a6a934151ceb3d272fe150a656dc8f17"}, @@ -1604,6 +1692,7 @@ version = "2.36.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "google_auth-2.36.0-py2.py3-none-any.whl", hash = "sha256:51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb"}, {file = "google_auth-2.36.0.tar.gz", hash = "sha256:545e9618f2df0bcbb7dcbc45a546485b1212624716975a1ea5ae8149ce769ab1"}, @@ -1627,6 +1716,7 @@ version = "0.2.0" description = "Google Authentication Library: httplib2 transport" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"}, {file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"}, @@ -1642,6 +1732,7 @@ version = "0.8.3" description = "Google Generative AI High level API client library and tools." 
optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "google_generativeai-0.8.3-py3-none-any.whl", hash = "sha256:1108ff89d5b8e59f51e63d1a8bf84701cd84656e17ca28d73aeed745e736d9b7"}, ] @@ -1665,6 +1756,7 @@ version = "1.66.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed"}, {file = "googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c"}, @@ -1682,6 +1774,7 @@ version = "0.20.3" description = "Simple Python interface for Graphviz" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "graphviz-0.20.3-py3-none-any.whl", hash = "sha256:81f848f2904515d8cd359cc611faba817598d2feaac4027b266aa3eda7b3dde5"}, {file = "graphviz-0.20.3.zip", hash = "sha256:09d6bc81e6a9fa392e7ba52135a9d49f1ed62526f96499325930e87ca1b5925d"}, @@ -1698,6 +1791,8 @@ version = "3.0.3" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")" files = [ {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, @@ -1769,6 +1864,7 @@ version = "0.9.0" description = "The official Python library for the groq API" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "groq-0.9.0-py3-none-any.whl", hash = "sha256:d0e46f4ad645504672bb09c8100af3ced3a7db0d5119dc13e4aca535fc455874"}, {file = "groq-0.9.0.tar.gz", hash = "sha256:130ed5e35d3acfaab46b9e7a078eeaebf91052f4a9d71f86f87fb319b5fec332"}, @@ -1788,6 +1884,7 @@ version = "1.63.2" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "grpcio-1.63.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:bfb7443a525a0ccc8ae89d29d5257a895fe33af23ba8be21609138cef42deb79"}, {file = "grpcio-1.63.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:4410f179181961c043c58454ee9cb28474ab38080a1f12e56bac45dc1cf21491"}, @@ -1846,6 +1943,7 @@ version = "1.62.3" description = "Status proto mapping for gRPC" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"}, {file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"}, @@ -1862,6 +1960,7 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -1873,6 +1972,7 @@ version = 
"1.0.5" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, @@ -1894,6 +1994,7 @@ version = "0.22.0" description = "A comprehensive HTTP client library." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] files = [ {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, @@ -1908,6 +2009,7 @@ version = "0.27.0" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, @@ -1921,7 +2023,7 @@ idna = "*" sniffio = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -1932,17 +2034,37 @@ version = "0.4.0" description = "Consume Server-Sent Event (SSE) messages with HTTPX." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, ] +[[package]] +name = "httpx-ws" +version = "0.7.2" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "httpx_ws-0.7.2-py3-none-any.whl", hash = "sha256:dd7bf9dbaa96dcd5cef1af3a7e1130cfac068bebecce25a74145022f5a8427a3"}, + {file = "httpx_ws-0.7.2.tar.gz", hash = "sha256:93edea6c8fc313464fc287bff7d2ad20e6196b7754c76f946f73b4af79886d4e"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "huggingface-hub" version = "0.24.5" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "huggingface_hub-0.24.5-py3-none-any.whl", hash = "sha256:d93fb63b1f1a919a22ce91a14518974e81fc4610bf344dfe7572343ce8d3aced"}, {file = "huggingface_hub-0.24.5.tar.gz", hash = "sha256:7b45d6744dd53ce9cbf9880957de00e9d10a9ae837f1c9b7255fc8fa4e8264f3"}, @@ -1977,6 +2099,7 @@ version = "2.6.0" description = "File identification library for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, @@ -1991,6 +2114,7 @@ version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = 
false python-versions = ">=3.5" +groups = ["main", "dev"] files = [ {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, @@ -2002,6 +2126,7 @@ version = "6.29.5" description = "IPython Kernel for Jupyter" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"}, {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"}, @@ -2035,6 +2160,7 @@ version = "8.26.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" +groups = ["main", "dev"] files = [ {file = "ipython-8.26.0-py3-none-any.whl", hash = "sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff"}, {file = "ipython-8.26.0.tar.gz", hash = "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c"}, @@ -2055,7 +2181,7 @@ typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} [package.extras] all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] black = ["black"] -doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli", "typing-extensions"] +doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli ; python_version < \"3.11\"", "typing-extensions"] kernel = ["ipykernel"] matplotlib = ["matplotlib"] nbconvert = ["nbconvert"] @@ -2072,6 +2198,7 @@ version = "8.1.3" description = "Jupyter interactive widgets" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "ipywidgets-8.1.3-py3-none-any.whl", hash = "sha256:efafd18f7a142248f7cb0ba890a68b96abd4d6e88ddbda483c9130d12667eaf2"}, {file = "ipywidgets-8.1.3.tar.gz", hash = "sha256:f5f9eeaae082b1823ce9eac2575272952f40d748893972956dc09700a6392d9c"}, @@ -2093,6 +2220,7 @@ version = "20.11.0" description = "Operations with ISO 8601 durations" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, @@ -2107,6 +2235,7 @@ version = "0.19.1" description = "An autocompletion tool for Python that can be used for text editors." optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, @@ -2126,6 +2255,7 @@ version = "3.1.4" description = "A very fast and expressive template engine." 
optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, @@ -2143,6 +2273,7 @@ version = "0.5.0" description = "Fast iterable JSON parser." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"}, {file = "jiter-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a063f71c4b06225543dddadbe09d203dc0c95ba352d8b85f1221173480a71d5"}, @@ -2213,6 +2344,7 @@ version = "1.0.1" description = "JSON Matching Expressions" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, @@ -2224,6 +2356,7 @@ version = "1.4.2" description = "Lightweight pipelining with Python functions" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, @@ -2235,6 +2368,7 @@ version = "0.9.25" description = "A Python implementation of the JSON5 data format." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "json5-0.9.25-py3-none-any.whl", hash = "sha256:34ed7d834b1341a86987ed52f3f76cd8ee184394906b6e22a1e0deb9ab294e8f"}, {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"}, @@ -2246,6 +2380,7 @@ version = "4.0.0" description = "Library with helpers for the jsonlines file format" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jsonlines-4.0.0-py3-none-any.whl", hash = "sha256:185b334ff2ca5a91362993f42e83588a360cf95ce4b71a73548502bda52a7c55"}, {file = "jsonlines-4.0.0.tar.gz", hash = "sha256:0c6d2c09117550c089995247f605ae4cf77dd1533041d366351f6f298822ea74"}, @@ -2260,6 +2395,7 @@ version = "1.33" description = "Apply JSON-Patches (RFC 6902)" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +groups = ["dev"] files = [ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, @@ -2268,12 +2404,25 @@ files = [ [package.dependencies] jsonpointer = ">=1.9" +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + [[package]] name = "jsonpointer" version = "3.0.0" description = "Identify specific nodes in a JSON document (RFC 6901)" optional = 
false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, @@ -2285,6 +2434,7 @@ version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, @@ -2314,6 +2464,7 @@ version = "2023.12.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, @@ -2328,6 +2479,7 @@ version = "1.0.0" description = "Jupyter metapackage. Install all the Jupyter components in one go." optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "jupyter-1.0.0-py2.py3-none-any.whl", hash = "sha256:5b290f93b98ffbc21c0c7e749f054b3267782166d72fa5e3ed1ed4eaf34a2b78"}, {file = "jupyter-1.0.0.tar.gz", hash = "sha256:d9dc4b3318f310e34c82951ea5d6683f67bed7def4b259fafbfe4f1beb1d8e5f"}, @@ -2348,6 +2500,7 @@ version = "8.6.2" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"}, {file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"}, @@ -2362,7 +2515,7 @@ traitlets = ">=5.3" [package.extras] docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] +test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko ; sys_platform == \"win32\"", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] [[package]] name = "jupyter-console" @@ -2370,6 +2523,7 @@ version = "6.6.3" description = "Jupyter terminal console" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485"}, {file = "jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539"}, @@ -2394,6 +2548,7 @@ version = "5.7.2" description = "Jupyter core package. A base package on which Jupyter projects rely." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"}, {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"}, @@ -2414,6 +2569,7 @@ version = "0.10.0" description = "Jupyter Event System library" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyter_events-0.10.0-py3-none-any.whl", hash = "sha256:4b72130875e59d57716d327ea70d3ebc3af1944d3717e5a498b8a06c6c159960"}, {file = "jupyter_events-0.10.0.tar.gz", hash = "sha256:670b8229d3cc882ec782144ed22e0d29e1c2d639263f92ca8383e66682845e22"}, @@ -2439,6 +2595,7 @@ version = "2.2.5" description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyter-lsp-2.2.5.tar.gz", hash = "sha256:793147a05ad446f809fd53ef1cd19a9f5256fd0a2d6b7ce943a982cb4f545001"}, {file = "jupyter_lsp-2.2.5-py3-none-any.whl", hash = "sha256:45fbddbd505f3fbfb0b6cb2f1bc5e15e83ab7c79cd6e89416b248cb3c00c11da"}, @@ -2453,6 +2610,7 @@ version = "2.14.2" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyter_server-2.14.2-py3-none-any.whl", hash = "sha256:47ff506127c2f7851a17bf4713434208fc490955d0e8632e95014a9a9afbeefd"}, {file = "jupyter_server-2.14.2.tar.gz", hash = "sha256:66095021aa9638ced276c248b1d81862e4c50f292d575920bbe960de1c56b12b"}, @@ -2489,6 +2647,7 @@ version = "0.5.3" description = "A Jupyter Server Extension Providing Terminals." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa"}, {file = "jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269"}, @@ -2508,6 +2667,7 @@ version = "4.2.4" description = "JupyterLab computational environment" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyterlab-4.2.4-py3-none-any.whl", hash = "sha256:807a7ec73637744f879e112060d4b9d9ebe028033b7a429b2d1f4fc523d00245"}, {file = "jupyterlab-4.2.4.tar.gz", hash = "sha256:343a979fb9582fd08c8511823e320703281cd072a0049bcdafdc7afeda7f2537"}, @@ -2541,6 +2701,7 @@ version = "0.3.0" description = "Pygments theme using JupyterLab CSS variables" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"}, {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"}, @@ -2552,6 +2713,7 @@ version = "2.27.3" description = "A set of server components for JupyterLab and JupyterLab like applications." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jupyterlab_server-2.27.3-py3-none-any.whl", hash = "sha256:e697488f66c3db49df675158a77b3b017520d772c6e1548c7d9bcc5df7944ee4"}, {file = "jupyterlab_server-2.27.3.tar.gz", hash = "sha256:eb36caca59e74471988f0ae25c77945610b887f777255aa21f8065def9e51ed4"}, @@ -2577,6 +2739,7 @@ version = "3.0.11" description = "Jupyter interactive widgets for JupyterLab" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "jupyterlab_widgets-3.0.11-py3-none-any.whl", hash = "sha256:78287fd86d20744ace330a61625024cf5521e1c012a352ddc0a3cdc2348becd0"}, {file = "jupyterlab_widgets-3.0.11.tar.gz", hash = "sha256:dd5ac679593c969af29c9bed054c24f26842baa51352114736756bc035deee27"}, @@ -2588,6 +2751,7 @@ version = "1.4.5" description = "A fast implementation of the Cassowary constraint solver" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, @@ -2701,6 +2865,7 @@ version = "0.2.16" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" +groups = ["dev"] files = [ {file = "langchain-0.2.16-py3-none-any.whl", hash = "sha256:8f59ee8b45f268df4b924ea3b9c63e49286efa756d16b3f6a9de5c6e502c36e1"}, {file = "langchain-0.2.16.tar.gz", hash = "sha256:ffb426a76a703b73ac69abad77cd16eaf03dda76b42cff55572f592d74944166"}, @@ -2727,6 +2892,7 @@ version = "0.2.16" description = "Community contributed LangChain integrations." 
optional = false python-versions = "<4.0,>=3.8.1" +groups = ["dev"] files = [ {file = "langchain_community-0.2.16-py3-none-any.whl", hash = "sha256:115e1419c176091d4e00240cb5a38612a249e70f213516b6cacae61a8794a868"}, {file = "langchain_community-0.2.16.tar.gz", hash = "sha256:ab416b793a7aed1fa46ebaffd29993296b02286a99819eabc43be2ea8e41ae78"}, @@ -2753,6 +2919,7 @@ version = "0.2.38" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" +groups = ["dev"] files = [ {file = "langchain_core-0.2.38-py3-none-any.whl", hash = "sha256:8a5729bc7e68b4af089af20eff44fe4e7ca21d0e0c87ec21cef7621981fd1a4a"}, {file = "langchain_core-0.2.38.tar.gz", hash = "sha256:eb69dbedd344f2ee1f15bcea6c71a05884b867588fadc42d04632e727c1238f3"}, @@ -2776,6 +2943,7 @@ version = "0.1.23" description = "An integration package connecting OpenAI and LangChain" optional = false python-versions = "<4.0,>=3.8.1" +groups = ["dev"] files = [ {file = "langchain_openai-0.1.23-py3-none-any.whl", hash = "sha256:8e3d215803e157f26480c6108eb4333629832b1a0e746723060c24f93b8b78f4"}, {file = "langchain_openai-0.1.23.tar.gz", hash = "sha256:ed7f16671ea0af177ac5f82d5645a746c5097c56f97b31798e5c07b5c84f0eed"}, @@ -2792,6 +2960,7 @@ version = "0.2.4" description = "LangChain text splitting utilities" optional = false python-versions = "<4.0,>=3.8.1" +groups = ["dev"] files = [ {file = "langchain_text_splitters-0.2.4-py3-none-any.whl", hash = "sha256:2702dee5b7cbdd595ccbe43b8d38d01a34aa8583f4d6a5a68ad2305ae3e7b645"}, {file = "langchain_text_splitters-0.2.4.tar.gz", hash = "sha256:f7daa7a3b0aa8309ce248e2e2b6fc8115be01118d336c7f7f7dfacda0e89bf29"}, @@ -2806,6 +2975,7 @@ version = "0.1.114" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" +groups = ["dev"] files = [ {file = "langsmith-0.1.114-py3-none-any.whl", hash = "sha256:2b6b6b49ddb1cea75f465da107ddc21e60d3c7242813dcc0de90f914e4957249"}, {file = "langsmith-0.1.114.tar.gz", hash = "sha256:1683e1505d034d1bf7c960067c1357fd0d294172dd20540f913093e4b86857a2"}, @@ -2826,6 +2996,7 @@ version = "0.11.6" description = "Lightning toolbox for across the our ecosystem." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "lightning_utilities-0.11.6-py3-none-any.whl", hash = "sha256:ecd9953c316cbaf56ad820fbe7bd062187b9973c4a23d47b076cd59dc080a310"}, {file = "lightning_utilities-0.11.6.tar.gz", hash = "sha256:79fc27ef8ec8b8d55a537920f2c7610270c0c9e037fa6efc78f1aa34ec8cdf04"}, @@ -2847,6 +3018,7 @@ version = "1.3.5" description = "A super-fast templating language that borrows the best ideas from the existing templating languages." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "Mako-1.3.5-py3-none-any.whl", hash = "sha256:260f1dbc3a519453a9c856dedfe4beb4e50bd5a26d96386cb6c80856556bb91a"}, {file = "Mako-1.3.5.tar.gz", hash = "sha256:48dbc20568c1d276a2698b36d968fa76161bf127194907ea6fc594fa81f943bc"}, @@ -2866,6 +3038,7 @@ version = "3.6" description = "Python implementation of John Gruber's Markdown." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "Markdown-3.6-py3-none-any.whl", hash = "sha256:48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f"}, {file = "Markdown-3.6.tar.gz", hash = "sha256:ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224"}, @@ -2881,6 +3054,7 @@ version = "3.0.0" description = "Python port of markdown-it. 
Markdown parsing, done right!" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -2905,6 +3079,7 @@ version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, @@ -2974,6 +3149,7 @@ version = "3.22.0" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, @@ -2993,6 +3169,7 @@ version = "3.9.1.post1" description = "Python plotting package" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "matplotlib-3.9.1.post1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3779ad3e8b72df22b8a622c5796bbcfabfa0069b835412e3c1dec8ee3de92d0c"}, {file = "matplotlib-3.9.1.post1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec400340f8628e8e2260d679078d4e9b478699f386e5cc8094e80a1cb0039c7c"}, @@ -3045,6 +3222,7 @@ version = "0.1.7" description = "Inline Matplotlib backend for Jupyter" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, @@ -3059,17 +3237,42 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] +[[package]] +name = "mistralai" +version = "1.5.0" +description = "Python Client SDK for the Mistral AI API." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "mistralai-1.5.0-py3-none-any.whl", hash = "sha256:9372537719f87bd6f9feef4747d0bf1f4fbe971f8c02945ca4b4bf3c94571c97"}, + {file = "mistralai-1.5.0.tar.gz", hash = "sha256:fd94bc93bc25aad9c6dd8005b1a0bc4ba1250c6b3fbf855a49936989cc6e5c0d"}, +] + +[package.dependencies] +eval-type-backport = ">=0.2.0" +httpx = ">=0.27.0" +jsonpath-python = ">=1.0.6" +pydantic = ">=2.9.0" +python-dateutil = ">=2.8.2" +typing-inspect = ">=0.9.0" + +[package.extras] +gcp = ["google-auth (>=2.27.0)", "requests (>=2.32.3)"] + [[package]] name = "mistune" version = "3.0.2" description = "A sane and fast Markdown parser with useful plugins and renderers" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"}, {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, @@ -3081,6 +3284,7 @@ version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, @@ -3089,7 +3293,7 @@ files = [ [package.extras] develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] docs = ["sphinx"] -gmpy = ["gmpy2 (>=2.1.0a4)"] +gmpy = ["gmpy2 (>=2.1.0a4) ; platform_python_implementation != \"PyPy\""] tests = ["pytest (>=4.6)"] [[package]] @@ -3098,6 +3302,7 @@ version = "6.0.5" description = "multidict implementation" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"}, {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"}, @@ -3197,6 +3402,7 @@ version = "0.70.16" description = "better multiprocessing and multithreading in Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "multiprocess-0.70.16-pp310-pypy310_pp73-macosx_10_13_x86_64.whl", hash = "sha256:476887be10e2f59ff183c006af746cb6f1fd0eadcfd4ef49e605cbe2659920ee"}, {file = "multiprocess-0.70.16-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d951bed82c8f73929ac82c61f01a7b5ce8f3e5ef40f5b52553b4f547ce2b08ec"}, @@ -3221,6 +3427,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" +groups = ["main", "dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -3232,6 +3439,7 @@ version = "0.10.0" description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "nbclient-0.10.0-py3-none-any.whl", hash = "sha256:f13e3529332a1f1f81d82a53210322476a168bb7090a0289c795fe9cc11c9d3f"}, {file = "nbclient-0.10.0.tar.gz", hash = "sha256:4b3f1b7dba531e498449c4db4f53da339c91d449dc11e9af3a43b4eb5c5abb09"}, @@ -3254,6 +3462,7 @@ version = "7.16.4" description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "nbconvert-7.16.4-py3-none-any.whl", hash = "sha256:05873c620fe520b6322bf8a5ad562692343fe3452abda5765c7a34b7d1aa3eb3"}, {file = "nbconvert-7.16.4.tar.gz", hash = "sha256:86ca91ba266b0a448dc96fa6c5b9d98affabde2867b363258703536807f9f7f4"}, @@ -3291,6 +3500,7 @@ version = "5.10.4" description = "The Jupyter Notebook format" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"}, {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"}, @@ -3312,6 +3522,7 @@ version = "1.6.0" description = "Patch asyncio to allow nested event loops" optional = false python-versions = ">=3.5" +groups = ["main", "dev"] files = [ {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, @@ -3323,6 +3534,7 @@ version = "3.3" description = "Python package for creating and manipulating graphs and networks" optional = false python-versions = ">=3.10" +groups = ["dev"] files = [ {file = "networkx-3.3-py3-none-any.whl", hash = "sha256:28575580c6ebdaf4505b22c6256a2b9de86b316dc63ba9e93abde3d78dfdbcf2"}, {file = "networkx-3.3.tar.gz", hash = "sha256:0c127d8b2f4865f59ae9cb8aafcd60b5c70f3241ebd66f7defad7c4ab90126c9"}, @@ -3341,6 +3553,7 @@ version = "3.9.1" description = "Natural Language Toolkit" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1"}, {file = "nltk-3.9.1.tar.gz", hash = "sha256:87d127bd3de4bd89a4f81265e5fa59cb1b199b27440175370f7417d2bc7ae868"}, @@ -3366,6 +3579,7 @@ version = "1.9.1" description = "Node.js virtual environment builder" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] files = [ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, @@ -3377,6 +3591,7 @@ version = "7.2.1" description = "Jupyter Notebook - A web-based notebook environment for interactive computing" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "notebook-7.2.1-py3-none-any.whl", hash = "sha256:f45489a3995746f2195a137e0773e2130960b51c9ac3ce257dbc2705aab3a6ca"}, {file = "notebook-7.2.1.tar.gz", hash = "sha256:4287b6da59740b32173d01d641f763d292f49c30e7a51b89c46ba8473126341e"}, @@ -3392,7 +3607,7 @@ tornado = 
">=6.2.0" [package.extras] dev = ["hatch", "pre-commit"] docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.27.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"] +test = ["importlib-resources (>=5.0) ; python_version < \"3.10\"", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.27.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"] [[package]] name = "notebook-shim" @@ -3400,6 +3615,7 @@ version = "0.2.4" description = "A shim layer for notebook traits and config" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef"}, {file = "notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb"}, @@ -3417,6 +3633,7 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -3462,6 +3679,8 @@ version = "12.1.3.1" description = "CUBLAS native runtime libraries" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, @@ -3473,6 +3692,8 @@ version = "12.1.105" description = "CUDA profiling tools runtime libs." 
optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, @@ -3484,6 +3705,8 @@ version = "12.1.105" description = "NVRTC native runtime libraries" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, @@ -3495,6 +3718,8 @@ version = "12.1.105" description = "CUDA Runtime native Libraries" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, @@ -3506,6 +3731,8 @@ version = "9.1.0.70" description = "cuDNN runtime libraries" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f"}, {file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-win_amd64.whl", hash = "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a"}, @@ -3520,6 +3747,8 @@ version = "11.0.2.54" description = "CUFFT native runtime libraries" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, @@ -3531,6 +3760,8 @@ version = "10.3.2.106" description = "CURAND native runtime libraries" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, @@ -3542,6 +3773,8 @@ version = "11.4.5.107" description = "CUDA solver native runtime libraries" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, {file = 
"nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, @@ -3558,6 +3791,8 @@ version = "12.1.0.106" description = "CUSPARSE native runtime libraries" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, @@ -3572,6 +3807,8 @@ version = "2.20.5" description = "NVIDIA Collective Communication Library (NCCL) Runtime" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"}, @@ -3583,6 +3820,8 @@ version = "12.6.20" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-manylinux2014_aarch64.whl", hash = "sha256:84fb38465a5bc7c70cbc320cfd0963eb302ee25a5e939e9f512bbba55b6072fb"}, {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-manylinux2014_x86_64.whl", hash = "sha256:562ab97ea2c23164823b2a89cb328d01d45cb99634b8c65fe7cd60d14562bd79"}, @@ -3595,6 +3834,8 @@ version = "12.1.105" description = "NVIDIA Tools Extension" optional = false python-versions = ">=3" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, @@ -3606,6 +3847,7 @@ version = "0.2.1" description = "The official Python client for Ollama." 
optional = false python-versions = "<4.0,>=3.8" +groups = ["dev"] files = [ {file = "ollama-0.2.1-py3-none-any.whl", hash = "sha256:b6e2414921c94f573a903d1069d682ba2fb2607070ea9e19ca4a7872f2a460ec"}, {file = "ollama-0.2.1.tar.gz", hash = "sha256:fa316baa9a81eac3beb4affb0a17deb3008fdd6ed05b123c26306cfbe4c349b6"}, @@ -3620,6 +3862,7 @@ version = "1.40.2" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" +groups = ["main", "dev"] files = [ {file = "openai-1.40.2-py3-none-any.whl", hash = "sha256:38068f858f310b4fd4b0ea8734c3efcfde3c15a2978311e1453bd84817231b96"}, {file = "openai-1.40.2.tar.gz", hash = "sha256:2180e9070bd36084328248b3ce668964e8ddd2e9019e1d426e31dc54cc117bb5"}, @@ -3644,6 +3887,7 @@ version = "3.6.1" description = "A hyperparameter optimization framework" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "optuna-3.6.1-py3-none-any.whl", hash = "sha256:b32e0490bd6552790b70ec94de77dd2855057c9e229cd9f4da48fe8a31c7f1cc"}, {file = "optuna-3.6.1.tar.gz", hash = "sha256:146e530b57b4b9afd7526b3e642fbe65491f7e292b405913355f8e438e361ecf"}, @@ -3663,7 +3907,7 @@ benchmark = ["asv (>=0.5.0)", "botorch", "cma", "virtualenv"] checking = ["black", "blackdoc", "flake8", "isort", "mypy", "mypy-boto3-s3", "types-PyYAML", "types-redis", "types-setuptools", "types-tqdm", "typing-extensions (>=3.10.0.0)"] document = ["ase", "cmaes (>=0.10.0)", "fvcore", "lightgbm", "matplotlib (!=3.6.0)", "pandas", "pillow", "plotly (>=4.9.0)", "scikit-learn", "sphinx", "sphinx-copybutton", "sphinx-gallery", "sphinx-plotly-directive", "sphinx-rtd-theme (>=1.2.0)", "torch", "torchvision"] optional = ["boto3", "cmaes (>=0.10.0)", "google-cloud-storage", "matplotlib (!=3.6.0)", "pandas", "plotly (>=4.9.0)", "redis", "scikit-learn (>=0.24.2)", "scipy", "torch"] -test = ["coverage", "fakeredis[lua]", "kaleido", "moto", "pytest", "scipy (>=1.9.2)", "torch"] +test = ["coverage", "fakeredis[lua]", "kaleido", "moto", "pytest", "scipy (>=1.9.2) ; python_version >= \"3.8\"", "torch"] [[package]] name = "orjson" @@ -3671,6 +3915,7 @@ version = "3.10.7" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, @@ -3737,6 +3982,7 @@ version = "7.7.0" description = "A decorator to automatically detect mismatch when overriding a method." 
optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, @@ -3748,6 +3994,7 @@ version = "24.1" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, @@ -3759,6 +4006,7 @@ version = "2.2.2" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, @@ -3831,6 +4079,7 @@ version = "1.5.1" description = "Utilities for writing pandoc filters in python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] files = [ {file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"}, {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"}, @@ -3842,6 +4091,7 @@ version = "0.9.0" description = "Parameterized testing with any Python test framework" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"}, {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"}, @@ -3856,6 +4106,7 @@ version = "0.8.4" description = "A Python Parser" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, @@ -3871,6 +4122,7 @@ version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, @@ -3882,6 +4134,8 @@ version = "4.9.0" description = "Pexpect allows easy control of interactive console applications." 
optional = false python-versions = "*" +groups = ["main", "dev"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" files = [ {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, @@ -3896,6 +4150,7 @@ version = "0.2.5" description = "pgvector support for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pgvector-0.2.5-py2.py3-none-any.whl", hash = "sha256:5e5e93ec4d3c45ab1fa388729d56c602f6966296e19deee8878928c6d567e41b"}, ] @@ -3909,6 +4164,7 @@ version = "11.1.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"}, {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"}, @@ -3988,7 +4244,7 @@ docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] -typing = ["typing-extensions"] +typing = ["typing-extensions ; python_version < \"3.10\""] xmp = ["defusedxml"] [[package]] @@ -3997,6 +4253,7 @@ version = "4.2.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, @@ -4013,6 +4270,7 @@ version = "3.8.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f"}, {file = "pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af"}, @@ -4031,6 +4289,7 @@ version = "0.20.0" description = "Python client for the Prometheus monitoring system." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "prometheus_client-0.20.0-py3-none-any.whl", hash = "sha256:cde524a85bce83ca359cc837f28b8c0db5cac7aa653a588fd7e84ba061c329e7"}, {file = "prometheus_client-0.20.0.tar.gz", hash = "sha256:287629d00b147a32dcb2be0b9df905da599b2d82f80377083ec8463309a4bb89"}, @@ -4045,6 +4304,7 @@ version = "3.0.47" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" +groups = ["main", "dev"] files = [ {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, @@ -4059,6 +4319,7 @@ version = "1.25.0" description = "Beautiful, Pythonic protocol buffers." 
optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "proto_plus-1.25.0-py3-none-any.whl", hash = "sha256:c91fc4a65074ade8e458e95ef8bac34d4008daa7cce4a12d6707066fca648961"}, {file = "proto_plus-1.25.0.tar.gz", hash = "sha256:fbb17f57f7bd05a68b7707e745e26528b0b3c34e378db91eef93912c54982d91"}, @@ -4076,6 +4337,7 @@ version = "4.25.4" description = "" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"}, {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"}, @@ -4096,6 +4358,7 @@ version = "6.0.0" description = "Cross-platform lib for process and system monitoring in Python." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["dev"] files = [ {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, @@ -4117,7 +4380,7 @@ files = [ ] [package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] +test = ["enum34 ; python_version <= \"3.4\"", "ipaddress ; python_version < \"3.0\"", "mock ; python_version < \"3.0\"", "pywin32 ; sys_platform == \"win32\"", "wmi ; sys_platform == \"win32\""] [[package]] name = "ptyprocess" @@ -4125,10 +4388,12 @@ version = "0.7.0" description = "Run a subprocess in a pseudo terminal" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] +markers = {main = "sys_platform != \"win32\" and sys_platform != \"emscripten\"", dev = "sys_platform != \"win32\" and sys_platform != \"emscripten\" or os_name != \"nt\""} [[package]] name = "pure-eval" @@ -4136,6 +4401,7 @@ version = "0.2.3" description = "Safely evaluate AST nodes without side effects" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, @@ -4150,6 +4416,7 @@ version = "17.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, @@ -4201,6 +4468,7 @@ version = "0.6" description = "" optional = false python-versions = ">=3.5" +groups = ["dev"] files = [ {file = "pyarrow_hotfix-0.6-py3-none-any.whl", hash = "sha256:dcc9ae2d220dff0083be6a9aa8e0cdee5182ad358d4931fce825c545e5c89178"}, {file = "pyarrow_hotfix-0.6.tar.gz", hash = "sha256:79d3e030f7ff890d408a100ac16d6f00b14d44a502d7897cd9fc3e3a534e9945"}, @@ -4212,6 +4480,7 @@ version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and 
DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, @@ -4223,6 +4492,7 @@ version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, @@ -4237,6 +4507,7 @@ version = "2.22" description = "C parser in Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, @@ -4244,122 +4515,133 @@ files = [ [[package]] name = "pydantic" -version = "2.8.2" +version = "2.11.4" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main", "dev"] files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"}, + {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" -typing-extensions = [ - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, -] +annotated-types = ">=0.6.0" +pydantic-core = "2.33.2" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = 
"sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = 
"sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = 
"pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = 
"sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = 
"pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, ] [package.dependencies] @@ -4371,6 +4653,7 @@ version = "2.0.0" description = "Python interface to Graphviz's Dot" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pydot-2.0.0-py3-none-any.whl", hash = "sha256:408a47913ea7bd5d2d34b274144880c1310c4aee901f353cf21fe2e526a4ea28"}, {file = "pydot-2.0.0.tar.gz", hash = "sha256:60246af215123fa062f21cd791be67dda23a6f280df09f68919e637a1e4f3235"}, @@ -4390,6 +4673,7 @@ version = "2.18.0" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, @@ -4404,6 +4688,7 @@ version = "3.1.2" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.6.8" +groups = ["dev"] files = [ {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, @@ -4418,6 +4703,7 @@ version = "0.3.4" description = "pysbd (Python Sentence Boundary Disambiguation) is a rule-based sentence boundary detection that works out-of-the-box across many languages." optional = false python-versions = ">=3" +groups = ["dev"] files = [ {file = "pysbd-0.3.4-py3-none-any.whl", hash = "sha256:cd838939b7b0b185fcf86b0baf6636667dfb6e474743beeff878e9f42e022953"}, ] @@ -4428,6 +4714,7 @@ version = "1.7.1" description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." 
optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] files = [ {file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"}, {file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"}, @@ -4440,6 +4727,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -4454,6 +4742,7 @@ version = "1.0.1" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, @@ -4468,6 +4757,7 @@ version = "2.0.7" description = "A python library adding a json log formatter" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"}, {file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"}, @@ -4479,6 +4769,7 @@ version = "2024.1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, @@ -4490,6 +4781,8 @@ version = "306" description = "Python for Window Extensions" optional = false python-versions = "*" +groups = ["dev"] +markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\"" files = [ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, @@ -4513,6 +4806,8 @@ version = "2.0.13" description = "Pseudo terminal support for Windows from Python." 
optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "os_name == \"nt\"" files = [ {file = "pywinpty-2.0.13-cp310-none-win_amd64.whl", hash = "sha256:697bff211fb5a6508fee2dc6ff174ce03f34a9a233df9d8b5fe9c8ce4d5eaf56"}, {file = "pywinpty-2.0.13-cp311-none-win_amd64.whl", hash = "sha256:b96fb14698db1284db84ca38c79f15b4cfdc3172065b5137383910567591fa99"}, @@ -4528,6 +4823,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -4590,6 +4886,7 @@ version = "26.1.0" description = "Python bindings for 0MQ" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pyzmq-26.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:263cf1e36862310bf5becfbc488e18d5d698941858860c5a8c079d1511b3b18e"}, {file = "pyzmq-26.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d5c8b17f6e8f29138678834cf8518049e740385eb2dbf736e8f07fc6587ec682"}, @@ -4711,6 +5008,7 @@ version = "5.5.2" description = "Jupyter Qt console" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "qtconsole-5.5.2-py3-none-any.whl", hash = "sha256:42d745f3d05d36240244a04e1e1ec2a86d5d9b6edb16dbdef582ccb629e87e0b"}, {file = "qtconsole-5.5.2.tar.gz", hash = "sha256:6b5fb11274b297463706af84dcbbd5c92273b1f619e6d25d08874b0a88516989"}, @@ -4736,6 +5034,7 @@ version = "2.4.1" description = "Provides an abstraction layer on top of the various Qt bindings (PyQt5/6 and PySide2/6)." optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "QtPy-2.4.1-py3-none-any.whl", hash = "sha256:1c1d8c4fa2c884ae742b069151b0abe15b3f70491f3972698c683b8e38de839b"}, {file = "QtPy-2.4.1.tar.gz", hash = "sha256:a5a15ffd519550a1361bdc56ffc07fda56a6af7292f17c7b395d4083af632987"}, @@ -4753,6 +5052,7 @@ version = "0.1.16" description = "" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "ragas-0.1.16-py3-none-any.whl", hash = "sha256:371378dda700fb7cf7ba5e473d280887e51a6248574bc474e5952a05b8312a80"}, {file = "ragas-0.1.16.tar.gz", hash = "sha256:23d0d27272fd47d5e6687f1f05461098650c09ad20337e93a0cd7cbfcc2f65b8"}, @@ -4780,6 +5080,7 @@ version = "0.35.1" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, @@ -4795,6 +5096,7 @@ version = "2024.7.24" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"}, {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"}, @@ -4883,6 +5185,7 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -4905,6 +5208,7 @@ version = "0.1.4" description = "A pure python RFC3339 validator" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["dev"] files = [ {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, @@ -4919,6 +5223,7 @@ version = "0.1.1" description = "Pure python rfc3986 validator" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["dev"] files = [ {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, @@ -4930,6 +5235,7 @@ version = "13.9.4" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, @@ -4948,6 +5254,7 @@ version = "0.20.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"}, {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"}, @@ -5060,6 +5367,7 @@ version = "4.9" description = "Pure-Python RSA implementation" optional = false python-versions = ">=3.6,<4" +groups = ["dev"] files = [ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, @@ -5074,6 +5382,7 @@ version = "0.8.0" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "ruff-0.8.0-py3-none-linux_armv6l.whl", hash = "sha256:fcb1bf2cc6706adae9d79c8d86478677e3bbd4ced796ccad106fd4776d395fea"}, {file = "ruff-0.8.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:295bb4c02d58ff2ef4378a1870c20af30723013f441c9d1637a008baaf928c8b"}, @@ -5101,6 +5410,7 @@ version = "0.10.2" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "s3transfer-0.10.2-py3-none-any.whl", hash = "sha256:eca1c20de70a39daee580aef4986996620f365c4e0fda6a86100231d62f1bf69"}, {file = "s3transfer-0.10.2.tar.gz", hash = "sha256:0711534e9356d3cc692fdde846b4a1e4b0cb6519971860796e6bc4c7aea00ef6"}, @@ -5118,6 +5428,7 @@ version = "0.4.4" description = "" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "safetensors-0.4.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2adb497ada13097f30e386e88c959c0fda855a5f6f98845710f5bb2c57e14f12"}, {file = "safetensors-0.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7db7fdc2d71fd1444d85ca3f3d682ba2df7d61a637dfc6d80793f439eae264ab"}, @@ -5250,15 +5561,16 @@ version = "1.8.3" description = "Send file to trash natively under Mac OS X, Windows and Linux" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["dev"] files = [ {file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"}, {file = "Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"}, ] [package.extras] -nativelib = ["pyobjc-framework-Cocoa", "pywin32"] -objc = ["pyobjc-framework-Cocoa"] -win32 = ["pywin32"] +nativelib = ["pyobjc-framework-Cocoa ; sys_platform == \"darwin\"", "pywin32 ; sys_platform == \"win32\""] +objc = ["pyobjc-framework-Cocoa ; sys_platform == \"darwin\""] +win32 = ["pywin32 ; sys_platform == \"win32\""] [[package]] name = "setuptools" @@ -5266,15 +5578,16 @@ version = "72.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "setuptools-72.1.0-py3-none-any.whl", hash = "sha256:5a03e1860cf56bb6ef48ce186b0e557fdba433237481a9a625176c2831be15d1"}, {file = "setuptools-72.1.0.tar.gz", hash = "sha256:8d243eff56d095e5817f796ede6ae32941278f542e0f941867cc05ae52b162ec"}, ] [package.extras] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib-metadata (>=6) ; python_version < \"3.10\"", "importlib-resources (>=5.10.2) ; python_version < \"3.9\"", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", 
"mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (<0.4)", "pytest-ruff (>=0.2.1)", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-ruff (<0.4) ; platform_system == \"Windows\"", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "pytest-ruff (>=0.3.2) ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "shellingham" @@ -5282,6 +5595,7 @@ version = "1.5.4" description = "Tool to Detect Surrounding Shell" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, @@ -5293,6 +5607,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main", "dev"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -5304,6 +5619,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -5315,6 +5631,7 @@ version = "2.5" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, @@ -5326,6 +5643,7 @@ version = "2.0.32" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619"}, {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389"}, @@ -5413,6 +5731,7 @@ version = "0.6.3" description = "Extract data from python stack frames and tracebacks for informative displays" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, @@ -5432,6 +5751,7 @@ version = "24.4.0" description = "Structured Logging for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "structlog-24.4.0-py3-none-any.whl", hash = "sha256:597f61e80a91cc0749a9fd2a098ed76715a1c8a01f73e336b746504d1aad7610"}, {file = "structlog-24.4.0.tar.gz", hash = "sha256:b27bfecede327a6d2da5fbc96bd859f114ecc398a6389d664f62085ee7ae6fc4"}, @@ -5449,6 +5769,7 @@ version = "1.13.1" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8"}, {file = "sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f"}, @@ -5466,6 +5787,7 @@ version = "0.9.0" description = "Pretty-print tabular data" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, @@ -5480,6 +5802,7 @@ version = "8.4.2" description = "Retry code until it succeeds" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "tenacity-8.4.2-py3-none-any.whl", hash = "sha256:9e6f7cf7da729125c7437222f8a522279751cdfbe6b67bfe64f75d3a348661b2"}, {file = "tenacity-8.4.2.tar.gz", hash = "sha256:cd80a53a79336edba8489e767f729e4f391c896956b57140b5d7511a64bbd3ef"}, @@ -5495,6 +5818,7 @@ version = "2.17.0" description = "TensorBoard lets you watch Tensors Flow" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "tensorboard-2.17.0-py3-none-any.whl", hash = "sha256:859a499a9b1fb68a058858964486627100b71fcb21646861c61d31846a6478fb"}, ] @@ -5516,6 +5840,7 @@ version = "0.7.2" description = "Fast data loading for TensorBoard" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "tensorboard_data_server-0.7.2-py3-none-any.whl", hash = "sha256:7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb"}, {file = "tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl", hash = 
"sha256:9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60"}, @@ -5528,6 +5853,7 @@ version = "2.6.2.2" description = "TensorBoardX lets you watch Tensors Flow without Tensorflow" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "tensorboardX-2.6.2.2-py2.py3-none-any.whl", hash = "sha256:160025acbf759ede23fd3526ae9d9bfbfd8b68eb16c38a010ebe326dc6395db8"}, {file = "tensorboardX-2.6.2.2.tar.gz", hash = "sha256:c6476d7cd0d529b0b72f4acadb1269f9ed8b22f441e87a84f2a3b940bb87b666"}, @@ -5544,6 +5870,7 @@ version = "0.18.1" description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0"}, {file = "terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e"}, @@ -5565,6 +5892,7 @@ version = "0.1.5" description = "" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "textgrad-0.1.5.tar.gz", hash = "sha256:5f283aa21b70575f50018b7b6545402c61109292ea628f273416699515ebc009"}, ] @@ -5591,6 +5919,7 @@ version = "0.7.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, @@ -5643,6 +5972,7 @@ version = "1.3.0" description = "A tiny CSS parser" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "tinycss2-1.3.0-py3-none-any.whl", hash = "sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"}, {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"}, @@ -5661,6 +5991,7 @@ version = "1.4.0" description = "Python client for Together's Cloud Platform!" optional = false python-versions = "<4.0,>=3.9" +groups = ["dev"] files = [ {file = "together-1.4.0-py3-none-any.whl", hash = "sha256:0108d5dc4026f238ab6515654774e2466ab2aef33f3694902b548b45097009dd"}, {file = "together-1.4.0.tar.gz", hash = "sha256:8164d7b68a3b875029522d3fac8d44c573825ba3b1998f98ba18c15ecb8a8c82"}, @@ -5690,6 +6021,7 @@ version = "6.1.0" description = "A wrapper around the stdlib `tokenize` which roundtrips." 
optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "tokenize_rt-6.1.0-py2.py3-none-any.whl", hash = "sha256:d706141cdec4aa5f358945abe36b911b8cbdc844545da99e811250c0cee9b6fc"}, {file = "tokenize_rt-6.1.0.tar.gz", hash = "sha256:e8ee836616c0877ab7c7b54776d2fefcc3bde714449a206762425ae114b53c86"}, @@ -5701,6 +6033,7 @@ version = "0.19.1" description = "" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"}, {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"}, @@ -5818,6 +6151,7 @@ version = "2.4.0" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "torch-2.4.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:4ed94583e244af51d6a8d28701ca5a9e02d1219e782f5a01dd401f90af17d8ac"}, {file = "torch-2.4.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c4ca297b7bd58b506bfd6e78ffd14eb97c0e7797dcd7965df62f50bb575d8954"}, @@ -5871,6 +6205,7 @@ version = "1.4.1" description = "PyTorch native Metrics" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "torchmetrics-1.4.1-py3-none-any.whl", hash = "sha256:c2e7cd56dd8bdc60ae63d712f3bdce649f23bd174d9180bdd0b746e0230b865a"}, ] @@ -5898,6 +6233,7 @@ version = "6.4.1" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, @@ -5918,6 +6254,7 @@ version = "4.66.5" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, @@ -5938,6 +6275,7 @@ version = "5.14.3" description = "Traitlets Python configuration system" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, @@ -5953,6 +6291,7 @@ version = "4.44.0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "transformers-4.44.0-py3-none-any.whl", hash = "sha256:ea0ff72def71e9f4812d9414d4803b22681b1617aa6f511bd51cfff2b44a6fca"}, {file = "transformers-4.44.0.tar.gz", hash = "sha256:75699495e30b7635ca444d8d372e138c687ab51a875b387e33f1fb759c37f196"}, @@ -6021,17 +6360,14 @@ version = "3.0.0" description = "A language and compiler for custom Deep Learning operations" optional = false python-versions = "*" +groups = ["dev"] +markers = "platform_system == \"Linux\" and platform_machine == 
\"x86_64\" and python_version < \"3.13\"" files = [ {file = "triton-3.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e1efef76935b2febc365bfadf74bcb65a6f959a9872e5bddf44cc9e0adce1e1a"}, {file = "triton-3.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ce8520437c602fb633f1324cc3871c47bee3b67acf9756c1a66309b60e3216c"}, {file = "triton-3.0.0-1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:34e509deb77f1c067d8640725ef00c5cbfcb2052a1a3cb6a6d343841f92624eb"}, {file = "triton-3.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bcbf3b1c48af6a28011a5c40a5b3b9b5330530c3827716b5fbf6d7adcc1e53e9"}, {file = "triton-3.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6e5727202f7078c56f91ff13ad0c1abab14a0e7f2c87e91b12b6f64f3e8ae609"}, - {file = "triton-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b052da883351fdf6be3d93cedae6db3b8e3988d3b09ed221bccecfa9612230"}, - {file = "triton-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd34f19a8582af96e6291d4afce25dac08cb2a5d218c599163761e8e0827208e"}, - {file = "triton-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d5e10de8c011adeb7c878c6ce0dd6073b14367749e34467f1cff2bde1b78253"}, - {file = "triton-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8903767951bf86ec960b4fe4e21bc970055afc65e9d57e916d79ae3c93665e3"}, - {file = "triton-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41004fb1ae9a53fcb3e970745feb87f0e3c94c6ce1ba86e95fa3b8537894bef7"}, ] [package.dependencies] @@ -6048,6 +6384,7 @@ version = "0.15.1" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "typer-0.15.1-py3-none-any.whl", hash = "sha256:7994fb7b8155b64d3402518560648446072864beefd44aa2dc36972a5972e847"}, {file = "typer-0.15.1.tar.gz", hash = "sha256:a0588c0a7fa68a1978a069818657778f86abe6ff5ea6abf472f940a08bfe4f0a"}, @@ -6065,6 +6402,7 @@ version = "2.9.0.20240316" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, @@ -6076,6 +6414,7 @@ version = "2.32.0.20240712" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-requests-2.32.0.20240712.tar.gz", hash = "sha256:90c079ff05e549f6bf50e02e910210b98b8ff1ebdd18e19c873cd237737c1358"}, {file = "types_requests-2.32.0.20240712-py3-none-any.whl", hash = "sha256:f754283e152c752e46e70942fa2a146b5bc70393522257bb85bd1ef7e019dcc3"}, @@ -6090,6 +6429,7 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -6101,6 +6441,7 @@ version = "0.9.0" description = "Runtime inspection utilities for typing module." optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, @@ -6110,12 +6451,28 @@ files = [ mypy-extensions = ">=0.3.0" typing-extensions = ">=3.7.4" +[[package]] +name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + [[package]] name = "tzdata" version = "2024.1" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["dev"] files = [ {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, @@ -6127,6 +6484,7 @@ version = "5.10.0" description = "Ultra fast JSON encoder and decoder for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "ujson-5.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2601aa9ecdbee1118a1c2065323bda35e2c5a2cf0797ef4522d485f9d3ef65bd"}, {file = "ujson-5.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:348898dd702fc1c4f1051bc3aacbf894caa0927fe2c53e68679c073375f732cf"}, @@ -6214,6 +6572,7 @@ version = "1.3.0" description = "RFC 6570 URI Template Processor" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, @@ -6228,6 +6587,7 @@ version = "4.1.1" description = "Implementation of RFC 6570 URI Templates" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, @@ -6239,13 +6599,14 @@ version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -6256,6 +6617,7 @@ version = "20.26.3" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, @@ -6268,7 +6630,7 @@ platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] [[package]] name = "wcwidth" @@ -6276,6 +6638,7 @@ version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, {file = 
"wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, @@ -6287,6 +6650,7 @@ version = "24.6.0" description = "A library for working with the color formats defined by HTML and CSS." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "webcolors-24.6.0-py3-none-any.whl", hash = "sha256:8cf5bc7e28defd1d48b9e83d5fc30741328305a8195c29a8e668fa45586568a1"}, {file = "webcolors-24.6.0.tar.gz", hash = "sha256:1d160d1de46b3e81e58d0a280d0c78b467dc80f47294b91b1ad8029d2cedb55b"}, @@ -6302,6 +6666,7 @@ version = "0.5.1" description = "Character encoding aliases for legacy web content" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, @@ -6313,6 +6678,7 @@ version = "1.8.0" description = "WebSocket client for Python with low level API options" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, @@ -6329,6 +6695,7 @@ version = "3.0.3" description = "The comprehensive WSGI web application library." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "werkzeug-3.0.3-py3-none-any.whl", hash = "sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8"}, {file = "werkzeug-3.0.3.tar.gz", hash = "sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18"}, @@ -6346,17 +6713,34 @@ version = "4.0.11" description = "Jupyter interactive widgets for Jupyter Notebook" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "widgetsnbextension-4.0.11-py3-none-any.whl", hash = "sha256:55d4d6949d100e0d08b94948a42efc3ed6dfdc0e9468b2c4b128c9a2ce3a7a36"}, {file = "widgetsnbextension-4.0.11.tar.gz", hash = "sha256:8b22a8f1910bfd188e596fe7fc05dcbd87e810c8a4ba010bdb3da86637398474"}, ] +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +groups = ["dev"] +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + [[package]] name = "xxhash" version = "3.4.1" description = "Python binding for xxHash" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "xxhash-3.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:91dbfa55346ad3e18e738742236554531a621042e419b70ad8f3c1d9c7a16e7f"}, {file = "xxhash-3.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:665a65c2a48a72068fcc4d21721510df5f51f1142541c890491afc80451636d2"}, @@ -6474,6 +6858,7 @@ version = "1.9.4" description = "Yet another URL library" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, {file = 
"yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, @@ -6572,6 +6957,6 @@ idna = ">=2.0" multidict = ">=4.0" [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = ">=3.11, <4.0" -content-hash = "30abe3776ca910352fbdbce918d8cc78f9acdd6e6651521395c2119f6faf4a51" +content-hash = "660394179869b6c32688a84af22d3b400de404fb72327f35b30db644fda3733d"