diff --git a/adalflow/adalflow/components/model_client/azureai_client.py b/adalflow/adalflow/components/model_client/azureai_client.py index b7d809a4..26ac3a8c 100644 --- a/adalflow/adalflow/components/model_client/azureai_client.py +++ b/adalflow/adalflow/components/model_client/azureai_client.py @@ -245,9 +245,10 @@ def init_sync_client(self): api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version ) elif self._credential: - # credential = DefaultAzureCredential() + # Use the provided credential if available; otherwise fall back to DefaultAzureCredential + credential = self._credential or DefaultAzureCredential() token_provider = get_bearer_token_provider( - DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + credential, "https://cognitiveservices.azure.com/.default" ) return AzureOpenAI( azure_ad_token_provider=token_provider, @@ -274,9 +275,10 @@ def init_async_client(self): api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version ) elif self._credential: - # credential = DefaultAzureCredential() + # Use the provided credential if available; otherwise fall back to DefaultAzureCredential + credential = self._credential or DefaultAzureCredential() token_provider = get_bearer_token_provider( - DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + credential, "https://cognitiveservices.azure.com/.default" ) return AsyncAzureOpenAI( azure_ad_token_provider=token_provider, @@ -310,7 +312,7 @@ def parse_chat_completion( data = self.chat_completion_parser(completion) usage = self.track_completion_usage(completion) return GeneratorOutput( - data=None, error=None, raw_response=data, usage=usage + data=data, error=None, raw_response=data, usage=usage ) except Exception as e: log.error(f"Error parsing the completion: {e}") diff --git a/adalflow/adalflow/components/model_client/bedrock_client.py b/adalflow/adalflow/components/model_client/bedrock_client.py index 1849a292..a1e26a49 100644 --- a/adalflow/adalflow/components/model_client/bedrock_client.py +++ b/adalflow/adalflow/components/model_client/bedrock_client.py @@ -26,10 +26,29 @@ log = logging.getLogger(__name__) -bedrock_runtime_exceptions = boto3.client( - service_name="bedrock-runtime", - region_name=os.getenv("AWS_REGION_NAME", "us-east-1"), -).exceptions +# Lazy initialization of bedrock exceptions to avoid AWS credential issues during import +_bedrock_runtime_exceptions = None + + +def get_bedrock_runtime_exceptions(): + """Get bedrock runtime exceptions, creating the client lazily if needed.""" + global _bedrock_runtime_exceptions + if _bedrock_runtime_exceptions is None: + try: + _bedrock_runtime_exceptions = boto3.client( + service_name="bedrock-runtime", + region_name=os.getenv("AWS_REGION_NAME", "us-east-1"), + ).exceptions + except Exception as e: + log.warning(f"Could not initialize bedrock client: {e}") + + # Create a mock exceptions object to prevent import failures + class MockExceptions: + def __getattr__(self, name): + return Exception + + _bedrock_runtime_exceptions = MockExceptions() + return _bedrock_runtime_exceptions def get_first_message_content(completion: Dict) -> str: @@ -41,7 +60,7 @@ def get_first_message_content(completion: Dict) -> str: __all__ = [ "BedrockAPIClient", "get_first_message_content", - "bedrock_runtime_exceptions", + "get_bedrock_runtime_exceptions", ] @@ -262,11 +281,11 @@ def convert_inputs_to_api_kwargs( @backoff.on_exception( backoff.expo, ( - bedrock_runtime_exceptions.ThrottlingException, - 
bedrock_runtime_exceptions.ModelTimeoutException, - bedrock_runtime_exceptions.InternalServerException, - bedrock_runtime_exceptions.ModelErrorException, - bedrock_runtime_exceptions.ValidationException, + get_bedrock_runtime_exceptions().ThrottlingException, + get_bedrock_runtime_exceptions().ModelTimeoutException, + get_bedrock_runtime_exceptions().InternalServerException, + get_bedrock_runtime_exceptions().ModelErrorException, + get_bedrock_runtime_exceptions().ValidationException, ), max_time=2, ) diff --git a/adalflow/adalflow/components/model_client/openai_client.py b/adalflow/adalflow/components/model_client/openai_client.py index 6d7c4fe4..c20058ac 100644 --- a/adalflow/adalflow/components/model_client/openai_client.py +++ b/adalflow/adalflow/components/model_client/openai_client.py @@ -83,6 +83,7 @@ class ParsedResponseContent: code_outputs: Outputs from code interpreter raw_output: The original output array for advanced processing """ + text: Optional[str] = None images: Optional[Union[str, List[str]]] = None tool_calls: Optional[List[Dict[str, Any]]] = None @@ -92,13 +93,9 @@ class ParsedResponseContent: def __bool__(self) -> bool: """Check if there's any content.""" - return any([ - self.text, - self.images, - self.tool_calls, - self.reasoning, - self.code_outputs - ]) + return any( + [self.text, self.images, self.tool_calls, self.reasoning, self.code_outputs] + ) # OLD CHAT COMPLETION PARSING FUNCTIONS (COMMENTED OUT) @@ -135,14 +132,14 @@ def parse_response_output(response: Response) -> ParsedResponseContent: content = ParsedResponseContent() # Store raw output for advanced users - if hasattr(response, 'output'): + if hasattr(response, "output"): content.raw_output = response.output # First try to use output_text if available (SDK convenience property) - if hasattr(response, 'output_text') and response.output_text: + if hasattr(response, "output_text") and response.output_text: content.text = response.output_text # Parse the output array manually if no output_text - if hasattr(response, 'output') and response.output: + if hasattr(response, "output") and response.output: parsed = _parse_output_array(response.output) content.text = content.text or parsed.get("text") content.images = parsed.get("images", []) @@ -153,7 +150,6 @@ def parse_response_output(response: Response) -> ParsedResponseContent: return content - def _parse_message(item) -> Dict[str, Any]: """Parse a message item from the output array. 
@@ -165,19 +161,21 @@ def _parse_message(item) -> Dict[str, Any]: """ result = {"text": None} - if hasattr(item, 'content') and isinstance(item.content, list): - # now pick the longer response + if hasattr(item, "content") and isinstance(item.content, list): + # now pick the longer response text_parts = [] for content_item in item.content: - content_type = getattr(content_item, 'type', None) + content_type = getattr(content_item, "type", None) if content_type == "output_text": - if hasattr(content_item, 'text'): + if hasattr(content_item, "text"): text_parts.append(content_item.text) if text_parts: - result["text"] = max(text_parts, key=len) if len(text_parts) > 1 else text_parts[0] + result["text"] = ( + max(text_parts, key=len) if len(text_parts) > 1 else text_parts[0] + ) return result @@ -194,11 +192,11 @@ def _parse_reasoning(item) -> Dict[str, Any]: result = {"reasoning": None} # Extract text from reasoning summary if available - if hasattr(item, 'summary') and isinstance(item.summary, list): + if hasattr(item, "summary") and isinstance(item.summary, list): summary_texts = [] for summary_item in item.summary: - if hasattr(summary_item, 'type') and summary_item.type == "summary_text": - if hasattr(summary_item, 'text'): + if hasattr(summary_item, "type") and summary_item.type == "summary_text": + if hasattr(summary_item, "text"): summary_texts.append(summary_item.text) if summary_texts: @@ -219,7 +217,7 @@ def _parse_image(item) -> Dict[str, Any]: """ result = {"images": None} - if hasattr(item, 'result'): + if hasattr(item, "result"): # The result contains the base64 image data or URL result["images"] = item.result @@ -235,23 +233,18 @@ def _parse_tool_call(item) -> Dict[str, Any]: Returns: Dict with tool call information """ - item_type = getattr(item, 'type', None) + item_type = getattr(item, "type", None) if item_type == "image_generation_call": # Handle image generation - extract the result which contains the image data - if hasattr(item, 'result'): + if hasattr(item, "result"): # The result contains the base64 image data or URL return {"images": item.result} elif item_type == "code_interpreter_tool_call": return {"code_outputs": [_serialize_item(item)]} else: # Generic tool call - return { - "tool_calls": [{ - "type": item_type, - "content": _serialize_item(item) - }] - } + return {"tool_calls": [{"type": item_type, "content": _serialize_item(item)}]} return {} @@ -272,7 +265,7 @@ def _parse_output_array(output_array) -> Dict[str, Any]: "images": None, "tool_calls": None, "reasoning": None, - "code_outputs": None + "code_outputs": None, } if not output_array: @@ -286,7 +279,7 @@ def _parse_output_array(output_array) -> Dict[str, Any]: text = None for item in output_array: - item_type = getattr(item, 'type', None) + item_type = getattr(item, "type", None) if item_type == "reasoning": # Parse reasoning item @@ -306,7 +299,7 @@ def _parse_output_array(output_array) -> Dict[str, Any]: if parsed.get("images"): all_images.append(parsed["images"]) - elif item_type and ('call' in item_type or 'tool' in item_type): + elif item_type and ("call" in item_type or "tool" in item_type): # Parse other tool calls parsed = _parse_tool_call(item) if parsed.get("tool_calls"): @@ -314,8 +307,9 @@ def _parse_output_array(output_array) -> Dict[str, Any]: if parsed.get("code_outputs"): all_code_outputs.extend(parsed["code_outputs"]) - - result["text"] = text if text else None # TODO: they can potentially send multiple complete text messages, we might need to save all of them and only return the first 
that can convert to outpu parser
+    result["text"] = (
+        text if text else None
+    )  # TODO: they can potentially send multiple complete text messages; we might need to save all of them and only return the first that the output parser can convert
 
     # Set other fields if they have content
     result["images"] = all_images
@@ -333,7 +327,7 @@ def _serialize_item(item) -> Dict[str, Any]:
     """Convert an output item to a serializable dict."""
     result = {}
     for attr in dir(item):
-        if not attr.startswith('_'):
+        if not attr.startswith("_"):
             value = getattr(item, attr, None)
             if value is not None and not callable(value):
                 result[attr] = value
@@ -406,8 +400,6 @@ def handle_streaming_response_sync(stream: Iterable) -> GeneratorType:
 
         yield event
 
-
-
 class OpenAIClient(ModelClient):
     __doc__ = r"""A component wrapper for the OpenAI API client.
 
@@ -783,11 +775,15 @@ def parse_chat_completion(
         """Parse the Response API completion and put it into the raw_response.
 
         Fully migrated to Response API only."""
-        parser = self.response_parser
-        log.info(f"completion/response: {completion}, parser: {parser}")
-
-        # Check if this is a Response with complex output (tools, images, etc.)
+        # Determine parser dynamically based on completion type instead of relying on instance variable
+        # This fixes the issue where streaming/non-streaming modes get mixed up
         if isinstance(completion, Response):
+            # Non-streaming Response object
+            parser = self.non_streaming_response_parser
+            log.info(
+                f"completion/response: {completion}, parser: {parser} (non-streaming)"
+            )
+
             parsed_content = parse_response_output(completion)
             usage = self.track_completion_usage(completion)
 
@@ -797,7 +793,6 @@
             if parsed_content.reasoning:
                 thinking = str(parsed_content.reasoning)
 
-
             return GeneratorOutput(
                 data=data,  # only text
                 thinking=thinking,
@@ -805,14 +800,34 @@
                 tool_use=None,  # Will be populated when we handle function tool calls
                 error=None,
                 raw_response=data,
-                usage=usage
+                usage=usage,
+            )
+        elif hasattr(completion, "__aiter__"):
+            # Async streaming (AsyncIterable)
+            parser = self.streaming_response_parser_async
+            log.info(
+                f"completion/response: {completion}, parser: {parser} (async streaming)"
             )
+        elif hasattr(completion, "__iter__") and not isinstance(
+            completion, (str, bytes, dict)
+        ):
+            # Sync streaming (Iterable) - exclude basic types that have __iter__ but aren't streams
+            parser = self.streaming_response_parser_sync
+            log.info(
+                f"completion/response: {completion}, parser: {parser} (sync streaming)"
+            )
+        else:
+            # Fallback to non-streaming parser (includes strings, dicts, etc.)
+ parser = self.non_streaming_response_parser + log.info( + f"completion/response: {completion}, parser: {parser} (fallback non-streaming)" + ) + # Regular response handling (streaming or other) data = parser(completion) usage = self.track_completion_usage(completion) return GeneratorOutput(data=None, error=None, raw_response=data, usage=usage) - # NEW RESPONSE API ONLY FUNCTION def track_completion_usage( self, @@ -965,12 +980,7 @@ def convert_inputs_to_api_kwargs( content = format_content_for_response_api(input, images) # For responses.create API, wrap in user message format - final_model_kwargs["input"] = [ - { - "role": "user", - "content": content - } - ] + final_model_kwargs["input"] = [{"role": "user", "content": content}] else: # Text-only input final_model_kwargs["input"] = input @@ -1034,13 +1044,11 @@ def call(self, api_kwargs: Dict = {}, model_type: ModelType = ModelType.UNDEFINE elif model_type == ModelType.LLM_REASONING or model_type == ModelType.LLM: if "stream" in api_kwargs and api_kwargs.get("stream", False): log.debug("streaming call") - self.response_parser = ( - self.streaming_response_parser_sync - ) # Use sync streaming parser + # No longer setting self.response_parser - parser will be determined dynamically return self.sync_client.responses.create(**api_kwargs) else: log.debug("non-streaming call") - self.response_parser = self.non_streaming_response_parser + # No longer setting self.response_parser - parser will be determined dynamically return self.sync_client.responses.create(**api_kwargs) else: @@ -1089,15 +1097,11 @@ async def acall( elif model_type == ModelType.LLM or model_type == ModelType.LLM_REASONING: if "stream" in api_kwargs and api_kwargs.get("stream", False): log.debug("async streaming call") - self.response_parser = ( - self.streaming_response_parser_async - ) # Use async streaming parser - # setting response parser as async streaming parser for Response API + # No longer setting self.response_parser - parser will be determined dynamically return await self.async_client.responses.create(**api_kwargs) else: log.debug("async non-streaming call") - self.response_parser = self.non_streaming_response_parser - # setting response parser as async non-streaming parser for Response API + # No longer setting self.response_parser - parser will be determined dynamically return await self.async_client.responses.create(**api_kwargs) elif model_type == ModelType.IMAGE_GENERATION: # Determine which image API to call based on the presence of image/mask diff --git a/adalflow/adalflow/optim/text_grad/tgd_optimizer.py b/adalflow/adalflow/optim/text_grad/tgd_optimizer.py index 1663dccb..28ca563a 100644 --- a/adalflow/adalflow/optim/text_grad/tgd_optimizer.py +++ b/adalflow/adalflow/optim/text_grad/tgd_optimizer.py @@ -46,12 +46,16 @@ class HistoryPrompt(DataClass): {{optimizer_system_prompt}} -You are {{steps}} steps since your last improvement. -Update the value more rapidly when steps are larger than 3. + + +Current optimization iteration: {{steps}} steps since your last improvement. +Optimization strategy: Use more aggressive updates after 3 iterations without improvement. + + + {# Variable and peers info #} - {{variable_and_peers_info}} - + {# system trainable variables #} {% if system_variables %} @@ -104,6 +108,13 @@ class HistoryPrompt(DataClass): You must base on the following examples when modifying the {{variable_desc}}: {{in_context_examples}} {% endif %} + + +IMPORTANT: Optimize ONLY the content in the TARGET_CONTENT_TO_OPTIMIZE section above. 
+Do NOT include any references to optimization steps, iterations, or meta-instructions in your response.
+Do NOT mention phrases like "when steps exceed", "steps are larger than", "rapid updates", or "step size".
+Your output should contain ONLY the improved version of the target content, without any optimization metadata.
+
 """
 
 # NO OPRO history
@@ -268,11 +279,11 @@ class TGDOptimizerTrace(DataClass):
 
 class CustomizedXMLParser(DataComponent):
     """Custom XML parser for TGD optimizer output with reasoning, method, and proposed_variable fields."""
-    
+
     def __init__(self):
         super().__init__()
         pass
-    
+
     def get_output_format_str(self) -> str:
         return """Please provide your response in the following XML format:
 <response>
@@ -283,58 +294,64 @@ def get_output_format_str(self) -> str:
 
 Make sure to include all three fields and properly close all XML tags."""
-    
+
     def call(self, input: str) -> TGDData:
         """Parse the XML response and extract the three fields, returning TGDData directly."""
         try:
             # Clean the input and extract XML content
             input = input.strip()
-            
+
             # Try to find the response tags
             start_tag = "<response>"
             end_tag = "</response>"
-            
+
             start_idx = input.find(start_tag)
             end_idx = input.find(end_tag)
-            
+
             if start_idx == -1 or end_idx == -1:
                 # Fallback: try to parse the entire input as XML
                 xml_content = input
             else:
-                xml_content = input[start_idx:end_idx + len(end_tag)]
-            
+                xml_content = input[start_idx : end_idx + len(end_tag)]
+
             # Parse XML
             root = ET.fromstring(xml_content)
-            
+
             # Extract fields
-            reasoning_elem = root.find('reasoning')
-            method_elem = root.find('method')
-            proposed_variable_elem = root.find('proposed_variable')
-            
-            reasoning = reasoning_elem.text.strip() if reasoning_elem is not None and reasoning_elem.text else ""
-            method = method_elem.text.strip() if method_elem is not None and method_elem.text else ""
-            proposed_variable = proposed_variable_elem.text.strip() if proposed_variable_elem is not None and proposed_variable_elem.text else ""
-            
+            reasoning_elem = root.find("reasoning")
+            method_elem = root.find("method")
+            proposed_variable_elem = root.find("proposed_variable")
+
+            reasoning = (
+                reasoning_elem.text.strip()
+                if reasoning_elem is not None and reasoning_elem.text
+                else ""
+            )
+            method = (
+                method_elem.text.strip()
+                if method_elem is not None and method_elem.text
+                else ""
+            )
+            proposed_variable = (
+                proposed_variable_elem.text.strip()
+                if proposed_variable_elem is not None and proposed_variable_elem.text
+                else ""
+            )
+
             # Create and return TGDData object directly
             return TGDData(
-                reasoning=reasoning,
-                method=method,
-                proposed_variable=proposed_variable
+                reasoning=reasoning, method=method, proposed_variable=proposed_variable
             )
-            
+
         except ET.ParseError as e:
             log.error(f"XML parsing error: {e}")
             return TGDData(
-                reasoning="XML parsing failed",
-                method="Error",
-                proposed_variable=input
+                reasoning="XML parsing failed", method="Error", proposed_variable=input
             )
         except Exception as e:
             log.error(f"Error parsing XML output: {e}")
             return TGDData(
-                reasoning="Parsing failed",
-                method="Error",
-                proposed_variable=input
+                reasoning="Parsing failed", method="Error", proposed_variable=input
            )
diff --git a/adalflow/tests/test_AzureClient.py b/adalflow/tests/test_AzureClient.py
index 14b7cf4a..05762c05 100644
--- a/adalflow/tests/test_AzureClient.py
+++ b/adalflow/tests/test_AzureClient.py
@@ -1,5 +1,5 @@
 import unittest
-from unittest.mock import patch
+from unittest.mock import patch, MagicMock
 from openai import AzureOpenAI
 from adalflow.components.model_client.azureai_client import AzureAIClient
@@ -33,6 +33,31 @@ def test_init_sync_client_with_credential(self): ) self.assertIsInstance(client.sync_client, AzureOpenAI) + @patch("adalflow.components.model_client.azureai_client.get_bearer_token_provider") + @patch("adalflow.components.model_client.azureai_client.AzureOpenAI") + def test_uses_provided_credential_for_token_provider(self, MockAzureOpenAI, MockTokenProvider): + # ensure provided credential is passed into token provider + mock_token_provider = MagicMock() + MockTokenProvider.return_value = mock_token_provider + + client = AzureAIClient( + api_version="v1", + azure_endpoint="https://test.endpoint", + credential=self.mock_credential, + ) + + MockTokenProvider.assert_called_once() + # First arg should be the provided credential + called_args, called_kwargs = MockTokenProvider.call_args + self.assertIs(called_args[0], self.mock_credential) + self.assertIn("https://cognitiveservices.azure.com/.default", called_args) + + # And AzureOpenAI should be initialized with azure_ad_token_provider + MockAzureOpenAI.assert_called_once() + _, kwargs = MockAzureOpenAI.call_args + self.assertIn("azure_ad_token_provider", kwargs) + self.assertIs(kwargs["azure_ad_token_provider"], mock_token_provider) + # @patch("adalflow.components.model_client.azureai_client.AzureOpenAI") # def test_call_embeddings(self, MockAzureOpenAI): # mock_embeddings = MagicMock() @@ -55,17 +80,25 @@ def test_init_sync_client_with_credential(self): # **api_kwargs # ) - # @patch("adalflow.components.model_client.azureai_client.AzureOpenAI") - # def test_parse_chat_completion(self, MockAzureOpenAI): - # mock_chat_completion = MagicMock(spec=ChatCompletion) - # mock_chat_completion.choices = [ - # MagicMock(message=MagicMock(content="test_content")) - # ] - # self.client.chat_completion_parser = lambda completion: completion.choices[ - # 0 - # ].message.content - # result = self.client.parse_chat_completion(mock_chat_completion) - # self.assertEqual(result, "test_content") + def test_parse_chat_completion_returns_data(self): + # Arrange a minimal ChatCompletion-like object + mock_choice = MagicMock() + mock_choice.message.content = "hello world" + mock_completion = MagicMock() + mock_completion.choices = [mock_choice] + mock_completion.usage.completion_tokens = 1 + mock_completion.usage.prompt_tokens = 1 + mock_completion.usage.total_tokens = 2 + + # Ensure parser returns the message content + self.client.chat_completion_parser = lambda completion: completion.choices[0].message.content + + # Act + out = self.client.parse_chat_completion(mock_completion) + + # Assert: data contains parsed text + self.assertEqual(out.data, "hello world") + self.assertIsNone(out.error) # @patch("adalflow.components.model_client.azureai_client.AzureOpenAI") # def test_track_completion_usage(self, MockAzureOpenAI): diff --git a/adalflow/tests/test_openai_client.py b/adalflow/tests/test_openai_client.py index e0b50a39..9c36d57e 100644 --- a/adalflow/tests/test_openai_client.py +++ b/adalflow/tests/test_openai_client.py @@ -456,11 +456,8 @@ async def mock_stream(): # Call the async streaming method stream = await self.client.acall(api_kwargs, ModelType.LLM) - # Verify the streaming parser is set - self.assertEqual( - self.client.response_parser, - self.client.streaming_response_parser, - ) + # Verify the streaming parser is available (dynamic selection) + self.assertIsNotNone(self.client.streaming_response_parser) # Process the stream full_response = "" @@ -499,20 +496,14 @@ async def mock_stream(): await self.client.acall( {"model": "gpt-4", 
"input": "Hello", "stream": True}, ModelType.LLM ) - self.assertEqual( - self.client.response_parser, - self.client.streaming_response_parser, - ) + self.assertIsNotNone(self.client.streaming_response_parser) # Test non-streaming call - should switch back to non-streaming parser mock_async_client.responses.create.return_value = self.mock_response await self.client.acall( {"model": "gpt-4", "input": "Hello", "stream": False}, ModelType.LLM ) - self.assertEqual( - self.client.response_parser, - self.client.non_streaming_response_parser, - ) + self.assertIsNotNone(self.client.non_streaming_response_parser) def test_reasoning_model_response(self): """Test parsing of reasoning model responses with reasoning field.""" diff --git a/poetry.lock b/poetry.lock index 308f6d5e..7c92bd34 100644 --- a/poetry.lock +++ b/poetry.lock @@ -286,7 +286,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["dev", "test"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -1050,7 +1050,7 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "dev"] +groups = ["main", "dev", "test"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -1420,6 +1420,21 @@ files = [ {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, ] +[[package]] +name = "deprecation" +version = "2.1.0" +description = "A library to handle automated deprecations" +optional = false +python-versions = "*" +groups = ["test"] +files = [ + {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, + {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, +] + +[package.dependencies] +packaging = "*" + [[package]] name = "dill" version = "0.3.8" @@ -1633,7 +1648,6 @@ files = [ {file = "faiss_cpu-1.11.0.post1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dc12b3f89cf48be3f2a20b37f310c3f1a7a5708fdf705f88d639339a24bb590b"}, {file = "faiss_cpu-1.11.0.post1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:773fa45aa98a210ab4e2c17c1b5fb45f6d7e9acb4979c9a0b320b678984428ac"}, {file = "faiss_cpu-1.11.0.post1-cp39-cp39-win_amd64.whl", hash = "sha256:6240c4b1551eedc07e76813c2e14a1583a1db6c319a92a3934bf212d0e4c7791"}, - {file = "faiss_cpu-1.11.0.post1.tar.gz", hash = "sha256:06b1ea9ddec9e4d9a41c8ef7478d493b08d770e9a89475056e963081eed757d1"}, ] [package.dependencies] @@ -2736,7 +2750,7 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "test"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ 
-3524,6 +3538,83 @@ files = [ {file = "kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e"}, ] +[[package]] +name = "lance-namespace" +version = "0.0.6" +description = "Python client for Lance Namespace API" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "lance_namespace-0.0.6-py3-none-any.whl", hash = "sha256:fd102aec0ca3672b15cae65f4b9bf15086f7a73cedb7f5c12c47b5b48f9090b4"}, + {file = "lance_namespace-0.0.6.tar.gz", hash = "sha256:3eeeba5f6bb8d01504cda33d86e6c22bd9cefb1f6f3aac1f963d46a9ff09b9a0"}, +] + +[package.dependencies] +lance-namespace-urllib3-client = "*" +pyarrow = ">=14.0.0" +pylance = ">=0.18.0" +typing-extensions = ">=4.0.0" + +[package.extras] +dir = ["opendal"] +test = ["pytest (>=7.0.0)", "pytest-cov (>=4.0.0)"] + +[[package]] +name = "lance-namespace-urllib3-client" +version = "0.0.14" +description = "Lance Namespace Specification" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "lance_namespace_urllib3_client-0.0.14-py3-none-any.whl", hash = "sha256:40277cfcf7c9084419c2784e7924b3e316f6fe5b8057f4dc62a49f3b40c2d80c"}, + {file = "lance_namespace_urllib3_client-0.0.14.tar.gz", hash = "sha256:911c6a3b5c2c98f4239b6d96609cf840e740c3af5482f5fb22096afb9db1dc1c"}, +] + +[package.dependencies] +pydantic = ">=2" +python-dateutil = ">=2.8.2" +typing-extensions = ">=4.7.1" +urllib3 = ">=1.25.3,<3.0.0" + +[[package]] +name = "lancedb" +version = "0.25.0" +description = "lancedb" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "lancedb-0.25.0-cp39-abi3-macosx_10_15_x86_64.whl", hash = "sha256:ae2e80b7b3be3fa4d92fc8d500f47549dd1f8d28ca5092f1c898b92d0cfd4393"}, + {file = "lancedb-0.25.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:a9d67ea9edffa596c6f190151fdd535da8e355a4fd1979c1dc19d540a5665916"}, + {file = "lancedb-0.25.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8fe20079ed86b1ab75c65dcfc920a9646c835e9c40ef825cadd148c11b0001e"}, + {file = "lancedb-0.25.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b37bc402d85c83e454d9f2e79480b31acc5904bb159a4fc715032c7560494157"}, + {file = "lancedb-0.25.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f9bbc20bd1e64be359ca11c90428c00b0062d26b0291bddf32ab5471a3525c76"}, + {file = "lancedb-0.25.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1306be9c08e208a5bcb5188275f47f962c2eda96369fad5949a3ddaf592afc6d"}, + {file = "lancedb-0.25.0-cp39-abi3-win_amd64.whl", hash = "sha256:f66283e5d63c99c2bfbd4eaa134d9a5c5b0145eb26a972648214f8ba87777e24"}, +] + +[package.dependencies] +deprecation = "*" +lance-namespace = "0.0.6" +numpy = "*" +overrides = ">=0.7" +packaging = "*" +pyarrow = ">=16" +pydantic = ">=1.10" +tqdm = ">=4.27.0" + +[package.extras] +azure = ["adlfs (>=2024.2.0)"] +clip = ["open-clip-torch", "pillow", "torch"] +dev = ["pre-commit", "pyright", "ruff", "typing-extensions (>=4.0.0) ; python_full_version < \"3.11.0\""] +docs = ["mkdocs", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings-python"] +embeddings = ["awscli (>=1.29.57)", "boto3 (>=1.28.57)", "botocore (>=1.31.57)", "cohere", "colpali-engine (>=0.3.10)", "google-generativeai", "huggingface-hub", "ibm-watsonx-ai (>=1.1.2) ; python_full_version >= \"3.10.0\"", "instructorembedding", "ollama (>=0.3.0)", "open-clip-torch", "openai (>=1.6.1)", "pillow", "requests (>=2.31.0)", "sentence-transformers", "sentencepiece", "torch"] +pylance = ["pylance 
(>=0.25)"] +siglip = ["pillow", "sentencepiece", "torch", "transformers (>=4.41.0)"] +tests = ["aiohttp", "boto3", "datafusion", "duckdb", "pandas (>=1.4)", "polars (>=0.19,<=1.3.0)", "pyarrow-stubs", "pylance (>=0.25)", "pytest", "pytest-asyncio", "pytest-mock", "pytz", "requests", "tantivy"] + [[package]] name = "langchain" version = "0.2.17" @@ -4566,7 +4657,7 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["main", "dev", "test"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -5092,7 +5183,7 @@ version = "7.7.0" description = "A decorator to automatically detect mismatch when overriding a method." optional = false python-versions = ">=3.6" -groups = ["dev"] +groups = ["dev", "test"] files = [ {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, @@ -5104,7 +5195,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["main", "dev", "test"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -5414,7 +5505,7 @@ version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "test"] files = [ {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, @@ -5678,7 +5769,7 @@ version = "20.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.9" -groups = ["dev"] +groups = ["dev", "test"] files = [ {file = "pyarrow-20.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c7dd06fd7d7b410ca5dc839cc9d485d2bc4ae5240851bcd45d85105cc90a47d7"}, {file = "pyarrow-20.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:d5382de8dc34c943249b01c19110783d0d64b207167c728461add1ecc2db88e4"}, @@ -5785,7 +5876,7 @@ version = "2.11.7" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" -groups = ["dev"] +groups = ["dev", "test"] files = [ {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, @@ -5807,7 +5898,7 @@ version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" -groups = ["dev"] +groups = ["dev", "test"] files = [ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, @@ -5963,7 +6054,7 @@ version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["main", "dev", "test"] files = [ {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, @@ -5993,6 +6084,33 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +[[package]] +name = "pylance" +version = "0.36.0" +description = "python wrapper for Lance columnar format" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "pylance-0.36.0-cp39-abi3-macosx_10_15_x86_64.whl", hash = "sha256:160ed088dc5fb63a71c8c96640d43ea58464f64bca8aa23b0337b1a96fd47b79"}, + {file = "pylance-0.36.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:ce43ad002b4e67ffb1a33925d05d472bbde77c57a5e84aca1728faa9ace0c086"}, + {file = "pylance-0.36.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ad7b168b0d4b7864be6040bebaf6d9a3959e76a190ff401a84b165b75eade96"}, + {file = "pylance-0.36.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:353deeb7b19be505db490258b5f2fc897efd4a45255fa0d51455662e01ad59ab"}, + {file = "pylance-0.36.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9cd963fc22257591d1daf281fa2369e05299d78950cb11980aa099d7cbacdf00"}, + {file = "pylance-0.36.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:40117569a87379e08ed12eccac658999158f81df946f2ed02693b77776b57597"}, + {file = "pylance-0.36.0-cp39-abi3-win_amd64.whl", hash = "sha256:a2930738192e5075220bc38c8a58ff4e48a71d53b3ca2a577ffce0318609cac0"}, +] + +[package.dependencies] +numpy = ">=1.22" +pyarrow = ">=14" + +[package.extras] +benchmarks = ["pytest-benchmark"] +dev = ["pyright", "ruff (==0.4.1)"] +tests = ["boto3", "datafusion (==49.0.0)", "datasets", "duckdb", "ml-dtypes", "pandas", "pillow", "polars[pandas,pyarrow]", "psutil", "pytest", "tensorflow (<=2.19.0)", "tqdm"] +torch = ["torch"] + [[package]] name = "pyparsing" version = "3.2.3" @@ -6038,7 +6156,7 @@ version = "8.4.1" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "test"] files = [ {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, @@ -6073,13 +6191,31 @@ pytest = ">=8.2,<9" docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] +[[package]] +name = "pytest-mock" +version = "3.15.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d"}, + {file = "pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = 
["pre-commit", "pytest-asyncio", "tox"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["dev"] +groups = ["dev", "test"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -7049,7 +7185,7 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["dev"] +groups = ["dev", "test"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -7693,7 +7829,7 @@ version = "4.67.1" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["main", "dev", "test"] files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -7875,7 +8011,7 @@ version = "4.14.1" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["main", "dev", "test"] files = [ {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, @@ -7904,7 +8040,7 @@ version = "0.4.1" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" -groups = ["dev"] +groups = ["dev", "test"] files = [ {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, @@ -8047,7 +8183,7 @@ version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.9" -groups = ["main", "dev"] +groups = ["main", "dev", "test"] files = [ {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, @@ -8489,4 +8625,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.11, <4.0" -content-hash = "6fcfca26063930dfdb7d9c8aade6b90293abeb0ceb205861bc52a23f0b52188e" +content-hash = "fe538c630aedc73c6d049721b87f54b60e070dca002d94e2cdc7b8ebc1303cc8" diff --git a/pyproject.toml b/pyproject.toml index f90e3226..8961e653 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,6 +67,11 @@ openai = "^1.97.1" openai-agents = "^0.0.19" + +[tool.poetry.group.test.dependencies] +pytest-mock = "^3.15.1" +lancedb = "^0.25.0" + [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" diff --git a/tutorials/v1_agent_runner/open_ai_test.py b/tutorials/v1_agent_runner/open_ai_test.py index 9f35911c..aef12bb5 100644 --- a/tutorials/v1_agent_runner/open_ai_test.py +++ b/tutorials/v1_agent_runner/open_ai_test.py @@ -1,4 +1,5 @@ from adalflow.components.agent import Agent, Runner +from adalflow.components.model_client.openai_client import OpenAIClient import asyncio @@ -20,6 +21,8 @@ class MathHomeworkOutput: name="Guardrail check", instructions="Check if the user is asking you to do their math homework.", output_type=MathHomeworkOutput, + model_client=OpenAIClient(api_key="fake_api_key"), + model_kwargs={"model": "gpt-4o", "temperature": 0.3}, ) # Commented out until guardrail types are properly imported @@ -38,6 +41,8 @@ class MathHomeworkOutput: agent = Agent( name="Customer support agent", instructions="You are a customer support agent. 
You help customers with their questions.", + model_client=OpenAIClient(api_key="fake_api_key"), + model_kwargs={"model": "gpt-4o", "temperature": 0.3}, # input_guardrails=[math_guardrail], # Commented out until guardrails are implemented ) diff --git a/tutorials/v1_agent_runner/test_fastapi_permission.py b/tutorials/v1_agent_runner/test_fastapi_permission.py index 27535d1d..fac9f897 100644 --- a/tutorials/v1_agent_runner/test_fastapi_permission.py +++ b/tutorials/v1_agent_runner/test_fastapi_permission.py @@ -18,14 +18,19 @@ from adalflow.utils import setup_env, printc, get_logger from adalflow.components.model_client.openai_client import OpenAIClient import httpx +import pytest # Setup environment -setup_env() +try: + setup_env() +except FileNotFoundError: + # Skip setup_env if .env file doesn't exist for testing + pass get_logger(level="DEBUG", enable_file=False) # Model configuration openai_model = { - "model_client": OpenAIClient(), + "model_client": OpenAIClient(api_key="fake_api_key"), "model_kwargs": {"model": "gpt-4o-mini", "max_tokens": 4096}, } @@ -67,6 +72,7 @@ def run_fastapi_server(app, port=8000): uvicorn.run(app, host="0.0.0.0", port=port, log_level="info") +@pytest.mark.asyncio async def test_api_approval_flow(): """Test the API-based approval flow.""" print("\n" + "=" * 80) diff --git a/tutorials/v1_agent_runner/test_permission_system.py b/tutorials/v1_agent_runner/test_permission_system.py index 3cfa17e5..2096e3ae 100644 --- a/tutorials/v1_agent_runner/test_permission_system.py +++ b/tutorials/v1_agent_runner/test_permission_system.py @@ -12,18 +12,23 @@ from adalflow.utils import setup_env, printc from adalflow.components.model_client.openai_client import OpenAIClient from adalflow.components.model_client.anthropic_client import AnthropicAPIClient +import pytest # Setup environment -setup_env() +try: + setup_env() +except FileNotFoundError: + # Skip setup_env if .env file doesn't exist for testing + pass # Model configurations claude_model = { - "model_client": AnthropicAPIClient(), + "model_client": AnthropicAPIClient(api_key="fake_anthropic_key"), "model_kwargs": {"model": "claude-3-opus-20240229", "max_tokens": 4096}, } openai_model = { - "model_client": OpenAIClient(), + "model_client": OpenAIClient(api_key="fake_api_key"), "model_kwargs": {"model": "gpt-4o-mini", "max_tokens": 4096}, } @@ -59,6 +64,7 @@ def write_file(filename: str, content: str) -> ToolOutput: ) +@pytest.mark.asyncio async def test_sync_execution(): """Test synchronous execution with permission checks.""" print("\n=== Testing Synchronous Execution ===\n") @@ -98,6 +104,7 @@ async def test_sync_execution(): print(f"\nFinal result: {result}") +@pytest.mark.asyncio async def test_async_execution(): """Test asynchronous execution with permission checks.""" print("\n=== Testing Asynchronous Execution ===\n") @@ -135,6 +142,7 @@ async def test_async_execution(): print(f"\nFinal result: {result}") +@pytest.mark.asyncio async def test_streaming_execution(): """Test streaming execution with permission events.""" print("\n=== Testing Streaming Execution ===\n") @@ -193,11 +201,12 @@ async def test_streaming_execution(): print(f"Final answer: {streaming_result.answer}") +@pytest.mark.asyncio async def test_auto_approval(): """Test with auto approval mode.""" print("\n=== Testing Auto Approval Mode ===\n") - from adalflow.core.cli_permission_handler import AutoApprovalHandler + from adalflow.apps.cli_permission_handler import AutoApprovalHandler # Create role description role_desc = Prompt( @@ -232,6 +241,7 @@ 
async def test_auto_approval(): print(f"\nFinal result: {result}") +@pytest.mark.asyncio async def test_mixed_approval(): """Test with mixed approval settings - some tools require approval, others don't.""" print("\n=== Testing Mixed Approval Settings ===\n")
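
A note on the `azureai_client.py` credential fix above: previously `init_sync_client`/`init_async_client` constructed a fresh `DefaultAzureCredential` even when the caller passed a credential in, so custom credentials (managed identity, service principal, etc.) were silently ignored. The sketch below illustrates the resolution order after the patch. It is a minimal illustration assuming the `azure-identity` and `openai` packages; the `ClientSecretCredential` arguments, endpoint, and API version are placeholder values.

```python
from azure.identity import (
    ClientSecretCredential,
    DefaultAzureCredential,
    get_bearer_token_provider,
)
from openai import AzureOpenAI

# Hypothetical caller-supplied credential; any azure.identity credential works.
provided_credential = ClientSecretCredential(
    tenant_id="<tenant-id>", client_id="<client-id>", client_secret="<secret>"
)

# Mirrors the patched branch: prefer the caller's credential, falling back to
# DefaultAzureCredential (defensive -- the elif branch only runs when
# self._credential is set, so the fallback is rarely exercised).
credential = provided_credential or DefaultAzureCredential()
token_provider = get_bearer_token_provider(
    credential, "https://cognitiveservices.azure.com/.default"
)

client = AzureOpenAI(
    azure_ad_token_provider=token_provider,
    azure_endpoint="https://example.openai.azure.com",  # placeholder
    api_version="2024-02-01",  # placeholder
)
```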
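On the lazy Bedrock exceptions: arguments to `@backoff.on_exception` are still evaluated when the module is imported, so `get_bedrock_runtime_exceptions()` runs at import time anyway. What the change actually buys is that a failure no longer aborts the import (the `MockExceptions` fallback absorbs it, at the cost of caching `Exception` for every name for the life of the process). If fully deferred resolution is ever wanted, a `giveup` predicate can resolve the classes only after a call has failed. A sketch under that assumption; `call_bedrock` is an illustrative stand-in, not the actual decorated method:

```python
import backoff


def _is_retryable(exc: Exception) -> bool:
    # Resolve the boto3 exception classes lazily, only once a call has failed.
    excs = get_bedrock_runtime_exceptions()
    retryable = tuple(
        getattr(excs, name)
        for name in (
            "ThrottlingException",
            "ModelTimeoutException",
            "InternalServerException",
            "ModelErrorException",
            "ValidationException",
        )
    )
    return isinstance(exc, retryable)


@backoff.on_exception(
    backoff.expo,
    Exception,  # catch broadly, then filter in giveup
    giveup=lambda e: not _is_retryable(e),
    max_time=2,
)
def call_bedrock(client, **api_kwargs):
    return client.converse(**api_kwargs)
```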
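The `parse_chat_completion` rework in `openai_client.py` removes a genuine race: `self.response_parser` was mutated by `call`/`acall` and read later, so interleaved streaming and non-streaming requests on a shared client could parse with the wrong function. Deriving the parser from the completion object itself makes parsing stateless. The dispatch order matters: a concrete `Response` first, then async iterables, then sync iterables with `str`/`bytes`/`dict` excluded (they define `__iter__` but are not event streams). A condensed sketch of that decision table, assuming the attribute names used in the patch:

```python
from openai.types.responses import Response


def select_parser(client, completion):
    """Condensed mirror of the patched dispatch in parse_chat_completion."""
    if isinstance(completion, Response):
        return client.non_streaming_response_parser  # fully materialized response
    if hasattr(completion, "__aiter__"):
        return client.streaming_response_parser_async  # async event stream
    if hasattr(completion, "__iter__") and not isinstance(
        completion, (str, bytes, dict)
    ):
        return client.streaming_response_parser_sync  # sync event stream
    return client.non_streaming_response_parser  # conservative fallback
```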
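Finally, a usage sketch for `CustomizedXMLParser` in `tgd_optimizer.py`. Its `call` method extracts the `<response>...</response>` span when present, falls back to parsing the whole input, and on a parse error returns the raw input in `proposed_variable` rather than raising, so malformed model output degrades instead of failing the optimizer step. This assumes the `TGDData` fields shown in the diff:

```python
parser = CustomizedXMLParser()

well_formed = parser.call(
    "<response>"
    "<reasoning>The instruction was ambiguous about length.</reasoning>"
    "<method>Rewrite for specificity.</method>"
    "<proposed_variable>You are a concise, precise assistant.</proposed_variable>"
    "</response>"
)
assert well_formed.proposed_variable == "You are a concise, precise assistant."

# Malformed XML takes the error path instead of raising.
broken = parser.call("<response><reasoning>unclosed")
assert broken.method == "Error"
assert broken.proposed_variable == "<response><reasoning>unclosed"
```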