From 48f68714db3fa14b258d3a2f18a55b7162efb9b7 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 16:33:36 -0800 Subject: [PATCH 01/35] sync from container_agent repo --- .../agentserver/agentframework/agent_framework.py | 4 ++-- ...ent_framework_output_non_streaming_converter.py | 2 +- .../agent_framework_output_streaming_converter.py | 1 + .../azure/ai/agentserver/core/constants.py | 3 +-- .../azure/ai/agentserver/core/logger.py | 8 +++++--- .../azure/ai/agentserver/core/server/base.py | 8 ++++---- .../core/server/common/agent_run_context.py | 6 +++--- .../common/id_generator/foundry_id_generator.py | 2 +- .../response_content_part_event_generator.py | 7 ++++--- .../response_event_generator.py | 14 +++++++------- .../response_output_text_event_generator.py | 4 ++-- 11 files changed, 31 insertions(+), 28 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 7177b522d2a9..34270cceadb9 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -14,7 +14,7 @@ from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.constants import Constants as AdapterConstants -from azure.ai.agentserver.core.logger import get_logger +from azure.ai.agentserver.core.logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger from azure.ai.agentserver.core.models import ( CreateResponse, Response as OpenAIResponse, @@ -77,7 +77,7 @@ def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: def init_tracing(self): exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) - app_insights_conn_str = 
os.environ.get(AdapterConstants.APPLICATION_INSIGHTS_CONNECTION_STRING) + app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT) if project_endpoint: diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py index 805a5eeb9dec..823846f3ca7e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py @@ -169,7 +169,7 @@ def _append_function_result_content(self, content: FunctionResultContent, sink: result = [raw] elif isinstance(raw, list): for item in raw: - result.append(self._coerce_result_text(item)) # type: ignore + result.append(self._coerce_result_text(item)) # type: ignore call_id = getattr(content, "call_id", None) or "" func_out_id = self._context.id_generator.generate_function_output_id() sink.append( diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index d9bc3199efb5..4e3d12d4563e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -545,6 +545,7 @@ def build_response(self, status: str) -> 
OpenAIResponse: "id": self._response_id, "status": status, "created_at": self._response_created_at, + "conversation": self._context.get_conversation_object(), } if status == "completed" and self._completed_output_items: response_data["output"] = self._completed_output_items diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py index a13f23aa261e..33fcb0139fea 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py @@ -3,7 +3,6 @@ # --------------------------------------------------------- class Constants: # well-known environment variables - APPLICATION_INSIGHTS_CONNECTION_STRING = "_AGENT_RUNTIME_APP_INSIGHTS_CONNECTION_STRING" AZURE_AI_PROJECT_ENDPOINT = "AZURE_AI_PROJECT_ENDPOINT" AGENT_ID = "AGENT_ID" AGENT_NAME = "AGENT_NAME" @@ -11,4 +10,4 @@ class Constants: OTEL_EXPORTER_ENDPOINT = "OTEL_EXPORTER_ENDPOINT" AGENT_LOG_LEVEL = "AGENT_LOG_LEVEL" AGENT_DEBUG_ERRORS = "AGENT_DEBUG_ERRORS" - ENABLE_APPLICATION_INSIGHTS_LOGGER = "ENABLE_APPLICATION_INSIGHTS_LOGGER" + ENABLE_APPLICATION_INSIGHTS_LOGGER = "AGENT_APP_INSIGHTS_ENABLED" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py index f062398c0d3b..319e02da7e98 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py @@ -28,6 +28,8 @@ request_context = contextvars.ContextVar("request_context", default=None) +APPINSIGHT_CONNSTR_ENV_NAME = "APPLICATIONINSIGHTS_CONNECTION_STRING" + def get_dimensions(): env_values = {name: value for name, value in vars(Constants).items() if not name.startswith("_")} @@ -58,9 +60,9 @@ def 
get_project_endpoint(): def get_application_insights_connstr(): try: - conn_str = os.environ.get(Constants.APPLICATION_INSIGHTS_CONNECTION_STRING) + conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) if not conn_str: - print("environment variable APPLICATION_INSIGHTS_CONNECTION_STRING not set.") + print(f"environment variable {APPINSIGHT_CONNSTR_ENV_NAME} not set.") project_endpoint = get_project_endpoint() if project_endpoint: # try to get the project connected application insights @@ -72,7 +74,7 @@ def get_application_insights_connstr(): if not conn_str: print(f"no connected application insights found for project:{project_endpoint}") else: - os.environ[Constants.APPLICATION_INSIGHTS_CONNECTION_STRING] = conn_str + os.environ[APPINSIGHT_CONNSTR_ENV_NAME] = conn_str return conn_str except Exception as e: print(f"failed to get application insights with error: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 8915aadb172b..9463c4002e08 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -21,7 +21,7 @@ from starlette.types import ASGIApp from ..constants import Constants -from ..logger import get_logger, request_context +from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, request_context from ..models import ( Response as OpenAIResponse, ResponseStreamEvent, @@ -93,7 +93,7 @@ async def runs_endpoint(request): kind=trace.SpanKind.SERVER, ): try: - logger.info("Start processing CreateResponse request:") + logger.info("Start processing CreateResponse request.") context_carrier = {} TraceContextTextMapPropagator().inject(context_carrier) @@ -126,7 +126,7 @@ def gen(): yield "data: [DONE]\n\n" error_sent = True finally: - logger.info("End of processing CreateResponse request:") + 
logger.info("End of processing CreateResponse request.") otel_context.detach(token) if not error_sent: yield "data: [DONE]\n\n" @@ -261,7 +261,7 @@ def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None: def init_tracing(self): exporter = os.environ.get(Constants.OTEL_EXPORTER_ENDPOINT) - app_insights_conn_str = os.environ.get(Constants.APPLICATION_INSIGHTS_CONNECTION_STRING) + app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) if exporter or app_insights_conn_str: from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 6fae56f0027d..2703f66f6ff2 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -46,7 +46,7 @@ def stream(self) -> bool: def get_agent_id_object(self) -> AgentId: agent = self.request.get("agent") if not agent: - return None # type: ignore + return None # type: ignore return AgentId( { "type": agent.type, @@ -57,7 +57,7 @@ def get_agent_id_object(self) -> AgentId: def get_conversation_object(self) -> ResponseConversation1: if not self._conversation_id: - return None # type: ignore + return None # type: ignore return ResponseConversation1(id=self._conversation_id) @@ -72,5 +72,5 @@ def _deserialize_create_response(payload: dict) -> CreateResponse: def _deserialize_agent_reference(payload: dict) -> AgentReference: if not payload: - return None # type: ignore + return None # type: ignore return AgentReference(**payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index 910a7c481daa..b1c4e1ac55fd 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -88,7 +88,7 @@ def _new_id( infix = infix or "" prefix_part = f"{prefix}{delimiter}" if prefix else "" - return f"{prefix_part}{entropy}{infix}{pkey}" + return f"{prefix_part}{infix}{pkey}{entropy}" @staticmethod def _secure_entropy(string_length: int) -> str: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py index fe141887a2b2..4823de4411ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py @@ -63,7 +63,7 @@ def try_process_message( return is_processed, next_processor, events - def on_start( # mypy: ignore[override] + def on_start( # mypy: ignore[override] self, event, run_details, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if self.started: @@ -81,8 +81,9 @@ def on_start( # mypy: ignore[override] return True, [start_event] - def on_end(self, message, context, stream_state: StreamEventState - ) -> List[project_models.ResponseStreamEvent]: # mypy: ignore[override] + def on_end( + self, message, context, stream_state: StreamEventState + ) -> List[project_models.ResponseStreamEvent]: 
# mypy: ignore[override] aggregated_content = self.item_content_helper.create_item_content() done_event = project_models.ResponseContentPartDoneEvent( item_id=self.item_id, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py index ee19ca74f4bb..2bea925ef2ed 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py @@ -33,11 +33,11 @@ def __init__(self, logger, parent): self.parent = parent # parent generator def try_process_message( - self, - message: AnyMessage, # mypy: ignore[valid-type] - context: AgentRunContext, - stream_state: StreamEventState - ): # mypy: ignore[empty-body] + self, + message: AnyMessage, # mypy: ignore[valid-type] + context: AgentRunContext, + stream_state: StreamEventState, + ): # mypy: ignore[empty-body] """ Try to process the incoming message. @@ -63,8 +63,8 @@ def on_start(self) -> tuple[bool, List[project_models.ResponseStreamEvent]]: return False, [] def on_end( - self, message: AnyMessage, context: AgentRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + self, message: AnyMessage, context: AgentRunContext, stream_state: StreamEventState + ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: """ Generate the ending events for this layer. TODO: handle different end conditions, e.g. normal end, error end, etc. 
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py index b6be81ec7cb2..c65eda157bbd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py @@ -74,7 +74,7 @@ def process( self.aggregated_content += item stream_state.sequence_number += 1 res.append(chunk_event) - return True, self, res # mypy: ignore[return-value] + return True, self, res # mypy: ignore[return-value] return False, self, [] def has_finish_reason(self, message) -> bool: @@ -92,7 +92,7 @@ def should_end(self, message) -> bool: return True return False - def on_end( # mypy: ignore[override] + def on_end( # mypy: ignore[override] self, message, context: AgentRunContext, stream_state: StreamEventState ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: if not self.started: From aef1c478de6ebaccb7d5f4a6bc3321c3add3c1f4 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 17:16:05 -0800 Subject: [PATCH 02/35] sync error format --- .../azure/ai/agentserver/core/server/base.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 9463c4002e08..c3f001245133 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -105,7 +105,7 @@ async def 
runs_endpoint(request): try: first_event = next(resp) except Exception as e: # noqa: BLE001 - err_msg = str(e) if DEBUG_ERRORS else "Internal error" + err_msg = _format_error(e) logger.error("Generator initialization failed: %s\n%s", e, traceback.format_exc()) return JSONResponse({"error": err_msg}, status_code=500) @@ -119,7 +119,7 @@ def gen(): for event in resp: yield _event_to_sse_chunk(event) except Exception as e: # noqa: BLE001 - err_msg = str(e) if DEBUG_ERRORS else "Internal error" + err_msg = _format_error(e) logger.error("Error in non-async generator: %s\n%s", e, traceback.format_exc()) payload = {"error": err_msg} yield f"event: error\ndata: {json.dumps(payload)}\n\n" @@ -143,7 +143,7 @@ def empty_gen(): return StreamingResponse(empty_gen(), media_type="text/event-stream") except Exception as e: # noqa: BLE001 - err_msg = str(e) if DEBUG_ERRORS else "Internal error" + err_msg = _format_error(e) logger.error("Async generator initialization failed: %s\n%s", e, traceback.format_exc()) return JSONResponse({"error": err_msg}, status_code=500) @@ -157,7 +157,7 @@ async def gen_async(): async for event in resp: yield _event_to_sse_chunk(event) except Exception as e: # noqa: BLE001 - err_msg = str(e) if DEBUG_ERRORS else "Internal error" + err_msg = _format_error(e) logger.error("Error in async generator: %s\n%s", e, traceback.format_exc()) payload = {"error": err_msg} yield f"event: error\ndata: {json.dumps(payload)}\n\n" @@ -311,5 +311,14 @@ def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: return f"data: {event_data}\n\n" +def _format_error(exc: Exception) -> str: + message = str(exc) + if message: + return message + if DEBUG_ERRORS: + return repr(exc) + return "Internal error" + + def _to_response(result: Union[Response, dict]) -> Response: return result if isinstance(result, Response) else JSONResponse(result) From 43a9c44334ee0bc2cbc2a72571e2d1645707c243 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 17:29:01 -0800 Subject: [PATCH 
03/35] updated version and changelog --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 6 ++++++ .../azure/ai/agentserver/agentframework/_version.py | 2 +- sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 6 ++++++ .../azure/ai/agentserver/core/_version.py | 2 +- sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md | 6 ++++++ .../azure/ai/agentserver/langgraph/_version.py | 2 +- 6 files changed, 21 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index cfcf2445e256..b1d0e7aa3c6c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History + +## 1.0.0b2 (2025-11-10) + +Fixed some bugs + + ## 1.0.0b1 (2025-11-07) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index be71c81bd282..bbcd28b4aa67 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b1" +VERSION = "1.0.0b2" diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index cfcf2445e256..b1d0e7aa3c6c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History + +## 1.0.0b2 (2025-11-10) + +Fixed some bugs + + ## 1.0.0b1 (2025-11-07) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index be71c81bd282..bbcd28b4aa67 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b1" +VERSION = "1.0.0b2" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index cfcf2445e256..b1d0e7aa3c6c 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History + +## 1.0.0b2 (2025-11-10) + +Fixed some bugs + + ## 1.0.0b1 (2025-11-07) ### Features Added diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index be71c81bd282..bbcd28b4aa67 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b1" +VERSION = "1.0.0b2" From f33ec9f46bf1e531dfa75e02c8a27ca2658f7d8b Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 18:16:37 -0800 Subject: [PATCH 04/35] refined changelog --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 8 +++++++- sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 8 +++++++- .../azure-ai-agentserver-langgraph/CHANGELOG.md | 8 +++++++- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index b1d0e7aa3c6c..5b10716fd6ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -3,7 +3,13 @@ ## 1.0.0b2 (2025-11-10) -Fixed some bugs +### Bugs Fixed + +- Fixed Id generator format. + +- Improved stream mode error messsage. + +- Updated application insights related configuration environment variables. ## 1.0.0b1 (2025-11-07) diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index b1d0e7aa3c6c..5b10716fd6ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -3,7 +3,13 @@ ## 1.0.0b2 (2025-11-10) -Fixed some bugs +### Bugs Fixed + +- Fixed Id generator format. + +- Improved stream mode error messsage. + +- Updated application insights related configuration environment variables. 
## 1.0.0b1 (2025-11-07) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index b1d0e7aa3c6c..5b10716fd6ae 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -3,7 +3,13 @@ ## 1.0.0b2 (2025-11-10) -Fixed some bugs +### Bugs Fixed + +- Fixed Id generator format. + +- Improved stream mode error messsage. + +- Updated application insights related configuration environment variables. ## 1.0.0b1 (2025-11-07) From 18d38c45af08b9494b2b14a05f03a2b2e31f57af Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 10 Nov 2025 19:10:17 -0800 Subject: [PATCH 05/35] fix build --- .../azure/ai/agentserver/agentframework/agent_framework.py | 2 +- .../azure-ai-agentserver-agentframework/pyproject.toml | 3 +++ sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 3 +++ sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml | 3 +++ 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 34270cceadb9..1997b22cc800 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation +# pylint: disable=logging-fstring-interpolation,no-name-in-module from __future__ import annotations import asyncio # pylint: disable=do-not-import-asyncio diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 814d1d6d1a1e..052d36d10c7d 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -30,6 +30,9 @@ dependencies = [ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" +[project.urls] +repository = "https://github.com/Azure/azure-sdk-for-python" + [tool.setuptools.packages.find] exclude = [ "tests*", diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index f574360722bb..a55490a960be 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -35,6 +35,9 @@ dependencies = [ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" +[project.urls] +repository = "https://github.com/Azure/azure-sdk-for-python" + [tool.setuptools.packages.find] exclude = [ "tests*", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 5552ff8233d2..a5140068e12d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -31,6 +31,9 @@ dependencies = [ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" +[project.urls] +repository = "https://github.com/Azure/azure-sdk-for-python" + [tool.setuptools.packages.find] exclude = [ "tests*", From 
963b06d9737f514d5144e8caf4fe5474d60ef91f Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Tue, 11 Nov 2025 12:06:56 -0800 Subject: [PATCH 06/35] update id generator --- .../core/server/common/id_generator/foundry_id_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py index b1c4e1ac55fd..1082242cbf51 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py @@ -133,4 +133,4 @@ def _extract_partition_id( if len(segment) < string_length + partition_key_length: raise ValueError(f"Id '{id_str}' does not contain a valid id.") - return segment[-partition_key_length:] + return segment[:partition_key_length] From b9703266a7174a572f7b89996e1c2086d631e479 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Tue, 11 Nov 2025 12:09:05 -0800 Subject: [PATCH 07/35] fix agentframework trace init --- .../ai/agentserver/agentframework/agent_framework.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 1997b22cc800..38eea41e2afb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -80,12 +80,7 @@ def init_tracing(self): app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) project_endpoint = 
os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT) - if project_endpoint: - project_client = AIProjectClient(endpoint=project_endpoint, credential=DefaultAzureCredential()) - agent_client = AzureAIAgentClient(project_client=project_client) - agent_client.setup_azure_ai_observability() - elif exporter or app_insights_conn_str: - os.environ["WORKFLOW_ENABLE_OTEL"] = "true" + if exporter or app_insights_conn_str: from agent_framework.observability import setup_observability setup_observability( @@ -93,6 +88,10 @@ def init_tracing(self): otlp_endpoint=exporter, applicationinsights_connection_string=app_insights_conn_str, ) + elif project_endpoint: + project_client = AIProjectClient(endpoint=project_endpoint, credential=DefaultAzureCredential()) + agent_client = AzureAIAgentClient(project_client=project_client) + agent_client.setup_azure_ai_observability() self.tracer = trace.get_tracer(__name__) async def agent_run( From 6fb2f0398ef97b4aa41f62ae586302569f18b40b Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Tue, 11 Nov 2025 12:11:38 -0800 Subject: [PATCH 08/35] update version and changelog --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 9 +++++++++ .../azure/ai/agentserver/agentframework/_version.py | 2 +- sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md | 9 +++++++++ .../azure/ai/agentserver/core/_version.py | 2 +- .../azure-ai-agentserver-langgraph/CHANGELOG.md | 9 +++++++++ .../azure/ai/agentserver/langgraph/_version.py | 2 +- 6 files changed, 30 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index 5b10716fd6ae..c22ea4418361 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,6 +1,15 @@ # Release History +## 1.0.0b3 (2025-11-11) + +### Bugs Fixed + +- Fixed Id generator format. 
+ +- Fixed trace initialization for agent-framework. + + ## 1.0.0b2 (2025-11-10) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index bbcd28b4aa67..c43fdbc2e239 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b2" +VERSION = "1.0.0b3" diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 5b10716fd6ae..c22ea4418361 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,6 +1,15 @@ # Release History +## 1.0.0b3 (2025-11-11) + +### Bugs Fixed + +- Fixed Id generator format. + +- Fixed trace initialization for agent-framework. + + ## 1.0.0b2 (2025-11-10) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index bbcd28b4aa67..c43fdbc2e239 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b2" +VERSION = "1.0.0b3" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 5b10716fd6ae..c22ea4418361 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,6 +1,15 @@ # Release History +## 1.0.0b3 (2025-11-11) + +### Bugs Fixed + +- Fixed Id generator format. + +- Fixed trace initialization for agent-framework. + + ## 1.0.0b2 (2025-11-10) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index bbcd28b4aa67..dc203fe30c70 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b2" +VERSION = "1.0.0b3" \ No newline at end of file From 5a01655b9c561970ac5106ef14046b319c7b8609 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Tue, 11 Nov 2025 12:54:59 -0800 Subject: [PATCH 09/35] fix pylint --- .../azure/ai/agentserver/langgraph/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index dc203fe30c70..c43fdbc2e239 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b3" \ No newline at end of file +VERSION = "1.0.0b3" From f30779b952b55dd1bb8ab724ef6ae8a1a361e465 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 12 Nov 2025 22:41:54 -0800 Subject: [PATCH 10/35] pin azure-ai-agents and azure-ai-projects version --- sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index a55490a960be..1c6c37e19e23 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -20,8 +20,8 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-monitor-opentelemetry>=1.5.0", - "azure-ai-projects", - "azure-ai-agents>=1.2.0b5", + "azure-ai-projects==1.1.0b4", + "azure-ai-agents==1.2.0b6", "azure-core>=1.35.0", "azure-identity", "openai>=1.80.0", From 
2891cab9346c8b830466b7609afd796b3931c378 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty <1634042+ganeshyb@users.noreply.github.com> Date: Wed, 12 Nov 2025 23:25:55 -0800 Subject: [PATCH 11/35] Feature Agent Server support tools (#43961) * Tool Client V1 Version * Langraph integration * Updates fixing langgraph adapter * fix build * fix cspel * fix cspell * Add ToolClient integration with Agent Framework and examples for dynamic agent creation * Made changes to return tools instead of toolclient * Address comments --------- Co-authored-by: Lu Sun --- .../ai/agentserver/agentframework/__init__.py | 14 +- .../agentframework/agent_framework.py | 223 +++-- .../agentserver/agentframework/tool_client.py | 164 ++++ .../samples/tool_client_example/README.md | 113 +++ .../agent_factory_example.py | 109 +++ .../tool_client_example/requirements.txt | 4 + .../agentserver/core/client/tools/__init__.py | 13 + .../agentserver/core/client/tools/_client.py | 224 +++++ .../core/client/tools/_configuration.py | 88 ++ .../core/client/tools/_exceptions.py | 49 ++ .../core/client/tools/_model_base.py | 168 ++++ .../core/client/tools/_utils/_model_base.py | 792 ++++++++++++++++++ .../core/client/tools/aio/__init__.py | 13 + .../core/client/tools/aio/_client.py | 226 +++++ .../core/client/tools/aio/_configuration.py | 88 ++ .../tools/aio/operations/_operations.py | 184 ++++ .../client/tools/operations/_operations.py | 543 ++++++++++++ .../core/models/_create_response.py | 1 + .../azure/ai/agentserver/core/server/base.py | 50 +- .../core/server/common/agent_run_context.py | 21 +- .../azure-ai-agentserver-core/cspell.json | 4 +- .../custom_mock_agent_with_tools_test.py | 108 +++ .../ai/agentserver/langgraph/__init__.py | 11 +- .../ai/agentserver/langgraph/langgraph.py | 191 ++++- .../ai/agentserver/langgraph/tool_client.py | 211 +++++ .../cspell.json | 3 +- .../graph_factory_example.py | 128 +++ .../use_tool_client_example.py | 109 +++ 28 files changed, 3763 insertions(+), 89 
deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py create mode 100644 sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py create mode 100644 
sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py create mode 100644 sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index af980a34799f..aa03a264339c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -3,14 +3,22 @@ # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) +from typing import TYPE_CHECKING, Optional, Any + from ._version import VERSION +from .agent_framework import AgentFrameworkCBAgent + +if TYPE_CHECKING: # pragma: no cover + from azure.core.credentials_async import AsyncTokenCredential -def from_agent_framework(agent): +def from_agent_framework(agent, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> "AgentFrameworkCBAgent": from .agent_framework import AgentFrameworkCBAgent - return AgentFrameworkCBAgent(agent) + return AgentFrameworkCBAgent(agent, credentials=credentials, **kwargs) + +from .tool_client import ToolClient -__all__ = ["from_agent_framework"] +__all__ = ["from_agent_framework", "ToolClient"] __version__ = VERSION diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 38eea41e2afb..50cb09fd66f7 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -6,7 +6,8 @@ import asyncio # pylint: disable=do-not-import-asyncio import os -from typing import Any, AsyncGenerator, Union +from typing import TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List +import inspect from agent_framework import AgentProtocol from agent_framework.azure import AzureAIAgentClient # pylint: disable=no-name-in-module @@ -27,12 +28,35 @@ from .models.agent_framework_output_non_streaming_converter import ( AgentFrameworkOutputNonStreamingConverter, ) +from agent_framework import AIFunction from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter from .models.constants import Constants +from .tool_client import ToolClient + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential logger = get_logger() +class AgentFactory(Protocol): + """Protocol for agent factory functions. + + An agent factory is a callable that takes a ToolClient and returns + an AgentProtocol, either synchronously or asynchronously. + """ + + def __call__(self, tools: List[AIFunction]) -> Union[AgentProtocol, Awaitable[AgentProtocol]]: + """Create an AgentProtocol using the provided ToolClient. + + :param tools: The list of AIFunction tools available to the agent. + :type tools: List[AIFunction] + :return: An Agent Framework agent, or an awaitable that resolves to one. + :rtype: Union[AgentProtocol, Awaitable[AgentProtocol]] + """ + ... + + class AgentFrameworkCBAgent(FoundryCBAgent): """ Adapter class for integrating Agent Framework agents with the FoundryCB agent interface. @@ -50,10 +74,33 @@ class AgentFrameworkCBAgent(FoundryCBAgent): - Supports both streaming and non-streaming responses based on the `stream` flag. 
""" - def __init__(self, agent: AgentProtocol): - super().__init__() - self.agent = agent - logger.info(f"Initialized AgentFrameworkCBAgent with agent: {type(agent).__name__}") + def __init__(self, agent: Union[AgentProtocol, AgentFactory], credentials: "Optional[AsyncTokenCredential]" = None, **kwargs: Any): + """Initialize the AgentFrameworkCBAgent with an AgentProtocol or a factory function. + + :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient and returns AgentProtocol (sync or async). + :type agent: Union[AgentProtocol, AgentFactory] + :param credentials: Azure credentials for authentication. + :type credentials: Optional[AsyncTokenCredential] + """ + super().__init__(credentials=credentials, **kwargs) + self._agent_or_factory: Union[AgentProtocol, AgentFactory] = agent + self._resolved_agent: "Optional[AgentProtocol]" = None + + # If agent is already instantiated, use it directly + if isinstance(agent, AgentProtocol): + self._resolved_agent = agent + logger.info(f"Initialized AgentFrameworkCBAgent with agent: {type(agent).__name__}") + else: + logger.info("Initialized AgentFrameworkCBAgent with agent factory") + + @property + def agent(self) -> "Optional[AgentProtocol]": + """Get the resolved agent. This property provides backward compatibility. + + :return: The resolved AgentProtocol if available, None otherwise. + :rtype: Optional[AgentProtocol] + """ + return self._resolved_agent def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: """Resolve idle timeout for streaming updates. @@ -75,6 +122,49 @@ def _resolve_stream_timeout(self, request_body: CreateResponse) -> float: env_val = os.getenv(Constants.AGENTS_ADAPTER_STREAM_TIMEOUT_S) return float(env_val) if env_val is not None else float(Constants.DEFAULT_STREAM_TIMEOUT_S) + async def _resolve_agent(self, context: AgentRunContext): + """Resolve the agent if it's a factory function (for single-use/first-time resolution). 
+ Creates a ToolClient and calls the factory function with it. + This is used for the initial resolution. + """ + if callable(self._agent_or_factory): + logger.debug("Resolving agent from factory function") + + # Create ToolClient with credentials + tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) + tool_client_wrapper = ToolClient(tool_client) + tools = await tool_client_wrapper.list_tools() + + result = self._agent_or_factory(tools) + if inspect.iscoroutine(result): + self._resolved_agent = await result + else: + self._resolved_agent = result + + logger.debug("Agent resolved successfully") + else: + # Should not reach here, but just in case + self._resolved_agent = self._agent_or_factory + + async def _resolve_agent_for_request(self, context: AgentRunContext): + + logger.debug("Resolving fresh agent from factory function for request") + + # Create ToolClient with credentials + tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) + tool_client_wrapper = ToolClient(tool_client) + tools = await tool_client_wrapper.list_tools() + + import inspect + result = self._agent_or_factory(tools) + if inspect.iscoroutine(result): + agent = await result + else: + agent = result + + logger.debug("Fresh agent resolved successfully for request") + return agent, tool_client_wrapper + def init_tracing(self): exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT) app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME) @@ -100,53 +190,82 @@ async def agent_run( OpenAIResponse, AsyncGenerator[ResponseStreamEvent, Any], ]: - logger.info(f"Starting agent_run with stream={context.stream}") - request_input = context.request.get("input") - - input_converter = AgentFrameworkInputConverter() - message = input_converter.transform_input(request_input) - logger.debug(f"Transformed input message type: {type(message)}") - - # Use split converters - if context.stream: - 
logger.info("Running agent in streaming mode") - streaming_converter = AgentFrameworkOutputStreamingConverter(context) - - async def stream_updates(): - update_count = 0 - timeout_s = self._resolve_stream_timeout(context.request) - logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) - for ev in streaming_converter.initial_events(): - yield ev - - # Iterate with per-update timeout; terminate if idle too long - aiter = self.agent.run_stream(message).__aiter__() - while True: + # Resolve agent - always resolve if it's a factory function to get fresh agent each time + # For factories, get a new agent instance per request to avoid concurrency issues + tool_client = None + try: + if callable(self._agent_or_factory): + agent, tool_client = await self._resolve_agent_for_request(context) + elif self._resolved_agent is None: + await self._resolve_agent(context) + agent = self._resolved_agent + else: + agent = self._resolved_agent + + logger.info(f"Starting agent_run with stream={context.stream}") + request_input = context.request.get("input") + + input_converter = AgentFrameworkInputConverter() + message = input_converter.transform_input(request_input) + logger.debug(f"Transformed input message type: {type(message)}") + + # Use split converters + if context.stream: + logger.info("Running agent in streaming mode") + streaming_converter = AgentFrameworkOutputStreamingConverter(context) + + async def stream_updates(): try: - update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) - except StopAsyncIteration: - logger.debug("Agent streaming iterator finished (StopAsyncIteration)") - break - except asyncio.TimeoutError: - logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) + update_count = 0 + timeout_s = self._resolve_stream_timeout(context.request) + logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) + for ev in streaming_converter.initial_events(): + yield ev + + # Iterate with per-update 
timeout; terminate if idle too long + aiter = agent.run_stream(message).__aiter__() + while True: + try: + update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) + except StopAsyncIteration: + logger.debug("Agent streaming iterator finished (StopAsyncIteration)") + break + except asyncio.TimeoutError: + logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) + for ev in streaming_converter.completion_events(): + yield ev + return + update_count += 1 + transformed = streaming_converter.transform_output_for_streaming(update) + for event in transformed: + yield event for ev in streaming_converter.completion_events(): yield ev - return - update_count += 1 - transformed = streaming_converter.transform_output_for_streaming(update) - for event in transformed: - yield event - for ev in streaming_converter.completion_events(): - yield ev - logger.info("Streaming completed with %d updates", update_count) - - return stream_updates() - - # Non-streaming path - logger.info("Running agent in non-streaming mode") - non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context) - result = await self.agent.run(message) - logger.debug(f"Agent run completed, result type: {type(result)}") - transformed_result = non_streaming_converter.transform_output_for_response(result) - logger.info("Agent run and transformation completed successfully") - return transformed_result + logger.info("Streaming completed with %d updates", update_count) + finally: + # Close tool_client if it was created for this request + if tool_client is not None: + try: + await tool_client.close() + logger.debug("Closed tool_client after streaming completed") + except Exception as e: + logger.warning(f"Error closing tool_client in stream: {e}") + + return stream_updates() + + # Non-streaming path + logger.info("Running agent in non-streaming mode") + non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context) + result = await 
agent.run(message) + logger.debug(f"Agent run completed, result type: {type(result)}") + transformed_result = non_streaming_converter.transform_output_for_response(result) + logger.info("Agent run and transformation completed successfully") + return transformed_result + finally: + # Close tool_client if it was created for this request (non-streaming only, streaming handles in generator) + if not context.stream and tool_client is not None: + try: + await tool_client.close() + logger.debug("Closed tool_client after request processing") + except Exception as e: + logger.warning(f"Error closing tool_client: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py new file mode 100644 index 000000000000..e06df0df3026 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -0,0 +1,164 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +"""Tool client for integrating AzureAIToolClient with Agent Framework.""" + +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional +from agent_framework import AIFunction +from pydantic import BaseModel, Field, create_model + +if TYPE_CHECKING: + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool + +class ToolClient: + """Client that integrates AzureAIToolClient with Agent Framework. + + This class provides methods to list tools from AzureAIToolClient and invoke them + in a format compatible with Agent Framework agents. + + :param tool_client: The AzureAIToolClient instance to use for tool operations. + :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient + + .. admonition:: Example: + + .. 
code-block:: python + + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient + from azure.ai.agentserver.agentframework import ToolClient + from azure.identity.aio import DefaultAzureCredential + + async with DefaultAzureCredential() as credential: + tool_client = AzureAIToolClient( + endpoint="https://", + credential=credential + ) + + client = ToolClient(tool_client) + + # List tools as Agent Framework tool definitions + tools = await client.list_tools() + + # Invoke a tool directly + result = await client.invoke_tool( + tool_name="my_tool", + tool_input={"param": "value"} + ) + + :meta private: + """ + + def __init__(self, tool_client: "AzureAIToolClient") -> None: + """Initialize the ToolClient. + + :param tool_client: The AzureAIToolClient instance to use for tool operations. + :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient + """ + self._tool_client = tool_client + self._aifunction_cache: List[AIFunction] = None + + async def list_tools(self) -> List[AIFunction]: + """List all available tools as Agent Framework tool definitions. + + Retrieves tools from AzureAIToolClient and returns them in a format + compatible with Agent Framework. + + :return: List of tool definitions. + :rtype: List[AIFunction] + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. + + .. admonition:: Example: + + .. 
code-block:: python + + client = ToolClient(tool_client) + tools = await client.list_tools() + """ + # Get tools from AzureAIToolClient + if self._aifunction_cache is not None: + return self._aifunction_cache + + azure_tools = await self._tool_client.list_tools() + self._aifunction_cache = [] + + # Convert to Agent Framework tool definitions + for azure_tool in azure_tools: + ai_function_tool = self._convert_to_agent_framework_tool(azure_tool) + self._aifunction_cache.append(ai_function_tool) + + return self._aifunction_cache + + def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunction: + """Convert an AzureAITool to an Agent Framework AI Function + + :param azure_tool: The AzureAITool to convert. + :type azure_tool: ~azure.ai.agentserver.core.client.tools.aio.FoundryTool + :return: An AI Function Tool. + :rtype: AIFunction + """ + # Get the input schema from the tool descriptor + input_schema = azure_tool.input_schema or {} + + # Create a Pydantic model from the input schema + properties = input_schema.get("properties", {}) + required_fields = set(input_schema.get("required", [])) + + # Build field definitions for the Pydantic model + field_definitions: Dict[str, Any] = {} + for field_name, field_info in properties.items(): + field_type = self._json_schema_type_to_python(field_info.get("type", "string")) + field_description = field_info.get("description", "") + is_required = field_name in required_fields + + if is_required: + field_definitions[field_name] = (field_type, Field(description=field_description)) + else: + field_definitions[field_name] = (Optional[field_type], Field(default=None, description=field_description)) + + # Create the Pydantic model dynamically + input_model = create_model( + f"{azure_tool.name}_input", + **field_definitions + ) + + # Create a wrapper function that calls the Azure tool + async def tool_func(**kwargs: Any) -> Any: + """Dynamically generated function to invoke the Azure AI tool.""" + return await 
self.invoke_tool(azure_tool.name, kwargs) + + # Create and return the AIFunction + return AIFunction( + name=azure_tool.name, + description=azure_tool.description or "No description available", + func=tool_func, + input_model=input_model + ) + + def _json_schema_type_to_python(self, json_type: str) -> type: + """Convert JSON schema type to Python type. + + :param json_type: The JSON schema type string. + :type json_type: str + :return: The corresponding Python type. + :rtype: type + """ + type_map = { + "string": str, + "number": float, + "integer": int, + "boolean": bool, + "array": list, + "object": dict, + } + return type_map.get(json_type, str) + + async def close(self) -> None: + await self._tool_client.close() + + async def __aenter__(self) -> "ToolClient": + """Async context manager entry.""" + return self + + async def __aexit__(self, *exc_details: Any) -> None: + """Async context manager exit.""" + await self.close() diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md new file mode 100644 index 000000000000..019e388975ff --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/README.md @@ -0,0 +1,113 @@ +# Tool Client Example + +This example demonstrates how to use the `ToolClient` with Agent Framework to dynamically access tools from Azure AI Tool Client. + +## Overview + +The `ToolClient` provides a bridge between Azure AI Tool Client and Agent Framework, allowing agents to access tools configured in your Azure AI project. This example shows how to use a factory function pattern to create agents dynamically with access to tools at runtime. 
+ +## Features + +- **Dynamic Tool Access**: Agents can list and invoke tools from Azure AI Tool Client +- **Factory Pattern**: Create fresh agent instances per request to avoid concurrency issues +- **Tool Integration**: Seamlessly integrate Azure AI tools with Agent Framework agents + +## Prerequisites + +- Python 3.10 or later +- Azure AI project with configured tools +- Azure credentials (DefaultAzureCredential) + +## Setup + +1. Install dependencies: +```bash +pip install -r requirements.txt +``` + +2. Configure environment variables in `.env`: +``` +AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ +``` + +3. Ensure your Azure AI project has tools configured (e.g., MCP connections) + +## Running the Example + +```bash +python agent_factory_example.py +``` + +## How It Works + +1. **Factory Function**: The example creates a factory function that: + - Receives a `ToolClient` instance + - Lists available tools from Azure AI Tool Client + - Creates an Agent Framework agent with those tools + - Returns the agent instance + +2. **Dynamic Agent Creation**: The factory is called for each request, ensuring: + - Fresh agent instances per request + - Latest tool configurations + - No concurrency issues + +3. 
**Tool Access**: The agent can use tools like: + - MCP (Model Context Protocol) connections + - Function tools + - Other Azure AI configured tools + +## Key Code Patterns + +### Creating a Factory Function + +```python +async def agent_factory(tool_client: ToolClient): + # List tools from Azure AI + tools = await tool_client.list_tools() + + # Create agent with tools + agent = Agent( + name="MyAgent", + model="gpt-4o", + instructions="You are a helpful assistant.", + tools=tools + ) + return agent +``` + +### Using the Factory + +```python +from azure.ai.agentserver.agentframework import from_agent_framework + +adapter = from_agent_framework( + agent_factory, + credentials=credential, + tools=[{"type": "mcp", "project_connection_id": "my-mcp"}] +) +``` + +## Alternative: Direct Agent Usage + +You can also use a pre-created agent instead of a factory: + +```python +agent = Agent( + name="MyAgent", + model="gpt-4o", + instructions="You are a helpful assistant." +) + +adapter = from_agent_framework(agent, credentials=credential) +``` + +## Troubleshooting + +- **No tools found**: Ensure your Azure AI project has tools configured +- **Authentication errors**: Check your Azure credentials and project endpoint +- **Import errors**: Verify all dependencies are installed + +## Learn More + +- [Azure AI Agent Service Documentation](https://learn.microsoft.com/azure/ai-services/agents/) +- [Agent Framework Documentation](https://github.com/microsoft/agent-framework) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py new file mode 100644 index 000000000000..bc4d6bf8806d --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/agent_factory_example.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft. All rights reserved. 
+"""Example showing how to use an agent factory function with ToolClient. + +This sample demonstrates how to pass a factory function to from_agent_framework +that receives a ToolClient and returns an AgentProtocol. This pattern allows +the agent to be created dynamically with access to tools from Azure AI Tool +Client at runtime. +""" + +import asyncio +import os +from typing import List +from dotenv import load_dotenv +from agent_framework import AIFunction +from agent_framework.azure import AzureOpenAIChatClient + +from azure.ai.agentserver.agentframework import from_agent_framework +from azure.identity.aio import DefaultAzureCredential + +load_dotenv() + + +def create_agent_factory(): + """Create a factory function that builds an agent with ToolClient. + + This function returns a factory that takes a ToolClient and returns + an AgentProtocol. The agent is created at runtime for every request, + allowing it to access the latest tool configuration dynamically. + """ + + async def agent_factory(tools: List[AIFunction]) -> AzureOpenAIChatClient: + """Factory function that creates an agent using the provided tools. + + :param tools: The list of AIFunction tools available to the agent. + :type tools: List[AIFunction] + :return: An Agent Framework ChatAgent instance. 
+ :rtype: ChatAgent + """ + # List all available tools from the ToolClient + print("Fetching tools from Azure AI Tool Client via factory...") + print(f"Found {len(tools)} tools:") + for tool in tools: + print(f" - tool: {tool.name}, description: {tool.description}") + + if not tools: + print("\nNo tools found!") + print("Make sure your Azure AI project has tools configured.") + raise ValueError("No tools available to create agent") + + # Create the Agent Framework agent with the tools + print("\nCreating Agent Framework agent with tools from factory...") + agent = AzureOpenAIChatClient(credential=DefaultAzureCredential()).create_agent( + name="ToolClientAgent", + instructions="You are a helpful assistant with access to various tools.", + tools=tools, + ) + + print("Agent created successfully!") + return agent + + return agent_factory + + +async def quickstart(): + """Build and return an AgentFrameworkCBAgent using an agent factory function.""" + + # Get configuration from environment + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + + if not project_endpoint: + raise ValueError( + "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" + "Set it to your Azure AI project endpoint, e.g., " + "https://.services.ai.azure.com/api/projects/" + ) + + # Create Azure credentials + credential = DefaultAzureCredential() + + # Create a factory function that will build the agent at runtime + # The factory will receive a ToolClient when the agent first runs + agent_factory = create_agent_factory() + + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") + # Pass the factory function to from_agent_framework instead of a compiled agent + # The agent will be created on every agent run with access to ToolClient + print("Creating Agent Framework adapter with factory function...") + adapter = from_agent_framework( + agent_factory, + credentials=credential, + tools=[{"type": "mcp", "project_connection_id": tool_connection_id}] + ) + + print("Adapter created! Agent will be built on every request.") + return adapter + + +async def main(): # pragma: no cover - sample entrypoint + """Main function to run the agent.""" + adapter = await quickstart() + + if adapter: + print("\nStarting agent server...") + print("The agent factory will be called for every request that arrives.") + await adapter.run_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt new file mode 100644 index 000000000000..79caf276114f --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/samples/tool_client_example/requirements.txt @@ -0,0 +1,4 @@ +azure-ai-agentserver-agentframework +azure-identity +python-dotenv +agent-framework diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py new file mode 100644 index 000000000000..8cf7c6b67389 --- /dev/null +++ 
class AzureAITool:
    """Azure AI tool wrapper for invocation.

    Wraps a single tool so it can be invoked either via the MCP protocol or
    the Azure AI Tools API, and surfaces the tool's metadata on the wrapper.

    :ivar str name: The name of the tool.
    :ivar str description: Human-readable description of what the tool does.
    :ivar dict metadata: Additional metadata about the tool from the API.
    :ivar source: The source of the tool (MCP_TOOLS or REMOTE_TOOLS).

    .. admonition:: Example:

        .. literalinclude:: ../samples/simple_example.py
            :start-after: [START use_tool]
            :end-before: [END use_tool]
            :language: python
            :dedent: 4
            :caption: Using an AzureAITool instance.
    """

    def __init__(self, client: "AzureAIToolClient", descriptor: "FoundryTool") -> None:
        """Initialize an Azure AI Tool.

        :param client: Parent client instance used for API calls.
        :type client: AzureAIToolClient
        :param descriptor: Tool descriptor with metadata and configuration.
        :type descriptor: FoundryTool
        """
        self._client = client
        self._descriptor = descriptor
        # Mirror the descriptor's public fields directly on the wrapper;
        # metadata is copied so callers cannot mutate the descriptor's copy.
        self.name = descriptor.name
        self.description = descriptor.description
        self.metadata = dict(descriptor.metadata)
        self.source = descriptor.source

    def invoke(self, *args: Any, **kwargs: Any) -> Any:
        """Invoke the tool synchronously.

        :param args: Positional arguments to pass to the tool.
        :param kwargs: Keyword arguments to pass to the tool.
        :return: The result from the tool invocation.
        :rtype: Any
        """
        payload = InvocationPayloadBuilder.build_payload(args, kwargs, {})
        return self._client._invoke_tool(self._descriptor, payload)

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        # Calling the wrapper is shorthand for invoke().
        return self.invoke(*args, **kwargs)
class AzureAIToolClient:
    """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs.

    This client provides access to tools from both MCP (Model Context Protocol)
    servers and Azure AI Tools API endpoints, enabling unified tool discovery
    and invocation.

    :param str endpoint:
        The fully qualified endpoint for the Azure AI Agents service.
    :param credential:
        Credential for authenticating requests to the service.
        Use credentials from azure-identity like DefaultAzureCredential.
    :type credential: ~azure.core.credentials.TokenCredential
    :keyword str agent_name:
        Name of the agent to use for tool operations. Default is "$default".
    :keyword List[Mapping[str, Any]] tools:
        List of tool configurations defining which tools to include.
    :keyword Mapping[str, Any] user:
        User information for tool invocations (object_id, tenant_id).
    :keyword str api_version:
        API version to use when communicating with the service.
        Default is the latest supported version.
    :keyword transport:
        Custom transport implementation. Default is RequestsTransport.
    :paramtype transport: ~azure.core.pipeline.transport.HttpTransport
    """

    def __init__(
        self,
        endpoint: str,
        credential: "TokenCredential",
        **kwargs: Any,
    ) -> None:
        """Initialize the synchronous Azure AI Tool Client.

        :param str endpoint: The service endpoint URL.
        :param credential: Credentials for authenticating requests.
        :type credential: ~azure.core.credentials.TokenCredential
        :keyword kwargs: Additional keyword arguments for client configuration.
        """
        self._config = AzureAIToolClientConfiguration(
            endpoint,
            credential,
            **kwargs,
        )

        _policies = kwargs.pop("policies", None)
        if _policies is None:
            # Standard azure-core pipeline policy ordering; the sensitive-header
            # cleanup policy only applies when redirects are enabled.
            _policies = [
                policies.RequestIdPolicy(**kwargs),
                self._config.headers_policy,
                self._config.user_agent_policy,
                self._config.proxy_policy,
                policies.ContentDecodePolicy(**kwargs),
                self._config.redirect_policy,
                self._config.retry_policy,
                self._config.authentication_policy,
                self._config.custom_hook_policy,
                self._config.logging_policy,
                policies.DistributedTracingPolicy(**kwargs),
                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
                self._config.http_logging_policy,
            ]
        self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs)

        # Operation groups, one per tool source.
        self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config)
        self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config)

    def list_tools(self) -> List[FoundryTool]:
        """List all available tools from configured sources.

        Retrieves tools from both MCP servers and Azure AI Tools API endpoints,
        returning them as FoundryTool instances ready for invocation.

        :return: List of available tools from all configured sources.
        :rtype: List[FoundryTool]
        :raises ~exceptions.OAuthConsentRequiredError:
            Raised when the service requires user OAuth consent.
        :raises ~exceptions.MCPToolApprovalRequiredError:
            Raised when tool access requires human approval.
        :raises ~azure.core.exceptions.HttpResponseError:
            Raised for HTTP communication failures.
        """
        existing_names: set[str] = set()
        tools: List[FoundryTool] = []

        # Fetch MCP tools, then Tools API tools; both update existing_names
        # so the second pass avoids name collisions with the first.
        tools.extend(self._mcp_tools.list_tools(existing_names))
        tools.extend(self._remote_tools.resolve_tools(existing_names))

        for tool in tools:
            # Bind each descriptor in its own closure so every invoker calls
            # back with its own tool instead of the loop's final value.
            def make_invoker(captured_tool):
                return lambda *args, **kwargs: self.invoke_tool(captured_tool, *args, **kwargs)
            tool.invoker = make_invoker(tool)
        return tools

    def invoke_tool(
        self,
        tool: "Union[AzureAITool, str, FoundryTool]",
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """Invoke a tool by instance, name, or descriptor.

        :param tool: Tool to invoke, specified as an AzureAITool instance,
            tool name string, or FoundryTool.
        :type tool: Union[~AzureAITool, str, ~FoundryTool]
        :param args: Positional arguments to pass to the tool.
        :param kwargs: Keyword arguments to pass to the tool.
        :return: The result from the tool invocation.
        :rtype: Any
        """
        descriptor = self._resolve_tool_descriptor(tool)
        payload = InvocationPayloadBuilder.build_payload(args, kwargs, {})
        # BUGFIX: user kwargs are tool arguments and are already folded into
        # the payload; previously they were ALSO forwarded as **kwargs to
        # _invoke_tool, leaking them toward the transport layer and diverging
        # from AzureAITool.invoke, which passes only the payload.
        return self._invoke_tool(descriptor, payload)

    def _resolve_tool_descriptor(
        self, tool: "Union[AzureAITool, str, FoundryTool]"
    ) -> FoundryTool:
        """Resolve a tool reference (instance, descriptor, or name/key) to a descriptor.

        :raises KeyError: If a name/key string matches no known tool.
        :raises TypeError: If the reference is not a supported type.
        """
        if isinstance(tool, AzureAITool):
            return tool._descriptor
        if isinstance(tool, FoundryTool):
            return tool
        if isinstance(tool, str):
            # Fetch all tools and match on display name or unique key.
            descriptors = self.list_tools()
            for descriptor in descriptors:
                if descriptor.name == tool or descriptor.key == tool:
                    return descriptor
            raise KeyError(f"Unknown tool: {tool}")
        raise TypeError("Tool must be an AzureAITool, FoundryTool, or registered name/key")

    def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any:
        """Dispatch an invocation to the operation group matching the tool's source."""
        if descriptor.source is ToolSource.MCP_TOOLS:
            return self._mcp_tools.invoke_tool(descriptor, arguments)
        if descriptor.source is ToolSource.REMOTE_TOOLS:
            return self._remote_tools.invoke_tool(descriptor, arguments)
        raise ValueError(f"Unsupported tool source: {descriptor.source}")

    def close(self) -> None:
        """Close the underlying pipeline client."""
        self._client.close()

    def __enter__(self) -> "AzureAIToolClient":
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details: Any) -> None:
        self._client.__exit__(*exc_details)
class AzureAIToolClientConfiguration:
    """Configuration for Azure AI Tool Client.

    Manages authentication, endpoint configuration, and policy settings for the
    Azure AI Tool Client. This class is used internally by the client and should
    not typically be instantiated directly.

    :param str endpoint:
        Fully qualified endpoint for the Azure AI Agents service.
    :param credential:
        Azure TokenCredential for authentication.
    :type credential: ~azure.core.credentials.TokenCredential
    :keyword str api_version:
        API version to use. Default is the latest supported version.
    :keyword List[str] credential_scopes:
        OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"].
    :keyword str agent_name:
        Name of the agent. Default is "$default".
    :keyword List[Mapping[str, Any]] tools:
        List of tool configurations.
    :keyword Mapping[str, Any] user:
        User information for tool invocations.
    """

    def __init__(
        self,
        endpoint: str,
        credential: "TokenCredential",
        **kwargs: Any,
    ) -> None:
        """Initialize the configuration.

        :param str endpoint: The service endpoint URL.
        :param credential: Credentials for authenticating requests.
        :type credential: ~azure.core.credentials.TokenCredential
        :keyword kwargs: Additional configuration options.
        """
        self.endpoint = endpoint
        self.credential = credential
        self.api_version: str = kwargs.pop("api_version", "2025-05-15-preview")
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"])

        # Tool-related settings.
        self.agent_name: str = kwargs.pop("agent_name", "$default")
        self.tools: Optional[List[ToolDefinition]] = kwargs.pop("tools", None)
        self.user: Optional[UserInfo] = kwargs.pop("user", None)

        # Pre-parse the tool configuration for the operation groups.
        self.tool_config = ToolConfigurationParser(self.tools)

        self._configure(**kwargs)

        # Anything left over was not consumed by any policy or option.
        if kwargs:
            import warnings
            warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning)

    def _configure(self, **kwargs: Any) -> None:
        """Build the pipeline policies, honoring any caller-supplied overrides."""
        get = kwargs.get
        self.user_agent_policy = get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
        self.custom_hook_policy = get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = get("redirect_policy") or policies.RedirectPolicy(**kwargs)
        self.retry_policy = get("retry_policy") or policies.RetryPolicy(**kwargs)
        self.authentication_policy = get("authentication_policy")
        if self.credential and not self.authentication_policy:
            # Default to bearer-token auth over the configured scopes.
            self.authentication_policy = policies.BearerTokenCredentialPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
class OAuthConsentRequiredError(RuntimeError):
    """Raised when the service requires end-user OAuth consent.

    The service returns this when a tool or service operation needs explicit
    OAuth consent from the end user before the operation can proceed.

    :ivar str message: Human-readable guidance returned by the service.
    :ivar str consent_url: Link that the end user must visit to provide consent.
    :ivar dict payload: Full response payload from the service.

    :param str message: Human-readable guidance returned by the service.
    :param str consent_url: Link that the end user must visit to provide the required consent.
    :param dict payload: Full response payload supplied by the service.
    """

    def __init__(self, message: str, consent_url: Optional[str], payload: Mapping[str, Any]) -> None:
        super().__init__(message)
        self.message = message
        self.consent_url = consent_url
        # Snapshot the payload so later mutation of the source mapping
        # cannot alter the recorded error context.
        self.payload = dict(payload)


class MCPToolApprovalRequiredError(RuntimeError):
    """Raised when an MCP tool invocation needs human approval.

    The service returns this when an MCP (Model Context Protocol) tool
    requires explicit human approval before the invocation can proceed,
    typically for security or compliance reasons.

    :ivar str message: Human-readable guidance returned by the service.
    :ivar dict approval_arguments:
        Arguments that must be approved or amended before continuing.
    :ivar dict payload: Full response payload from the service.

    :param str message: Human-readable guidance returned by the service.
    :param dict approval_arguments:
        Arguments that must be approved or amended before continuing.
    :param dict payload: Full response payload supplied by the service.
    """

    def __init__(self, message: str, approval_arguments: Mapping[str, Any], payload: Mapping[str, Any]) -> None:
        super().__init__(message)
        self.message = message
        # Copy both mappings for the same defensive reason as above.
        self.approval_arguments = dict(approval_arguments)
        self.payload = dict(payload)
+ """ + + MCP_TOOLS = "mcp_tools" + REMOTE_TOOLS = "remote_tools" + +class ToolDefinition: + """Definition of a tool including its parameters. + + :ivar str type: JSON schema type (e.g., "mcp", "a2", other tools). + """ + + def __init__(self, type: str, **kwargs: Any) -> None: + """Initialize ToolDefinition with type and any additional properties. + + :param str type: JSON schema type (e.g., "mcp", "a2", other tools). + :param kwargs: Any additional properties to set on the tool definition. + """ + self.type = type + # Store all additional properties as attributes + for key, value in kwargs.items(): + setattr(self, key, value) + + def __repr__(self) -> str: + """Return a detailed string representation of the ToolDefinition.""" + return json.dumps(self.__dict__, default=str) + + def __str__(self) -> str: + """Return a human-readable string representation.""" + return json.dumps(self.__dict__, default=str) + + +@dataclass +class FoundryTool: + """Lightweight description of a tool that can be invoked. + + Represents metadata and configuration for a single tool, including its + name, description, input schema, and source information. + + :ivar str key: Unique identifier for this tool. + :ivar str name: Display name of the tool. + :ivar str description: Human-readable description of what the tool does. + :ivar ~ToolSource source: + Origin of the tool (MCP_TOOLS or REMOTE_TOOLS). + :ivar dict metadata: Raw metadata from the API response. + :ivar dict input_schema: + JSON schema describing the tool's input parameters, or None. + :ivar ToolDefinition tool_definition: + Optional tool definition object, or None. + """ + + key: str + name: str + description: str + source: ToolSource + metadata: Mapping[str, Any] + input_schema: Optional[Mapping[str, Any]] = None + tool_definition: Optional[ToolDefinition] = None + invoker: Optional[Callable[..., Awaitable[Any]]] = None + + def invoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool synchronously. 
+ + :param args: Positional arguments to pass to the tool. + :param kwargs: Keyword arguments to pass to the tool. + :return: The result from the tool invocation. + :rtype: Any + """ + + + if not self.invoker: + raise NotImplementedError("No invoker function defined for this tool.") + if inspect.iscoroutinefunction(self.invoker): + # If the invoker is async, check if we're already in an event loop + try: + loop = asyncio.get_running_loop() + # We're in a running loop, can't use asyncio.run() + raise RuntimeError( + "Cannot call invoke() on an async tool from within an async context. " + "Use 'await tool.ainvoke(...)' or 'await tool(...)' instead." + ) + except RuntimeError as e: + if "no running event loop" in str(e).lower(): + # No running loop, safe to use asyncio.run() + return asyncio.run(self.invoker(*args, **kwargs)) + else: + # Re-raise our custom error + raise + else: + return self.invoker(*args, **kwargs) + + async def ainvoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool asynchronously. + + :param args: Positional arguments to pass to the tool. + :param kwargs: Keyword arguments to pass to the tool. + :return: The result from the tool invocation. + :rtype: Any + """ + + if not self.invoker: + raise NotImplementedError("No invoker function defined for this tool.") + if inspect.iscoroutinefunction(self.invoker): + return await self.invoker(*args, **kwargs) + else: + result = self.invoker(*args, **kwargs) + # If the result is awaitable (e.g., a coroutine), await it + if inspect.iscoroutine(result) or hasattr(result, '__await__'): + return await result + return result + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + + # Check if the invoker is async + if self.invoker and inspect.iscoroutinefunction(self.invoker): + # Return coroutine for async context + return self.ainvoke(*args, **kwargs) + else: + # Use sync invoke + return self.invoke(*args, **kwargs) + + +class UserInfo: + """Represents user information. 
class UserInfo:
    """Represents user information.

    :ivar str objectId: User's object identifier.
    :ivar str tenantId: Tenant identifier.
    """

    def __init__(self, objectId: str, tenantId: str, **kwargs: Any) -> None:
        """Initialize UserInfo with user details.

        :param str objectId: User's object identifier.
        :param str tenantId: Tenant identifier.
        :param kwargs: Any additional properties to set on the user.
        """
        self.objectId = objectId
        self.tenantId = tenantId
        # Extra service-supplied fields become plain attributes.
        for extra_key, extra_value in kwargs.items():
            setattr(self, extra_key, extra_value)

    def to_dict(self) -> dict:
        """Serialize the core identity fields for JSON payloads."""
        return {
            "objectId": self.objectId,
            "tenantId": self.tenantId
        }


class ToolDescriptorBuilder:
    """Builds FoundryTool objects from raw tool data."""

    @staticmethod
    def build_descriptors(
        raw_tools: Iterable[Mapping[str, Any]],
        source: ToolSource,
        existing_names: Set[str],
    ) -> List[FoundryTool]:
        """Build tool descriptors from raw tool data.

        Parameters
        ----------
        raw_tools : Iterable[Mapping[str, Any]]
            Raw tool data from the API (dicts or dataclass instances).
        source : ToolSource
            Source of the tools.
        existing_names : Set[str]
            Names already in use; updated in place as descriptors are added.

        Returns
        -------
        List[FoundryTool]
            The built tool descriptors.
        """
        built: List[FoundryTool] = []
        for entry in raw_tools:
            # Normalize dataclass instances into plain dictionaries.
            if is_dataclass(entry) and not isinstance(entry, type):
                entry = asdict(entry)

            name, description = ToolMetadataExtractor.extract_name_description(entry)
            if not name:
                # Unnamed entries cannot be addressed later; drop them.
                continue

            unique_name = NameResolver.ensure_unique_name(name, existing_names)
            built.append(
                FoundryTool(
                    key=ToolMetadataExtractor.derive_tool_key(entry, source),
                    name=unique_name,
                    description=description or "",
                    source=source,
                    metadata=dict(entry),
                    input_schema=ToolMetadataExtractor.extract_input_schema(entry),
                    tool_definition=entry.get("tool_definition"),
                )
            )
            existing_names.add(unique_name)

        return built
class ToolMetadataExtractor:
    """Extracts metadata from raw tool data."""

    @staticmethod
    def extract_name_description(raw: Mapping[str, Any]) -> Tuple[Optional[str], Optional[str]]:
        """Extract name and description from raw tool data.

        Parameters
        ----------
        raw : Mapping[str, Any]
            Raw tool data

        Returns
        -------
        Tuple[Optional[str], Optional[str]]
            Tuple of (name, description); either element may be None.
        """
        # Try common top-level keys first, then nested definition/tool blocks.
        name = (
            raw.get("name")
            or raw.get("id")
            or raw.get("tool_name")
            or raw.get("definition", {}).get("name")
            or raw.get("tool", {}).get("name")
        )
        description = (
            raw.get("description")
            or raw.get("long_description")
            or raw.get("definition", {}).get("description")
            or raw.get("tool", {}).get("description")
        )
        return name, description

    @staticmethod
    def derive_tool_key(raw: Mapping[str, Any], source: ToolSource) -> str:
        """Derive a unique key for a tool.

        Parameters
        ----------
        raw : Mapping[str, Any]
            Raw tool data
        source : ToolSource
            Source of the tool

        Returns
        -------
        str
            Unique tool key, formatted "<source>:<identifier>"
        """
        for candidate in (raw.get("id"), raw.get("name"), raw.get("tool_name")):
            if candidate:
                return f"{source.value}:{candidate}"
        # Last resort: the object's identity keeps the key unique in-process.
        return f"{source.value}:{id(raw)}"

    @staticmethod
    def extract_input_schema(raw: Mapping[str, Any]) -> Optional[Mapping[str, Any]]:
        """Extract input schema from raw tool data.

        Parameters
        ----------
        raw : Mapping[str, Any]
            Raw tool data

        Returns
        -------
        Optional[Mapping[str, Any]]
            Input schema if found
        """
        for key in ("input_schema", "inputSchema", "schema", "parameters"):
            if key in raw and isinstance(raw[key], Mapping):
                return raw[key]
        # Recurse into nested definition/tool blocks when present.
        nested = raw.get("definition") or raw.get("tool")
        if isinstance(nested, Mapping):
            return ToolMetadataExtractor.extract_input_schema(nested)
        return None

    @staticmethod
    def extract_metadata_schema(raw: Mapping[str, Any]) -> Optional[Mapping[str, Any]]:
        """Extract the _meta schema from raw tool data.

        Docstring fixed: this method returns the tool's metadata (_meta)
        mapping, not the input schema.

        Parameters
        ----------
        raw : Mapping[str, Any]
            Raw tool data

        Returns
        -------
        Optional[Mapping[str, Any]]
            _metadata if found
        """
        for key in ("_meta", "metadata", "meta"):
            if key in raw and isinstance(raw[key], Mapping):
                return raw[key]
        return None


class NameResolver:
    """Resolves tool names to ensure uniqueness."""

    @staticmethod
    def ensure_unique_name(proposed_name: str, existing_names: Set[str]) -> str:
        """Ensure a tool name is unique.

        Parameters
        ----------
        proposed_name : str
            Proposed tool name
        existing_names : Set[str]
            Set of existing tool names

        Returns
        -------
        str
            The proposed name, or the first free "<name>_<n>" variant.
        """
        if proposed_name not in existing_names:
            return proposed_name

        suffix = 1
        while True:
            candidate = f"{proposed_name}_{suffix}"
            if candidate not in existing_names:
                return candidate
            suffix += 1
class MetadataMapper:
    """Maps tool metadata from _meta schema to tool configuration."""

    # Default key mapping: meta_schema_key -> output_key.
    # Reversed internally so lookups run from tool-definition keys
    # to _meta schema keys (matching key_overrides' direction).
    DEFAULT_KEY_MAPPING = {
        "imagegen_model_deployment_name": "model_deployment_name",
        "model_deployment_name": "model",
        "deployment_name": "model",
    }

    @staticmethod
    def extract_metadata_config(
        tool_metadata: Mapping[str, Any],
        tool_definition: Optional[Mapping[str, Any]] = None,
        key_overrides: Optional[Mapping[str, str]] = None,
    ) -> Dict[str, Any]:
        """Extract metadata configuration from _meta schema and tool definition.

        Properties declared in the _meta schema are matched against values in
        the tool definition; ``key_overrides`` maps tool-definition property
        names onto _meta schema property names.

        Parameters
        ----------
        tool_metadata : Mapping[str, Any]
            The _meta schema containing property definitions
        tool_definition : Optional[Mapping[str, Any]]
            The tool definition containing actual values
        key_overrides : Optional[Mapping[str, str]]
            Mapping from tool definition keys to _meta schema keys,
            e.g. {"model": "imagegen_model_deployment_name"}

        Returns
        -------
        Dict[str, Any]
            Dictionary with mapped metadata configuration

        Examples
        --------
        >>> meta_schema = {
        ...     "properties": {
        ...         "quality": {"type": "string", "default": "auto"},
        ...         "model_deployment_name": {"type": "string"}
        ...     }
        ... }
        >>> tool_def = {"quality": "high", "model": "gpt-4"}
        >>> overrides = {"model": "model_deployment_name"}
        >>> MetadataMapper.extract_metadata_config(meta_schema, tool_def, overrides)
        {'quality': 'high', 'model_deployment_name': 'gpt-4'}
        """
        mapped: Dict[str, Any] = {}

        # tool-definition key -> meta property name: defaults reversed,
        # then caller overrides layered on top.
        alias_map: Dict[str, str] = {
            meta_key: def_key for def_key, meta_key in MetadataMapper.DEFAULT_KEY_MAPPING.items()
        }
        if key_overrides:
            alias_map.update(key_overrides)

        schema_props = tool_metadata.get("properties", {})
        if not isinstance(schema_props, Mapping):
            return mapped

        required_props = tool_metadata.get("required", [])

        for prop_name, prop_schema in schema_props.items():
            if not isinstance(prop_schema, Mapping):
                continue

            is_required = prop_name in required_props
            value = None
            from_definition = False

            if tool_definition:
                if prop_name in tool_definition:
                    # Exact key match wins.
                    value = tool_definition[prop_name]
                    from_definition = True
                else:
                    # Otherwise take the first aliased tool-definition key
                    # that maps onto this meta property.
                    match = next(
                        (
                            def_key
                            for def_key, meta_key in alias_map.items()
                            if meta_key == prop_name and def_key in tool_definition
                        ),
                        None,
                    )
                    if match is not None:
                        value = tool_definition[match]
                        from_definition = True

            # Schema defaults only apply to required properties.
            if value is None and is_required and "default" in prop_schema:
                value = prop_schema["default"]

            # Keep only definition-sourced values, or required defaults.
            if value is not None and (from_definition or is_required):
                mapped[prop_name] = value

        return mapped

    @staticmethod
    def prepare_metadata_dict(
        tool_metadata_raw: Mapping[str, Any],
        tool_definition: Optional[Mapping[str, Any]] = None,
        key_overrides: Optional[Mapping[str, str]] = None,
    ) -> Dict[str, Any]:
        """Prepare a _meta dictionary from tool metadata and definition.

        Convenience wrapper: extracts the _meta schema from raw tool
        metadata and maps it to configuration values.

        Parameters
        ----------
        tool_metadata_raw : Mapping[str, Any]
            Raw tool metadata containing _meta or similar fields
        tool_definition : Optional[Mapping[str, Any]]
            The tool definition containing actual values
        key_overrides : Optional[Mapping[str, str]]
            Mapping from tool definition keys to _meta schema keys

        Returns
        -------
        Dict[str, Any]
            Dictionary with mapped metadata configuration
        """
        schema = ToolMetadataExtractor.extract_metadata_schema(tool_metadata_raw)
        if not schema:
            return {}
        return MetadataMapper.extract_metadata_config(schema, tool_definition, key_overrides)


class InvocationPayloadBuilder:
    """Builds invocation payloads for tool calls."""

    @staticmethod
    def build_payload(
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any],
        configuration: Dict[str, Any],
    ) -> Dict[str, Any]:
        """Build an invocation payload from args and kwargs.

        Configuration defaults are applied first, then overridden by the
        caller's arguments.

        Parameters
        ----------
        args : Tuple[Any, ...]
            Positional arguments
        kwargs : Dict[str, Any]
            Keyword arguments
        configuration : Dict[str, Any]
            Tool configuration defaults

        Returns
        -------
        Dict[str, Any]
            Complete invocation payload
        """
        payload = dict(configuration)
        payload.update(InvocationPayloadBuilder._normalize_input(args, kwargs))
        return payload

    @staticmethod
    def _normalize_input(
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Normalize invocation input to a dictionary.

        Parameters
        ----------
        args : Tuple[Any, ...]
            Positional arguments
        kwargs : Dict[str, Any]
            Keyword arguments

        Returns
        -------
        Dict[str, Any]
            Normalized input dictionary

        Raises
        ------
        ValueError
            If mixing positional and keyword arguments or providing
            multiple positional arguments
        """
        if args and kwargs:
            raise ValueError("Mixing positional and keyword arguments is not supported")

        if args:
            if len(args) > 1:
                raise ValueError("Multiple positional arguments are not supported")
            sole = args[0]
            if sole is None:
                return {}
            # A mapping is used as-is; any other value becomes {"input": value}.
            return dict(sole) if isinstance(sole, Mapping) else {"input": sole}

        return dict(kwargs) if kwargs else {}
+ Positional arguments + kwargs : Dict[str, Any] + Keyword arguments + + Returns + ------- + Dict[str, Any] + Normalized input dictionary + + Raises + ------ + ValueError + If mixing positional and keyword arguments or providing multiple positional args + """ + if args and kwargs: + raise ValueError("Mixing positional and keyword arguments is not supported") + + if args: + if len(args) > 1: + raise ValueError("Multiple positional arguments are not supported") + candidate = next(iter(args)) + if candidate is None: + return {} + if isinstance(candidate, Mapping): + return dict(candidate) + return {"input": candidate} + + if kwargs: + return dict(kwargs) + + return {} + + +@dataclass +class ToolProperty: + """Represents a single property/parameter in a tool's schema. + + :ivar str type: JSON schema type (e.g., "string", "object", "array"). + :ivar Optional[str] description: Human-readable description of the property. + :ivar Optional[Mapping[str, Any]] properties: Nested properties for object types. + :ivar Any default: Default value for the property. + :ivar List[str] required: List of required nested properties. + """ + + type: str + description: Optional[str] = None + properties: Optional[Mapping[str, Any]] = None + default: Any = None + required: Optional[List[str]] = None + +@dataclass +class ToolParameters: + """Represents the parameters schema for a tool. + + :ivar str type: JSON schema type, typically "object". + :ivar Mapping[str, ToolProperty] properties: Dictionary of parameter properties. + :ivar List[str] required: List of required parameter names. + """ + + type: str + properties: Mapping[str, ToolProperty] + required: Optional[List[str]] = None + +@dataclass +class ToolManifest: + """Represents a tool manifest with metadata and parameters. + + :ivar str name: Unique name of the tool. + :ivar str description: Detailed description of the tool's functionality. + :ivar ToolParameters parameters: Schema defining the tool's input parameters. 
+ """ + + name: str + description: str + parameters: ToolParameters + +@dataclass +class RemoteServer: + """Represents remote server configuration for a tool. + + :ivar str projectConnectionId: Identifier for the project connection. + :ivar str protocol: Communication protocol (e.g., "mcp"). + """ + + projectConnectionId: str + protocol: str + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for JSON serialization.""" + return { + "projectConnectionId": self.projectConnectionId, + "protocol": self.protocol + } + +@dataclass +class EnrichedToolEntry(ToolManifest): + """Enriched tool representation with input schema. + + :ivar str name: Name of the tool. + :ivar str description: Description of the tool. + """ + remoteServer: RemoteServer + projectConnectionId: str + protocol: str + inputSchema: Optional[Mapping[str, Any]] = None + tool_definition: Optional[ToolDefinition] = None + +@dataclass +class ToolEntry: + """Represents a single tool entry in the API response. + + :ivar RemoteServer remoteServer: Configuration for the remote server. + :ivar List[ToolManifest] manifest: List of tool manifests provided by this entry. + """ + + remoteServer: RemoteServer + manifest: List[ToolManifest] + +@dataclass +class ToolsResponse: + """Root response model for the tools API. + + :ivar List[ToolEntry] tools: List of tool entries from the API. + """ + + tools: List[ToolEntry] + enriched_tools: List[EnrichedToolEntry] + + @classmethod + def from_dict(cls, data: Mapping[str, Any], tool_definitions: List[ToolDefinition]) -> "ToolsResponse": + """Create a ToolsResponse from a dictionary. + + :param Mapping[str, Any] data: Dictionary representation of the API response. + :return: Parsed ToolsResponse instance. 
+        :rtype: ToolsResponse
+        """
+        tool_definitions_map = {f"{td.type.lower()}_{td.project_connection_id.lower()}": td for td in tool_definitions}
+
+        def tool_definition_lookup(remote_server: RemoteServer) -> Optional[ToolDefinition]:
+            return tool_definitions_map.get(f"{remote_server.protocol.lower()}_{remote_server.projectConnectionId.lower()}")
+
+
+        tools = []
+        flattened_tools = []
+        for tool_data in data.get("tools", []):
+            remote_server = RemoteServer(
+                projectConnectionId=tool_data["remoteServer"]["projectConnectionId"],
+                protocol=tool_data["remoteServer"]["protocol"]
+            )
+
+            manifests = []
+            for manifest_data in tool_data.get("manifest", []):
+                params_data = manifest_data.get("parameters", {})
+                properties = {}
+
+                for prop_name, prop_data in params_data.get("properties", {}).items():
+                    properties[prop_name] = ToolProperty(
+                        type=prop_data.get("type"),
+                        description=prop_data.get("description"),
+                        properties=prop_data.get("properties"),
+                        default=prop_data.get("default"),
+                        required=prop_data.get("required")
+                    )
+
+                parameters = ToolParameters(
+                    type=params_data.get("type", "object"),
+                    properties=properties,
+                    required=params_data.get("required")
+                )
+                manifest = ToolManifest(
+                    name=manifest_data["name"],
+                    description=manifest_data["description"],
+                    parameters=parameters
+                )
+                manifests.append(manifest)
+                tool_definition = tool_definition_lookup(remote_server)
+                flattened_tools.append(EnrichedToolEntry(
+                    projectConnectionId=remote_server.projectConnectionId,
+                    protocol=remote_server.protocol,
+                    name=manifest.name,
+                    description=manifest.description,
+                    parameters=parameters,
+                    remoteServer=remote_server,
+                    inputSchema=parameters,
+                    tool_definition=tool_definition
+                ))
+
+            tools.append(ToolEntry(
+                remoteServer=remote_server,
+                manifest=manifests
+            ))
+
+        return cls(tools=tools, enriched_tools=flattened_tools)
+
+class ResolveToolsRequest:
+    """Represents a request containing remote servers and user information.
+
+    :ivar List[RemoteServer] remoteservers: List of remote server configurations.
+    :ivar UserInfo user: User information.
+    """
+
+    def __init__(self, remoteservers: List[RemoteServer], user: UserInfo) -> None:
+        """Initialize ResolveToolsRequest with servers and user info.
+
+        :param List[RemoteServer] remoteservers: List of remote server configurations.
+        :param UserInfo user: User information.
+        """
+        self.remoteservers = remoteservers
+        self.user: UserInfo = user
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        result = {
+            "remoteservers": [rs.to_dict() for rs in self.remoteservers]
+        }
+        if self.user:
+            # Handle both UserInfo objects and dictionaries
+            if isinstance(self.user, dict):
+                # Validate required fields for dict
+                if self.user.get("objectId") and self.user.get("tenantId"):
+                    result["user"] = {
+                        "objectId": self.user["objectId"],
+                        "tenantId": self.user["tenantId"]
+                    }
+            elif hasattr(self.user, "objectId") and hasattr(self.user, "tenantId"):
+                # UserInfo object
+                if self.user.objectId and self.user.tenantId:
+                    result["user"] = {
+                        "objectId": self.user.objectId,
+                        "tenantId": self.user.tenantId
+                    }
+        return result
+
+
+class ToolConfigurationParser:
+    """Parses and processes tool configuration.
+
+    This class handles parsing and categorizing tool configurations into
+    remote tools (MCP/A2A) and named MCP tools.
+
+    :param Optional[List[Any]] tools_definitions:
+        List of tool configurations to parse. Can be None.
+    """
+
+    def __init__(self, tools_definitions: Optional[List[Any]] = None):
+        """Initialize the parser.
+
+        :param tools_definitions: List of tool configurations (can be dicts or ToolDefinition objects), or None.
+ :type tools_definitions: Optional[List[Any]] + """ + # Convert dictionaries to ToolDefinition objects if needed + self._tools_definitions = [] + for tool_def in (tools_definitions or []): + if isinstance(tool_def, dict): + # Convert dict to ToolDefinition + tool_type = tool_def.get("type") + if tool_type: + self._tools_definitions.append(ToolDefinition(type=tool_type, **{k: v for k, v in tool_def.items() if k != "type"})) + elif isinstance(tool_def, ToolDefinition): + self._tools_definitions.append(tool_def) + + self._remote_tools: List[ToolDefinition] = [] + self._named_mcp_tools: List[ToolDefinition] = [] + self._parse_tools_config() + + def _parse_tools_config(self) -> None: + """Parse tools configuration into categorized lists. + + Separates tool configurations into remote tools (MCP/A2A types) and + named MCP tools based on the 'type' field in each configuration. + """ + for tool_definition in self._tools_definitions: + tool_type = tool_definition.type.lower() + if tool_type in ["mcp", "a2a"]: + self._remote_tools.append(tool_definition) + else: + self._named_mcp_tools.append(tool_definition) + +def to_remote_server(tool_definition: ToolDefinition) -> RemoteServer: + """Convert ToolDefinition to RemoteServer. + + :param ToolDefinition tool_definition: + Tool definition to convert. + :return: Converted RemoteServer instance. + :rtype: RemoteServer + """ + return RemoteServer( + projectConnectionId=tool_definition.project_connection_id, + protocol=tool_definition.type.lower() + ) + + +@dataclass +class MCPToolSchema: + """Represents the input schema for an MCP tool. + + :ivar str type: JSON schema type, typically "object". + :ivar Mapping[str, Any] properties: Dictionary of parameter properties. + :ivar List[str] required: List of required parameter names. + """ + + type: str + properties: Mapping[str, Any] + required: Optional[List[str]] = None + + +@dataclass +class MCPToolMetadata: + """Represents the _meta field for an MCP tool. 
+ + :ivar str type: JSON schema type, typically "object". + :ivar Mapping[str, Any] properties: Dictionary of metadata properties. + :ivar List[str] required: List of required metadata parameter names. + """ + + type: str + properties: Mapping[str, Any] + required: Optional[List[str]] = None + + +@dataclass +class MCPTool: + """Represents a single MCP tool from the tools/list response. + + :ivar str name: Unique name of the tool. + :ivar str title: Display title of the tool. + :ivar str description: Detailed description of the tool's functionality. + :ivar MCPToolSchema inputSchema: Schema defining the tool's input parameters. + :ivar Optional[MCPToolMetadata] _meta: Optional metadata schema for the tool. + """ + + name: str + title: str + description: str + inputSchema: MCPToolSchema + _meta: Optional[MCPToolMetadata] = None + +@dataclass +class EnrichedMCPTool(MCPTool): + """Represents an enriched MCP tool with additional metadata. + + :ivar ToolDefinition tool_definition: Associated tool definition. + """ + tool_definition: Optional[ToolDefinition] = None + +@dataclass +class MCPToolsListResult: + """Represents the result field of an MCP tools/list response. + + :ivar List[MCPTool] tools: List of available MCP tools. + """ + + tools: List[MCPTool] + + +@dataclass +class MCPToolsListResponse: + """Root response model for the MCP tools/list JSON-RPC response. + + :ivar str jsonrpc: JSON-RPC protocol version (e.g., "2.0"). + :ivar int id: Request identifier. + :ivar MCPToolsListResult result: Result containing the list of tools. + """ + + jsonrpc: str + id: int + result: MCPToolsListResult + + @classmethod + def from_dict(cls, data: Mapping[str, Any], tool_definitions: List[ToolDefinition]) -> "MCPToolsListResponse": + """Create an MCPToolsListResponse from a dictionary. + + :param Mapping[str, Any] data: Dictionary representation of the JSON-RPC response. + :return: Parsed MCPToolsListResponse instance. 
+ :rtype: MCPToolsListResponse + """ + result_data = data.get("result", {}) + tools_list = [] + tool_definitions_map = {f"{td.type.lower()}": td for td in tool_definitions} + + for tool_data in result_data.get("tools", []): + # Parse inputSchema + input_schema_data = tool_data.get("inputSchema", {}) + input_schema = MCPToolSchema( + type=input_schema_data.get("type", "object"), + properties=input_schema_data.get("properties", {}), + required=input_schema_data.get("required") + ) + + # Parse _meta if present + meta = None + meta_data = tool_data.get("_meta") + if meta_data: + meta = MCPToolMetadata( + type=meta_data.get("type", "object"), + properties=meta_data.get("properties", {}), + required=meta_data.get("required") + ) + + # Create MCPTool + mcp_tool = EnrichedMCPTool( + name=tool_data["name"], + title=tool_data.get("title", tool_data["name"]), + description=tool_data.get("description", ""), + inputSchema=input_schema, + _meta=meta, + tool_definition=tool_definitions_map.get(tool_data["name"].lower()) + ) + + tools_list.append(mcp_tool) + + # Create result + result = MCPToolsListResult(tools=tools_list) + + return cls( + jsonrpc=data.get("jsonrpc", "2.0"), + id=data.get("id", 0), + result=result + ) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py new file mode 100644 index 000000000000..c0abe5b29bb9 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py @@ -0,0 +1,13 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +from ._client import AzureAIToolClient, FoundryTool +from .._exceptions import OAuthConsentRequiredError, MCPToolApprovalRequiredError + +__all__ = [ + "AzureAIToolClient", + "FoundryTool", + "OAuthConsentRequiredError", + "MCPToolApprovalRequiredError", +] \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py new file mode 100644 index 000000000000..93f550448b5a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -0,0 +1,226 @@ + +from typing import Any, List, Mapping, Union, TYPE_CHECKING + +from azure.core import AsyncPipelineClient +from azure.core.pipeline import policies + +from ._configuration import AzureAIToolClientConfiguration +from .._utils._model_base import InvocationPayloadBuilder +from .._model_base import FoundryTool, ToolSource + +from .operations._operations import MCPToolsOperations, RemoteToolsOperations + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + +class AzureAITool: + """Azure AI tool wrapper for invocation. + + Represents a single tool that can be invoked either via MCP protocol or + Azure AI Tools API. This class provides a convenient interface for tool + invocation and exposes tool metadata. + + :ivar str name: The name of the tool. + :ivar str description: Human-readable description of what the tool does. + :ivar dict metadata: Additional metadata about the tool from the API. + :ivar ~Tool_Client.models.ToolSource source: + The source of the tool (MCP_TOOLS or REMOTE_TOOLS). + + .. admonition:: Example: + + .. literalinclude:: ../samples/simple_example.py + :start-after: [START use_tool] + :end-before: [END use_tool] + :language: python + :dedent: 4 + :caption: Using an AzureAITool instance. 
+ """ + + def __init__(self, client: "AzureAIToolClient", descriptor: FoundryTool) -> None: + """Initialize an Azure AI Tool. + + :param client: Parent client instance for making API calls. + :type client: AzureAIToolClient + :param descriptor: Tool descriptor containing metadata and configuration. + :type descriptor: ~Tool_Client.models.FoundryTool + """ + self._client = client + self._descriptor = descriptor + self.name = descriptor.name + self.description = descriptor.description + self.metadata = dict(descriptor.metadata) + self.source = descriptor.source + + async def invoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool asynchronously. + + :param args: Positional arguments to pass to the tool. + :param kwargs: Keyword arguments to pass to the tool. + :return: The result from the tool invocation. + :rtype: Any + """ + payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) + return await self._client._invoke_tool(self._descriptor, payload) + + async def __call__(self, *args: Any, **kwargs: Any) -> Any: + return await self.invoke(*args, **kwargs) + +class AzureAIToolClient: + """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. + + This client provides access to tools from both MCP (Model Context Protocol) servers + and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. + + :param str endpoint: + The fully qualified endpoint for the Azure AI Agents service. + Example: "https://.api.azureml.ms" + :param credential: + Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str agent_name: + Name of the agent to use for tool operations. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations defining which tools to include. 
+ :keyword Mapping[str, Any] user: + User information for tool invocations (object_id, tenant_id). + :keyword str api_version: + API version to use when communicating with the service. + Default is the latest supported version. + :keyword transport: + Custom transport implementation. Default is RequestsTransport. + :paramtype transport: ~azure.core.pipeline.transport.HttpTransport + + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the asynchronous Azure AI Tool Client. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional keyword arguments for client configuration. + """ + self._config = AzureAIToolClientConfiguration( + endpoint, + credential, + **kwargs, + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + # Initialize specialized clients with client and config + self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) + self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) + + async def list_tools(self) -> List[FoundryTool]: + """List all available tools from configured sources. 
+ + Retrieves tools from both MCP servers and Azure AI Tools API endpoints, + returning them as AzureAITool instances ready for invocation. + :return: List of available tools from all configured sources. + :rtype: List[~AzureAITool] + :raises ~Tool_Client.exceptions.OAuthConsentRequiredError: + Raised when the service requires user OAuth consent. + :raises ~Tool_Client.exceptions.MCPToolApprovalRequiredError: + Raised when tool access requires human approval. + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. + + """ + + existing_names: set[str] = set() + + tools: List[FoundryTool] = [] + + # Fetch MCP tools + mcp_tools = await self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) + # Fetch Tools API tools + tools_api_tools = await self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) + + for tool in tools: + # Capture tool in a closure to avoid shadowing issues + def make_invoker(captured_tool): + async def _invoker(*args, **kwargs): + return await self.invoke_tool(captured_tool, *args, **kwargs) + return _invoker + tool.invoker = make_invoker(tool) + + return tools + + async def invoke_tool( + self, + tool: Union[AzureAITool, str, FoundryTool], + *args: Any, + **kwargs: Any, + ) -> Any: + """Invoke a tool by instance, name, or descriptor. + + :param tool: Tool to invoke, specified as an AzureAITool instance, + tool name string, or FoundryTool. 
+ :type tool: Union[~AzureAITool, str, ~Tool_Client.models.FoundryTool] + :param args: Positional arguments to pass to the tool + """ + descriptor = await self._resolve_tool_descriptor(tool) + payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) + return await self._invoke_tool(descriptor, payload, **kwargs) + + async def _resolve_tool_descriptor( + self, tool: Union[AzureAITool, str, FoundryTool] + ) -> FoundryTool: + """Resolve a tool reference to a descriptor.""" + if isinstance(tool, AzureAITool): + return tool._descriptor + if isinstance(tool, FoundryTool): + return tool + if isinstance(tool, str): + # Fetch all tools and find matching descriptor + descriptors = await self.list_tools() + for descriptor in descriptors: + if descriptor.name == tool or descriptor.key == tool: + return descriptor + raise KeyError(f"Unknown tool: {tool}") + raise TypeError("Tool must be an AsyncAzureAITool, FoundryTool, or registered name/key") + + async def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: + """Invoke a tool descriptor.""" + if descriptor.source is ToolSource.MCP_TOOLS: + return await self._mcp_tools.invoke_tool(descriptor, arguments) + if descriptor.source is ToolSource.REMOTE_TOOLS: + return await self._remote_tools.invoke_tool(descriptor, arguments) + raise ValueError(f"Unsupported tool source: {descriptor.source}") + + async def close(self) -> None: + """Close the underlying HTTP pipeline.""" + await self._client.close() + + async def __aenter__(self) -> "AzureAIToolClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py new file mode 100644 index 
000000000000..79b819863399 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py @@ -0,0 +1,88 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +from typing import Any, Mapping, List, Optional, TYPE_CHECKING + +from azure.core.pipeline import policies + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + +from .._utils._model_base import ToolConfigurationParser + +class AzureAIToolClientConfiguration: + """Configuration for Azure AI Tool Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param str endpoint: + Fully qualified endpoint for the Azure AI Agents service. + :param credential: + Azure TokenCredential for authentication. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str api_version: + API version to use. Default is the latest supported version. + :keyword List[str] credential_scopes: + OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. + :keyword str agent_name: + Name of the agent. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations. + :keyword Mapping[str, Any] user: + User information for tool invocations. + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the configuration. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional configuration options. 
+ """ + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) + + + # Tool configuration + self.agent_name: str = kwargs.pop("agent_name", "$default") + self.tools: Optional[List[Mapping[str, Any]]] = kwargs.pop("tools", None) + self.user: Optional[Mapping[str, Any]] = kwargs.pop("user", None) + + # Initialize tool configuration parser + + self.tool_config = ToolConfigurationParser(self.tools) + + self._configure(**kwargs) + + # Warn about unused kwargs + if kwargs: + import warnings + warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py 
b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py new file mode 100644 index 000000000000..f99646d5fb8b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py @@ -0,0 +1,184 @@ + + +import json +from typing import Any, Dict, List, Mapping, MutableMapping + +from azure.core import AsyncPipelineClient +from ..._exceptions import OAuthConsentRequiredError +from .._configuration import AzureAIToolClientConfiguration + +from ...operations._operations import ( + build_remotetools_invoke_tool_request, + build_remotetools_resolve_tools_request, + prepare_remotetools_invoke_tool_request_content, + prepare_remotetools_resolve_tools_request_content, + build_mcptools_list_tools_request, + prepare_mcptools_list_tools_request_content, + build_mcptools_invoke_tool_request, + prepare_mcptools_invoke_tool_request_content, + API_VERSION, + MCP_ENDPOINT_PATH, + TOOL_PROPERTY_OVERRIDES, + DEFAULT_ERROR_MAP, + MCP_HEADERS, + REMOTE_TOOLS_HEADERS, + prepare_request_headers, + prepare_error_map, + handle_response_error, + build_list_tools_request, + process_list_tools_response, + build_invoke_mcp_tool_request, + build_resolve_tools_request, + process_resolve_tools_response, + build_invoke_remote_tool_request, + process_invoke_remote_tool_response, +) +from ..._model_base import FoundryTool, ToolSource, UserInfo + +from ..._utils._model_base import ToolsResponse, ToolDescriptorBuilder, ToolConfigurationParser, ResolveToolsRequest +from ..._utils._model_base import to_remote_server, MCPToolsListResponse, MetadataMapper + +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.pipeline import PipelineResponse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) + +class MCPToolsOperations: + + def __init__(self, 
*args, **kwargs) -> None: + """Initialize MCP client. + + Parameters + ---------- + client : AsyncPipelineClient + Azure AsyncPipelineClient for HTTP requests + config : AzureAIToolClientConfiguration + Configuration object + """ + input_args = list(args) + self._client : AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + + if self._client is None or self._config is None: + raise ValueError("Both 'client' and 'config' must be provided") + + self._endpoint_path = MCP_ENDPOINT_PATH + self._api_version = API_VERSION + + async def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: + """List MCP tools. + + :return: List of tool descriptors from MCP server. + :rtype: List[FoundryTool] + """ + _request, error_map, remaining_kwargs = build_list_tools_request(self._api_version, kwargs) + + path_format_arguments = {"endpoint": self._config.endpoint} + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) + response = pipeline_response.http_response + + handle_response_error(response, error_map) + return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) + + async def invoke_tool( + self, + tool: FoundryTool, + arguments: Mapping[str, Any], + **kwargs: Any + ) -> Any: + """Invoke an MCP tool. + + :param tool: Tool descriptor for the tool to invoke. + :type tool: FoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Mapping[str, Any] + :return: Result of the tool invocation. 
+ :rtype: Any + """ + _request, error_map = build_invoke_mcp_tool_request(self._api_version, tool, arguments) + + path_format_arguments = {"endpoint": self._config.endpoint} + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **kwargs) + response = pipeline_response.http_response + + handle_response_error(response, error_map) + return response.json().get("result") + +class RemoteToolsOperations: + def __init__(self, *args, **kwargs) -> None: + """Initialize Tools API client. + + :param client: Azure PipelineClient for HTTP requests. + :type client: ~azure.core.PipelineClient + :param config: Configuration object. + :type config: ~Tool_Client.models.AzureAIToolClientConfiguration + :raises ValueError: If required parameters are not provided. + """ + input_args = list(args) + self._client : AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + + if self._client is None or self._config is None: + raise ValueError("Both 'client' and 'config' must be provided") + + + # Apply agent name substitution to endpoint paths + self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" + self._api_version = API_VERSION + + async def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: + """Resolve remote tools from Azure AI Tools API. + + :return: List of tool descriptors from Tools API. 
+ :rtype: List[FoundryTool] + """ + result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) + if result[0] is None: + return [] + + _request, error_map, remaining_kwargs = result + + path_format_arguments = {"endpoint": self._config.endpoint} + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) + response = pipeline_response.http_response + + handle_response_error(response, error_map) + return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) + + async def invoke_tool( + self, + tool: FoundryTool, + arguments: Mapping[str, Any], + ) -> Any: + """Invoke a remote tool. + + :param tool: Tool descriptor to invoke. + :type tool: FoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Mapping[str, Any] + :return: Result of the tool invocation. 
+ :rtype: Any + """ + _request, error_map = build_invoke_remote_tool_request(self.agent, self._api_version, tool, self._config.user, arguments) + + path_format_arguments = {"endpoint": self._config.endpoint} + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + pipeline_response: PipelineResponse = await self._client._pipeline.run(_request) + response = pipeline_response.http_response + + handle_response_error(response, error_map) + return process_invoke_remote_tool_response(response) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py new file mode 100644 index 000000000000..e05e1e84e708 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py @@ -0,0 +1,543 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +import json +import logging +from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union +from azure.core import PipelineClient +from .._configuration import AzureAIToolClientConfiguration +from .._model_base import FoundryTool, ToolSource, UserInfo + +from .._utils._model_base import ToolsResponse, ToolDescriptorBuilder, ToolConfigurationParser, ResolveToolsRequest +from .._utils._model_base import to_remote_server, MCPToolsListResponse, MetadataMapper +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse + +from .._exceptions import OAuthConsentRequiredError + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) + +logger = logging.getLogger(__name__) + +# Shared constants +API_VERSION = "2025-11-15-preview" +MCP_ENDPOINT_PATH = "/mcp_tools" + +# Tool-specific property key overrides +# Format: {"tool_name": {"tool_def_key": "meta_schema_key"}} +TOOL_PROPERTY_OVERRIDES: Dict[str, Dict[str, str]] = { + "image_generation": { + "model": "imagegen_model_deployment_name" + }, + # Add more tool-specific mappings as needed +} + +# Shared error map +DEFAULT_ERROR_MAP: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, +} + +# Shared header configurations +MCP_HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json,text/event-stream", + "Connection": "keep-alive", + "Cache-Control": "no-cache", +} + +REMOTE_TOOLS_HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + +# Helper functions for request/response processing +def prepare_request_headers(base_headers: Dict[str, str], custom_headers: Mapping[str, str] = None) -> Dict[str, str]: + """Prepare request headers by 
merging base and custom headers. + + :param base_headers: Base headers to use + :param custom_headers: Custom headers to merge + :return: Merged headers dictionary + """ + headers = base_headers.copy() + if custom_headers: + headers.update(custom_headers) + return headers + +def prepare_error_map(custom_error_map: Mapping[int, Any] = None) -> MutableMapping: + """Prepare error map by merging default and custom error mappings. + + :param custom_error_map: Custom error mappings to merge + :return: Merged error map + """ + error_map = DEFAULT_ERROR_MAP.copy() + if custom_error_map: + error_map.update(custom_error_map) + return error_map + +def format_and_execute_request( + client: PipelineClient, + request: HttpRequest, + endpoint: str, + **kwargs: Any +) -> HttpResponse: + """Format request URL and execute pipeline. + + :param client: Pipeline client + :param request: HTTP request to execute + :param endpoint: Endpoint URL for formatting + :return: HTTP response + """ + path_format_arguments = {"endpoint": endpoint} + request.url = client.format_url(request.url, **path_format_arguments) + pipeline_response: PipelineResponse = client._pipeline.run(request, **kwargs) + return pipeline_response.http_response + +def handle_response_error(response: HttpResponse, error_map: MutableMapping) -> None: + """Handle HTTP response errors. + + :param response: HTTP response to check + :param error_map: Error map for status code mapping + :raises HttpResponseError: If response status is not 200 + """ + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + +def process_list_tools_response( + response: HttpResponse, + named_mcp_tools: Any, + existing_names: set +) -> List[FoundryTool]: + """Process list_tools response and build descriptors. 
+ + :param response: HTTP response with MCP tools + :param named_mcp_tools: Named MCP tools configuration + :param existing_names: Set of existing tool names + :return: List of tool descriptors + """ + mcp_response = MCPToolsListResponse.from_dict(response.json(), named_mcp_tools) + raw_tools = mcp_response.result.tools + return ToolDescriptorBuilder.build_descriptors( + raw_tools, + ToolSource.MCP_TOOLS, + existing_names, + ) + +def process_resolve_tools_response( + response: HttpResponse, + remote_tools: Any, + existing_names: set +) -> List[FoundryTool]: + """Process resolve_tools response and build descriptors. + + :param response: HTTP response with remote tools + :param remote_tools: Remote tools configuration + :param existing_names: Set of existing tool names + :return: List of tool descriptors + """ + toolResponse = ToolsResponse.from_dict(response.json(), remote_tools) + return ToolDescriptorBuilder.build_descriptors( + toolResponse.enriched_tools, + ToolSource.REMOTE_TOOLS, + existing_names, + ) + +def build_list_tools_request( + api_version: str, + kwargs: Dict[str, Any] +) -> Tuple[HttpRequest, MutableMapping, Dict[str, str]]: + """Build request for listing MCP tools. 
+ + :param api_version: API version + :param kwargs: Additional arguments (headers, params, error_map) + :return: Tuple of (request, error_map, params) + """ + error_map = prepare_error_map(kwargs.pop("error_map", None)) + _headers = prepare_request_headers(MCP_HEADERS, kwargs.pop("headers", None)) + _params = kwargs.pop("params", {}) or {} + + _content = prepare_mcptools_list_tools_request_content() + content = json.dumps(_content) + _request = build_mcptools_list_tools_request(api_version=api_version, headers=_headers, params=_params, content=content) + + return _request, error_map, kwargs + +def build_invoke_mcp_tool_request( + api_version: str, + tool: FoundryTool, + arguments: Mapping[str, Any], + **kwargs: Any +) -> Tuple[HttpRequest, MutableMapping]: + """Build request for invoking MCP tool. + + :param api_version: API version + :param tool: Tool descriptor + :param arguments: Tool arguments + :return: Tuple of (request, error_map) + """ + error_map = prepare_error_map() + _headers = prepare_request_headers(MCP_HEADERS) + _params = {} + + _content = prepare_mcptools_invoke_tool_request_content(tool, arguments, TOOL_PROPERTY_OVERRIDES) + logger.info("Invoking MCP tool: %s with arguments: %s", tool.name, dict(arguments)) + content = json.dumps(_content) + _request = build_mcptools_invoke_tool_request(api_version=api_version, headers=_headers, params=_params, content=content) + + return _request, error_map + +def build_resolve_tools_request( + agent_name: str, + api_version: str, + tool_config: ToolConfigurationParser, + user: UserInfo, + kwargs: Dict[str, Any] +) -> Union[Tuple[HttpRequest, MutableMapping, Dict[str, Any]], Tuple[None, None, None]]: + """Build request for resolving remote tools. 
+ + :param agent_name: Agent name + :param api_version: API version + :param tool_config: Tool configuration + :param user: User info + :param kwargs: Additional arguments + :return: Tuple of (request, error_map, remaining_kwargs) or (None, None, None) + """ + error_map = prepare_error_map(kwargs.pop("error_map", None)) + _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS, kwargs.pop("headers", None)) + _params = kwargs.pop("params", {}) or {} + + _content = prepare_remotetools_resolve_tools_request_content(tool_config, user) + if _content is None: + return None, None, None + + content = json.dumps(_content.to_dict()) + _request = build_remotetools_resolve_tools_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) + + return _request, error_map, kwargs + +def build_invoke_remote_tool_request( + agent_name: str, + api_version: str, + tool: FoundryTool, + user: UserInfo, + arguments: Mapping[str, Any] +) -> Tuple[HttpRequest, MutableMapping]: + """Build request for invoking remote tool. + + :param agent_name: Agent name + :param api_version: API version + :param tool: Tool descriptor + :param user: User info + :param arguments: Tool arguments + :return: Tuple of (request, error_map) + """ + error_map = prepare_error_map() + _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS) + _params = {} + + _content = prepare_remotetools_invoke_tool_request_content(tool, user, arguments) + content = json.dumps(_content) + _request = build_remotetools_invoke_tool_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) + + return _request, error_map + +def process_invoke_remote_tool_response(response: HttpResponse) -> Any: + """Process remote tool invocation response. 
+ + :param response: HTTP response + :return: Tool result + :raises OAuthConsentRequiredError: If OAuth consent is required + """ + payload = response.json() + response_type = payload.get("type") + result = payload.get("toolResult") + + if response_type == "OAuthConsentRequired": + raise OAuthConsentRequiredError(result.get("message"), consent_url=result.get("consentUrl"), payload=payload) + return result + +class MCPToolsOperations: + + def __init__(self, *args, **kwargs) -> None: + """Initialize MCP client. + + Parameters + ---------- + client : PipelineClient + Azure PipelineClient for HTTP requests + config : AzureAIToolClientConfiguration + Configuration object + """ + input_args = list(args) + self._client : PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + + if self._client is None or self._config is None: + raise ValueError("Both 'client' and 'config' must be provided") + + self._endpoint_path = MCP_ENDPOINT_PATH + self._api_version = API_VERSION + + def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: + """List MCP tools. + + :return: List of tool descriptors from MCP server. + :rtype: List[FoundryTool] + """ + _request, error_map, remaining_kwargs = build_list_tools_request(self._api_version, kwargs) + response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) + handle_response_error(response, error_map) + return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) + + def invoke_tool( + self, + tool: FoundryTool, + arguments: Mapping[str, Any], + **kwargs: Any + ) -> Any: + """Invoke an MCP tool. + + :param tool: Tool descriptor for the tool to invoke. + :type tool: FoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Mapping[str, Any] + :return: Result of the tool invocation. 
+ :rtype: Any + """ + _request, error_map = build_invoke_mcp_tool_request(self._api_version, tool, arguments) + response = format_and_execute_request(self._client, _request, self._config.endpoint, **kwargs) + handle_response_error(response, error_map) + return response.json().get("result") + +def prepare_mcptools_list_tools_request_content() -> Any: + return { + "jsonrpc": "2.0", + "id": 1, + "method": "tools/list", + "params": {} + } + +def build_mcptools_list_tools_request( + api_version: str, + headers: Mapping[str, str] = None, + params: Mapping[str, str] = None, + **kwargs: Any + ) -> HttpRequest: + """Build the HTTP request for listing MCP tools. + + :param api_version: API version to use. + :type api_version: str + :param headers: Additional headers for the request. + :type headers: Mapping[str, str], optional + :param params: Query parameters for the request. + :type params: Mapping[str, str], optional + :return: Constructed HttpRequest object. + :rtype: ~azure.core.rest.HttpRequest + """ + _headers = headers or {} + _params = params or {} + _params["api-version"] = api_version + + _url = f"/mcp_tools" + return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) + +def prepare_mcptools_invoke_tool_request_content(tool: FoundryTool, arguments: Mapping[str, Any], tool_overrides: Dict[str, Dict[str, str]]) -> Any: + + params = { + "name": tool.name, + "arguments": dict(arguments), + } + + if tool.tool_definition: + + key_overrides = tool_overrides.get(tool.name, {}) + meta_config = MetadataMapper.prepare_metadata_dict( + tool.metadata, + tool.tool_definition.__dict__ if hasattr(tool.tool_definition, '__dict__') else tool.tool_definition, + key_overrides + ) + if meta_config: + params["_meta"] = meta_config + logger.info("Prepared MCP tool invocation params: %s", params) + payload = { + "jsonrpc": "2.0", + "id": 2, + "method": "tools/call", + "params": params + } + return payload + +def build_mcptools_invoke_tool_request( + 
api_version: str, + headers: Mapping[str, str] = None, + params: Mapping[str, str] = None, + **kwargs: Any +) -> HttpRequest: + """Build the HTTP request for invoking an MCP tool. + + :param api_version: API version to use. + :type api_version: str + :param headers: Additional headers for the request. + :type headers: Mapping[str, str], optional + :param params: Query parameters for the request. + :type params: Mapping[str, str], optional + :return: Constructed HttpRequest object. + :rtype: ~azure.core.rest.HttpRequest + """ + _headers = headers or {} + _params = params or {} + _params["api-version"] = api_version + + _url = f"/mcp_tools" + return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) + +class RemoteToolsOperations: + def __init__(self, *args, **kwargs) -> None: + """Initialize Tools API client. + + :param client: Azure PipelineClient for HTTP requests. + :type client: ~azure.core.PipelineClient + :param config: Configuration object. + :type config: ~Tool_Client.models.AzureAIToolClientConfiguration + :raises ValueError: If required parameters are not provided. + """ + input_args = list(args) + self._client : PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config : AzureAIToolClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + + if self._client is None or self._config is None: + raise ValueError("Both 'client' and 'config' must be provided") + + + # Apply agent name substitution to endpoint paths + self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" + self._api_version = API_VERSION + + def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: + """Resolve remote tools from Azure AI Tools API. + + :return: List of tool descriptors from Tools API. 
+ :rtype: List[FoundryTool] + """ + result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) + if result[0] is None: + return [] + + _request, error_map, remaining_kwargs = result + response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) + handle_response_error(response, error_map) + return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) + + def invoke_tool( + self, + tool: FoundryTool, + arguments: Mapping[str, Any], + ) -> Any: + """Invoke a remote tool. + + :param tool: Tool descriptor to invoke. + :type tool: FoundryTool + :param arguments: Input arguments for the tool. + :type arguments: Mapping[str, Any] + :return: Result of the tool invocation. + :rtype: Any + """ + _request, error_map = build_invoke_remote_tool_request(self.agent, self._api_version, tool, self._config.user, arguments) + response = format_and_execute_request(self._client, _request, self._config.endpoint) + handle_response_error(response, error_map) + return process_invoke_remote_tool_response(response) + +def prepare_remotetools_invoke_tool_request_content(tool: FoundryTool, user: UserInfo, arguments: Mapping[str, Any]) -> Any: + payload = { + "toolName": tool.name, + "arguments": dict(arguments), + "remoteServer": to_remote_server(tool.tool_definition).to_dict(), + } + if user: + # Handle both UserInfo objects and dictionaries + if isinstance(user, dict): + if user.get("objectId") and user.get("tenantId"): + payload["user"] = { + "objectId": user["objectId"], + "tenantId": user["tenantId"], + } + elif hasattr(user, "objectId") and hasattr(user, "tenantId"): + if user.objectId and user.tenantId: + payload["user"] = { + "objectId": user.objectId, + "tenantId": user.tenantId, + } + return payload + +def build_remotetools_invoke_tool_request( + agent_name: str, + api_version: str, + headers: Mapping[str, str] = None, + params: 
Mapping[str, str] = None, + **kwargs: Any + ) -> HttpRequest: + """Build the HTTP request for invoking a remote tool. + + :param api_version: API version to use. + :type api_version: str + :param headers: Additional headers for the request. + :type headers: Mapping[str, str], optional + :param params: Query parameters for the request. + :type params: Mapping[str, str], optional + :return: Constructed HttpRequest object. + :rtype: ~azure.core.rest.HttpRequest + """ + _headers = headers or {} + _params = params or {} + _params["api-version"] = api_version + + _url = f"/agents/{agent_name}/tools/invoke" + return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) + + +def prepare_remotetools_resolve_tools_request_content(tool_config: ToolConfigurationParser, user: UserInfo = None) -> ResolveToolsRequest: + resolve_tools_request: ResolveToolsRequest = None + if tool_config._remote_tools: + remote_servers = [] + for remote_tool in tool_config._remote_tools: + remote_servers.append(to_remote_server(remote_tool)) + resolve_tools_request = ResolveToolsRequest(remote_servers, user=user) + + return resolve_tools_request + +def build_remotetools_resolve_tools_request( + agent_name: str, + api_version: str, + headers: Mapping[str, str] = None, + params: Mapping[str, str] = None, + **kwargs: Any + ) -> HttpRequest: + """Build the HTTP request for resolving remote tools. + + :param api_version: API version to use. + :type api_version: str + :param headers: Additional headers for the request. + :type headers: Mapping[str, str], optional + :param params: Query parameters for the request. + :type params: Mapping[str, str], optional + :return: Constructed HttpRequest object. 
+ :rtype: ~azure.core.rest.HttpRequest + """ + _headers = headers or {} + _params = params or {} + _params["api-version"] = api_version + + _url = f"/agents/{agent_name}/tools/resolve" + return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) + \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py index a38f55408c7f..820d54c6cea0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py @@ -10,3 +10,4 @@ class CreateResponse(response_create_params.ResponseCreateParamsBase, total=False): # type: ignore agent: Optional[_azure_ai_projects_models.AgentReference] stream: Optional[bool] + tools: Optional[list[_azure_ai_projects_models.Tool]] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index c3f001245133..cd0d7ed75896 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -7,7 +7,7 @@ import os import traceback from abc import abstractmethod -from typing import Any, AsyncGenerator, Generator, Union +from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn from opentelemetry import context as otel_context, trace @@ -28,24 +28,31 @@ ) from .common.agent_run_context import AgentRunContext +from ..client.tools.aio._client import AzureAIToolClient +from ..client.tools._utils._model_base import ToolDefinition, UserInfo + logger = get_logger() DEBUG_ERRORS = os.environ.get(Constants.AGENT_DEBUG_ERRORS, "false").lower() == "true" class 
AgentRunContextMiddleware(BaseHTTPMiddleware): - def __init__(self, app: ASGIApp): + def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None): super().__init__(app) + self.agent = agent async def dispatch(self, request: Request, call_next): + user_info = {} if request.url.path in ("/runs", "/responses"): try: + user_info = self.set_user_info_to_context_var(request) self.set_request_id_to_context_var(request) payload = await request.json() except Exception as e: logger.error(f"Invalid JSON payload: {e}") return JSONResponse({"error": f"Invalid JSON payload: {e}"}, status_code=400) try: - request.state.agent_run_context = AgentRunContext(payload) + agent_tools = self.agent.tools if self.agent else [] + request.state.agent_run_context = AgentRunContext(payload, user_info=user_info, agent_tools=agent_tools) self.set_run_context_to_context_var(request.state.agent_run_context) except Exception as e: logger.error(f"Context build failed: {e}.", exc_info=True) @@ -80,9 +87,32 @@ def set_run_context_to_context_var(self, run_context): ctx.update(res) request_context.set(ctx) + def set_user_info_to_context_var(self, request): + user_info: UserInfo = {} + try: + object_id_header = request.headers.get("x-aml-oid", None) + tenant_id_header = request.headers.get("x-aml-tenant-id", None) + + if object_id_header: + user_info["object_id"] = object_id_header + if tenant_id_header: + user_info["tenant_id"] = tenant_id_header + + except Exception as e: + logger.error(f"Failed to parse X-User-Info header: {e}", exc_info=True) + if user_info: + ctx = request_context.get() or {} + for key, value in user_info.items(): + ctx[f"azure.ai.agentserver.user.{key}"] = str(value) + request_context.set(ctx) + return user_info + class FoundryCBAgent: - def __init__(self): + def __init__(self, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> None: + self.credentials = credentials + self.tools = kwargs.get("tools", []) + async def runs_endpoint(request): # Set 
up tracing context and span context = request.state.agent_run_context @@ -200,7 +230,7 @@ async def readiness_endpoint(request): allow_methods=["*"], allow_headers=["*"], ) - self.app.add_middleware(AgentRunContextMiddleware) + self.app.add_middleware(AgentRunContextMiddleware, agent=self) @self.app.on_event("startup") async def attach_appinsights_logger(): @@ -303,7 +333,17 @@ def setup_otlp_exporter(self, endpoint, provider): provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") + def get_tool_client(self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo]) -> AzureAIToolClient: + if not self.credentials: + raise ValueError("Credentials are required to create Tool Client.") + return AzureAIToolClient( + endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), + credential=self.credentials, + tools = tools, + user = user_info, + ) + def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: event_data = json.dumps(event.as_dict()) if event.type: diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 2703f66f6ff2..89def295ef0c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -1,23 +1,27 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- +from typing import Any from ...logger import get_logger from ...models import CreateResponse from ...models.projects import AgentId, AgentReference, ResponseConversation1 from .id_generator.foundry_id_generator import FoundryIdGenerator from .id_generator.id_generator import IdGenerator +from ...client.tools.aio._client import AzureAIToolClient logger = get_logger() class AgentRunContext: - def __init__(self, payload: dict): + def __init__(self, payload: dict, **kwargs: Any) -> None: self._raw_payload = payload self._request = _deserialize_create_response(payload) self._id_generator = FoundryIdGenerator.from_request(payload) self._response_id = self._id_generator.response_id self._conversation_id = self._id_generator.conversation_id self._stream = self.request.get("stream", False) + self._user_info = kwargs.get("user_info", {}) + self._agent_tools = kwargs.get("agent_tools", []) @property def raw_payload(self) -> dict: @@ -60,13 +64,26 @@ def get_conversation_object(self) -> ResponseConversation1: return None # type: ignore return ResponseConversation1(id=self._conversation_id) - + def get_tools(self) -> list: + # request tools take precedence over agent tools + request_tools = self.request.get("tools", []) + if not request_tools: + return self._agent_tools + + return request_tools + + def get_user_info(self) -> dict: + return self._user_info def _deserialize_create_response(payload: dict) -> CreateResponse: _deserialized = CreateResponse(**payload) raw_agent_reference = payload.get("agent") if raw_agent_reference: _deserialized["agent"] = _deserialize_agent_reference(raw_agent_reference) + + tools = payload.get("tools") + if tools: + _deserialized["tools"] = [tool for tool in tools] return _deserialized diff --git a/sdk/agentserver/azure-ai-agentserver-core/cspell.json b/sdk/agentserver/azure-ai-agentserver-core/cspell.json index 126cadc0625c..17fb91b1e58f 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-core/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-core/cspell.json @@ -16,7 +16,9 @@ "GETFL", "DETFL", "SETFL", - "Planifica" + "Planifica", + "mcptools", + "ainvoke" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py new file mode 100644 index 000000000000..52648465e151 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_with_tools_test.py @@ -0,0 +1,108 @@ +# mypy: ignore-errors +import datetime + +from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent +from azure.ai.agentserver.core.models import Response as OpenAIResponse +from azure.ai.agentserver.core.models.projects import ( + ItemContentOutputText, + ResponseCompletedEvent, + ResponseCreatedEvent, + ResponseOutputItemAddedEvent, + ResponsesAssistantMessageItemResource, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, +) + +from azure.identity import DefaultAzureCredential + +def stream_events(text: str, context: AgentRunContext): + item_id = context.id_generator.generate_message_id() + + assembled = "" + yield ResponseCreatedEvent(response=OpenAIResponse(output=[])) + yield ResponseOutputItemAddedEvent( + output_index=0, + item=ResponsesAssistantMessageItemResource( + id=item_id, + status="in_progress", + content=[ + ItemContentOutputText( + text="", + annotations=[], + ) + ], + ), + ) + for i, token in enumerate(text.split(" ")): + piece = token if i == len(text.split(" ")) - 1 else token + " " + assembled += piece + yield ResponseTextDeltaEvent(output_index=0, content_index=0, delta=piece) + # Done with text + yield ResponseTextDoneEvent(output_index=0, content_index=0, text=assembled) + yield ResponseCompletedEvent( + response=OpenAIResponse( + metadata={}, + temperature=0.0, + 
top_p=0.0, + user="me", + id=context.response_id, + created_at=datetime.datetime.now(), + output=[ + ResponsesAssistantMessageItemResource( + id=item_id, + status="completed", + content=[ + ItemContentOutputText( + text=assembled, + annotations=[], + ) + ], + ) + ], + ) + ) + + +async def agent_run(context: AgentRunContext): + agent = context.request.get("agent") + print(f"agent:{agent}") + + if context.stream: + return stream_events( + "I am mock agent with no intelligence in stream mode.", context + ) + + tool = await my_agent.get_tool_client().list_tools() + tool_list = [t.name for t in tool] + # Build assistant output content + output_content = [ + ItemContentOutputText( + text="I am mock agent with no intelligence with tools " + str(tool_list), + annotations=[], + ) + ] + my_agent.get_tool_client() # just to illustrate we can access tool client from context + response = OpenAIResponse( + metadata={}, + temperature=0.0, + top_p=0.0, + user="me", + id=context.response_id, + created_at=datetime.datetime.now(), + output=[ + ResponsesAssistantMessageItemResource( + id=context.id_generator.generate_message_id(), + status="completed", + content=output_content, + ) + ], + ) + return response + +credentials = DefaultAzureCredential() + +my_agent = FoundryCBAgent(credentials=credentials) +my_agent.agent_run = agent_run + +if __name__ == "__main__": + my_agent.run() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index ed2e0d4d493a..06cfe3bd8489 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -3,19 +3,22 @@ # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) -from typing import TYPE_CHECKING, Optional 
+from typing import TYPE_CHECKING, Optional, Any from ._version import VERSION if TYPE_CHECKING: # pragma: no cover from . import models + from azure.core.credentials_async import AsyncTokenCredential -def from_langgraph(agent, state_converter: Optional["models.LanggraphStateConverter"] = None): +def from_langgraph(agent, credentials: Optional["AsyncTokenCredential"] = None, state_converter: Optional["models.LanggraphStateConverter"] = None, **kwargs: Any) -> "LangGraphAdapter": from .langgraph import LangGraphAdapter - return LangGraphAdapter(agent, state_converter=state_converter) + return LangGraphAdapter(agent, credentials=credentials, state_converter=state_converter, **kwargs) +from .tool_client import ToolClient -__all__ = ["from_langgraph"] + +__all__ = ["from_langgraph", "ToolClient"] __version__ = VERSION diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 0d2b60bac248..27b302e29a18 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -5,7 +5,7 @@ # mypy: disable-error-code="assignment,arg-type" import os import re -from typing import Optional +from typing import TYPE_CHECKING, Any, Awaitable, Protocol, Union, Optional, List from langchain_core.runnables import RunnableConfig from langgraph.graph.state import CompiledStateGraph @@ -20,42 +20,177 @@ LanggraphStateConverter, ) from .models.utils import is_state_schema_valid +from .tool_client import ToolClient +from langchain_core.tools import StructuredTool + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + logger = get_logger() +class GraphFactory(Protocol): + """Protocol for graph factory functions. 
+ + A graph factory is a callable that takes a ToolClient and returns + a CompiledStateGraph, either synchronously or asynchronously. + """ + + def __call__(self, tools: List[StructuredTool]) -> Union[CompiledStateGraph, Awaitable[CompiledStateGraph]]: + """Create a CompiledStateGraph using the provided ToolClient. + + :param tools: The list of StructuredTool instances. + :type tools: List[StructuredTool] + :return: A compiled LangGraph state graph, or an awaitable that resolves to one. + :rtype: Union[CompiledStateGraph, Awaitable[CompiledStateGraph]] + """ + ... + + class LangGraphAdapter(FoundryCBAgent): """ Adapter for LangGraph Agent. """ - def __init__(self, graph: CompiledStateGraph, state_converter: Optional[LanggraphStateConverter] = None): + def __init__(self, graph: Union[CompiledStateGraph, GraphFactory], credentials: "Optional[AsyncTokenCredential]" = None, state_converter: "Optional[LanggraphStateConverter]" = None, **kwargs: Any) -> None: """ - Initialize the LangGraphAdapter with a CompiledStateGraph. + Initialize the LangGraphAdapter with a CompiledStateGraph or a function that returns one. - :param graph: The LangGraph StateGraph to adapt. - :type graph: CompiledStateGraph + :param graph: The LangGraph StateGraph to adapt, or a callable that takes ToolClient and returns CompiledStateGraph (sync or async). + :type graph: Union[CompiledStateGraph, GraphFactory] + :param credentials: Azure credentials for authentication. + :type credentials: Optional[AsyncTokenCredential] :param state_converter: custom state converter. Required if graph state is not MessagesState. 
:type state_converter: Optional[LanggraphStateConverter] """ - super().__init__() - self.graph = graph + super().__init__(credentials=credentials, **kwargs) + self._graph_or_factory: Union[CompiledStateGraph, GraphFactory] = graph + self._resolved_graph: "Optional[CompiledStateGraph]" = None self.azure_ai_tracer = None - if not state_converter: - if is_state_schema_valid(self.graph.builder.state_schema): - self.state_converter = LanggraphMessageStateConverter() + + # If graph is already compiled, validate and set up state converter + if isinstance(graph, CompiledStateGraph): + self._resolved_graph = graph + if not state_converter: + if is_state_schema_valid(self._resolved_graph.builder.state_schema): + self.state_converter = LanggraphMessageStateConverter() + else: + raise ValueError("state_converter is required for non-MessagesState graph.") else: - raise ValueError("state_converter is required for non-MessagesState graph.") + self.state_converter = state_converter else: + # Defer validation until graph is resolved self.state_converter = state_converter + @property + def graph(self) -> "Optional[CompiledStateGraph]": + """ + Get the resolved graph. This property provides backward compatibility. + + :return: The resolved CompiledStateGraph if available, None otherwise. 
+ :rtype: Optional[CompiledStateGraph] + """ + return self._resolved_graph + async def agent_run(self, context: AgentRunContext): - input_data = self.state_converter.request_to_state(context) - logger.debug(f"Converted input data: {input_data}") - if not context.stream: - response = await self.agent_run_non_stream(input_data, context) - return response - return self.agent_run_astream(input_data, context) + # Resolve graph - always resolve if it's a factory function to get fresh graph each time + # For factories, get a new graph instance per request to avoid concurrency issues + tool_client = None + try: + if callable(self._graph_or_factory): + graph, tool_client = await self._resolve_graph_for_request(context) + elif self._resolved_graph is None: + await self._resolve_graph(context) + graph = self._resolved_graph + else: + graph = self._resolved_graph + + input_data = self.state_converter.request_to_state(context) + logger.debug(f"Converted input data: {input_data}") + if not context.stream: + response = await self.agent_run_non_stream(input_data, context, graph) + return response + return self.agent_run_astream(input_data, context, graph, tool_client) + finally: + # Close tool_client if it was created for this request + if tool_client is not None: + try: + await tool_client.close() + logger.debug("Closed tool_client after request processing") + except Exception as e: + logger.warning(f"Error closing tool_client: {e}") + + async def _resolve_graph(self, context: AgentRunContext): + """ + Resolve the graph if it's a factory function (for single-use/first-time resolution). + Creates a ToolClient and calls the factory function with it. + This is used for the initial resolution to set up state_converter. 
+ """ + if callable(self._graph_or_factory): + logger.debug("Resolving graph from factory function") + + + # Create ToolClient with credentials + tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) + tool_client_wrapper = ToolClient(tool_client) + tools = await tool_client_wrapper.list_tools() + # Call the factory function with ToolClient + # Support both sync and async factories + import inspect + result = self._graph_or_factory(tools) + if inspect.iscoroutine(result): + self._resolved_graph = await result + else: + self._resolved_graph = result + + # Validate and set up state converter if not already set from initialization + if not self.state_converter: + if is_state_schema_valid(self._resolved_graph.builder.state_schema): + self.state_converter = LanggraphMessageStateConverter() + else: + raise ValueError("state_converter is required for non-MessagesState graph.") + + logger.debug("Graph resolved successfully") + else: + # Should not reach here, but just in case + self._resolved_graph = self._graph_or_factory + + async def _resolve_graph_for_request(self, context: AgentRunContext): + """ + Resolve a fresh graph instance for a single request to avoid concurrency issues. + Creates a ToolClient and calls the factory function with it. + This method returns a new graph instance and the tool_client for cleanup. + + :param context: The context for the agent run. + :type context: AgentRunContext + :return: A tuple of (compiled graph instance, tool_client wrapper). 
+ :rtype: tuple[CompiledStateGraph, ToolClient] + """ + logger.debug("Resolving fresh graph from factory function for request") + + # Create ToolClient with credentials + tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) + tool_client_wrapper = ToolClient(tool_client) + tools = await tool_client_wrapper.list_tools() + # Call the factory function with ToolClient + # Support both sync and async factories + import inspect + result = self._graph_or_factory(tools) + if inspect.iscoroutine(result): + graph = await result + else: + graph = result + + # Ensure state converter is set up (use existing one or create new) + if not self.state_converter: + if is_state_schema_valid(graph.builder.state_schema): + self.state_converter = LanggraphMessageStateConverter() + else: + raise ValueError("state_converter is required for non-MessagesState graph.") + + logger.debug("Fresh graph resolved successfully for request") + return graph, tool_client_wrapper def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): # set env vars for langsmith @@ -85,7 +220,7 @@ def get_trace_attributes(self): attrs["service.namespace"] = "azure.ai.agentserver.langgraph" return attrs - async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext): + async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext, graph: CompiledStateGraph): """ Run the agent with non-streaming response. @@ -93,6 +228,8 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext) :type input_data: dict :param context: The context for the agent run. :type context: AgentRunContext + :param graph: The compiled graph instance to use for this request. + :type graph: CompiledStateGraph :return: The response of the agent run. 
:rtype: dict @@ -101,14 +238,14 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext) try: config = self.create_runnable_config(context) stream_mode = self.state_converter.get_stream_mode(context) - result = await self.graph.ainvoke(input_data, config=config, stream_mode=stream_mode) + result = await graph.ainvoke(input_data, config=config, stream_mode=stream_mode) output = self.state_converter.state_to_response(result, context) return output except Exception as e: logger.error(f"Error during agent run: {e}") raise e - async def agent_run_astream(self, input_data: dict, context: AgentRunContext): + async def agent_run_astream(self, input_data: dict, context: AgentRunContext, graph: CompiledStateGraph, tool_client: "Optional[ToolClient]" = None): """ Run the agent with streaming response. @@ -116,6 +253,10 @@ async def agent_run_astream(self, input_data: dict, context: AgentRunContext): :type input_data: dict :param context: The context for the agent run. :type context: AgentRunContext + :param graph: The compiled graph instance to use for this request. + :type graph: CompiledStateGraph + :param tool_client: Optional ToolClient to close after streaming completes. + :type tool_client: Optional[ToolClient] :return: An async generator yielding the response stream events. 
:rtype: AsyncGenerator[dict] @@ -124,12 +265,20 @@ async def agent_run_astream(self, input_data: dict, context: AgentRunContext): logger.info(f"Starting streaming agent run {context.response_id}") config = self.create_runnable_config(context) stream_mode = self.state_converter.get_stream_mode(context) - stream = self.graph.astream(input=input_data, config=config, stream_mode=stream_mode) + stream = graph.astream(input=input_data, config=config, stream_mode=stream_mode) async for result in self.state_converter.state_to_response_stream(stream, context): yield result except Exception as e: logger.error(f"Error during streaming agent run: {e}") raise e + finally: + # Close tool_client if provided + if tool_client is not None: + try: + await tool_client._tool_client.close() + logger.debug("Closed tool_client after streaming completed") + except Exception as e: + logger.warning(f"Error closing tool_client in stream: {e}") def create_runnable_config(self, context: AgentRunContext) -> RunnableConfig: """ diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py new file mode 100644 index 000000000000..5a5b75c13a03 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -0,0 +1,211 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +"""Tool client for integrating AzureAIToolClient with LangGraph.""" + +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from langchain_core.tools import StructuredTool +from pydantic import BaseModel, Field, create_model + +if TYPE_CHECKING: + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool + + +class ToolClient: + """Client that integrates AzureAIToolClient with LangGraph. + + This class provides methods to list tools from AzureAIToolClient and convert them + to LangChain BaseTool format, as well as invoke tools in a format compatible with + LangGraph's create_react_agent and StateGraph. + + :param tool_client: The AzureAIToolClient instance to use for tool operations. + :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient + + .. admonition:: Example: + + .. code-block:: python + + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient + from azure.ai.agentserver.langgraph import ToolClient + from azure.identity.aio import DefaultAzureCredential + + async with DefaultAzureCredential() as credential: + tool_client = AzureAIToolClient( + endpoint="https://", + credential=credential + ) + + client = ToolClient(tool_client) + + # List tools as LangChain BaseTool instances + tools = await client.list_tools() + + # Use with create_react_agent + from langgraph.prebuilt import create_react_agent + from langchain_openai import AzureChatOpenAI + + model = AzureChatOpenAI(model="gpt-4o") + agent = create_react_agent(model, tools) + + # Invoke a tool directly + result = await client.invoke_tool( + tool_name="my_tool", + tool_input={"param": "value"} + ) + + :meta private: + """ + + def __init__(self, tool_client: "AzureAIToolClient") -> None: + """Initialize the ToolClient. + + :param tool_client: The AzureAIToolClient instance to use for tool operations. 
+ :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient + """ + self._tool_client = tool_client + self._langchain_tools_cache: List[StructuredTool] = None + + async def list_tools(self) -> List[StructuredTool]: + """List all available tools as LangChain BaseTool instances. + + Retrieves tools from AzureAIToolClient and converts them to LangChain + StructuredTool instances that can be used with LangGraph's create_react_agent + or StateGraph. + + :return: List of LangChain StructuredTool instances. + :rtype: List[~langchain_core.tools.StructuredTool] + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. + + .. admonition:: Example: + + .. code-block:: python + + client = ToolClient(tool_client) + tools = await client.list_tools() + + # Use with create_react_agent + agent = create_react_agent(model, tools) + """ + # Get tools from AzureAIToolClient + if self._langchain_tools_cache is not None: + return self._langchain_tools_cache + + azure_tools = await self._tool_client.list_tools() + self._langchain_tools_cache = [] + # Convert to LangChain StructuredTool instances + for azure_tool in azure_tools: + langchain_tool = self._convert_to_langchain_tool(azure_tool) + self._langchain_tools_cache.append(langchain_tool) + + return self._langchain_tools_cache + + def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredTool: + """Convert an AzureAITool to a LangChain StructuredTool. + + :param azure_tool: The AzureAITool to convert. + :type azure_tool: ~azure.ai.agentserver.core.client.tools.aio.AzureAITool + :return: A LangChain StructuredTool instance. 
+ :rtype: ~langchain_core.tools.StructuredTool + """ + # Get the input schema from the tool descriptor + input_schema = azure_tool.input_schema or {} + + # Create a Pydantic model for the tool's input schema + args_schema = self._create_pydantic_model( + tool_name=azure_tool.name, + schema=input_schema + ) + + # Create an async function that invokes the tool + async def tool_func(**kwargs: Any) -> str: + """Invoke the Azure AI tool.""" + result = await azure_tool(**kwargs) + # Convert result to string for LangChain compatibility + if isinstance(result, dict): + import json + return json.dumps(result) + return str(result) + + # Create a StructuredTool with the async function + structured_tool = StructuredTool( + name=azure_tool.name, + description=azure_tool.description or "No description available", + coroutine=tool_func, + args_schema=args_schema, + ) + + return structured_tool + + def _create_pydantic_model( + self, + tool_name: str, + schema: Dict[str, Any] + ) -> type[BaseModel]: + """Create a Pydantic model from a JSON schema. + + :param tool_name: Name of the tool (used for model name). + :type tool_name: str + :param schema: JSON schema for the tool's input parameters. + :type schema: Dict[str, Any] + :return: A Pydantic model class. 
+ :rtype: type[BaseModel] + """ + # Get properties from schema + properties = schema.get("properties", {}) + required_fields = schema.get("required", []) + + # Build field definitions for Pydantic model + field_definitions = {} + for prop_name, prop_schema in properties.items(): + prop_type = self._json_type_to_python_type(prop_schema.get("type", "string")) + prop_description = prop_schema.get("description", "") + + # Determine if field is required + is_required = prop_name in required_fields + + if is_required: + field_definitions[prop_name] = ( + prop_type, + Field(..., description=prop_description) + ) + else: + field_definitions[prop_name] = ( + Optional[prop_type], + Field(None, description=prop_description) + ) + + # Create the model dynamically + model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}Input" + return create_model(model_name, **field_definitions) + + def _json_type_to_python_type(self, json_type: str) -> type: + """Convert JSON schema type to Python type. + + :param json_type: JSON schema type string. + :type json_type: str + :return: Corresponding Python type. 
+ :rtype: type + """ + type_mapping = { + "string": str, + "integer": int, + "number": float, + "boolean": bool, + "array": list, + "object": dict, + } + return type_mapping.get(json_type, str) + + async def close(self) -> None: + await self._tool_client.close() + + async def __aenter__(self) -> "ToolClient": + """Async context manager entry.""" + return self + + async def __aexit__(self, *exc_details: Any) -> None: + """Async context manager exit.""" + # The tool_client lifecycle is managed externally + pass diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json b/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json index 470408fb66cc..1ea68a37f8d5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/cspell.json @@ -5,7 +5,8 @@ "mslearn", "envtemplate", "ainvoke", - "asetup" + "asetup", + "mcptools" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py new file mode 100644 index 000000000000..4b95f4d98801 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/graph_factory_example.py @@ -0,0 +1,128 @@ +# Copyright (c) Microsoft. All rights reserved. +"""Example showing how to use a graph factory function with ToolClient. + +This sample demonstrates how to pass a factory function to LangGraphAdapter +that receives a ToolClient and returns a CompiledStateGraph. This pattern +allows the graph to be created dynamically with access to tools from +Azure AI Tool Client at runtime. 
+""" + +import asyncio +import os +from typing import List +from dotenv import load_dotenv +from importlib.metadata import version +from langchain_openai import AzureChatOpenAI +from langgraph.checkpoint.memory import MemorySaver +from langgraph.graph.state import CompiledStateGraph +from langchain_core.tools import StructuredTool + +from azure.ai.agentserver.langgraph import from_langgraph +from azure.identity.aio import DefaultAzureCredential + +load_dotenv() + + +def create_agent(model, tools, checkpointer): + """Create a LangGraph agent based on the version.""" + # for different langgraph versions + langgraph_version = version("langgraph") + if langgraph_version < "1.0.0": + from langgraph.prebuilt import create_react_agent + + return create_react_agent(model, tools, checkpointer=checkpointer) + else: + from langchain.agents import create_agent + + return create_agent(model, tools, checkpointer=checkpointer) + + +def create_graph_factory(): + """Create a factory function that builds a graph with ToolClient. + + This function returns a factory that takes a ToolClient and returns + a CompiledStateGraph. The graph is created at runtime for every request, + allowing it to access the latest tool configuration dynamically. + """ + + async def graph_factory(tools: List[StructuredTool]) -> CompiledStateGraph: + """Factory function that creates a graph using the provided tools. + + :param tools: The list of StructuredTool instances. + :type tools: List[StructuredTool] + :return: A compiled LangGraph state graph. 
+ :rtype: CompiledStateGraph + """ + # Get configuration from environment + deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") + + # List all available tools from the ToolClient + print(f"Found {len(tools)} tools:") + for tool in tools: + print(f" - {tool.name}: {tool.description}") + + if not tools: + print("\nNo tools found!") + print("Make sure your Azure AI project has tools configured.") + raise ValueError("No tools available to create agent") + + # Create the language model + model = AzureChatOpenAI(model=deployment_name) + + # Create a memory checkpointer for conversation history + memory = MemorySaver() + + # Create the LangGraph agent with the tools + print("\nCreating LangGraph agent with tools from factory...") + agent = create_agent(model, tools, memory) + + print("Agent created successfully!") + return agent + + return graph_factory + + +async def quickstart(): + """Build and return a LangGraphAdapter using a graph factory function.""" + + # Get configuration from environment + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + + if not project_endpoint: + raise ValueError( + "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" + "Set it to your Azure AI project endpoint, e.g., " + "https://.services.ai.azure.com/api/projects/" + ) + + # Create Azure credentials + credential = DefaultAzureCredential() + + # Create a factory function that will build the graph at runtime + # The factory will receive a ToolClient when the agent first runs + graph_factory = create_graph_factory() + + # Pass the factory function to from_langgraph instead of a compiled graph + # The graph will be created on every agent run with access to ToolClient + print("Creating LangGraph adapter with factory function...") + # Get project connection ID from environment + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") + + adapter = from_langgraph(graph_factory, credentials=credential, tools=[{"type": "mcp", "project_connection_id": tool_connection_id}]) + + print("Adapter created! Graph will be built on every request.") + return adapter + + +async def main(): # pragma: no cover - sample entrypoint + """Main function to run the agent.""" + adapter = await quickstart() + + if adapter: + print("\nStarting agent server...") + print("The graph factory will be called for every request that arrives.") + await adapter.run_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py new file mode 100644 index 000000000000..f77a0b31b1d5 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft. All rights reserved. +"""Enhanced MCP example using ToolClient with AzureAIToolClient. + +This sample demonstrates how to use the ToolClient to integrate Azure AI +Tool Client (which supports both MCP tools and Azure AI Tools API) with +LangGraph's create_react_agent. 
+""" + +import asyncio +import os + +from dotenv import load_dotenv +from importlib.metadata import version +from langchain_openai import AzureChatOpenAI +from langgraph.checkpoint.memory import MemorySaver + +from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient +from azure.ai.agentserver.langgraph import ToolClient, from_langgraph +from azure.identity.aio import DefaultAzureCredential + +load_dotenv() + + +def create_agent(model, tools, checkpointer): + """Create a LangGraph agent based on available imports.""" + try: + from langgraph.prebuilt import create_react_agent + return create_react_agent(model, tools, checkpointer=checkpointer) + except ImportError: + from langchain.agents import create_agent + return create_agent(model, tools, checkpointer=checkpointer) + + +async def quickstart(): + """Build and return a LangGraph agent wired to Azure AI Tool Client.""" + + # Get configuration from environment + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME", "gpt-4o") + + if not project_endpoint: + raise ValueError( + "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" + "Set it to your Azure AI project endpoint, e.g., " + "https://.services.ai.azure.com/api/projects/" + ) + + # Create Azure credentials + credential = DefaultAzureCredential() + tool_definitions = [ + { + "type": "mcp", + "project_connection_id": "" + }, + { + "type": "code_interpreter", + } + ] + # Create the AzureAIToolClient + # This client supports both MCP tools and Azure AI Tools API + tool_client = AzureAIToolClient( + endpoint=project_endpoint, + credential=credential, + tools=tool_definitions + ) + + # Create the ToolClient + client = ToolClient(tool_client) + + # List all available tools and convert to LangChain format + print("Fetching tools from Azure AI Tool Client...") + tools = await client.list_tools() + print(f"Found {len(tools)} tools:") + for tool in tools: + print(f" - {tool.name}: {tool.description}") + + if not tools: + print("\nNo tools found!") + print("Make sure your Azure AI project has tools configured.") + print("This can include:") + print(" - MCP (Model Context Protocol) servers") + print(" - Foundry AI Tools") + return None + + # Create the language model + model = AzureChatOpenAI(model=deployment_name) + + # Create a memory checkpointer for conversation history + memory = MemorySaver() + + # Create the LangGraph agent with the tools + print("\nCreating LangGraph agent with tools...") + agent = create_agent(model, tools, memory) + + print("Agent created successfully!") + return agent + + +async def main(): # pragma: no cover - sample entrypoint + """Main function to run the agent.""" + agent = await quickstart() + + if agent: + print("\nStarting agent server...") + await from_langgraph(agent).run_async() + + +if __name__ == "__main__": + asyncio.run(main()) From b7d7bea2296a0009dda63237f836ecbbcd246a89 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 12 Nov 2025 23:33:30 -0800 Subject: [PATCH 12/35] update changelog and version --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 11 +++++++++++ 
.../azure/ai/agentserver/agentframework/_version.py | 2 +- .../azure-ai-agentserver-core/CHANGELOG.md | 11 +++++++++++ .../azure/ai/agentserver/core/_version.py | 2 +- .../azure-ai-agentserver-langgraph/CHANGELOG.md | 11 +++++++++++ .../azure/ai/agentserver/langgraph/_version.py | 2 +- 6 files changed, 36 insertions(+), 3 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index c22ea4418361..15d90e5660ab 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,6 +1,17 @@ # Release History +## 1.0.0b4 (2025-11-13) + +### Features Added + +- Adapters support tools + +### Bugs Fixed + +- Pin azure-ai-projects and azure-ai-agents version to avoid version conflicts + + ## 1.0.0b3 (2025-11-11) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index c43fdbc2e239..22553b18fb7e 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b3" +VERSION = "1.0.0b4" diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index c22ea4418361..15d90e5660ab 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,6 +1,17 @@ # Release History +## 1.0.0b4 (2025-11-13) + +### Features Added + +- Adapters support tools + +### Bugs Fixed + +- Pin azure-ai-projects and azure-ai-agents version to avoid version conflicts + + ## 1.0.0b3 (2025-11-11) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index c43fdbc2e239..22553b18fb7e 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b3" +VERSION = "1.0.0b4" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index c22ea4418361..15d90e5660ab 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,6 +1,17 @@ # Release History +## 1.0.0b4 (2025-11-13) + +### Features Added + +- Adapters support tools + +### Bugs Fixed + +- Pin azure-ai-projects and azure-ai-agents version to avoid version conflicts + + ## 1.0.0b3 (2025-11-11) ### Bugs Fixed diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index c43fdbc2e239..22553b18fb7e 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b3" +VERSION = "1.0.0b4" From 52a9256153a65007fdeb83f853a9b1c83ce040f9 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 00:13:25 -0800 Subject: [PATCH 13/35] fix cspell --- .../azure-ai-agentserver-agentframework/cspell.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json index 116acbc87af3..48c11927e406 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json @@ -5,7 +5,8 @@ "mslearn", "envtemplate", "pysort", - "redef" + "redef", + "aifunction" ], "ignorePaths": [ "*.csv", From ba4e1fccd8354a7e51565a8f206a5cd43f112c32 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 01:22:14 -0800 Subject: [PATCH 14/35] fix pylint and mypy for -core --- .../core/client/tools/_utils/_model_base.py | 1 + .../ai/agentserver/core/client/tools/aio/_client.py | 3 +++ .../core/client/tools/aio/operations/_operations.py | 5 ++++- .../core/client/tools/operations/_operations.py | 1 + .../azure/ai/agentserver/core/server/base.py | 7 +++++-- .../core/server/common/agent_run_context.py | 10 +++++----- 6 files changed, 19 insertions(+), 8 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py index d68c2ae28744..1bbdb6e4172c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py @@ -2,6 +2,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. 
All rights reserved. # --------------------------------------------------------- +# mypy: ignore-errors from dataclasses import dataclass, asdict, is_dataclass from typing import Any, Dict, Iterable, List, Mapping, MutableMapping, Optional, Set, Tuple diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index 93f550448b5a..8fd092bab5f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -1,3 +1,6 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- from typing import Any, List, Mapping, Union, TYPE_CHECKING diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py index f99646d5fb8b..e55be880fb6a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py @@ -1,4 +1,7 @@ - +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +# mypy: ignore-errors import json from typing import Any, Dict, List, Mapping, MutableMapping diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py index e05e1e84e708..9f6e0eb20e8c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +# mypy: ignore-errors import json import logging diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index cd0d7ed75896..e1d2531ea22a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- # pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements +# mypy: disable-error-code="name-defined,annotation-unchecked,arg-type" import inspect import json import os @@ -333,7 +334,9 @@ def setup_otlp_exporter(self, endpoint, provider): provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") - def get_tool_client(self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo]) -> AzureAIToolClient: + def get_tool_client( + self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo] + ) -> AzureAIToolClient: if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") return AzureAIToolClient( @@ -343,7 +346,7 @@ def get_tool_client(self, tools: Optional[list[ToolDefinition]], user_info: Opti user = user_info, ) - + def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: event_data = json.dumps(event.as_dict()) if event.type: diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 89def295ef0c..5188476b8339 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -7,7 +7,6 @@ from ...models.projects import AgentId, AgentReference, ResponseConversation1 from .id_generator.foundry_id_generator import FoundryIdGenerator from .id_generator.id_generator import IdGenerator -from ...client.tools.aio._client import AzureAIToolClient logger = get_logger() @@ -69,21 +68,22 @@ def get_tools(self) -> list: request_tools = self.request.get("tools", []) if not request_tools: return self._agent_tools - 
return request_tools - + def get_user_info(self) -> dict: return self._user_info + + def _deserialize_create_response(payload: dict) -> CreateResponse: _deserialized = CreateResponse(**payload) raw_agent_reference = payload.get("agent") if raw_agent_reference: _deserialized["agent"] = _deserialize_agent_reference(raw_agent_reference) - + tools = payload.get("tools") if tools: - _deserialized["tools"] = [tool for tool in tools] + _deserialized["tools"] = [tool for tool in tools] # pylint: disable=unnecessary-comprehension return _deserialized From 661ecb31b4b4fe2f5e115dd61ef7ba715cd55336 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 10:00:47 -0800 Subject: [PATCH 15/35] fix agents sdk version --- sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 1c6c37e19e23..ad882b2ab596 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -21,7 +21,7 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-monitor-opentelemetry>=1.5.0", "azure-ai-projects==1.1.0b4", - "azure-ai-agents==1.2.0b6", + "azure-ai-agents==1.2.0b5", "azure-core>=1.35.0", "azure-identity", "openai>=1.80.0", From 8ba9f1b5ec11daa4206e2b51d4604f06a680ee94 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty <1634042+ganeshyb@users.noreply.github.com> Date: Thu, 13 Nov 2025 13:05:54 -0800 Subject: [PATCH 16/35] pylint fixes (#44010) --- .../ai/agentserver/agentframework/__init__.py | 10 +-- .../agentframework/agent_framework.py | 43 +++++++------ .../agentserver/agentframework/tool_client.py | 60 +++++++++++------- .../core/client/tools/_utils/_model_base.py | 5 +- .../client/tools/operations/_operations.py | 8 +-- .../azure/ai/agentserver/core/server/base.py | 1 + .../ai/agentserver/langgraph/__init__.py | 10 ++- 
.../ai/agentserver/langgraph/langgraph.py | 50 +++++++++------ .../ai/agentserver/langgraph/tool_client.py | 61 +++++++++++-------- 9 files changed, 151 insertions(+), 97 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py index aa03a264339c..2b987cdcf3f5 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/__init__.py @@ -5,20 +5,20 @@ from typing import TYPE_CHECKING, Optional, Any -from ._version import VERSION from .agent_framework import AgentFrameworkCBAgent +from .tool_client import ToolClient +from ._version import VERSION if TYPE_CHECKING: # pragma: no cover from azure.core.credentials_async import AsyncTokenCredential -def from_agent_framework(agent, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> "AgentFrameworkCBAgent": - from .agent_framework import AgentFrameworkCBAgent +def from_agent_framework(agent, + credentials: Optional["AsyncTokenCredential"] = None, + **kwargs: Any) -> "AgentFrameworkCBAgent": return AgentFrameworkCBAgent(agent, credentials=credentials, **kwargs) -from .tool_client import ToolClient - __all__ = ["from_agent_framework", "ToolClient"] __version__ = VERSION diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 50cb09fd66f7..77f3a4b1ce85 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -9,7 +9,7 @@ from typing import 
TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List import inspect -from agent_framework import AgentProtocol +from agent_framework import AgentProtocol, AIFunction from agent_framework.azure import AzureAIAgentClient # pylint: disable=no-name-in-module from opentelemetry import trace @@ -28,7 +28,6 @@ from .models.agent_framework_output_non_streaming_converter import ( AgentFrameworkOutputNonStreamingConverter, ) -from agent_framework import AIFunction from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter from .models.constants import Constants from .tool_client import ToolClient @@ -45,7 +44,7 @@ class AgentFactory(Protocol): An agent factory is a callable that takes a ToolClient and returns an AgentProtocol, either synchronously or asynchronously. """ - + def __call__(self, tools: List[AIFunction]) -> Union[AgentProtocol, Awaitable[AgentProtocol]]: """Create an AgentProtocol using the provided ToolClient. @@ -74,18 +73,20 @@ class AgentFrameworkCBAgent(FoundryCBAgent): - Supports both streaming and non-streaming responses based on the `stream` flag. """ - def __init__(self, agent: Union[AgentProtocol, AgentFactory], credentials: "Optional[AsyncTokenCredential]" = None, **kwargs: Any): + def __init__(self, agent: Union[AgentProtocol, AgentFactory], + credentials: "Optional[AsyncTokenCredential]" = None, + **kwargs: Any): """Initialize the AgentFrameworkCBAgent with an AgentProtocol or a factory function. - :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient and returns AgentProtocol (sync or async). + :param agent: The Agent Framework agent to adapt, or a callable that takes ToolClient + and returns AgentProtocol (sync or async). :type agent: Union[AgentProtocol, AgentFactory] :param credentials: Azure credentials for authentication. 
:type credentials: Optional[AsyncTokenCredential] """ - super().__init__(credentials=credentials, **kwargs) + super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg self._agent_or_factory: Union[AgentProtocol, AgentFactory] = agent self._resolved_agent: "Optional[AgentProtocol]" = None - # If agent is already instantiated, use it directly if isinstance(agent, AgentProtocol): self._resolved_agent = agent @@ -126,21 +127,24 @@ async def _resolve_agent(self, context: AgentRunContext): """Resolve the agent if it's a factory function (for single-use/first-time resolution). Creates a ToolClient and calls the factory function with it. This is used for the initial resolution. + + :param context: The agent run context containing tools and user information. + :type context: AgentRunContext """ if callable(self._agent_or_factory): logger.debug("Resolving agent from factory function") - + # Create ToolClient with credentials - tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) + tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) # pylint: disable=no-member tool_client_wrapper = ToolClient(tool_client) tools = await tool_client_wrapper.list_tools() - + result = self._agent_or_factory(tools) if inspect.iscoroutine(result): self._resolved_agent = await result else: self._resolved_agent = result - + logger.debug("Agent resolved successfully") else: # Should not reach here, but just in case @@ -149,19 +153,18 @@ async def _resolve_agent(self, context: AgentRunContext): async def _resolve_agent_for_request(self, context: AgentRunContext): logger.debug("Resolving fresh agent from factory function for request") - + # Create ToolClient with credentials - tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) + tool_client = self.get_tool_client(tools=context.get_tools(), user_info=context.get_user_info()) # pylint: 
disable=no-member tool_client_wrapper = ToolClient(tool_client) tools = await tool_client_wrapper.list_tools() - - import inspect + result = self._agent_or_factory(tools) if inspect.iscoroutine(result): agent = await result else: agent = result - + logger.debug("Fresh agent resolved successfully for request") return agent, tool_client_wrapper @@ -184,7 +187,7 @@ def init_tracing(self): agent_client.setup_azure_ai_observability() self.tracer = trace.get_tracer(__name__) - async def agent_run( + async def agent_run( # pylint: disable=too-many-statements self, context: AgentRunContext ) -> Union[ OpenAIResponse, @@ -201,7 +204,7 @@ async def agent_run( agent = self._resolved_agent else: agent = self._resolved_agent - + logger.info(f"Starting agent_run with stream={context.stream}") request_input = context.request.get("input") @@ -248,7 +251,7 @@ async def stream_updates(): try: await tool_client.close() logger.debug("Closed tool_client after streaming completed") - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logger.warning(f"Error closing tool_client in stream: {e}") return stream_updates() @@ -267,5 +270,5 @@ async def stream_updates(): try: await tool_client.close() logger.debug("Closed tool_client after request processing") - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logger.warning(f"Error closing tool_client: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py index e06df0df3026..0049b3982b1c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -3,13 +3,16 @@ # --------------------------------------------------------- """Tool 
client for integrating AzureAIToolClient with Agent Framework.""" -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional from agent_framework import AIFunction -from pydantic import BaseModel, Field, create_model - +from pydantic import Field, create_model +from azure.ai.agentserver.core.logger import get_logger if TYPE_CHECKING: from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool +logger = get_logger() + +# pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs class ToolClient: """Client that integrates AzureAIToolClient with Agent Framework. @@ -46,7 +49,7 @@ class ToolClient: :meta private: """ - + def __init__(self, tool_client: "AzureAIToolClient") -> None: """Initialize the ToolClient. @@ -55,7 +58,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: """ self._tool_client = tool_client self._aifunction_cache: List[AIFunction] = None - + async def list_tools(self) -> List[AIFunction]: """List all available tools as Agent Framework tool definitions. 
@@ -77,7 +80,7 @@ async def list_tools(self) -> List[AIFunction]: # Get tools from AzureAIToolClient if self._aifunction_cache is not None: return self._aifunction_cache - + azure_tools = await self._tool_client.list_tools() self._aifunction_cache = [] @@ -98,34 +101,40 @@ def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunct """ # Get the input schema from the tool descriptor input_schema = azure_tool.input_schema or {} - + # Create a Pydantic model from the input schema properties = input_schema.get("properties", {}) required_fields = set(input_schema.get("required", [])) - + # Build field definitions for the Pydantic model field_definitions: Dict[str, Any] = {} for field_name, field_info in properties.items(): field_type = self._json_schema_type_to_python(field_info.get("type", "string")) field_description = field_info.get("description", "") is_required = field_name in required_fields - + if is_required: field_definitions[field_name] = (field_type, Field(description=field_description)) else: - field_definitions[field_name] = (Optional[field_type], Field(default=None, description=field_description)) - + field_definitions[field_name] = (Optional[field_type], + Field(default=None, description=field_description)) + # Create the Pydantic model dynamically input_model = create_model( f"{azure_tool.name}_input", **field_definitions ) - + # Create a wrapper function that calls the Azure tool async def tool_func(**kwargs: Any) -> Any: - """Dynamically generated function to invoke the Azure AI tool.""" - return await self.invoke_tool(azure_tool.name, kwargs) - + """Dynamically generated function to invoke the Azure AI tool. + + :return: The result from the tool invocation. 
+ :rtype: Any + """ + logger.debug("Invoking tool: %s with input: %s", azure_tool.name, kwargs) + return await azure_tool.ainvoke(kwargs) + # Create and return the AIFunction return AIFunction( name=azure_tool.name, @@ -133,7 +142,7 @@ async def tool_func(**kwargs: Any) -> Any: func=tool_func, input_model=input_model ) - + def _json_schema_type_to_python(self, json_type: str) -> type: """Convert JSON schema type to Python type. @@ -151,14 +160,23 @@ def _json_schema_type_to_python(self, json_type: str) -> type: "object": dict, } return type_map.get(json_type, str) - + async def close(self) -> None: + """Close the tool client and release resources.""" await self._tool_client.close() - + async def __aenter__(self) -> "ToolClient": - """Async context manager entry.""" + """Async context manager entry. + + :return: The ToolClient instance. + :rtype: ToolClient + """ return self - + async def __aexit__(self, *exc_details: Any) -> None: - """Async context manager exit.""" + """Async context manager exit. + + :param exc_details: Exception details if an exception occurred. 
+ :type exc_details: Any + """ await self.close() diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py index 1bbdb6e4172c..e06ef576264e 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_utils/_model_base.py @@ -751,8 +751,11 @@ def from_dict(cls, data: Mapping[str, Any], tool_definitions: List[ToolDefinitio result_data = data.get("result", {}) tools_list = [] tool_definitions_map = {f"{td.type.lower()}": td for td in tool_definitions} - + filter_tools = len(tool_definitions_map) > 0 for tool_data in result_data.get("tools", []): + + if filter_tools and tool_data["name"].lower() not in tool_definitions_map: + continue # Parse inputSchema input_schema_data = tool_data.get("inputSchema", {}) input_schema = MCPToolSchema( diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py index 9f6e0eb20e8c..b54d2d7f6538 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py @@ -4,8 +4,7 @@ # mypy: ignore-errors import json -import logging -from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union +from typing import Any, Dict, List, Mapping, MutableMapping, Tuple, Union from azure.core import PipelineClient from .._configuration import AzureAIToolClientConfiguration from .._model_base import FoundryTool, ToolSource, UserInfo @@ -26,7 +25,6 @@ map_error, ) -logger = logging.getLogger(__name__) # 
Shared constants API_VERSION = "2025-11-15-preview" @@ -192,7 +190,7 @@ def build_invoke_mcp_tool_request( _params = {} _content = prepare_mcptools_invoke_tool_request_content(tool, arguments, TOOL_PROPERTY_OVERRIDES) - logger.info("Invoking MCP tool: %s with arguments: %s", tool.name, dict(arguments)) + content = json.dumps(_content) _request = build_mcptools_invoke_tool_request(api_version=api_version, headers=_headers, params=_params, content=content) @@ -370,7 +368,7 @@ def prepare_mcptools_invoke_tool_request_content(tool: FoundryTool, arguments: M ) if meta_config: params["_meta"] = meta_config - logger.info("Prepared MCP tool invocation params: %s", params) + payload = { "jsonrpc": "2.0", "id": 2, diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index e1d2531ea22a..1d1ba9e2eb3c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -337,6 +337,7 @@ def setup_otlp_exporter(self, endpoint, provider): def get_tool_client( self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo] ) -> AzureAIToolClient: + logger.debug("Creating AzureAIToolClient with tools: %s", tools) if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") return AzureAIToolClient( diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 06cfe3bd8489..4ad1719ba56b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -6,19 +6,23 @@ from typing import TYPE_CHECKING, Optional, Any from ._version 
import VERSION +from .tool_client import ToolClient if TYPE_CHECKING: # pragma: no cover from . import models from azure.core.credentials_async import AsyncTokenCredential -def from_langgraph(agent, credentials: Optional["AsyncTokenCredential"] = None, state_converter: Optional["models.LanggraphStateConverter"] = None, **kwargs: Any) -> "LangGraphAdapter": +def from_langgraph( + agent, + credentials: Optional["AsyncTokenCredential"] = None, + state_converter: Optional["models.LanggraphStateConverter"] = None, + **kwargs: Any +) -> "LangGraphAdapter": from .langgraph import LangGraphAdapter return LangGraphAdapter(agent, credentials=credentials, state_converter=state_converter, **kwargs) -from .tool_client import ToolClient - __all__ = ["from_langgraph", "ToolClient"] __version__ = VERSION diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 27b302e29a18..6aac565660dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Protocol, Union, Optional, List from langchain_core.runnables import RunnableConfig +from langchain_core.tools import StructuredTool from langgraph.graph.state import CompiledStateGraph from azure.ai.agentserver.core.constants import Constants @@ -22,11 +23,9 @@ from .models.utils import is_state_schema_valid from .tool_client import ToolClient -from langchain_core.tools import StructuredTool - if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential - + logger = get_logger() @@ -53,11 +52,18 @@ class LangGraphAdapter(FoundryCBAgent): Adapter for LangGraph Agent. 
""" - def __init__(self, graph: Union[CompiledStateGraph, GraphFactory], credentials: "Optional[AsyncTokenCredential]" = None, state_converter: "Optional[LanggraphStateConverter]" = None, **kwargs: Any) -> None: + def __init__( + self, + graph: Union[CompiledStateGraph, GraphFactory], + credentials: "Optional[AsyncTokenCredential]" = None, + state_converter: "Optional[LanggraphStateConverter]" = None, + **kwargs: Any + ) -> None: """ Initialize the LangGraphAdapter with a CompiledStateGraph or a function that returns one. - :param graph: The LangGraph StateGraph to adapt, or a callable that takes ToolClient and returns CompiledStateGraph (sync or async). + :param graph: The LangGraph StateGraph to adapt, or a callable that takes ToolClient + and returns CompiledStateGraph (sync or async). :type graph: Union[CompiledStateGraph, GraphFactory] :param credentials: Azure credentials for authentication. :type credentials: Optional[AsyncTokenCredential] @@ -68,7 +74,7 @@ def __init__(self, graph: Union[CompiledStateGraph, GraphFactory], credentials: self._graph_or_factory: Union[CompiledStateGraph, GraphFactory] = graph self._resolved_graph: "Optional[CompiledStateGraph]" = None self.azure_ai_tracer = None - + # If graph is already compiled, validate and set up state converter if isinstance(graph, CompiledStateGraph): self._resolved_graph = graph @@ -105,7 +111,7 @@ async def agent_run(self, context: AgentRunContext): graph = self._resolved_graph else: graph = self._resolved_graph - + input_data = self.state_converter.request_to_state(context) logger.debug(f"Converted input data: {input_data}") if not context.stream: @@ -122,15 +128,17 @@ async def agent_run(self, context: AgentRunContext): logger.warning(f"Error closing tool_client: {e}") async def _resolve_graph(self, context: AgentRunContext): - """ - Resolve the graph if it's a factory function (for single-use/first-time resolution). 
+ """Resolve the graph if it's a factory function (for single-use/first-time resolution). Creates a ToolClient and calls the factory function with it. This is used for the initial resolution to set up state_converter. + + :param context: The context for the agent run. + :type context: AgentRunContext """ if callable(self._graph_or_factory): logger.debug("Resolving graph from factory function") - - + + # Create ToolClient with credentials tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) tool_client_wrapper = ToolClient(tool_client) @@ -143,14 +151,14 @@ async def _resolve_graph(self, context: AgentRunContext): self._resolved_graph = await result else: self._resolved_graph = result - + # Validate and set up state converter if not already set from initialization if not self.state_converter: if is_state_schema_valid(self._resolved_graph.builder.state_schema): self.state_converter = LanggraphMessageStateConverter() else: raise ValueError("state_converter is required for non-MessagesState graph.") - + logger.debug("Graph resolved successfully") else: # Should not reach here, but just in case @@ -168,7 +176,7 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): :rtype: tuple[CompiledStateGraph, ToolClient] """ logger.debug("Resolving fresh graph from factory function for request") - + # Create ToolClient with credentials tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) tool_client_wrapper = ToolClient(tool_client) @@ -181,14 +189,14 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): graph = await result else: graph = result - + # Ensure state converter is set up (use existing one or create new) if not self.state_converter: if is_state_schema_valid(graph.builder.state_schema): self.state_converter = LanggraphMessageStateConverter() else: raise ValueError("state_converter is required for non-MessagesState graph.") - + 
logger.debug("Fresh graph resolved successfully for request") return graph, tool_client_wrapper @@ -245,7 +253,13 @@ async def agent_run_non_stream(self, input_data: dict, context: AgentRunContext, logger.error(f"Error during agent run: {e}") raise e - async def agent_run_astream(self, input_data: dict, context: AgentRunContext, graph: CompiledStateGraph, tool_client: "Optional[ToolClient]" = None): + async def agent_run_astream( + self, + input_data: dict, + context: AgentRunContext, + graph: CompiledStateGraph, + tool_client: "Optional[ToolClient]" = None + ): """ Run the agent with streaming response. @@ -275,7 +289,7 @@ async def agent_run_astream(self, input_data: dict, context: AgentRunContext, gr # Close tool_client if provided if tool_client is not None: try: - await tool_client._tool_client.close() + await tool_client.close() logger.debug("Closed tool_client after streaming completed") except Exception as e: logger.warning(f"Error closing tool_client in stream: {e}") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py index 5a5b75c13a03..49e36c54d802 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -11,7 +11,7 @@ if TYPE_CHECKING: from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient, FoundryTool - +# pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs class ToolClient: """Client that integrates AzureAIToolClient with LangGraph. @@ -56,7 +56,7 @@ class ToolClient: :meta private: """ - + def __init__(self, tool_client: "AzureAIToolClient") -> None: """Initialize the ToolClient. 
@@ -65,7 +65,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: """ self._tool_client = tool_client self._langchain_tools_cache: List[StructuredTool] = None - + async def list_tools(self) -> List[StructuredTool]: """List all available tools as LangChain BaseTool instances. @@ -91,16 +91,16 @@ async def list_tools(self) -> List[StructuredTool]: # Get tools from AzureAIToolClient if self._langchain_tools_cache is not None: return self._langchain_tools_cache - + azure_tools = await self._tool_client.list_tools() - self._langchain_tools_cache = [] + self._langchain_tools_cache = [] # Convert to LangChain StructuredTool instances for azure_tool in azure_tools: langchain_tool = self._convert_to_langchain_tool(azure_tool) self._langchain_tools_cache.append(langchain_tool) - + return self._langchain_tools_cache - + def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredTool: """Convert an AzureAITool to a LangChain StructuredTool. @@ -111,23 +111,27 @@ def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredToo """ # Get the input schema from the tool descriptor input_schema = azure_tool.input_schema or {} - + # Create a Pydantic model for the tool's input schema args_schema = self._create_pydantic_model( tool_name=azure_tool.name, schema=input_schema ) - + # Create an async function that invokes the tool async def tool_func(**kwargs: Any) -> str: - """Invoke the Azure AI tool.""" + """Invoke the Azure AI tool. + + :return: The result from the tool invocation as a string. 
+ :rtype: str + """ result = await azure_tool(**kwargs) # Convert result to string for LangChain compatibility if isinstance(result, dict): import json return json.dumps(result) return str(result) - + # Create a StructuredTool with the async function structured_tool = StructuredTool( name=azure_tool.name, @@ -135,9 +139,9 @@ async def tool_func(**kwargs: Any) -> str: coroutine=tool_func, args_schema=args_schema, ) - + return structured_tool - + def _create_pydantic_model( self, tool_name: str, @@ -155,16 +159,16 @@ def _create_pydantic_model( # Get properties from schema properties = schema.get("properties", {}) required_fields = schema.get("required", []) - + # Build field definitions for Pydantic model field_definitions = {} for prop_name, prop_schema in properties.items(): prop_type = self._json_type_to_python_type(prop_schema.get("type", "string")) prop_description = prop_schema.get("description", "") - + # Determine if field is required is_required = prop_name in required_fields - + if is_required: field_definitions[prop_name] = ( prop_type, @@ -175,11 +179,11 @@ def _create_pydantic_model( Optional[prop_type], Field(None, description=prop_description) ) - + # Create the model dynamically model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}Input" return create_model(model_name, **field_definitions) - + def _json_type_to_python_type(self, json_type: str) -> type: """Convert JSON schema type to Python type. @@ -197,15 +201,24 @@ def _json_type_to_python_type(self, json_type: str) -> type: "object": dict, } return type_mapping.get(json_type, str) - + async def close(self) -> None: await self._tool_client.close() - + async def __aenter__(self) -> "ToolClient": - """Async context manager entry.""" + """Async context manager entry. + + :return: The ToolClient instance. + :rtype: ToolClient + """ return self - + async def __aexit__(self, *exc_details: Any) -> None: - """Async context manager exit.""" + """Async context manager exit. 
+ + :param exc_details: Exception details if an exception occurred. + :type exc_details: Any + :return: None + :rtype: None + """ # The tool_client lifecycle is managed externally - pass From 76f935b2319ee86c092b5d3059a7bb6e04eccb98 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty Date: Thu, 13 Nov 2025 16:32:35 -0800 Subject: [PATCH 17/35] Lint and mypy fixes --- .../agentserver/core/client/tools/_client.py | 56 +----------------- .../core/client/tools/aio/_client.py | 58 +------------------ .../azure/ai/agentserver/core/constants.py | 1 + .../azure/ai/agentserver/core/server/base.py | 41 ++++++++----- .../core/server/common/agent_run_context.py | 6 +- .../ai/agentserver/langgraph/__init__.py | 2 +- .../ai/agentserver/langgraph/langgraph.py | 10 ++-- .../ai/agentserver/langgraph/tool_client.py | 12 ++-- 8 files changed, 47 insertions(+), 139 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py index a7afd935df64..df19d4663fb3 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -13,58 +13,6 @@ from .operations._operations import MCPToolsOperations, RemoteToolsOperations from ._utils._model_base import InvocationPayloadBuilder from ._model_base import FoundryTool, ToolSource - -class AzureAITool: - """Azure AI tool wrapper for invocation. - - Represents a single tool that can be invoked either via MCP protocol or - Azure AI Tools API. This class provides a convenient interface for tool - invocation and exposes tool metadata. - - :ivar str name: The name of the tool. - :ivar str description: Human-readable description of what the tool does. - :ivar dict metadata: Additional metadata about the tool from the API. 
- :ivar ~Tool_Client.models.ToolSource source: - The source of the tool (MCP_TOOLS or REMOTE_TOOLS). - - .. admonition:: Example: - - .. literalinclude:: ../samples/simple_example.py - :start-after: [START use_tool] - :end-before: [END use_tool] - :language: python - :dedent: 4 - :caption: Using an AzureAITool instance. - """ - - def __init__(self, client: "AzureAIToolClient", descriptor: FoundryTool) -> None: - """Initialize an Azure AI Tool. - - :param client: Parent client instance for making API calls. - :type client: AzureAIToolClient - :param descriptor: Tool descriptor containing metadata and configuration. - :type descriptor: ~Tool_Client.models.FoundryTool - """ - self._client = client - self._descriptor = descriptor - self.name = descriptor.name - self.description = descriptor.description - self.metadata = dict(descriptor.metadata) - self.source = descriptor.source - - def invoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool synchronously. - - :param args: Positional arguments to pass to the tool. - :param kwargs: Keyword arguments to pass to the tool. - :return: The result from the tool invocation. - :rtype: Any - """ - payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) - return self._client._invoke_tool(self._descriptor, payload) - - def __call__(self, *args: Any, **kwargs: Any) -> Any: - return self.invoke(*args, **kwargs) class AzureAIToolClient: """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs. 
@@ -189,11 +137,9 @@ def invoke_tool( return self._invoke_tool(descriptor, payload, **kwargs) def _resolve_tool_descriptor( - self, tool: Union[AzureAITool, str, FoundryTool] + self, tool: Union[str, FoundryTool] ) -> FoundryTool: """Resolve a tool reference to a descriptor.""" - if isinstance(tool, AzureAITool): - return tool._descriptor if isinstance(tool, FoundryTool): return tool if isinstance(tool, str): diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index 8fd092bab5f1..b49ed2b971cd 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -16,58 +16,6 @@ if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential -class AzureAITool: - """Azure AI tool wrapper for invocation. - - Represents a single tool that can be invoked either via MCP protocol or - Azure AI Tools API. This class provides a convenient interface for tool - invocation and exposes tool metadata. - - :ivar str name: The name of the tool. - :ivar str description: Human-readable description of what the tool does. - :ivar dict metadata: Additional metadata about the tool from the API. - :ivar ~Tool_Client.models.ToolSource source: - The source of the tool (MCP_TOOLS or REMOTE_TOOLS). - - .. admonition:: Example: - - .. literalinclude:: ../samples/simple_example.py - :start-after: [START use_tool] - :end-before: [END use_tool] - :language: python - :dedent: 4 - :caption: Using an AzureAITool instance. - """ - - def __init__(self, client: "AzureAIToolClient", descriptor: FoundryTool) -> None: - """Initialize an Azure AI Tool. - - :param client: Parent client instance for making API calls. 
- :type client: AzureAIToolClient - :param descriptor: Tool descriptor containing metadata and configuration. - :type descriptor: ~Tool_Client.models.FoundryTool - """ - self._client = client - self._descriptor = descriptor - self.name = descriptor.name - self.description = descriptor.description - self.metadata = dict(descriptor.metadata) - self.source = descriptor.source - - async def invoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool asynchronously. - - :param args: Positional arguments to pass to the tool. - :param kwargs: Keyword arguments to pass to the tool. - :return: The result from the tool invocation. - :rtype: Any - """ - payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) - return await self._client._invoke_tool(self._descriptor, payload) - - async def __call__(self, *args: Any, **kwargs: Any) -> Any: - return await self.invoke(*args, **kwargs) - class AzureAIToolClient: """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. 
@@ -177,7 +125,7 @@ async def _invoker(*args, **kwargs): async def invoke_tool( self, - tool: Union[AzureAITool, str, FoundryTool], + tool: Union[str, FoundryTool], *args: Any, **kwargs: Any, ) -> Any: @@ -193,11 +141,9 @@ async def invoke_tool( return await self._invoke_tool(descriptor, payload, **kwargs) async def _resolve_tool_descriptor( - self, tool: Union[AzureAITool, str, FoundryTool] + self, tool: Union[str, FoundryTool] ) -> FoundryTool: """Resolve a tool reference to a descriptor.""" - if isinstance(tool, AzureAITool): - return tool._descriptor if isinstance(tool, FoundryTool): return tool if isinstance(tool, str): diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py index 33fcb0139fea..b8dd5c328780 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py @@ -11,3 +11,4 @@ class Constants: AGENT_LOG_LEVEL = "AGENT_LOG_LEVEL" AGENT_DEBUG_ERRORS = "AGENT_DEBUG_ERRORS" ENABLE_APPLICATION_INSIGHTS_LOGGER = "AGENT_APP_INSIGHTS_ENABLED" + AZURE_AI_WORKSPACE_ENDPOINT = "AZURE_AI_WORKSPACE_ENDPOINT" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 1d1ba9e2eb3c..2b6345c66908 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -88,22 +88,23 @@ def set_run_context_to_context_var(self, run_context): ctx.update(res) request_context.set(ctx) - def set_user_info_to_context_var(self, request): - user_info: UserInfo = {} + def set_user_info_to_context_var(self, request) -> UserInfo: + user_info: UserInfo = None try: object_id_header = 
request.headers.get("x-aml-oid", None) - tenant_id_header = request.headers.get("x-aml-tenant-id", None) - - if object_id_header: - user_info["object_id"] = object_id_header - if tenant_id_header: - user_info["tenant_id"] = tenant_id_header + tenant_id_header = request.headers.get("x-aml-tid", None) + if not object_id_header and not tenant_id_header: + return None + user_info = UserInfo( + objectId=object_id_header, + tenantId=tenant_id_header + ) except Exception as e: logger.error(f"Failed to parse X-User-Info header: {e}", exc_info=True) if user_info: ctx = request_context.get() or {} - for key, value in user_info.items(): + for key, value in user_info.to_dict().items(): ctx[f"azure.ai.agentserver.user.{key}"] = str(value) request_context.set(ctx) return user_info @@ -340,12 +341,26 @@ def get_tool_client( logger.debug("Creating AzureAIToolClient with tools: %s", tools) if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") - return AzureAIToolClient( + + workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) + if workspace_endpoint: + agent_name = os.getenv(Constants.AGENT_NAME) + if not agent_name: + raise ValueError("AGENT_NAME environment variable is required when using workspace endpoint.") + return AzureAIToolClient( + endpoint=workspace_endpoint, + credential=self.credentials, + tools=tools, + user=user_info, + agent_name=agent_name, + ) + else: + return AzureAIToolClient( endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), credential=self.credentials, - tools = tools, - user = user_info, - ) + tools=tools, + user=user_info, + ) def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py index 5188476b8339..5289df0b3524 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py @@ -7,7 +7,7 @@ from ...models.projects import AgentId, AgentReference, ResponseConversation1 from .id_generator.foundry_id_generator import FoundryIdGenerator from .id_generator.id_generator import IdGenerator - +from ...client.tools._model_base import UserInfo logger = get_logger() @@ -19,7 +19,7 @@ def __init__(self, payload: dict, **kwargs: Any) -> None: self._response_id = self._id_generator.response_id self._conversation_id = self._id_generator.conversation_id self._stream = self.request.get("stream", False) - self._user_info = kwargs.get("user_info", {}) + self._user_info = kwargs.get("user_info", None) self._agent_tools = kwargs.get("agent_tools", []) @property @@ -70,7 +70,7 @@ def get_tools(self) -> list: return self._agent_tools return request_tools - def get_user_info(self) -> dict: + def get_user_info(self) -> UserInfo: return self._user_info diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 4ad1719ba56b..569166bc3786 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -7,6 +7,7 @@ from ._version import VERSION from .tool_client import ToolClient +from .langgraph import LangGraphAdapter if TYPE_CHECKING: # pragma: no cover from . 
import models @@ -19,7 +20,6 @@ def from_langgraph( state_converter: Optional["models.LanggraphStateConverter"] = None, **kwargs: Any ) -> "LangGraphAdapter": - from .langgraph import LangGraphAdapter return LangGraphAdapter(agent, credentials=credentials, state_converter=state_converter, **kwargs) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 6aac565660dd..beae4faf6499 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -70,7 +70,7 @@ def __init__( :param state_converter: custom state converter. Required if graph state is not MessagesState. :type state_converter: Optional[LanggraphStateConverter] """ - super().__init__(credentials=credentials, **kwargs) + super().__init__(credentials=credentials, **kwargs) # pylint: disable=unexpected-keyword-arg self._graph_or_factory: Union[CompiledStateGraph, GraphFactory] = graph self._resolved_graph: "Optional[CompiledStateGraph]" = None self.azure_ai_tracer = None @@ -140,7 +140,7 @@ async def _resolve_graph(self, context: AgentRunContext): # Create ToolClient with credentials - tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) + tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) # pylint: disable=no-member tool_client_wrapper = ToolClient(tool_client) tools = await tool_client_wrapper.list_tools() # Call the factory function with ToolClient @@ -153,7 +153,7 @@ async def _resolve_graph(self, context: AgentRunContext): self._resolved_graph = result # Validate and set up state converter if not already set from initialization - if not self.state_converter: + if not self.state_converter and self._resolved_graph is not None: if 
is_state_schema_valid(self._resolved_graph.builder.state_schema): self.state_converter = LanggraphMessageStateConverter() else: @@ -178,13 +178,13 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): logger.debug("Resolving fresh graph from factory function for request") # Create ToolClient with credentials - tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) + tool_client = self.get_tool_client(tools = context.get_tools(), user_info = context.get_user_info()) # pylint: disable=no-member tool_client_wrapper = ToolClient(tool_client) tools = await tool_client_wrapper.list_tools() # Call the factory function with ToolClient # Support both sync and async factories import inspect - result = self._graph_or_factory(tools) + result = self._graph_or_factory(tools) # type: ignore[operator] if inspect.iscoroutine(result): graph = await result else: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py index 49e36c54d802..374db1d1d98b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -64,7 +64,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient """ self._tool_client = tool_client - self._langchain_tools_cache: List[StructuredTool] = None + self._langchain_tools_cache: Optional[List[StructuredTool]] = None async def list_tools(self) -> List[StructuredTool]: """List all available tools as LangChain BaseTool instances. 
@@ -115,7 +115,7 @@ def _convert_to_langchain_tool(self, azure_tool: "FoundryTool") -> StructuredToo # Create a Pydantic model for the tool's input schema args_schema = self._create_pydantic_model( tool_name=azure_tool.name, - schema=input_schema + schema=dict(input_schema) ) # Create an async function that invokes the tool @@ -176,13 +176,13 @@ def _create_pydantic_model( ) else: field_definitions[prop_name] = ( - Optional[prop_type], - Field(None, description=prop_description) + prop_type, + Field(default=None, description=prop_description) ) # Create the model dynamically - model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}Input" - return create_model(model_name, **field_definitions) + model_name = f"{tool_name.replace('-', '_').replace(' ', '_').title()}-Input" + return create_model(model_name, **field_definitions) # type: ignore[call-overload] def _json_type_to_python_type(self, json_type: str) -> type: """Convert JSON schema type to Python type. From ba7ba501c2109ccf3095a9117e4d8adc20da4986 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 17:44:22 -0800 Subject: [PATCH 18/35] fix mypy and pylint --- .../agentserver/agentframework/tool_client.py | 2 +- .../azure/ai/agentserver/core/server/base.py | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py index 0049b3982b1c..aa1086a7050f 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -57,7 +57,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient """ self._tool_client = 
tool_client - self._aifunction_cache: List[AIFunction] = None + self._aifunction_cache: List[AIFunction] = None # mypy: ignore[assignment] async def list_tools(self) -> List[AIFunction]: """List all available tools as Agent Framework tool definitions. diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 2b6345c66908..d39af78e2014 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements -# mypy: disable-error-code="name-defined,annotation-unchecked,arg-type" +# mypy: ignore-errors import inspect import json import os @@ -341,7 +341,7 @@ def get_tool_client( logger.debug("Creating AzureAIToolClient with tools: %s", tools) if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") - + workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) if workspace_endpoint: agent_name = os.getenv(Constants.AGENT_NAME) @@ -354,13 +354,12 @@ def get_tool_client( user=user_info, agent_name=agent_name, ) - else: - return AzureAIToolClient( - endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), - credential=self.credentials, - tools=tools, - user=user_info, - ) + return AzureAIToolClient( + endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), + credential=self.credentials, + tools=tools, + user=user_info, + ) def _event_to_sse_chunk(event: ResponseStreamEvent) -> str: From cb15c94161addb5f1afbea08c40ed7b5f4e76b0a Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 18:28:02 -0800 Subject: [PATCH 
19/35] fix mypy --- .../azure/ai/agentserver/agentframework/tool_client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py index aa1086a7050f..6f410c29d484 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -1,6 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +# mypy: disable-error-code="assignment" """Tool client for integrating AzureAIToolClient with Agent Framework.""" from typing import TYPE_CHECKING, Any, Dict, List, Optional @@ -57,7 +58,7 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient """ self._tool_client = tool_client - self._aifunction_cache: List[AIFunction] = None # mypy: ignore[assignment] + self._aifunction_cache: List[AIFunction] = None async def list_tools(self) -> List[AIFunction]: """List all available tools as Agent Framework tool definitions. 
From 1259358c28afed8fdb400878952f1751a55dff66 Mon Sep 17 00:00:00 2001 From: Jun'an Chen Date: Sun, 16 Nov 2025 17:38:33 -0800 Subject: [PATCH 20/35] [ai-agentserver] Fix AF streaming issue (#44068) * fix streaming issue in af * fix streaming issue in af * update version to 1.0.0b5 --- .../CHANGELOG.md | 7 +- .../ai/agentserver/agentframework/_version.py | 2 +- .../agentframework/agent_framework.py | 58 +- ...nt_framework_output_streaming_converter.py | 711 ++++++------------ .../agentframework/models/utils/__init__.py | 5 + .../agentframework/models/utils/async_iter.py | 136 ++++ .../azure-ai-agentserver-core/CHANGELOG.md | 5 + .../azure/ai/agentserver/core/_version.py | 2 +- .../azure/ai/agentserver/core/server/base.py | 2 +- .../CHANGELOG.md | 6 + .../ai/agentserver/langgraph/_version.py | 2 +- 11 files changed, 416 insertions(+), 520 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/__init__.py create mode 100644 sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index 15d90e5660ab..a73c24633579 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,5 +1,10 @@ # Release History +## 1.0.0b5 (2025-11-16) + +### Bugs Fixed + +- Fixed streaming generation issues. ## 1.0.0b4 (2025-11-13) @@ -27,7 +32,7 @@ - Fixed Id generator format. -- Improved stream mode error messsage. +- Improved stream mode error message. - Updated application insights related configuration environment variables. 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index 22553b18fb7e..c7d155d924dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b4" +VERSION = "1.0.0b5" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 77f3a4b1ce85..f90545d7d1f6 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -4,7 +4,6 @@ # pylint: disable=logging-fstring-interpolation,no-name-in-module from __future__ import annotations -import asyncio # pylint: disable=do-not-import-asyncio import os from typing import TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Optional, Protocol, Union, List import inspect @@ -40,14 +39,14 @@ class AgentFactory(Protocol): """Protocol for agent factory functions. - + An agent factory is a callable that takes a ToolClient and returns an AgentProtocol, either synchronously or asynchronously. """ def __call__(self, tools: List[AIFunction]) -> Union[AgentProtocol, Awaitable[AgentProtocol]]: """Create an AgentProtocol using the provided ToolClient. - + :param tools: The list of AIFunction tools available to the agent. 
:type tools: List[AIFunction] :return: An Agent Framework agent, or an awaitable that resolves to one. @@ -97,7 +96,7 @@ def __init__(self, agent: Union[AgentProtocol, AgentFactory], @property def agent(self) -> "Optional[AgentProtocol]": """Get the resolved agent. This property provides backward compatibility. - + :return: The resolved AgentProtocol if available, None otherwise. :rtype: Optional[AgentProtocol] """ @@ -220,30 +219,35 @@ async def agent_run( # pylint: disable=too-many-statements async def stream_updates(): try: update_count = 0 - timeout_s = self._resolve_stream_timeout(context.request) - logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) - for ev in streaming_converter.initial_events(): - yield ev - - # Iterate with per-update timeout; terminate if idle too long - aiter = agent.run_stream(message).__aiter__() - while True: - try: - update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) - except StopAsyncIteration: - logger.debug("Agent streaming iterator finished (StopAsyncIteration)") - break - except asyncio.TimeoutError: - logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) - for ev in streaming_converter.completion_events(): - yield ev - return + updates = agent.run_stream(message) + async for event in streaming_converter.convert(updates): update_count += 1 - transformed = streaming_converter.transform_output_for_streaming(update) - for event in transformed: - yield event - for ev in streaming_converter.completion_events(): - yield ev + yield event + + # timeout_s = self._resolve_stream_timeout(context.request) + # logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) + # for ev in streaming_converter.initial_events(): + # yield ev + # + # # Iterate with per-update timeout; terminate if idle too long + # aiter = agent.run_stream(message).__aiter__() + # while True: + # try: + # update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) + # except 
StopAsyncIteration: + # logger.debug("Agent streaming iterator finished (StopAsyncIteration)") + # break + # except asyncio.TimeoutError: + # logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) + # for ev in streaming_converter.completion_events(): + # yield ev + # return + # update_count += 1 + # transformed = streaming_converter.transform_output_for_streaming(update) + # for event in transformed: + # yield event + # for ev in streaming_converter.completion_events(): + # yield ev logger.info("Streaming completed with %d updates", update_count) finally: # Close tool_client if it was created for this request diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 4e3d12d4563e..96beb535d3fb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -7,10 +7,9 @@ import datetime import json -import uuid -from typing import Any, List, Optional, cast +from typing import AsyncIterable, List, Optional -from agent_framework import AgentRunResponseUpdate, FunctionApprovalRequestContent, FunctionResultContent +from agent_framework import AgentRunResponseUpdate, BaseContent, FunctionApprovalRequestContent, FunctionResultContent from agent_framework._types import ( ErrorContent, FunctionCallContent, @@ -18,7 +17,6 @@ ) from azure.ai.agentserver.core import AgentRunContext -from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.models import ( Response as OpenAIResponse, ResponseStreamEvent, @@ -27,6 +25,7 @@ 
FunctionToolCallItemResource, FunctionToolCallOutputItemResource, ItemContentOutputText, + ItemResource, ResponseCompletedEvent, ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, @@ -43,397 +42,187 @@ ) from .agent_id_generator import AgentIdGenerator - -logger = get_logger() +from .utils.async_iter import chunk_on_change, peek class _BaseStreamingState: """Base interface for streaming state handlers.""" - def prework(self, ctx: Any) -> List[ResponseStreamEvent]: # pylint: disable=unused-argument - return [] - - def convert_content(self, ctx: Any, content) -> List[ResponseStreamEvent]: # pylint: disable=unused-argument + def convert_contents(self, contents: AsyncIterable[BaseContent]) -> AsyncIterable[ResponseStreamEvent]: # pylint: disable=unused-argument raise NotImplementedError - def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]: # pylint: disable=unused-argument - return [] - class _TextContentStreamingState(_BaseStreamingState): """State handler for text and reasoning-text content during streaming.""" - def __init__(self, context: AgentRunContext) -> None: - self.context = context - self.item_id = None - self.output_index = None - self.text_buffer = "" - self.text_part_started = False - - def prework(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self.item_id is not None: - return events - - # Start a new assistant message item (in_progress) - self.item_id = self.context.id_generator.generate_message_id() - self.output_index = ctx._next_output_index # pylint: disable=protected-access - ctx._next_output_index += 1 - - message_item = ResponsesAssistantMessageItemResource( - id=self.item_id, - status="in_progress", - content=[], + def __init__(self, parent: AgentFrameworkOutputStreamingConverter): + self._parent = parent + + async def convert_contents(self, contents: AsyncIterable[TextContent]) -> AsyncIterable[ResponseStreamEvent]: + item_id = 
self._parent.context.id_generator.generate_message_id() + output_index = self._parent.next_output_index() + + yield ResponseOutputItemAddedEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=ResponsesAssistantMessageItemResource( + id=item_id, + status="in_progress", + content=[], + ), ) - events.append( - ResponseOutputItemAddedEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=message_item, - ) + yield ResponseContentPartAddedEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + content_index=0, + part=ItemContentOutputText(text="", annotations=[], logprobs=[]), ) - if not self.text_part_started: - empty_part = ItemContentOutputText(text="", annotations=[], logprobs=[]) - events.append( - ResponseContentPartAddedEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, - content_index=0, - part=empty_part, - ) - ) - self.text_part_started = True - return events - - def convert_content(self, ctx: Any, content: TextContent) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if isinstance(content, TextContent): - delta = content.text or "" - else: - delta = getattr(content, "text", None) or getattr(content, "reasoning", "") or "" - - # buffer accumulated text - self.text_buffer += delta - - # emit delta event for text - assert self.item_id is not None, "Text state not initialized: missing item_id" - assert self.output_index is not None, "Text state not initialized: missing output_index" - events.append( - ResponseTextDeltaEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, + text = "" + async for content in contents: + delta = content.text + text += delta + + yield ResponseTextDeltaEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, content_index=0, delta=delta, ) + + yield 
ResponseTextDoneEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + content_index=0, + text=text, ) - return events - - def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if not self.item_id: - return events - - full_text = self.text_buffer - assert self.item_id is not None and self.output_index is not None - events.append( - ResponseTextDoneEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, - content_index=0, - text=full_text, - ) - ) - final_part = ItemContentOutputText(text=full_text, annotations=[], logprobs=[]) - events.append( - ResponseContentPartDoneEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, - content_index=0, - part=final_part, - ) - ) - completed_item = ResponsesAssistantMessageItemResource( - id=self.item_id, status="completed", content=[final_part] - ) - events.append( - ResponseOutputItemDoneEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=completed_item, - ) + + content_part = ItemContentOutputText(text=text, annotations=[], logprobs=[]) + yield ResponseContentPartDoneEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + content_index=0, + part=content_part, ) - ctx._last_completed_text = full_text # pylint: disable=protected-access - # store for final response - ctx._completed_output_items.append( - { - "id": self.item_id, - "type": "message", - "status": "completed", - "content": [ - { - "type": "output_text", - "text": full_text, - "annotations": [], - "logprobs": [], - } - ], - "role": "assistant", - } + + item = ResponsesAssistantMessageItemResource(id=item_id, status="completed", content=[content_part]) + yield ResponseOutputItemDoneEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=item, ) - # reset 
state - self.item_id = None - self.output_index = None - self.text_buffer = "" - self.text_part_started = False - return events + + self._parent.add_completed_output_item(item) # pylint: disable=protected-access class _FunctionCallStreamingState(_BaseStreamingState): """State handler for function_call content during streaming.""" - def __init__(self, context: AgentRunContext) -> None: - self.context = context - self.item_id = None - self.output_index = None - self.call_id = None - self.name = None - self.args_buffer = "" - self.requires_approval = False - self.approval_request_id: str | None = None - - def prework(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self.item_id is not None: - return events - # initialize function-call item - self.item_id = self.context.id_generator.generate_function_call_id() - self.output_index = ctx._next_output_index - ctx._next_output_index += 1 - - self.call_id = self.call_id or str(uuid.uuid4()) - function_item = FunctionToolCallItemResource( - id=self.item_id, - status="in_progress", - call_id=self.call_id, - name=self.name or "", - arguments="", - ) - events.append( - ResponseOutputItemAddedEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=function_item, - ) - ) - return events - - def convert_content(self, ctx: Any, content: FunctionCallContent) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - # record identifiers (once available) - self.name = getattr(content, "name", None) or self.name or "" - self.call_id = getattr(content, "call_id", None) or self.call_id or str(uuid.uuid4()) - - args_delta = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments) - args_delta = args_delta or "" - self.args_buffer += args_delta - assert self.item_id is not None and self.output_index is not None - for ch in args_delta: - events.append( - ResponseFunctionCallArgumentsDeltaEvent( - 
sequence_number=ctx.next_sequence(), - item_id=self.item_id, - output_index=self.output_index, - delta=ch, + def __init__(self, parent: AgentFrameworkOutputStreamingConverter): + self._parent = parent + + async def convert_contents(self, contents: AsyncIterable[FunctionCallContent]) -> AsyncIterable[ResponseStreamEvent]: + content_by_call_id = {} + ids_by_call_id = {} + + async for content in contents: + if content.call_id not in content_by_call_id: + item_id = self._parent.context.id_generator.generate_function_call_id() + output_index = self._parent.next_output_index() + + content_by_call_id[content.call_id] = content + ids_by_call_id[content.call_id] = (item_id, output_index) + + yield ResponseOutputItemAddedEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=FunctionToolCallItemResource( + id=item_id, + status="in_progress", + call_id=content.call_id, + name=content.name, + arguments="", + ), ) + continue + else: + content_by_call_id[content.call_id] = content_by_call_id[content.call_id] + content + item_id, output_index = ids_by_call_id[content.call_id] + + args_delta = content.arguments if isinstance(content.arguments, str) else "" + yield ResponseFunctionCallArgumentsDeltaEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + delta=args_delta, ) - # finalize if arguments are detected to be complete - is_done = bool( - getattr(content, "is_final", False) - or getattr(content, "final", False) - or getattr(content, "done", False) - or getattr(content, "arguments_final", False) - or getattr(content, "arguments_done", False) - or getattr(content, "finish", False) - ) - if not is_done and self.args_buffer: - try: - json.loads(self.args_buffer) - is_done = True - except Exception: # pylint: disable=broad-exception-caught - pass - - if is_done: - events.append( - ResponseFunctionCallArgumentsDoneEvent( - sequence_number=ctx.next_sequence(), - item_id=self.item_id, - 
output_index=self.output_index, - arguments=self.args_buffer, - ) + for call_id, content in content_by_call_id.items(): + item_id, output_index = ids_by_call_id[call_id] + args = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments) + yield ResponseFunctionCallArgumentsDoneEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + arguments=args, ) - events.extend(self.afterwork(ctx)) - return events - - def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if not self.item_id: - return events - assert self.call_id is not None - done_item = FunctionToolCallItemResource( - id=self.item_id, - status="completed", - call_id=self.call_id, - name=self.name or "", - arguments=self.args_buffer, - ) - assert self.output_index is not None - events.append( - ResponseOutputItemDoneEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=done_item, + + item = FunctionToolCallItemResource( + id=item_id, + status="completed", + call_id=call_id, + name=content.name, + arguments=args, ) - ) - # store for final response - ctx._completed_output_items.append( - { - "id": self.item_id, - "type": "function_call", - "call_id": self.call_id, - "name": self.name or "", - "arguments": self.args_buffer, - "status": "requires_approval" if self.requires_approval else "completed", - "requires_approval": self.requires_approval, - "approval_request_id": self.approval_request_id, - } - ) - # reset - self.item_id = None - self.output_index = None - self.args_buffer = "" - self.call_id = None - self.name = None - self.requires_approval = False - self.approval_request_id = None - return events + yield ResponseOutputItemDoneEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=item, + ) + + self._parent.add_completed_output_item(item) # pylint: disable=protected-access class 
_FunctionCallOutputStreamingState(_BaseStreamingState): """Handles function_call_output items streaming (non-chunked simple output).""" - def __init__( - self, - context: AgentRunContext, - call_id: Optional[str] = None, - output: Optional[list[str]] = None, - ) -> None: - # Avoid mutable default argument (Ruff B006) - self.context = context - self.item_id = None - self.output_index = None - self.call_id = call_id - self.output = output if output is not None else [] - - def prework(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self.item_id is not None: - return events - self.item_id = self.context.id_generator.generate_function_output_id() - self.output_index = ctx._next_output_index - ctx._next_output_index += 1 - - self.call_id = self.call_id or str(uuid.uuid4()) - item = FunctionToolCallOutputItemResource( - id=self.item_id, - status="in_progress", - call_id=self.call_id, - output="", - ) - events.append( - ResponseOutputItemAddedEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, + def __init__(self, parent: AgentFrameworkOutputStreamingConverter): + self._parent = parent + + async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) -> AsyncIterable[ResponseStreamEvent]: + async for content in contents: + item_id = self._parent.context.id_generator.generate_function_output_id() + output_index = self._parent.next_output_index() + + output = (f"{type(content.exception)}({str(content.exception)})" + if content.exception + else json.dumps(content.result)) + + item = FunctionToolCallOutputItemResource( + id=item_id, + status="completed", + call_id=content.call_id, + output=output, + ) + + yield ResponseOutputItemAddedEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, item=item, ) - ) - return events - - def convert_content(self, ctx: Any, content: Any) -> List[ResponseStreamEvent]: # no delta events for now - events: 
List[ResponseStreamEvent] = [] - # treat entire output as final - result = [] - raw = getattr(content, "result", None) - if isinstance(raw, str): - result = [raw or self.output] - elif isinstance(raw, list): - for item in raw: - result.append(self._coerce_result_text(item)) - self.output = json.dumps(result) if len(result) > 0 else "" - - events.extend(self.afterwork(ctx)) - return events - - def _coerce_result_text(self, value: Any) -> str | dict: - """ - Return a string if value is already str or a TextContent-like object; else str(value). - - :param value: The value to coerce. - :type value: Any - - :return: The coerced string or dict. - :rtype: str | dict - """ - if value is None: - return "" - if isinstance(value, str): - return value - # Direct TextContent instance - if isinstance(value, TextContent): - content_payload = {"type": "text", "text": getattr(value, "text", "")} - return content_payload - - return "" - - def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if not self.item_id: - return events - # Ensure types conform: call_id must be str (guarantee non-None) and output is a single string - str_call_id = self.call_id or "" - single_output: str = cast(str, self.output[0]) if self.output else "" - done_item = FunctionToolCallOutputItemResource( - id=self.item_id, - status="completed", - call_id=str_call_id, - output=single_output, - ) - assert self.output_index is not None - events.append( - ResponseOutputItemDoneEvent( - sequence_number=ctx.next_sequence(), - output_index=self.output_index, - item=done_item, + + yield ResponseOutputItemDoneEvent( + sequence_number=self._parent.next_sequence(), + output_index=output_index, + item=item, ) - ) - ctx._completed_output_items.append( - { - "id": self.item_id, - "type": "function_call_output", - "status": "completed", - "call_id": self.call_id, - "output": self.output, - } - ) - self.item_id = None - self.output_index = None - return events + + 
self._parent.add_completed_output_item(item) # pylint: disable=protected-access class AgentFrameworkOutputStreamingConverter: @@ -442,101 +231,91 @@ class AgentFrameworkOutputStreamingConverter: def __init__(self, context: AgentRunContext) -> None: self._context = context # sequence numbers must start at 0 for first emitted event - self._sequence = 0 - self._response_id = None + self._sequence = -1 + self._next_output_index = -1 + self._response_id = self._context.response_id self._response_created_at = None - self._next_output_index = 0 - self._last_completed_text = "" - self._active_state: Optional[_BaseStreamingState] = None - self._active_kind = None # "text" | "function_call" | "error" - # accumulate completed output items for final response - self._completed_output_items: List[dict] = [] - - def _ensure_response_started(self) -> None: - if not self._response_id: - self._response_id = self._context.response_id - if not self._response_created_at: - self._response_created_at = int(datetime.datetime.now(datetime.timezone.utc).timestamp()) + self._completed_output_items: List[ItemResource] = [] def next_sequence(self) -> int: self._sequence += 1 return self._sequence - def _switch_state(self, kind: str) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self._active_state and self._active_kind != kind: - events.extend(self._active_state.afterwork(self)) - self._active_state = None - self._active_kind = None - - if self._active_state is None: - if kind == "text": - self._active_state = _TextContentStreamingState(self._context) - elif kind == "function_call": - self._active_state = _FunctionCallStreamingState(self._context) - elif kind == "function_call_output": - self._active_state = _FunctionCallOutputStreamingState(self._context) - else: - self._active_state = None - self._active_kind = kind - if self._active_state: - events.extend(self._active_state.prework(self)) - return events - - def transform_output_for_streaming(self, update: 
AgentRunResponseUpdate) -> List[ResponseStreamEvent]: - logger.debug( - "Transforming streaming update with %d contents", - len(update.contents) if getattr(update, "contents", None) else 0, - ) + def next_output_index(self) -> int: + self._next_output_index += 1 + return self._next_output_index + + def add_completed_output_item(self, item: ItemResource) -> None: + self._completed_output_items.append(item) + + @property + def context(self) -> AgentRunContext: + return self._context + + async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> AsyncIterable[ResponseStreamEvent]: self._ensure_response_started() - events: List[ResponseStreamEvent] = [] - - if getattr(update, "contents", None): - for i, content in enumerate(update.contents): - logger.debug("Processing content %d: %s", i, type(content)) - if isinstance(content, TextContent): - events.extend(self._switch_state("text")) - if isinstance(self._active_state, _TextContentStreamingState): - events.extend(self._active_state.convert_content(self, content)) - elif isinstance(content, FunctionCallContent): - events.extend(self._switch_state("function_call")) - if isinstance(self._active_state, _FunctionCallStreamingState): - events.extend(self._active_state.convert_content(self, content)) - elif isinstance(content, FunctionResultContent): - events.extend(self._switch_state("function_call_output")) - if isinstance(self._active_state, _FunctionCallOutputStreamingState): - call_id = getattr(content, "call_id", None) - if call_id: - self._active_state.call_id = call_id - events.extend(self._active_state.convert_content(self, content)) - elif isinstance(content, FunctionApprovalRequestContent): - events.extend(self._switch_state("function_call")) - if isinstance(self._active_state, _FunctionCallStreamingState): - self._active_state.requires_approval = True - self._active_state.approval_request_id = getattr(content, "id", None) - events.extend(self._active_state.convert_content(self, 
content.function_call)) - elif isinstance(content, ErrorContent): - # errors are stateless; flush current state and emit error - events.extend(self._switch_state("error")) - events.append( - ResponseErrorEvent( - sequence_number=self.next_sequence(), - code=getattr(content, "error_code", None) or "server_error", - message=getattr(content, "message", None) or "An error occurred", - param="", - ) - ) - return events - - def finalize_last_content(self) -> List[ResponseStreamEvent]: - events: List[ResponseStreamEvent] = [] - if self._active_state: - events.extend(self._active_state.afterwork(self)) - self._active_state = None - self._active_kind = None - return events - - def build_response(self, status: str) -> OpenAIResponse: + + created_response = self._build_response(status="in_progress") + yield ResponseCreatedEvent( + sequence_number=self.next_sequence(), + response=created_response, + ) + + yield ResponseInProgressEvent( + sequence_number=self.next_sequence(), + response=created_response, + ) + + is_changed = lambda a, b: a is not None and b is not None and a.message_id != b.message_id + async for group in chunk_on_change(updates, is_changed): + has_value, first, contents = await peek(self._read_updates(group)) + if not has_value: + continue + + state = None + if isinstance(first, TextContent): + state = _TextContentStreamingState(self) + elif isinstance(first, (FunctionCallContent, FunctionApprovalRequestContent)): + state = _FunctionCallStreamingState(self) + elif isinstance(first, FunctionResultContent): + state = _FunctionCallOutputStreamingState(self) + elif isinstance(first, ErrorContent): + yield ResponseErrorEvent( + sequence_number=self.next_sequence(), + code=getattr(first, "error_code", None) or "server_error", + message=getattr(first, "message", None) or "An error occurred", + param="", + ) + continue + + async for content in state.convert_contents(contents): + yield content + + yield ResponseCompletedEvent( + sequence_number=self.next_sequence(), + 
response=self._build_response(status="completed"), + ) + + @staticmethod + async def _read_updates(updates: AsyncIterable[AgentRunResponseUpdate]) -> AsyncIterable[BaseContent]: + async for update in updates: + if not update.contents: + continue + + accepted_types = (TextContent, + FunctionCallContent, + FunctionApprovalRequestContent, + FunctionResultContent, + ErrorContent) + for content in update.contents: + if isinstance(content, accepted_types): + yield content + + def _ensure_response_started(self) -> None: + if not self._response_created_at: + self._response_created_at = int(datetime.datetime.now(datetime.timezone.utc).timestamp()) + + def _build_response(self, status: str) -> OpenAIResponse: self._ensure_response_started() agent_id = AgentIdGenerator.generate(self._context) response_data = { @@ -550,47 +329,3 @@ def build_response(self, status: str) -> OpenAIResponse: if status == "completed" and self._completed_output_items: response_data["output"] = self._completed_output_items return OpenAIResponse(response_data) - - # High-level helpers to emit lifecycle events for streaming - def initial_events(self) -> List[ResponseStreamEvent]: - """ - Emit ResponseCreatedEvent and an initial ResponseInProgressEvent. - - :return: List of initial response stream events. - :rtype: List[ResponseStreamEvent] - """ - self._ensure_response_started() - events: List[ResponseStreamEvent] = [] - created_response = self.build_response(status="in_progress") - events.append( - ResponseCreatedEvent( - sequence_number=self.next_sequence(), - response=created_response, - ) - ) - events.append( - ResponseInProgressEvent( - sequence_number=self.next_sequence(), - response=self.build_response(status="in_progress"), - ) - ) - return events - - def completion_events(self) -> List[ResponseStreamEvent]: - """ - Finalize any active content and emit a single ResponseCompletedEvent. - - :return: List of completion response stream events. 
- :rtype: List[ResponseStreamEvent] - """ - self._ensure_response_started() - events: List[ResponseStreamEvent] = [] - events.extend(self.finalize_last_content()) - completed_response = self.build_response(status="completed") - events.append( - ResponseCompletedEvent( - sequence_number=self.next_sequence(), - response=completed_response, - ) - ) - return events diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/__init__.py new file mode 100644 index 000000000000..28077537d94b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py new file mode 100644 index 000000000000..ef8525109554 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py @@ -0,0 +1,136 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- +from __future__ import annotations + +from collections.abc import AsyncIterable, AsyncIterator, Callable +from typing import TypeVar, Optional, Tuple, Awaitable + +TSource = TypeVar("TSource") +TKey = TypeVar("TKey") +T = TypeVar("T") + + +async def chunk_on_change( + source: AsyncIterable[TSource], + is_changed: Optional[Callable[[Optional[TSource], Optional[TSource]], bool]] = None, +) -> AsyncIterator[AsyncIterable[TSource]]: + """ + Chunks an async iterable into groups based on when consecutive elements change. + + :param source: Async iterable of items. + :param is_changed: Function(prev, current) -> bool indicating if value changed. + If None, uses != by default. + :return: An async iterator of async iterables (chunks). + """ + + if is_changed is None: + # Default equality: use the value itself as key, == as equality + async for group in chunk_by_key(source, lambda x: x): + yield group + else: + # Equivalent to C#: EqualityComparer.Create((x, y) => !isChanged(x, y)) + def key_equal(a: TSource, b: TSource) -> bool: + return not is_changed(a, b) + + async for group in chunk_by_key(source, lambda x: x, key_equal=key_equal): + yield group + + +async def chunk_by_key( + source: AsyncIterable[TSource], + key_selector: Callable[[TSource], TKey], + key_equal: Optional[Callable[[TKey, TKey], bool]] = None, +) -> AsyncIterator[AsyncIterable[TSource]]: + """ + Chunks the async iterable into groups based on a key selector. + + :param source: Async iterable of items. + :param key_selector: Function mapping item -> key. + :param key_equal: Optional equality function for keys. Defaults to '=='. + :return: An async iterator of async iterables (chunks). 
+ """ + + if key_equal is None: + def key_equal(a: TKey, b: TKey) -> bool: # type: ignore[no-redef] + return a == b + + it = source.__aiter__() + + # Prime the iterator + try: + pending = await it.__anext__() + except StopAsyncIteration: + return + + pending_key = key_selector(pending) + has_pending = True + + while has_pending: + current_key = pending_key + + async def inner() -> AsyncIterator[TSource]: + nonlocal pending, pending_key, has_pending + + # First element of the group + yield pending + + # Consume until key changes or source ends + while True: + try: + item = await it.__anext__() + except StopAsyncIteration: + # Source ended; tell outer loop to stop after this group + has_pending = False + return + + k = key_selector(item) + if not key_equal(k, current_key): + # Hand first item of next group back to outer loop + pending = item + pending_key = k + return + + yield item + + # Yield an async iterable representing the current chunk + yield inner() + + +async def peek( + source: AsyncIterable[T], +) -> Tuple[bool, Optional[T], AsyncIterable[T]]: + """ + Peeks at the first element of an async iterable without consuming it. + + :param source: Async iterable. 
+ :return: (has_value, first, full_sequence_including_first) + """ + + it = source.__aiter__() + + try: + first = await it.__anext__() + except StopAsyncIteration: + return False, None, _empty_async() + + async def sequence() -> AsyncIterator[T]: + try: + # Yield the peeked element first + yield first + # Then the rest of the original iterator + async for item in it: + yield item + finally: + # Try to close underlying async generator if it supports it + aclose = getattr(it, "aclose", None) + if aclose is not None: + await aclose() + + return True, first, sequence() + + +async def _empty_async() -> AsyncIterator[T]: + if False: + # This is just to make this an async generator for typing + yield None # type: ignore[misc] diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 15d90e5660ab..a7cfbd49dd22 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,5 +1,10 @@ # Release History +## 1.0.0b5 (2025-11-16) + +### Bugs Fixed + +- Fixed streaming generation issues. ## 1.0.0b4 (2025-11-13) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index 22553b18fb7e..c7d155d924dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b4" +VERSION = "1.0.0b5" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index d39af78e2014..bc5a15a37775 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -375,7 +375,7 @@ def _format_error(exc: Exception) -> str: return message if DEBUG_ERRORS: return repr(exc) - return "Internal error" + return f"{type(exc)}: Internal error" def _to_response(result: Union[Response, dict]) -> Response: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 15d90e5660ab..c7c4aaaa6369 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 1.0.0b5 (2025-11-16) + +### Bugs Fixed + +- Fixed streaming generation issues. + ## 1.0.0b4 (2025-11-13) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index 22553b18fb7e..c7d155d924dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b4" +VERSION = "1.0.0b5" From a26603e1db87e10aaaf9a9ac51c4e1c2bb083e48 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty <1634042+ganeshyb@users.noreply.github.com> Date: Sun, 16 Nov 2025 23:24:22 -0800 Subject: [PATCH 21/35] Refactor Azure AI Tool Client Configuration and Enhance OAuth Consent Handling * Refactor Azure AI Tool Client Configuration and Enhance OAuth Consent Handling - Consolidated the AzureAIToolClientConfiguration class by removing redundant code and improving clarity. - Introduced OAuth consent handling in the agent's response methods to manage OAuthConsentRequiredError. - Updated the FoundryCBAgent to configure tools endpoint and agent name from environment variables. - Enhanced tool client to propagate OAuth consent errors for better handling in the agent. - Added methods to generate OAuth request IDs and handle OAuth consent requests in the LangGraph response converter. - Updated sample usage to include tool connection ID from environment variables. - Incremented version to 1.0.0b5 for the langgraph package. 
* Address Pylint and mypy issues * Updated change logs --- .../CHANGELOG.md | 4 + .../agentframework/agent_framework.py | 19 +- .../azure-ai-agentserver-core/CHANGELOG.md | 4 + .../ai/agentserver/core/client/__init__.py | 5 + .../agentserver/core/client/tools/__init__.py | 2 +- .../agentserver/core/client/tools/_client.py | 329 ++++++++--------- .../core/client/tools/_configuration.py | 143 ++++---- .../core/client/tools/_exceptions.py | 77 ++-- .../core/client/tools/_model_base.py | 302 ++++++++-------- .../core/client/tools/aio/__init__.py | 2 +- .../core/client/tools/aio/_client.py | 331 +++++++++--------- .../core/client/tools/aio/_configuration.py | 144 ++++---- .../tools/aio/operations/_operations.py | 38 +- .../client/tools/operations/_operations.py | 81 +++-- .../azure/ai/agentserver/core/constants.py | 1 + .../azure/ai/agentserver/core/server/base.py | 217 +++++++++++- .../common/id_generator/id_generator.py | 3 + .../CHANGELOG.md | 4 + .../ai/agentserver/langgraph/langgraph.py | 42 ++- .../ai/agentserver/langgraph/tool_client.py | 2 + .../use_tool_client_example.py | 3 +- 21 files changed, 1018 insertions(+), 735 deletions(-) create mode 100644 sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index a73c24633579..a01bc1990909 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -2,6 +2,10 @@ ## 1.0.0b5 (2025-11-16) +### Feature Added + +- Support Tools Oauth + ### Bugs Fixed - Fixed streaming generation issues. 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index f90545d7d1f6..2a7c28f9a3f8 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -12,6 +12,7 @@ from agent_framework.azure import AzureAIAgentClient # pylint: disable=no-name-in-module from opentelemetry import trace +from azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.constants import Constants as AdapterConstants from azure.ai.agentserver.core.logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger @@ -255,8 +256,8 @@ async def stream_updates(): try: await tool_client.close() logger.debug("Closed tool_client after streaming completed") - except Exception as e: # pylint: disable=broad-exception-caught - logger.warning(f"Error closing tool_client in stream: {e}") + except Exception as ex: # pylint: disable=broad-exception-caught + logger.warning(f"Error closing tool_client in stream: {ex}") return stream_updates() @@ -268,11 +269,21 @@ async def stream_updates(): transformed_result = non_streaming_converter.transform_output_for_response(result) logger.info("Agent run and transformation completed successfully") return transformed_result + except OAuthConsentRequiredError as e: + logger.info("OAuth consent required during agent run") + if context.stream: + # Yield OAuth consent response events + # Capture e in the closure by passing it as a default argument + async def oauth_consent_stream(error=e): + async for event in self.respond_with_oauth_consent_astream(context, error): + yield event + return oauth_consent_stream() + return await 
self.respond_with_oauth_consent(context, e) finally: # Close tool_client if it was created for this request (non-streaming only, streaming handles in generator) if not context.stream and tool_client is not None: try: await tool_client.close() logger.debug("Closed tool_client after request processing") - except Exception as e: # pylint: disable=broad-exception-caught - logger.warning(f"Error closing tool_client: {e}") + except Exception as ex: # pylint: disable=broad-exception-caught + logger.warning(f"Error closing tool_client: {ex}") diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index a7cfbd49dd22..55a56fed54ca 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -2,6 +2,10 @@ ## 1.0.0b5 (2025-11-16) +### Feature Added + +- Support Tools Oauth + ### Bugs Fixed - Fixed streaming generation issues. diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py new file mode 100644 index 000000000000..fdf8caba9ef5 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/__init__.py @@ -0,0 +1,5 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +__path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py index 8cf7c6b67389..3800740fb464 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/__init__.py @@ -10,4 +10,4 @@ "FoundryTool", "OAuthConsentRequiredError", "MCPToolApprovalRequiredError", -] \ No newline at end of file +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py index df19d4663fb3..ea9a8479637f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -2,12 +2,11 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from copy import deepcopy -from typing import Any, List, TYPE_CHECKING, Mapping, Union +from typing import Any, List, Mapping, Union from azure.core import PipelineClient from azure.core.pipeline import policies -from azure.core.rest import HttpRequest, HttpResponse from azure.core.credentials import TokenCredential +from azure.core.tracing.decorator import distributed_trace from ._configuration import AzureAIToolClientConfiguration from .operations._operations import MCPToolsOperations, RemoteToolsOperations @@ -15,156 +14,174 @@ from ._model_base import FoundryTool, ToolSource class AzureAIToolClient: - """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs. 
- - This client provides access to tools from both MCP (Model Context Protocol) servers - and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. - - :param str endpoint: - The fully qualified endpoint for the Azure AI Agents service. - Example: "https://.api.azureml.ms" - :param credential: - Credential for authenticating requests to the service. - Use credentials from azure-identity like DefaultAzureCredential. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str agent_name: - Name of the agent to use for tool operations. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations defining which tools to include. - :keyword Mapping[str, Any] user: - User information for tool invocations (object_id, tenant_id). - :keyword str api_version: - API version to use when communicating with the service. - Default is the latest supported version. - :keyword transport: - Custom transport implementation. Default is RequestsTransport. - :paramtype transport: ~azure.core.pipeline.transport.HttpTransport - - """ - - def __init__( - self, - endpoint: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the synchronous Azure AI Tool Client. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional keyword arguments for client configuration. 
- """ - self._config = AzureAIToolClientConfiguration( - endpoint, - credential, - **kwargs, - ) - - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs) - - # Initialize specialized clients with client and config - self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) - self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) - - def list_tools(self) -> List[FoundryTool]: - """List all available tools from configured sources. - - Retrieves tools from both MCP servers and Azure AI Tools API endpoints, - returning them as FoundryTool instances ready for invocation. - :return: List of available tools from all configured sources. - :rtype: List[~AzureAITool] - :raises ~exceptions.OAuthConsentRequiredError: - Raised when the service requires user OAuth consent. - :raises ~exceptions.MCPToolApprovalRequiredError: - Raised when tool access requires human approval. - :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. 
- - """ - - existing_names: set[str] = set() - - tools: List[FoundryTool] = [] - - # Fetch MCP tools - mcp_tools = self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) - - # Fetch Tools API tools - tools_api_tools = self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) - - for tool in tools: - # Capture tool in a closure to avoid shadowing issues - def make_invoker(captured_tool): - return lambda *args, **kwargs: self.invoke_tool(captured_tool, *args, **kwargs) - tool.invoker = make_invoker(tool) - return tools - - def invoke_tool( - self, - tool: Union[str, FoundryTool], - *args: Any, - **kwargs: Any, - ) -> Any: - """Invoke a tool by instance, name, or descriptor. - - :param tool: Tool to invoke, specified as an AzureAITool instance, - tool name string, or FoundryTool. - :type tool: Union[str, ~FoundryTool] - :param args: Positional arguments to pass to the tool - """ - descriptor = self._resolve_tool_descriptor(tool) - payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) - return self._invoke_tool(descriptor, payload, **kwargs) - - def _resolve_tool_descriptor( - self, tool: Union[str, FoundryTool] - ) -> FoundryTool: - """Resolve a tool reference to a descriptor.""" - if isinstance(tool, FoundryTool): - return tool - if isinstance(tool, str): - # Fetch all tools and find matching descriptor - descriptors = self.list_tools() - for descriptor in descriptors: - if descriptor.name == tool or descriptor.key == tool: - return descriptor - raise KeyError(f"Unknown tool: {tool}") - raise TypeError("Tool must be an AzureAITool, FoundryTool, or registered name/key") - - def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: - """Invoke a tool descriptor.""" - if descriptor.source is ToolSource.MCP_TOOLS: - return self._mcp_tools.invoke_tool(descriptor, arguments) - if descriptor.source is ToolSource.REMOTE_TOOLS: - return self._remote_tools.invoke_tool(descriptor, 
arguments) - raise ValueError(f"Unsupported tool source: {descriptor.source}") - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> "AzureAIToolClient": - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) \ No newline at end of file + """Synchronous client for aggregating tools from Azure AI MCP and Tools APIs. + + This client provides access to tools from both MCP (Model Context Protocol) servers + and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. + + :param str endpoint: + The fully qualified endpoint for the Azure AI Agents service. + Example: "https://.api.azureml.ms" + :param credential: + Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str agent_name: + Name of the agent to use for tool operations. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations defining which tools to include. + :keyword Mapping[str, Any] user: + User information for tool invocations (object_id, tenant_id). + :keyword str api_version: + API version to use when communicating with the service. + Default is the latest supported version. + :keyword transport: + Custom transport implementation. Default is RequestsTransport. + :paramtype transport: ~azure.core.pipeline.transport.HttpTransport + + """ + + def __init__( + self, + endpoint: str, + credential: "TokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the synchronous Azure AI Tool Client. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional keyword arguments for client configuration. 
+ """ + self._config = AzureAIToolClientConfiguration( + endpoint, + credential, + **kwargs, + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + # Initialize specialized clients with client and config + self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) + self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) + + def list_tools(self) -> List[FoundryTool]: + """List all available tools from configured sources. + + Retrieves tools from both MCP servers and Azure AI Tools API endpoints, + returning them as FoundryTool instances ready for invocation. + :return: List of available tools from all configured sources. + :rtype: List[~AzureAITool] + :raises ~exceptions.OAuthConsentRequiredError: + Raised when the service requires user OAuth consent. + :raises ~exceptions.MCPToolApprovalRequiredError: + Raised when tool access requires human approval. + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. 
+ + """ + + existing_names: set[str] = set() + + tools: List[FoundryTool] = [] + + # Fetch MCP tools + mcp_tools = self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) + + # Fetch Tools API tools + tools_api_tools = self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) + + for tool in tools: + # Capture tool in a closure to avoid shadowing issues + def make_invoker(captured_tool): + return lambda *args, **kwargs: self.invoke_tool(captured_tool, *args, **kwargs) + tool.invoker = make_invoker(tool) + return tools + + @distributed_trace + def invoke_tool( + self, + tool: Union[str, FoundryTool], + *args: Any, + **kwargs: Any, + ) -> Any: + """Invoke a tool by instance, name, or descriptor. + + :param tool: Tool to invoke, specified as an AzureAITool instance, + tool name string, or FoundryTool. + :type tool: Union[str, ~FoundryTool] + :param args: Positional arguments to pass to the tool. + :type args: Any + :return: The result of invoking the tool. + :rtype: Any + """ + descriptor = self._resolve_tool_descriptor(tool) + payload = InvocationPayloadBuilder.build_payload(args, kwargs, configuration={}) + return self._invoke_tool(descriptor, payload, **kwargs) + + def _resolve_tool_descriptor( + self, tool: Union[str, FoundryTool] + ) -> FoundryTool: + """Resolve a tool reference to a descriptor. + + :param tool: Tool to resolve, either a FoundryTool instance or a string name/key. + :type tool: Union[str, FoundryTool] + :return: The resolved FoundryTool descriptor. 
+ :rtype: FoundryTool + """ + if isinstance(tool, FoundryTool): + return tool + if isinstance(tool, str): + # Fetch all tools and find matching descriptor + descriptors = self.list_tools() + for descriptor in descriptors: + if tool in (descriptor.name, descriptor.key): + return descriptor + raise KeyError(f"Unknown tool: {tool}") + raise TypeError("Tool must be an AzureAITool, FoundryTool, or registered name/key") + + def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: + """Invoke a tool descriptor. + + :param descriptor: The tool descriptor to invoke. + :type descriptor: FoundryTool + :param arguments: Arguments to pass to the tool. + :type arguments: Mapping[str, Any] + :return: The result of the tool invocation. + :rtype: Any + """ + if descriptor.source is ToolSource.MCP_TOOLS: + return self._mcp_tools.invoke_tool(descriptor, arguments, **kwargs) + if descriptor.source is ToolSource.REMOTE_TOOLS: + return self._remote_tools.invoke_tool(descriptor, arguments, **kwargs) + raise ValueError(f"Unsupported tool source: {descriptor.source}") + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "AzureAIToolClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py index 45e2ac178654..71cbdebec911 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_configuration.py @@ -2,87 +2,84 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -from typing import Any, Mapping, List, Optional, TYPE_CHECKING +from typing import Any, List, Optional, TYPE_CHECKING from azure.core.pipeline import policies +from ._utils._model_base import ToolConfigurationParser, UserInfo, ToolDefinition if TYPE_CHECKING: from azure.core.credentials import TokenCredential -from ._utils._model_base import ToolConfigurationParser, UserInfo, ToolDefinition +class AzureAIToolClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for Azure AI Tool Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param str endpoint: + Fully qualified endpoint for the Azure AI Agents service. + :param credential: + Azure TokenCredential for authentication. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str api_version: + API version to use. Default is the latest supported version. + :keyword List[str] credential_scopes: + OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. + :keyword str agent_name: + Name of the agent. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations. + :keyword Mapping[str, Any] user: + User information for tool invocations. + """ + + def __init__( + self, + endpoint: str, + credential: "TokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the configuration. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional configuration options. 
+ """ + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) -class AzureAIToolClientConfiguration: - """Configuration for Azure AI Tool Client. - - Manages authentication, endpoint configuration, and policy settings for the - Azure AI Tool Client. This class is used internally by the client and should - not typically be instantiated directly. - - :param str endpoint: - Fully qualified endpoint for the Azure AI Agents service. - :param credential: - Azure TokenCredential for authentication. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str api_version: - API version to use. Default is the latest supported version. - :keyword List[str] credential_scopes: - OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. - :keyword str agent_name: - Name of the agent. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations. - :keyword Mapping[str, Any] user: - User information for tool invocations. - """ + # Tool configuration + self.agent_name: str = kwargs.pop("agent_name", "$default") + self.tools: Optional[List[ToolDefinition]] = kwargs.pop("tools", None) + self.user: Optional[UserInfo] = kwargs.pop("user", None) - def __init__( - self, - endpoint: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the configuration. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional configuration options. 
- """ - api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + # Initialize tool configuration parser + self.tool_config = ToolConfigurationParser(self.tools) - self.endpoint = endpoint - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) - + self._configure(**kwargs) - # Tool configuration - self.agent_name: str = kwargs.pop("agent_name", "$default") - self.tools: Optional[List[ToolDefinition]] = kwargs.pop("tools", None) - self.user: Optional[UserInfo] = kwargs.pop("user", None) - - # Initialize tool configuration parser - - self.tool_config = ToolConfigurationParser(self.tools) - - self._configure(**kwargs) - - # Warn about unused kwargs - if kwargs: - import warnings - warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) + # Warn about unused kwargs + if kwargs: + import warnings + warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = 
policies.BearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) \ No newline at end of file + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py index aa00b6b5f4b5..41515592d698 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_exceptions.py @@ -1,49 +1,52 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- from typing import Any, Mapping, Optional class OAuthConsentRequiredError(RuntimeError): - """Raised when the service requires end-user OAuth consent. 
+ """Raised when the service requires end-user OAuth consent. - This exception is raised when a tool or service operation requires explicit - OAuth consent from the end user before the operation can proceed. + This exception is raised when a tool or service operation requires explicit + OAuth consent from the end user before the operation can proceed. - :ivar str message: Human-readable guidance returned by the service. - :ivar str consent_url: Link that the end user must visit to provide consent. - :ivar dict payload: Full response payload from the service. + :ivar str message: Human-readable guidance returned by the service. + :ivar str consent_url: Link that the end user must visit to provide consent. + :ivar dict payload: Full response payload from the service. - :param str message: Human-readable guidance returned by the service. - :param str consent_url: Link that the end user must visit to provide the required consent. - :param dict payload: Full response payload supplied by the service. - """ + :param str message: Human-readable guidance returned by the service. + :param str consent_url: Link that the end user must visit to provide the required consent. + :param dict payload: Full response payload supplied by the service. + """ - def __init__(self, message: str, consent_url: Optional[str], payload: Mapping[str, Any]): - super().__init__(message) - self.message = message - self.consent_url = consent_url - self.payload = dict(payload) + def __init__(self, message: str, consent_url: Optional[str], payload: Mapping[str, Any]): + super().__init__(message) + self.message = message + self.consent_url = consent_url + self.payload = dict(payload) class MCPToolApprovalRequiredError(RuntimeError): - """Raised when an MCP tool invocation needs human approval. - - This exception is raised when an MCP (Model Context Protocol) tool requires - explicit human approval before the invocation can proceed, typically for - security or compliance reasons. 
- - :ivar str message: Human-readable guidance returned by the service. - :ivar dict approval_arguments: - Arguments that must be approved or amended before continuing. - :ivar dict payload: Full response payload from the service. - - :param str message: Human-readable guidance returned by the service. - :param dict approval_arguments: - Arguments that must be approved or amended before continuing. - :param dict payload: Full response payload supplied by the service. - """ - - def __init__(self, message: str, approval_arguments: Mapping[str, Any], payload: Mapping[str, Any]): - super().__init__(message) - self.message = message - self.approval_arguments = dict(approval_arguments) - self.payload = dict(payload) + """Raised when an MCP tool invocation needs human approval. + + This exception is raised when an MCP (Model Context Protocol) tool requires + explicit human approval before the invocation can proceed, typically for + security or compliance reasons. + + :ivar str message: Human-readable guidance returned by the service. + :ivar dict approval_arguments: + Arguments that must be approved or amended before continuing. + :ivar dict payload: Full response payload from the service. + + :param str message: Human-readable guidance returned by the service. + :param dict approval_arguments: + Arguments that must be approved or amended before continuing. + :param dict payload: Full response payload supplied by the service. 
+ """ + + def __init__(self, message: str, approval_arguments: Mapping[str, Any], payload: Mapping[str, Any]): + super().__init__(message) + self.message = message + self.approval_arguments = dict(approval_arguments) + self.payload = dict(payload) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py index 3c7bed8b5db1..7e20b20edeb0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_model_base.py @@ -5,164 +5,170 @@ from enum import Enum import json -from typing import Any, Awaitable, Callable, List, Mapping, Optional +from typing import Any, Awaitable, Callable, Mapping, Optional from dataclasses import dataclass -import asyncio +import asyncio # pylint: disable=do-not-import-asyncio import inspect +from azure.core import CaseInsensitiveEnumMeta -class ToolSource(str, Enum): - """Identifies the origin of a tool. - - Specifies whether a tool comes from an MCP (Model Context Protocol) server - or from the Azure AI Tools API (remote tools). - """ +class ToolSource(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Identifies the origin of a tool. - MCP_TOOLS = "mcp_tools" - REMOTE_TOOLS = "remote_tools" + Specifies whether a tool comes from an MCP (Model Context Protocol) server + or from the Azure AI Tools API (remote tools). + """ + + MCP_TOOLS = "mcp_tools" + REMOTE_TOOLS = "remote_tools" class ToolDefinition: - """Definition of a tool including its parameters. - - :ivar str type: JSON schema type (e.g., "mcp", "a2", other tools). - """ - - def __init__(self, type: str, **kwargs: Any) -> None: - """Initialize ToolDefinition with type and any additional properties. - - :param str type: JSON schema type (e.g., "mcp", "a2", other tools). 
- :param kwargs: Any additional properties to set on the tool definition. - """ - self.type = type - # Store all additional properties as attributes - for key, value in kwargs.items(): - setattr(self, key, value) - - def __repr__(self) -> str: - """Return a detailed string representation of the ToolDefinition.""" - return json.dumps(self.__dict__, default=str) - - def __str__(self) -> str: - """Return a human-readable string representation.""" - return json.dumps(self.__dict__, default=str) - + """Definition of a tool including its parameters. -@dataclass -class FoundryTool: - """Lightweight description of a tool that can be invoked. - - Represents metadata and configuration for a single tool, including its - name, description, input schema, and source information. - - :ivar str key: Unique identifier for this tool. - :ivar str name: Display name of the tool. - :ivar str description: Human-readable description of what the tool does. - :ivar ~ToolSource source: - Origin of the tool (MCP_TOOLS or REMOTE_TOOLS). - :ivar dict metadata: Raw metadata from the API response. - :ivar dict input_schema: - JSON schema describing the tool's input parameters, or None. - :ivar ToolDefinition tool_definition: - Optional tool definition object, or None. - """ - - key: str - name: str - description: str - source: ToolSource - metadata: Mapping[str, Any] - input_schema: Optional[Mapping[str, Any]] = None - tool_definition: Optional[ToolDefinition] = None - invoker: Optional[Callable[..., Awaitable[Any]]] = None - - def invoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool synchronously. - - :param args: Positional arguments to pass to the tool. - :param kwargs: Keyword arguments to pass to the tool. - :return: The result from the tool invocation. 
- :rtype: Any - """ - - - if not self.invoker: - raise NotImplementedError("No invoker function defined for this tool.") - if inspect.iscoroutinefunction(self.invoker): - # If the invoker is async, check if we're already in an event loop - try: - loop = asyncio.get_running_loop() - # We're in a running loop, can't use asyncio.run() - raise RuntimeError( - "Cannot call invoke() on an async tool from within an async context. " - "Use 'await tool.ainvoke(...)' or 'await tool(...)' instead." - ) - except RuntimeError as e: - if "no running event loop" in str(e).lower(): - # No running loop, safe to use asyncio.run() - return asyncio.run(self.invoker(*args, **kwargs)) - else: - # Re-raise our custom error - raise - else: - return self.invoker(*args, **kwargs) - - async def ainvoke(self, *args: Any, **kwargs: Any) -> Any: - """Invoke the tool asynchronously. - - :param args: Positional arguments to pass to the tool. - :param kwargs: Keyword arguments to pass to the tool. - :return: The result from the tool invocation. - :rtype: Any - """ - - if not self.invoker: - raise NotImplementedError("No invoker function defined for this tool.") - if inspect.iscoroutinefunction(self.invoker): - return await self.invoker(*args, **kwargs) - else: - result = self.invoker(*args, **kwargs) - # If the result is awaitable (e.g., a coroutine), await it - if inspect.iscoroutine(result) or hasattr(result, '__await__'): - return await result - return result - - def __call__(self, *args: Any, **kwargs: Any) -> Any: - - # Check if the invoker is async - if self.invoker and inspect.iscoroutinefunction(self.invoker): - # Return coroutine for async context - return self.ainvoke(*args, **kwargs) - else: - # Use sync invoke - return self.invoke(*args, **kwargs) + :ivar str type: JSON schema type (e.g., "mcp", "a2", other tools). + """ + def __init__(self, type: str, **kwargs: Any) -> None: + """Initialize ToolDefinition with type and any additional properties. 
-class UserInfo: - """Represents user information. - - :ivar str objectId: User's object identifier. - :ivar str tenantId: Tenant identifier. - """ - - def __init__(self, objectId: str, tenantId: str, **kwargs: Any) -> None: - """Initialize UserInfo with user details. - - :param str objectId: User's object identifier. - :param str tenantId: Tenant identifier. - :param kwargs: Any additional properties to set on the user. - """ - self.objectId = objectId - self.tenantId = tenantId - # Store all additional properties as attributes - for key, value in kwargs.items(): - setattr(self, key, value) - - def to_dict(self) -> dict: - """Convert to dictionary for JSON serialization.""" - return { - "objectId": self.objectId, - "tenantId": self.tenantId - } + :param str type: JSON schema type (e.g., "mcp", "a2", other tools). + :param kwargs: Any additional properties to set on the tool definition. + """ + self.type = type + # Store all additional properties as attributes + for key, value in kwargs.items(): + setattr(self, key, value) + + def __repr__(self) -> str: + """Return a detailed string representation of the ToolDefinition. + :return: JSON string representation of the ToolDefinition. + :rtype: str + """ + return json.dumps(self.__dict__, default=str) + def __str__(self) -> str: + """Return a human-readable string representation. + :return: JSON string representation of the ToolDefinition. + :rtype: str + """ + return json.dumps(self.__dict__, default=str) +@dataclass +class FoundryTool: + """Lightweight description of a tool that can be invoked. + + Represents metadata and configuration for a single tool, including its + name, description, input schema, and source information. + + :ivar str key: Unique identifier for this tool. + :ivar str name: Display name of the tool. + :ivar str description: Human-readable description of what the tool does. + :ivar ~ToolSource source: + Origin of the tool (MCP_TOOLS or REMOTE_TOOLS). 
+ :ivar dict metadata: Raw metadata from the API response. + :ivar dict input_schema: + JSON schema describing the tool's input parameters, or None. + :ivar ToolDefinition tool_definition: + Optional tool definition object, or None. + """ + + key: str + name: str + description: str + source: ToolSource + metadata: Mapping[str, Any] + input_schema: Optional[Mapping[str, Any]] = None + tool_definition: Optional[ToolDefinition] = None + invoker: Optional[Callable[..., Awaitable[Any]]] = None + + def invoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool synchronously. + + :param args: Positional arguments to pass to the tool. + :type args: Any + :return: The result from the tool invocation. + :rtype: Any + """ + + if not self.invoker: + raise NotImplementedError("No invoker function defined for this tool.") + if inspect.iscoroutinefunction(self.invoker): + # If the invoker is async, check if we're already in an event loop + try: + asyncio.get_running_loop() + # We're in a running loop, can't use asyncio.run() + raise RuntimeError( + "Cannot call invoke() on an async tool from within an async context. " + "Use 'await tool.ainvoke(...)' or 'await tool(...)' instead." + ) + except RuntimeError as e: + if "no running event loop" in str(e).lower(): + # No running loop, safe to use asyncio.run() + return asyncio.run(self.invoker(*args, **kwargs)) + # Re-raise our custom error + raise + else: + return self.invoker(*args, **kwargs) + + async def ainvoke(self, *args: Any, **kwargs: Any) -> Any: + """Invoke the tool asynchronously. + + :param args: Positional arguments to pass to the tool. + :type args: Any + :return: The result from the tool invocation. 
+ :rtype: Any + """ + + if not self.invoker: + raise NotImplementedError("No invoker function defined for this tool.") + if inspect.iscoroutinefunction(self.invoker): + return await self.invoker(*args, **kwargs) + + result = self.invoker(*args, **kwargs) + # If the result is awaitable (e.g., a coroutine), await it + if inspect.iscoroutine(result) or hasattr(result, '__await__'): + return await result + return result + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + + # Check if the invoker is async + if self.invoker and inspect.iscoroutinefunction(self.invoker): + # Return coroutine for async context + return self.ainvoke(*args, **kwargs) + + # Use sync invoke + return self.invoke(*args, **kwargs) + + +class UserInfo: + """Represents user information. + + :ivar str objectId: User's object identifier. + :ivar str tenantId: Tenant identifier. + """ + + def __init__(self, objectId: str, tenantId: str, **kwargs: Any) -> None: + """Initialize UserInfo with user details. + + :param str objectId: User's object identifier. + :param str tenantId: Tenant identifier. + :param kwargs: Any additional properties to set on the user. + """ + self.objectId = objectId + self.tenantId = tenantId + # Store all additional properties as attributes + for key, value in kwargs.items(): + setattr(self, key, value) + + def to_dict(self) -> dict: + """Convert to dictionary for JSON serialization. + + :return: Dictionary containing objectId and tenantId. 
+ :rtype: dict + """ + return { + "objectId": self.objectId, + "tenantId": self.tenantId + } diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py index c0abe5b29bb9..047a3b7919e7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/__init__.py @@ -10,4 +10,4 @@ "FoundryTool", "OAuthConsentRequiredError", "MCPToolApprovalRequiredError", -] \ No newline at end of file +] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index b49ed2b971cd..e2d35b1dd919 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -6,6 +6,7 @@ from azure.core import AsyncPipelineClient from azure.core.pipeline import policies +from azure.core.tracing.decorator_async import distributed_trace_async from ._configuration import AzureAIToolClientConfiguration from .._utils._model_base import InvocationPayloadBuilder @@ -17,159 +18,177 @@ from azure.core.credentials_async import AsyncTokenCredential class AzureAIToolClient: - """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. - - This client provides access to tools from both MCP (Model Context Protocol) servers - and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. - - :param str endpoint: - The fully qualified endpoint for the Azure AI Agents service. - Example: "https://.api.azureml.ms" - :param credential: - Credential for authenticating requests to the service. 
- Use credentials from azure-identity like DefaultAzureCredential. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str agent_name: - Name of the agent to use for tool operations. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations defining which tools to include. - :keyword Mapping[str, Any] user: - User information for tool invocations (object_id, tenant_id). - :keyword str api_version: - API version to use when communicating with the service. - Default is the latest supported version. - :keyword transport: - Custom transport implementation. Default is RequestsTransport. - :paramtype transport: ~azure.core.pipeline.transport.HttpTransport - - """ - - def __init__( - self, - endpoint: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the asynchronous Azure AI Tool Client. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional keyword arguments for client configuration. 
- """ - self._config = AzureAIToolClientConfiguration( - endpoint, - credential, - **kwargs, - ) - - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) - - # Initialize specialized clients with client and config - self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) - self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) - - async def list_tools(self) -> List[FoundryTool]: - """List all available tools from configured sources. - - Retrieves tools from both MCP servers and Azure AI Tools API endpoints, - returning them as AzureAITool instances ready for invocation. - :return: List of available tools from all configured sources. - :rtype: List[~AzureAITool] - :raises ~Tool_Client.exceptions.OAuthConsentRequiredError: - Raised when the service requires user OAuth consent. - :raises ~Tool_Client.exceptions.MCPToolApprovalRequiredError: - Raised when tool access requires human approval. - :raises ~azure.core.exceptions.HttpResponseError: - Raised for HTTP communication failures. 
- - """ - - existing_names: set[str] = set() - - tools: List[FoundryTool] = [] - - # Fetch MCP tools - mcp_tools = await self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) - # Fetch Tools API tools - tools_api_tools = await self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) - - for tool in tools: - # Capture tool in a closure to avoid shadowing issues - def make_invoker(captured_tool): - async def _invoker(*args, **kwargs): - return await self.invoke_tool(captured_tool, *args, **kwargs) - return _invoker - tool.invoker = make_invoker(tool) - - return tools - - async def invoke_tool( - self, - tool: Union[str, FoundryTool], - *args: Any, - **kwargs: Any, - ) -> Any: - """Invoke a tool by instance, name, or descriptor. - - :param tool: Tool to invoke, specified as an AzureAITool instance, - tool name string, or FoundryTool. - :type tool: Union[~AzureAITool, str, ~Tool_Client.models.FoundryTool] - :param args: Positional arguments to pass to the tool - """ - descriptor = await self._resolve_tool_descriptor(tool) - payload = InvocationPayloadBuilder.build_payload(args, kwargs, {}) - return await self._invoke_tool(descriptor, payload, **kwargs) - - async def _resolve_tool_descriptor( - self, tool: Union[str, FoundryTool] - ) -> FoundryTool: - """Resolve a tool reference to a descriptor.""" - if isinstance(tool, FoundryTool): - return tool - if isinstance(tool, str): - # Fetch all tools and find matching descriptor - descriptors = await self.list_tools() - for descriptor in descriptors: - if descriptor.name == tool or descriptor.key == tool: - return descriptor - raise KeyError(f"Unknown tool: {tool}") - raise TypeError("Tool must be an AsyncAzureAITool, FoundryTool, or registered name/key") - - async def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: - """Invoke a tool descriptor.""" - if descriptor.source is ToolSource.MCP_TOOLS: - return await 
self._mcp_tools.invoke_tool(descriptor, arguments) - if descriptor.source is ToolSource.REMOTE_TOOLS: - return await self._remote_tools.invoke_tool(descriptor, arguments) - raise ValueError(f"Unsupported tool source: {descriptor.source}") - - async def close(self) -> None: - """Close the underlying HTTP pipeline.""" - await self._client.close() - - async def __aenter__(self) -> "AzureAIToolClient": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client.__aexit__(*exc_details) \ No newline at end of file + """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. + + This client provides access to tools from both MCP (Model Context Protocol) servers + and Azure AI Tools API endpoints, enabling unified tool discovery and invocation. + + :param str endpoint: + The fully qualified endpoint for the Azure AI Agents service. + Example: "https://.api.azureml.ms" + :param credential: + Credential for authenticating requests to the service. + Use credentials from azure-identity like DefaultAzureCredential. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str agent_name: + Name of the agent to use for tool operations. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations defining which tools to include. + :keyword Mapping[str, Any] user: + User information for tool invocations (object_id, tenant_id). + :keyword str api_version: + API version to use when communicating with the service. + Default is the latest supported version. + :keyword transport: + Custom transport implementation. Default is RequestsTransport. + :paramtype transport: ~azure.core.pipeline.transport.HttpTransport + + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the asynchronous Azure AI Tool Client. + + :param str endpoint: The service endpoint URL. 
+ :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional keyword arguments for client configuration. + """ + self._config = AzureAIToolClientConfiguration( + endpoint, + credential, + **kwargs, + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=endpoint, policies=_policies, **kwargs) + + # Initialize specialized clients with client and config + self._mcp_tools = MCPToolsOperations(client=self._client, config=self._config) + self._remote_tools = RemoteToolsOperations(client=self._client, config=self._config) + + async def list_tools(self) -> List[FoundryTool]: + """List all available tools from configured sources. + + Retrieves tools from both MCP servers and Azure AI Tools API endpoints, + returning them as AzureAITool instances ready for invocation. + :return: List of available tools from all configured sources. + :rtype: List[~AzureAITool] + :raises ~Tool_Client.exceptions.OAuthConsentRequiredError: + Raised when the service requires user OAuth consent. + :raises ~Tool_Client.exceptions.MCPToolApprovalRequiredError: + Raised when tool access requires human approval. + :raises ~azure.core.exceptions.HttpResponseError: + Raised for HTTP communication failures. 
+ + """ + + existing_names: set[str] = set() + + tools: List[FoundryTool] = [] + + # Fetch MCP tools + mcp_tools = await self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) + # Fetch Tools API tools + tools_api_tools = await self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) + + for tool in tools: + # Capture tool in a closure to avoid shadowing issues + def make_invoker(captured_tool): + async def _invoker(*args, **kwargs): + return await self.invoke_tool(captured_tool, *args, **kwargs) + return _invoker + tool.invoker = make_invoker(tool) + + return tools + + @distributed_trace_async + async def invoke_tool( + self, + tool: Union[str, FoundryTool], + *args: Any, + **kwargs: Any, + ) -> Any: + """Invoke a tool by instance, name, or descriptor. + + :param tool: Tool to invoke, specified as an AzureAITool instance, + tool name string, or FoundryTool. + :type tool: Union[~AzureAITool, str, ~Tool_Client.models.FoundryTool] + :param args: Positional arguments to pass to the tool. + :type args: Any + :return: The result of invoking the tool. + :rtype: Any + """ + descriptor = await self._resolve_tool_descriptor(tool) + payload = InvocationPayloadBuilder.build_payload(args, kwargs, configuration={}) + return await self._invoke_tool(descriptor, payload, **kwargs) + + async def _resolve_tool_descriptor( + self, tool: Union[str, FoundryTool] + ) -> FoundryTool: + """Resolve a tool reference to a descriptor. + + :param tool: Tool to resolve, either a FoundryTool instance or a string name/key. + :type tool: Union[str, FoundryTool] + :return: The resolved FoundryTool descriptor. 
+ :rtype: FoundryTool + """ + if isinstance(tool, FoundryTool): + return tool + if isinstance(tool, str): + # Fetch all tools and find matching descriptor + descriptors = await self.list_tools() + for descriptor in descriptors: + if tool in (descriptor.name, descriptor.key): + return descriptor + raise KeyError(f"Unknown tool: {tool}") + raise TypeError("Tool must be an AsyncAzureAITool, FoundryTool, or registered name/key") + + async def _invoke_tool(self, descriptor: FoundryTool, arguments: Mapping[str, Any], **kwargs: Any) -> Any: #pylint: disable=unused-argument + """Invoke a tool descriptor. + + :param descriptor: The tool descriptor to invoke. + :type descriptor: FoundryTool + :param arguments: Arguments to pass to the tool. + :type arguments: Mapping[str, Any] + :return: The result of the tool invocation. + :rtype: Any + """ + if descriptor.source is ToolSource.MCP_TOOLS: + return await self._mcp_tools.invoke_tool(descriptor, arguments) + if descriptor.source is ToolSource.REMOTE_TOOLS: + return await self._remote_tools.invoke_tool(descriptor, arguments) + raise ValueError(f"Unsupported tool source: {descriptor.source}") + + async def close(self) -> None: + """Close the underlying HTTP pipeline.""" + await self._client.close() + + async def __aenter__(self) -> "AzureAIToolClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py index 79b819863399..4eb5503dee8d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_configuration.py @@ -6,83 +6,81 @@ from azure.core.pipeline import policies 
+from .._utils._model_base import ToolConfigurationParser + if TYPE_CHECKING: - from azure.core.credentials_async import AsyncTokenCredential + from azure.core.credentials_async import AsyncTokenCredential -from .._utils._model_base import ToolConfigurationParser +class AzureAIToolClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for Azure AI Tool Client. + + Manages authentication, endpoint configuration, and policy settings for the + Azure AI Tool Client. This class is used internally by the client and should + not typically be instantiated directly. + + :param str endpoint: + Fully qualified endpoint for the Azure AI Agents service. + :param credential: + Azure TokenCredential for authentication. + :type credential: ~azure.core.credentials.TokenCredential + :keyword str api_version: + API version to use. Default is the latest supported version. + :keyword List[str] credential_scopes: + OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. + :keyword str agent_name: + Name of the agent. Default is "$default". + :keyword List[Mapping[str, Any]] tools: + List of tool configurations. + :keyword Mapping[str, Any] user: + User information for tool invocations. + """ + + def __init__( + self, + endpoint: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + """Initialize the configuration. + + :param str endpoint: The service endpoint URL. + :param credential: Credentials for authenticating requests. + :type credential: ~azure.core.credentials.TokenCredential + :keyword kwargs: Additional configuration options. + """ + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) -class AzureAIToolClientConfiguration: - """Configuration for Azure AI Tool Client. 
- - Manages authentication, endpoint configuration, and policy settings for the - Azure AI Tool Client. This class is used internally by the client and should - not typically be instantiated directly. - - :param str endpoint: - Fully qualified endpoint for the Azure AI Agents service. - :param credential: - Azure TokenCredential for authentication. - :type credential: ~azure.core.credentials.TokenCredential - :keyword str api_version: - API version to use. Default is the latest supported version. - :keyword List[str] credential_scopes: - OAuth2 scopes for token requests. Default is ["https://ai.azure.com/.default"]. - :keyword str agent_name: - Name of the agent. Default is "$default". - :keyword List[Mapping[str, Any]] tools: - List of tool configurations. - :keyword Mapping[str, Any] user: - User information for tool invocations. - """ + # Tool configuration + self.agent_name: str = kwargs.pop("agent_name", "$default") + self.tools: Optional[List[Mapping[str, Any]]] = kwargs.pop("tools", None) + self.user: Optional[Mapping[str, Any]] = kwargs.pop("user", None) - def __init__( - self, - endpoint: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - """Initialize the configuration. - - :param str endpoint: The service endpoint URL. - :param credential: Credentials for authenticating requests. - :type credential: ~azure.core.credentials.TokenCredential - :keyword kwargs: Additional configuration options. 
- """ - api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + # Initialize tool configuration parser + self.tool_config = ToolConfigurationParser(self.tools) - self.endpoint = endpoint - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://ai.azure.com/.default"]) - + self._configure(**kwargs) - # Tool configuration - self.agent_name: str = kwargs.pop("agent_name", "$default") - self.tools: Optional[List[Mapping[str, Any]]] = kwargs.pop("tools", None) - self.user: Optional[Mapping[str, Any]] = kwargs.pop("user", None) - - # Initialize tool configuration parser - - self.tool_config = ToolConfigurationParser(self.tools) - - self._configure(**kwargs) - - # Warn about unused kwargs - if kwargs: - import warnings - warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) + # Warn about unused kwargs + if kwargs: + import warnings + warnings.warn(f"Unused configuration parameters: {list(kwargs.keys())}", UserWarning) - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - 
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) \ No newline at end of file + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py index e55be880fb6a..7d1310518519 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/operations/_operations.py @@ -11,13 +11,13 @@ from .._configuration import AzureAIToolClientConfiguration from ...operations._operations import ( - build_remotetools_invoke_tool_request, - build_remotetools_resolve_tools_request, - prepare_remotetools_invoke_tool_request_content, + 
build_remotetools_invoke_tool_request, + build_remotetools_resolve_tools_request, + prepare_remotetools_invoke_tool_request_content, prepare_remotetools_resolve_tools_request_content, - build_mcptools_list_tools_request, + build_mcptools_list_tools_request, prepare_mcptools_list_tools_request_content, - build_mcptools_invoke_tool_request, + build_mcptools_invoke_tool_request, prepare_mcptools_invoke_tool_request_content, API_VERSION, MCP_ENDPOINT_PATH, @@ -57,7 +57,7 @@ class MCPToolsOperations: def __init__(self, *args, **kwargs) -> None: """Initialize MCP client. - + Parameters ---------- client : AsyncPipelineClient @@ -71,10 +71,10 @@ def __init__(self, *args, **kwargs) -> None: if self._client is None or self._config is None: raise ValueError("Both 'client' and 'config' must be provided") - + self._endpoint_path = MCP_ENDPOINT_PATH self._api_version = API_VERSION - + async def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: """List MCP tools. @@ -82,16 +82,16 @@ async def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTo :rtype: List[FoundryTool] """ _request, error_map, remaining_kwargs = build_list_tools_request(self._api_version, kwargs) - + path_format_arguments = {"endpoint": self._config.endpoint} _request.url = self._client.format_url(_request.url, **path_format_arguments) pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) response = pipeline_response.http_response - + handle_response_error(response, error_map) return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) - + async def invoke_tool( self, tool: FoundryTool, @@ -114,14 +114,14 @@ async def invoke_tool( pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **kwargs) response = pipeline_response.http_response - + handle_response_error(response, error_map) return response.json().get("result") class RemoteToolsOperations: def 
__init__(self, *args, **kwargs) -> None: """Initialize Tools API client. - + :param client: Azure PipelineClient for HTTP requests. :type client: ~azure.core.PipelineClient :param config: Configuration object. @@ -135,7 +135,7 @@ def __init__(self, *args, **kwargs) -> None: if self._client is None or self._config is None: raise ValueError("Both 'client' and 'config' must be provided") - + # Apply agent name substitution to endpoint paths self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" self._api_version = API_VERSION @@ -149,18 +149,18 @@ async def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[Foundr result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) if result[0] is None: return [] - + _request, error_map, remaining_kwargs = result - + path_format_arguments = {"endpoint": self._config.endpoint} _request.url = self._client.format_url(_request.url, **path_format_arguments) pipeline_response: PipelineResponse = await self._client._pipeline.run(_request, **remaining_kwargs) response = pipeline_response.http_response - + handle_response_error(response, error_map) return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) - + async def invoke_tool( self, tool: FoundryTool, @@ -182,6 +182,6 @@ async def invoke_tool( pipeline_response: PipelineResponse = await self._client._pipeline.run(_request) response = pipeline_response.http_response - + handle_response_error(response, error_map) return process_invoke_remote_tool_response(response) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py index b54d2d7f6538..0a84ef2e6409 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/operations/_operations.py @@ -63,7 +63,7 @@ # Helper functions for request/response processing def prepare_request_headers(base_headers: Dict[str, str], custom_headers: Mapping[str, str] = None) -> Dict[str, str]: """Prepare request headers by merging base and custom headers. - + :param base_headers: Base headers to use :param custom_headers: Custom headers to merge :return: Merged headers dictionary @@ -75,7 +75,7 @@ def prepare_request_headers(base_headers: Dict[str, str], custom_headers: Mappin def prepare_error_map(custom_error_map: Mapping[int, Any] = None) -> MutableMapping: """Prepare error map by merging default and custom error mappings. - + :param custom_error_map: Custom error mappings to merge :return: Merged error map """ @@ -91,7 +91,7 @@ def format_and_execute_request( **kwargs: Any ) -> HttpResponse: """Format request URL and execute pipeline. - + :param client: Pipeline client :param request: HTTP request to execute :param endpoint: Endpoint URL for formatting @@ -104,7 +104,7 @@ def format_and_execute_request( def handle_response_error(response: HttpResponse, error_map: MutableMapping) -> None: """Handle HTTP response errors. - + :param response: HTTP response to check :param error_map: Error map for status code mapping :raises HttpResponseError: If response status is not 200 @@ -119,7 +119,7 @@ def process_list_tools_response( existing_names: set ) -> List[FoundryTool]: """Process list_tools response and build descriptors. - + :param response: HTTP response with MCP tools :param named_mcp_tools: Named MCP tools configuration :param existing_names: Set of existing tool names @@ -139,13 +139,24 @@ def process_resolve_tools_response( existing_names: set ) -> List[FoundryTool]: """Process resolve_tools response and build descriptors. 
- + :param response: HTTP response with remote tools :param remote_tools: Remote tools configuration :param existing_names: Set of existing tool names :return: List of tool descriptors """ - toolResponse = ToolsResponse.from_dict(response.json(), remote_tools) + payload = response.json() + response_type = payload.get("type") + result = payload.get("toolResult") + + if response_type == "OAuthConsentRequired": + consent_url = result.get("consentUrl") + message = result.get("message") + if not consent_url: + consent_url = message + raise OAuthConsentRequiredError(message, consent_url=consent_url, payload=payload) + + toolResponse = ToolsResponse.from_dict(payload, remote_tools) return ToolDescriptorBuilder.build_descriptors( toolResponse.enriched_tools, ToolSource.REMOTE_TOOLS, @@ -157,7 +168,7 @@ def build_list_tools_request( kwargs: Dict[str, Any] ) -> Tuple[HttpRequest, MutableMapping, Dict[str, str]]: """Build request for listing MCP tools. - + :param api_version: API version :param kwargs: Additional arguments (headers, params, error_map) :return: Tuple of (request, error_map, params) @@ -165,11 +176,11 @@ def build_list_tools_request( error_map = prepare_error_map(kwargs.pop("error_map", None)) _headers = prepare_request_headers(MCP_HEADERS, kwargs.pop("headers", None)) _params = kwargs.pop("params", {}) or {} - + _content = prepare_mcptools_list_tools_request_content() content = json.dumps(_content) _request = build_mcptools_list_tools_request(api_version=api_version, headers=_headers, params=_params, content=content) - + return _request, error_map, kwargs def build_invoke_mcp_tool_request( @@ -179,7 +190,7 @@ def build_invoke_mcp_tool_request( **kwargs: Any ) -> Tuple[HttpRequest, MutableMapping]: """Build request for invoking MCP tool. 
- + :param api_version: API version :param tool: Tool descriptor :param arguments: Tool arguments @@ -188,12 +199,12 @@ def build_invoke_mcp_tool_request( error_map = prepare_error_map() _headers = prepare_request_headers(MCP_HEADERS) _params = {} - + _content = prepare_mcptools_invoke_tool_request_content(tool, arguments, TOOL_PROPERTY_OVERRIDES) content = json.dumps(_content) _request = build_mcptools_invoke_tool_request(api_version=api_version, headers=_headers, params=_params, content=content) - + return _request, error_map def build_resolve_tools_request( @@ -204,7 +215,7 @@ def build_resolve_tools_request( kwargs: Dict[str, Any] ) -> Union[Tuple[HttpRequest, MutableMapping, Dict[str, Any]], Tuple[None, None, None]]: """Build request for resolving remote tools. - + :param agent_name: Agent name :param api_version: API version :param tool_config: Tool configuration @@ -215,14 +226,14 @@ def build_resolve_tools_request( error_map = prepare_error_map(kwargs.pop("error_map", None)) _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS, kwargs.pop("headers", None)) _params = kwargs.pop("params", {}) or {} - + _content = prepare_remotetools_resolve_tools_request_content(tool_config, user) if _content is None: return None, None, None - + content = json.dumps(_content.to_dict()) _request = build_remotetools_resolve_tools_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) - + return _request, error_map, kwargs def build_invoke_remote_tool_request( @@ -233,7 +244,7 @@ def build_invoke_remote_tool_request( arguments: Mapping[str, Any] ) -> Tuple[HttpRequest, MutableMapping]: """Build request for invoking remote tool. 
- + :param agent_name: Agent name :param api_version: API version :param tool: Tool descriptor @@ -244,16 +255,16 @@ def build_invoke_remote_tool_request( error_map = prepare_error_map() _headers = prepare_request_headers(REMOTE_TOOLS_HEADERS) _params = {} - + _content = prepare_remotetools_invoke_tool_request_content(tool, user, arguments) content = json.dumps(_content) _request = build_remotetools_invoke_tool_request(agent_name, api_version=api_version, headers=_headers, params=_params, content=content) - + return _request, error_map def process_invoke_remote_tool_response(response: HttpResponse) -> Any: """Process remote tool invocation response. - + :param response: HTTP response :return: Tool result :raises OAuthConsentRequiredError: If OAuth consent is required @@ -261,7 +272,7 @@ def process_invoke_remote_tool_response(response: HttpResponse) -> Any: payload = response.json() response_type = payload.get("type") result = payload.get("toolResult") - + if response_type == "OAuthConsentRequired": raise OAuthConsentRequiredError(result.get("message"), consent_url=result.get("consentUrl"), payload=payload) return result @@ -270,7 +281,7 @@ class MCPToolsOperations: def __init__(self, *args, **kwargs) -> None: """Initialize MCP client. - + Parameters ---------- client : PipelineClient @@ -284,10 +295,10 @@ def __init__(self, *args, **kwargs) -> None: if self._client is None or self._config is None: raise ValueError("Both 'client' and 'config' must be provided") - + self._endpoint_path = MCP_ENDPOINT_PATH self._api_version = API_VERSION - + def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: """List MCP tools. 
@@ -298,12 +309,11 @@ def list_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool]: response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) handle_response_error(response, error_map) return process_list_tools_response(response, self._config.tool_config._named_mcp_tools, existing_names) - + def invoke_tool( self, tool: FoundryTool, arguments: Mapping[str, Any], - **kwargs: Any ) -> Any: """Invoke an MCP tool. @@ -315,7 +325,7 @@ def invoke_tool( :rtype: Any """ _request, error_map = build_invoke_mcp_tool_request(self._api_version, tool, arguments) - response = format_and_execute_request(self._client, _request, self._config.endpoint, **kwargs) + response = format_and_execute_request(self._client, _request, self._config.endpoint) handle_response_error(response, error_map) return response.json().get("result") @@ -357,9 +367,9 @@ def prepare_mcptools_invoke_tool_request_content(tool: FoundryTool, arguments: M "name": tool.name, "arguments": dict(arguments), } - + if tool.tool_definition: - + key_overrides = tool_overrides.get(tool.name, {}) meta_config = MetadataMapper.prepare_metadata_dict( tool.metadata, @@ -404,7 +414,7 @@ def build_mcptools_invoke_tool_request( class RemoteToolsOperations: def __init__(self, *args, **kwargs) -> None: """Initialize Tools API client. - + :param client: Azure PipelineClient for HTTP requests. :type client: ~azure.core.PipelineClient :param config: Configuration object. 
@@ -418,7 +428,7 @@ def __init__(self, *args, **kwargs) -> None: if self._client is None or self._config is None: raise ValueError("Both 'client' and 'config' must be provided") - + # Apply agent name substitution to endpoint paths self.agent = self._config.agent_name.strip() if self._config.agent_name and self._config.agent_name.strip() else "$default" self._api_version = API_VERSION @@ -432,12 +442,12 @@ def resolve_tools(self, existing_names: set, **kwargs: Any) -> List[FoundryTool] result = build_resolve_tools_request(self.agent, self._api_version, self._config.tool_config, self._config.user, kwargs) if result[0] is None: return [] - + _request, error_map, remaining_kwargs = result response = format_and_execute_request(self._client, _request, self._config.endpoint, **remaining_kwargs) handle_response_error(response, error_map) return process_resolve_tools_response(response, self._config.tool_config._remote_tools, existing_names) - + def invoke_tool( self, tool: FoundryTool, @@ -456,7 +466,7 @@ def invoke_tool( response = format_and_execute_request(self._client, _request, self._config.endpoint) handle_response_error(response, error_map) return process_invoke_remote_tool_response(response) - + def prepare_remotetools_invoke_tool_request_content(tool: FoundryTool, user: UserInfo, arguments: Mapping[str, Any]) -> Any: payload = { "toolName": tool.name, @@ -539,4 +549,3 @@ def build_remotetools_resolve_tools_request( _url = f"/agents/{agent_name}/tools/resolve" return HttpRequest(method="POST", url=_url, headers=_headers, params=_params, **kwargs) - \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py index b8dd5c328780..7844eee8d155 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/constants.py @@ 
-12,3 +12,4 @@ class Constants: AGENT_DEBUG_ERRORS = "AGENT_DEBUG_ERRORS" ENABLE_APPLICATION_INSIGHTS_LOGGER = "AGENT_APP_INSIGHTS_ENABLED" AZURE_AI_WORKSPACE_ENDPOINT = "AZURE_AI_WORKSPACE_ENDPOINT" + AZURE_AI_TOOLS_ENDPOINT = "AZURE_AI_TOOLS_ENDPOINT" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index bc5a15a37775..1724be6e1f3b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -6,6 +6,7 @@ import inspect import json import os +import time import traceback from abc import abstractmethod from typing import Any, AsyncGenerator, Generator, Optional, Union @@ -20,7 +21,7 @@ from starlette.responses import JSONResponse, Response, StreamingResponse from starlette.routing import Route from starlette.types import ASGIApp - +from ..models import projects as project_models from ..constants import Constants from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, request_context from ..models import ( @@ -35,14 +36,13 @@ logger = get_logger() DEBUG_ERRORS = os.environ.get(Constants.AGENT_DEBUG_ERRORS, "false").lower() == "true" - class AgentRunContextMiddleware(BaseHTTPMiddleware): def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None): super().__init__(app) self.agent = agent async def dispatch(self, request: Request, call_next): - user_info = {} + user_info: Optional[UserInfo] = None if request.url.path in ("/runs", "/responses"): try: user_info = self.set_user_info_to_context_var(request) @@ -88,8 +88,8 @@ def set_run_context_to_context_var(self, run_context): ctx.update(res) request_context.set(ctx) - def set_user_info_to_context_var(self, request) -> UserInfo: - user_info: UserInfo = None + def set_user_info_to_context_var(self, request) -> Optional[UserInfo]: + user_info: 
Optional[UserInfo] = None try: object_id_header = request.headers.get("x-aml-oid", None) tenant_id_header = request.headers.get("x-aml-tid", None) @@ -111,6 +111,9 @@ def set_user_info_to_context_var(self, request) -> UserInfo: class FoundryCBAgent: + _cached_tools_endpoint: Optional[str] = None + _cached_agent_name: Optional[str] = None + def __init__(self, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> None: self.credentials = credentials self.tools = kwargs.get("tools", []) @@ -254,6 +257,118 @@ async def agent_run( ) -> Union[OpenAIResponse, Generator[ResponseStreamEvent, Any, Any], AsyncGenerator[ResponseStreamEvent, Any]]: raise NotImplementedError + async def respond_with_oauth_consent(self, context, error) -> project_models.Response: + """Generate a response indicating that OAuth consent is required. + + :param context: The agent run context. + :type context: AgentRunContext + :param error: The OAuthConsentRequiredError instance. + :type error: OAuthConsentRequiredError + :return: A Response indicating the need for OAuth consent. + :rtype: project_models.Response + """ + output = [ + project_models.OAuthConsentRequestItemResource( + id=context.id_generator.generate_oauthreq_id(), + consent_link=error.consent_url, + server_label="server_label" + ) + ] + agent_id = context.get_agent_id_object() + conversation = context.get_conversation_object() + response = project_models.Response({ + "object": "response", + "id": context.response_id, + "agent": agent_id, + "conversation": conversation, + "metadata": context.request.get("metadata"), + "created_at": int(time.time()), + "output": output, + }) + return response + + async def respond_with_oauth_consent_astream(self, context, error) -> AsyncGenerator[ResponseStreamEvent, None]: + """Generate a response stream indicating that OAuth consent is required. + + :param context: The agent run context. + :type context: AgentRunContext + :param error: The OAuthConsentRequiredError instance. 
+ :type error: OAuthConsentRequiredError + :return: An async generator yielding ResponseStreamEvent instances. + :rtype: AsyncGenerator[ResponseStreamEvent, None] + """ + sequence_number = 0 + agent_id = context.get_agent_id_object() + conversation = context.get_conversation_object() + + response = project_models.Response({ + "object": "response", + "id": context.response_id, + "agent": agent_id, + "conversation": conversation, + "metadata": context.request.get("metadata"), + "status": "in_progress", + "created_at": int(time.time()), + }) + yield project_models.ResponseCreatedEvent(sequence_number=sequence_number, response=response) + sequence_number += 1 + + response = project_models.Response({ + "object": "response", + "id": context.response_id, + "agent": agent_id, + "conversation": conversation, + "metadata": context.request.get("metadata"), + "status": "in_progress", + "created_at": int(time.time()), + }) + yield project_models.ResponseInProgressEvent(sequence_number=sequence_number, response=response) + + sequence_number += 1 + output_index = 0 + oauth_id = context.id_generator.generate_oauthreq_id() + item = project_models.OAuthConsentRequestItemResource({ + "id": oauth_id, + "type": "oauth_consent_request", + "consent_link": error.consent_url, + "server_label": "server_label", + }) + yield project_models.ResponseOutputItemAddedEvent(sequence_number=sequence_number, + output_index=output_index, item=item) + sequence_number += 1 + yield project_models.ResponseStreamEvent({ + "sequence_number": sequence_number, + "output_index": output_index, + "id": oauth_id, + "type": "response.oauth_consent_requested", + "consent_link": error.consent_url, + "server_label": "server_label", + }) + + sequence_number += 1 + yield project_models.ResponseOutputItemDoneEvent(sequence_number=sequence_number, + output_index=output_index, item=item) + sequence_number += 1 + output = [ + project_models.OAuthConsentRequestItemResource( + id= oauth_id, + consent_link=error.consent_url, 
+ server_label="server_label" + ) + ] + + response = project_models.Response({ + "object": "response", + "id": context.response_id, + "agent": agent_id, + "conversation": conversation, + "metadata": context.request.get("metadata"), + "created_at": int(time.time()), + "status": "completed", + "output": output, + }) + yield project_models.ResponseCompletedEvent(sequence_number=sequence_number, response=response) + async def agent_liveness(self, request) -> Union[Response, dict]: return Response(status_code=200) @@ -335,6 +450,81 @@ def setup_otlp_exporter(self, endpoint, provider): provider.add_span_processor(processor) logger.info(f"Tracing setup with OTLP exporter: {endpoint}") + @staticmethod + def _configure_endpoint() -> tuple[str, Optional[str]]: + """Configure and return the tools endpoint and agent name from environment variables. + + :return: A tuple of (tools_endpoint, agent_name). + :rtype: tuple[str, Optional[str]] + """ + if not FoundryCBAgent._cached_tools_endpoint: + project_endpoint_format: str = "https://{account_name}.services.ai.azure.com/api/projects/{project_name}" + workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) + tools_endpoint = os.getenv(Constants.AZURE_AI_TOOLS_ENDPOINT) + project_endpoint = os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT) + + if not tools_endpoint: + # project endpoint corrupted could have been an overridden enviornment variable + # try to reconstruct tools endpoint from workspace endpoint + # Robustly reconstruct project_endpoint from workspace_endpoint if needed. 
+ + if workspace_endpoint: + # Expected format: + # "https://.api.azureml.ms/subscriptions//resourceGroups// + # providers/Microsoft.MachineLearningServices/workspaces/@@AML" + from urllib.parse import urlparse + parsed_url = urlparse(workspace_endpoint) + path_parts = [p for p in parsed_url.path.split('/') if p] + # Find the 'workspaces' part and extract account_name@project_name@AML + try: + workspaces_idx = path_parts.index("workspaces") + if workspaces_idx + 1 >= len(path_parts): + raise ValueError( + f"Workspace endpoint path does not contain workspace info " + f"after 'workspaces': {workspace_endpoint}" + ) + workspace_info = path_parts[workspaces_idx + 1] + workspace_parts = workspace_info.split('@') + if len(workspace_parts) < 2: + raise ValueError( + f"Workspace info '{workspace_info}' does not contain both account_name " + f"and project_name separated by '@'." + ) + account_name = workspace_parts[0] + project_name = workspace_parts[1] + # Documented expected format for PROJECT_ENDPOINT_FORMAT: + # "https://.api.azureml.ms/api/projects/{project_name}" + project_endpoint = project_endpoint_format.format( + account_name=account_name, project_name=project_name + ) + except (ValueError, IndexError) as e: + raise ValueError( + f"Failed to reconstruct project endpoint from workspace endpoint " + f"'{workspace_endpoint}': {e}" + ) from e + # should never reach here + logger.info("Reconstructed tools endpoint from project endpoint %s", project_endpoint) + tools_endpoint = project_endpoint + + tools_endpoint = project_endpoint + + if not tools_endpoint: + raise ValueError( + "Project endpoint needed for Azure AI tools endpoint is not found. " + ) + FoundryCBAgent._cached_tools_endpoint = tools_endpoint + + agent_name = os.getenv(Constants.AGENT_NAME) + if agent_name is None: + if os.getenv("CONTAINER_APP_NAME"): + raise ValueError( + "Agent name needed for Azure AI hosted agents is not found. 
" + ) + agent_name = "$default" + FoundryCBAgent._cached_agent_name = agent_name + + return FoundryCBAgent._cached_tools_endpoint, FoundryCBAgent._cached_agent_name + def get_tool_client( self, tools: Optional[list[ToolDefinition]], user_info: Optional[UserInfo] ) -> AzureAIToolClient: @@ -342,23 +532,14 @@ def get_tool_client( if not self.credentials: raise ValueError("Credentials are required to create Tool Client.") - workspace_endpoint = os.getenv(Constants.AZURE_AI_WORKSPACE_ENDPOINT) - if workspace_endpoint: - agent_name = os.getenv(Constants.AGENT_NAME) - if not agent_name: - raise ValueError("AGENT_NAME environment variable is required when using workspace endpoint.") - return AzureAIToolClient( - endpoint=workspace_endpoint, + tools_endpoint, agent_name = self._configure_endpoint() + + return AzureAIToolClient( + endpoint=tools_endpoint, credential=self.credentials, tools=tools, user=user_info, agent_name=agent_name, - ) - return AzureAIToolClient( - endpoint=os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT), - credential=self.credentials, - tools=tools, - user=user_info, ) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py index 48f0d9add17d..5b602a7fc686 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py @@ -17,3 +17,6 @@ def generate_function_output_id(self) -> str: def generate_message_id(self) -> str: return self.generate("msg") + + def generate_oauthreq_id(self) -> str: + return self.generate("oauthreq") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index c7c4aaaa6369..6b02bba9a0fd 100644 --- 
a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -2,6 +2,10 @@ ## 1.0.0b5 (2025-11-16) +### Feature Added + +- Support Tools Oauth + ### Bugs Fixed - Fixed streaming generation issues. diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index beae4faf6499..65d76c6a5a03 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -11,6 +11,7 @@ from langchain_core.tools import StructuredTool from langgraph.graph.state import CompiledStateGraph +from azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError from azure.ai.agentserver.core.constants import Constants from azure.ai.agentserver.core.logger import get_logger from azure.ai.agentserver.core.server.base import FoundryCBAgent @@ -31,7 +32,7 @@ class GraphFactory(Protocol): """Protocol for graph factory functions. - + A graph factory is a callable that takes a ToolClient and returns a CompiledStateGraph, either synchronously or asynchronously. """ @@ -93,7 +94,7 @@ def __init__( def graph(self) -> "Optional[CompiledStateGraph]": """ Get the resolved graph. This property provides backward compatibility. - + :return: The resolved CompiledStateGraph if available, None otherwise. 
:rtype: Optional[CompiledStateGraph] """ @@ -115,17 +116,34 @@ async def agent_run(self, context: AgentRunContext): input_data = self.state_converter.request_to_state(context) logger.debug(f"Converted input data: {input_data}") if not context.stream: - response = await self.agent_run_non_stream(input_data, context, graph) - return response + try: + response = await self.agent_run_non_stream(input_data, context, graph) + return response + finally: + # Close tool_client for non-streaming requests + if tool_client is not None: + try: + await tool_client.close() + logger.debug("Closed tool_client after non-streaming request") + except Exception as e: + logger.warning(f"Error closing tool_client: {e}") + + # For streaming, pass tool_client to be closed after streaming completes return self.agent_run_astream(input_data, context, graph, tool_client) - finally: - # Close tool_client if it was created for this request + except OAuthConsentRequiredError as e: + # Clean up tool_client if OAuth error occurs before streaming starts if tool_client is not None: - try: - await tool_client.close() - logger.debug("Closed tool_client after request processing") - except Exception as e: - logger.warning(f"Error closing tool_client: {e}") + await tool_client.close() + + if not context.stream: + response = await self.respond_with_oauth_consent(context, e) + return response + return self.respond_with_oauth_consent_astream(context, e) + except Exception: + # Clean up tool_client if error occurs before streaming starts + if tool_client is not None: + await tool_client.close() + raise async def _resolve_graph(self, context: AgentRunContext): """Resolve the graph if it's a factory function (for single-use/first-time resolution). @@ -169,7 +187,7 @@ async def _resolve_graph_for_request(self, context: AgentRunContext): Resolve a fresh graph instance for a single request to avoid concurrency issues. Creates a ToolClient and calls the factory function with it. 
This method returns a new graph instance and the tool_client for cleanup. - + :param context: The context for the agent run. :type context: AgentRunContext :return: A tuple of (compiled graph instance, tool_client wrapper). diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py index 374db1d1d98b..cde9a3756a58 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -124,7 +124,9 @@ async def tool_func(**kwargs: Any) -> str: :return: The result from the tool invocation as a string. :rtype: str + :raises OAuthConsentRequiredError: If OAuth consent is required for the tool invocation. """ + # Let OAuthConsentRequiredError propagate up to be handled by the agent result = await azure_tool(**kwargs) # Convert result to string for LangChain compatibility if isinstance(result, dict): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py index f77a0b31b1d5..7daa62d0ec9f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/tool_client_example/use_tool_client_example.py @@ -44,13 +44,14 @@ async def quickstart(): "Set it to your Azure AI project endpoint, e.g., " "https://.services.ai.azure.com/api/projects/" ) + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") # Create Azure credentials credential = DefaultAzureCredential() tool_definitions = [ { "type": "mcp", - "project_connection_id": "" + "project_connection_id": tool_connection_id }, { "type": "code_interpreter", From 
9cdf214fba92f636d5c07af1691ae4570354b91c Mon Sep 17 00:00:00 2001 From: junanchen Date: Mon, 17 Nov 2025 00:21:14 -0800 Subject: [PATCH 22/35] Fix function output parse --- ...nt_framework_output_streaming_converter.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 96beb535d3fb..2a1d2fb55366 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -7,7 +7,7 @@ import datetime import json -from typing import AsyncIterable, List, Optional +from typing import Any, AsyncIterable, List, Optional from agent_framework import AgentRunResponseUpdate, BaseContent, FunctionApprovalRequestContent, FunctionResultContent from agent_framework._types import ( @@ -201,7 +201,7 @@ async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) output = (f"{type(content.exception)}({str(content.exception)})" if content.exception - else json.dumps(content.result)) + else self._to_output(content.result)) item = FunctionToolCallOutputItemResource( id=item_id, @@ -224,6 +224,21 @@ async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) self._parent.add_completed_output_item(item) # pylint: disable=protected-access + @classmethod + def _to_output(cls, result: Any) -> str: + if isinstance(result, str): + return result + elif isinstance(result, list): + text = [] + for item in result: + if isinstance(item, BaseContent): + text.append(item.to_dict()) + else: + text.append(str(item)) + 
return json.dumps(text) + else: + return "" + class AgentFrameworkOutputStreamingConverter: """Streaming converter using content-type-specific state handlers.""" From 9b46eb90e8d6d6a7784e7e0a1f0ee162e3faa174 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty Date: Mon, 17 Nov 2025 01:12:56 -0800 Subject: [PATCH 23/35] Refactor ToolClient to handle optional schema properties and required fields --- .../agentserver/agentframework/tool_client.py | 38 +++++++++---------- .../ai/agentserver/langgraph/tool_client.py | 4 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py index 6f410c29d484..8b7142f0862a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/tool_client.py @@ -16,32 +16,32 @@ # pylint: disable=client-accepts-api-version-keyword,missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs class ToolClient: """Client that integrates AzureAIToolClient with Agent Framework. - + This class provides methods to list tools from AzureAIToolClient and invoke them in a format compatible with Agent Framework agents. - + :param tool_client: The AzureAIToolClient instance to use for tool operations. :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient - + .. admonition:: Example: - + .. 
code-block:: python - + from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient from azure.ai.agentserver.agentframework import ToolClient from azure.identity.aio import DefaultAzureCredential - + async with DefaultAzureCredential() as credential: tool_client = AzureAIToolClient( endpoint="https://", credential=credential ) - + client = ToolClient(tool_client) - + # List tools as Agent Framework tool definitions tools = await client.list_tools() - + # Invoke a tool directly result = await client.invoke_tool( tool_name="my_tool", @@ -53,7 +53,7 @@ class ToolClient: def __init__(self, tool_client: "AzureAIToolClient") -> None: """Initialize the ToolClient. - + :param tool_client: The AzureAIToolClient instance to use for tool operations. :type tool_client: ~azure.ai.agentserver.core.client.tools.aio.AzureAIToolClient """ @@ -62,19 +62,19 @@ def __init__(self, tool_client: "AzureAIToolClient") -> None: async def list_tools(self) -> List[AIFunction]: """List all available tools as Agent Framework tool definitions. - + Retrieves tools from AzureAIToolClient and returns them in a format compatible with Agent Framework. - + :return: List of tool definitions. :rtype: List[AIFunction] :raises ~azure.core.exceptions.HttpResponseError: Raised for HTTP communication failures. - + .. admonition:: Example: - + .. code-block:: python - + client = ToolClient(tool_client) tools = await client.list_tools() """ @@ -94,7 +94,7 @@ async def list_tools(self) -> List[AIFunction]: def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunction: """Convert an AzureAITool to an Agent Framework AI Function - + :param azure_tool: The AzureAITool to convert. :type azure_tool: ~azure.ai.agentserver.core.client.tools.aio.FoundryTool :return: An AI Function Tool. 
@@ -104,8 +104,8 @@ def _convert_to_agent_framework_tool(self, azure_tool: "FoundryTool") -> AIFunct input_schema = azure_tool.input_schema or {} # Create a Pydantic model from the input schema - properties = input_schema.get("properties", {}) - required_fields = set(input_schema.get("required", [])) + properties = input_schema.get("properties") or {} + required_fields = set(input_schema.get("required") or []) # Build field definitions for the Pydantic model field_definitions: Dict[str, Any] = {} @@ -146,7 +146,7 @@ async def tool_func(**kwargs: Any) -> Any: def _json_schema_type_to_python(self, json_type: str) -> type: """Convert JSON schema type to Python type. - + :param json_type: The JSON schema type string. :type json_type: str :return: The corresponding Python type. diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py index cde9a3756a58..78baf96bee80 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tool_client.py @@ -159,8 +159,8 @@ def _create_pydantic_model( :rtype: type[BaseModel] """ # Get properties from schema - properties = schema.get("properties", {}) - required_fields = schema.get("required", []) + properties = schema.get("properties") or {} + required_fields = schema.get("required") or [] # Build field definitions for Pydantic model field_definitions = {} From f50155a8316fabdf12a5092f6757e35f211cb68a Mon Sep 17 00:00:00 2001 From: junanchen Date: Mon, 17 Nov 2025 09:42:19 -0800 Subject: [PATCH 24/35] fix mypy error on AF --- .../models/agent_framework_output_streaming_converter.py | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 2a1d2fb55366..0b862020c721 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -302,6 +302,8 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async param="", ) continue + if not state: + continue async for content in state.convert_contents(contents): yield content From b0dcb07445ea4264c8409eb398ea29f126009b4c Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 09:59:23 -0800 Subject: [PATCH 25/35] do not index AgentRunContext --- .../doc/azure.ai.agentserver.core.server.common.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst index 26c4aaf4d15a..01e54afab103 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst @@ -24,3 +24,4 @@ azure.ai.agentserver.core.server.common.agent\_run\_context module :inherited-members: :members: :undoc-members: + :no-index: From b72be774556331c04c4f69da2e318324e75ecef7 Mon Sep 17 00:00:00 2001 From: Ganesh Bheemarasetty Date: Mon, 17 Nov 2025 10:04:50 -0800 Subject: [PATCH 26/35] Filter tools and update project dependenceis --- .../pyproject.toml | 4 +-- .../agentserver/core/client/tools/_client.py | 16 ++++++++--- 
.../core/client/tools/aio/_client.py | 27 ++++++++++++++----- .../pyproject.toml | 4 +-- 4 files changed, 36 insertions(+), 15 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 052d36d10c7d..e7422ec02146 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core", + "azure-ai-agentserver-core>=1.0.0b5", "agent-framework-azure-ai==1.0.0b251007", "agent-framework-core==1.0.0b251007", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", @@ -66,4 +66,4 @@ pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false mindependency = false # depends on -core package -whl_no_aio = false \ No newline at end of file +whl_no_aio = false diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py index ea9a8479637f..f28beb498fa8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -104,12 +104,20 @@ def list_tools(self) -> List[FoundryTool]: tools: List[FoundryTool] = [] # Fetch MCP tools - mcp_tools = self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) + if ( + self._config.tool_config._named_mcp_tools + and len(self._config.tool_config._named_mcp_tools) > 0 + ): + mcp_tools = self._mcp_tools.list_tools(existing_names) + tools.extend(mcp_tools) # Fetch Tools API tools - tools_api_tools = self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) + if ( + self._config.tool_config._remote_tools + 
and len(self._config.tool_config._remote_tools) > 0 + ): + tools_api_tools = self._remote_tools.resolve_tools(existing_names) + tools.extend(tools_api_tools) for tool in tools: # Capture tool in a closure to avoid shadowing issues diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index e2d35b1dd919..277be7930df5 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from typing import Any, List, Mapping, Union, TYPE_CHECKING - +from asyncio import gather from azure.core import AsyncPipelineClient from azure.core.pipeline import policies from azure.core.tracing.decorator_async import distributed_trace_async @@ -107,12 +107,25 @@ async def list_tools(self) -> List[FoundryTool]: tools: List[FoundryTool] = [] - # Fetch MCP tools - mcp_tools = await self._mcp_tools.list_tools(existing_names) - tools.extend(mcp_tools) - # Fetch Tools API tools - tools_api_tools = await self._remote_tools.resolve_tools(existing_names) - tools.extend(tools_api_tools) + # Fetch MCP tools and Tools API tools in parallel + # Build list of coroutines to gather based on configuration + tasks = [] + if ( + self._config.tool_config._named_mcp_tools + and len(self._config.tool_config._named_mcp_tools) > 0 + ): + tasks.append(self._mcp_tools.list_tools(existing_names)) + if ( + self._config.tool_config._remote_tools + and len(self._config.tool_config._remote_tools) > 0 + ): + tasks.append(self._remote_tools.resolve_tools(existing_names)) + + # Execute all tasks in parallel if any exist + if tasks: + results = await gather(*tasks) + for result in results: + tools.extend(result) for tool in tools: # Capture tool in a 
closure to avoid shadowing issues diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index a5140068e12d..77492d1496dd 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -19,7 +19,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core", + "azure-ai-agentserver-core>=1.0.0b5", "langchain>0.3.5", "langchain-openai>0.3.10", "langchain-azure-ai[opentelemetry]>=0.1.4", @@ -67,4 +67,4 @@ pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false mindependency = false # depends on -core package -whl_no_aio = false \ No newline at end of file +whl_no_aio = false From bee67f7c9d281b5eb014f226091e9bb5aa50c4bc Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 10:08:35 -0800 Subject: [PATCH 27/35] fixing pylint --- .../ai/agentserver/agentframework/agent_framework.py | 6 ++++-- .../models/agent_framework_output_streaming_converter.py | 9 +++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 2a7c28f9a3f8..81082ade7d7c 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,no-name-in-module +# pylint: disable=logging-fstring-interpolation,no-name-in-module,no-member from __future__ import annotations import os @@ -239,7 +239,9 @@ async def stream_updates(): # logger.debug("Agent streaming iterator finished (StopAsyncIteration)") # break # except asyncio.TimeoutError: - # logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s) + # logger.warning( + # "Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s + # ) # for ev in streaming_converter.completion_events(): # yield ev # return diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 0b862020c721..d09e1f84fc24 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -228,7 +228,7 @@ async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) def _to_output(cls, result: Any) -> str: if isinstance(result, str): return result - elif isinstance(result, list): + if isinstance(result, list): text = [] for item in result: if isinstance(item, BaseContent): @@ -236,8 +236,7 @@ def _to_output(cls, result: Any) -> str: else: text.append(str(item)) return json.dumps(text) - else: - return "" + return "" class AgentFrameworkOutputStreamingConverter: @@ -281,7 +280,9 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async response=created_response, ) - is_changed = lambda a, b: a is not None and b is not 
None and a.message_id != b.message_id + is_changed = ( + lambda a, b: a is not None and b is not None and a.message_id != b.message_id + ) async for group in chunk_on_change(updates, is_changed): has_value, first, contents = await peek(self._read_updates(group)) if not has_value: From cba5fdc74de280af08cc0074c79e022ecb8f61de Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 10:41:48 -0800 Subject: [PATCH 28/35] fix build --- ...nt_framework_output_streaming_converter.py | 29 ++++++++++--------- .../agentframework/models/utils/async_iter.py | 13 +++++++-- .../agentserver/core/client/tools/_client.py | 2 +- .../core/client/tools/aio/_client.py | 2 +- .../ai/agentserver/langgraph/langgraph.py | 2 +- 5 files changed, 30 insertions(+), 18 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index d09e1f84fc24..47aafbeb8a49 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -7,7 +7,7 @@ import datetime import json -from typing import Any, AsyncIterable, List, Optional +from typing import Any, AsyncIterable, List from agent_framework import AgentRunResponseUpdate, BaseContent, FunctionApprovalRequestContent, FunctionResultContent from agent_framework._types import ( @@ -48,7 +48,7 @@ class _BaseStreamingState: """Base interface for streaming state handlers.""" - def convert_contents(self, contents: AsyncIterable[BaseContent]) -> AsyncIterable[ResponseStreamEvent]: # pylint: disable=unused-argument + async def convert_contents(self, contents: 
AsyncIterable[BaseContent]) -> AsyncIterable[ResponseStreamEvent]: # pylint: disable=unused-argument raise NotImplementedError @@ -126,7 +126,9 @@ class _FunctionCallStreamingState(_BaseStreamingState): def __init__(self, parent: AgentFrameworkOutputStreamingConverter): self._parent = parent - async def convert_contents(self, contents: AsyncIterable[FunctionCallContent]) -> AsyncIterable[ResponseStreamEvent]: + async def convert_contents( + self, contents: AsyncIterable[FunctionCallContent] + ) -> AsyncIterable[ResponseStreamEvent]: content_by_call_id = {} ids_by_call_id = {} @@ -149,18 +151,17 @@ async def convert_contents(self, contents: AsyncIterable[FunctionCallContent]) - arguments="", ), ) - continue else: content_by_call_id[content.call_id] = content_by_call_id[content.call_id] + content item_id, output_index = ids_by_call_id[content.call_id] - args_delta = content.arguments if isinstance(content.arguments, str) else "" - yield ResponseFunctionCallArgumentsDeltaEvent( - sequence_number=self._parent.next_sequence(), - item_id=item_id, - output_index=output_index, - delta=args_delta, - ) + args_delta = content.arguments if isinstance(content.arguments, str) else "" + yield ResponseFunctionCallArgumentsDeltaEvent( + sequence_number=self._parent.next_sequence(), + item_id=item_id, + output_index=output_index, + delta=args_delta, + ) for call_id, content in content_by_call_id.items(): item_id, output_index = ids_by_call_id[call_id] @@ -194,7 +195,9 @@ class _FunctionCallOutputStreamingState(_BaseStreamingState): def __init__(self, parent: AgentFrameworkOutputStreamingConverter): self._parent = parent - async def convert_contents(self, contents: AsyncIterable[FunctionResultContent]) -> AsyncIterable[ResponseStreamEvent]: + async def convert_contents( + self, contents: AsyncIterable[FunctionResultContent] + ) -> AsyncIterable[ResponseStreamEvent]: async for content in contents: item_id = self._parent.context.id_generator.generate_function_output_id() output_index = 
self._parent.next_output_index() @@ -281,7 +284,7 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async ) is_changed = ( - lambda a, b: a is not None and b is not None and a.message_id != b.message_id + lambda a, b: a is not None and b is not None and a.message_id != b.message_id # pylint: disable=unnecessary-lambda-assignment ) async for group in chunk_on_change(updates, is_changed): has_value, first, contents = await peek(self._read_updates(group)) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py index ef8525109554..fdf3b2fbb2a3 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/utils/async_iter.py @@ -4,7 +4,7 @@ from __future__ import annotations from collections.abc import AsyncIterable, AsyncIterator, Callable -from typing import TypeVar, Optional, Tuple, Awaitable +from typing import TypeVar, Optional, Tuple TSource = TypeVar("TSource") TKey = TypeVar("TKey") @@ -19,9 +19,12 @@ async def chunk_on_change( Chunks an async iterable into groups based on when consecutive elements change. :param source: Async iterable of items. + :type source: AsyncIterable[TSource] :param is_changed: Function(prev, current) -> bool indicating if value changed. If None, uses != by default. + :type is_changed: Optional[Callable[[Optional[TSource], Optional[TSource]], bool]] :return: An async iterator of async iterables (chunks). + :rtype: AsyncIterator[AsyncIterable[TSource]] """ if is_changed is None: @@ -46,9 +49,13 @@ async def chunk_by_key( Chunks the async iterable into groups based on a key selector. :param source: Async iterable of items. 
+ :type source: AsyncIterable[TSource] :param key_selector: Function mapping item -> key. + :type key_selector: Callable[[TSource], TKey] :param key_equal: Optional equality function for keys. Defaults to '=='. + :type key_equal: Optional[Callable[[TKey, TKey], bool]] :return: An async iterator of async iterables (chunks). + :rtype: AsyncIterator[AsyncIterable[TSource]] """ if key_equal is None: @@ -104,7 +111,9 @@ async def peek( Peeks at the first element of an async iterable without consuming it. :param source: Async iterable. + :type source: AsyncIterable[T] :return: (has_value, first, full_sequence_including_first) + :rtype: Tuple[bool, Optional[T], AsyncIterable[T]] """ it = source.__aiter__() @@ -131,6 +140,6 @@ async def sequence() -> AsyncIterator[T]: async def _empty_async() -> AsyncIterator[T]: - if False: + if False: # pylint: disable=using-constant-test # This is just to make this an async generator for typing yield None # type: ignore[misc] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py index f28beb498fa8..ee56a4d44a94 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/_client.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- - +# pylint: disable=protected-access from typing import Any, List, Mapping, Union from azure.core import PipelineClient from azure.core.pipeline import policies diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py index 277be7930df5..986e8756e1b6 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/client/tools/aio/_client.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- - +# pylint: disable=protected-access,do-not-import-asyncio from typing import Any, List, Mapping, Union, TYPE_CHECKING from asyncio import gather from azure.core import AsyncPipelineClient diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py index 65d76c6a5a03..e6bf10d0b5c2 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,broad-exception-caught +# pylint: disable=logging-fstring-interpolation,broad-exception-caught,no-member # mypy: disable-error-code="assignment,arg-type" import os import re From b5b2086d793c77eaf0f44ee135c9b27c2f3c2d90 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 11:18:04 -0800 Subject: [PATCH 29/35] fix mypy --- .../models/agent_framework_output_streaming_converter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 47aafbeb8a49..72aea41419a8 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- # pylint: disable=attribute-defined-outside-init,protected-access -# mypy: disable-error-code="call-overload,assignment,arg-type" +# mypy: disable-error-code="call-overload,assignment,arg-type,override" from __future__ import annotations import datetime From 7d42f0db0be6d56d3b10502f861ed57b61a4d54f Mon Sep 17 00:00:00 2001 From: junanchen Date: Mon, 17 Nov 2025 13:23:17 -0800 Subject: [PATCH 30/35] remove DONE when getting error --- .../agentframework/agent_framework.py | 26 ------------------- ...nt_framework_output_streaming_converter.py | 17 +++++++----- .../azure/ai/agentserver/core/server/base.py | 1 - 3 files changed, 10 insertions(+), 34 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 81082ade7d7c..07142d888d37 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -225,32 +225,6 @@ async def stream_updates(): update_count += 1 yield event - # timeout_s = self._resolve_stream_timeout(context.request) - # logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s) - # for ev in streaming_converter.initial_events(): - # yield ev - # - # # Iterate with per-update timeout; terminate if idle too long - # aiter = agent.run_stream(message).__aiter__() - # while True: - # try: - # update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s) - # except StopAsyncIteration: - # logger.debug("Agent streaming iterator finished (StopAsyncIteration)") - # break - # except asyncio.TimeoutError: - # logger.warning( - # "Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s - # ) - # for ev in 
streaming_converter.completion_events(): - # yield ev - # return - # update_count += 1 - # transformed = streaming_converter.transform_output_for_streaming(update) - # for event in transformed: - # yield event - # for ev in streaming_converter.completion_events(): - # yield ev logger.info("Streaming completed with %d updates", update_count) finally: # Close tool_client if it was created for this request diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 72aea41419a8..406c8191850a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -299,13 +299,16 @@ async def convert(self, updates: AsyncIterable[AgentRunResponseUpdate]) -> Async elif isinstance(first, FunctionResultContent): state = _FunctionCallOutputStreamingState(self) elif isinstance(first, ErrorContent): - yield ResponseErrorEvent( - sequence_number=self.next_sequence(), - code=getattr(first, "error_code", None) or "server_error", - message=getattr(first, "message", None) or "An error occurred", - param="", - ) - continue + code=getattr(first, "error_code", None) or "server_error" + message=getattr(first, "message", None) or "An error occurred" + raise ValueError(f"ErrorContent received: code={code}, message={message}") + # yield ResponseErrorEvent( + # sequence_number=self.next_sequence(), + # code=getattr(first, "error_code", None) or "server_error", + # message=getattr(first, "message", None) or "An error occurred", + # param="", + # ) + # continue if not state: continue diff --git 
a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 1724be6e1f3b..618bb13441e5 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -158,7 +158,6 @@ def gen(): logger.error("Error in non-async generator: %s\n%s", e, traceback.format_exc()) payload = {"error": err_msg} yield f"event: error\ndata: {json.dumps(payload)}\n\n" - yield "data: [DONE]\n\n" error_sent = True finally: logger.info("End of processing CreateResponse request.") From dfd1bd18803a7e5e71597745b087bd425dad5e1b Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 13:59:31 -0800 Subject: [PATCH 31/35] fix pylint --- .../models/agent_framework_output_streaming_converter.py | 1 - 1 file changed, 1 deletion(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py index 406c8191850a..12cfab983643 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py @@ -30,7 +30,6 @@ ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, ResponseCreatedEvent, - ResponseErrorEvent, ResponseFunctionCallArgumentsDeltaEvent, ResponseFunctionCallArgumentsDoneEvent, ResponseInProgressEvent, From c18d494dbdf492c14cc6c2df8d1f53c6a97094af Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Mon, 17 Nov 2025 15:34:15 -0800 Subject: [PATCH 32/35] do not add user oid to tracing --- 
.../azure/ai/agentserver/core/server/base.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 618bb13441e5..cf7d4567f37f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -105,6 +105,8 @@ def set_user_info_to_context_var(self, request) -> Optional[UserInfo]: if user_info: ctx = request_context.get() or {} for key, value in user_info.to_dict().items(): + if key == "objectId": + continue # skip user objectId ctx[f"azure.ai.agentserver.user.{key}"] = str(value) request_context.set(ctx) return user_info From 30b78e9064d21ed83d1dc85c3c05f984d8e87af5 Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Wed, 19 Nov 2025 10:57:33 -0800 Subject: [PATCH 33/35] [AgentServer][Agentframework] update agent framework version (#44102) * upgrade af version * update agent framework azure client * revert change on -core * async init af tracing * fix minor --- .../agentframework/agent_framework.py | 23 +++++++++++++++---- .../pyproject.toml | 4 ++-- .../azure/ai/agentserver/core/server/base.py | 3 ++- .../azure-ai-agentserver-core/pyproject.toml | 2 +- 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 07142d888d37..01929cd00040 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -9,7 +9,7 @@ import inspect from 
agent_framework import AgentProtocol, AIFunction -from agent_framework.azure import AzureAIAgentClient # pylint: disable=no-name-in-module +from agent_framework.azure import AzureAIClient # pylint: disable=no-name-in-module from opentelemetry import trace from azure.ai.agentserver.core.client.tools import OAuthConsentRequiredError @@ -182,11 +182,26 @@ def init_tracing(self): applicationinsights_connection_string=app_insights_conn_str, ) elif project_endpoint: - project_client = AIProjectClient(endpoint=project_endpoint, credential=DefaultAzureCredential()) - agent_client = AzureAIAgentClient(project_client=project_client) - agent_client.setup_azure_ai_observability() + self.setup_tracing_with_azure_ai_client(project_endpoint) self.tracer = trace.get_tracer(__name__) + def setup_tracing_with_azure_ai_client(self, project_endpoint: str): + async def setup_async(): + async with AzureAIClient( + project_endpoint=project_endpoint, async_credential=self.credentials + ) as agent_client: + await agent_client.setup_azure_ai_observability() + + import asyncio + + loop = asyncio.get_event_loop() + if loop.is_running(): + # If loop is already running, schedule as a task + asyncio.create_task(setup_async()) + else: + # Run in new event loop + loop.run_until_complete(setup_async()) + async def agent_run( # pylint: disable=too-many-statements self, context: AgentRunContext ) -> Union[ diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index e7422ec02146..19840e57fadb 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -21,8 +21,8 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-ai-agentserver-core>=1.0.0b5", - "agent-framework-azure-ai==1.0.0b251007", - "agent-framework-core==1.0.0b251007", + "agent-framework-azure-ai>=1.0.0b251112", + 
"agent-framework-core>=1.0.0b251112", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", ] diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index cf7d4567f37f..2603fd997b0e 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -12,6 +12,7 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn +from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -117,7 +118,7 @@ class FoundryCBAgent: _cached_agent_name: Optional[str] = None def __init__(self, credentials: Optional["AsyncTokenCredential"] = None, **kwargs: Any) -> None: - self.credentials = credentials + self.credentials = credentials or AsyncDefaultTokenCredential() self.tools = kwargs.get("tools", []) async def runs_endpoint(request): diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index ad882b2ab596..76683e032cd0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -20,7 +20,7 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-monitor-opentelemetry>=1.5.0", - "azure-ai-projects==1.1.0b4", + "azure-ai-projects>=1.1.0b4", "azure-ai-agents==1.2.0b5", "azure-core>=1.35.0", "azure-identity", From 1137076289bbd10bed7e9ec4cf450a8a1fe30fa7 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Wed, 19 Nov 2025 16:44:33 -0800 Subject: [PATCH 34/35] fix dependency --- sdk/agentserver/azure-ai-agentserver-core/pyproject.toml | 1 + 1 file changed, 1 
insertion(+) diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 76683e032cd0..4b2f295cec80 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -29,6 +29,7 @@ dependencies = [ "opentelemetry-exporter-otlp-proto-http", "starlette>=0.45.0", "uvicorn>=0.31.0", + "aiohttp", # used by azure-identity aio ] [build-system] From b7a2280fc87d2418e37d09602e529d0e14737b3d Mon Sep 17 00:00:00 2001 From: lusu-msft <68949729+lusu-msft@users.noreply.github.com> Date: Fri, 21 Nov 2025 16:29:28 -0800 Subject: [PATCH 35/35] [AgentServer] fix build pipelines (#44145) * fix mindependency for -core * fix langgraph min dependency * try test with mindependency * disable mindependency for af because of azure-ai-projects version * fix analyze build * upgrade version * fix pylint --- .../azure-ai-agentserver-agentframework/CHANGELOG.md | 11 ++++++++++- .../azure/ai/agentserver/agentframework/_version.py | 2 +- .../ai/agentserver/agentframework/agent_framework.py | 4 +--- .../azure-ai-agentserver-agentframework/cspell.json | 3 ++- .../pyproject.toml | 2 +- .../azure-ai-agentserver-core/CHANGELOG.md | 9 +++++++++ .../azure/ai/agentserver/core/_version.py | 2 +- .../azure/ai/agentserver/core/server/base.py | 6 ++++-- sdk/agentserver/azure-ai-agentserver-core/cspell.json | 3 ++- .../azure-ai-agentserver-core/pyproject.toml | 4 ++-- .../azure-ai-agentserver-langgraph/CHANGELOG.md | 8 ++++++++ .../azure/ai/agentserver/langgraph/_version.py | 2 +- .../azure-ai-agentserver-langgraph/pyproject.toml | 8 +++----- 13 files changed, 45 insertions(+), 19 deletions(-) diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md index a01bc1990909..2393a16515f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md +++ 
b/sdk/agentserver/azure-ai-agentserver-agentframework/CHANGELOG.md @@ -1,5 +1,13 @@ # Release History + +## 1.0.0b6 (2025-11-26) + +### Feature Added + +- Support Agent-framework greater than 251112 + + ## 1.0.0b5 (2025-11-16) ### Feature Added @@ -10,6 +18,7 @@ - Fixed streaming generation issues. + ## 1.0.0b4 (2025-11-13) ### Feature Added @@ -36,7 +45,7 @@ - Fixed Id generator format. -- Improved stream mode error message. +- Improved stream mode error message. - Updated application insights related configuration environment variables. diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py index c7d155d924dd..d17ec8abfb6f 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b5" +VERSION = "1.0.0b6" diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py index 01929cd00040..4a0a074bd635 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/agent_framework.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,no-name-in-module,no-member +# pylint: disable=logging-fstring-interpolation,no-name-in-module,no-member,do-not-import-asyncio from __future__ import annotations import os @@ -21,8 +21,6 @@ Response as OpenAIResponse, ResponseStreamEvent, ) -from azure.ai.projects import AIProjectClient -from azure.identity import DefaultAzureCredential from .models.agent_framework_input_converters import AgentFrameworkInputConverter from .models.agent_framework_output_non_streaming_converter import ( diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json index 48c11927e406..951bfab2c88a 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/cspell.json @@ -6,7 +6,8 @@ "envtemplate", "pysort", "redef", - "aifunction" + "aifunction", + "ainvoke" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml index 19840e57fadb..2b5ddf89a8de 100644 --- a/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-agentframework/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ keywords = ["azure", "azure sdk"] dependencies = [ - "azure-ai-agentserver-core>=1.0.0b5", + "azure-ai-agentserver-core>=1.0.0b6", "agent-framework-azure-ai>=1.0.0b251112", "agent-framework-core>=1.0.0b251112", "opentelemetry-exporter-otlp-proto-grpc>=1.36.0", diff --git a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md index 55a56fed54ca..2393a16515f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-core/CHANGELOG.md @@ -1,5 +1,13 @@ # Release History + 
+## 1.0.0b6 (2025-11-26) + +### Feature Added + +- Support Agent-framework greater than 251112 + + ## 1.0.0b5 (2025-11-16) ### Feature Added @@ -10,6 +18,7 @@ - Fixed streaming generation issues. + ## 1.0.0b4 (2025-11-13) ### Feature Added diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py index c7d155d924dd..d17ec8abfb6f 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b5" +VERSION = "1.0.0b6" diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py index 2603fd997b0e..eeb4b85cdc34 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py @@ -12,7 +12,6 @@ from typing import Any, AsyncGenerator, Generator, Optional, Union import uvicorn -from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential from opentelemetry import context as otel_context, trace from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from starlette.applications import Starlette @@ -22,6 +21,9 @@ from starlette.responses import JSONResponse, Response, StreamingResponse from starlette.routing import Route from starlette.types import ASGIApp + +from azure.identity.aio import DefaultAzureCredential as AsyncDefaultTokenCredential + from ..models import projects as project_models from ..constants import Constants from ..logger import 
APPINSIGHT_CONNSTR_ENV_NAME, get_logger, request_context @@ -466,7 +468,7 @@ def _configure_endpoint() -> tuple[str, Optional[str]]: project_endpoint = os.getenv(Constants.AZURE_AI_PROJECT_ENDPOINT) if not tools_endpoint: - # project endpoint corrupted could have been an overridden enviornment variable + # project endpoint corrupted could have been an overridden environment variable # try to reconstruct tools endpoint from workspace endpoint # Robustly reconstruct project_endpoint from workspace_endpoint if needed. diff --git a/sdk/agentserver/azure-ai-agentserver-core/cspell.json b/sdk/agentserver/azure-ai-agentserver-core/cspell.json index 17fb91b1e58f..55131ced0609 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/cspell.json +++ b/sdk/agentserver/azure-ai-agentserver-core/cspell.json @@ -18,7 +18,8 @@ "SETFL", "Planifica", "mcptools", - "ainvoke" + "ainvoke", + "oauthreq" ], "ignorePaths": [ "*.csv", diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 4b2f295cec80..9f3d01c09c88 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -23,13 +23,13 @@ dependencies = [ "azure-ai-projects>=1.1.0b4", "azure-ai-agents==1.2.0b5", "azure-core>=1.35.0", - "azure-identity", + "azure-identity>=1.25.1", "openai>=1.80.0", "opentelemetry-api>=1.35", "opentelemetry-exporter-otlp-proto-http", "starlette>=0.45.0", "uvicorn>=0.31.0", - "aiohttp", # used by azure-identity aio + "aiohttp>=3.13.0", # used by azure-identity aio ] [build-system] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md index 6b02bba9a0fd..2393a16515f1 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/CHANGELOG.md @@ -1,5 +1,13 @@ # Release History + +## 1.0.0b6 (2025-11-26) + +### 
Feature Added + +- Support Agent-framework greater than 251112 + + ## 1.0.0b5 (2025-11-16) ### Feature Added diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py index c7d155d924dd..d17ec8abfb6f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b5" +VERSION = "1.0.0b6" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 77492d1496dd..9abeff0d58d6 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -20,11 +20,9 @@ keywords = ["azure", "azure sdk"] dependencies = [ "azure-ai-agentserver-core>=1.0.0b5", - "langchain>0.3.5", + "langchain>0.3.20", "langchain-openai>0.3.10", - "langchain-azure-ai[opentelemetry]>=0.1.4", - "langgraph>0.5.0", - "opentelemetry-exporter-otlp-proto-http", + "langchain-azure-ai[opentelemetry]>=0.1.8", ] [build-system] @@ -66,5 +64,5 @@ breaking = false # incompatible python version pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false -mindependency = false # depends on -core package +# mindependency = false # depends on -core package whl_no_aio = false