diff --git a/examples/with_autogen.py b/examples/with_autogen.py
new file mode 100644
index 0000000..e0271ab
--- /dev/null
+++ b/examples/with_autogen.py
@@ -0,0 +1,45 @@
+"""
+Example of using MCPHub with Autogen Agents.
+1. Initialize MCPHub to manage MCP servers
+2. Fetch MCP tools and adapters for Autogen
+3. Create and run an agent with MCP tools
+"""
+
+import asyncio
+
+from autogen_agentchat.agents import AssistantAgent
+from autogen_agentchat.ui import Console
+from autogen_core import CancellationToken
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from mcphub import MCPHub
+
+
+async def main():
+    # Initialize MCPHub - automatically loads .mcphub.json and sets up servers
+    hub = MCPHub()
+
+    # Fetch MCP tools adapted for Autogen
+    tool_adapters = await hub.fetch_autogen_mcp_adapters("azure-storage-mcp")
+    model_client = OpenAIChatCompletionClient(model="gpt-4")
+
+    # Create and run agent with MCP tools
+    complex_task = """Please help me analyze the following complex problem:
+    We need to design a new feature for our product that balances user privacy
+    with data collection for improving the service. Consider the ethical implications,
+    technical feasibility, and business impact. Break down your thinking process
+    step by step, and provide a detailed recommendation with clear justification
+    for each decision point."""
+    agent = AssistantAgent(
+        name="assistant",
+        model_client=model_client,
+        tools=tool_adapters,
+        system_message="You are a helpful assistant.",
+    )
+
+    await Console(
+        agent.run_stream(task=complex_task, cancellation_token=CancellationToken())
+    )
+
+if __name__ == "__main__":
+    # Run the async main function
+    asyncio.run(main())
\ No newline at end of file
diff --git a/examples/with_langchain.py b/examples/with_langchain.py
new file mode 100644
index 0000000..5b0945d
--- /dev/null
+++ b/examples/with_langchain.py
@@ -0,0 +1,45 @@
+"""
+Example of using MCPHub with LangChain Agents.
+1. Initialize MCPHub to manage MCP servers
+2. Fetch MCP tools for LangChain
+3. Create and run an agent with MCP tools
+"""
+
+import asyncio
+import json
+
+from langchain_mcp_adapters.tools import load_mcp_tools
+from langchain_openai import ChatOpenAI
+from langgraph.prebuilt import create_react_agent
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+from mcphub import MCPHub
+
+model = ChatOpenAI(model="gpt-4o")
+
+async def main():
+    # Initialize MCPHub - automatically loads .mcphub.json and sets up servers
+    hub = MCPHub()
+
+    # Fetch MCP tools for LangChain
+    tools = await hub.fetch_langchain_mcp_tools("azure-storage-mcp")
+    tools_dict = [
+        {"name": tool.name, "description": tool.description, "args_schema": tool.args_schema} for tool in tools
+    ]
+    print("Available MCP Tools:")
+    print(json.dumps(tools_dict, indent=2))
+
+    # Create and run agent with MCP tools
+    complex_task = """Please help me analyze the following complex problem:
+    We need to design a new feature for our product that balances user privacy
+    with data collection for improving the service. Consider the ethical implications,
+    technical feasibility, and business impact. Break down your thinking process
+    step by step, and provide a detailed recommendation with clear justification
+    for each decision point."""
+    agent = create_react_agent(model, tools)
+    agent_response = await agent.ainvoke({"messages": complex_task})
+    print("\nAgent Response:")
+    print(agent_response.get("messages")[1].content)
+
+if __name__ == "__main__":
+    asyncio.run(main())
\ No newline at end of file
diff --git a/examples/test.py b/examples/with_openai.py
similarity index 51%
rename from examples/test.py
rename to examples/with_openai.py
index 5ca75c9..6367c5e 100644
--- a/examples/test.py
+++ b/examples/with_openai.py
@@ -1,55 +1,41 @@
+"""
+Example of using MCPHub with OpenAI Agents.
+1. Initialize MCPHub to manage MCP servers
+2. Fetch an MCP server with async context manager
+3. List available tools from the server
+4. Create and run an agent with MCP tools
+"""
+
 import asyncio
 import json
 
 from agents import Agent, Runner
 from mcphub import MCPHub
 
 async def main():
-    """
-    Example of using MCPHub to integrate MCP servers with OpenAI Agents.
-
-    This example demonstrates:
-    1. Initializing MCPHub
-    2. Fetching and using an MCP server
-    3. Listing available tools
-    4. Creating and running an agent with MCP tools
-    """
-
-    # Step 1: Initialize MCPHub
-    # MCPHub will automatically:
-    # - Find .mcphub.json in your project
-    # - Load server configurations
-    # - Set up servers (clone repos, run setup scripts if needed)
+    # Initialize MCPHub - automatically loads .mcphub.json and sets up servers
     hub = MCPHub()
 
-    # Step 2: Create an MCP server instance using async context manager
-    # Parameters:
-    # - mcp_name: The name of the server from your .mcphub.json
-    # - cache_tools_list: Cache the tools list for better performance
+    # Fetch MCP server - handles server setup and tool caching
     async with hub.fetch_openai_mcp_server(
         mcp_name="sequential-thinking-mcp",
         cache_tools_list=True
     ) as server:
-        # Step 3: List available tools from the MCP server
-        # This shows what capabilities are available to your agent
+        # Get available tools from the server
        tools = await server.list_tools()
-
-        # Pretty print the tools for better readability
         tools_dict = [
             dict(tool) if hasattr(tool, "__dict__") else tool for tool in tools
         ]
         print("Available MCP Tools:")
         print(json.dumps(tools_dict, indent=2))
 
-        # Step 4: Create an OpenAI Agent with MCP server
-        # The agent can now use all tools provided by the MCP server
+        # Create agent with MCP server integration
         agent = Agent(
             name="Assistant",
             instructions="Use the available tools to accomplish the given task",
-            mcp_servers=[server]  # Provide the MCP server to the agent
+            mcp_servers=[server]
         )
 
-        # Step 5: Run your agent with a complex task
-        # The agent will automatically have access to all MCP tools
+        # Run agent with a task
         complex_task = """Please help me analyze the following complex problem:
+        We need to design a new feature for our product that balances user privacy
+        with data collection for improving the service. Consider the ethical implications,
@@ -57,11 +43,9 @@ async def main():
         step by step, and provide a detailed recommendation with clear justification
         for each decision point."""
 
-        # Execute the task and get the result
         result = await Runner.run(agent, complex_task)
         print("\nAgent Response:")
         print(result)
 
 if __name__ == "__main__":
-    # Run the async main function
     asyncio.run(main())
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 5728ee9..038461b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "mcphub"
-version = "0.1.8"
+version = "0.1.9"
 description = "A Python package for managing and integrating Model Context Protocol (MCP) servers with AI frameworks like OpenAI Agents, LangChain, and Autogen"
 readme = "README.md"
 authors = [
diff --git a/src/mcphub/adapters/autogen.py b/src/mcphub/adapters/autogen.py
index b1c2de1..fc96b4b 100644
--- a/src/mcphub/adapters/autogen.py
+++ b/src/mcphub/adapters/autogen.py
@@ -1,17 +1,25 @@
 try:
     from typing import List
-    from autogen_ext.tools.mcp import StdioMcpToolAdapter
+    from autogen_ext.tools.mcp import StdioMcpToolAdapter, StdioServerParams
     from .base import MCPBaseAdapter
 
     class MCPAutogenAdapter(MCPBaseAdapter):
         async def create_adapters(self, mcp_name: str) -> List[StdioMcpToolAdapter]:
             server_params = self.get_server_params(mcp_name)
+
+            autogen_mcp_server_params = StdioServerParams(
+                command=server_params.command,
+                args=server_params.args,
+                env=server_params.env,
+                cwd=server_params.cwd
+            )
+
             async with self.create_session(mcp_name) as session:
                 tools = await session.list_tools()
                 return [
-                    await StdioMcpToolAdapter.from_server_params(server_params, tool.name)
+                    await StdioMcpToolAdapter.from_server_params(autogen_mcp_server_params, tool.name)
                     for tool in tools.tools
                 ]
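
Note on the src/mcphub/adapters/autogen.py change above: the adapter now builds Autogen's own StdioServerParams from the hub's stdio server parameters before calling StdioMcpToolAdapter.from_server_params, instead of passing the hub's parameter object through directly. Below is a minimal sketch of that bridging step, assuming (as in the patch) that the object returned by get_server_params exposes command, args, env, and cwd; the helper name is illustrative and not part of the mcphub API.

from autogen_ext.tools.mcp import StdioMcpToolAdapter, StdioServerParams


async def build_autogen_adapters(server_params, session):
    # Illustrative helper (not part of mcphub): translate the hub's stdio
    # parameters into Autogen's StdioServerParams, then wrap each tool the
    # MCP server exposes in a StdioMcpToolAdapter.
    autogen_params = StdioServerParams(
        command=server_params.command,
        args=server_params.args,
        env=server_params.env,
        cwd=server_params.cwd,
    )
    tools = await session.list_tools()
    return [
        await StdioMcpToolAdapter.from_server_params(autogen_params, tool.name)
        for tool in tools.tools
    ]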