
Commit 57f2139

realserverfaketools, port handling cleanup

1 parent 7001d1b

File tree

11 files changed: +1236 -11 lines changed


README.md

Lines changed: 1 addition & 1 deletion
@@ -264,7 +264,7 @@ Cursor should now be able to talk to the MCP Gateway and you should be able to u
 
 ### Steps to add a new MCP server to the Gateway and Registry
 
-1. Option 1 (_recommended_): Use `Cursor` or your favorite MCP host of choice that supports SSE to add the MCP Gateway as a server as an MCP server and then simple ask it in naturla language to register a new MCP server and follow the prompts.
+1. Option 1 (_recommended_): Use `Cursor` or your favorite MCP host of choice that supports SSE to add the MCP Gateway as a server as an MCP server and then simple ask it in natural language to register a new MCP server and follow the prompts.
 
 1. Option 2: Use `/register` API (first call the `/login` API and get the secure cookie value), see steps in the [API endpoints](#api-endpoints-brief-overview) section. Note the value for the `mcp_gateway_session` cookie from the `/login` API and then use it in `/register` API.
 ```bash
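
For readers following Option 2, a rough sketch of the login-then-register flow in Python: only the `/login` and `/register` endpoints and the `mcp_gateway_session` cookie name come from the README text above; the gateway address, credentials, and registration payload fields here are illustrative assumptions (the payload mirrors the server JSON added below).

```python
# Hedged sketch of Option 2. Only /login, /register, and the
# mcp_gateway_session cookie name are documented above; the gateway
# address, credentials, and payload fields are assumptions.
import requests

GATEWAY = "http://localhost:7860"  # assumed gateway address

with requests.Session() as s:
    # Step 1: /login sets the secure session cookie on success.
    s.post(f"{GATEWAY}/login", data={"username": "admin", "password": "..."})
    assert "mcp_gateway_session" in s.cookies, "login did not set the session cookie"

    # Step 2: reuse the same session (and its cookie) to register a server.
    resp = s.post(
        f"{GATEWAY}/register",
        json={
            "server_name": "Real Server Fake Tools",
            "path": "/realserverfaketools",
            "proxy_pass_url": "http://localhost:8004/",
        },
    )
    print(resp.status_code, resp.text)
```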
Lines changed: 327 additions & 0 deletions
@@ -0,0 +1,327 @@
{
  "server_name": "Real Server Fake Tools",
  "description": "A collection of fake tools with interesting names that take different parameter types",
  "path": "/realserverfaketools",
  "proxy_pass_url": "http://localhost:8004/",
  "tags": ["demo", "fake", "tools", "testing"],
  "num_tools": 6,
  "num_stars": 0,
  "is_python": true,
  "license": "MIT",
  "tool_list": [
    {
      "name": "quantum_flux_analyzer",
      "parsed_description": {
        "main": "Analyzes quantum flux patterns with configurable energy levels and stabilization.",
        "args": "energy_level: Energy level for quantum analysis (1-10), stabilization_factor: Stabilization factor for quantum flux, enable_temporal_shift: Whether to enable temporal shifting in the analysis",
        "returns": "str: JSON response with mock quantum flux analysis results",
        "raises": ""
      },
      "schema": {
        "properties": {
          "energy_level": {
            "default": 5,
            "description": "Energy level for quantum analysis (1-10)",
            "maximum": 10,
            "minimum": 1,
            "title": "Energy Level",
            "type": "integer"
          },
          "stabilization_factor": {
            "default": 0.75,
            "description": "Stabilization factor for quantum flux",
            "title": "Stabilization Factor",
            "type": "number"
          },
          "enable_temporal_shift": {
            "default": false,
            "description": "Whether to enable temporal shifting in the analysis",
            "title": "Enable Temporal Shift",
            "type": "boolean"
          }
        },
        "title": "quantum_flux_analyzerArguments",
        "type": "object"
      }
    },
    {
      "name": "neural_pattern_synthesizer",
      "parsed_description": {
        "main": "Synthesizes neural patterns into coherent structures.",
        "args": "input_patterns: List of neural patterns to synthesize, coherence_threshold: Threshold for pattern coherence (0.0-1.0), dimensions: Number of dimensions for synthesis (1-10)",
        "returns": "Dict[str, Any]: Dictionary with mock neural pattern synthesis results",
        "raises": ""
      },
      "schema": {
        "properties": {
          "input_patterns": {
            "description": "List of neural patterns to synthesize",
            "items": {
              "type": "string"
            },
            "title": "Input Patterns",
            "type": "array"
          },
          "coherence_threshold": {
            "default": 0.7,
            "description": "Threshold for pattern coherence (0.0-1.0)",
            "maximum": 1.0,
            "minimum": 0.0,
            "title": "Coherence Threshold",
            "type": "number"
          },
          "dimensions": {
            "default": 3,
            "description": "Number of dimensions for synthesis (1-10)",
            "maximum": 10,
            "minimum": 1,
            "title": "Dimensions",
            "type": "integer"
          }
        },
        "required": [
          "input_patterns"
        ],
        "title": "neural_pattern_synthesizerArguments",
        "type": "object"
      }
    },
    {
      "name": "hyper_dimensional_mapper",
      "parsed_description": {
        "main": "Maps geographical coordinates to hyper-dimensional space.",
        "args": "coordinates: Geographical coordinates to map, dimension_count: Number of hyper-dimensions to map to (4-11), reality_anchoring: Reality anchoring factor (0.1-1.0)",
        "returns": "str: JSON response with mock hyper-dimensional mapping results",
        "raises": ""
      },
      "schema": {
        "$defs": {
          "GeoCoordinates": {
            "properties": {
              "latitude": {
                "description": "Latitude coordinate",
                "title": "Latitude",
                "type": "number"
              },
              "longitude": {
                "description": "Longitude coordinate",
                "title": "Longitude",
                "type": "number"
              },
              "altitude": {
                "description": "Altitude in meters (optional)",
                "title": "Altitude",
                "type": ["number", "null"]
              }
            },
            "required": [
              "latitude",
              "longitude"
            ],
            "title": "GeoCoordinates",
            "type": "object"
          }
        },
        "properties": {
          "coordinates": {
            "$ref": "#/$defs/GeoCoordinates",
            "description": "Geographical coordinates to map to hyper-dimensions"
          },
          "dimension_count": {
            "default": 5,
            "description": "Number of hyper-dimensions to map to (4-11)",
            "maximum": 11,
            "minimum": 4,
            "title": "Dimension Count",
            "type": "integer"
          },
          "reality_anchoring": {
            "default": 0.8,
            "description": "Reality anchoring factor (0.1-1.0)",
            "maximum": 1.0,
            "minimum": 0.1,
            "title": "Reality Anchoring",
            "type": "number"
          }
        },
        "required": [
          "coordinates"
        ],
        "title": "hyper_dimensional_mapperArguments",
        "type": "object"
      }
    },
    {
      "name": "temporal_anomaly_detector",
      "parsed_description": {
        "main": "Detects temporal anomalies within a specified timeframe.",
        "args": "timeframe: Dictionary with 'start' and 'end' times for anomaly detection, sensitivity: Sensitivity level for detection (1-10), anomaly_types: Types of anomalies to detect",
        "returns": "Dict[str, Any]: Dictionary with mock temporal anomaly detection results",
        "raises": ""
      },
      "schema": {
        "properties": {
          "timeframe": {
            "description": "Start and end times for anomaly detection",
            "properties": {
              "start": {
                "type": "string"
              },
              "end": {
                "type": "string"
              }
            },
            "required": ["start", "end"],
            "title": "Timeframe",
            "type": "object"
          },
          "sensitivity": {
            "default": 7,
            "description": "Sensitivity level for detection (1-10)",
            "maximum": 10,
            "minimum": 1,
            "title": "Sensitivity",
            "type": "integer"
          },
          "anomaly_types": {
            "default": ["temporal_shift", "causal_loop", "timeline_divergence"],
            "description": "Types of anomalies to detect",
            "items": {
              "type": "string"
            },
            "title": "Anomaly Types",
            "type": "array"
          }
        },
        "required": [
          "timeframe"
        ],
        "title": "temporal_anomaly_detectorArguments",
        "type": "object"
      }
    },
    {
      "name": "user_profile_analyzer",
      "parsed_description": {
        "main": "Analyzes a user profile with configurable analysis options.",
        "args": "profile: User profile to analyze, analysis_options: Options for the analysis",
        "returns": "str: JSON response with mock user profile analysis results",
        "raises": ""
      },
      "schema": {
        "$defs": {
          "UserProfile": {
            "properties": {
              "username": {
                "description": "User's username",
                "title": "Username",
                "type": "string"
              },
              "email": {
                "description": "User's email address",
                "title": "Email",
                "type": "string"
              },
              "age": {
                "description": "User's age (optional)",
                "title": "Age",
                "type": ["integer", "null"]
              },
              "interests": {
                "default": [],
                "description": "List of user interests",
                "items": {
                  "type": "string"
                },
                "title": "Interests",
                "type": "array"
              }
            },
            "required": [
              "username",
              "email"
            ],
            "title": "UserProfile",
            "type": "object"
          },
          "AnalysisOptions": {
            "properties": {
              "depth": {
                "default": 3,
                "description": "Depth of analysis (1-10)",
                "title": "Depth",
                "type": "integer"
              },
              "include_metadata": {
                "default": true,
                "description": "Whether to include metadata",
                "title": "Include Metadata",
                "type": "boolean"
              },
              "filters": {
                "default": {},
                "description": "Filters to apply",
                "title": "Filters",
                "type": "object"
              }
            },
            "title": "AnalysisOptions",
            "type": "object"
          }
        },
        "properties": {
          "profile": {
            "$ref": "#/$defs/UserProfile",
            "description": "User profile to analyze"
          },
          "analysis_options": {
            "$ref": "#/$defs/AnalysisOptions",
            "default": {},
            "description": "Options for the analysis"
          }
        },
        "required": [
          "profile"
        ],
        "title": "user_profile_analyzerArguments",
        "type": "object"
      }
    },
    {
      "name": "synthetic_data_generator",
      "parsed_description": {
        "main": "Generates synthetic data based on a provided schema.",
        "args": "schema: Schema defining the structure of synthetic data, record_count: Number of synthetic records to generate (1-1000), seed: Random seed for reproducibility (optional)",
        "returns": "Dict[str, Any]: Dictionary with mock synthetic data generation results",
        "raises": ""
      },
      "schema": {
        "properties": {
          "schema": {
            "description": "Schema defining the structure of synthetic data",
            "title": "Schema",
            "type": "object"
          },
          "record_count": {
            "default": 10,
            "description": "Number of synthetic records to generate (1-1000)",
            "maximum": 1000,
            "minimum": 1,
            "title": "Record Count",
            "type": "integer"
          },
          "seed": {
            "description": "Random seed for reproducibility (optional)",
            "title": "Seed",
            "type": ["integer", "null"]
          }
        },
        "required": [
          "schema"
        ],
        "title": "synthetic_data_generatorArguments",
        "type": "object"
      }
    }
  ]
}
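
To make the registry entry above concrete, here is a minimal sketch of calling one of its tools with the MCP Python SDK, in the same style as the `servers/currenttime/client.py` changes below. The argument names and bounds come from the `quantum_flux_analyzer` schema above; the direct `http://localhost:8004/sse` URL is an assumption based on `proxy_pass_url` (through the gateway it would be the `/realserverfaketools` path instead).

```python
# Sketch only: URL derived from proxy_pass_url above; adjust if going
# through the gateway (/realserverfaketools/sse) instead.
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def main() -> None:
    async with sse_client("http://localhost:8004/sse") as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # Argument names/bounds taken from the quantum_flux_analyzer
            # schema: energy_level 1-10 (default 5), stabilization_factor
            # (default 0.75), enable_temporal_shift (default false).
            result = await session.call_tool(
                "quantum_flux_analyzer",
                arguments={"energy_level": 7, "stabilization_factor": 0.9},
            )
            print(result)


asyncio.run(main())
```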

servers/current_time/client.py renamed to servers/currenttime/client.py

Lines changed: 8 additions & 3 deletions
@@ -60,7 +60,7 @@ async def run(server_url, args):
             tz_name = "Africa/Cairo"
             logger.info(f"\nCalling current_time_by_timezone tool with tz_name={tz_name}")
             result = await session.call_tool(
-                "current_time_by_timezone", arguments={"params": {"tz_name": tz_name}}
+                "current_time_by_timezone", arguments={"tz_name": tz_name}
             )
 
             # Display the results
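
This hunk flattens the tool-call arguments from a nested `params` object to top-level keys. The currenttime server itself is not shown in this commit, but the flat calling convention implies a tool signature along these lines (a hypothetical sketch using FastMCP, not the project's actual server code):

```python
# Hypothetical server-side counterpart; not part of this diff.
from datetime import datetime
from zoneinfo import ZoneInfo

from mcp.server.fastmcp import FastMCP

mcp = FastMCP("currenttime")


@mcp.tool()
def current_time_by_timezone(tz_name: str) -> str:
    """Return the current time in the given IANA timezone, e.g. Africa/Cairo."""
    return datetime.now(ZoneInfo(tz_name)).isoformat()


if __name__ == "__main__":
    # Serving over SSE is assumed, to match the gateway setup above.
    mcp.run(transport="sse")
```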
@@ -81,7 +81,12 @@ async def run(server_url, args):
         "--host", type=str, default="localhost", help="Hostname of the MCP server"
     )
     parser.add_argument("--port", type=int, default=8000, help="Port of the MCP server")
-
+    parser.add_argument(
+        "--server-name",
+        type=str,
+        default="currenttime",  # Default server name changed
+        help='Name of the MCP server to connect to (e.g., "mcpgw")',
+    )
     # Parse the arguments
     args = parser.parse_args()
 
@@ -91,7 +96,7 @@ async def run(server_url, args):
     # Automatically turn to https if port is 443
     if args.port == 443:
         secure = "s"
-    server_url = f"http{secure}://{args.host}:{args.port}/sse"
+    server_url = f"http{secure}://{args.host}:{args.port}/{args.server_name}/sse"
 
     # Run the async main function
     import asyncio
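
Tracing the new URL construction with the parser defaults shown above (the `secure = ""` initialization is assumed, since it falls outside the hunks):

```python
# Mirrors the hunks above; secure's initial value is assumed.
host, port, server_name = "localhost", 8000, "currenttime"  # argparse defaults
secure = "s" if port == 443 else ""
server_url = f"http{secure}://{host}:{port}/{server_name}/sse"
print(server_url)  # -> http://localhost:8000/currenttime/sse
```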
File renamed without changes.
File renamed without changes.
File renamed without changes.
