Commit 60bf106

Merge pull request #972 from UiPath/akshaya/use_io_schema
fix(UseOutputSchema): use custom outputschema for mocking
2 parents 4f1042c + 96af4a2 commit 60bf106

6 files changed: +242 -32 lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "uipath"
-version = "2.2.10"
+version = "2.2.11"
 description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.11"

src/uipath/_cli/_evals/mocks/llm_mocker.py

Lines changed: 7 additions & 9 deletions
@@ -4,7 +4,7 @@
 import logging
 from typing import Any, Callable

-from pydantic import BaseModel
+from pydantic import BaseModel, TypeAdapter

 from uipath.tracing import traced
 from uipath.tracing._utils import _SpanUtils
@@ -105,15 +105,16 @@ async def response(
         if return_type is None:
             return_type = Any

-        class OutputSchema(BaseModel):
-            response: return_type
+        output_schema = params.get(
+            "output_schema", TypeAdapter(return_type).json_schema()
+        )

         response_format = {
             "type": "json_schema",
             "json_schema": {
-                "name": OutputSchema.__name__.lower(),
+                "name": "OutputSchema",
                 "strict": True,
-                "schema": _cleanup_schema(OutputSchema),
+                "schema": _cleanup_schema(output_schema),
             },
         }
         try:
@@ -199,10 +200,7 @@ class OutputSchema(BaseModel):
             response_format=response_format,
             **completion_kwargs,
         )
-        mocked_response = OutputSchema(
-            **json.loads(response.choices[0].message.content)
-        )
-        result = mocked_response.model_dump(mode="json")["response"]
+        result = json.loads(response.choices[0].message.content)

        if cache_manager is not None:
            cache_manager.set(
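
The fallback introduced above is worth spelling out: when the caller passes no "output_schema", the schema is derived from the mocked function's return annotation via pydantic v2's TypeAdapter. A minimal sketch, assuming pydantic v2 (resolve_output_schema is a hypothetical helper name; params mirrors the decorator kwargs from the diff):

from typing import Any

from pydantic import TypeAdapter


def resolve_output_schema(params: dict[str, Any], return_type: Any) -> dict[str, Any]:
    # Prefer a caller-supplied JSON schema; otherwise derive one from the
    # function's return annotation, mirroring the change above.
    return params.get("output_schema", TypeAdapter(return_type).json_schema())


# TypeAdapter maps plain annotations to JSON schema, which is why the tests
# below expect {"type": "string"} for a function annotated "-> str":
assert resolve_output_schema({}, str) == {"type": "string"}
assert resolve_output_schema({"output_schema": {"type": "object"}}, str) == {
    "type": "object"
}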

src/uipath/platform/llm_gateway/_llm_gateway_service.py

Lines changed: 7 additions & 8 deletions
@@ -77,15 +77,15 @@ class EmbeddingModels(object):
     text_embedding_ada_002 = "text-embedding-ada-002"


-def _cleanup_schema(model_class: type[BaseModel]) -> dict[str, Any]:
-    """Clean up a Pydantic model schema for use with LLM Gateway.
+def _cleanup_schema(schema: dict[str, Any]) -> dict[str, Any]:
+    """Clean up a JSON schema for use with LLM Gateway.

-    This function converts a Pydantic model's JSON schema to a format that's
+    This function converts a JSON schema to a format that's
     compatible with the LLM Gateway's JSON schema requirements by removing
     titles and other metadata that might cause validation issues.

     Args:
-        model_class (type[BaseModel]): A Pydantic BaseModel class to convert to schema.
+        schema (dict[str, Any]): an input JSON schema.

     Returns:
         dict: A cleaned JSON schema dictionary suitable for LLM Gateway response_format.
@@ -99,11 +99,10 @@ class Country(BaseModel):
             capital: str
             languages: list[str]

-        schema = _cleanup_schema(Country)
+        schema = _cleanup_schema(Country.model_json_schema())
         # Returns a clean schema without titles and unnecessary metadata
         ```
     """
-    schema = model_class.model_json_schema()

     def clean_type(type_def):
         """Clean property definitions by removing titles and cleaning nested items. Additionally, `additionalProperties` is ensured on all objects."""
@@ -303,7 +302,7 @@ class Country(BaseModel):
             response_format, BaseModel
         ):
             # Convert Pydantic model to JSON schema format
-            cleaned_schema = _cleanup_schema(response_format)
+            cleaned_schema = _cleanup_schema(response_format.model_json_schema())
             request_body["response_format"] = {
                 "type": "json_schema",
                 "json_schema": {
@@ -511,7 +510,7 @@ class Country(BaseModel):
             response_format, BaseModel
         ):
             # Convert Pydantic model to JSON schema format
-            cleaned_schema = _cleanup_schema(response_format)
+            cleaned_schema = _cleanup_schema(response_format.model_json_schema())
             request_body["response_format"] = {
                 "type": "json_schema",
                 "json_schema": {

tests/cli/eval/mocks/test_mocks.py

Lines changed: 217 additions & 4 deletions
@@ -1,8 +1,10 @@
+import json
 from typing import Any
 from unittest.mock import MagicMock

 import pytest
 from _pytest.monkeypatch import MonkeyPatch
+from pydantic import BaseModel
 from pytest_httpx import HTTPXMock

 from uipath._cli._evals._models._evaluation_set import (
@@ -142,7 +144,7 @@ def test_llm_mockable_sync(httpx_mock: HTTPXMock, monkeypatch: MonkeyPatch):

     # Arrange
     @mockable()
-    def foo(*args, **kwargs):
+    def foo(*args, **kwargs) -> str:
         raise NotImplementedError()

     @mockable()
@@ -189,7 +191,7 @@ def foofoo(*args, **kwargs):
                     "index": 0,
                     "message": {
                         "role": "ai",
-                        "content": '{"response": "bar1"}',
+                        "content": '"bar1"',
                         "tool_calls": None,
                     },
                     "finish_reason": "EOS",
@@ -206,6 +208,19 @@ def foofoo(*args, **kwargs):
     set_execution_context(evaluation, _mock_span_collector, "test-execution-id")

     assert foo() == "bar1"
+
+    mock_request = httpx_mock.get_request()
+    assert mock_request
+    request = json.loads(mock_request.content.decode("utf-8"))
+    assert request["response_format"] == {
+        "type": "json_schema",
+        "json_schema": {
+            "name": "OutputSchema",
+            "strict": True,
+            "schema": {"type": "string"},
+        },
+    }
+
     with pytest.raises(NotImplementedError):
         assert foofoo()
     httpx_mock.add_response(
@@ -228,7 +243,7 @@ async def test_llm_mockable_async(httpx_mock: HTTPXMock, monkeypatch: MonkeyPatch):

     # Arrange
     @mockable()
-    async def foo(*args, **kwargs):
+    async def foo(*args, **kwargs) -> str:
         raise NotImplementedError()

     @mockable()
@@ -277,7 +292,7 @@ async def foofoo(*args, **kwargs):
                     "index": 0,
                     "message": {
                         "role": "ai",
-                        "content": '{"response": "bar1"}',
+                        "content": '"bar1"',
                         "tool_calls": None,
                     },
                     "finish_reason": "EOS",
@@ -294,6 +309,19 @@ async def foofoo(*args, **kwargs):
     set_execution_context(evaluation, _mock_span_collector, "test-execution-id")

     assert await foo() == "bar1"
+
+    mock_request = httpx_mock.get_request()
+    assert mock_request
+    request = json.loads(mock_request.content.decode("utf-8"))
+    assert request["response_format"] == {
+        "type": "json_schema",
+        "json_schema": {
+            "name": "OutputSchema",
+            "strict": True,
+            "schema": {"type": "string"},
+        },
+    }
+
     with pytest.raises(NotImplementedError):
         assert await foofoo()

@@ -305,3 +333,188 @@ async def foofoo(*args, **kwargs):
     )
     with pytest.raises(UiPathMockResponseGenerationError):
         assert await foo()
+
+
+@pytest.mark.httpx_mock(assert_all_responses_were_requested=False)
+def test_llm_mockable_with_output_schema_sync(
+    httpx_mock: HTTPXMock, monkeypatch: MonkeyPatch
+):
+    monkeypatch.setenv("UIPATH_URL", "https://example.com")
+    monkeypatch.setenv("UIPATH_ACCESS_TOKEN", "1234567890")
+    monkeypatch.setattr(CacheManager, "get", lambda *args, **kwargs: None)
+    monkeypatch.setattr(CacheManager, "set", lambda *args, **kwargs: None)
+
+    class ToolResponseMock(BaseModel):
+        content: str
+
+    # Arrange
+    @mockable(output_schema=ToolResponseMock.model_json_schema())
+    def foo(*args, **kwargs) -> dict[str, Any]:
+        raise NotImplementedError()
+
+    evaluation_item: dict[str, Any] = {
+        "id": "evaluation-id",
+        "name": "Mock foo",
+        "inputs": {},
+        "evaluationCriterias": {
+            "ExactMatchEvaluator": None,
+        },
+        "mockingStrategy": {
+            "type": "llm",
+            "prompt": "response content is 'bar1'",
+            "toolsToSimulate": [{"name": "foo"}],
+        },
+    }
+    evaluation = EvaluationItem(**evaluation_item)
+    assert isinstance(evaluation.mocking_strategy, LLMMockingStrategy)
+    httpx_mock.add_response(
+        url="https://example.com/agenthub_/llm/api/capabilities",
+        status_code=200,
+        json={},
+    )
+    httpx_mock.add_response(
+        url="https://example.com/orchestrator_/llm/api/capabilities",
+        status_code=200,
+        json={},
+    )
+
+    httpx_mock.add_response(
+        url="https://example.com/llm/api/chat/completions"
+        "?api-version=2024-08-01-preview",
+        status_code=200,
+        json={
+            "id": "response-id",
+            "object": "",
+            "created": 0,
+            "model": "model",
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "ai",
+                        "content": '{"content": "bar1"}',
+                        "tool_calls": None,
+                    },
+                    "finish_reason": "EOS",
+                }
+            ],
+            "usage": {
+                "prompt_tokens": 1,
+                "completion_tokens": 1,
+                "total_tokens": 2,
+            },
+        },
+    )
+    # Act & Assert
+    set_execution_context(evaluation, _mock_span_collector, "test-execution-id")
+
+    assert foo() == {"content": "bar1"}
+    mock_request = httpx_mock.get_request()
+    assert mock_request
+    request = json.loads(mock_request.content.decode("utf-8"))
+    assert request["response_format"] == {
+        "type": "json_schema",
+        "json_schema": {
+            "name": "OutputSchema",
+            "strict": True,
+            "schema": {
+                "required": ["content"],
+                "type": "object",
+                "additionalProperties": False,
+                "properties": {"content": {"type": "string"}},
+            },
+        },
+    }
+
+
+@pytest.mark.asyncio
+@pytest.mark.httpx_mock(assert_all_responses_were_requested=False)
+async def test_llm_mockable_with_output_schema_async(
+    httpx_mock: HTTPXMock, monkeypatch: MonkeyPatch
+):
+    monkeypatch.setenv("UIPATH_URL", "https://example.com")
+    monkeypatch.setenv("UIPATH_ACCESS_TOKEN", "1234567890")
+    monkeypatch.setattr(CacheManager, "get", lambda *args, **kwargs: None)
+    monkeypatch.setattr(CacheManager, "set", lambda *args, **kwargs: None)
+
+    class ToolResponseMock(BaseModel):
+        content: str
+
+    # Arrange
+    @mockable(output_schema=ToolResponseMock.model_json_schema())
+    async def foo(*args, **kwargs) -> dict[str, Any]:
+        raise NotImplementedError()
+
+    evaluation_item: dict[str, Any] = {
+        "id": "evaluation-id",
+        "name": "Mock foo",
+        "inputs": {},
+        "evaluationCriterias": {
+            "ExactMatchEvaluator": None,
+        },
+        "mockingStrategy": {
+            "type": "llm",
+            "prompt": "response content is 'bar1'",
+            "toolsToSimulate": [{"name": "foo"}],
+        },
+    }
+    evaluation = EvaluationItem(**evaluation_item)
+    assert isinstance(evaluation.mocking_strategy, LLMMockingStrategy)
+    httpx_mock.add_response(
+        url="https://example.com/agenthub_/llm/api/capabilities",
+        status_code=200,
+        json={},
+    )
+    httpx_mock.add_response(
+        url="https://example.com/orchestrator_/llm/api/capabilities",
+        status_code=200,
+        json={},
+    )
+
+    httpx_mock.add_response(
+        url="https://example.com/llm/api/chat/completions"
+        "?api-version=2024-08-01-preview",
+        status_code=200,
+        json={
+            "id": "response-id",
+            "object": "",
+            "created": 0,
+            "model": "model",
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "ai",
+                        "content": '{"content": "bar1"}',
+                        "tool_calls": None,
+                    },
+                    "finish_reason": "EOS",
+                }
+            ],
+            "usage": {
+                "prompt_tokens": 1,
+                "completion_tokens": 1,
+                "total_tokens": 2,
+            },
+        },
+    )
+    # Act & Assert
+    set_execution_context(evaluation, _mock_span_collector, "test-execution-id")
+
+    assert await foo() == {"content": "bar1"}
+    mock_request = httpx_mock.get_request()
+    assert mock_request
+    request = json.loads(mock_request.content.decode("utf-8"))
+    assert request["response_format"] == {
+        "type": "json_schema",
+        "json_schema": {
+            "name": "OutputSchema",
+            "strict": True,
+            "schema": {
+                "required": ["content"],
+                "type": "object",
+                "additionalProperties": False,
+                "properties": {"content": {"type": "string"}},
+            },
+        },
+    }
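
The usage pattern these tests exercise: a @mockable tool can now pin its own output schema instead of relying on its return annotation, so LLM-generated mock responses are constrained to that shape. A hedged sketch (OrderStatus and lookup_order are illustrative names; mockable's import path is not visible in these hunks):

from typing import Any

from pydantic import BaseModel


class OrderStatus(BaseModel):
    content: str


# Under an LLM mocking strategy, the mock response must match this schema
# rather than a schema derived from "-> dict[str, Any]".
@mockable(output_schema=OrderStatus.model_json_schema())
def lookup_order(*args: Any, **kwargs: Any) -> dict[str, Any]:
    raise NotImplementedError()  # real body; replaced by the mock in evals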
