Commit 0fe4427

feat: add basic tests
Parent: 6df3cc0

File tree

8 files changed: +1644, -2 lines

libs/core/poetry.lock

Lines changed: 1219 additions & 0 deletions
Some generated files are not rendered by default.

libs/core/pyproject.toml

Lines changed: 7 additions & 0 deletions

@@ -15,6 +15,13 @@ uvicorn = "^0.27"
 openai = "^1.26"
 tiktoken = "^0.7"
 
+[tool.poetry.group.dev.dependencies]
+pytest = "^7.4"
+pytest-asyncio = "^0"
+mypy = "^1.5"
+ruff = "^0.0.284"
+black = "^23.7"
+pre-commit = "^3.7"
 
 [build-system]
 requires = ["poetry-core"]
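
With the new dev group installed (e.g. poetry install --with dev inside libs/core), the suite added below runs under pytest. A minimal sketch of invoking it programmatically; the test path is taken from this commit's file tree, and running from the repository root is assumed:

    import pytest

    # Equivalent to `pytest libs/core/tests/unit_tests -q` from the repo root.
    raise SystemExit(pytest.main(["libs/core/tests/unit_tests", "-q"]))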

libs/core/tests/unit_tests/__init__.py

Whitespace-only changes.

Lines changed: 39 additions & 0 deletions

@@ -0,0 +1,39 @@
+import pytest
+from unittest.mock import AsyncMock, MagicMock
+from llmstudio_core.providers.provider import BaseProvider, ChatRequest, ProviderError
+
+class MockProvider(BaseProvider):
+    async def aparse_response(self, response, **kwargs):
+        return response
+
+    def parse_response(self, response, **kwargs):
+        return response
+
+    def chat(self, chat_input, model, **kwargs):
+        # Mock the response to match expected structure
+        return MagicMock(choices=[MagicMock(finish_reason="stop")])
+
+    async def achat(self, chat_input, model, **kwargs):
+        # Mock the response to match expected structure
+        return MagicMock(choices=[MagicMock(finish_reason="stop")])
+
+    def output_to_string(self, output):
+        # Handle string inputs
+        if isinstance(output, str):
+            return output
+        if output.choices[0].finish_reason == "stop":
+            return output.choices[0].message.content
+        return ""
+
+    @staticmethod
+    def _provider_config_name():
+        return "mock_provider"
+
+@pytest.fixture
+def mock_provider():
+    config = MagicMock()
+    config.models = {"test_model": MagicMock(input_token_cost=0.01, output_token_cost=0.02)}
+    config.id = "mock_provider"
+    tokenizer = MagicMock()
+    tokenizer.encode = lambda x: x.split()  # Simple tokenizer mock
+    return MockProvider(config=config, tokenizer=tokenizer)
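
The whitespace-splitting encode mock above is what drives the token counts asserted in the test file below; a minimal self-contained sketch of that behavior (restating the fixture's lambda, not real tokenization):

    # The fixture's tokenizer mock: "tokens" are whitespace-separated words.
    encode = lambda x: x.split()

    assert len(encode("Hello")) == 1        # one word -> one "token"
    assert len(encode("Hello world")) == 2  # two words -> two "tokens"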

Lines changed: 53 additions & 0 deletions

@@ -0,0 +1,53 @@
+import pytest
+from unittest.mock import AsyncMock, MagicMock
+from llmstudio_core.providers.provider import BaseProvider, ChatRequest, ProviderError
+
+request = ChatRequest(chat_input="Hello", model="test_model")
+
+def test_chat(mock_provider):
+    mock_provider.generate_client = MagicMock(return_value=MagicMock())
+    mock_provider.handle_response = MagicMock(return_value=iter(["response"]))
+
+    print(request.model_dump())
+    response = mock_provider.chat(request.chat_input, request.model)
+
+    assert response is not None
+
+@pytest.mark.asyncio
+async def test_achat(mock_provider):
+    mock_provider.agenerate_client = AsyncMock(return_value=AsyncMock())
+    mock_provider.ahandle_response = AsyncMock(return_value=AsyncMock())
+
+    print(request.model_dump())
+    response = await mock_provider.achat(request.chat_input, request.model)
+
+    assert response is not None
+
+
+def test_validate_model(mock_provider):
+    request = ChatRequest(chat_input="Hello", model="test_model")
+    mock_provider.validate_model(request)  # Should not raise
+
+    request_invalid = ChatRequest(chat_input="Hello", model="invalid_model")
+    with pytest.raises(ProviderError):
+        mock_provider.validate_model(request_invalid)
+
+def test_calculate_metrics(mock_provider):
+    metrics = mock_provider.calculate_metrics(
+        input="Hello",
+        output="World",
+        model="test_model",
+        start_time=0,
+        end_time=1,
+        first_token_time=0.5,
+        token_times=(0.1, 0.2),
+        token_count=2
+    )
+
+    assert metrics["input_tokens"] == pytest.approx(1)
+    assert metrics["output_tokens"] == pytest.approx(1)
+    assert metrics["cost_usd"] == pytest.approx(0.03)
+    assert metrics["latency_s"] == pytest.approx(1)
+    assert metrics["time_to_first_token_s"] == pytest.approx(0.5)
+    assert metrics["inter_token_latency_s"] == pytest.approx(0.15)
+    assert metrics["tokens_per_second"] == pytest.approx(2)
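
The expected values follow from the fixture's mocks. A worked sketch of the arithmetic, assuming calculate_metrics derives each field the way the assertions imply (the formulas below are inferred, not taken from the implementation):

    # Token counts via the whitespace tokenizer mock: encode("Hello") -> ["Hello"].
    input_tokens = len("Hello".split())      # 1
    output_tokens = len("World".split())     # 1

    # Per-token costs from the fixture: input 0.01, output 0.02.
    cost_usd = input_tokens * 0.01 + output_tokens * 0.02   # 0.03

    latency_s = 1 - 0                        # end_time - start_time
    time_to_first_token_s = 0.5 - 0          # first_token_time - start_time
    inter_token_latency_s = (0.1 + 0.2) / 2  # mean of token_times -> 0.15
    tokens_per_second = 2 / latency_s        # token_count / latency -> 2.0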

libs/llmstudio/poetry.lock

Lines changed: 312 additions & 1 deletion
Some generated files are not rendered by default.

libs/llmstudio/pyproject.toml

Lines changed: 11 additions & 0 deletions

@@ -23,6 +23,17 @@ openai = "^1.26"
 tiktoken = "^0.7"
 python-dotenv = "^0"
 langchain-experimental = "^0.0"
+llmstudio-core = "^0"
+llmstudio-tracker = "^0"
+llmstudio-proxy = "^0"
+
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^7.4"
+mypy = "^1.5"
+ruff = "^0.0.284"
+black = "^23.7"
+pre-commit = "^3.7"
 llmstudio-core = { path = "../core/", develop = true }
 llmstudio-tracker = { path = "../tracker/", develop = true }
 llmstudio-proxy = { path = "../proxy/", develop = true }

pyproject.toml

Lines changed: 3 additions & 1 deletion

@@ -15,10 +15,12 @@ keywords = ["ml", "ai", "llm", "llmops", "openai", "langchain", "chatgpt", "llms
 
 [tool.poetry.dependencies]
 python = "^3.9"
+
+[tool.poetry.group.dev.dependencies]
 llmstudio = { path = "libs/llmstudio/", develop = true }
 llmstudio-core = { path = "libs/core/", develop = true }
 llmstudio-tracker = { path = "libs/tracker/", develop = true }
-llmstudio-proxy = { path = "libs/proxy/", develop = true }
+langchain-proxy = { path = "libs/proxy/", develop = true }
 
 [tool.poetry.scripts]
 llmstudio = "llmstudio.cli:main"
