Skip to content

Commit 1854f73

Browse files
authored
fix output (#68)
1 parent bef00d1 commit 1854f73

File tree

2 files changed

+0
-49
lines changed

2 files changed

+0
-49
lines changed

src/litai/llm.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -41,8 +41,6 @@
4141
"google/gemini-2.5-flash",
4242
}
4343

44-
# this nmodels support reasoning_effort='none'
45-
NONE_REASONING_MODELS = ["google/gemini-2.0-flash", "google/gemini-2.5-flash-lite-preview-06-17"]
4644

4745
logger = logging.getLogger(__name__)
4846

@@ -301,10 +299,6 @@ def chat( # noqa: D417
301299
"""
302300
if reasoning_effort is not None and reasoning_effort not in ["none", "low", "medium", "high"]:
303301
raise ValueError("reasoning_effort must be 'low', 'medium', 'high', or None")
304-
if reasoning_effort is None and (
305-
model in NONE_REASONING_MODELS or (self._model in NONE_REASONING_MODELS and model is None)
306-
):
307-
reasoning_effort = "none"
308302

309303
self._wait_for_model()
310304
lit_tools = LitTool.convert_tools(tools)

tests/test_llm.py

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -144,49 +144,6 @@ def test_llm_chat(mock_llm_class):
144144
mock_llm_instance.reset_conversation.assert_called_once()
145145

146146

147-
@patch("litai.llm.SDKLLM")
148-
def test_reasoning_effort_override(mock_llm_class):
149-
"""Test LigtningLLM chat."""
150-
from litai.llm import LLM as LLMCLIENT
151-
152-
LLMCLIENT._sdkllm_cache.clear()
153-
mock_llm_instance = MagicMock()
154-
mock_llm_instance.chat.return_value = "Hello! I am a helpful assistant."
155-
156-
mock_llm_class.return_value = mock_llm_instance
157-
158-
llm = LLM(model="google/gemini-2.0-flash")
159-
160-
response = llm.chat(
161-
"Hello, who are you?",
162-
system_prompt="You are a helpful assistant.",
163-
metadata={"user_api": "123456"},
164-
my_kwarg="test-kwarg",
165-
reasoning_effort=None,
166-
)
167-
168-
assert isinstance(response, str)
169-
assert "helpful" in response.lower()
170-
mock_llm_instance.chat.assert_called_once_with(
171-
prompt="Hello, who are you?",
172-
system_prompt="You are a helpful assistant.",
173-
max_completion_tokens=None,
174-
images=None,
175-
conversation=None,
176-
metadata={"user_api": "123456"},
177-
stream=False,
178-
full_response=False,
179-
my_kwarg="test-kwarg",
180-
tools=None,
181-
reasoning_effort="none",
182-
)
183-
test_kwargs = mock_llm_instance.chat.call_args.kwargs
184-
assert test_kwargs.get("my_kwarg") == "test-kwarg"
185-
186-
llm.reset_conversation("test")
187-
mock_llm_instance.reset_conversation.assert_called_once()
188-
189-
190147
def test_model_override(monkeypatch):
191148
"""Test override model logic when main model fails."""
192149
mock_llm = MagicMock()

0 commit comments

Comments (0)