src/litai/llm.py: 6 changes (0 additions, 6 deletions)
@@ -41,8 +41,6 @@
     "google/gemini-2.5-flash",
 }

-# these models support reasoning_effort='none'
-NONE_REASONING_MODELS = ["google/gemini-2.0-flash", "google/gemini-2.5-flash-lite-preview-06-17"]

 logger = logging.getLogger(__name__)

@@ -301,10 +299,6 @@ def chat( # noqa: D417
         """
         if reasoning_effort is not None and reasoning_effort not in ["none", "low", "medium", "high"]:
            raise ValueError("reasoning_effort must be 'low', 'medium', 'high', or None")
-        if reasoning_effort is None and (
-            model in NONE_REASONING_MODELS or (self._model in NONE_REASONING_MODELS and model is None)
-        ):
-            reasoning_effort = "none"

         self._wait_for_model()
         lit_tools = LitTool.convert_tools(tools)
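
For context, a minimal standalone sketch of the behavior these hunks remove, reconstructed from the deleted lines above. resolve_reasoning_effort is a hypothetical helper name used only for illustration, not a function in litai.

# Illustrative reconstruction of the deleted override (pre-PR behavior).
NONE_REASONING_MODELS = ["google/gemini-2.0-flash", "google/gemini-2.5-flash-lite-preview-06-17"]

def resolve_reasoning_effort(reasoning_effort, model, default_model):
    """Hypothetical helper mirroring the deleted logic in LLM.chat()."""
    if reasoning_effort is not None and reasoning_effort not in ("none", "low", "medium", "high"):
        raise ValueError("reasoning_effort must be 'low', 'medium', 'high', or None")
    # Pre-PR: None was silently coerced to "none" for the models listed above.
    if reasoning_effort is None and (
        model in NONE_REASONING_MODELS or (default_model in NONE_REASONING_MODELS and model is None)
    ):
        return "none"
    return reasoning_effort

# Pre-PR: None is coerced for a listed model. After this PR, None passes
# through to the SDK unchanged.
assert resolve_reasoning_effort(None, "google/gemini-2.0-flash", None) == "none"
assert resolve_reasoning_effort("low", "google/gemini-2.0-flash", None) == "low"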
tests/test_llm.py: 43 changes (0 additions, 43 deletions)
@@ -144,49 +144,6 @@ def test_llm_chat(mock_llm_class):
     mock_llm_instance.reset_conversation.assert_called_once()


-@patch("litai.llm.SDKLLM")
-def test_reasoning_effort_override(mock_llm_class):
-    """Test LightningLLM chat."""
-    from litai.llm import LLM as LLMCLIENT
-
-    LLMCLIENT._sdkllm_cache.clear()
-    mock_llm_instance = MagicMock()
-    mock_llm_instance.chat.return_value = "Hello! I am a helpful assistant."
-
-    mock_llm_class.return_value = mock_llm_instance
-
-    llm = LLM(model="google/gemini-2.0-flash")
-
-    response = llm.chat(
-        "Hello, who are you?",
-        system_prompt="You are a helpful assistant.",
-        metadata={"user_api": "123456"},
-        my_kwarg="test-kwarg",
-        reasoning_effort=None,
-    )
-
-    assert isinstance(response, str)
-    assert "helpful" in response.lower()
-    mock_llm_instance.chat.assert_called_once_with(
-        prompt="Hello, who are you?",
-        system_prompt="You are a helpful assistant.",
-        max_completion_tokens=None,
-        images=None,
-        conversation=None,
-        metadata={"user_api": "123456"},
-        stream=False,
-        full_response=False,
-        my_kwarg="test-kwarg",
-        tools=None,
-        reasoning_effort="none",
-    )
-    test_kwargs = mock_llm_instance.chat.call_args.kwargs
-    assert test_kwargs.get("my_kwarg") == "test-kwarg"
-
-    llm.reset_conversation("test")
-    mock_llm_instance.reset_conversation.assert_called_once()
-
-
 def test_model_override(monkeypatch):
     """Test override model logic when main model fails."""
     mock_llm = MagicMock()
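
The deleted test pinned the old coercion: a chat() call with reasoning_effort=None on google/gemini-2.0-flash was asserted to reach SDKLLM.chat with reasoning_effort="none". A sketch of the corresponding post-PR expectation, assuming the same mock setup as the tests above; this is hypothetical and not part of this diff.

from unittest.mock import MagicMock, patch

# Hypothetical post-PR check: with the override removed, None should be
# forwarded to SDKLLM.chat unchanged rather than rewritten to "none".
with patch("litai.llm.SDKLLM") as mock_llm_class:
    from litai.llm import LLM

    LLM._sdkllm_cache.clear()
    mock_instance = MagicMock()
    mock_instance.chat.return_value = "Hello! I am a helpful assistant."
    mock_llm_class.return_value = mock_instance

    llm = LLM(model="google/gemini-2.0-flash")
    llm.chat("Hello, who are you?", reasoning_effort=None)

    # None passes through as-is now that NONE_REASONING_MODELS is gone.
    assert mock_instance.chat.call_args.kwargs["reasoning_effort"] is None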