diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d1980dd..3584e65 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,3 +58,56 @@ jobs: - name: Run tests run: make -C packages/sdk/server-ai test + + server-ai-langchain-linux: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - uses: ./.github/actions/ci + with: + workspace_path: packages/ai-providers/server-ai-langchain + python_version: ${{ matrix.python-version }} + + - uses: ./.github/actions/build + with: + workspace_path: packages/ai-providers/server-ai-langchain + + server-ai-langchain-windows: + runs-on: windows-latest + defaults: + run: + shell: powershell + + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install poetry + uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 + + - name: Configure poetry for local virtualenvs + run: poetry config virtualenvs.in-project true + + - name: Install server-ai dependency first + working-directory: packages/sdk/server-ai + run: poetry install + + - name: Install requirements + working-directory: packages/ai-providers/server-ai-langchain + run: poetry install + + - name: Run tests + run: make -C packages/ai-providers/server-ai-langchain test diff --git a/packages/ai-providers/server-ai-langchain/Makefile b/packages/ai-providers/server-ai-langchain/Makefile index 2486d17..ca02807 100644 --- a/packages/ai-providers/server-ai-langchain/Makefile +++ b/packages/ai-providers/server-ai-langchain/Makefile @@ -20,6 +20,7 @@ test: install lint: #! Run type analysis and linting checks lint: install poetry run mypy src/ldai_langchain + poetry run isort --check --atomic src/ldai_langchain poetry run pycodestyle src/ldai_langchain .PHONY: build diff --git a/packages/ai-providers/server-ai-langchain/README.md b/packages/ai-providers/server-ai-langchain/README.md index f612c63..736f7ea 100644 --- a/packages/ai-providers/server-ai-langchain/README.md +++ b/packages/ai-providers/server-ai-langchain/README.md @@ -1,10 +1,8 @@ # LaunchDarkly AI SDK - LangChain Provider -This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK. +[![PyPI](https://img.shields.io/pypi/v/launchdarkly-server-sdk-ai-langchain.svg)](https://pypi.org/project/launchdarkly-server-sdk-ai-langchain/) -## Status - -🚧 **Coming Soon** - This package is a placeholder for future LangChain integration. +This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK, allowing you to use LangChain models and chains with LaunchDarkly's tracking and configuration capabilities. 
## Installation @@ -12,12 +10,186 @@ This package provides LangChain integration for the LaunchDarkly Server-Side AI pip install launchdarkly-server-sdk-ai-langchain ``` +You'll also need to install the LangChain provider packages for the models you want to use: + +```bash +# For OpenAI +pip install langchain-openai + +# For Anthropic +pip install langchain-anthropic + +# For Google +pip install langchain-google-genai +``` + +## Quick Start + +```python +import asyncio +from ldclient import LDClient, Config, Context +from ldai import init +from ldai_langchain import LangChainProvider + +# Initialize LaunchDarkly client +ld_client = LDClient(Config("your-sdk-key")) +ai_client = init(ld_client) + +# Get AI configuration +context = Context.builder("user-123").build() +config = ai_client.config("ai-config-key", context, {}) + +async def main(): + # Create a LangChain provider from the AI configuration + provider = await LangChainProvider.create(config) + + # Use the provider to invoke the model + from ldai.models import LDMessage + messages = [ + LDMessage(role="system", content="You are a helpful assistant."), + LDMessage(role="user", content="Hello, how are you?"), + ] + + response = await provider.invoke_model(messages) + print(response.message.content) + +asyncio.run(main()) +``` + ## Usage +### Using LangChainProvider with the Create Factory + +The simplest way to use the LangChain provider is with the static `create` factory method, which automatically creates the appropriate LangChain model based on your LaunchDarkly AI configuration: + ```python -# Coming soon +from ldai_langchain import LangChainProvider + +# Create provider from AI configuration +provider = await LangChainProvider.create(ai_config) + +# Invoke the model +response = await provider.invoke_model(messages) ``` +### Using an Existing LangChain Model + +If you already have a LangChain model configured, you can use it directly: + +```python +from langchain_openai import ChatOpenAI +from ldai_langchain import LangChainProvider + +# Create your own LangChain model +llm = ChatOpenAI(model="gpt-4", temperature=0.7) + +# Wrap it with LangChainProvider +provider = LangChainProvider(llm) + +# Use with LaunchDarkly tracking +response = await provider.invoke_model(messages) +``` + +### Structured Output + +The provider supports structured output using LangChain's `with_structured_output`: + +```python +response_structure = { + "type": "object", + "properties": { + "sentiment": {"type": "string", "enum": ["positive", "negative", "neutral"]}, + "confidence": {"type": "number"}, + }, + "required": ["sentiment", "confidence"], +} + +result = await provider.invoke_structured_model(messages, response_structure) +print(result.data) # {"sentiment": "positive", "confidence": 0.95} +``` + +### Tracking Metrics + +Use the provider with LaunchDarkly's tracking capabilities: + +```python +# Get the AI config with tracker +config = ai_client.config("ai-config-key", context, {}) + +# Create provider +provider = await LangChainProvider.create(config) + +# Track metrics automatically +async def invoke(): + return await provider.invoke_model(messages) + +response = await config.tracker.track_metrics_of( + invoke, + lambda r: r.metrics +) +``` + +### Static Utility Methods + +The `LangChainProvider` class provides several utility methods: + +#### Converting Messages + +```python +from ldai.models import LDMessage +from ldai_langchain import LangChainProvider + +messages = [ + LDMessage(role="system", content="You are helpful."), + 
LDMessage(role="user", content="Hello!"), +] + +# Convert to LangChain messages +langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) +``` + +#### Extracting Metrics + +```python +from ldai_langchain import LangChainProvider + +# After getting a response from LangChain +metrics = LangChainProvider.get_ai_metrics_from_response(ai_message) +print(f"Success: {metrics.success}") +print(f"Tokens used: {metrics.usage.total if metrics.usage else 'N/A'}") +``` + +#### Provider Name Mapping + +```python +# Map LaunchDarkly provider names to LangChain provider names +langchain_provider = LangChainProvider.map_provider("gemini") # Returns "google-genai" +``` + +## API Reference + +### LangChainProvider + +#### Constructor + +```python +LangChainProvider(llm: BaseChatModel, logger: Optional[Any] = None) +``` + +#### Static Methods + +- `create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> LangChainProvider` - Factory method to create a provider from AI configuration +- `convert_messages_to_langchain(messages: List[LDMessage]) -> List[BaseMessage]` - Convert LaunchDarkly messages to LangChain messages +- `get_ai_metrics_from_response(response: AIMessage) -> LDAIMetrics` - Extract metrics from a LangChain response +- `map_provider(ld_provider_name: str) -> str` - Map LaunchDarkly provider names to LangChain names +- `create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel` - Create a LangChain model from AI configuration + +#### Instance Methods + +- `invoke_model(messages: List[LDMessage]) -> ChatResponse` - Invoke the model with messages +- `invoke_structured_model(messages: List[LDMessage], response_structure: Dict[str, Any]) -> StructuredResponse` - Invoke with structured output +- `get_chat_model() -> BaseChatModel` - Get the underlying LangChain model + ## Documentation For full documentation, please refer to the [LaunchDarkly AI SDK documentation](https://docs.launchdarkly.com/sdk/ai/python). 
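+
+## Creating a Model Directly
+
+`create_langchain_model` returns the configured LangChain chat model without the provider wrapper, which is useful when you want to call LangChain APIs directly while still sourcing model settings from LaunchDarkly. A minimal sketch (reusing `config` from the Quick Start; run it inside an async function):
+
+```python
+from ldai_langchain import LangChainProvider
+
+# Build the LangChain chat model described by the AI configuration
+llm = LangChainProvider.create_langchain_model(config)
+
+# Use it like any other LangChain chat model
+response = await llm.ainvoke("Hello!")
+print(response.content)
+```
+
+Note that `invoke_model` and `invoke_structured_model` do not raise on model errors: they log a warning and return a response with `metrics.success` set to `False`, so check that flag when you need to detect failures.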
diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index db792e1..0517f03 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -24,15 +24,17 @@ packages = [{ include = "ldai_langchain", from = "src" }] [tool.poetry.dependencies] python = ">=3.9,<4" -launchdarkly-server-sdk-ai = ">=0.10.1" -# langchain-core = ">=0.1.0" # Uncomment when implementing - +launchdarkly-server-sdk-ai = ">=0.11.0" +langchain-core = ">=0.2.0" +langchain = ">=0.2.0" [tool.poetry.group.dev.dependencies] pytest = ">=2.8" pytest-cov = ">=2.4.0" pytest-asyncio = ">=0.21.0" mypy = "==1.18.2" +pycodestyle = ">=2.11.0" +isort = ">=5.12.0" [tool.mypy] python_version = "3.9" @@ -40,10 +42,16 @@ ignore_missing_imports = true install_types = true non_interactive = true +[tool.isort] +profile = "black" +known_third_party = ["langchain", "langchain_core", "ldai"] +sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] + [tool.pytest.ini_options] addopts = ["-ra"] testpaths = ["tests"] +asyncio_mode = "auto" [build-system] diff --git a/packages/ai-providers/server-ai-langchain/setup.cfg b/packages/ai-providers/server-ai-langchain/setup.cfg new file mode 100644 index 0000000..6224f31 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/setup.cfg @@ -0,0 +1,2 @@ +[pycodestyle] +max-line-length = 120 diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py index bf15780..1282648 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py @@ -1,14 +1,13 @@ """LaunchDarkly AI SDK - LangChain Provider. -This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK. +This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK, allowing you to use LangChain models and chains with LaunchDarkly's tracking and configuration capabilities. """ -__version__ = "0.1.0" +from ldai_langchain.langchain_provider import LangChainProvider -# Placeholder for future LangChain provider implementation -# from ldai_langchain.langchain_provider import LangChainProvider +__version__ = "0.1.0" __all__ = [ '__version__', - # 'LangChainProvider', # Uncomment when implemented + 'LangChainProvider', ] diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py new file mode 100644 index 0000000..d710809 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py @@ -0,0 +1,243 @@ +"""LangChain implementation of AIProvider for LaunchDarkly AI SDK.""" + +from typing import Any, Dict, List, Optional, Union + +from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage +from ldai import LDMessage +from ldai.models import AIConfigKind +from ldai.providers import AIProvider +from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse +from ldai.tracker import TokenUsage + + +class LangChainProvider(AIProvider): + """ + LangChain implementation of AIProvider. + + This provider integrates LangChain models with LaunchDarkly's tracking capabilities.
+ """ + + def __init__(self, llm: BaseChatModel, logger: Optional[Any] = None): + """ + Initialize the LangChain provider. + + :param llm: A LangChain BaseChatModel instance + :param logger: Optional logger for logging provider operations + """ + super().__init__(logger) + self._llm = llm + + @staticmethod + async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'LangChainProvider': + """ + Static factory method to create a LangChain AIProvider from an AI configuration. + + :param ai_config: The LaunchDarkly AI configuration + :param logger: Optional logger for the provider + :return: Configured LangChainProvider instance + """ + llm = LangChainProvider.create_langchain_model(ai_config) + return LangChainProvider(llm, logger) + + async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: + """ + Invoke the LangChain model with an array of messages. + + :param messages: Array of LDMessage objects representing the conversation + :return: ChatResponse containing the model's response and metrics + """ + try: + langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) + response: BaseMessage = await self._llm.ainvoke(langchain_messages) + metrics = LangChainProvider.get_ai_metrics_from_response(response) + + content: str = '' + if isinstance(response.content, str): + content = response.content + else: + if self.logger: + self.logger.warn( + f'Multimodal response not supported, expecting a string. ' + f'Content type: {type(response.content)}, Content: {response.content}' + ) + metrics = LDAIMetrics(success=False, usage=metrics.usage) + + return ChatResponse( + message=LDMessage(role='assistant', content=content), + metrics=metrics, + ) + except Exception as error: + if self.logger: + self.logger.warn(f'LangChain model invocation failed: {error}') + + return ChatResponse( + message=LDMessage(role='assistant', content=''), + metrics=LDAIMetrics(success=False, usage=None), + ) + + async def invoke_structured_model( + self, + messages: List[LDMessage], + response_structure: Dict[str, Any], + ) -> StructuredResponse: + """ + Invoke the LangChain model with structured output support. + + :param messages: Array of LDMessage objects representing the conversation + :param response_structure: Dictionary defining the output structure + :return: StructuredResponse containing the structured data + """ + try: + langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) + structured_llm = self._llm.with_structured_output(response_structure) + response = await structured_llm.ainvoke(langchain_messages) + + if not isinstance(response, dict): + if self.logger: + self.logger.warn( + f'Structured output did not return a dict. ' + f'Got: {type(response)}' + ) + return StructuredResponse( + data={}, + raw_response='', + metrics=LDAIMetrics( + success=False, + usage=TokenUsage(total=0, input=0, output=0), + ), + ) + + return StructuredResponse( + data=response, + raw_response=str(response), + metrics=LDAIMetrics( + success=True, + usage=TokenUsage(total=0, input=0, output=0), + ), + ) + except Exception as error: + if self.logger: + self.logger.warn(f'LangChain structured model invocation failed: {error}') + + return StructuredResponse( + data={}, + raw_response='', + metrics=LDAIMetrics( + success=False, + usage=TokenUsage(total=0, input=0, output=0), + ), + ) + + def get_chat_model(self) -> BaseChatModel: + """ + Get the underlying LangChain model instance. 
+ + :return: The underlying BaseChatModel + """ + return self._llm + + @staticmethod + def map_provider(ld_provider_name: str) -> str: + """ + Map LaunchDarkly provider names to LangChain provider names. + + This method enables seamless integration between LaunchDarkly's standardized + provider naming and LangChain's naming conventions. + + :param ld_provider_name: LaunchDarkly provider name + :return: LangChain-compatible provider name + """ + lowercased_name = ld_provider_name.lower() + + mapping: Dict[str, str] = { + 'gemini': 'google-genai', + } + + return mapping.get(lowercased_name, lowercased_name) + + @staticmethod + def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics: + """ + Get AI metrics from a LangChain provider response. + + This method extracts token usage information and success status from LangChain responses + and returns a LaunchDarkly AIMetrics object. + + :param response: The response from the LangChain model + :return: LDAIMetrics with success status and token usage + + Example: + # Use with tracker.track_metrics_of for automatic tracking + response = await tracker.track_metrics_of( + lambda: llm.ainvoke(messages), + LangChainProvider.get_ai_metrics_from_response + ) + """ + # Extract token usage if available + usage: Optional[TokenUsage] = None + if hasattr(response, 'response_metadata') and response.response_metadata: + token_usage = response.response_metadata.get('tokenUsage') or response.response_metadata.get('token_usage') + if token_usage: + usage = TokenUsage( + total=token_usage.get('totalTokens', 0) or token_usage.get('total_tokens', 0), + input=token_usage.get('promptTokens', 0) or token_usage.get('prompt_tokens', 0), + output=token_usage.get('completionTokens', 0) or token_usage.get('completion_tokens', 0), + ) + + return LDAIMetrics(success=True, usage=usage) + + @staticmethod + def convert_messages_to_langchain( + messages: List[LDMessage], + ) -> List[Union[HumanMessage, SystemMessage, AIMessage]]: + """ + Convert LaunchDarkly messages to LangChain messages. + + This helper method enables developers to work directly with LangChain message types + while maintaining compatibility with LaunchDarkly's standardized message format. + + :param messages: List of LDMessage objects + :return: List of LangChain message objects + :raises ValueError: If an unsupported message role is encountered + """ + result: List[Union[HumanMessage, SystemMessage, AIMessage]] = [] + + for msg in messages: + if msg.role == 'system': + result.append(SystemMessage(content=msg.content)) + elif msg.role == 'user': + result.append(HumanMessage(content=msg.content)) + elif msg.role == 'assistant': + result.append(AIMessage(content=msg.content)) + else: + raise ValueError(f'Unsupported message role: {msg.role}') + + return result + + @staticmethod + def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: + """ + Create a LangChain model from an AI configuration. + + This public helper method enables developers to initialize their own LangChain models + using LaunchDarkly AI configurations. 
+ + :param ai_config: The LaunchDarkly AI configuration + :return: A configured LangChain BaseChatModel + """ + from langchain.chat_models import init_chat_model + + config_dict = ai_config.to_dict() + model_dict = config_dict.get('model') or {} + provider_dict = config_dict.get('provider') or {} + + model_name = model_dict.get('name', '') + provider = provider_dict.get('name', '') + parameters = model_dict.get('parameters') or {} + + return init_chat_model( + model_name, + model_provider=LangChainProvider.map_provider(provider), + **parameters, + ) diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py new file mode 100644 index 0000000..af270d6 --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py @@ -0,0 +1,258 @@ +"""Tests for LangChain Provider.""" + +import pytest +from unittest.mock import AsyncMock, MagicMock + +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage + +from ldai import LDMessage + +from ldai_langchain import LangChainProvider + + +class TestConvertMessagesToLangchain: + """Tests for convert_messages_to_langchain static method.""" + + def test_converts_system_messages_to_system_message(self): + """Should convert system messages to SystemMessage.""" + messages = [LDMessage(role='system', content='You are a helpful assistant.')] + result = LangChainProvider.convert_messages_to_langchain(messages) + + assert len(result) == 1 + assert isinstance(result[0], SystemMessage) + assert result[0].content == 'You are a helpful assistant.' + + def test_converts_user_messages_to_human_message(self): + """Should convert user messages to HumanMessage.""" + messages = [LDMessage(role='user', content='Hello, how are you?')] + result = LangChainProvider.convert_messages_to_langchain(messages) + + assert len(result) == 1 + assert isinstance(result[0], HumanMessage) + assert result[0].content == 'Hello, how are you?' + + def test_converts_assistant_messages_to_ai_message(self): + """Should convert assistant messages to AIMessage.""" + messages = [LDMessage(role='assistant', content='I am doing well, thank you!')] + result = LangChainProvider.convert_messages_to_langchain(messages) + + assert len(result) == 1 + assert isinstance(result[0], AIMessage) + assert result[0].content == 'I am doing well, thank you!' 
+ + def test_converts_multiple_messages_in_order(self): + """Should convert multiple messages in order.""" + messages = [ + LDMessage(role='system', content='You are a helpful assistant.'), + LDMessage(role='user', content='What is the weather like?'), + LDMessage(role='assistant', content='I cannot check the weather.'), + ] + result = LangChainProvider.convert_messages_to_langchain(messages) + + assert len(result) == 3 + assert isinstance(result[0], SystemMessage) + assert isinstance(result[1], HumanMessage) + assert isinstance(result[2], AIMessage) + + def test_throws_error_for_unsupported_message_role(self): + """Should throw error for unsupported message role.""" + # Create a mock message with unsupported role + class MockMessage: + role = 'unknown' + content = 'Test message' + + with pytest.raises(ValueError, match='Unsupported message role: unknown'): + LangChainProvider.convert_messages_to_langchain([MockMessage()]) # type: ignore + + def test_handles_empty_message_array(self): + """Should handle empty message array.""" + result = LangChainProvider.convert_messages_to_langchain([]) + assert len(result) == 0 + + +class TestGetAIMetricsFromResponse: + """Tests for get_ai_metrics_from_response static method.""" + + def test_creates_metrics_with_success_true_and_token_usage(self): + """Should create metrics with success=True and token usage.""" + mock_response = AIMessage(content='Test response') + mock_response.response_metadata = { + 'tokenUsage': { + 'totalTokens': 100, + 'promptTokens': 50, + 'completionTokens': 50, + }, + } + + result = LangChainProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 100 + assert result.usage.input == 50 + assert result.usage.output == 50 + + def test_creates_metrics_with_snake_case_token_usage(self): + """Should create metrics with snake_case token usage keys.""" + mock_response = AIMessage(content='Test response') + mock_response.response_metadata = { + 'token_usage': { + 'total_tokens': 150, + 'prompt_tokens': 75, + 'completion_tokens': 75, + }, + } + + result = LangChainProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 150 + assert result.usage.input == 75 + assert result.usage.output == 75 + + def test_creates_metrics_with_success_true_and_no_usage_when_metadata_missing(self): + """Should create metrics with success=True and no usage when metadata is missing.""" + mock_response = AIMessage(content='Test response') + + result = LangChainProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is None + + +class TestMapProvider: + """Tests for map_provider static method.""" + + def test_maps_gemini_to_google_genai(self): + """Should map gemini to google-genai.""" + assert LangChainProvider.map_provider('gemini') == 'google-genai' + assert LangChainProvider.map_provider('Gemini') == 'google-genai' + assert LangChainProvider.map_provider('GEMINI') == 'google-genai' + + def test_returns_provider_name_unchanged_for_unmapped_providers(self): + """Should return provider name unchanged for unmapped providers.""" + assert LangChainProvider.map_provider('openai') == 'openai' + assert LangChainProvider.map_provider('anthropic') == 'anthropic' + assert LangChainProvider.map_provider('unknown') == 'unknown' + + +class TestInvokeModel: + """Tests for invoke_model instance method.""" + + @pytest.fixture + def 
mock_llm(self): + """Create a mock LLM.""" + return MagicMock() + + @pytest.fixture + def mock_logger(self): + """Create a mock logger.""" + return MagicMock() + + @pytest.mark.asyncio + async def test_returns_success_true_for_string_content(self, mock_llm, mock_logger): + """Should return success=True for string content.""" + mock_response = AIMessage(content='Test response') + mock_llm.ainvoke = AsyncMock(return_value=mock_response) + provider = LangChainProvider(mock_llm, mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + result = await provider.invoke_model(messages) + + assert result.metrics.success is True + assert result.message.content == 'Test response' + mock_logger.warn.assert_not_called() + + @pytest.mark.asyncio + async def test_returns_success_false_for_non_string_content_and_logs_warning(self, mock_llm, mock_logger): + """Should return success=False for non-string content and log warning.""" + mock_response = AIMessage(content=[{'type': 'image', 'data': 'base64data'}]) + mock_llm.ainvoke = AsyncMock(return_value=mock_response) + provider = LangChainProvider(mock_llm, mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + result = await provider.invoke_model(messages) + + assert result.metrics.success is False + assert result.message.content == '' + mock_logger.warn.assert_called_once() + + @pytest.mark.asyncio + async def test_returns_success_false_when_model_invocation_throws_error(self, mock_llm, mock_logger): + """Should return success=False when model invocation throws an error.""" + error = Exception('Model invocation failed') + mock_llm.ainvoke = AsyncMock(side_effect=error) + provider = LangChainProvider(mock_llm, mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + result = await provider.invoke_model(messages) + + assert result.metrics.success is False + assert result.message.content == '' + assert result.message.role == 'assistant' + mock_logger.warn.assert_called() + + +class TestInvokeStructuredModel: + """Tests for invoke_structured_model instance method.""" + + @pytest.fixture + def mock_llm(self): + """Create a mock LLM.""" + return MagicMock() + + @pytest.fixture + def mock_logger(self): + """Create a mock logger.""" + return MagicMock() + + @pytest.mark.asyncio + async def test_returns_success_true_for_successful_invocation(self, mock_llm, mock_logger): + """Should return success=True for successful invocation.""" + mock_response = {'result': 'structured data'} + mock_structured_llm = MagicMock() + mock_structured_llm.ainvoke = AsyncMock(return_value=mock_response) + mock_llm.with_structured_output = MagicMock(return_value=mock_structured_llm) + provider = LangChainProvider(mock_llm, mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + response_structure = {'type': 'object', 'properties': {}} + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.metrics.success is True + assert result.data == mock_response + mock_logger.warn.assert_not_called() + + @pytest.mark.asyncio + async def test_returns_success_false_when_structured_model_invocation_throws_error(self, mock_llm, mock_logger): + """Should return success=False when structured model invocation throws an error.""" + error = Exception('Structured invocation failed') + mock_structured_llm = MagicMock() + mock_structured_llm.ainvoke = AsyncMock(side_effect=error) + mock_llm.with_structured_output = MagicMock(return_value=mock_structured_llm) + provider = LangChainProvider(mock_llm, 
mock_logger) + + messages = [LDMessage(role='user', content='Hello')] + response_structure = {'type': 'object', 'properties': {}} + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.metrics.success is False + assert result.data == {} + assert result.raw_response == '' + assert result.metrics.usage is not None + assert result.metrics.usage.total == 0 + mock_logger.warn.assert_called() + + +class TestGetChatModel: + """Tests for get_chat_model instance method.""" + + def test_returns_underlying_llm(self): + """Should return the underlying LLM.""" + mock_llm = MagicMock() + provider = LangChainProvider(mock_llm) + + assert provider.get_chat_model() is mock_llm + +
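To verify the new package locally, the CI steps above can be mirrored; a minimal sketch, assuming `poetry` is installed and the commands are run from the repository root:

```bash
# Install the server-ai package first, as the Windows CI job does
(cd packages/sdk/server-ai && poetry install)

# Install this package's dependencies, then run its tests via the Makefile target used in CI
(cd packages/ai-providers/server-ai-langchain && poetry install)
make -C packages/ai-providers/server-ai-langchain test
```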