diff --git a/docs/search_llm_assist.png b/docs/search_llm_assist.png
index 3b0d1f2..4a705fd 100644
Binary files a/docs/search_llm_assist.png and b/docs/search_llm_assist.png differ
diff --git a/docs/videos/seaxng_ai_assist.mp4 b/docs/videos/seaxng_ai_assist.mp4
new file mode 100644
index 0000000..990897b
Binary files /dev/null and b/docs/videos/seaxng_ai_assist.mp4 differ
diff --git a/python/searxng-addons/search_answers_llm/llm_answer.html b/python/searxng-addons/search_answers_llm/llm_answer.html
index f17b644..0aed9ec 100644
--- a/python/searxng-addons/search_answers_llm/llm_answer.html
+++ b/python/searxng-addons/search_answers_llm/llm_answer.html
@@ -3,7 +3,12 @@
-    🤖 AI Assistant
+
+
+
+    Search Assist
@@ -145,10 +150,58 @@

About this Answer

 }
 .assist-content {
-    line-height: 1.6;
+    line-height: 1.5;
     padding-bottom: 8px;
 }
+
+/* Markdown Rendered Content */
+.assist-content h1,
+.assist-content h2,
+.assist-content h3,
+.assist-content h4,
+.assist-content h5,
+.assist-content h6 {
+    margin-top: 0.6em;
+    margin-bottom: 0.4em;
+    line-height: 1.2;
+    font-weight: 600;
+}
+
+.assist-content p {
+    margin-block-start: 0.5em;
+    margin-block-end: 0.5em;
+}
+
+.assist-content ul,
+.assist-content ol {
+    margin-block-start: 0.5em;
+    margin-block-end: 0.5em;
+    padding-inline-start: 30px;
+}
+
+.assist-content li {
+    margin-bottom: 0.3em;
+}
+
+.assist-content pre {
+    margin-block-start: 0.8em;
+    margin-block-end: 0.8em;
+}
+
+.assist-content code {
+    font-size: 0.9em;
+    padding: 0.1em 0.3em;
+    border-radius: 4px;
+    background-color: #2b2d31;
+}
+
+.assist-content pre code {
+    display: block;
+    padding: 0.8em;
+    border-radius: 6px;
+    background-color: #1e1e1e;
+}
+
 /* Toggle Button */
 .assist-toggle-container {
     text-align: center;
diff --git a/python/searxng-addons/search_answers_llm/plugins_langchain_llm.py b/python/searxng-addons/search_answers_llm/plugins_langchain_llm.py
index 951ac73..1201803 100644
--- a/python/searxng-addons/search_answers_llm/plugins_langchain_llm.py
+++ b/python/searxng-addons/search_answers_llm/plugins_langchain_llm.py
@@ -73,12 +73,14 @@ class SXNGPlugin(Plugin):

     def __init__(self, plg_cfg: "PluginCfg") -> None:
         super().__init__(plg_cfg)
-        print(f"[DEBUG] LangChain plugin initialized with active={plg_cfg.active}")
+        print(
+            f"[DEBUG] LangChain plugin initialized with active={plg_cfg.active}")

         self.info = PluginInfo(
             id=self.id,
             name=gettext("LangChain LLM"),
-            description=gettext("Generate AI answers using LLM with rich formatting"),
+            description=gettext(
+                "Generate AI answers using LLM with rich formatting"),
             preference_section="general",
         )

@@ -86,7 +88,7 @@ def __init__(self, plg_cfg: "PluginCfg") -> None:
         # Initialize ChatOpenAI once and reuse
         self.llm = ChatOpenAI(
             model=self.model_name,
-            temperature=0.7,
+            temperature=0.5,
             base_url=environ.get(
                 "LLM_BASE_URL",
                 "https://generativelanguage.googleapis.com/v1beta/openai/",
@@ -105,7 +107,8 @@ def post_search(
     ) -> EngineResults:
         results = EngineResults()

-        print(f"[DEBUG] post_search called for query: {search.search_query.query}")
+        print(
+            f"[DEBUG] post_search called for query: {search.search_query.query}")

         # Only process on first page
         if search.search_query.pageno > 1:
@@ -195,7 +198,8 @@ def _get_search_context(self, query: str) -> list[dict]:
             timeout_limit=5.0,  # 5 second timeout for context search
         )

-        print(f"[DEBUG] Created SearchQuery with {len(engine_refs)} engines")
+        print(
+            f"[DEBUG] Created SearchQuery with {len(engine_refs)} engines")

         # Execute the search
         context_search = Search(context_search_query)
@@ -253,9 +257,9 @@ def _generate_contextual_answer_html(
         messages = [
             SystemMessage(
                 content="""You are a helpful Search Engine assistant that provides accurate answers and sources based on search results.
+                Use extractive summarization to identify key information from search results and avoid fillers.
                 Identify the most important information and links from the search results.
                 Format your response using Markdown syntax for better readability.
-                Warn against potential malicious links when encounterd.
                 Keep the response concise but well-formatted in Markdown."""
             ),
             HumanMessage(
@@ -276,7 +280,8 @@ def _generate_contextual_answer_html(
             print(f"[DEBUG] Generated contextual response: {answer[:100]}...")

             # Create formatted HTML answer from markdown
-            formatted_answer = self._format_html_answer(answer, has_context=True)
+            formatted_answer = self._format_html_answer(
+                answer, has_context=True)

             return formatted_answer

         except Exception as e:
@@ -313,7 +318,8 @@ def _generate_simple_answer_html(self, query: str) -> str:
             print(f"[DEBUG] Generated simple response: {answer[:100]}...")

             # Create formatted HTML answer from markdown
-            formatted_answer = self._format_html_answer(answer, has_context=False)
+            formatted_answer = self._format_html_answer(
+                answer, has_context=False)

             return formatted_answer

         except Exception as e:
@@ -352,7 +358,8 @@ def _format_search_context(self, search_context: list[dict]) -> str:
             content = result.get("content", "")
             if content:
                 # Truncate content to avoid token limits
-                content = content[:300] + "..." if len(content) > 300 else content
+                content = content[:300] + \
+                    "..." if len(content) > 300 else content
                 context_parts.append(f"Content: {content}")

             source = result.get("engine", "Unknown")
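
For context, a minimal standalone sketch of the client setup this diff tunes (the `temperature` drop from 0.7 to 0.5 against the Gemini OpenAI-compatible endpoint). Only `LLM_BASE_URL` appears in the diff; the `LLM_MODEL` and `LLM_API_KEY` names and the default model are illustrative assumptions, not part of the plugin.

```python
from os import environ

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

# Mirrors the plugin's constructor: one ChatOpenAI client, built once
# and reused, pointed at an OpenAI-compatible endpoint (Gemini's shim
# by default).
llm = ChatOpenAI(
    model=environ.get("LLM_MODEL", "gemini-2.0-flash"),  # assumed env var and default
    temperature=0.5,  # the value this diff lowers from 0.7
    base_url=environ.get(
        "LLM_BASE_URL",
        "https://generativelanguage.googleapis.com/v1beta/openai/",
    ),
    api_key=environ.get("LLM_API_KEY"),  # assumed env var name
)

messages = [
    SystemMessage(
        content="You are a helpful Search Engine assistant that provides "
        "accurate answers and sources based on search results."
    ),
    HumanMessage(content="What is SearXNG?"),
]

answer = llm.invoke(messages).content
print(answer[:100])  # same truncated-preview pattern as the plugin's debug logs
```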