diff --git a/.env.example b/.env.example
index 9bec51ec64..aa6ef05abf 100644
--- a/.env.example
+++ b/.env.example
@@ -96,6 +96,10 @@
 # USE: http://127.0.0.1:1234
 LMSTUDIO_API_BASE_URL=http://127.0.0.1:1234
 
+# Docker Model Runner (DMR)
+# Default host TCP endpoint when Model Runner TCP is enabled in Docker Desktop
+DMR_API_BASE_URL=http://127.0.0.1:12434
+
 # ======================================
 # CLOUD SERVICES CONFIGURATION
 # ======================================
diff --git a/app/components/@settings/tabs/providers/cloud/CloudProvidersTab.tsx b/app/components/@settings/tabs/providers/cloud/CloudProvidersTab.tsx
index 7311851426..a16fd3f293 100644
--- a/app/components/@settings/tabs/providers/cloud/CloudProvidersTab.tsx
+++ b/app/components/@settings/tabs/providers/cloud/CloudProvidersTab.tsx
@@ -68,7 +68,8 @@ const CloudProvidersTab = () => {
   // Load and filter providers
   useEffect(() => {
     const newFilteredProviders = Object.entries(settings.providers || {})
-      .filter(([key]) => !['Ollama', 'LMStudio', 'OpenAILike'].includes(key))
+      // Exclude local providers from the cloud list
+      .filter(([key]) => !['Ollama', 'LMStudio', 'OpenAILike', 'DockerModelRunner'].includes(key))
       .map(([key, value]) => ({
         name: key,
         settings: value.settings,
diff --git a/app/components/@settings/tabs/providers/local/LocalProvidersTab.tsx b/app/components/@settings/tabs/providers/local/LocalProvidersTab.tsx
index 5e0f61108a..5bc79c4f50 100644
--- a/app/components/@settings/tabs/providers/local/LocalProvidersTab.tsx
+++ b/app/components/@settings/tabs/providers/local/LocalProvidersTab.tsx
@@ -32,6 +32,8 @@ export default function LocalProvidersTab() {
   const [isLoadingLMStudioModels, setIsLoadingLMStudioModels] = useState(false);
   const { toast } = useToast();
   const { startMonitoring, stopMonitoring } = useLocalModelHealth();
+  const [dmrModels, setDmrModels] = useState<string[]>([]);
+  const [isLoadingDmrModels, setIsLoadingDmrModels] = useState(false);
 
   // Memoized filtered providers to prevent unnecessary re-renders
   const filteredProviders = useMemo(() => {
@@ -45,13 +47,15 @@
       // Set default base URLs for local providers
       let defaultBaseUrl = provider.settings.baseUrl || envUrl;
 
-      if (!defaultBaseUrl) {
-        if (key === 'Ollama') {
-          defaultBaseUrl = 'http://127.0.0.1:11434';
-        } else if (key === 'LMStudio') {
-          defaultBaseUrl = 'http://127.0.0.1:1234';
+      if (!defaultBaseUrl) {
+        if (key === 'Ollama') {
+          defaultBaseUrl = 'http://127.0.0.1:11434';
+        } else if (key === 'LMStudio') {
+          defaultBaseUrl = 'http://127.0.0.1:1234';
+        } else if (key === 'DockerModelRunner') {
+          defaultBaseUrl = 'http://127.0.0.1:12434';
+        }
       }
-      }
 
       return {
         name: key,
@@ -84,10 +88,10 @@ export default function LocalProvidersTab() {
       if (provider.settings.enabled && baseUrl) {
         console.log(`[LocalProvidersTab] Starting monitoring for ${provider.name} at ${baseUrl}`);
-        startMonitoring(provider.name as 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl);
+        startMonitoring(provider.name as 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl);
       } else if (!provider.settings.enabled && baseUrl) {
         console.log(`[LocalProvidersTab] Stopping monitoring for ${provider.name} at ${baseUrl}`);
-        stopMonitoring(provider.name as 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl);
+        stopMonitoring(provider.name as 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl);
       }
     });
   }, [filteredProviders, startMonitoring, stopMonitoring]);
@@ -110,6 +114,16 @@ export default function LocalProvidersTab() {
     }
   }, [filteredProviders]);
 
+  // Fetch Docker Model Runner models when enabled
+  useEffect(() => {
+    const dmrProvider = filteredProviders.find((p) => p.name === 'DockerModelRunner');
+
+    if (dmrProvider?.settings.enabled && dmrProvider.settings.baseUrl) {
+      fetchDMRModels(dmrProvider.settings.baseUrl);
+    }
+  }, [filteredProviders]);
+
+
   const fetchOllamaModels = async () => {
     try {
       setIsLoadingModels(true);
@@ -190,6 +204,28 @@ export default function LocalProvidersTab() {
     [updateProviderSettings, toast],
   );
 
+  // Fetch Docker Model Runner models using local proxy to avoid CORS
+  const fetchDMRModels = async (baseUrl: string) => {
+    try {
+      setIsLoadingDmrModels(true);
+      const normalized = baseUrl.endsWith('/') ? baseUrl.slice(0, -1) : baseUrl;
+      const proxyUrl = `/api/local-proxy?url=${encodeURIComponent(`${normalized}/engines/v1/models`)}`;
+      const response = await fetch(proxyUrl);
+
+      if (!response.ok) {
+        throw new Error('Failed to fetch Docker Model Runner models');
+      }
+
+      const data = (await response.json()) as { data?: Array<{ id: string }> };
+      setDmrModels((data.data || []).map((m) => m.id));
+    } catch (e) {
+      console.error('Error fetching Docker Model Runner models', e);
+      setDmrModels([]);
+    } finally {
+      setIsLoadingDmrModels(false);
+    }
+  };
+
   const handleUpdateOllamaModel = async (modelName: string) => {
     try {
       setOllamaModels((prev) => prev.map((m) => (m.name === modelName ? { ...m, status: 'updating' } : m)));
@@ -259,6 +295,7 @@ export default function LocalProvidersTab() {
     }
   };
 
+
   const handleDeleteOllamaModel = async (modelName: string) => {
     if (!window.confirm(`Are you sure you want to delete ${modelName}?`)) {
       return;
@@ -440,6 +477,69 @@ export default function LocalProvidersTab() {
                 )}
+
+                {/* Docker Model Runner Models Section */}
+                {provider.name === 'DockerModelRunner' && provider.settings.enabled && (
+
+
+
+
+ +

Available Models

+
+ +
+
+ + {isLoadingDmrModels ? ( +
+ {Array.from({ length: 3 }).map((_, i) => ( + + ))} +
+ ) : dmrModels.length === 0 ? ( +
+ +

No Models Available

+

+ Pull a model with Docker (e.g., "docker model pull ai/smollm2") and ensure DMR is enabled. +

+
+ ) : ( +
+ {dmrModels.map((id) => ( + + +
+

+ {id} +

+ + Available + +
+
+
+ ))} +
+ )} +
+
+ )} + {/* LM Studio Models Section */} {provider.name === 'LMStudio' && provider.settings.enabled && ( diff --git a/app/components/@settings/tabs/providers/local/SetupGuide.tsx b/app/components/@settings/tabs/providers/local/SetupGuide.tsx index a3e4406b6c..cd38032070 100644 --- a/app/components/@settings/tabs/providers/local/SetupGuide.tsx +++ b/app/components/@settings/tabs/providers/local/SetupGuide.tsx @@ -449,6 +449,97 @@ function SetupGuide({ onBack }: { onBack: () => void }) { + {/* Docker Model Runner Setup Section */} + + +
+
+ +
+
+

Docker Model Runner (DMR) Setup

+

OpenAI-compatible API via Docker Desktop

+
+
+
+ + {/* Requirements */} +
+

Requirements

+
    +
  • Docker Desktop 4.41+ (Windows), 4.40+ (macOS), or Docker Engine (Linux)
  • +
  • Enable GPU support on Windows (NVIDIA driver 576.57 or newer) if you want to run GPU-accelerated models
  • +
+
+ + {/* Enable DMR and TCP */} +
+

+ + 1. Enable DMR + Host TCP (12434) +

+
+
# Docker Desktop CLI
+
docker desktop enable model-runner --tcp 12434
+
+

Alternatively, enable in Docker Desktop: Settings → Features in development → Model Runner → Enable host-side TCP support.

+
+ + {/* Pull a model */} +
+

+ + 2. Pull a model +

+
+
docker model pull ai/smollm2
+
+

Models are pulled from Docker Hub and cached locally.

+
+ + {/* Verify API */} +
+

+ + 3. Verify OpenAI-compatible API +

+
+
# List models
+
curl http://localhost:12434/engines/v1/models
+
+
# Create chat completion
+
{`curl http://localhost:12434/engines/v1/chat/completions -H "Content-Type: application/json" -d '{"model":"ai/smollm2","messages":[{"role":"user","content":"hello"}]}'`}
+
+

From containers, use http://model-runner.docker.internal/engines/v1/…
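The curl checks above can also be scripted. A minimal TypeScript/Node 18+ sketch, assuming Model Runner is listening on the default host TCP port 12434 and ai/smollm2 has already been pulled (illustrative only, not part of this patch):

// verify-dmr.ts - exercise the OpenAI-compatible endpoints exposed by Docker Model Runner
const BASE_URL = 'http://localhost:12434/engines/v1';

async function main() {
  // Equivalent of `curl .../engines/v1/models`
  const models = await fetch(`${BASE_URL}/models`).then((r) => r.json());
  console.log('available models:', models);

  // Equivalent of the chat completion curl shown above
  const res = await fetch(`${BASE_URL}/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'ai/smollm2',
      messages: [{ role: 'user', content: 'hello' }],
    }),
  });

  if (!res.ok) {
    throw new Error(`DMR returned ${res.status} ${res.statusText}`);
  }

  const data = await res.json();
  console.log(data.choices?.[0]?.message?.content);
}

main().catch(console.error);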

+
+ + {/* Configure in Bolt DIY */} +
+

+ + 4. Configure in Bolt DIY +

+
    +
  • Enable provider: Settings → Providers → Local → Docker Model Runner
  • +
  • Base URL: http://127.0.0.1:12434 (we route to /engines/v1 automatically)
  • +
  • If Bolt runs in Docker, we automatically use host.docker.internal
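The last two bullets are implemented by the provider's URL handling (see docker-model-runner.ts later in this diff): the /engines/v1 prefix is appended when the model instance is created, and localhost/127.0.0.1 are rewritten to host.docker.internal when Bolt itself runs in Docker. A standalone sketch of that behavior, assuming RUNNING_IN_DOCKER is how a containerized Bolt instance is detected (as in the provider code below):

// Mirrors DockerModelRunnerProvider._normalizeBaseUrl plus the /engines/v1 suffix used by getModelInstance
function resolveDmrBaseUrl(baseUrl: string, runningInDocker: boolean): string {
  let url = baseUrl;

  // Inside a container, localhost refers to the container itself, so point at the host instead
  if (runningInDocker) {
    url = url.replace('localhost', 'host.docker.internal');
    url = url.replace('127.0.0.1', 'host.docker.internal');
  }

  // Strip a trailing slash, then add the OpenAI-compatible path prefix
  if (url.endsWith('/')) {
    url = url.slice(0, -1);
  }

  return `${url}/engines/v1`;
}

// resolveDmrBaseUrl('http://127.0.0.1:12434', false) -> 'http://127.0.0.1:12434/engines/v1'
// resolveDmrBaseUrl('http://127.0.0.1:12434/', true) -> 'http://host.docker.internal:12434/engines/v1'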
  • +
+
+ + {/* Known issues */} +
+
+ + Known issues +
+
    +
  • docker: 'model' is not a docker command → ensure the plugin is detected by Docker Desktop.
  • +
  • Linux containers in Compose may need extra_hosts: "model-runner.docker.internal:host-gateway"
  • +
+
+
+
+      {/* LocalAI Setup Section */}
diff --git a/app/components/@settings/tabs/providers/local/types.ts b/app/components/@settings/tabs/providers/local/types.ts
index cf1955428e..12dc474e8a 100644
--- a/app/components/@settings/tabs/providers/local/types.ts
+++ b/app/components/@settings/tabs/providers/local/types.ts
@@ -1,5 +1,8 @@
 // Type definitions
-export type ProviderName = 'Ollama' | 'LMStudio' | 'OpenAILike';
+import type { ComponentType } from 'react';
+import { Server, Monitor, Globe } from 'lucide-react';
+
+export type ProviderName = 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner';
 
 export interface OllamaModel {
   name: string;
@@ -31,14 +34,16 @@ export interface LMStudioModel {
 // Constants
 export const OLLAMA_API_URL = 'http://127.0.0.1:11434';
 
-export const PROVIDER_ICONS = {
-  Ollama: 'Server',
-  LMStudio: 'Monitor',
-  OpenAILike: 'Globe',
+export const PROVIDER_ICONS: Record<ProviderName, ComponentType> = {
+  Ollama: Server,
+  LMStudio: Monitor,
+  OpenAILike: Globe,
+  DockerModelRunner: Server,
 } as const;
 
 export const PROVIDER_DESCRIPTIONS = {
   Ollama: 'Run open-source models locally on your machine',
   LMStudio: 'Local model inference with LM Studio',
   OpenAILike: 'Connect to OpenAI-compatible API endpoints',
+  DockerModelRunner: 'Docker Desktop Model Runner with OpenAI-compatible API (/engines/v1)',
 } as const;
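Because PROVIDER_ICONS now stores component references from lucide-react rather than icon-name strings, consumers render the entry directly as a component. A minimal sketch (the props accepted by the icon components are not shown in this hunk, so none are passed here; the import path assumes the repo's ~ alias to app/):

import { PROVIDER_ICONS, type ProviderName } from '~/components/@settings/tabs/providers/local/types';

function ProviderIcon({ name }: { name: ProviderName }) {
  // Look up the lucide-react component registered for this provider and render it
  const Icon = PROVIDER_ICONS[name];
  return <Icon />;
}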
diff --git a/app/components/chat/ChatBox.tsx b/app/components/chat/ChatBox.tsx
index 4cd9a149a2..f5db24a8b8 100644
--- a/app/components/chat/ChatBox.tsx
+++ b/app/components/chat/ChatBox.tsx
@@ -105,7 +105,7 @@ export const ChatBox: React.FC = (props) => {
       {() => (
-
+
{/* Model Combobox */} -
+
-  getHealthStatus: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string) => ModelHealthStatus | undefined;
-  startMonitoring: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string, checkInterval?: number) => void;
-  stopMonitoring: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string) => void;
-  performHealthCheck: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string) => Promise<void>;
-  isHealthy: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string) => boolean;
+  getHealthStatus: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string) => ModelHealthStatus | undefined;
+  startMonitoring: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string, checkInterval?: number) => void;
+  stopMonitoring: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string) => void;
+  performHealthCheck: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string) => Promise<void>;
+  isHealthy: (provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string) => boolean;
   getOverallHealth: () => { healthy: number; unhealthy: number; checking: number; unknown: number };
 }
@@ -51,13 +51,13 @@ export function useLocalModelHealth(options: UseLocalModelHealthOptions = {}): U
   }, []);
 
   // Get health status for a specific provider
-  const getHealthStatus = useCallback((provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string) => {
+  const getHealthStatus = useCallback((provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string) => {
     return localModelHealthMonitor.getHealthStatus(provider, baseUrl);
   }, []);
 
   // Start monitoring a provider
-  const startMonitoring = useCallback(
-    (provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string, interval?: number) => {
+  const startMonitoring = useCallback(
+    (provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string, interval?: number) => {
       console.log(`[Health Monitor] Starting monitoring for ${provider} at ${baseUrl}`);
       localModelHealthMonitor.startMonitoring(provider, baseUrl, interval || checkInterval);
     },
@@ -65,7 +65,7 @@ export function useLocalModelHealth(options: UseLocalModelHealthOptions = {}): U
   );
 
   // Stop monitoring a provider
-  const stopMonitoring = useCallback((provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string) => {
+  const stopMonitoring = useCallback((provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string) => {
     console.log(`[Health Monitor] Stopping monitoring for ${provider} at ${baseUrl}`);
 
     localModelHealthMonitor.stopMonitoring(provider, baseUrl);
@@ -74,13 +74,13 @@ export function useLocalModelHealth(options: UseLocalModelHealthOptions = {}): U
   }, []);
 
   // Perform manual health check
-  const performHealthCheck = useCallback(async (provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string) => {
+  const performHealthCheck = useCallback(async (provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string) => {
     await localModelHealthMonitor.performHealthCheck(provider, baseUrl);
   }, []);
 
   // Check if a provider is healthy
   const isHealthy = useCallback(
-    (provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string) => {
+    (provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string) => {
       const status = getHealthStatus(provider, baseUrl);
       return status?.status === 'healthy';
     },
@@ -113,7 +113,7 @@ export function useLocalModelHealth(options: UseLocalModelHealthOptions = {}): U
  * Hook for monitoring a specific provider
  */
 export function useProviderHealth(
-  provider: 'Ollama' | 'LMStudio' | 'OpenAILike',
+  provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner',
   baseUrl: string,
   options: UseLocalModelHealthOptions = {},
 ) {
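With 'DockerModelRunner' added to every provider union in this hook, existing call sites can monitor a DMR endpoint unchanged. A minimal usage sketch; the import path and any return-value members beyond those visible in this diff are assumptions:

import { useEffect } from 'react';
import { useLocalModelHealth } from '~/lib/hooks/useLocalModelHealth';

function DmrHealthBadge({ baseUrl }: { baseUrl: string }) {
  const { startMonitoring, stopMonitoring, isHealthy } = useLocalModelHealth();

  useEffect(() => {
    // 'DockerModelRunner' is now a valid member of the provider union
    startMonitoring('DockerModelRunner', baseUrl);

    return () => stopMonitoring('DockerModelRunner', baseUrl);
  }, [baseUrl, startMonitoring, stopMonitoring]);

  return <span>{isHealthy('DockerModelRunner', baseUrl) ? 'healthy' : 'unreachable'}</span>;
}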
diff --git a/app/lib/modules/llm/providers/docker-model-runner.ts b/app/lib/modules/llm/providers/docker-model-runner.ts
new file mode 100644
index 0000000000..a47fe92c11
--- /dev/null
+++ b/app/lib/modules/llm/providers/docker-model-runner.ts
@@ -0,0 +1,115 @@
+import { BaseProvider } from '~/lib/modules/llm/base-provider';
+import type { ModelInfo } from '~/lib/modules/llm/types';
+import type { IProviderSetting } from '~/types/model';
+import type { LanguageModelV1 } from 'ai';
+import { createOpenAI } from '@ai-sdk/openai';
+import { logger } from '~/utils/logger';
+
+export default class DockerModelRunnerProvider extends BaseProvider {
+  name = 'DockerModelRunner';
+  getApiKeyLink = 'https://docs.docker.com/ai/model-runner/';
+  labelForGetApiKey = 'Enable Model Runner';
+  icon = 'i-ph:docker-logo';
+
+  config = {
+    baseUrlKey: 'DMR_API_BASE_URL',
+    baseUrl: 'http://localhost:12434',
+  };
+
+  staticModels: ModelInfo[] = [];
+
+  private _normalizeBaseUrl(baseUrl: string, isDocker: boolean): string {
+    let url = baseUrl;
+
+    if (isDocker) {
+      url = url.replace('localhost', 'host.docker.internal');
+      url = url.replace('127.0.0.1', 'host.docker.internal');
+    }
+
+    if (url.endsWith('/')) {
+      url = url.slice(0, -1);
+    }
+
+    return url;
+  }
+
+  async getDynamicModels(
+    apiKeys?: Record<string, string>,
+    settings?: IProviderSetting,
+    serverEnv: Record<string, string> = {},
+  ): Promise<ModelInfo[]> {
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: settings,
+      serverEnv,
+      defaultBaseUrlKey: 'DMR_API_BASE_URL',
+      defaultApiTokenKey: '',
+    });
+
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for Docker Model Runner provider');
+    }
+
+    const isDocker = process?.env?.RUNNING_IN_DOCKER === 'true' || serverEnv?.RUNNING_IN_DOCKER === 'true';
+    const normalized = this._normalizeBaseUrl(baseUrl, !!isDocker);
+
+    const modelsUrl = `${normalized}/engines/v1/models`;
+
+    const response = await fetch(modelsUrl);
+
+    if (!response.ok) {
+      throw new Error(`DMR GET /engines/v1/models failed: ${response.status} ${response.statusText}`);
+    }
+
+    const data = (await response.json()) as { data?: Array<{ id: string }>; [key: string]: any };
+    const items = data?.data || [];
+
+    return items.map((m) => ({
+      name: m.id,
+      label: m.id,
+      provider: this.name,
+      maxTokenAllowed: 8000,
+    }));
+  }
+
+  getModelInstance(options: {
+    model: string;
+    serverEnv?: Env;
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+  }): LanguageModelV1 {
+    const { model, serverEnv, apiKeys, providerSettings } = options;
+
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: providerSettings?.[this.name],
+      serverEnv: serverEnv as any,
+      defaultBaseUrlKey: 'DMR_API_BASE_URL',
+      defaultApiTokenKey: '',
+    });
+
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for Docker Model Runner provider');
+    }
+
+    const envRecord = Object.entries(serverEnv || ({} as any)).reduce(
+      (acc, [k, v]) => {
+        acc[k] = String(v);
+        return acc;
+      },
+      {} as Record<string, string>,
+    );
+
+    const isDocker = process?.env?.RUNNING_IN_DOCKER === 'true' || envRecord?.RUNNING_IN_DOCKER === 'true';
+    const normalized = this._normalizeBaseUrl(baseUrl, !!isDocker);
+
+    logger.debug('Docker Model Runner Base Url used: ', normalized);
+
+    const openai = createOpenAI({
+      baseURL: `${normalized}/engines/v1`,
+      apiKey: '',
+    });
+
+    return openai(model);
+  }
+}
\ No newline at end of file
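For orientation, this is roughly how the new provider is exercised once it is registered (see the registry change below). The IProviderSetting shape ({ enabled, baseUrl }) and the fallback model name are assumptions based on the rest of this diff:

import DockerModelRunnerProvider from '~/lib/modules/llm/providers/docker-model-runner';

async function listAndResolveDmrModel() {
  const provider = new DockerModelRunnerProvider();
  const settings = { enabled: true, baseUrl: 'http://127.0.0.1:12434' };

  // Discover whatever `docker model pull` has made available locally
  const models = await provider.getDynamicModels(undefined, settings, {});
  console.log(models.map((m) => m.name));

  // Resolve a LanguageModelV1 instance that the ai SDK can generate/stream with
  return provider.getModelInstance({
    model: models[0]?.name ?? 'ai/smollm2',
    providerSettings: { DockerModelRunner: settings },
  });
}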
diff --git a/app/lib/modules/llm/registry.ts b/app/lib/modules/llm/registry.ts
index a28e4f9f3a..512b8d16e0 100644
--- a/app/lib/modules/llm/registry.ts
+++ b/app/lib/modules/llm/registry.ts
@@ -17,6 +17,7 @@ import HyperbolicProvider from './providers/hyperbolic';
 import AmazonBedrockProvider from './providers/amazon-bedrock';
 import GithubProvider from './providers/github';
 import MoonshotProvider from './providers/moonshot';
+import DockerModelRunnerProvider from './providers/docker-model-runner';
 
 export {
   AnthropicProvider,
@@ -38,4 +39,5 @@ export {
   LMStudioProvider,
   AmazonBedrockProvider,
   GithubProvider,
+  DockerModelRunnerProvider,
 };
diff --git a/app/lib/services/localModelHealthMonitor.ts b/app/lib/services/localModelHealthMonitor.ts
index 0ce28f898d..0ac64fe62f 100644
--- a/app/lib/services/localModelHealthMonitor.ts
+++ b/app/lib/services/localModelHealthMonitor.ts
@@ -1,4 +1,3 @@
-// Simple EventEmitter implementation for browser compatibility
 class SimpleEventEmitter {
   private _events: Record<string, ((...args: any[]) => void)[]> = {};
 
@@ -32,7 +31,7 @@ class SimpleEventEmitter {
 }
 
 export interface ModelHealthStatus {
-  provider: 'Ollama' | 'LMStudio' | 'OpenAILike';
+  provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner';
   baseUrl: string;
   status: 'healthy' | 'unhealthy' | 'checking' | 'unknown';
   lastChecked: Date;
@@ -63,7 +62,7 @@ export class LocalModelHealthMonitor extends SimpleEventEmitter {
   /**
    * Start monitoring a local provider
    */
-  startMonitoring(provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string, checkInterval?: number): void {
+  startMonitoring(provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string, checkInterval?: number): void {
     const key = this._getProviderKey(provider, baseUrl);
 
     // Stop existing monitoring if any
@@ -91,7 +90,7 @@ export class LocalModelHealthMonitor extends SimpleEventEmitter {
   /**
    * Stop monitoring a local provider
    */
-  stopMonitoring(provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string): void {
+  stopMonitoring(provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string): void {
     const key = this._getProviderKey(provider, baseUrl);
     const interval = this._checkIntervals.get(key);
 
@@ -107,7 +106,7 @@ export class LocalModelHealthMonitor extends SimpleEventEmitter {
   /**
    * Get current health status for a provider
    */
-  getHealthStatus(provider: 'Ollama' | 'LMStudio' | 'OpenAILike', baseUrl: string): ModelHealthStatus | undefined {
+  getHealthStatus(provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner', baseUrl: string): ModelHealthStatus | undefined {
     const key = this._getProviderKey(provider, baseUrl);
     return this._healthStatuses.get(key);
   }
@@ -122,8 +121,8 @@ export class LocalModelHealthMonitor extends SimpleEventEmitter {
   /**
    * Perform a manual health check
    */
-  async performHealthCheck(
-    provider: 'Ollama' | 'LMStudio' | 'OpenAILike',
+  async performHealthCheck(
+    provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner',
     baseUrl: string,
   ): Promise<void> {
     const key = this._getProviderKey(provider, baseUrl);
@@ -191,8 +190,8 @@ export class LocalModelHealthMonitor extends SimpleEventEmitter {
   /**
    * Check health of a specific provider
    */
-  private async _checkProviderHealth(
-    provider: 'Ollama' | 'LMStudio' | 'OpenAILike',
+  private async _checkProviderHealth(
+    provider: 'Ollama' | 'LMStudio' | 'OpenAILike' | 'DockerModelRunner',
     baseUrl: string,
   ): Promise {
    const controller = new AbortController();
@@ -206,6 +205,8 @@ export class LocalModelHealthMonitor extends SimpleEventEmitter {
         return await this._checkLMStudioHealth(baseUrl, controller.signal);
       case 'OpenAILike':
         return await this._checkOpenAILikeHealth(baseUrl, controller.signal);
+      case 'DockerModelRunner':
+        return await this._checkDockerModelRunnerHealth(baseUrl, controller.signal);
       default:
         throw new Error(`Unsupported provider: ${provider}`);
     }
@@ -214,6 +215,52 @@ export class LocalModelHealthMonitor extends SimpleEventEmitter {
     }
   }
 
+  /**
+   * Check Docker Model Runner health
+   */
+  private async _checkDockerModelRunnerHealth(baseUrl: string, signal: AbortSignal): Promise {
+    try {
+      // DMR uses OpenAI-compatible endpoints under /engines/v1
+      const normalizedUrl = baseUrl.endsWith('/') ? baseUrl.slice(0, -1) : baseUrl;
+
+      // Use server-side local proxy to avoid browser CORS issues
+      const proxyUrl = `/api/local-proxy?url=${encodeURIComponent(`${normalizedUrl}/engines/v1/models`)}`;
+      const response = await fetch(proxyUrl, {
+        method: 'GET',
+        headers: { 'Content-Type': 'application/json' },
+        signal,
+      });
+
+      if (!response.ok) {
+        // Handle potential CORS/network issues as generic error
+        if (response.type === 'opaque' || response.status === 0) {
+          throw new Error('CORS_ERROR: Docker Model Runner is blocking requests from this origin. Ensure host TCP support is enabled or use the desktop app.');
+        }
+
+        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+      }
+
+      const data = (await response.json()) as { data?: Array<{ id: string }>; [key: string]: any };
+      const models = (data.data || []).map((m) => m.id);
+
+      // Optionally pick a version string if exposed under 'version' or 'dmr_version'
+      const version: string | undefined = (data as any).version || (data as any).dmr_version;
+
+      return {
+        isHealthy: true,
+        responseTime: 0,
+        availableModels: models,
+        version,
+      };
+    } catch (error) {
+      const message = error instanceof Error ? error.message : 'Unknown error';
+
+      return {
+        isHealthy: false,
+        responseTime: 0,
+        error: message,
+      };
+    }
+  }
+
   /**
    * Check Ollama health
    */
diff --git a/app/lib/stores/settings.ts b/app/lib/stores/settings.ts
index fb123e6a94..228227fd09 100644
--- a/app/lib/stores/settings.ts
+++ b/app/lib/stores/settings.ts
@@ -23,8 +23,8 @@ export interface Shortcuts {
   toggleTerminal: Shortcut;
 }
 
-export const URL_CONFIGURABLE_PROVIDERS = ['Ollama', 'LMStudio', 'OpenAILike'];
-export const LOCAL_PROVIDERS = ['OpenAILike', 'LMStudio', 'Ollama'];
+export const URL_CONFIGURABLE_PROVIDERS = ['Ollama', 'LMStudio', 'OpenAILike', 'DockerModelRunner'];
+export const LOCAL_PROVIDERS = ['OpenAILike', 'LMStudio', 'Ollama', 'DockerModelRunner'];
 
 export type ProviderSetting = Record;
diff --git a/app/routes/api.local-proxy.ts b/app/routes/api.local-proxy.ts
new file mode 100644
index 0000000000..eb830b3032
--- /dev/null
+++ b/app/routes/api.local-proxy.ts
@@ -0,0 +1,82 @@
+import { json } from '@remix-run/cloudflare';
+import type { ActionFunctionArgs, LoaderFunctionArgs } from '@remix-run/cloudflare';
+
+// Very small proxy to access local services (LM Studio, Ollama, Docker Model Runner)
+// from the browser without running into CORS. Restricted to localhost-style hosts.
+
+const ALLOWED_HOSTS = new Set([
+  '127.0.0.1',
+  'localhost',
+  '0.0.0.0',
+  'host.docker.internal',
+  'model-runner.docker.internal',
+]);
+
+const CORS_HEADERS: HeadersInit = {
+  'Access-Control-Allow-Origin': '*',
+  'Access-Control-Allow-Methods': 'GET,POST,OPTIONS',
+  'Access-Control-Allow-Headers': 'content-type,authorization',
+  'Access-Control-Max-Age': '86400',
+};
+
+function isAllowed(urlStr: string): boolean {
+  try {
+    const url = new URL(urlStr);
+    return (url.protocol === 'http:' || url.protocol === 'https:') && ALLOWED_HOSTS.has(url.hostname);
+  } catch {
+    return false;
+  }
+}
+
+async function handle(request: Request) {
+  const url = new URL(request.url);
+  const target = url.searchParams.get('url');
+
+  // Preflight
+  if (request.method === 'OPTIONS') {
+    return new Response(null, { status: 200, headers: CORS_HEADERS });
+  }
+
+  if (!target || !isAllowed(target)) {
+    return json({ error: 'Invalid or disallowed target URL' }, { status: 400, headers: CORS_HEADERS });
+  }
+
+  try {
+    const init: RequestInit = {
+      method: request.method,
+      headers: { 'Content-Type': request.headers.get('Content-Type') || 'application/json' },
+    };
+
+    if (request.method !== 'GET' && request.method !== 'HEAD') {
+      init.body = request.body;
+
+      // @ts-ignore - duplex is required by some runtimes for streaming bodies
+      init.duplex = 'half';
+    }
+
+    const resp = await fetch(target, init);
+
+    const headers = new Headers(CORS_HEADERS);
+
+    // Pass through content type if present
+    const contentType = resp.headers.get('content-type');
+    if (contentType) headers.set('content-type', contentType);
+
+    return new Response(resp.body, {
+      status: resp.status,
+      statusText: resp.statusText,
+      headers,
+    });
+  } catch (err) {
+    return json({ error: 'Proxy request failed', message: err instanceof Error ? err.message : String(err) }, {
+      status: 502,
+      headers: CORS_HEADERS,
+    });
+  }
+}
+
+export async function loader({ request }: LoaderFunctionArgs) {
+  return handle(request);
+}
+
+export async function action({ request }: ActionFunctionArgs) {
+  return handle(request);
+}
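A quick browser-side usage sketch of this route, mirroring how fetchDMRModels in LocalProvidersTab.tsx (earlier in this diff) calls it; the target URL below is the default DMR host TCP endpoint:

// Runs in the browser; the Remix route above performs the actual request server-side,
// so the browser's CORS policy never applies to the local service being queried.
async function listLocalModelsViaProxy(baseUrl: string): Promise<string[]> {
  const normalized = baseUrl.endsWith('/') ? baseUrl.slice(0, -1) : baseUrl;
  const proxyUrl = `/api/local-proxy?url=${encodeURIComponent(`${normalized}/engines/v1/models`)}`;

  const response = await fetch(proxyUrl);

  if (!response.ok) {
    throw new Error(`local-proxy responded with ${response.status}`);
  }

  const data = (await response.json()) as { data?: Array<{ id: string }> };

  return (data.data || []).map((m) => m.id);
}

// listLocalModelsViaProxy('http://127.0.0.1:12434') resolves to the model ids DMR is currently serving.
// Targets whose hostname is not in ALLOWED_HOSTS are rejected by the route with a 400.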