Commit b020fcb

Handle DEFAULT_META_PROVIDER_CONFIG
1 parent 8c91940 commit b020fcb

4 files changed: +16 -46 lines

docker/.env.dev

Lines changed: 1 addition & 0 deletions
@@ -25,6 +25,7 @@ DEFAULT_AZURE_OPENAI_API_KEY=
 DEFAULT_COHERE_API_KEY=
 DEFAULT_FOREFRONTAI_API_KEY=
 DEFAULT_ELEVENLABS_API_KEY=
+DEFAULT_META_PROVIDER_CONFIG=

 # Runner
 RUNNER_WSS_HOSTNAME=localhost

llmstack/base/models.py

Lines changed: 2 additions & 11 deletions
@@ -102,18 +102,9 @@ def get_vendor_env_platform_defaults():
         promptly_provider_config.embeddings_generator = EmbeddingsGeneratorConfig(
             **settings.DEFAULT_EMBEDDINGS_GENERATOR_CONFIG
         )
-    if settings.DEFAULT_META_PROVIDER_CONFIG:
-        meta_provider_configs = {}
-        try:
-            meta_provider_configs = json.loads(settings.DEFAULT_META_PROVIDER_CONFIG)
-        except json.JSONDecodeError as e:
-            logger.error(
-                f"Error parsing DEFAULT_META_PROVIDER_CONFIG: {settings.DEFAULT_META_PROVIDER_CONFIG}",
-            )
-            logger.error(e)
-
+    if settings.DEFAULT_META_PROVIDER_CONFIG and isinstance(settings.DEFAULT_META_PROVIDER_CONFIG, dict):
         # Iterate over the meta provider config and add the provider configs
-        for k, v in meta_provider_configs.items():
+        for k, v in settings.DEFAULT_META_PROVIDER_CONFIG.items():
             provider_configs[k] = MetaProviderConfig(
                 deployment_config=v,
                 provider_config_source=ProviderConfigSource.PLATFORM_DEFAULT.value,
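
Note: after this change, get_vendor_env_platform_defaults() expects settings.DEFAULT_META_PROVIDER_CONFIG to already be a dict (the base64/JSON decoding now lives in llmstack/server/settings.py, see below). A minimal sketch of the shape that mapping might take; the provider key and deployment fields are purely hypothetical, not a schema defined by this commit:

# Hypothetical decoded value of settings.DEFAULT_META_PROVIDER_CONFIG.
# Field names are illustrative; the real schema is whatever
# MetaProviderConfig.deployment_config accepts.
DEFAULT_META_PROVIDER_CONFIG = {
    "meta/llama-3-8b-instruct": {                  # hypothetical provider key
        "endpoint": "https://models.example/v1",   # hypothetical
        "api_key": "sk-...",                       # hypothetical
    },
}
# Each (k, v) pair is then wrapped as
#   MetaProviderConfig(deployment_config=v,
#                      provider_config_source=ProviderConfigSource.PLATFORM_DEFAULT.value)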

llmstack/processors/providers/meta/chat_completions.py

Lines changed: 4 additions & 24 deletions
@@ -3,7 +3,6 @@
 from typing import List, Optional

 from asgiref.sync import async_to_sync
-from django.conf import settings
 from pydantic import Field

 from llmstack.apps.schemas import OutputTemplate
@@ -161,28 +160,9 @@ def session_data_to_persist(self) -> dict:
     def process(self) -> MessagesOutput:
         from llmstack.common.utils.sslr import LLM

-        deployment_configs = settings.CUSTOM_MODELS_DEPLOYMENT_CONFIG.get(
-            f"{self.provider_slug()}/{self._config.model.model_name()}"
-        )
-        if not deployment_configs:
-            raise Exception(
-                f"Model deployment config not found for {self.provider_slug()}/{self._config.model.model_name()}"
-            )
-
-        model_deployment_configs = []
-
-        if not self._config.deployment_names:
-            if deployment_configs.get("default"):
-                model_deployment_configs = [deployment_configs["default"]]
-        else:
-            for entry in self._config.deployment_names:
-                if deployment_configs.get(entry):
-                    model_deployment_configs.append(deployment_configs.get(entry))
-
-        if not model_deployment_configs:
-            raise Exception(
-                f"Model deployment config not found for {self.provider_slug()}/{self._config.model.model_name()}"
-            )
+        deployment_config = self.get_provider_config(model_slug=self._config.model)
+        if not deployment_config:
+            raise Exception(f"Model deployment config not found for {self.provider_slug()}/{self._config.model}")

         messages = []

@@ -196,7 +176,7 @@ def process(self) -> MessagesOutput:
         for message in self._input.messages:
             messages.append({"role": str(message.role), "content": str(message.message)})

-        client = LLM(provider="custom", deployment_config=model_deployment_configs[0])
+        client = LLM(provider="custom", deployment_config=deployment_config.model_dump().get("deployment_config"))

         result = client.chat.completions.create(
             messages=messages,
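
The call to deployment_config.model_dump().get("deployment_config") suggests get_provider_config() returns a pydantic object shaped like the MetaProviderConfig built in llmstack/base/models.py, with only its raw deployment settings handed to the client. A rough sketch of that unwrapping, under that assumption and with hypothetical values:

# Sketch only: assumes MetaProviderConfig is a pydantic model carrying the two
# fields set in llmstack/base/models.py. Values are hypothetical.
provider_config = MetaProviderConfig(
    deployment_config={"endpoint": "https://models.example/v1"},  # hypothetical
    provider_config_source=ProviderConfigSource.PLATFORM_DEFAULT.value,
)
deployment_config = provider_config.model_dump().get("deployment_config")
# -> {"endpoint": "https://models.example/v1"}, passed to LLM(provider="custom", ...)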

llmstack/server/settings.py

Lines changed: 9 additions & 11 deletions
@@ -365,7 +365,15 @@
     "DEFAULT_GOOGLE_CUSTOM_SEARCH_CX",
     "",
 )
-DEFAULT_META_PROVIDER_CONFIG = os.getenv("DEFAULT_META_PROVIDER_CONFIG", "{}")
+DEFAULT_META_PROVIDER_CONFIG = {}
+try:
+    DEFAULT_META_PROVIDER_CONFIG = (
+        json.loads(base64.b64decode(os.getenv("DEFAULT_META_PROVIDER_CONFIG")))
+        if os.getenv("DEFAULT_META_PROVIDER_CONFIG")
+        else {}
+    )
+except Exception:
+    print("Error parsing DEFAULT_META_PROVIDER_CONFIG")

 WEAVIATE_URL = os.getenv("WEAVIATE_URL", "http://weaviate:8080")
 WEAVIATE_TEXT2VEC_MODULE_CONFIG = {
@@ -682,16 +690,6 @@

 ENABLE_JOBS = os.getenv("ENABLE_JOBS", "True") == "True"

-CUSTOM_MODELS_DEPLOYMENT_CONFIG = {}
-try:
-    CUSTOM_MODELS_DEPLOYMENT_CONFIG = (
-        json.loads(base64.b64decode(os.getenv("CUSTOM_MODELS_DEPLOYMENT_CONFIG")))
-        if os.getenv("CUSTOM_MODELS_DEPLOYMENT_CONFIG")
-        else {}
-    )
-except Exception:
-    print("Error parsing CUSTOM_MODELS_DEPLOYMENT_CONFIG")
-
 CONNECTION_TYPE_INTERFACE_EXCLUDED_PACKAGES = os.getenv("CONNECTION_TYPE_INTERFACE_EXCLUDED_PACKAGES", "").split(",")

 DEFAULT_DATA_DESTINATION_CONFIG = {"provider_slug": "weaviate", "processor_slug": "vector-store"}
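
Because the setting is now decoded with json.loads(base64.b64decode(...)), the value exported as DEFAULT_META_PROVIDER_CONFIG (for example in docker/.env.dev) would be produced roughly like this; the payload shown is a hypothetical example, not a schema defined by this commit:

import base64
import json

# Hypothetical provider config, encoded the same way settings.py decodes it.
config = {"meta/llama-3-8b-instruct": {"endpoint": "https://models.example/v1"}}
encoded = base64.b64encode(json.dumps(config).encode()).decode()
print(f"DEFAULT_META_PROVIDER_CONFIG={encoded}")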
