
Commit c453d3d

feat: adapt for modularization
1 parent 98bb2af commit c453d3d

File tree

147 files changed: +1266 additions, -17120 deletions


examples/01_intro_to_llmstudio copy.ipynb

Lines changed: 1 addition & 1 deletion
@@ -222,7 +222,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llmstudio import LLM\n",
+    "from libs.llmstudio.llmstudio import LLM\n",
    "import nest_asyncio\n",
    "nest_asyncio.apply()"
   ]
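
Note: notebooks in this commit are run from the repo root, so the facade is now imported from the libs/ tree. A minimal usage sketch (only the import path comes from the hunk above; the constructor arguments and the chat call mirror the other entry points in this commit and are assumptions):

from libs.llmstudio.llmstudio import LLM
import nest_asyncio

nest_asyncio.apply()  # allow a nested event loop inside Jupyter

llm = LLM(provider="openai")                 # constructor signature assumed
result = llm.chat("Hello!", model="gpt-4o")  # chat(...) as used in the proxy notebook below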

examples/01_intro_to_llmstudio_core.ipynb

Lines changed: 501 additions & 0 deletions
Large diffs are not rendered by default.
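
The new core notebook is not rendered in this view. Based on the commented-out "# from llmstudio_core import LLMCore as LLM" line in the proxy notebook and the LLMCore factory shown further down, it presumably exercises the proxy-free path, roughly like this (a sketch, not the notebook's actual cells):

from llmstudio_core import LLMCore

llm = LLMCore(provider="openai", api_key="...")  # api_key handling assumed
result = llm.chat("Hello!", model="gpt-4o")      # same chat call as in the proxy notebook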

examples/01_intro_to_llmstudio_with_proxy.ipynb

Lines changed: 33 additions & 17 deletions
@@ -23,14 +23,14 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Running LLMstudio Engine on http://0.0.0.0:8001 \n",
+     "Running LLMstudio Proxy on http://0.0.0.0:8001 \n",
      "Running LLMstudio Tracking on http://0.0.0.0:8002 \n"
     ]
    }
   ],
   "source": [
-    "from llmstudio.server import start_server\n",
-    "start_server()"
+    "from llmstudio.server import start_servers\n",
+    "start_servers()"
   ]
  },
  {
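
The rename matches the captured output: one call now brings up both services, the proxy on port 8001 and tracking on port 8002. In a plain script this is:

from llmstudio.server import start_servers

start_servers()  # starts LLMstudio Proxy (:8001) and LLMstudio Tracking (:8002)
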
@@ -47,8 +47,8 @@
    }
   ],
   "source": [
-    "from llmstudio.engine.provider import LLMProxyProvider as LLM\n",
-    "from llmstudio.engine.provider import ProxyConfig\n",
+    "from llmstudio_proxy.provider import LLMProxyProvider as LLM\n",
+    "from llmstudio_proxy.provider import ProxyConfig\n",
    "\n",
    "# from llmstudio_core import LLMCore as LLM\n",
    "# from llmstudio import LLM\n",
@@ -61,7 +61,20 @@
    "cell_type": "code",
    "execution_count": 3,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "ename": "Exception",
+     "evalue": "{\"detail\":\"Not Found\"}",
+     "output_type": "error",
+     "traceback": [
+      "---------------------------------------------------------------------------",
+      "Exception                                 Traceback (most recent call last)",
+      "Cell In[3], line 1\n----> 1 result = llm.chat(\"olá\", model=\"gpt-4o\")\n",
+      "File ~/fun/LLMstudio/libs/proxy/llmstudio_proxy/provider.py:63, in LLMProxyProvider.chat(self, chat_input, model, is_stream, retries, parameters, **kwargs)\n     61 if not response.ok:\n     62     error_data = response.text\n---> 63     raise Exception(error_data)\n     65 if is_stream:\n     66     return self.generate_chat(response)\n",
+      "Exception: {\"detail\":\"Not Found\"}"
+     ]
+    }
+   ],
    "source": [
     "result = llm.chat(\"olá\", model=\"gpt-4o\")\n"
    ]
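
The committed output shows the call failing: the proxy answers 404 ({"detail":"Not Found"}) and the client re-raises the response body. The relevant excerpt from LLMProxyProvider.chat, as reconstructed from the traceback (libs/proxy/llmstudio_proxy/provider.py, around line 63):

if not response.ok:
    error_data = response.text
    raise Exception(error_data)  # surfaces the proxy's error body, e.g. {"detail":"Not Found"}

if is_stream:
    return self.generate_chat(response)
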
@@ -74,15 +74,15 @@
    {
     "data": {
      "text/plain": [
-      "('Olá! Como posso ajudar você hoje?',\n",
+      "('Olá! Como posso ajudá-lo hoje?',\n",
       " {'input_tokens': 2,\n",
       " 'output_tokens': 11,\n",
       " 'total_tokens': 13,\n",
       " 'cost_usd': 0.000175,\n",
-      " 'latency_s': 0.6496210098266602,\n",
-      " 'time_to_first_token_s': 0.5341501235961914,\n",
-      " 'inter_token_latency_s': 0.01241710450914171,\n",
-      " 'tokens_per_second': 15.393590799454474})"
+      " 'latency_s': 0.9914300441741943,\n",
+      " 'time_to_first_token_s': 0.8828918933868408,\n",
+      " 'inter_token_latency_s': 0.011675781673855253,\n",
+      " 'tokens_per_second': 10.086440348223903})"
      ]
     },
     "execution_count": 4,
@@ -117,17 +130,20 @@
    "metadata": {},
    "outputs": [
     {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "host='0.0.0.0' port='8001' url=None username=None password=None\n",
-      "Connected to LLMStudio Proxy @ 0.0.0.0:8001\n"
+     "ename": "ModuleNotFoundError",
+     "evalue": "No module named 'llmstudio_proxyprovider'",
+     "output_type": "error",
+     "traceback": [
+      "---------------------------------------------------------------------------",
+      "ModuleNotFoundError                       Traceback (most recent call last)",
+      "Cell In[6], line 2\n      1 # You can set OPENAI_API_KEY and ANTHROPIC_API_KEY on .env file\n----> 2 from llmstudio_proxyprovider import ProxyConfig\n      3 proxy = ProxyConfig(host=\"0.0.0.0\", port=\"8001\")\n      4 print(proxy)\n",
+      "ModuleNotFoundError: No module named 'llmstudio_proxyprovider'"
      ]
     }
    ],
    "source": [
     "# You can set OPENAI_API_KEY and ANTHROPIC_API_KEY on .env file\n",
-    "from llmstudio.engine.provider import ProxyConfig\n",
+    "from llmstudio_proxyprovider import ProxyConfig\n",
     "proxy = ProxyConfig(host=\"0.0.0.0\", port=\"8001\")\n",
     "print(proxy)\n",
     "\n",

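The committed error output points at the cause: there is no llmstudio_proxyprovider module, while the earlier hunk and examples/llm_proxy.py below import from llmstudio_proxy.provider. The working form of this cell is presumably (the corrected module path is an assumption):

# You can set OPENAI_API_KEY and ANTHROPIC_API_KEY in a .env file
from llmstudio_proxy.provider import ProxyConfig  # assumed fix for the missing dot

proxy = ProxyConfig(host="0.0.0.0", port="8001")
print(proxy)
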
examples/llm_proxy.py

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 from llmstudio.server import start_server
 start_server()
 
-from llmstudio.engine.provider import LLMProxyProvider
+from llmstudio_proxy.provider import LLMProxyProvider
 
 
 llm = LLMProxyProvider(provider="openai", host="0.0.0.0", port="8001")
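
After the change the whole example reads as below; note it still calls start_server from llmstudio.server, while the proxy notebook above now uses start_servers. The final chat call is an assumption, added only to show the provider in use:

from llmstudio.server import start_server
start_server()

from llmstudio_proxy.provider import LLMProxyProvider

llm = LLMProxyProvider(provider="openai", host="0.0.0.0", port="8001")
print(llm.chat("Hello!", model="gpt-4o"))  # hypothetical call, mirroring the notebook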

libs/core/llmstudio_core/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -2,9 +2,9 @@
 from typing import Optional
 from llmstudio_core.providers.provider import BaseProvider, provider_registry
 
-from llmstudio_core.providers import _load_engine_config
+from llmstudio_core.providers import _load_providers_config
 
-_engine_config = _load_engine_config()
+_engine_config = _load_providers_config()
 
 
 def LLMCore(provider: str, api_key: Optional[str] = None, **kwargs) -> BaseProvider:
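
LLMCore is the factory that turns a provider name into a concrete BaseProvider, using the parsed config and the provider_registry imported above. Its body is outside this hunk, so the following is only a plausible sketch of how those pieces fit together:

def LLMCore(provider: str, api_key: Optional[str] = None, **kwargs) -> BaseProvider:
    provider_config = _engine_config.providers[provider]  # lookup in the loaded config (assumed)
    provider_class = provider_registry[provider]          # registry keyed by provider name (assumed)
    return provider_class(config=provider_config, api_key=api_key, **kwargs)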

libs/core/llmstudio_core/providers/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ class EngineConfig(BaseModel):
     providers: Dict[str, ProviderConfig]
 
 
-def _load_engine_config() -> EngineConfig:
+def _load_providers_config() -> EngineConfig:
     #TODO read from github
     default_config_path = Path(os.path.join(os.path.dirname(__file__), "config.yaml"))
     local_config_path = Path(os.getcwd(), "config.yaml")
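
Only the function name changes; the loader still resolves two candidate files, the config.yaml packaged next to the module and an optional config.yaml in the current working directory. The rest of the function is outside this hunk, so the precedence and parsing below are assumptions:

def _load_providers_config() -> EngineConfig:
    # TODO read from github
    default_config_path = Path(os.path.join(os.path.dirname(__file__), "config.yaml"))
    local_config_path = Path(os.getcwd(), "config.yaml")
    config_path = local_config_path if local_config_path.exists() else default_config_path  # precedence assumed
    with open(config_path) as f:
        return EngineConfig(**yaml.safe_load(f))  # parsing into the Pydantic model assumed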
