Commit 6e6743a ("ready")
1 parent 72f9bf4

13 files changed: +250 −356 lines changed

.gitignore

Lines changed: 2 additions & 1 deletion
@@ -1,2 +1,3 @@
 __pycache__/
-venv/
+venv/
+test.py

README.md

Lines changed: 18 additions & 2 deletions
@@ -1,3 +1,19 @@
-# UNDER DEVELOPMENT
+# Simple AI UI
+Simple AI UI is a simple, minimalist, and lightweight web GUI. It also includes a server for hosting your own AI on your local network or on the internet.
+
+### Features
+- Robust API infrastructure.
+- Simple, minimalist, and lightweight UI
+- Privacy (your data is stored on your computer)
+
+### Supported Platforms for Hosting
+- Windows
+- Mac
+- Linux
+**NOTE:** Ollama must be installed on the system.
+
+### Flavors
+- Python
+- Rust-lang (coming soon)
+
 
-Made By Junaid (www.abujuni.dev)
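
The README states that Ollama must be installed. As a minimal sketch, a host could verify that prerequisite before launching the UI; the endpoint http://localhost:11434 is Ollama's default and matches the URL app.py pings below, while the helper name is hypothetical:

import requests

def ollama_is_running(url: str = "http://localhost:11434") -> bool:
    """Return True if an Ollama server answers at the given URL."""
    try:
        return requests.get(url, timeout=5).status_code == 200
    except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
        return False  # Not reachable: install and start Ollama first

print("Ollama reachable:", ollama_is_running())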

app.py

Lines changed: 24 additions & 29 deletions
@@ -1,22 +1,23 @@
 from flask import Flask, render_template, jsonify, request, send_from_directory
 import requests
-from ollamaClient import OllamaClient
 import threading
 import queue
+from ollamaClient import OllamaClient
 
 # Initialize Flask app and Ollama client
 app = Flask(__name__)
 ai = OllamaClient()
 
-# Queue for handling requests
-request_queue = queue.Queue()
+# Queue for handling asynchronous chat requests
+request_queue: queue.Queue = queue.Queue()
 
 
-def process_requests():
+def process_requests() -> None:
+    """Worker thread function to process queued chat requests."""
     while True:
         task = request_queue.get()
         if task is None:
-            break  # Exit thread when None is received
+            break  # Exit when a termination signal is received
 
         request_data, response_queue = task
         prompt = request_data.get("prompt")
@@ -25,32 +26,27 @@ def process_requests():
         if not prompt or not model:
             response_queue.put({"error": "Invalid request data"})
         else:
-            response = ai.chat(prompt=prompt, model=model)
+            response = ai.chat(model=model, prompt=prompt)
             response_queue.put({"response": response})
 
 
-# Start the background worker thread
-worker_thread = threading.Thread(target=process_requests, daemon=True)
-worker_thread.start()
+# Start background worker thread
+threading.Thread(target=process_requests, daemon=True).start()
 
 
+# Routes
 @app.route("/")
 def index():
     return render_template("index.html"), 200
 
 
-@app.route("/static/<name>")
-def file_manager_static(name):
-    return send_from_directory(directory="./static", path=name), 200
-
-
-@app.route("/fonts/<name>")
-def file_manager_fonts(name):
-    return send_from_directory(directory="./fonts", path=name), 200
+@app.route("/static/<path:filename>")
+def serve_static(filename: str):
+    return send_from_directory("static", filename), 200
 
 
 @app.errorhandler(Exception)
-def error_page(error):
+def handle_error(error: Exception):
     if hasattr(error, "code") and error.code == 404:
         return render_template("404.html"), 404
     return render_template("error.html", error=str(error)), 500
@@ -62,7 +58,7 @@ def api_connection():
         res = requests.get("http://localhost:11434", timeout=5)
         status_code = res.status_code
     except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
-        print("[LOG] ERROR: UNABLE TO CONNECT TO SERVER")
+        app.logger.error("Unable to connect to server")
         status_code = 404
 
     return (
@@ -79,25 +75,24 @@ def api_connection():
 
 @app.route("/api/connection/stats")
 def api_connection_stats():
-    list_of_models = ai.list_models()
-    list_of_active_models = ai.list_active_models()
+    models = ai.list_models()
+    active_models = ai.list_active_models()
 
-    if not list_of_models and not list_of_active_models:
+    if not models and not active_models:
        return jsonify({"error": "No models found"}), 404
 
-    return jsonify({"available": list_of_models, "active": list_of_active_models}), 200
+    return jsonify({"available": models, "active": active_models}), 200
 
 
 @app.route("/api/chat", methods=["POST"])
 def api_chat():
     data = request.json
-    response_queue = queue.Queue()
+    response_queue: queue.Queue = queue.Queue()
     request_queue.put((data, response_queue))
-
-    # Wait for the response from the queue
-    response = response_queue.get()
-    return jsonify(response), 200 if "response" in response else 400
+    response = response_queue.get()  # Blocking wait for response
+    status = 200 if "response" in response else 400
+    return jsonify(response), status
 
 
 if __name__ == "__main__":
-    app.run(debug=True)
+    app.run()
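
For reference, the reworked /api/chat route expects a JSON body with "model" and "prompt" keys and blocks until the worker thread posts a result. A minimal client sketch, assuming Flask's default address (localhost:5000) and an illustrative model name:

import requests

# Port and model name are assumptions for illustration only.
payload = {"model": "llama3", "prompt": "Hello!"}
res = requests.post("http://localhost:5000/api/chat", json=payload, timeout=120)
print(res.status_code, res.json())  # {"response": ...} on 200, {"error": ...} on 400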

ollamaClient.py

Lines changed: 33 additions & 52 deletions
@@ -10,96 +10,77 @@
 }
 
 
-# Helper functions
-def fetch_data(url):
+def fetch_data(url: str) -> dict:
     """Fetch data from a given URL using a GET request."""
     try:
         response = requests.get(url, timeout=5)
-        response.raise_for_status()  # Raise an exception for HTTP errors
+        response.raise_for_status()
         return response.json()
     except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
         print(f"[LOG] ERROR: Unable to connect to server - {e}")
-        return {}
     except requests.exceptions.RequestException as e:
         print(f"[LOG] ERROR: Request failed - {e}")
-        return {}
+    return {}
 
 
-def send_data(url, data):
+def send_data(url: str, data: dict) -> str:
     """Send data to a given URL using a POST request."""
     headers = {"Content-Type": "application/json"}
     try:
         response = requests.post(url, json=data, headers=headers, stream=True)
-        response.raise_for_status()  # Raise an exception for HTTP errors
-        return response.text  # Return text instead of raw content
+        response.raise_for_status()
+        return response.text
     except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
         print(f"[LOG] ERROR: Unable to connect to server - {e}")
-        return ""
     except requests.exceptions.RequestException as e:
         print(f"[LOG] ERROR: Request failed - {e}")
-        return ""
+    return ""
+
+
+def parse_models(data: dict) -> list:
+    """Parse and return a list of models from response data."""
+    models = data.get("models", [])
+    return [
+        {"name": item.get("name", ""), "model": item.get("model", "")}
+        for item in models
+    ]
 
 
-# Ollama Client Class
 class OllamaClient:
     @staticmethod
-    def list_models():
-        """Fetch and return a list of available models."""
+    def list_models() -> list:
         data = fetch_data(OLLAMA_SERVER + API_ENDPOINTS["list-models"])
-        if not data:
-            return []
-
-        models = data.get("models", [])
-        return [
-            {"name": item.get("name", ""), "model": item.get("model", "")}
-            for item in models
-        ]
+        return parse_models(data)
 
     @staticmethod
-    def list_active_models():
-        """Fetch and return a list of active models."""
+    def list_active_models() -> list:
         data = fetch_data(OLLAMA_SERVER + API_ENDPOINTS["list-active-models"])
-        if not data:
-            return []
-
-        models = data.get("models", [])
-        return [
-            {"name": item.get("name", ""), "model": item.get("model", "")}
-            for item in models
-        ]
+        return parse_models(data)
 
     @staticmethod
-    def chat(model, prompt):
-        """Send a chat prompt to a specified model and return the response."""
+    def chat(model: str, prompt: str) -> str:
         data = {
             "model": model,
             "messages": [{"role": "user", "content": prompt}],
         }
-
         response_text = send_data(OLLAMA_SERVER + API_ENDPOINTS["chat"], data)
         if not response_text:
             return ""
 
-        # Parse the response (assuming it's a stream of JSON objects)
-        parsed_lines = []
-        for line in response_text.split("\n"):
-            if line.strip():
-                try:
-                    parsed_lines.append(json.loads(line))
-                except json.JSONDecodeError as e:
-                    print(f"[LOG] ERROR: Failed to parse JSON - {e}")
-
-        # Extract the message contents
-        response_messages = [
-            item.get("message", {}).get("content", "")
-            for item in parsed_lines
-            if "message" in item
-        ]
+        response_lines = [line for line in response_text.splitlines() if line.strip()]
+        response_messages = []
+        for line in response_lines:
+            try:
+                json_line = json.loads(line)
+                if "message" in json_line:
+                    response_messages.append(
+                        json_line.get("message", {}).get("content", "")
+                    )
+            except json.JSONDecodeError as e:
+                print(f"[LOG] ERROR: Failed to parse JSON - {e}")
 
         response_str = "".join(response_messages)
-
         print(
-            f"[LOG] INCOMING DATA:\nmodel: {model}\nprompt: {prompt}\nOUTGOING:\nResponse: {response_str}"
+            f"[LOG] INCOMING DATA:\nModel: {model}\nPrompt: {prompt}\nResponse: {response_str}"
         )
-
         return response_str
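
The refactored chat() treats the response body as newline-delimited JSON and joins the "content" fields of the "message" chunks. A standalone sketch of that parsing step; the two sample chunks imitate Ollama's streaming chat format and are fabricated for illustration, not captured output:

import json

response_text = (
    '{"message": {"role": "assistant", "content": "Hel"}, "done": false}\n'
    '{"message": {"role": "assistant", "content": "lo!"}, "done": true}\n'
)

response_messages = []
for line in response_text.splitlines():
    if not line.strip():
        continue  # skip blank lines, as chat() does
    chunk = json.loads(line)
    if "message" in chunk:
        response_messages.append(chunk.get("message", {}).get("content", ""))

print("".join(response_messages))  # -> Hello!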
