from flask import Flask, render_template, jsonify, request, send_from_directory
import requests
import threading
import queue

from ollamaClient import OllamaClient

# Initialize Flask app and Ollama client
app = Flask(__name__)
ai = OllamaClient()

# Queue for handling asynchronous chat requests
request_queue: queue.Queue = queue.Queue()
1414
def process_requests() -> None:
    """Worker thread function to process queued chat requests.

    Pulls ``(request_data, response_queue)`` tuples from the module-level
    ``request_queue``. A ``None`` task is the termination signal. Each
    response (or validation error) is put on the per-request
    ``response_queue`` so the waiting HTTP handler can return it.
    """
    while True:
        task = request_queue.get()
        if task is None:
            break  # Exit when a termination signal is received

        request_data, response_queue = task
        prompt = request_data.get("prompt")
        # NOTE(review): this line was hidden in the source diff's hunk gap;
        # reconstructed from the validation check below — confirm against repo.
        model = request_data.get("model")

        if not prompt or not model:
            response_queue.put({"error": "Invalid request data"})
        else:
            response = ai.chat(model=model, prompt=prompt)
            response_queue.put({"response": response})
3031
3132
# Start background worker thread (daemon so it never blocks interpreter exit)
threading.Thread(target=process_requests, daemon=True).start()
3535
3636
# Routes
@app.route("/")
def index():
    """Serve the main chat page."""
    return render_template("index.html"), 200
4041
4142
@app.route("/static/<path:filename>")
def serve_static(filename: str):
    """Serve static assets (css/js/fonts) from the ``static`` directory.

    ``send_from_directory`` performs safe path resolution, preventing
    directory-traversal via crafted filenames.
    """
    return send_from_directory("static", filename), 200
5046
5147
@app.errorhandler(Exception)
def handle_error(error: Exception):
    """Render a friendly error page for any unhandled exception.

    HTTP 404s (Werkzeug exceptions carry a ``code`` attribute) get the
    dedicated 404 page; everything else falls through to a generic 500.
    """
    if getattr(error, "code", None) == 404:
        return render_template("404.html"), 404
    return render_template("error.html", error=str(error)), 500
@@ -62,7 +58,7 @@ def api_connection():
6258 res = requests .get ("http://localhost:11434" , timeout = 5 )
6359 status_code = res .status_code
6460 except (requests .exceptions .ConnectionError , requests .exceptions .Timeout ):
65- print ( "[LOG] ERROR: UNABLE TO CONNECT TO SERVER " )
61+ app . logger . error ( "Unable to connect to server " )
6662 status_code = 404
6763
6864 return (
@@ -79,25 +75,24 @@ def api_connection():
7975
@app.route("/api/connection/stats")
def api_connection_stats():
    """Return the available and currently-active Ollama models as JSON.

    Responds 404 when neither list has any entries (no models installed
    or the backend is unreachable).
    """
    models = ai.list_models()
    active_models = ai.list_active_models()

    if not models and not active_models:
        return jsonify({"error": "No models found"}), 404

    return jsonify({"available": models, "active": active_models}), 200
8985
9086
@app.route("/api/chat", methods=["POST"])
def api_chat():
    """Queue a chat request for the worker thread and return its response.

    Expects a JSON body with ``prompt`` and ``model`` keys. Returns the
    model's response with 200, or an error payload with 400.
    """
    # Fix: request.json raises / yields None on a missing or malformed JSON
    # body; enqueueing None would crash the worker thread on .get() and
    # leave this handler blocked forever on response_queue.get().
    data = request.get_json(silent=True)
    if not data:
        return jsonify({"error": "Invalid request data"}), 400

    response_queue: queue.Queue = queue.Queue()
    request_queue.put((data, response_queue))
    response = response_queue.get()  # Blocking wait for the worker's reply
    status = 200 if "response" in response else 400
    return jsonify(response), status
10095
10196
if __name__ == "__main__":
    # Debug mode intentionally off; enable via FLASK_DEBUG when developing.
    app.run()