
Commit c9604fe

Merge pull request #380 from wujjpp/check-model-loaded-before-embedding
add checking model loaded first in embedding call
2 parents 396b713 + 03a2e2b

1 file changed: +11 -0 lines changed

controllers/llamaCPP.cc

Lines changed: 11 additions & 0 deletions
@@ -143,6 +143,7 @@ void llamaCPP::chatCompletion(
     auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
     resp->setStatusCode(drogon::k409Conflict);
     callback(resp);
+    return;
   }
 
   const auto &jsonBody = req->getJsonObject();
@@ -401,6 +402,16 @@ void llamaCPP::chatCompletion(
 void llamaCPP::embedding(
     const HttpRequestPtr &req,
     std::function<void(const HttpResponsePtr &)> &&callback) {
+  if (!llama.model_loaded_external) {
+    Json::Value jsonResp;
+    jsonResp["message"] =
+        "Model has not been loaded, please load model into nitro";
+    auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
+    resp->setStatusCode(drogon::k409Conflict);
+    callback(resp);
+    return;
+  }
+
   const auto &jsonBody = req->getJsonObject();
 
   Json::Value responseData(Json::arrayValue);
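
Two things change here: the first hunk adds a missing return; in chatCompletion so the handler stops after sending the 409 Conflict response, and the second hunk gives embedding the same model-loaded guard before it touches the request body. The guard is now duplicated in both handlers; below is a minimal sketch of how it could be factored into one member helper, assuming the Drogon and JsonCpp types already used in this file. The helper name checkModelLoaded is hypothetical and is not part of this commit.

// Hypothetical helper on llamaCPP: returns true when a model is loaded,
// otherwise answers 409 Conflict with the same JSON message as above.
bool llamaCPP::checkModelLoaded(
    const std::function<void(const HttpResponsePtr &)> &callback) {
  if (llama.model_loaded_external) {
    return true;
  }
  Json::Value jsonResp;
  jsonResp["message"] =
      "Model has not been loaded, please load model into nitro";
  auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
  resp->setStatusCode(drogon::k409Conflict);
  callback(resp);
  return false;
}

// Each handler would then start with:
//   if (!checkModelLoaded(callback)) return;

Invoking the callback before returning false keeps the early-exit behavior that the added return; statements establish, so neither handler can fall through after reporting that no model is loaded.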

0 commit comments
