
Commit f33d6f4

Merge pull request #388 from janhq/387-bug-buggy-folder-feature
remove model folder feature
2 parents: c9604fe + d2edbda; commit f33d6f4

File tree

1 file changed (+1, -20 lines)


controllers/llamaCPP.h (1 addition, 20 deletions)
@@ -2496,25 +2496,6 @@ class llamaCPP : public drogon::HttpController<llamaCPP> {
     log_disable(); // Disable the log to file feature, reduce bloat for
                    // target
                    // system ()
-    std::vector<std::string> llama_models =
-        nitro_utils::listFilesInDir(nitro_utils::models_folder);
-    std::string model_index;
-    if (llama_models.size() > 0) {
-      LOG_INFO << "Found models folder, here are the llama models you have:";
-      int index_val = 0;
-      for (auto llama_model : llama_models) {
-        LOG_INFO << "index: " << index_val++ << "| model: " << llama_model;
-        std::cout
-            << "Please type the index of the model you want to load here >> ";
-        std::cin >> model_index;
-        Json::Value jsonBody;
-        jsonBody["llama_model_path"] = nitro_utils::models_folder + "/" +
-                                       llama_models[std::stoi(model_index)];
-        loadModelImpl(jsonBody);
-      }
-    } else {
-      LOG_INFO << "Not found models folder, start server as usual";
-    }
   }

   ~llamaCPP() { stopBackgroundTask(); }
@@ -2575,4 +2556,4 @@ class llamaCPP : public drogon::HttpController<llamaCPP> {
   std::atomic<bool> single_queue_is_busy; // This value only used under the
                                           // condition n_parallel is 1
 };
-}; // namespace inferences
+}; // namespace inferences
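
For context, the bug referenced by the branch name (387-bug-buggy-folder-feature) is visible in the removed hunk: the std::cin prompt sits inside the for loop over llama_models, so the user is asked for an index once per discovered model and loadModelImpl runs on every iteration. The PR resolves this by dropping the feature entirely. Purely for illustration, here is a minimal sketch of what a corrected version might have looked like: prompt once after listing, then load the single chosen model. It reuses the names from the removed code (nitro_utils::listFilesInDir, nitro_utils::models_folder, LOG_INFO, loadModelImpl); the std::getline parsing, bounds check, and try/catch are assumptions, not code that ever shipped.

// Sketch only; would live in the llamaCPP constructor body and needs
// <iostream>, <string>, <vector>, and <stdexcept> in scope.
std::vector<std::string> llama_models =
    nitro_utils::listFilesInDir(nitro_utils::models_folder);
if (llama_models.empty()) {
  LOG_INFO << "Not found models folder, start server as usual";
} else {
  LOG_INFO << "Found models folder, here are the llama models you have:";
  for (size_t i = 0; i < llama_models.size(); ++i) {
    LOG_INFO << "index: " << i << "| model: " << llama_models[i];
  }
  // Prompt exactly once, outside the loop, and read the whole line.
  std::cout << "Please type the index of the model you want to load here >> ";
  std::string model_index;
  std::getline(std::cin, model_index);
  try {
    size_t idx = std::stoul(model_index); // throws on non-numeric input
    if (idx >= llama_models.size())
      throw std::out_of_range("model index out of range");
    Json::Value jsonBody;
    jsonBody["llama_model_path"] =
        nitro_utils::models_folder + "/" + llama_models[idx];
    loadModelImpl(jsonBody); // load only the single chosen model
  } catch (const std::exception &) {
    LOG_INFO << "Invalid model index, start server as usual";
  }
}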
