This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit d5e3a7a

chore: minor reformatting
1 parent: ebc7a56

File tree

1 file changed: +7 -7 lines changed

controllers/llamaCPP.cc

Lines changed: 7 additions & 7 deletions
@@ -104,7 +104,7 @@ std::string create_full_return_json(const std::string &id,
   root["usage"] = usage;

   Json::StreamWriterBuilder writer;
-  writer["indentation"] = ""; // Compact output
+  writer["indentation"] = "";  // Compact output
   return Json::writeString(writer, root);
 }
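Both hunks in this function touch the same JsonCpp pattern: a Json::StreamWriterBuilder whose "indentation" setting is cleared so the writer emits single-line JSON. A minimal standalone sketch of that pattern follows; the payload fields are illustrative and not taken from llamaCPP.cc:

#include <json/json.h>
#include <iostream>

int main() {
  Json::Value root;
  root["id"] = "example-id";              // illustrative field
  root["usage"]["total_tokens"] = 42;     // illustrative field

  Json::StreamWriterBuilder writer;
  writer["indentation"] = ""; // empty string -> compact, single-line output
  std::cout << Json::writeString(writer, root) << "\n";
  // prints: {"id":"example-id","usage":{"total_tokens":42}}
}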

@@ -131,16 +131,16 @@ std::string create_return_json(const std::string &id, const std::string &model,
   root["choices"] = choicesArray;

   Json::StreamWriterBuilder writer;
-  writer["indentation"] = ""; // This sets the indentation to an empty string,
-                              // producing compact output.
+  writer["indentation"] = "";  // This sets the indentation to an empty string,
+                               // producing compact output.
   return Json::writeString(writer, root);
 }

 llamaCPP::llamaCPP()
     : queue(new trantor::ConcurrentTaskQueue(llama.params.n_parallel,
                                              "llamaCPP")) {
   // Some default values for now below
-  log_disable(); // Disable the log to file feature, reduce bloat for
+  log_disable();  // Disable the log to file feature, reduce bloat for
                   // target
                   // system ()
 };
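The constructor above sizes a fixed worker pool from llama.params.n_parallel. A minimal sketch of how such a queue accepts work, assuming the trantor library bundled with drogon; the pool size and task bodies here are illustrative, not the actual wiring in this file:

#include <trantor/utils/ConcurrentTaskQueue.h>
#include <iostream>

int main() {
  // Four worker threads named "llamaCPP", mirroring
  // queue(new trantor::ConcurrentTaskQueue(n_parallel, "llamaCPP")).
  trantor::ConcurrentTaskQueue queue(4, "llamaCPP");

  // Fire-and-forget: the lambda runs on one of the queue's workers,
  // keeping long work (e.g. an inference request) off the caller's thread.
  queue.runTaskInQueue([] { std::cout << "async task\n"; });

  // Enqueue a task and block until that particular task has executed.
  queue.syncTaskInQueue([] { std::cout << "sync task\n"; });
}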
@@ -620,12 +620,12 @@ bool llamaCPP::loadModelImpl(std::shared_ptr<Json::Value> jsonBody) {
     std::string llama_log_folder =
         jsonBody->operator[]("llama_log_folder").asString();
     log_set_target(llama_log_folder + "llama.log");
-  } // Set folder for llama log
+  }  // Set folder for llama log
   }
 #ifdef GGML_USE_CUBLAS
   LOG_INFO << "Setting up GGML CUBLAS PARAMS";
   params.mul_mat_q = false;
-#endif // GGML_USE_CUBLAS
+#endif  // GGML_USE_CUBLAS
   if (params.model_alias == "unknown") {
     params.model_alias = params.model;
   }
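One detail worth noting in the logging branch above: log_set_target receives llama_log_folder + "llama.log" with no path separator inserted, so the folder value is expected to end in '/'. A hedged sketch of building such a request body; "llama_log_folder" is the only field taken from this hunk, and the path is illustrative:

#include <json/json.h>
#include <memory>

int main() {
  auto jsonBody = std::make_shared<Json::Value>();
  // Trailing '/' matters: loadModelImpl concatenates the folder and
  // "llama.log" directly, yielding "/tmp/nitro-logs/llama.log" here.
  (*jsonBody)["llama_log_folder"] = "/tmp/nitro-logs/";
}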
@@ -644,7 +644,7 @@ bool llamaCPP::loadModelImpl(std::shared_ptr<Json::Value> jsonBody) {
   // load the model
   if (!llama.load_model(params)) {
     LOG_ERROR << "Error loading the model";
-    return false; // Indicate failure
+    return false;  // Indicate failure
   }
   llama.initialize();
