Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit 1262a41

Browse files
committed
update
1 parent a1a60d9 commit 1262a41

14 files changed

+81
-71
lines changed

engine/commands/chat_completion_cmd.cc

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,11 @@
11
#include "chat_completion_cmd.h"
2-
#include "httplib.h"
3-
2+
#include "config/yaml_config.h"
43
#include "cortex_upd_cmd.h"
54
#include "database/models.h"
5+
#include "httplib.h"
66
#include "model_status_cmd.h"
7-
#include "run_cmd.h"
87
#include "server_start_cmd.h"
9-
#include "trantor/utils/Logger.h"
108
#include "utils/logging_utils.h"
11-
#include "config/yaml_config.h"
129

1310
namespace commands {
1411
namespace {
@@ -73,7 +70,8 @@ void ChatCompletionCmd::Exec(const std::string& host, int port,
7370

7471
// Only check if llamacpp engine
7572
if ((mc.engine.find("llamacpp") != std::string::npos) &&
76-
!commands::ModelStatusCmd().IsLoaded(host, port, model_handle)) {
73+
!commands::ModelStatusCmd(model_service_)
74+
.IsLoaded(host, port, model_handle)) {
7775
CLI_LOG("Model is not loaded yet!");
7876
return;
7977
}
@@ -144,4 +142,4 @@ void ChatCompletionCmd::Exec(const std::string& host, int port,
144142
}
145143
}
146144

147-
}; // namespace commands
145+
}; // namespace commands

engine/commands/chat_completion_cmd.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,16 +3,21 @@
33
#include <vector>
44
#include "config/model_config.h"
55
#include "nlohmann/json.hpp"
6+
#include "services/model_service.h"
67

78
namespace commands {
89
class ChatCompletionCmd {
910
public:
11+
explicit ChatCompletionCmd(const ModelService& model_service)
12+
: model_service_{model_service} {};
13+
1014
void Exec(const std::string& host, int port, const std::string& model_handle,
1115
std::string msg);
1216
void Exec(const std::string& host, int port, const std::string& model_handle,
1317
const config::ModelConfig& mc, std::string msg);
1418

1519
private:
1620
std::vector<nlohmann::json> histories_;
21+
ModelService model_service_;
1722
};
18-
} // namespace commands
23+
} // namespace commands

engine/commands/model_start_cmd.cc

Lines changed: 2 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,16 @@
11
#include "model_start_cmd.h"
2-
#include "cortex_upd_cmd.h"
3-
#include "database/models.h"
4-
#include "httplib.h"
5-
#include "model_status_cmd.h"
6-
#include "nlohmann/json.hpp"
7-
#include "server_start_cmd.h"
8-
#include "services/model_service.h"
9-
#include "trantor/utils/Logger.h"
10-
#include "utils/file_manager_utils.h"
112
#include "utils/logging_utils.h"
123

134
namespace commands {
145
bool ModelStartCmd::Exec(const std::string& host, int port,
156
const std::string& model_handle) {
16-
ModelService ms;
17-
auto res = ms.StartModel(host, port, model_handle);
7+
auto res = model_service_.StartModel(host, port, model_handle);
188

199
if (res.has_error()) {
2010
CLI_LOG("Error: " + res.error());
2111
return false;
2212
}
13+
2314
CLI_LOG("Model loaded!");
2415
return true;
2516
}

engine/commands/model_start_cmd.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,17 @@
11
#pragma once
22
#include <string>
3+
#include "services/model_service.h"
34

45
namespace commands {
56

67
class ModelStartCmd {
78
public:
9+
explicit ModelStartCmd(const ModelService& model_service)
10+
: model_service_{model_service} {};
11+
812
bool Exec(const std::string& host, int port, const std::string& model_handle);
913

14+
private:
15+
ModelService model_service_;
1016
};
1117
} // namespace commands
Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,15 @@
11
#include "model_status_cmd.h"
2-
#include "config/yaml_config.h"
3-
#include "database/models.h"
4-
#include "httplib.h"
5-
#include "nlohmann/json.hpp"
62
#include "utils/logging_utils.h"
7-
#include "services/model_service.h"
83

94
namespace commands {
105
bool ModelStatusCmd::IsLoaded(const std::string& host, int port,
116
const std::string& model_handle) {
12-
ModelService ms;
13-
auto res = ms.GetModelStatus(host, port, model_handle);
7+
auto res = model_service_.GetModelStatus(host, port, model_handle);
148

159
if (res.has_error()) {
16-
// CLI_LOG("Error: " + res.error());
10+
CTL_ERR("Error: " + res.error());
1711
return false;
1812
}
1913
return true;
2014
}
21-
} // namespace commands
15+
} // namespace commands

engine/commands/model_status_cmd.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,18 @@
11
#pragma once
22
#include <string>
3+
#include "services/model_service.h"
34

45
namespace commands {
56

67
class ModelStatusCmd {
78
public:
9+
explicit ModelStatusCmd(const ModelService& model_service)
10+
: model_service_{model_service} {};
11+
812
bool IsLoaded(const std::string& host, int port,
913
const std::string& model_handle);
14+
15+
private:
16+
ModelService model_service_;
1017
};
11-
} // namespace commands
18+
} // namespace commands

engine/commands/model_stop_cmd.cc

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,11 @@
11
#include "model_stop_cmd.h"
2-
#include "config/yaml_config.h"
3-
#include "database/models.h"
4-
#include "httplib.h"
5-
#include "nlohmann/json.hpp"
6-
#include "utils/file_manager_utils.h"
72
#include "utils/logging_utils.h"
8-
#include "services/model_service.h"
93

104
namespace commands {
115

126
void ModelStopCmd::Exec(const std::string& host, int port,
137
const std::string& model_handle) {
14-
ModelService ms;
15-
auto res = ms.StopModel(host, port, model_handle);
8+
auto res = model_service_.StopModel(host, port, model_handle);
169

1710
if (res.has_error()) {
1811
CLI_LOG("Error: " + res.error());

engine/commands/model_stop_cmd.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,18 @@
11
#pragma once
22

33
#include <string>
4-
#include "config/model_config.h"
4+
#include "services/model_service.h"
55

66
namespace commands {
77

88
class ModelStopCmd {
99
public:
10+
explicit ModelStopCmd(const ModelService& model_service)
11+
: model_service_{model_service} {};
12+
1013
void Exec(const std::string& host, int port, const std::string& model_handle);
14+
15+
private:
16+
ModelService model_service_;
1117
};
1218
} // namespace commands

engine/commands/run_cmd.cc

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
#include "chat_completion_cmd.h"
33
#include "config/yaml_config.h"
44
#include "database/models.h"
5-
#include "model_start_cmd.h"
65
#include "model_status_cmd.h"
76
#include "server_start_cmd.h"
87
#include "utils/logging_utils.h"
@@ -72,16 +71,24 @@ void RunCmd::Exec(bool chat_flag) {
7271
// If it is llamacpp, then check model status first
7372
{
7473
if ((mc.engine.find("llamacpp") == std::string::npos) ||
75-
!commands::ModelStatusCmd().IsLoaded(host_, port_, *model_id)) {
76-
if (!ModelStartCmd().Exec(host_, port_, *model_id)) {
74+
!commands::ModelStatusCmd(model_service_)
75+
.IsLoaded(host_, port_, *model_id)) {
76+
77+
auto result = model_service_.StartModel(host_, port_, *model_id);
78+
if (result.has_error()) {
79+
CLI_LOG("Error: " + result.error());
80+
return;
81+
}
82+
if (!result.value()) {
83+
CLI_LOG("Error: Failed to start model");
7784
return;
7885
}
7986
}
8087
}
8188

8289
// Chat
8390
if (chat_flag) {
84-
ChatCompletionCmd().Exec(host_, port_, *model_id, mc, "");
91+
ChatCompletionCmd(model_service_).Exec(host_, port_, *model_id, mc, "");
8592
} else {
8693
CLI_LOG(*model_id << " model started successfully. Use `"
8794
<< commands::GetCortexBinary() << " chat " << *model_id

engine/controllers/command_line_parser.cc

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
#include "commands/run_cmd.h"
2121
#include "commands/server_start_cmd.h"
2222
#include "commands/server_stop_cmd.h"
23-
#include "config/yaml_config.h"
2423
#include "services/engine_service.h"
2524
#include "utils/file_manager_utils.h"
2625
#include "utils/logging_utils.h"
@@ -35,6 +34,7 @@ constexpr const auto kSubcommands = "Subcommands";
3534
} // namespace
3635
CommandLineParser::CommandLineParser()
3736
: app_("Cortex.cpp CLI"),
37+
model_service_{ModelService(std::make_shared<DownloadService>())},
3838
engine_service_{EngineService(std::make_shared<DownloadService>())} {}
3939

4040
bool CommandLineParser::SetupCommand(int argc, char** argv) {
@@ -152,7 +152,7 @@ void CommandLineParser::SetupCommonCommands() {
152152
chat_cmd->add_option("model_id", cml_data_.model_id, "");
153153
chat_cmd->add_option("-m,--message", cml_data_.msg,
154154
"Message to chat with model");
155-
chat_cmd->callback([this, chat_cmd] {
155+
chat_cmd->callback([&, chat_cmd] {
156156
if (cml_data_.model_id.empty()) {
157157
CLI_LOG("[model_id] is required\n");
158158
CLI_LOG(chat_cmd->help());
@@ -164,10 +164,10 @@ void CommandLineParser::SetupCommonCommands() {
164164
std::stoi(cml_data_.config.apiServerPort),
165165
cml_data_.model_id);
166166
} else {
167-
commands::ChatCompletionCmd().Exec(
168-
cml_data_.config.apiServerHost,
169-
std::stoi(cml_data_.config.apiServerPort), cml_data_.model_id,
170-
cml_data_.msg);
167+
commands::ChatCompletionCmd(model_service_)
168+
.Exec(cml_data_.config.apiServerHost,
169+
std::stoi(cml_data_.config.apiServerPort), cml_data_.model_id,
170+
cml_data_.msg);
171171
}
172172
});
173173
}
@@ -198,15 +198,15 @@ void CommandLineParser::SetupModelCommands() {
198198
" models start [model_id]");
199199
model_start_cmd->add_option("model_id", cml_data_.model_id, "");
200200
model_start_cmd->group(kSubcommands);
201-
model_start_cmd->callback([this, model_start_cmd]() {
201+
model_start_cmd->callback([&, model_start_cmd]() {
202202
if (cml_data_.model_id.empty()) {
203203
CLI_LOG("[model_id] is required\n");
204204
CLI_LOG(model_start_cmd->help());
205205
return;
206206
};
207-
commands::ModelStartCmd().Exec(cml_data_.config.apiServerHost,
208-
std::stoi(cml_data_.config.apiServerPort),
209-
cml_data_.model_id);
207+
commands::ModelStartCmd(model_service_)
208+
.Exec(cml_data_.config.apiServerHost,
209+
std::stoi(cml_data_.config.apiServerPort), cml_data_.model_id);
210210
});
211211

212212
auto stop_model_cmd =
@@ -215,15 +215,15 @@ void CommandLineParser::SetupModelCommands() {
215215
" models stop [model_id]");
216216
stop_model_cmd->group(kSubcommands);
217217
stop_model_cmd->add_option("model_id", cml_data_.model_id, "");
218-
stop_model_cmd->callback([this, stop_model_cmd]() {
218+
stop_model_cmd->callback([&, stop_model_cmd]() {
219219
if (cml_data_.model_id.empty()) {
220220
CLI_LOG("[model_id] is required\n");
221221
CLI_LOG(stop_model_cmd->help());
222222
return;
223223
};
224-
commands::ModelStopCmd().Exec(cml_data_.config.apiServerHost,
225-
std::stoi(cml_data_.config.apiServerPort),
226-
cml_data_.model_id);
224+
commands::ModelStopCmd(model_service_)
225+
.Exec(cml_data_.config.apiServerHost,
226+
std::stoi(cml_data_.config.apiServerPort), cml_data_.model_id);
227227
});
228228

229229
auto list_models_cmd =

0 commit comments

Comments (0)