Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit 7938c34

Browse files
authored
Merge pull request #1708 from janhq/dev
Sync dev to main 1.0.3-rc4
2 parents bc217f3 + 027002f commit 7938c34

File tree

15 files changed

+283
-195
lines changed

15 files changed

+283
-195
lines changed

docs/sidebars.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -104,7 +104,7 @@ const sidebars: SidebarsConfig = {
104104
type: "doc",
105105
id: "configurations/token",
106106
label: "Token",
107-
}
107+
},
108108
],
109109
},
110110
{

engine/cli/command_line_parser.cc

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -602,8 +602,9 @@ void CommandLineParser::SetupSystemCommands() {
602602
<< " to " << cml_data_.port);
603603
auto config_path = file_manager_utils::GetConfigurationPath();
604604
cml_data_.config.apiServerPort = std::to_string(cml_data_.port);
605-
auto result = config_yaml_utils::DumpYamlConfig(cml_data_.config,
606-
config_path.string());
605+
auto result =
606+
config_yaml_utils::CortexConfigMgr::GetInstance().DumpYamlConfig(
607+
cml_data_.config, config_path.string());
607608
if (result.has_error()) {
608609
CLI_LOG("Error update " << config_path.string() << result.error());
609610
}

engine/cli/commands/cortex_upd_cmd.cc

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -192,8 +192,9 @@ std::optional<std::string> CheckNewUpdate(
192192
CTL_INF("Got the latest release, update to the config file: "
193193
<< latest_version)
194194
config.latestRelease = latest_version;
195-
auto result = config_yaml_utils::DumpYamlConfig(
196-
config, file_manager_utils::GetConfigurationPath().string());
195+
auto result =
196+
config_yaml_utils::CortexConfigMgr::GetInstance().DumpYamlConfig(
197+
config, file_manager_utils::GetConfigurationPath().string());
197198
if (result.has_error()) {
198199
CTL_ERR("Error update "
199200
<< file_manager_utils::GetConfigurationPath().string()

engine/cli/main.cc

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -151,8 +151,9 @@ int main(int argc, char* argv[]) {
151151
.count();
152152
config.latestLlamacppRelease = res.value();
153153

154-
auto upd_config_res = config_yaml_utils::DumpYamlConfig(
155-
config, file_manager_utils::GetConfigurationPath().string());
154+
auto upd_config_res =
155+
config_yaml_utils::CortexConfigMgr::GetInstance().DumpYamlConfig(
156+
config, file_manager_utils::GetConfigurationPath().string());
156157
if (upd_config_res.has_error()) {
157158
CTL_ERR("Failed to update config file: " << upd_config_res.error());
158159
} else {

engine/controllers/models.cc

Lines changed: 9 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -442,6 +442,14 @@ void Models::StartModel(
442442
// model_path has higher priority
443443
if (auto& o = (*(req->getJsonObject()))["llama_model_path"]; !o.isNull()) {
444444
params_override.model_path = o.asString();
445+
if (auto& mp = (*(req->getJsonObject()))["model_path"]; mp.isNull()) {
446+
// Bypass if model does not exist in DB and llama_model_path exists
447+
if (std::filesystem::exists(params_override.model_path.value()) &&
448+
!model_service_->HasModel(model_handle)) {
449+
CTL_INF("llama_model_path exists, bypass check model id");
450+
params_override.bypass_llama_model_path = true;
451+
}
452+
}
445453
}
446454

447455
if (auto& o = (*(req->getJsonObject()))["model_path"]; !o.isNull()) {
@@ -489,7 +497,7 @@ void Models::StartModel(
489497
auto& v = result.value();
490498
Json::Value ret;
491499
ret["message"] = "Started successfully!";
492-
if(v.warning) {
500+
if (v.warning) {
493501
ret["warning"] = *(v.warning);
494502
}
495503
auto resp = cortex_utils::CreateCortexHttpJsonResponse(ret);

engine/main.cc

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -51,7 +51,8 @@ void RunServer(std::optional<int> port, bool ignore_cout) {
5151
auto config_path = file_manager_utils::GetConfigurationPath();
5252
config.apiServerPort = std::to_string(*port);
5353
auto result =
54-
config_yaml_utils::DumpYamlConfig(config, config_path.string());
54+
config_yaml_utils::CortexConfigMgr::GetInstance().DumpYamlConfig(
55+
config, config_path.string());
5556
if (result.has_error()) {
5657
CTL_ERR("Error update " << config_path.string() << result.error());
5758
}

engine/services/engine_service.cc

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -899,7 +899,7 @@ cpp::result<void, std::string> EngineService::LoadEngine(
899899
CTL_WRN("Method SetFileLogger is not supported yet");
900900
}
901901
if (en->IsSupported("SetLogLevel")) {
902-
en->SetLogLevel(trantor::Logger::logLevel());
902+
en->SetLogLevel(logging_utils_helper::global_log_level);
903903
} else {
904904
CTL_WRN("Method SetLogLevel is not supported yet");
905905
}

engine/services/hardware_service.cc

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -57,6 +57,7 @@ HardwareInfo HardwareService::GetHardwareInfo() {
5757
}
5858

5959
bool HardwareService::Restart(const std::string& host, int port) {
60+
namespace luh = logging_utils_helper;
6061
if (!ahc_)
6162
return true;
6263
auto exe = commands::GetCortexServerBinary();
@@ -117,6 +118,7 @@ bool HardwareService::Restart(const std::string& host, int port) {
117118
std::string params = "--ignore_cout";
118119
params += " --config_file_path " + get_config_file_path();
119120
params += " --data_folder_path " + get_data_folder_path();
121+
params += " --loglevel " + luh::LogLevelStr(luh::global_log_level);
120122
std::string cmds = cortex_utils::GetCurrentPath() + "/" + exe + " " + params;
121123
// Create child process
122124
if (!CreateProcess(
@@ -168,7 +170,8 @@ bool HardwareService::Restart(const std::string& host, int port) {
168170
std::string p = cortex_utils::GetCurrentPath() + "/" + exe;
169171
execl(p.c_str(), exe.c_str(), "--ignore_cout", "--config_file_path",
170172
get_config_file_path().c_str(), "--data_folder_path",
171-
get_data_folder_path().c_str(), "--loglevel", "INFO", (char*)0);
173+
get_data_folder_path().c_str(), "--loglevel",
174+
luh::LogLevelStr(luh::global_log_level).c_str(), (char*)0);
172175
} else {
173176
// Parent process
174177
if (!TryConnectToServer(host, port)) {

engine/services/model_service.cc

Lines changed: 6 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -381,6 +381,10 @@ cpp::result<std::string, std::string> ModelService::HandleUrl(
381381
return unique_model_id;
382382
}
383383

384+
bool ModelService::HasModel(const std::string& id) const {
385+
return cortex::db::Models().HasModel(id);
386+
}
387+
384388
cpp::result<DownloadTask, std::string>
385389
ModelService::DownloadModelFromCortexsoAsync(
386390
const std::string& name, const std::string& branch,
@@ -745,7 +749,8 @@ cpp::result<StartModelResult, std::string> ModelService::StartModel(
745749
return cpp::fail(
746750
"Not enough VRAM - required: " + std::to_string(vram_needed_MiB) +
747751
" MiB, available: " + std::to_string(free_vram_MiB) +
748-
" MiB - Should adjust ngl to " + std::to_string(free_vram_MiB / (vram_needed_MiB / ngl) - 1));
752+
" MiB - Should adjust ngl to " +
753+
std::to_string(free_vram_MiB / (vram_needed_MiB / ngl) - 1));
749754
}
750755

751756
if (ram_needed_MiB > free_ram_MiB) {

engine/services/model_service.h

Lines changed: 9 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -3,10 +3,10 @@
33
#include <memory>
44
#include <optional>
55
#include <string>
6+
#include "common/engine_servicei.h"
67
#include "config/model_config.h"
78
#include "services/download_service.h"
89
#include "services/inference_service.h"
9-
#include "common/engine_servicei.h"
1010

1111
struct ModelPullInfo {
1212
std::string id;
@@ -26,12 +26,15 @@ struct StartParameterOverride {
2626
std::optional<std::string> cache_type;
2727
std::optional<std::string> mmproj;
2828
std::optional<std::string> model_path;
29-
bool bypass_model_check() const { return mmproj.has_value(); }
29+
bool bypass_llama_model_path = false;
30+
bool bypass_model_check() const {
31+
return mmproj.has_value() || bypass_llama_model_path;
32+
}
3033
};
3134

3235
struct StartModelResult {
33-
bool success;
34-
std::optional<std::string> warning;
36+
bool success;
37+
std::optional<std::string> warning;
3538
};
3639

3740
class ModelService {
@@ -89,6 +92,8 @@ class ModelService {
8992
const std::string& url, std::optional<std::string> temp_model_id,
9093
std::optional<std::string> temp_name);
9194

95+
bool HasModel(const std::string& id) const;
96+
9297
private:
9398
/**
9499
* Handle downloading model which have following pattern: author/model_name

0 commit comments

Comments (0)