
Commit 719de33

fix: support path with special characters on windows (#1730)
* fix: utf8
* fix: uft8 for cli
* fix: codecvt_utf8_utf16 is deprecated
* fix: more
* feat: support wstring string conversion
* fix: build
* fix: engine path env
* fix: wstring
* fix: cli start server
* fix: utf8 file
* fix: get env
* fix: db
* fix: e2e
* fix: e2e
* fix: cli delete
* fix: comment
* fix: e2e windows
* fix: e2e windows continue
* fix: e2e windows skip because of progress bar log issue
* fix: add sleep in case of cuda for e2e
* fix: import

Co-authored-by: vansangpfiev <sang@jan.ai>
1 parent: 43dab3b

24 files changed: +340 / -140 lines

engine/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
@@ -28,6 +28,10 @@ if(MSVC)
     $<$<CONFIG:Debug>:/MTd> #---|-- Statically link the runtime libraries
     $<$<CONFIG:Release>:/MT> #--|
   )
+
+  add_compile_options(/utf-8)
+  add_definitions(-DUNICODE -D_UNICODE)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /DUNICODE /D_UNICODE")
 endif()
 
 if(NOT DEFINED CORTEX_VARIANT)
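
A note on the two new flags (the sketch below is an illustration, not code from this commit): /utf-8 tells MSVC to treat both the source and execution character sets as UTF-8, so narrow string literals and std::string contents stay UTF-8, while UNICODE/_UNICODE make the generic Win32 macros resolve to their wide-character (...W) entry points. A minimal illustration, assuming a Windows build:

// Illustration only; not code from this commit.
#include <windows.h>
#include <cstdio>

int main() {
#if defined(UNICODE) && defined(_UNICODE)
  // With -DUNICODE/-D_UNICODE, TCHAR-based macros such as CreateProcess
  // expand to the wide (W) variants, e.g. CreateProcess -> CreateProcessW.
  std::puts("UNICODE build: generic Win32 APIs expect wchar_t* strings");
#endif
  // With /utf-8 this narrow literal is stored as UTF-8 bytes, so a path
  // containing non-ASCII characters survives inside std::string unchanged.
  const char* utf8_path = "C:\\Users\\nguyễn\\cortexcpp";
  std::printf("%s\n", utf8_path);
  return 0;
}

The CLI target below receives the same two additions.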

engine/cli/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
@@ -26,6 +26,10 @@ if(MSVC)
     $<$<CONFIG:Debug>:/MTd> #---|-- Statically link the runtime libraries
     $<$<CONFIG:Release>:/MT> #--|
   )
+
+  add_compile_options(/utf-8)
+  add_definitions(-DUNICODE -D_UNICODE)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /DUNICODE /D_UNICODE")
 endif()
 
 if(NOT DEFINED CORTEX_VARIANT)

engine/cli/commands/server_start_cmd.cc

Lines changed: 26 additions & 16 deletions
@@ -2,6 +2,7 @@
 #include "commands/cortex_upd_cmd.h"
 #include "utils/cortex_utils.h"
 #include "utils/file_manager_utils.h"
+#include "utils/widechar_conv.h"
 
 namespace commands {
 
@@ -57,24 +58,32 @@ bool ServerStartCmd::Exec(const std::string& host, int port,
   ZeroMemory(&si, sizeof(si));
   si.cb = sizeof(si);
   ZeroMemory(&pi, sizeof(pi));
-  std::string params = "--start-server";
-  params += " --config_file_path " + get_config_file_path();
-  params += " --data_folder_path " + get_data_folder_path();
-  params += " --loglevel " + log_level_;
-  std::string cmds = cortex_utils::GetCurrentPath() + "/" + exe + " " + params;
+  std::wstring params = L"--start-server";
+  params += L" --config_file_path " +
+            file_manager_utils::GetConfigurationPath().wstring();
+  params += L" --data_folder_path " +
+            file_manager_utils::GetCortexDataPath().wstring();
+  params += L" --loglevel " + cortex::wc::Utf8ToWstring(log_level_);
+  std::wstring exe_w = cortex::wc::Utf8ToWstring(exe);
+  std::wstring current_path_w =
+      file_manager_utils::GetExecutableFolderContainerPath().wstring();
+  std::wstring wcmds = current_path_w + L"/" + exe_w + L" " + params;
+  CTL_DBG("wcmds: " << wcmds);
+  std::vector<wchar_t> mutable_cmds(wcmds.begin(), wcmds.end());
+  mutable_cmds.push_back(L'\0');
   // Create child process
   if (!CreateProcess(
           NULL,  // No module name (use command line)
-          const_cast<char*>(
-              cmds.c_str()),  // Command line (replace with your actual executable)
-          NULL,   // Process handle not inheritable
-          NULL,   // Thread handle not inheritable
-          FALSE,  // Set handle inheritance to FALSE
-          0,      // No creation flags
-          NULL,   // Use parent's environment block
-          NULL,   // Use parent's starting directory
-          &si,    // Pointer to STARTUPINFO structure
-          &pi))   // Pointer to PROCESS_INFORMATION structure
+          mutable_cmds
+              .data(),  // Command line (replace with your actual executable)
+          NULL,   // Process handle not inheritable
+          NULL,   // Thread handle not inheritable
+          FALSE,  // Set handle inheritance
+          0,      // No creation flags
+          NULL,   // Use parent's environment block
+          NULL,   // Use parent's starting directory
+          &si,    // Pointer to STARTUPINFO structure
+          &pi))   // Pointer to PROCESS_INFORMATION structure
   {
     std::cout << "Could not start server: " << GetLastError() << std::endl;
     return false;
@@ -115,7 +124,8 @@ bool ServerStartCmd::Exec(const std::string& host, int port,
     std::string p = cortex_utils::GetCurrentPath() + "/" + exe;
     execl(p.c_str(), exe.c_str(), "--start-server", "--config_file_path",
           get_config_file_path().c_str(), "--data_folder_path",
-          get_data_folder_path().c_str(), "--loglevel", log_level_.c_str(), (char*)0);
+          get_data_folder_path().c_str(), "--loglevel", log_level_.c_str(),
+          (char*)0);
   } else {
     // Parent process
     if (!TryConnectToServer(host, port)) {
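
The conversion helper cortex::wc::Utf8ToWstring comes from utils/widechar_conv.h, which is not shown in this excerpt, so the sketch below is only a guess at what such a UTF-8 to UTF-16 routine typically looks like on Windows (MultiByteToWideChar with CP_UTF8). Separately, the mutable_cmds vector exists because CreateProcessW is allowed to modify the command-line buffer it receives, so the code copies wcmds into a writable buffer instead of passing a const string.

// Hypothetical sketch of a UTF-8 -> UTF-16 helper; the real implementation
// lives in utils/widechar_conv.h and may differ.
#include <windows.h>
#include <string>

std::wstring Utf8ToWstring(const std::string& utf8) {
  if (utf8.empty()) return {};
  int len = MultiByteToWideChar(CP_UTF8, 0, utf8.data(),
                                static_cast<int>(utf8.size()), nullptr, 0);
  std::wstring wide(static_cast<size_t>(len), L'\0');
  MultiByteToWideChar(CP_UTF8, 0, utf8.data(), static_cast<int>(utf8.size()),
                      wide.data(), len);
  return wide;  // ready for CreateProcessW and other ...W APIs
}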

engine/cli/main.cc

Lines changed: 15 additions & 4 deletions
@@ -25,6 +25,9 @@
 #error "Unsupported platform!"
 #endif
 
+#include <codecvt>
+#include <locale>
+
 void RemoveBinaryTempFileIfExists() {
   auto temp =
       file_manager_utils::GetExecutableFolderContainerPath() / "cortex_temp";
@@ -40,11 +43,20 @@ RemoveBinaryTempFileIfExists() {
 void SetupLogger(trantor::FileLogger& async_logger, bool verbose) {
   if (!verbose) {
     auto config = file_manager_utils::GetCortexConfig();
+
     std::filesystem::create_directories(
+#if defined(_WIN32)
+        std::filesystem::u8path(config.logFolderPath) /
+#else
         std::filesystem::path(config.logFolderPath) /
+#endif
         std::filesystem::path(cortex_utils::logs_folder));
-    async_logger.setFileName(config.logFolderPath + "/" +
-                             cortex_utils::logs_cli_base_name);
+
+    // Do not need to use u8path here because trantor handles itself
+    async_logger.setFileName(
+        (std::filesystem::path(config.logFolderPath) /
+         std::filesystem::path(cortex_utils::logs_cli_base_name))
+            .string());
     async_logger.setMaxLines(config.maxLogLines);  // Keep last 100000 lines
     async_logger.startLogging();
     trantor::Logger::setOutputFunction(
@@ -192,8 +204,7 @@ int main(int argc, char* argv[]) {
   // Check if server exists, if not notify to user to install server
   auto exe = commands::GetCortexServerBinary();
   auto server_binary_path =
-      std::filesystem::path(cortex_utils::GetCurrentPath()) /
-      std::filesystem::path(exe);
+      file_manager_utils::GetExecutableFolderContainerPath() / exe;
   if (!std::filesystem::exists(server_binary_path)) {
     std::cout << CORTEX_CPP_VERSION
               << " requires server binary, to install server, run: "

engine/database/database.h

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ class Database {
 
  private:
   Database()
-      : db_(file_manager_utils::GetCortexDataPath().string() + "/cortex.db",
+      : db_(file_manager_utils::GetCortexDataPath() / "cortex.db",
             SQLite::OPEN_READWRITE | SQLite::OPEN_CREATE) {}
   SQLite::Database db_;
 };
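
Joining with operator/ keeps the database location as a std::filesystem::path, so the wide, Unicode-aware representation survives on Windows instead of being forced through .string(), which converts to the narrow ANSI encoding and can corrupt a data folder path with special characters. A minimal sketch of the idea (CortexDbPath is a hypothetical helper, not code from the diff):

// Minimal sketch: defer any narrow-string conversion by keeping the
// database location as a std::filesystem::path.
#include <filesystem>

std::filesystem::path CortexDbPath(const std::filesystem::path& data_dir) {
  return data_dir / "cortex.db";  // the consumer decides if/how to convert
}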

engine/e2e-test/test_api_engine_uninstall.py

Lines changed: 16 additions & 15 deletions
@@ -1,4 +1,5 @@
 import pytest
+import time
 import requests
 from test_runner import (
     run,
@@ -21,26 +22,26 @@ def setup_and_teardown(self):
 
         # Teardown
         stop_server()
-
-    def test_engines_uninstall_llamacpp_should_be_successful(self):
-        # install first, using cli for synchronously
-        run(
-            "Install Engine",
-            ["engines", "install", "llama-cpp"],
-            timeout=120,
-            capture=False,
-        )
+
+    @pytest.mark.asyncio
+    async def test_engines_uninstall_llamacpp_should_be_successful(self):
+        response = requests.post("http://localhost:3928/v1/engines/llama-cpp/install")
+        assert response.status_code == 200
+        await wait_for_websocket_download_success_event(timeout=None)
+        time.sleep(30)
+
         response = requests.delete("http://localhost:3928/v1/engines/llama-cpp/install")
         assert response.status_code == 200
 
-    def test_engines_uninstall_llamacpp_with_only_version_should_be_failed(self):
+    @pytest.mark.asyncio
+    async def test_engines_uninstall_llamacpp_with_only_version_should_be_failed(self):
         # install first
-        run(
-            "Install Engine",
-            ["engines", "install", "llama-cpp", "-v", "v0.1.35"],
-            timeout=None,
-            capture=False,
+        data = {"variant": "mac-arm64"}
+        install_response = requests.post(
+            "http://127.0.0.1:3928/v1/engines/llama-cpp/install", json=data
         )
+        await wait_for_websocket_download_success_event(timeout=120)
+        assert install_response.status_code == 200
 
         data = {"version": "v0.1.35"}
         response = requests.delete(

engine/e2e-test/test_api_model_start.py

Lines changed: 20 additions & 10 deletions
@@ -1,8 +1,10 @@
 import pytest
+import time
 import requests
 from test_runner import run, start_server, stop_server
-
-
+from test_runner import (
+    wait_for_websocket_download_success_event
+)
 class TestApiModelStart:
 
     @pytest.fixture(autouse=True)
@@ -12,20 +14,28 @@ def setup_and_teardown(self):
         success = start_server()
         if not success:
             raise Exception("Failed to start server")
-        run("Install engine", ["engines", "install", "llama-cpp"], 5 * 60)
         run("Delete model", ["models", "delete", "tinyllama:gguf"])
-        run(
-            "Pull model",
-            ["pull", "tinyllama:gguf"],
-            timeout=None,
-        )
 
         yield
 
         # Teardown
         stop_server()
-
-    def test_models_start_should_be_successful(self):
+
+    @pytest.mark.asyncio
+    async def test_models_start_should_be_successful(self):
+        response = requests.post("http://localhost:3928/v1/engines/llama-cpp/install")
+        assert response.status_code == 200
+        await wait_for_websocket_download_success_event(timeout=None)
+        # TODO(sang) need to fix for cuda download
+        time.sleep(30)
+
+        json_body = {
+            "model": "tinyllama:gguf"
+        }
+        response = requests.post("http://localhost:3928/v1/models/pull", json=json_body)
+        assert response.status_code == 200, f"Failed to pull model: tinyllama:gguf"
+        await wait_for_websocket_download_success_event(timeout=None)
+
         json_body = {"model": "tinyllama:gguf"}
         response = requests.post(
             "http://localhost:3928/v1/models/start", json=json_body

engine/e2e-test/test_api_model_stop.py

Lines changed: 11 additions & 3 deletions
@@ -1,7 +1,10 @@
 import pytest
+import time
 import requests
 from test_runner import run, start_server, stop_server
-
+from test_runner import (
+    wait_for_websocket_download_success_event
+)
 
 class TestApiModelStop:
 
@@ -13,14 +16,19 @@ def setup_and_teardown(self):
         if not success:
             raise Exception("Failed to start server")
 
-        run("Install engine", ["engines", "install", "llama-cpp"], 5 * 60)
         yield
 
         run("Uninstall engine", ["engines", "uninstall", "llama-cpp"])
         # Teardown
         stop_server()
 
-    def test_models_stop_should_be_successful(self):
+    @pytest.mark.asyncio
+    async def test_models_stop_should_be_successful(self):
+        response = requests.post("http://localhost:3928/v1/engines/llama-cpp/install")
+        assert response.status_code == 200
+        await wait_for_websocket_download_success_event(timeout=None)
+        time.sleep(30)
+
         json_body = {"model": "tinyllama:gguf"}
         response = requests.post(
             "http://localhost:3928/v1/models/start", json=json_body

engine/e2e-test/test_cli_engine_install.py

Lines changed: 3 additions & 0 deletions
@@ -19,6 +19,7 @@ def setup_and_teardown(self):
         # Teardown
         stop_server()
 
+    @pytest.mark.skipif(platform.system() == "Windows", reason="Progress bar log issue on Windows")
     def test_engines_install_llamacpp_should_be_successfully(self):
         exit_code, output, error = run(
             "Install Engine",
@@ -46,6 +47,7 @@ def test_engines_install_onnx_on_tensorrt_should_be_failed(self):
         assert "is not supported on" in output, "Should display error message"
         assert exit_code == 0, f"Install engine failed with error: {error}"
 
+    @pytest.mark.skipif(platform.system() == "Windows", reason="Progress bar log issue on Windows")
     def test_engines_install_pre_release_llamacpp(self):
         engine_version = "v0.1.29"
         exit_code, output, error = run(
@@ -67,6 +69,7 @@ def test_engines_install_pre_release_llamacpp(self):
         assert is_engine_version_exist, f"Engine version {engine_version} is not found"
         assert exit_code == 0, f"Install engine failed with error: {error}"
 
+    @pytest.mark.skipif(platform.system() == "Windows", reason="Progress bar log issue on Windows")
     def test_engines_should_fallback_to_download_llamacpp_engine_if_not_exists(self):
         exit_code, output, error = run(
             "Install Engine",

engine/e2e-test/test_cli_engine_uninstall.py

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ def setup_and_teardown(self):
     @pytest.mark.asyncio
     async def test_engines_uninstall_llamacpp_should_be_successfully(self):
         requests.post("http://127.0.0.1:3928/v1/engines/llama-cpp/install")
-        await wait_for_websocket_download_success_event(timeout=120)
+        await wait_for_websocket_download_success_event(timeout=None)
         exit_code, output, error = run(
             "Uninstall engine", ["engines", "uninstall", "llama-cpp"]
         )
