fix: redirect LLM server logs to local files instead of deep path
Adjusted the command strings used to start the LLM and embedding servers on both Windows and Unix:
- Replaced the previous log redirection to `../../../../logs/llm/stdout.log` with simpler local files (`llm-stdout.log` for the LLM server and `stdout.log` for the embedding server).
- Updated both the normal and embedding server launch commands to use the new paths.
This change simplifies log management, ensures logs are written next to the server binary (the directory each command `cd`s into) rather than depending on a deep relative path from the caller's working directory, and resolves issues where the previous relative path could be invalid or inaccessible.
This commit is contained in:
parent
14ab06b9f1
commit
3f1a36a1f5
1 changed file with 4 additions and 4 deletions
|
|
@ -230,14 +230,14 @@ pub async fn start_llm_server(
|
|||
if cfg!(windows) {
|
||||
let mut cmd = tokio::process::Command::new("cmd");
|
||||
cmd.arg("/C").arg(format!(
|
||||
"cd {} && .\\llama-server.exe {} --verbose>../../../../logs/llm/stdout.log",
|
||||
"cd {} && .\\llama-server.exe {} --verbose>llm-stdout.log",
|
||||
llama_cpp_path, args
|
||||
));
|
||||
cmd.spawn()?;
|
||||
} else {
|
||||
let mut cmd = tokio::process::Command::new("sh");
|
||||
cmd.arg("-c").arg(format!(
|
||||
"cd {} && ./llama-server {} --verbose >../../../../logs/llm/stdout.log 2>&1 &",
|
||||
"cd {} && ./llama-server {} --verbose >llm-stdout.log 2>&1 &",
|
||||
llama_cpp_path, args
|
||||
));
|
||||
cmd.spawn()?;
|
||||
|
|
@ -256,14 +256,14 @@ pub async fn start_embedding_server(
|
|||
if cfg!(windows) {
|
||||
let mut cmd = tokio::process::Command::new("cmd");
|
||||
cmd.arg("/c").arg(format!(
|
||||
"cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log",
|
||||
"cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >stdout.log",
|
||||
llama_cpp_path, model_path, port
|
||||
));
|
||||
cmd.spawn()?;
|
||||
} else {
|
||||
let mut cmd = tokio::process::Command::new("sh");
|
||||
cmd.arg("-c").arg(format!(
|
||||
"cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log 2>&1 &",
|
||||
"cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >stdout.log 2>&1 &",
|
||||
llama_cpp_path, model_path, port
|
||||
));
|
||||
cmd.spawn()?;
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue