fix: redirect LLM server logs to local files instead of deep path
Adjusted the command strings used to start the LLM and embedding servers on both Windows and Unix. - Replaced the previous log redirection target `../../../../logs/llm/stdout.log` with simple file names relative to the server directory (`llm-stdout.log` for the LLM server and `stdout.log` for the embedding server). - Updated both the normal and embedding server launch commands to use the new paths. Because each command first runs `cd {llama_cpp_path}`, the logs are now always written inside the llama.cpp directory itself. This simplifies log management and fixes failures where the old deeply nested relative path could resolve to a nonexistent or inaccessible location.
This commit is contained in:
parent
e7e84c6cfc
commit
dca836a429
1 changed file with 4 additions and 4 deletions
|
|
@ -230,14 +230,14 @@ pub async fn start_llm_server(
|
||||||
if cfg!(windows) {
|
if cfg!(windows) {
|
||||||
let mut cmd = tokio::process::Command::new("cmd");
|
let mut cmd = tokio::process::Command::new("cmd");
|
||||||
cmd.arg("/C").arg(format!(
|
cmd.arg("/C").arg(format!(
|
||||||
"cd {} && .\\llama-server.exe {} --verbose>../../../../logs/llm/stdout.log",
|
"cd {} && .\\llama-server.exe {} --verbose>llm-stdout.log",
|
||||||
llama_cpp_path, args
|
llama_cpp_path, args
|
||||||
));
|
));
|
||||||
cmd.spawn()?;
|
cmd.spawn()?;
|
||||||
} else {
|
} else {
|
||||||
let mut cmd = tokio::process::Command::new("sh");
|
let mut cmd = tokio::process::Command::new("sh");
|
||||||
cmd.arg("-c").arg(format!(
|
cmd.arg("-c").arg(format!(
|
||||||
"cd {} && ./llama-server {} --verbose >../../../../logs/llm/stdout.log 2>&1 &",
|
"cd {} && ./llama-server {} --verbose >llm-stdout.log 2>&1 &",
|
||||||
llama_cpp_path, args
|
llama_cpp_path, args
|
||||||
));
|
));
|
||||||
cmd.spawn()?;
|
cmd.spawn()?;
|
||||||
|
|
@ -256,14 +256,14 @@ pub async fn start_embedding_server(
|
||||||
if cfg!(windows) {
|
if cfg!(windows) {
|
||||||
let mut cmd = tokio::process::Command::new("cmd");
|
let mut cmd = tokio::process::Command::new("cmd");
|
||||||
cmd.arg("/c").arg(format!(
|
cmd.arg("/c").arg(format!(
|
||||||
"cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log",
|
"cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >stdout.log",
|
||||||
llama_cpp_path, model_path, port
|
llama_cpp_path, model_path, port
|
||||||
));
|
));
|
||||||
cmd.spawn()?;
|
cmd.spawn()?;
|
||||||
} else {
|
} else {
|
||||||
let mut cmd = tokio::process::Command::new("sh");
|
let mut cmd = tokio::process::Command::new("sh");
|
||||||
cmd.arg("-c").arg(format!(
|
cmd.arg("-c").arg(format!(
|
||||||
"cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log 2>&1 &",
|
"cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >stdout.log 2>&1 &",
|
||||||
llama_cpp_path, model_path, port
|
llama_cpp_path, model_path, port
|
||||||
));
|
));
|
||||||
cmd.spawn()?;
|
cmd.spawn()?;
|
||||||
|
|
|
||||||
Loading…
Add table
Reference in a new issue