From dca836a42988e9222b43df44a185561245cb2beb Mon Sep 17 00:00:00 2001 From: "Rodrigo Rodriguez (Pragmatismo)" Date: Fri, 7 Nov 2025 16:19:48 -0300 Subject: [PATCH] fix: redirect LLM server logs to local files instead of deep path Adjusted the command strings used to start the LLM and embedding servers on both Windows and Unix. - Replaced the previous log redirection `../../../../logs/llm/stdout.log` with simpler local files (`llm-stdout.log` and `stdout.log`). - Updated both normal and embedding server launch commands to use the new paths. This change simplifies log management: since each launch command first runs `cd` into the llama.cpp directory, the logs are now always written next to the server binary, regardless of the process's original working directory. This resolves failures where the previous `../../../../logs/llm/` relative path could be invalid or inaccessible. --- src/llm/local.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/llm/local.rs b/src/llm/local.rs index 99fcd438..9bc53f44 100644 --- a/src/llm/local.rs +++ b/src/llm/local.rs @@ -230,14 +230,14 @@ pub async fn start_llm_server( if cfg!(windows) { let mut cmd = tokio::process::Command::new("cmd"); cmd.arg("/C").arg(format!( - "cd {} && .\\llama-server.exe {} --verbose>../../../../logs/llm/stdout.log", + "cd {} && .\\llama-server.exe {} --verbose>llm-stdout.log", llama_cpp_path, args )); cmd.spawn()?; } else { let mut cmd = tokio::process::Command::new("sh"); cmd.arg("-c").arg(format!( - "cd {} && ./llama-server {} --verbose >../../../../logs/llm/stdout.log 2>&1 &", + "cd {} && ./llama-server {} --verbose >llm-stdout.log 2>&1 &", llama_cpp_path, args )); cmd.spawn()?; @@ -256,14 +256,14 @@ pub async fn start_embedding_server( if cfg!(windows) { let mut cmd = tokio::process::Command::new("cmd"); cmd.arg("/c").arg(format!( - "cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log", + "cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >stdout.log", llama_cpp_path, model_path, port 
)); cmd.spawn()?; } else { let mut cmd = tokio::process::Command::new("sh"); cmd.arg("-c").arg(format!( - "cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log 2>&1 &", + "cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >stdout.log 2>&1 &", llama_cpp_path, model_path, port )); cmd.spawn()?;