diff --git a/src/llm/local.rs b/src/llm/local.rs
index 99fcd438..9bc53f44 100644
--- a/src/llm/local.rs
+++ b/src/llm/local.rs
@@ -230,14 +230,14 @@ pub async fn start_llm_server(
     if cfg!(windows) {
         let mut cmd = tokio::process::Command::new("cmd");
         cmd.arg("/C").arg(format!(
-            "cd {} && .\\llama-server.exe {} --verbose>../../../../logs/llm/stdout.log",
+            "cd {} && .\\llama-server.exe {} --verbose>llm-stdout.log",
             llama_cpp_path, args
         ));
         cmd.spawn()?;
     } else {
         let mut cmd = tokio::process::Command::new("sh");
         cmd.arg("-c").arg(format!(
-            "cd {} && ./llama-server {} --verbose >../../../../logs/llm/stdout.log 2>&1 &",
+            "cd {} && ./llama-server {} --verbose >llm-stdout.log 2>&1 &",
             llama_cpp_path, args
         ));
         cmd.spawn()?;
@@ -256,14 +256,14 @@ pub async fn start_embedding_server(
     if cfg!(windows) {
         let mut cmd = tokio::process::Command::new("cmd");
         cmd.arg("/c").arg(format!(
-            "cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log",
+            "cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >stdout.log",
             llama_cpp_path, model_path, port
         ));
         cmd.spawn()?;
     } else {
         let mut cmd = tokio::process::Command::new("sh");
         cmd.arg("-c").arg(format!(
-            "cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log 2>&1 &",
+            "cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >stdout.log 2>&1 &",
             llama_cpp_path, model_path, port
         ));
         cmd.spawn()?;
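
For reference, below is roughly the shape of the patched spawn expressed without the shell indirection. This is a minimal sketch, not code from the repository: spawn_llama_server is a hypothetical helper assuming the same llama_cpp_path and args values the diff shows, and it uses tokio's Command API (current_dir plus Stdio redirection) in place of the formatted "cd ... && ..." string handed to sh/cmd.

use std::fs::File;
use std::path::Path;
use std::process::Stdio;

// Hypothetical sketch: the spawn from the diff without `sh -c` / `cmd /C`.
// Assumes the same `llama_cpp_path` and `args` as the patched functions.
async fn spawn_llama_server(
    llama_cpp_path: &str,
    args: &[String],
) -> std::io::Result<tokio::process::Child> {
    // The patch writes the log next to the server binary instead of the old
    // ../../../../logs/llm/stdout.log path; mirror that choice here.
    let log = File::create(Path::new(llama_cpp_path).join("llm-stdout.log"))?;
    let err = log.try_clone()?; // second handle so stderr shares the file

    let exe = if cfg!(windows) { "llama-server.exe" } else { "llama-server" };
    let binary = Path::new(llama_cpp_path).join(exe);

    let mut cmd = tokio::process::Command::new(binary);
    cmd.current_dir(llama_cpp_path) // replaces the `cd {}` in the shell string
        .args(args)
        .arg("--verbose")
        .stdout(Stdio::from(log))
        .stderr(Stdio::from(err)); // the Unix branch's `2>&1`
    cmd.spawn() // child runs concurrently; no trailing `&` needed
}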