feat: add detailed logging for LLM server startup commands

Add `info!` statements that output the exact command used to launch the LLM server on both Windows and Unix platforms. This enhances observability and aids debugging by showing the constructed command line before the process is spawned.
This commit is contained in:
Rodrigo Rodriguez (Pragmatismo) 2025-11-07 17:12:55 -03:00
parent c4c0a1d693
commit 312503ae66

View file

@@ -237,6 +237,7 @@ pub async fn start_llm_server(
"cd {} && .\\llama-server.exe {} --verbose>llm-stdout.log",
llama_cpp_path, args
));
info!("Executing LLM server command: cd {} && .\\llama-server.exe {} --verbose", llama_cpp_path, args);
cmd.spawn()?;
} else {
let mut cmd = tokio::process::Command::new("sh");
@@ -244,6 +245,7 @@ pub async fn start_llm_server(
"cd {} && ./llama-server {} --verbose >llm-stdout.log 2>&1 &",
llama_cpp_path, args
));
info!("Executing LLM server command: cd {} && ./llama-server {} --verbose", llama_cpp_path, args);
cmd.spawn()?;
}