feat: add detailed logging for LLM server startup commands
Add `info!` statements that output the exact command used to launch the LLM server on both Windows and Unix platforms. This enhances observability and aids debugging by showing the constructed command line before the process is spawned.
This commit is contained in:
parent
c4c0a1d693
commit
312503ae66
1 changed file with 2 additions and 0 deletions
|
|
@ -237,6 +237,7 @@ pub async fn start_llm_server(
|
|||
"cd {} && .\\llama-server.exe {} --verbose>llm-stdout.log",
|
||||
llama_cpp_path, args
|
||||
));
|
||||
info!("Executing LLM server command: cd {} && .\\llama-server.exe {} --verbose", llama_cpp_path, args);
|
||||
cmd.spawn()?;
|
||||
} else {
|
||||
let mut cmd = tokio::process::Command::new("sh");
|
||||
|
|
@ -244,6 +245,7 @@ pub async fn start_llm_server(
|
|||
"cd {} && ./llama-server {} --verbose >llm-stdout.log 2>&1 &",
|
||||
llama_cpp_path, args
|
||||
));
|
||||
info!("Executing LLM server command: cd {} && ./llama-server {} --verbose", llama_cpp_path, args);
|
||||
cmd.spawn()?;
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue