fix: Correct LLM model paths and remove unnecessary cd command

- Change model paths to use ./data/llm/ instead of ../../../../ paths relative to the build dir
- Remove the cd command when starting llama-server so the botserver root stays the cwd
- This fixes model loading when the servers are started from different directories (see the sketch below)
- Both LLM and embedding servers now start successfully
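
For illustration, a minimal sketch of the new path construction (the model filenames below are hypothetical; it assumes the botserver process keeps the repository root as its working directory):

```rust
fn main() {
    // Hypothetical model filenames; the real names come from configuration.
    let llm_model = "llm.gguf";
    let embedding_model = "embedding.gguf";

    // New scheme: paths are relative to the botserver root, which remains
    // the working directory now that the `cd` into llama.cpp is gone.
    let llm_model_path = format!("./data/llm/{}", llm_model);
    let embedding_model_path = format!("./data/llm/{}", embedding_model);

    assert_eq!(llm_model_path, "./data/llm/llm.gguf");
    assert_eq!(embedding_model_path, "./data/llm/embedding.gguf");
}
```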

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
Rodrigo Rodriguez 2026-02-15 20:15:17 +00:00
parent e9a428ab1c
commit 1cee912b72


@@ -94,9 +94,10 @@ pub async fn ensure_llama_servers_running(
         embedding_model
     };
-    // For llama-server startup, we need the full path
-    let llm_model_path = format!("{}/../../../../data/llm/{}", llm_server_path, llm_model);
-    let embedding_model_path = format!("{}/../../../../data/llm/{}", llm_server_path, embedding_model);
+    // For llama-server startup, use path relative to botserver root
+    // The models are in ./data/llm/ and the llama-server runs from botserver root
+    let llm_model_path = format!("./data/llm/{}", llm_model);
+    let embedding_model_path = format!("./data/llm/{}", embedding_model);
 
     if !llm_server_enabled {
         info!("Local LLM server management disabled (llm-server=false). Using external endpoints.");
         info!("  LLM URL: {llm_url}");
@@ -440,10 +441,10 @@ pub fn start_llm_server(
         })?;
     } else {
         let cmd_arg = format!(
-            "cd {llama_cpp_path} && ./llama-server {args} --verbose >llm-stdout.log 2>&1 &"
+            "{llama_cpp_path}/llama-server {args} --verbose >{llama_cpp_path}/llm-stdout.log 2>&1 &"
         );
         info!(
-            "Executing LLM server command: cd {llama_cpp_path} && ./llama-server {args} --verbose"
+            "Executing LLM server command: {llama_cpp_path}/llama-server {args} --verbose"
         );
         let cmd = SafeCommand::new("sh")
             .and_then(|c| c.arg("-c"))
@@ -468,9 +469,13 @@ pub async fn start_embedding_server(
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
     let port = extract_port(&url);
-    let full_model_path = if model_path.starts_with('/') {
+    // model_path is already the full path (constructed with ../../../../data/llm/ prefix)
+    // Only prepend llama_cpp_path if model_path is a simple filename (not a path)
+    let full_model_path = if model_path.contains('/') || model_path.contains('.') {
+        // model_path is already a full or relative path, use as-is
         model_path.clone()
     } else {
+        // model_path is just a filename, prepend llama_cpp_path
         format!("{llama_cpp_path}/{model_path}")
     };
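
The replacement condition above treats any value containing a '/' or a '.' as an already-usable path, so a bare filename with an extension is also used as-is. A standalone sketch of that heuristic; the helper name `resolve_model_path` is hypothetical:

```rust
// Standalone version of the heuristic above; `resolve_model_path` is a
// hypothetical name used for illustration only.
fn resolve_model_path(llama_cpp_path: &str, model_path: &str) -> String {
    if model_path.contains('/') || model_path.contains('.') {
        // Already a full or relative path; note that bare filenames with
        // an extension also match the '.' check and are used as-is.
        model_path.to_string()
    } else {
        // A plain name with no separator or extension gets the
        // llama.cpp directory prepended.
        format!("{llama_cpp_path}/{model_path}")
    }
}

fn main() {
    assert_eq!(
        resolve_model_path("/opt/llama.cpp", "./data/llm/llm.gguf"),
        "./data/llm/llm.gguf"
    );
    assert_eq!(resolve_model_path("/opt/llama.cpp", "llm"), "/opt/llama.cpp/llm");
}
```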
@@ -500,10 +505,10 @@ pub async fn start_embedding_server(
         })?;
     } else {
         let cmd_arg = format!(
-            "cd {llama_cpp_path} && ./llama-server -m {model_path} --verbose --host 0.0.0.0 --port {port} --embedding --n-gpu-layers 99 --ubatch-size 2048 >llmembd-stdout.log 2>&1 &"
+            "{llama_cpp_path}/llama-server -m {model_path} --verbose --host 0.0.0.0 --port {port} --embedding --n-gpu-layers 99 --ubatch-size 2048 >{llama_cpp_path}/llmembd-stdout.log 2>&1 &"
         );
         info!(
-            "Executing embedding server command: cd {llama_cpp_path} && ./llama-server -m {model_path} --host 0.0.0.0 --port {port} --embedding"
+            "Executing embedding server command: {llama_cpp_path}/llama-server -m {model_path} --host 0.0.0.0 --port {port} --embedding"
         );
         let cmd = SafeCommand::new("sh")
             .and_then(|c| c.arg("-c"))
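
SafeCommand is project-specific, so as a rough sketch of the same spawn pattern, here is an approximation with std::process::Command, assuming plain sh -c semantics and hypothetical paths:

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // Hypothetical locations, for illustration only.
    let llama_cpp_path = "/opt/llama.cpp/build/bin";
    let model_path = "./data/llm/embedding.gguf";
    let port = 8082;

    // Without a leading `cd`, relative paths in the command resolve against
    // this process's cwd (the botserver root), matching the new model paths.
    let cmd_arg = format!(
        "{llama_cpp_path}/llama-server -m {model_path} --host 0.0.0.0 --port {port} --embedding >{llama_cpp_path}/llmembd-stdout.log 2>&1 &"
    );
    Command::new("sh").arg("-c").arg(&cmd_arg).status()?;
    Ok(())
}
```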