fix(llm): Respect llm-server config flag to skip local server startup

When llm-server=false in bot_configuration, the code now skips
attempting to start local llama-server processes. This prevents
the 60-attempt timeout error when using external LLM endpoints
or when local LLM serving is intentionally disabled.
Rodrigo Rodriguez (Pragmatismo) 2025-12-08 23:39:01 -03:00
parent f3e38d8d8b
commit 21855fab99
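
As a rough illustration of the gating described in the commit message, below is a minimal, self-contained Rust sketch. The `lookup` closure and `should_manage_local_llm` helper are hypothetical stand-ins for the database-backed ConfigManager; any names and URLs not present in the diff below are assumptions, not part of the actual change.

    use std::collections::HashMap;

    // Hypothetical stand-in for ConfigManager::get_config: returns None when the
    // key is missing, like a failed lookup against bot_configuration.
    fn should_manage_local_llm(lookup: impl Fn(&str) -> Option<String>) -> bool {
        // A missing or non-"true" value disables local llama-server management,
        // mirroring the unwrap_or_else(|_| "false") default in the diff.
        lookup("llm-server")
            .unwrap_or_else(|| "false".to_string())
            .to_lowercase()
            == "true"
    }

    fn main() {
        let config: HashMap<&str, &str> = HashMap::from([
            ("llm-server", "false"),
            ("llm-url", "https://llm.example.com/v1"),
        ]);

        if !should_manage_local_llm(|key: &str| config.get(key).map(|v| v.to_string())) {
            // Early-return path: no llama-server processes are spawned, so the
            // 60-attempt readiness wait never runs; external endpoints are used.
            println!("Local LLM server management disabled; using external endpoints.");
            return;
        }
        println!("Starting local llama-server processes...");
    }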


@@ -23,6 +23,9 @@ pub async fn ensure_llama_servers_running(
         let config_manager = ConfigManager::new(app_state.conn.clone());
         (
             default_bot_id,
+            config_manager
+                .get_config(&default_bot_id, "llm-server", None)
+                .unwrap_or_else(|_| "false".to_string()),
             config_manager
                 .get_config(&default_bot_id, "llm-url", None)
                 .unwrap_or_default(),
@@ -40,8 +43,24 @@ pub async fn ensure_llama_servers_running(
                 .unwrap_or_default(),
         )
     };
-    let (_default_bot_id, llm_url, llm_model, embedding_url, embedding_model, llm_server_path) =
-        config_values;
+    let (
+        _default_bot_id,
+        llm_server_enabled,
+        llm_url,
+        llm_model,
+        embedding_url,
+        embedding_model,
+        llm_server_path,
+    ) = config_values;
+
+    // Check if local LLM server management is enabled
+    let llm_server_enabled = llm_server_enabled.to_lowercase() == "true";
+    if !llm_server_enabled {
+        info!("Local LLM server management disabled (llm-server=false). Using external endpoints.");
+        info!(" LLM URL: {}", llm_url);
+        info!(" Embedding URL: {}", embedding_url);
+        return Ok(());
+    }
     info!("Starting LLM servers...");
     info!("Configuration:");
     info!(" LLM URL: {}", llm_url);