From adda3bcea015eb9e7fc0b935ad46d558d17ede22 Mon Sep 17 00:00:00 2001
From: "Rodrigo Rodriguez (Pragmatismo)"
Date: Mon, 3 Nov 2025 13:12:05 -0300
Subject: [PATCH] feat(llm): add model selection and response processing

Added support for selecting an LLM model based on configuration and
processing the raw response. The execute_llm_generation function now:

1. Fetches the configured model using ConfigManager
2. Gets the appropriate handler for the model
3. Processes the raw response through the handler before returning

This provides more flexibility in model selection and allows for
model-specific response handling.
---
 src/basic/keywords/llm_keyword.rs | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/basic/keywords/llm_keyword.rs b/src/basic/keywords/llm_keyword.rs
index 20fef284c..79392bcd7 100644
--- a/src/basic/keywords/llm_keyword.rs
+++ b/src/basic/keywords/llm_keyword.rs
@@ -2,6 +2,7 @@
 use crate::shared::models::UserSession;
 use crate::shared::state::AppState;
 use log::{error, info};
 use rhai::{Dynamic, Engine};
+use uuid::Uuid;
 use std::sync::Arc;
 use std::time::Duration;
@@ -80,10 +81,16 @@
 pub async fn execute_llm_generation(
     state: Arc<AppState>,
     prompt: String,
 ) -> Result<Dynamic, Box<dyn std::error::Error>> {
+    let config_manager = crate::config::ConfigManager::new(Arc::clone(&state.conn));
+    let model = config_manager
+        .get_config(&Uuid::nil(), "llm-model", None)
+        .unwrap_or_default();
-    state
+    let handler = crate::llm_models::get_handler(&model);
+    let raw_response = state
         .llm_provider
         .generate(&prompt, &serde_json::Value::Null)
-        .await
-        .map_err(|e| format!("LLM call failed: {}", e).into())
+        .await?;
+
+    Ok(handler.process_content(&raw_response))
 }