use crate::shared::models::UserSession;
use crate::shared::state::AppState;
use log::{error, info};
use rhai::{Dynamic, Engine};
use std::sync::Arc;
use std::time::Duration;
use uuid::Uuid;

/// Registers the custom `LLM` keyword on the given Rhai engine.
///
/// Syntax: `LLM <expr>` — the expression is evaluated, turned into a prompt,
/// and sent to the configured LLM provider; the generated text is returned
/// to the script as a string.
pub fn llm_keyword(state: Arc<AppState>, _user: UserSession, engine: &mut Engine) {
    let state_clone = Arc::clone(&state);
    engine
        .register_custom_syntax(&["LLM", "$expr$"], false, move |context, inputs| {
            let text = context.eval_expression_tree(&inputs[0])?.to_string();
            info!("LLM keyword processing text: {}", text);

            let state_for_thread = Arc::clone(&state_clone);
            let prompt = build_llm_prompt(&text);

            // Run the async provider call on a dedicated OS thread with its own
            // Tokio runtime, so the caller's thread (and any runtime it may be
            // running on) is never blocked on or nested inside another runtime.
            let (tx, rx) = std::sync::mpsc::channel();
            std::thread::spawn(move || {
                let rt = tokio::runtime::Builder::new_multi_thread()
                    .worker_threads(2)
                    .enable_all()
                    .build();
                let send_err = if let Ok(rt) = rt {
                    let result = rt.block_on(async move {
                        execute_llm_generation(state_for_thread, prompt).await
                    });
                    tx.send(result).err()
                } else {
                    tx.send(Err("failed to build tokio runtime".into())).err()
                };
                if send_err.is_some() {
                    error!("Failed to send LLM thread result");
                }
            });

            match rx.recv_timeout(Duration::from_secs(500)) {
                Ok(Ok(result)) => Ok(Dynamic::from(result)),
                Ok(Err(e)) => Err(Box::new(rhai::EvalAltResult::ErrorRuntime(
                    e.to_string().into(),
                    rhai::Position::NONE,
                ))),
                Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
                    Err(Box::new(rhai::EvalAltResult::ErrorRuntime(
                        "LLM generation timed out".into(),
                        rhai::Position::NONE,
                    )))
                }
                Err(e) => Err(Box::new(rhai::EvalAltResult::ErrorRuntime(
                    format!("LLM thread failed: {e}").into(),
                    rhai::Position::NONE,
                ))),
            }
        })
        .unwrap();
}

/// Builds a consistent LLM prompt used by all Rhai scripts.
/// Adjust the style/structure here to guide the model's behavior.
fn build_llm_prompt(user_text: &str) -> String {
    user_text.trim().to_string()
}

/// Resolves the configured model, calls the LLM provider, and post-processes
/// the raw response into the final text returned to the script.
pub async fn execute_llm_generation(
    state: Arc<AppState>,
    prompt: String,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
    let config_manager = crate::config::ConfigManager::new(Arc::clone(&state.conn));
    let model = config_manager
        .get_config(&Uuid::nil(), "llm-model", None)
        .unwrap_or_default();
    info!("Using LLM model: {}", model);

    let handler = crate::llm_models::get_handler(&model);
    let raw_response = state
        .llm_provider
        .generate(&prompt, &serde_json::Value::Null)
        .await?;
    let processed = handler.process_content(&raw_response);
    info!("Processed content: {}", processed);

    Ok(processed)
}
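
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the module's API). Once
// `llm_keyword` has registered the custom syntax on an engine, a Rhai script
// can invoke it as `LLM <expression>`, for example:
//
//     let summary = LLM "Summarize the incident report in two sentences";
//     print(summary);
//
// On the Rust side, registration looks roughly like the following, assuming
// an `Arc<AppState>` and a `UserSession` are already available (how they are
// constructed is outside this module):
//
//     let mut engine = Engine::new();
//     llm_keyword(Arc::clone(&state), user, &mut engine);
//     let answer = engine.eval::<String>(r#"LLM "Hello there""#)?;
// ---------------------------------------------------------------------------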