botserver/src/basic/keywords/llm_keyword.rs

39 lines
1.2 KiB
Rust
Raw Normal View History

2025-10-11 12:29:03 -03:00
use crate::shared::models::UserSession;
2025-10-11 20:02:14 -03:00
use crate::shared::state::AppState;
use log::info;
2025-10-06 10:30:17 -03:00
use rhai::{Dynamic, Engine};
2025-10-11 20:02:14 -03:00
pub fn llm_keyword(state: &AppState, _user: UserSession, engine: &mut Engine) {
let state_clone = state.clone();
2025-10-06 10:30:17 -03:00
engine
2025-10-11 20:02:14 -03:00
.register_custom_syntax(&["LLM", "$expr$"], false, move |context, inputs| {
let text = context.eval_expression_tree(&inputs[0])?.to_string();
2025-10-06 10:30:17 -03:00
info!("LLM processing text: {}", text);
2025-10-06 10:30:17 -03:00
let state_inner = state_clone.clone();
let fut = execute_llm_generation(state_inner, text);
2025-10-11 20:02:14 -03:00
let result =
tokio::task::block_in_place(|| tokio::runtime::Handle::current().block_on(fut))
.map_err(|e| format!("LLM generation failed: {}", e))?;
2025-10-06 10:30:17 -03:00
2025-10-11 20:02:14 -03:00
Ok(Dynamic::from(result))
})
2025-10-06 10:30:17 -03:00
.unwrap();
}
/// Sends `prompt` to the configured LLM provider and returns the generated
/// text, wrapping any provider error into a boxed error with context.
pub async fn execute_llm_generation(
    state: AppState,
    prompt: String,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
    info!("Starting LLM generation for prompt: '{}'", prompt);
    // No extra generation options are passed; the provider sees a null config.
    let outcome = state.llm_provider.generate(&prompt, &serde_json::Value::Null).await;
    match outcome {
        Ok(text) => Ok(text),
        Err(e) => Err(format!("LLM call failed: {}", e).into()),
    }
}