From cc741b378e532f90d3e61d7bc35a0248c5e4608f Mon Sep 17 00:00:00 2001
From: "Rodrigo Rodriguez (Pragmatismo)"
Date: Wed, 5 Nov 2025 14:15:12 -0300
Subject: [PATCH] feat: simplify LLM prompt and add debug logging

- Simplified build_llm_prompt by removing redundant formatting
- Added info logging for LLM model and processed content
- Updated README with development philosophy note
- Adjusted announcement schedule timing from 55 to 59 minutes past the hour
---
 prompts/dev/platform/README.md                |  1 +
 src/basic/keywords/llm_keyword.rs             | 10 +++++-----
 .../announcements.gbdialog/update-summary.bas |  2 +-
 test_llm.rhai                                 |  3 +++
 4 files changed, 10 insertions(+), 6 deletions(-)
 create mode 100644 test_llm.rhai

diff --git a/prompts/dev/platform/README.md b/prompts/dev/platform/README.md
index 66284cb9..09e652e7 100644
--- a/prompts/dev/platform/README.md
+++ b/prompts/dev/platform/README.md
@@ -14,3 +14,4 @@ When initial attempts fail, sequentially try these LLMs:
 - **On error**: Stop and consult Claude for guidance
 - **Change progression**: Start with DeepSeek, conclude with gpt-oss-120b
 - **Final validation**: Use prompt "cargo check" with gpt-oss-120b
+- Be humble: one requirement, one commit. But sometimes the freedom of chaos is welcome - when no deadlines are set.
\ No newline at end of file

diff --git a/src/basic/keywords/llm_keyword.rs b/src/basic/keywords/llm_keyword.rs
index 1c8e0c45..cc6670a8 100644
--- a/src/basic/keywords/llm_keyword.rs
+++ b/src/basic/keywords/llm_keyword.rs
@@ -65,10 +65,7 @@ pub fn llm_keyword(state: Arc, _user: UserSession, engine: &mut Engine
 /// Builds a consistent LLM prompt used by all Rhai scripts.
 /// You can change the style/structure here to guide the model's behavior.
 fn build_llm_prompt(user_text: &str) -> String {
-    format!(
-        "User: {}",
-        user_text.trim()
-    )
+    user_text.trim().to_string()
 }
 
 /// Runs the async LLM provider call safely.
@@ -81,11 +78,14 @@ pub async fn execute_llm_generation(
         .get_config(&Uuid::nil(), "llm-model", None)
         .unwrap_or_default();
 
+    info!("Using LLM model: {}", model);
     let handler = crate::llm_models::get_handler(&model);
     let raw_response = state
         .llm_provider
         .generate(&prompt, &serde_json::Value::Null)
         .await?;
 
-    Ok(handler.process_content(&raw_response))
+    let processed = handler.process_content(&raw_response);
+    info!("Processed content: {}", processed);
+    Ok(processed)
 }

diff --git a/templates/announcements.gbai/announcements.gbdialog/update-summary.bas b/templates/announcements.gbai/announcements.gbdialog/update-summary.bas
index e5bb4d55..16360829 100644
--- a/templates/announcements.gbai/announcements.gbdialog/update-summary.bas
+++ b/templates/announcements.gbai/announcements.gbdialog/update-summary.bas
@@ -1,4 +1,4 @@
-SET_SCHEDULE "55 * * * *"
+SET_SCHEDULE "59 * * * *"
 
 let text = GET "announcements.gbkb/news/news.pdf"
 let resume = LLM "In a few words, resume this: " + text

diff --git a/test_llm.rhai b/test_llm.rhai
new file mode 100644
index 00000000..eedbcc6b
--- /dev/null
+++ b/test_llm.rhai
@@ -0,0 +1,3 @@
+// Simple test script for LLM keyword
+let result = LLM "Hello world";
+result
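
Note (not part of the commit): the two info!(...) calls added in
execute_llm_generation assume an `info!` macro is already in scope in
llm_keyword.rs (e.g. `use log::info;` or `use tracing::info;`); the hunk
context does not show the import, so the final "cargo check" validation
would fail if none exists. A minimal standalone sketch of the simplified
flow, assuming the `log` and `env_logger` crates; `fake_generate` is a
hypothetical stand-in for the provider call, not the repo's real API:

    use log::info;

    /// Mirrors the patched helper: pass the user text through verbatim,
    /// only trimmed (no "User: " framing anymore).
    fn build_llm_prompt(user_text: &str) -> String {
        user_text.trim().to_string()
    }

    /// Hypothetical stand-in for state.llm_provider.generate(..).
    fn fake_generate(prompt: &str) -> String {
        format!("echo: {}", prompt)
    }

    fn main() {
        env_logger::init(); // run with RUST_LOG=info to see the output
        let prompt = build_llm_prompt("  Hello world  ");
        assert_eq!(prompt, "Hello world");
        info!("Using LLM model: {}", "example-model");
        let processed = fake_generate(&prompt);
        info!("Processed content: {}", processed);
    }

One design consideration: logging the full processed content at info level
can get noisy for long model responses; a debug-level log or a truncated
preview may be worth weighing.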