diff --git a/src/automation/mod.rs b/src/automation/mod.rs
index 11a2aa4f..d452ec91 100644
--- a/src/automation/mod.rs
+++ b/src/automation/mod.rs
@@ -245,15 +245,26 @@ impl AutomationService {
         let day = dt.day() as i32;
         let month = dt.month() as i32;
         let weekday = dt.weekday().num_days_from_monday() as i32;
-        let match_result = [minute, hour, day, month, weekday]
-            .iter()
-            .enumerate()
-            .all(|(i, &val)| Self::cron_part_matches(parts[i], val));
+
+        // More strict matching with additional logging
+        let minute_match = Self::cron_part_matches(parts[0], minute);
+        let hour_match = Self::cron_part_matches(parts[1], hour);
+        let day_match = Self::cron_part_matches(parts[2], day);
+        let month_match = Self::cron_part_matches(parts[3], month);
+        let weekday_match = Self::cron_part_matches(parts[4], weekday);
+
+        let match_result = minute_match && hour_match && day_match && month_match && weekday_match;
+
         trace!(
-            "Cron pattern='{}' result={} at {}",
+            "Cron pattern='{}' result={} at {} (minute={}, hour={}, day={}, month={}, weekday={})",
             pattern,
             match_result,
-            dt
+            dt,
+            minute_match,
+            hour_match,
+            day_match,
+            month_match,
+            weekday_match
         );
         match_result
     }
@@ -288,30 +299,42 @@ impl AutomationService {
         match redis_client.get_multiplexed_async_connection().await {
             Ok(mut conn) => {
                 trace!("Connected to Redis; checking if job '{}' is running", param);
-                let is_running: Result<bool, redis::RedisError> = redis::cmd("EXISTS")
+
+                // Use SET with NX (only set if not exists) and EX (expire) for atomic operation
+                let set_result: Result<String, redis::RedisError> = redis::cmd("SET")
                     .arg(&redis_key)
-                    .query_async(&mut conn)
-                    .await;
-
-                if let Ok(true) = is_running {
-                    warn!(
-                        "Job '{}' is already running for bot '{}'; skipping execution",
-                        param, bot_id
-                    );
-                }
-
-                let _: Result<(), redis::RedisError> = redis::cmd("SETEX")
-                    .arg(&redis_key)
-                    .arg(300)
                     .arg("1")
+                    .arg("NX")
+                    .arg("EX")
+                    .arg(300)
                     .query_async(&mut conn)
                     .await;
-                trace!("Job '{}' marked as running in Redis", param);
+
+                match set_result {
+                    Ok(res) if res == "OK" => {
+                        trace!("Acquired lock for job '{}'", param);
+                    }
+                    Ok(_) => {
+                        warn!(
+                            "Job '{}' is already running for bot '{}'; skipping execution",
+                            param, bot_id
+                        );
+                        return Ok(());
+                    }
+                    Err(e) => {
+                        warn!("Redis error checking job status for '{}': {}", param, e);
+                        return Ok(()); // Skip execution if we can't verify lock status
+                    }
+                }
             }
             Err(e) => {
                 warn!("Failed to connect to Redis for job tracking: {}", e);
+                return Ok(()); // Skip execution if we can't connect to Redis
             }
         }
+    } else {
+        warn!("Redis client not available for job tracking");
+        return Ok(()); // Skip execution if Redis isn't configured
     }
 
     let bot_name: String = {
diff --git a/src/llm/local.rs b/src/llm/local.rs
index 28c108fd..b06ff45a 100644
--- a/src/llm/local.rs
+++ b/src/llm/local.rs
@@ -226,14 +226,14 @@ pub async fn start_llm_server(
     if cfg!(windows) {
         let mut cmd = tokio::process::Command::new("cmd");
         cmd.arg("/C").arg(format!(
-            "cd {} && .\\llama-server.exe {} >../../../../logs/llm/stdout.log",
+            "cd {} && .\\llama-server.exe {} --verbose >../../../../logs/llm/stdout.log",
             llama_cpp_path, args
         ));
         cmd.spawn()?;
     } else {
         let mut cmd = tokio::process::Command::new("sh");
         cmd.arg("-c").arg(format!(
-            "cd {} && ./llama-server {} >../../../../logs/llm/stdout.log 2>&1 &",
+            "cd {} && ./llama-server {} --verbose >../../../../logs/llm/stdout.log 2>&1 &",
             llama_cpp_path, args
         ));
         cmd.spawn()?;
@@ -252,14 +252,14 @@ pub async fn start_embedding_server(
     if cfg!(windows) {
         let mut cmd = tokio::process::Command::new("cmd");
         cmd.arg("/c").arg(format!(
-            "cd {} && .\\llama-server.exe -m {} --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log",
+            "cd {} && .\\llama-server.exe -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log",
             llama_cpp_path, model_path, port
         ));
         cmd.spawn()?;
     } else {
         let mut cmd = tokio::process::Command::new("sh");
         cmd.arg("-c").arg(format!(
-            "cd {} && ./llama-server -m {} --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log 2>&1 &",
+            "cd {} && ./llama-server -m {} --verbose --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log 2>&1 &",
             llama_cpp_path, model_path, port
         ));
         cmd.spawn()?;
diff --git a/templates/announcements.gbai/announcements.gbdialog/update-summary.bas b/templates/announcements.gbai/announcements.gbdialog/update-summary.bas
index d4b2a89d..4ba2541e 100644
--- a/templates/announcements.gbai/announcements.gbdialog/update-summary.bas
+++ b/templates/announcements.gbai/announcements.gbdialog/update-summary.bas
@@ -1,7 +1,7 @@
 SET_SCHEDULE "37 * * * *"
 
 let text = GET "announcements.gbkb/news/news.pdf"
-let resume = LLM "In a short phrase, resume this: " + text
+let resume = LLM "In a few words, resume this: " + text
 
 SET_BOT_MEMORY "resume", resume
 
diff --git a/templates/default.gbai/default.gbot/config.csv b/templates/default.gbai/default.gbot/config.csv
index deb99347..15752e54 100644
--- a/templates/default.gbai/default.gbot/config.csv
+++ b/templates/default.gbai/default.gbot/config.csv
@@ -17,8 +17,8 @@ llm-server-host,0.0.0.0
 llm-server-port,8081
 llm-server-gpu-layers,0
 llm-server-n-moe,0
-llm-server-ctx-size,2048
-llm-server-parallel,2
+llm-server-ctx-size,512
+llm-server-parallel,6
 llm-server-cont-batching,true
 llm-server-mlock,false
 llm-server-no-mmap,false