feat: include context length info in BotResponse messages

Add `context_length` and `context_max_length` fields to the `BotResponse` struct and to every place a `BotResponse` is constructed across the codebase.
Existing construction sites initialize the new fields to 0; the main response paths populate `context_max_length` from the bot's configured `llm-server-ctx-size` value, read through `ConfigManager`, while `context_length` stays at 0 for now.
Import `ConfigManager` in `bot/mod.rs` to access configuration.
Also redirect the spawned llama-server and embedding-server output to `logs/llm/stdout.log`.
These changes enable tracking of the current and maximum context sizes in bot responses, supporting future features that rely on context management.
Author: Rodrigo Rodriguez (Pragmatismo), 2025-11-02 12:46:37 -03:00
parent 6e36384f45
commit 6244c99854
4 changed files with 57 additions and 16 deletions
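
Both bot/mod.rs hunks below repeat the same configuration lookup inline. For reference, the pattern could be factored into a small helper along these lines. This is only a sketch built from the calls visible in the diff (`ConfigManager::new`, `get_config` with the `llm-server-ctx-size` key); the helper name, its `&ConfigManager` receiver, and its placement are hypothetical.

    use uuid::Uuid;
    use crate::config::ConfigManager;

    // Hypothetical helper mirroring the inline lookup added below: resolve the
    // configured maximum context size for a bot, falling back to 0 when the bot
    // id does not parse or the "llm-server-ctx-size" setting is missing.
    fn max_context_size(config_manager: &ConfigManager, bot_id: &str) -> usize {
        config_manager
            .get_config(&Uuid::parse_str(bot_id).unwrap_or_default(), "llm-server-ctx-size", None)
            .unwrap_or_default()
            .parse::<usize>()
            .unwrap_or(0)
    }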


@@ -102,6 +102,8 @@ pub async fn execute_talk(state: Arc<AppState>, user: UserSession, message: Stri
is_complete: true,
suggestions,
context_name: None,
context_length: 0,
context_max_length: 0,
};
let user_id = user.id.to_string();


@@ -16,6 +16,7 @@ use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::sync::Mutex as AsyncMutex;
use uuid::Uuid;
use crate::config::ConfigManager;
pub struct BotOrchestrator {
pub state: Arc<AppState>,
@@ -249,6 +250,8 @@ impl BotOrchestrator {
is_complete: true,
suggestions: Vec::new(),
context_name: None,
context_length: 0,
context_max_length: 0,
};
if let Some(adapter) = self.state.channels.lock().unwrap().get(channel) {
@@ -281,6 +284,8 @@ impl BotOrchestrator {
is_complete: true,
suggestions: Vec::new(),
context_name: None,
context_length: 0,
context_max_length: 0,
};
if let Some(adapter) = self.state.channels.lock().unwrap().get(channel) {
@@ -327,6 +332,8 @@ impl BotOrchestrator {
is_complete: true,
suggestions: Vec::new(),
context_name: Some(context_name.to_string()),
context_length: 0,
context_max_length: 0,
};
if let Some(adapter) = self.state.channels.lock().unwrap().get(channel) {
@@ -398,6 +405,8 @@ impl BotOrchestrator {
is_complete: true,
suggestions: Vec::new(),
context_name: None,
context_length: 0,
context_max_length: 0,
};
adapter.send_message(ack_response).await?;
}
@@ -447,6 +456,15 @@ impl BotOrchestrator {
// Create regular response
let channel = message.channel.clone();
let config_manager = ConfigManager::new(Arc::clone(&self.state.conn));
let max_context_size = config_manager
.get_config(&Uuid::parse_str(&message.bot_id).unwrap_or_default(), "llm-server-ctx-size", None)
.unwrap_or_default()
.parse::<usize>()
.unwrap_or(0);
let current_context_length = 0usize;
let bot_response = BotResponse {
bot_id: message.bot_id,
user_id: message.user_id,
@@ -458,6 +476,8 @@ let bot_response = BotResponse {
is_complete: true,
suggestions: Vec::new(),
context_name: None,
context_length: current_context_length,
context_max_length: max_context_size,
};
if let Some(adapter) = self.state.channels.lock().unwrap().get(&channel) {
@@ -737,6 +757,8 @@ if let Some(adapter) = self.state.channels.lock().unwrap().get(&channel) {
is_complete: true,
suggestions: Vec::new(),
context_name: None,
context_length: 0,
context_max_length: 0,
};
response_tx.send(thinking_response).await?;
}
@@ -815,6 +837,8 @@ if let Some(adapter) = self.state.channels.lock().unwrap().get(&channel) {
is_complete: false,
suggestions: suggestions.clone(),
context_name: None,
context_length: 0,
context_max_length: 0,
};
if response_tx.send(partial).await.is_err() {
@@ -833,6 +857,15 @@ if let Some(adapter) = self.state.channels.lock().unwrap().get(&channel) {
sm.save_message(session.id, user_id, 2, &full_response, 1)?;
}
let config_manager = ConfigManager::new(Arc::clone(&self.state.conn));
let max_context_size = config_manager
.get_config(&Uuid::parse_str(&message.bot_id).unwrap_or_default(), "llm-server-ctx-size", None)
.unwrap_or_default()
.parse::<usize>()
.unwrap_or(0);
let current_context_length = 0usize;
let final_msg = BotResponse {
bot_id: message.bot_id,
user_id: message.user_id,
@@ -844,6 +877,8 @@ if let Some(adapter) = self.state.channels.lock().unwrap().get(&channel) {
is_complete: true,
suggestions,
context_name: None,
context_length: current_context_length,
context_max_length: max_context_size,
};
response_tx.send(final_msg).await?;
@@ -978,6 +1013,8 @@ if let Some(adapter) = self.state.channels.lock().unwrap().get(&channel) {
is_complete: true,
suggestions: Vec::new(),
context_name: None,
context_length: 0,
context_max_length: 0,
};
adapter.send_message(warn_response).await
} else {


@@ -270,14 +270,14 @@ async fn start_llm_server(
if cfg!(windows) {
let mut cmd = tokio::process::Command::new("cmd");
cmd.arg("/C").arg(format!(
"cd {} && .\\llama-server.exe {}",
"cd {} && .\\llama-server.exe {} >../../../../logs/llm/stdout.log",
llama_cpp_path, args
));
cmd.spawn()?;
} else {
let mut cmd = tokio::process::Command::new("sh");
cmd.arg("-c").arg(format!(
"cd {} && ./llama-server {} &",
"cd {} && ./llama-server {} >../../../../logs/llm/stdout.log 2>&1 &",
llama_cpp_path, args
));
cmd.spawn()?;
@@ -296,14 +296,14 @@ async fn start_embedding_server(
if cfg!(windows) {
let mut cmd = tokio::process::Command::new("cmd");
cmd.arg("/c").arg(format!(
"cd {} && .\\llama-server.exe -m {} --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99",
"cd {} && .\\llama-server.exe -m {} --log-disable --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log",
llama_cpp_path, model_path, port
));
cmd.spawn()?;
} else {
let mut cmd = tokio::process::Command::new("sh");
cmd.arg("-c").arg(format!(
"cd {} && ./llama-server -m {} --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 &",
"cd {} && ./llama-server -m {} --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log 2>&1 &",
llama_cpp_path, model_path, port
));
cmd.spawn()?;


@@ -137,6 +137,8 @@ pub struct BotResponse {
pub is_complete: bool,
pub suggestions: Vec<Suggestion>,
pub context_name: Option<String>,
pub context_length: usize,
pub context_max_length: usize,
}
#[derive(Debug, Deserialize)]
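
As a usage note beyond this commit: the new fields give channel adapters and clients enough information to reason about how full a bot's context window is. A hypothetical consumer might look like the sketch below; only the `BotResponse` fields come from the diff, while the function and the 0.9 threshold are illustrative assumptions.

    // Hypothetical consumer of the new fields: fraction of the context window in
    // use, or None when no maximum is configured (context_max_length == 0).
    fn context_usage(response: &BotResponse) -> Option<f64> {
        if response.context_max_length == 0 {
            return None;
        }
        Some(response.context_length as f64 / response.context_max_length as f64)
    }

    // Example: a channel adapter could trim history or warn the user when the
    // window is nearly full, e.g. when context_usage(&response) exceeds 0.9.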