feat: add trace logging, refactor bot streaming, add config fallback

- Added `trace!` logging in `bot_memory.rs` to record retrieved memory values for easier debugging.
- Refactored `BotOrchestrator` in `bot/mod.rs`:
  - Removed a duplicate session-save block so the user message and bot response are persisted together after the response is generated.
  - Replaced low‑level LLM streaming with a structured `UserMessage` and `stream_response` workflow, improving error handling and readability (see the streaming sketch below).
- Updated configuration loading in `config/mod.rs`:
  - Imported `get_default_bot` and extended `get_config` to fall back to the default bot's configuration when the primary lookup fails (see the fallback sketch below).
  - Established a fresh DB connection for the fallback path to avoid borrowing issues.
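
A minimal caller-side sketch of the new streaming workflow, assuming the `UserMessage`, `BotResponse`, and `stream_response` shapes visible in the bot/mod.rs hunk below; the field values are illustrative, not prescriptive:

use chrono::Utc;
use tokio::sync::mpsc;

// Build a structured message instead of hand-assembling a prompt string.
let user_message = UserMessage {
    bot_id: "default".to_string(),
    user_id: session.user_id.to_string(),
    session_id: session.id.to_string(),
    channel: "web".to_string(),
    content: message.content.clone(),
    message_type: 1,
    media_url: None,
    timestamp: Utc::now(),
    context_name: None,
};

// stream_response pushes BotResponse chunks into the channel;
// the caller drains them into a single string.
let (response_tx, mut response_rx) = mpsc::channel::<BotResponse>(100);
orchestrator.stream_response(user_message, response_tx).await?;
let mut full_response = String::new();
while let Some(chunk) = response_rx.recv().await {
    full_response.push_str(&chunk.content);
}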
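
And a minimal sketch of the config fallback flow, assuming the `ConfigValue` row type, `establish_pg_connection`, and `ConfigManager` seen in the config/mod.rs hunk; the wrapper function name is hypothetical:

// Hypothetical wrapper illustrating the fallback flow from the diff.
fn get_config_or_default(
    conn: &mut PgConnection,
    key: &str,
    fallback: Option<&str>,
) -> Result<String, diesel::result::Error> {
    use diesel::sql_types::Text;
    // First attempt: resolve the key via the current bot's context.
    let primary = diesel::sql_query("SELECT get_config($1, $2) as value")
        .bind::<Text, _>(key)
        .bind::<Text, _>(fallback.unwrap_or(""))
        .get_result::<ConfigValue>(conn)
        .map(|row| row.value);
    match primary {
        Ok(v) => Ok(v),
        Err(_) => {
            // Fall back to the default bot on a fresh connection, so the
            // borrow on `conn` is not held across the ConfigManager call.
            let (default_bot_id, _) = crate::bot::get_default_bot(conn);
            let fresh = establish_pg_connection().map_err(|e| {
                diesel::result::Error::DatabaseError(
                    diesel::result::DatabaseErrorKind::UnableToSendCommand,
                    Box::new(e.to_string()),
                )
            })?;
            ConfigManager::new(Arc::new(Mutex::new(fresh)))
                .get_config(&default_bot_id, key, fallback)
        }
    }
}
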
Author: Rodrigo Rodriguez (Pragmatismo)
Date:   2025-11-03 10:13:39 -03:00
parent  b3415a3db4
commit  b5e1501454

10 changed files with 138 additions and 530 deletions

View file

@@ -1,7 +1,7 @@
use crate::shared::models::UserSession;
use crate::shared::state::AppState;
use diesel::prelude::*;
use log::{error, info};
use log::{error, info, trace};
use rhai::{Dynamic, Engine};
use std::sync::Arc;
use uuid::Uuid;
@@ -133,6 +133,7 @@ pub fn get_bot_memory_keyword(state: Arc<AppState>, user: UserSession, engine: &
.optional()
.unwrap_or(None);
trace!("GET_MEMORY for key '{}' returned value: {:?}", key_param, memory_value);
memory_value.unwrap_or_default()
} else {
String::new()

View file

@@ -455,16 +455,6 @@ impl BotOrchestrator {
return Ok(());
}
{
let mut session_manager = self.state.session_manager.lock().await;
session_manager.save_message(
session.id,
user_id,
1,
&message.content,
message.message_type,
)?;
}
// Handle context change messages (type 4) immediately
@@ -489,6 +479,13 @@ impl BotOrchestrator {
{
let mut session_manager = self.state.session_manager.lock().await;
session_manager.save_message(
session.id,
user_id,
1,
&message.content,
message.message_type,
)?;
session_manager.save_message(session.id, user_id, 2, &response_content, 1)?;
}
@@ -570,19 +567,26 @@ impl BotOrchestrator {
prompt.push_str(&format!("User: {}\nAssistant:", message.content));
let (tx, mut rx) = mpsc::channel::<String>(100);
let llm = self.state.llm_provider.clone();
tokio::spawn(async move {
if let Err(e) = llm
.generate_stream(&prompt, &serde_json::Value::Null, tx)
.await
{
error!("LLM streaming error in direct_mode_handler: {}", e);
}
});
let user_message = UserMessage {
bot_id: "default".to_string(),
user_id: session.user_id.to_string(),
session_id: session.id.to_string(),
channel: "web".to_string(),
content: message.content.clone(),
message_type: 1,
media_url: None,
timestamp: Utc::now(),
context_name: None,
};
let (response_tx, mut response_rx) = mpsc::channel::<BotResponse>(100);
if let Err(e) = self.stream_response(user_message, response_tx).await {
error!("Failed to stream response in direct_mode_handler: {}", e);
}
let mut full_response = String::new();
while let Some(chunk) = rx.recv().await {
full_response.push_str(&chunk);
while let Some(response) = response_rx.recv().await {
full_response.push_str(&response.content);
}
Ok(full_response)

View file

@@ -121,6 +121,7 @@ impl AppConfig {
info!("Loading configuration from database");
use crate::shared::models::schema::bot_configuration::dsl::*;
use crate::bot::get_default_bot;
use diesel::prelude::*;
let config_map: HashMap<String, ServerConfigRow> = bot_configuration
@@ -351,12 +352,28 @@ impl AppConfig {
value: String,
}
// First attempt: use the current context (existing query)
let result = diesel::sql_query("SELECT get_config($1, $2) as value")
.bind::<Text, _>(key)
.bind::<Text, _>(fallback_str)
.get_result::<ConfigValue>(conn)
.map(|row| row.value)?;
Ok(result)
.map(|row| row.value);
match result {
Ok(v) => Ok(v),
Err(_) => {
// Fallback to default bot
let (default_bot_id, _default_bot_name) = crate::bot::get_default_bot(conn);
// Use a fresh connection for ConfigManager to avoid borrowing issues
let fresh_conn = establish_pg_connection()
.map_err(|e| diesel::result::Error::DatabaseError(
diesel::result::DatabaseErrorKind::UnableToSendCommand,
Box::new(e.to_string())
))?;
let manager = ConfigManager::new(Arc::new(Mutex::new(fresh_conn)));
manager.get_config(&default_bot_id, key, fallback)
}
}
}
}
@@ -424,18 +441,33 @@ impl ConfigManager {
fallback: Option<&str>,
) -> Result<String, diesel::result::Error> {
use crate::shared::models::schema::bot_configuration::dsl::*;
use crate::bot::get_default_bot;
let mut conn = self.conn.lock().unwrap();
let fallback_str = fallback.unwrap_or("");
// Try config for provided bot_id
let result = bot_configuration
.filter(bot_id.eq(code_bot_id))
.filter(config_key.eq(key))
.select(config_value)
.first::<String>(&mut *conn)
.unwrap_or(fallback_str.to_string());
.first::<String>(&mut *conn);
Ok(result)
let value = match result {
Ok(v) => v,
Err(_) => {
// Fallback to default bot
let (default_bot_id, _default_bot_name) = crate::bot::get_default_bot(&mut *conn);
bot_configuration
.filter(bot_id.eq(default_bot_id))
.filter(config_key.eq(key))
.select(config_value)
.first::<String>(&mut *conn)
.unwrap_or(fallback_str.to_string())
}
};
Ok(value)
}
pub fn sync_gbot_config(

View file

@@ -1,472 +0,0 @@
use crate::basic::keywords::add_tool::get_session_tools;
use crate::kb::embeddings::search_similar;
use crate::shared::models::UserSession;
use crate::shared::state::AppState;
use log::{debug, error, info};
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::sync::Arc;
/// Answer modes for the bot
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum AnswerMode {
Direct = 0, // Direct LLM response
WithTools = 1, // LLM with tool calling
DocumentsOnly = 2, // Search KB documents only, no LLM
WebSearch = 3, // Include web search results
Mixed = 4, // Use tools stack from ADD_TOOL and KB from session
}
impl AnswerMode {
pub fn from_i32(value: i32) -> Self {
match value {
0 => Self::Direct,
1 => Self::WithTools,
2 => Self::DocumentsOnly,
3 => Self::WebSearch,
4 => Self::Mixed,
_ => Self::Direct,
}
}
}
/// Context from KB documents
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DocumentContext {
pub source: String,
pub content: String,
pub score: f32,
pub collection_name: String,
}
/// Context from tools
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolContext {
pub tool_name: String,
pub description: String,
pub endpoint: String,
}
/// Enhanced prompt with context
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnhancedPrompt {
pub original_query: String,
pub system_prompt: String,
pub user_prompt: String,
pub document_contexts: Vec<DocumentContext>,
pub available_tools: Vec<ToolContext>,
pub answer_mode: AnswerMode,
}
/// Prompt processor that enhances queries with KB and tool context
pub struct PromptProcessor {
state: Arc<AppState>,
}
impl PromptProcessor {
pub fn new(state: Arc<AppState>) -> Self {
Self { state }
}
/// Process a user query and enhance it with context
pub async fn process_query(
&self,
session: &UserSession,
query: &str,
) -> Result<EnhancedPrompt, Box<dyn Error + Send + Sync>> {
let answer_mode = AnswerMode::from_i32(session.answer_mode);
info!(
"Processing query in {:?} mode: {}",
answer_mode,
query.chars().take(50).collect::<String>()
);
match answer_mode {
AnswerMode::Direct => self.process_direct(query).await,
AnswerMode::WithTools => self.process_with_tools(session, query).await,
AnswerMode::DocumentsOnly => self.process_documents_only(session, query).await,
AnswerMode::WebSearch => self.process_web_search(session, query).await,
AnswerMode::Mixed => self.process_mixed(session, query).await,
}
}
/// Direct mode: no additional context
async fn process_direct(
&self,
query: &str,
) -> Result<EnhancedPrompt, Box<dyn Error + Send + Sync>> {
Ok(EnhancedPrompt {
original_query: query.to_string(),
system_prompt: "You are a helpful AI assistant.".to_string(),
user_prompt: query.to_string(),
document_contexts: Vec::new(),
available_tools: Vec::new(),
answer_mode: AnswerMode::Direct,
})
}
/// With tools mode: include available tools
async fn process_with_tools(
&self,
session: &UserSession,
query: &str,
) -> Result<EnhancedPrompt, Box<dyn Error + Send + Sync>> {
let tools = self.get_available_tools(session).await?;
let system_prompt = if tools.is_empty() {
"You are a helpful AI assistant.".to_string()
} else {
format!(
"You are a helpful AI assistant with access to the following tools:\n{}",
self.format_tools_for_prompt(&tools)
)
};
Ok(EnhancedPrompt {
original_query: query.to_string(),
system_prompt,
user_prompt: query.to_string(),
document_contexts: Vec::new(),
available_tools: tools,
answer_mode: AnswerMode::WithTools,
})
}
/// Documents only mode: search KB and use documents to answer
async fn process_documents_only(
&self,
session: &UserSession,
query: &str,
) -> Result<EnhancedPrompt, Box<dyn Error + Send + Sync>> {
let documents = self.search_kb_documents(session, query, 5).await?;
let system_prompt = "You are a helpful AI assistant. Answer the user's question based ONLY on the provided documents. If the documents don't contain relevant information, say so.".to_string();
let user_prompt = if documents.is_empty() {
format!("Question: {}\n\nNo relevant documents found.", query)
} else {
format!(
"Question: {}\n\nRelevant documents:\n{}",
query,
self.format_documents_for_prompt(&documents)
)
};
Ok(EnhancedPrompt {
original_query: query.to_string(),
system_prompt,
user_prompt,
document_contexts: documents,
available_tools: Vec::new(),
answer_mode: AnswerMode::DocumentsOnly,
})
}
/// Web search mode: include web search results
async fn process_web_search(
&self,
_session: &UserSession,
query: &str,
) -> Result<EnhancedPrompt, Box<dyn Error + Send + Sync>> {
// TODO: Implement web search integration
debug!("Web search mode not fully implemented yet");
self.process_direct(query).await
}
/// Mixed mode: combine KB documents and tools
async fn process_mixed(
&self,
session: &UserSession,
query: &str,
) -> Result<EnhancedPrompt, Box<dyn Error + Send + Sync>> {
// Get both documents and tools
let documents = self.search_kb_documents(session, query, 3).await?;
let tools = self.get_available_tools(session).await?;
let mut system_parts = vec!["You are a helpful AI assistant.".to_string()];
if !documents.is_empty() {
system_parts.push(
"Use the provided documents as knowledge base to answer questions.".to_string(),
);
}
if !tools.is_empty() {
system_parts.push(format!(
"You have access to the following tools:\n{}",
self.format_tools_for_prompt(&tools)
));
}
let system_prompt = system_parts.join("\n\n");
let user_prompt = if documents.is_empty() {
query.to_string()
} else {
format!(
"Context from knowledge base:\n{}\n\nQuestion: {}",
self.format_documents_for_prompt(&documents),
query
)
};
Ok(EnhancedPrompt {
original_query: query.to_string(),
system_prompt,
user_prompt,
document_contexts: documents,
available_tools: tools,
answer_mode: AnswerMode::Mixed,
})
}
/// Search KB documents for a query
async fn search_kb_documents(
&self,
session: &UserSession,
query: &str,
limit: usize,
) -> Result<Vec<DocumentContext>, Box<dyn Error + Send + Sync>> {
// Get active KB collections from session context
let collections = self.get_active_collections(session).await?;
if collections.is_empty() {
debug!("No active KB collections for session");
return Ok(Vec::new());
}
let mut all_results = Vec::new();
// Search in each collection
for collection_name in collections {
debug!("Searching in collection: {}", collection_name);
match search_similar(&self.state, &collection_name, query, limit).await {
Ok(results) => {
for result in results {
all_results.push(DocumentContext {
source: result.file_path,
content: result.chunk_text,
score: result.score,
collection_name: collection_name.clone(),
});
}
}
Err(e) => {
error!("Failed to search collection {}: {}", collection_name, e);
}
}
}
// Sort by score and limit
all_results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap());
all_results.truncate(limit);
info!("Found {} relevant documents", all_results.len());
Ok(all_results)
}
/// Get active KB collections from session context
async fn get_active_collections(
&self,
session: &UserSession,
) -> Result<Vec<String>, Box<dyn Error + Send + Sync>> {
let mut collections = Vec::new();
// Check for active_kb_collection in context_data
if let Some(active_kb) = session.context_data.get("active_kb_collection") {
if let Some(name) = active_kb.as_str() {
let collection_name = format!("kb_{}_{}", session.bot_id, name);
collections.push(collection_name);
}
}
// Check for temporary website collections
if let Some(temp_website) = session.context_data.get("temporary_website_collection") {
if let Some(name) = temp_website.as_str() {
collections.push(name.to_string());
}
}
// Check for additional collections from ADD_KB
if let Some(additional) = session.context_data.get("additional_kb_collections") {
if let Some(arr) = additional.as_array() {
for item in arr {
if let Some(name) = item.as_str() {
let collection_name = format!("kb_{}_{}", session.bot_id, name);
collections.push(collection_name);
}
}
}
}
Ok(collections)
}
/// Get available tools from session context
async fn get_available_tools(
&self,
session: &UserSession,
) -> Result<Vec<ToolContext>, Box<dyn Error + Send + Sync>> {
let mut tools = Vec::new();
// Check for tools in session context
if let Some(tools_data) = session.context_data.get("available_tools") {
if let Some(arr) = tools_data.as_array() {
for item in arr {
if let (Some(name), Some(desc), Some(endpoint)) = (
item.get("name").and_then(|v| v.as_str()),
item.get("description").and_then(|v| v.as_str()),
item.get("endpoint").and_then(|v| v.as_str()),
) {
tools.push(ToolContext {
tool_name: name.to_string(),
description: desc.to_string(),
endpoint: endpoint.to_string(),
});
}
}
}
}
// Load all tools associated with this session from session_tool_associations
if let Ok(mut conn) = self.state.conn.lock() {
match get_session_tools(&mut *conn, &session.id) {
Ok(session_tools) => {
info!(
"Loaded {} tools from session_tool_associations for session {}",
session_tools.len(),
session.id
);
for tool_name in session_tools {
// Add the tool if not already in list
if !tools.iter().any(|t| t.tool_name == tool_name) {
tools.push(ToolContext {
tool_name: tool_name.clone(),
description: format!("Tool: {}", tool_name),
endpoint: format!("/default/{}", tool_name),
});
}
}
}
Err(e) => {
error!("Failed to load session tools: {}", e);
}
}
} else {
error!("Failed to acquire database lock for loading session tools");
}
// Also check for legacy current_tool (backward compatibility)
if let Some(current_tool) = &session.current_tool {
// Add the current tool if not already in list
if !tools.iter().any(|t| &t.tool_name == current_tool) {
tools.push(ToolContext {
tool_name: current_tool.clone(),
description: format!("Legacy tool: {}", current_tool),
endpoint: format!("/default/{}", current_tool),
});
}
}
debug!("Found {} available tools", tools.len());
Ok(tools)
}
/// Format documents for inclusion in prompt
fn format_documents_for_prompt(&self, documents: &[DocumentContext]) -> String {
documents
.iter()
.enumerate()
.map(|(idx, doc)| {
format!(
"[Document {}] (Source: {}, Relevance: {:.2})\n{}",
idx + 1,
doc.source,
doc.score,
doc.content.chars().take(500).collect::<String>()
)
})
.collect::<Vec<_>>()
.join("\n\n")
}
/// Format tools for inclusion in prompt
fn format_tools_for_prompt(&self, tools: &[ToolContext]) -> String {
tools
.iter()
.map(|tool| format!("- {}: {}", tool.tool_name, tool.description))
.collect::<Vec<_>>()
.join("\n")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_answer_mode_from_i32() {
assert_eq!(AnswerMode::from_i32(0), AnswerMode::Direct);
assert_eq!(AnswerMode::from_i32(1), AnswerMode::WithTools);
assert_eq!(AnswerMode::from_i32(2), AnswerMode::DocumentsOnly);
assert_eq!(AnswerMode::from_i32(3), AnswerMode::WebSearch);
assert_eq!(AnswerMode::from_i32(4), AnswerMode::Mixed);
assert_eq!(AnswerMode::from_i32(99), AnswerMode::Direct); // Default
}
#[test]
fn test_format_documents() {
let processor = PromptProcessor::new(Arc::new(AppState::default()));
let docs = vec![
DocumentContext {
source: "test.pdf".to_string(),
content: "This is test content".to_string(),
score: 0.95,
collection_name: "test_collection".to_string(),
},
DocumentContext {
source: "another.pdf".to_string(),
content: "More content here".to_string(),
score: 0.85,
collection_name: "test_collection".to_string(),
},
];
let formatted = processor.format_documents_for_prompt(&docs);
assert!(formatted.contains("[Document 1]"));
assert!(formatted.contains("[Document 2]"));
assert!(formatted.contains("test.pdf"));
assert!(formatted.contains("This is test content"));
}
#[test]
fn test_format_tools() {
let processor = PromptProcessor::new(Arc::new(AppState::default()));
let tools = vec![
ToolContext {
tool_name: "enrollment".to_string(),
description: "Enroll a user".to_string(),
endpoint: "/default/enrollment".to_string(),
},
ToolContext {
tool_name: "pricing".to_string(),
description: "Get product pricing".to_string(),
endpoint: "/default/pricing".to_string(),
},
];
let formatted = processor.format_tools_for_prompt(&tools);
assert!(formatted.contains("enrollment"));
assert!(formatted.contains("Enroll a user"));
assert!(formatted.contains("pricing"));
}
}

View file

@@ -71,12 +71,6 @@ impl DriveMonitor {
}
};
if !crate::llm::local::is_server_running(&llm_url).await ||
!crate::llm::local::is_server_running(&embedding_url).await {
trace!("LLM servers not ready - llm: {}, embedding: {}", llm_url, embedding_url);
return;
}
let mut tick = interval(Duration::from_secs(30));
loop {
tick.tick().await;

View file

@@ -252,7 +252,7 @@ pub async fn start_embedding_server(
if cfg!(windows) {
let mut cmd = tokio::process::Command::new("cmd");
cmd.arg("/c").arg(format!(
"cd {} && .\\llama-server.exe -m {} --log-disable --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log",
"cd {} && .\\llama-server.exe -m {} --host 0.0.0.0 --port {} --embedding --n-gpu-layers 99 >../../../../logs/llm/stdout.log",
llama_cpp_path, model_path, port
));
cmd.spawn()?;

View file

@@ -1,4 +1,4 @@
#![allow(dead_code)]
#![allow(warnings)]
#![cfg_attr(feature = "desktop", windows_subsystem = "windows")]
use actix_cors::Cors;

View file

@@ -239,12 +239,10 @@ impl SessionManager {
session_id: &Uuid,
user_id: &Uuid,
) -> Result<String, Box<dyn Error + Send + Sync>> {
// Bring the Redis command trait into scope so we can call `get`.
use redis::Commands;
let redis_key = format!("context:{}:{}", user_id, session_id);
let base_key = format!("context:{}:{}", user_id, session_id);
if let Some(redis_client) = &self.redis {
// Attempt to obtain a Redis connection; log and ignore errors
let conn_option = redis_client
.get_connection()
.map_err(|e| {
@@ -254,26 +252,39 @@ impl SessionManager {
.ok();
if let Some(mut connection) = conn_option {
match connection.get::<_, Option<String>>(&redis_key) {
Ok(Some(context)) => {
debug!(
"Retrieved context from Cache for key {}: {} chars",
redis_key,
context.len()
);
return Ok(context);
// First cache trip: get context name
match connection.get::<_, Option<String>>(&base_key) {
Ok(Some(context_name)) => {
debug!("Found context name '{}' for key {}", context_name, base_key);
// Second cache trip: get actual context value
let full_key = format!("context:{}:{}:{}", user_id, session_id, context_name);
match connection.get::<_, Option<String>>(&full_key) {
Ok(Some(context_value)) => {
debug!(
"Retrieved context value from Cache for key {}: {} chars",
full_key,
context_value.len()
);
return Ok(context_value);
}
Ok(None) => {
debug!("No context value found for key {}", full_key);
}
Err(e) => {
warn!("Failed to retrieve context value from Cache: {}", e);
}
}
}
Ok(None) => {
debug!("No context found in Cache for key {}", redis_key);
debug!("No context name found for key {}", base_key);
}
Err(e) => {
warn!("Failed to retrieve context from Cache: {}", e);
warn!("Failed to retrieve context name from Cache: {}", e);
}
}
}
}
// If no context found, return empty string
Ok(String::new())
}
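
A minimal sketch of the two-step cache lookup introduced above, assuming the `redis` crate's `Commands` trait; the key layout follows the diff:

use redis::Commands;

// Step 1: context:{user}:{session} resolves to the active context name.
// Step 2: context:{user}:{session}:{name} resolves to the stored value.
let base_key = format!("context:{}:{}", user_id, session_id);
if let Some(name) = connection.get::<_, Option<String>>(&base_key)? {
    let full_key = format!("{}:{}", base_key, name);
    if let Some(value) = connection.get::<_, Option<String>>(&full_key)? {
        return Ok(value);
    }
}
// Nothing cached for this session.
Ok(String::new())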

View file

@@ -17,7 +17,7 @@ llm-server-host,0.0.0.0
llm-server-port,8081
llm-server-gpu-layers,35
llm-server-n-moe,23
llm-server-ctx-size,16000
llm-server-ctx-size,512
llm-server-parallel,8
llm-server-cont-batching,true
llm-server-mlock,true


View file

@@ -915,7 +915,7 @@
}
.theme-toggle {
top: 16px;
top: 24px;
right: 60px;
width: 44px;
height: 44px;
@@ -959,7 +959,7 @@
}
.theme-toggle {
top: 16px;
top: 24px;
right: 60px;
width: 40px;
height: 40px;
@@ -1075,6 +1075,7 @@
let isUserScrolling = false;
let autoScrollEnabled = true;
let currentTheme = 'dark';
let isContextChange = false;
const messagesDiv = document.getElementById("messages");
const input = document.getElementById("messageInput");
@@ -1207,7 +1208,7 @@
}
if (percentage >= 50) {
contextIndicator.style.display = "block";
//contextIndicator.style.display = "block";
} else {
contextIndicator.style.display = "none";
}
@@ -1251,7 +1252,9 @@
hasReceivedInitialMessage = false;
connectWebSocket();
loadSessions();
document.getElementById("messages").innerHTML = `
// Clear messages and show empty state
messagesDiv.innerHTML = `
<div id="emptyState">
<div class="empty-icon">
<img src="https://pragmatismo.com.br/gb-logo.png" alt="General Bots Logo">
@@ -1260,9 +1263,22 @@
<p class="empty-subtitle">Seu assistente de IA avançado</p>
</div>
`;
// Clear suggestions
clearSuggestions();
// Reset context usage
updateContextUsage(0);
// Stop voice if active
if (isVoiceMode) {
await startVoiceSession();
await stopVoiceSession();
isVoiceMode = false;
const voiceToggle = document.getElementById("voiceToggle");
voiceToggle.textContent = "🎤 Modo Voz";
voiceToggle.classList.remove("recording");
const voiceStatus = document.getElementById("voiceStatus");
voiceStatus.style.display = "none";
}
if (window.innerWidth <= 768) {
@@ -1338,6 +1354,12 @@
return;
}
// Check if this is a context change message and skip processing
if (response.message_type === 5) {
isContextChange = true;
return;
}
processMessageContent(response);
};
@@ -1377,6 +1399,12 @@
}
function processMessageContent(response) {
// Skip processing if this is a context change
if (isContextChange) {
isContextChange = false;
return;
}
const emptyState = document.getElementById("emptyState");
if (emptyState) {
emptyState.remove();
@@ -1685,6 +1713,14 @@
return div.innerHTML;
}
function clearSuggestions() {
const footer = document.querySelector('footer');
const container = footer.querySelector('.suggestions-container');
if (container) {
container.innerHTML = '';
}
}
function handleSuggestions(suggestions) {
const footer = document.querySelector('footer');
let container = footer.querySelector('.suggestions-container');
@@ -1695,7 +1731,9 @@
footer.insertBefore(container, footer.firstChild);
}
// Clear existing suggestions before adding new ones
container.innerHTML = '';
suggestions.forEach(s => {
const btn = document.createElement('button');
btn.textContent = s.text;
@@ -1750,7 +1788,7 @@
await pendingContextChange;
const contextIndicator = document.getElementById('contextIndicator');
if (contextIndicator) {
contextIndicator.style.display = 'block';
//contextIndicator.style.display = 'block';
document.getElementById('contextPercentage').textContent = context;
}
} else {