diff --git a/src/bot/mod.rs b/src/bot/mod.rs
index bc47981f..f407d838 100644
--- a/src/bot/mod.rs
+++ b/src/bot/mod.rs
@@ -1,7 +1,10 @@
+mod ui;
+
 use crate::config::ConfigManager;
 use crate::drive_monitor::DriveMonitor;
 use crate::llm_models;
 use crate::nvidia::get_system_metrics;
+use crate::bot::ui::BotUI;
 use crate::shared::models::{BotResponse, Suggestion, UserMessage, UserSession};
 use crate::shared::state::AppState;
 use actix_web::{web, HttpRequest, HttpResponse, Result};
@@ -508,13 +511,6 @@ impl BotOrchestrator {
 
         // Show initial progress
         if let Ok(metrics) = get_system_metrics(initial_tokens, max_context_size) {
-            eprintln!(
-                "\nNVIDIA: {:.1}% | CPU: {:.1}% | Tokens: {}/{}",
-                metrics.gpu_usage.unwrap_or(0.0),
-                metrics.cpu_usage,
-                initial_tokens,
-                max_context_size
-            );
         }
         let model = config_manager
             .get_config(
@@ -572,18 +568,8 @@ impl BotOrchestrator {
                     let cpu_bar = "█".repeat((metrics.cpu_usage / 5.0).round() as usize);
                     let token_ratio = current_tokens as f64 / max_context_size.max(1) as f64;
                     let token_bar = "█".repeat((token_ratio * 20.0).round() as usize);
-                    use std::io::{self, Write};
-                    print!(
-                        "\rGPU [{:<20}] {:.1}% | CPU [{:<20}] {:.1}% | TOKENS [{:<20}] {}/{}",
-                        gpu_bar,
-                        metrics.gpu_usage.unwrap_or(0.0),
-                        cpu_bar,
-                        metrics.cpu_usage,
-                        token_bar,
-                        current_tokens,
-                        max_context_size
-                    );
-                    io::stdout().flush().unwrap();
+                    let mut ui = BotUI::new().unwrap();
+                    ui.render_progress(current_tokens, max_context_size).unwrap();
                 }
                 last_progress_update = Instant::now();
             }
@@ -795,44 +781,9 @@ impl BotOrchestrator {
             session_id, channel, message
         );
 
-        if channel == "web" {
-            self.send_event(
-                "system",
-                "system",
-                session_id,
-                channel,
-                "warn",
-                serde_json::json!({
-                    "message": message,
-                    "timestamp": Utc::now().to_rfc3339()
-                }),
-            )
-            .await
-        } else {
-            if let Some(adapter) = self.state.channels.lock().unwrap().get(channel) {
-                let warn_response = BotResponse {
-                    bot_id: "system".to_string(),
-                    user_id: "system".to_string(),
-                    session_id: session_id.to_string(),
-                    channel: channel.to_string(),
-                    content: format!("⚠️ WARNING: {}", message),
-                    message_type: 1,
-                    stream_token: None,
-                    is_complete: true,
-                    suggestions: Vec::new(),
-                    context_name: None,
-                    context_length: 0,
-                    context_max_length: 0,
-                };
-                adapter.send_message(warn_response).await
-            } else {
-                warn!(
-                    "No channel adapter found for warning on channel: {}",
-                    channel
-                );
-                Ok(())
-            }
-        }
+        let mut ui = BotUI::new().unwrap();
+        ui.render_warning(message).unwrap();
+        Ok(())
     }
 
     pub async fn trigger_auto_welcome(
diff --git a/src/bot/ui.rs b/src/bot/ui.rs
new file mode 100644
index 00000000..a2cba219
--- /dev/null
+++ b/src/bot/ui.rs
@@ -0,0 +1,78 @@
+use ratatui::{
+    backend::CrosstermBackend,
+    layout::{Constraint, Direction, Layout},
+    style::{Color, Modifier, Style},
+    widgets::{Block, Borders, Gauge, Paragraph},
+    Terminal,
+};
+use std::io::{self, Stdout};
+use crate::nvidia::get_system_metrics;
+
+pub struct BotUI {
+    terminal: Terminal<CrosstermBackend<Stdout>>,
+}
+
+impl BotUI {
+    pub fn new() -> io::Result<Self> {
+        let stdout = io::stdout();
+        let backend = CrosstermBackend::new(stdout);
+        let terminal = Terminal::new(backend)?;
+        Ok(Self { terminal })
+    }
+
+    pub fn render_progress(&mut self, current_tokens: usize, max_context_size: usize) -> io::Result<()> {
+        let metrics = get_system_metrics(current_tokens, max_context_size).unwrap_or_default();
+        let gpu_usage = metrics.gpu_usage.unwrap_or(0.0);
+        let cpu_usage = metrics.cpu_usage;
+        let token_ratio = current_tokens as f64 / max_context_size.max(1) as f64;
+
+        self.terminal.draw(|f| {
+            let chunks = Layout::default()
+                .direction(Direction::Vertical)
+                .constraints([
+                    Constraint::Length(3),
+                    Constraint::Length(3),
+                    Constraint::Length(3),
+                    Constraint::Min(0),
+                ])
+                .split(f.area());
+
+            let gpu_gauge = Gauge::default()
+                .block(Block::default().title("GPU Usage").borders(Borders::ALL))
+                .gauge_style(Style::default().fg(Color::Green).add_modifier(Modifier::BOLD))
+                .ratio(gpu_usage as f64 / 100.0)
+                .label(format!("{:.1}%", gpu_usage));
+
+            let cpu_gauge = Gauge::default()
+                .block(Block::default().title("CPU Usage").borders(Borders::ALL))
+                .gauge_style(Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD))
+                .ratio(cpu_usage as f64 / 100.0)
+                .label(format!("{:.1}%", cpu_usage));
+
+            let token_gauge = Gauge::default()
+                .block(Block::default().title("Token Progress").borders(Borders::ALL))
+                .gauge_style(Style::default().fg(Color::Cyan).add_modifier(Modifier::BOLD))
+                .ratio(token_ratio)
+                .label(format!("{}/{}", current_tokens, max_context_size));
+
+            f.render_widget(gpu_gauge, chunks[0]);
+            f.render_widget(cpu_gauge, chunks[1]);
+            f.render_widget(token_gauge, chunks[2]);
+        })?;
+        Ok(())
+    }
+
+    pub fn render_warning(&mut self, message: &str) -> io::Result<()> {
+        self.terminal.draw(|f| {
+            let block = Block::default()
+                .title("⚠️ NVIDIA Warning")
+                .borders(Borders::ALL)
+                .border_style(Style::default().fg(Color::Red));
+            let paragraph = Paragraph::new(message)
+                .style(Style::default().fg(Color::Red).add_modifier(Modifier::BOLD))
+                .block(block);
+            f.render_widget(paragraph, f.area());
+        })?;
+        Ok(())
+    }
+}
diff --git a/src/nvidia/mod.rs b/src/nvidia/mod.rs
index 588ba430..8624c3ee 100644
--- a/src/nvidia/mod.rs
+++ b/src/nvidia/mod.rs
@@ -4,6 +4,7 @@
 use std::collections::HashMap;
 use sysinfo::{System};
 /// System monitoring data
+#[derive(Default)]
 pub struct SystemMetrics {
     pub gpu_usage: Option<f32>,
     pub cpu_usage: f32,
diff --git a/src/ui_tree/status_panel.rs b/src/ui_tree/status_panel.rs
index 9d28715e..a4780eee 100644
--- a/src/ui_tree/status_panel.rs
+++ b/src/ui_tree/status_panel.rs
@@ -1,6 +1,7 @@
 use std::sync::Arc;
 use crate::shared::state::AppState;
 use crate::shared::models::schema::bots::dsl::*;
+use crate::nvidia;
 use diesel::prelude::*;
 
 pub struct StatusPanel {
@@ -53,6 +54,30 @@ impl StatusPanel {
         let llm_status = "🟢 ONLINE";
         lines.push(format!("    LLM: {}", llm_status));
 
+        // Get system metrics
+        let system_metrics = match nvidia::get_system_metrics(0, 0) {
+            Ok(metrics) => metrics,
+            Err(_) => nvidia::SystemMetrics::default(),
+        };
+
+        // Add system metrics with progress bars
+        lines.push("".to_string());
+        lines.push("───────────────────────────────────────".to_string());
+        lines.push("  SYSTEM METRICS".to_string());
+        lines.push("───────────────────────────────────────".to_string());
+
+        // CPU usage with progress bar
+        let cpu_bar = Self::create_progress_bar(system_metrics.cpu_usage, 20);
+        lines.push(format!("  CPU: {:5.1}% {}", system_metrics.cpu_usage, cpu_bar));
+
+        // GPU usage with progress bar (if available)
+        if let Some(gpu_usage) = system_metrics.gpu_usage {
+            let gpu_bar = Self::create_progress_bar(gpu_usage, 20);
+            lines.push(format!("  GPU: {:5.1}% {}", gpu_usage, gpu_bar));
+        } else {
+            lines.push("  GPU: Not available".to_string());
+        }
+
         lines.push("".to_string());
         lines.push("───────────────────────────────────────".to_string());
         lines.push("  ACTIVE BOTS".to_string());
@@ -102,4 +127,15 @@
     pub fn render(&self) -> String {
         self.cached_content.clone()
     }
+
+    /// Creates a visual progress bar for percentage values
+    fn create_progress_bar(percentage: f32, width: usize) -> String {
+        let filled = (percentage / 100.0 * width as f32).round() as usize;
+        let empty = width.saturating_sub(filled);
+
+        let filled_chars = "█".repeat(filled);
+        let empty_chars = "░".repeat(empty);
+
+        format!("[{}{}]", filled_chars, empty_chars)
+    }
 }