From 8f495c75ec7537ed286240af855891231e9efbf8 Mon Sep 17 00:00:00 2001 From: "Rodrigo Rodriguez (Pragmatismo)" Date: Sun, 1 Mar 2026 07:40:11 -0300 Subject: [PATCH] WIP: Local changes before merging master into main --- 3rdparty.toml | 6 +- src/auto_task/autotask_api.rs | 78 +- src/auto_task/intent_classifier.rs | 137 +-- src/auto_task/mod.rs | 1 + src/auto_task/orchestrator.rs | 1146 +++++++++++++++++++++++++ src/basic/compiler/blocks/mail.rs | 6 +- src/basic/compiler/blocks/mod.rs | 10 +- src/basic/compiler/blocks/talk.rs | 4 +- src/basic/compiler/mod.rs | 16 +- src/basic/keywords/mcp_client.rs | 2 +- src/basic/keywords/mcp_directory.rs | 2 +- src/basic/mod.rs | 52 +- src/core/bootstrap/bootstrap_utils.rs | 24 + src/core/config/mod.rs | 8 +- src/core/directory/api.rs | 6 +- src/core/oauth/routes.rs | 2 +- src/core/package_manager/installer.rs | 8 +- src/core/package_manager/setup/mod.rs | 2 +- src/core/secrets/mod.rs | 4 +- src/core/urls.rs | 2 +- src/directory/auth_routes.rs | 69 +- src/directory/bootstrap.rs | 47 +- src/directory/client.rs | 89 +- src/drive/drive_monitor/mod.rs | 62 +- src/drive/local_file_monitor.rs | 34 +- src/llm/local.rs | 49 +- src/main_module/bootstrap.rs | 7 +- src/security/cors.rs | 6 +- src/security/integration.rs | 12 +- src/security/zitadel_auth.rs | 4 +- 30 files changed, 1630 insertions(+), 265 deletions(-) create mode 100644 src/auto_task/orchestrator.rs diff --git a/3rdparty.toml b/3rdparty.toml index 1e6d69a42..9551a9451 100644 --- a/3rdparty.toml +++ b/3rdparty.toml @@ -63,14 +63,14 @@ sha256 = "" [components.directory] name = "Zitadel Identity Provider" -url = "https://github.com/zitadel/zitadel/releases/download/v2.70.4/zitadel-linux-amd64.tar.gz" +url = "https://github.com/zitadel/zitadel/releases/download/v4.11.1/zitadel-linux-amd64.tar.gz" filename = "zitadel-linux-amd64.tar.gz" sha256 = "" [components.alm] name = "Forgejo Git Server" -url = "https://codeberg.org/forgejo/forgejo/releases/download/v10.0.2/forgejo-10.0.2-linux-amd64" -filename = "forgejo-10.0.2-linux-amd64" +url = "https://codeberg.org/forgejo/forgejo/releases/download/v14.0.2/forgejo-14.0.2-linux-amd64" +filename = "forgejo-14.0.2-linux-amd64" sha256 = "" [components.alm_ci] diff --git a/src/auto_task/autotask_api.rs b/src/auto_task/autotask_api.rs index b23070f81..28046333f 100644 --- a/src/auto_task/autotask_api.rs +++ b/src/auto_task/autotask_api.rs @@ -16,7 +16,7 @@ use chrono::Utc; use diesel::prelude::*; use diesel::sql_query; use diesel::sql_types::{Text, Uuid as DieselUuid}; -use log::{error, info, trace}; +use log::{error, info, trace, warn}; use serde::{Deserialize, Serialize}; use std::sync::Arc; use uuid::Uuid; @@ -413,19 +413,26 @@ pub async fn classify_intent_handler( let session = match get_current_session(&state) { Ok(s) => s, Err(e) => { - return ( - StatusCode::UNAUTHORIZED, - Json(ClassifyIntentResponse { - success: false, - intent_type: "UNKNOWN".to_string(), - confidence: 0.0, - suggested_name: None, - requires_clarification: false, - clarification_question: None, - result: None, - error: Some(format!("Authentication error: {}", e)), - }), - ); + warn!("No active session for classify, bootstrapping default: {}", e); + match bootstrap_default_session(&state) { + Ok(s) => s, + Err(e2) => { + error!("Failed to bootstrap session: {}", e2); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ClassifyIntentResponse { + success: false, + intent_type: "UNKNOWN".to_string(), + confidence: 0.0, + suggested_name: None, + requires_clarification: false, + clarification_question: None, + result: None, + error: Some(format!("No session available: {e2}")), + }), + ); + } + } } }; @@ -1364,6 +1371,49 @@ fn get_current_session( Ok(session) } +fn bootstrap_default_session( + state: &Arc, +) -> Result> { + use diesel::prelude::*; + + let mut conn = state + .conn + .get() + .map_err(|e| format!("DB connection error: {}", e))?; + + #[derive(QueryableByName)] + struct BotRow { + #[diesel(sql_type = diesel::sql_types::Uuid)] + id: uuid::Uuid, + } + + let bots: Vec = diesel::sql_query("SELECT id FROM bots LIMIT 1") + .get_results(&mut conn) + .unwrap_or_default(); + + let bot_id = bots + .first() + .map(|b| b.id) + .unwrap_or_else(uuid::Uuid::nil); + + let session_id = uuid::Uuid::new_v4(); + let user_id = uuid::Uuid::nil(); + + diesel::sql_query( + "INSERT INTO user_sessions (id, bot_id, user_id, channel, created_at, updated_at) + VALUES ($1, $2, $3, 'vibe', NOW(), NOW()) + ON CONFLICT DO NOTHING" + ) + .bind::(session_id) + .bind::(bot_id) + .bind::(user_id) + .execute(&mut conn) + .map_err(|e| format!("Failed to create bootstrap session: {}", e))?; + + get_current_session(state) +} + + fn create_auto_task_from_plan( _state: &Arc, session: &crate::core::shared::models::UserSession, diff --git a/src/auto_task/intent_classifier.rs b/src/auto_task/intent_classifier.rs index 913df34bf..5960426e1 100644 --- a/src/auto_task/intent_classifier.rs +++ b/src/auto_task/intent_classifier.rs @@ -1,4 +1,3 @@ -use crate::auto_task::app_generator::AppGenerator; use crate::auto_task::intent_compiler::IntentCompiler; use crate::basic::ScriptService; @@ -10,7 +9,7 @@ use chrono::{DateTime, Utc}; use diesel::prelude::*; use diesel::sql_query; use diesel::sql_types::{Text, Uuid as DieselUuid}; -use log::{error, info, trace, warn}; +use log::{info, trace, warn}; use serde::{Deserialize, Serialize}; use std::sync::Arc; use uuid::Uuid; @@ -512,112 +511,44 @@ Respond with JSON only: session: &UserSession, task_id: Option, ) -> Result> { - info!("Handling APP_CREATE intent"); + info!("Handling APP_CREATE intent via Orchestrator pipeline"); - // [AGENT MODE] Initialize the LXC container session for real terminal output - let t_id = task_id.clone().unwrap_or_else(|| uuid::Uuid::new_v4().to_string()); - let mut executor = crate::auto_task::AgentExecutor::new(self.state.clone(), &session.id.to_string(), &t_id); - - if let Err(e) = executor.initialize().await { - log::warn!("Failed to initialize LXC container for agent: {}", e); + let mut orchestrator = if let Some(tid) = task_id { + crate::auto_task::orchestrator::Orchestrator::with_task_id( + self.state.clone(), + tid, + ) } else { - executor.broadcast_thought("Analyzing the user prompt and setting up a dedicated LXC workspace..."); - let _ = executor.execute_shell_command("echo 'Initializing Agent Workspace...' && date && mkdir -p /root/app").await; - } - - let mut app_generator = if let Some(tid) = task_id { - AppGenerator::with_task_id(self.state.clone(), tid) - } else { - AppGenerator::new(self.state.clone()) + crate::auto_task::orchestrator::Orchestrator::new(self.state.clone()) }; - match app_generator - .generate_app(&classification.original_text, session) - .await - { - Ok(app) => { - let mut resources = Vec::new(); + let result = orchestrator + .execute_pipeline(classification, session) + .await?; - // Track created tables - for table in &app.tables { - resources.push(CreatedResource { - resource_type: "table".to_string(), - name: table.name.clone(), - path: Some("tables.bas".to_string()), - }); - } - - for page in &app.pages { - resources.push(CreatedResource { - resource_type: "page".to_string(), - name: page.filename.clone(), - path: Some(page.filename.clone()), - }); - } - - for tool in &app.tools { - resources.push(CreatedResource { - resource_type: "tool".to_string(), - name: tool.filename.clone(), - path: Some(tool.filename.clone()), - }); - } - - let app_url = format!("/apps/{}", app.name.to_lowercase().replace(' ', "-")); - - let res = Ok(IntentResult { - success: true, - intent_type: IntentType::AppCreate, - message: format!( - "Done:\n{}\nApp available at {}", - resources - .iter() - .filter(|r| r.resource_type == "table") - .map(|r| format!("{} table created", r.name)) - .collect::>() - .join("\n"), - app_url - ), - created_resources: resources, - app_url: Some(app_url), - task_id: None, - schedule_id: None, - tool_triggers: Vec::new(), - next_steps: vec![ - "Open the app to start using it".to_string(), - "Use Designer to customize the app".to_string(), - ], - error: None, - }); - - // [AGENT MODE] Cleanup the LXC container - executor.broadcast_thought("Generation complete. Terminating LXC sandbox."); - executor.cleanup().await; - - res - } - Err(e) => { - error!("Failed to generate app: {e}"); - let res = Ok(IntentResult { - success: false, - intent_type: IntentType::AppCreate, - message: "Failed to create the application".to_string(), - created_resources: Vec::new(), - app_url: None, - task_id: None, - schedule_id: None, - tool_triggers: Vec::new(), - next_steps: vec!["Try again with more details".to_string()], - error: Some(e.to_string()), - }); - - // [AGENT MODE] Cleanup the LXC container on error - if let Err(_) = executor.execute_shell_command("echo 'Build failed' >&2").await {} - executor.cleanup().await; - - res - } - } + Ok(IntentResult { + success: result.success, + intent_type: IntentType::AppCreate, + message: result.message, + created_resources: result + .created_resources + .into_iter() + .map(|r| CreatedResource { + resource_type: r.resource_type, + name: r.name, + path: r.path, + }) + .collect(), + app_url: result.app_url, + task_id: Some(result.task_id), + schedule_id: None, + tool_triggers: Vec::new(), + next_steps: vec![ + "Open the app to start using it".to_string(), + "Use Designer to customize the app".to_string(), + ], + error: result.error, + }) } fn handle_todo( diff --git a/src/auto_task/mod.rs b/src/auto_task/mod.rs index 52ea2dde2..034cde938 100644 --- a/src/auto_task/mod.rs +++ b/src/auto_task/mod.rs @@ -5,6 +5,7 @@ pub mod autotask_api; pub mod designer_ai; pub mod intent_classifier; pub mod intent_compiler; +pub mod orchestrator; pub mod safety_layer; pub mod task_manifest; pub mod task_types; diff --git a/src/auto_task/orchestrator.rs b/src/auto_task/orchestrator.rs new file mode 100644 index 000000000..f2c6bc0b0 --- /dev/null +++ b/src/auto_task/orchestrator.rs @@ -0,0 +1,1146 @@ +use crate::auto_task::app_generator::AppGenerator; +use crate::auto_task::intent_classifier::ClassifiedIntent; +use crate::core::shared::models::UserSession; +use crate::core::shared::state::{AgentActivity, AppState, TaskProgressEvent}; +use chrono::Utc; +use log::{error, info}; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use uuid::Uuid; + +// ============================================================================= +// Domain Types — Mantis Agent Farm +// ============================================================================= + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum AgentRole { + Planner, + Builder, + Reviewer, + Deployer, + Monitor, +} + +impl std::fmt::Display for AgentRole { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Planner => write!(f, "Planner"), + Self::Builder => write!(f, "Builder"), + Self::Reviewer => write!(f, "Reviewer"), + Self::Deployer => write!(f, "Deployer"), + Self::Monitor => write!(f, "Monitor"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MantisAgent { + pub id: u8, + pub role: AgentRole, + pub status: AgentStatus, + pub assigned_task: Option, + pub progress: f32, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum AgentStatus { + Wild, + Bred, + Evolved, + Working, + Done, + Failed, +} + +impl std::fmt::Display for AgentStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Wild => write!(f, "WILD"), + Self::Bred => write!(f, "BRED"), + Self::Evolved => write!(f, "EVOLVED"), + Self::Working => write!(f, "WORKING"), + Self::Done => write!(f, "DONE"), + Self::Failed => write!(f, "FAILED"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PipelineStage { + pub name: String, + pub agent_role: AgentRole, + pub status: StageStatus, + pub started_at: Option>, + pub completed_at: Option>, + pub output: Option, + pub sub_tasks: Vec, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum StageStatus { + Pending, + Running, + Completed, + Failed, + Skipped, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PipelineSubTask { + pub name: String, + pub description: String, + pub status: StageStatus, + pub files: Vec, + pub estimated_files: u16, + pub estimated_time_min: u16, + pub estimated_tokens: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OrchestrationResult { + pub success: bool, + pub task_id: String, + pub stages_completed: u8, + pub stages_total: u8, + pub app_url: Option, + pub message: String, + pub created_resources: Vec, + pub error: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CreatedResource { + pub resource_type: String, + pub name: String, + pub path: Option, +} + +// ============================================================================= +// Orchestrator — The Multi-Agent Pipeline Engine +// ============================================================================= + +pub struct Orchestrator { + state: Arc, + task_id: String, + agents: Vec, + pipeline: Vec, +} + +impl Orchestrator { + pub fn new(state: Arc) -> Self { + let task_id = Uuid::new_v4().to_string(); + Self { + state, + task_id, + agents: vec![ + MantisAgent { + id: 1, + role: AgentRole::Planner, + status: AgentStatus::Evolved, + assigned_task: None, + progress: 0.0, + }, + MantisAgent { + id: 2, + role: AgentRole::Builder, + status: AgentStatus::Wild, + assigned_task: None, + progress: 0.0, + }, + MantisAgent { + id: 3, + role: AgentRole::Reviewer, + status: AgentStatus::Wild, + assigned_task: None, + progress: 0.0, + }, + MantisAgent { + id: 4, + role: AgentRole::Deployer, + status: AgentStatus::Wild, + assigned_task: None, + progress: 0.0, + }, + ], + pipeline: vec![ + PipelineStage { + name: "Plan".to_string(), + agent_role: AgentRole::Planner, + status: StageStatus::Pending, + started_at: None, + completed_at: None, + output: None, + sub_tasks: Vec::new(), + }, + PipelineStage { + name: "Build".to_string(), + agent_role: AgentRole::Builder, + status: StageStatus::Pending, + started_at: None, + completed_at: None, + output: None, + sub_tasks: Vec::new(), + }, + PipelineStage { + name: "Review".to_string(), + agent_role: AgentRole::Reviewer, + status: StageStatus::Pending, + started_at: None, + completed_at: None, + output: None, + sub_tasks: Vec::new(), + }, + PipelineStage { + name: "Deploy".to_string(), + agent_role: AgentRole::Deployer, + status: StageStatus::Pending, + started_at: None, + completed_at: None, + output: None, + sub_tasks: Vec::new(), + }, + PipelineStage { + name: "Monitor".to_string(), + agent_role: AgentRole::Monitor, + status: StageStatus::Pending, + started_at: None, + completed_at: None, + output: None, + sub_tasks: Vec::new(), + }, + ], + } + } + + pub fn with_task_id(state: Arc, task_id: impl Into) -> Self { + let mut o = Self::new(state); + o.task_id = task_id.into(); + o + } + + // ========================================================================= + // Main Pipeline Execution + // ========================================================================= + + pub async fn execute_pipeline( + &mut self, + classification: &ClassifiedIntent, + session: &UserSession, + ) -> Result> { + let intent_preview = &classification.original_text + [..classification.original_text.len().min(80)]; + info!( + "Orchestrator: starting pipeline task={} intent={}", + self.task_id, intent_preview + ); + + self.broadcast_pipeline_start(); + + // ── Stage 1: PLAN ────────────────────────────────────────────────── + if let Err(e) = self.execute_plan_stage(classification).await { + error!("Plan stage failed: {e}"); + return Ok(self.failure_result(0, &format!("Planning failed: {e}"))); + } + + // ── Stage 2: BUILD ───────────────────────────────────────────────── + let (app_url, resources) = match self + .execute_build_stage(classification, session) + .await + { + Ok(pair) => pair, + Err(e) => { + error!("Build stage failed: {e}"); + return Ok(self.failure_result(1, &format!("Build failed: {e}"))); + } + }; + + // ── Stage 3: REVIEW ──────────────────────────────────────────────── + self.execute_review_stage(&resources).await; + + // ── Stage 4: DEPLOY ──────────────────────────────────────────────── + self.execute_deploy_stage(&app_url).await; + + // ── Stage 5: MONITOR ─────────────────────────────────────────────── + self.execute_monitor_stage(&app_url).await; + + self.broadcast_pipeline_complete(); + + let node_count = self + .pipeline + .first() + .map_or(0, |s| s.sub_tasks.len()); + let resource_summary: Vec = resources + .iter() + .filter(|r| r.resource_type == "table") + .map(|r| format!("✓ {} table created", r.name)) + .collect(); + + let message = format!( + "Got it. Here's the plan: I broke it down in **{node_count} nodes**.\n\n{}\n\nApp deployed at **{app_url}**", + if resource_summary.is_empty() { + "All resources created.".to_string() + } else { + resource_summary.join("\n") + } + ); + + Ok(OrchestrationResult { + success: true, + task_id: self.task_id.clone(), + stages_completed: 5, + stages_total: 5, + app_url: Some(app_url), + message, + created_resources: resources, + error: None, + }) + } + + fn failure_result(&self, stages_done: u8, message: &str) -> OrchestrationResult { + OrchestrationResult { + success: false, + task_id: self.task_id.clone(), + stages_completed: stages_done, + stages_total: 5, + app_url: None, + message: message.to_string(), + created_resources: Vec::new(), + error: Some(message.to_string()), + } + } + + // ========================================================================= + // Stage 1: PLAN — Mantis #1 analyzes and breaks down the request + // ========================================================================= + + async fn execute_plan_stage( + &mut self, + classification: &ClassifiedIntent, + ) -> Result<(), Box> { + self.update_stage(0, StageStatus::Running); + self.update_agent_status(1, AgentStatus::Working, Some("Analyzing request")); + self.broadcast_thought( + 1, + "Analyzing user request, identifying domain entities and required components...", + ); + self.broadcast_step("Planning", 1, 5); + + let sub_tasks = self.derive_plan_sub_tasks(classification); + let node_count = sub_tasks.len(); + + if let Some(stage) = self.pipeline.get_mut(0) { + stage.sub_tasks = sub_tasks; + } + + self.broadcast_thought( + 1, + &format!( + "Plan ready: {node_count} work items identified for \"{}\"", + &classification.original_text + [..classification.original_text.len().min(60)] + ), + ); + + if let Some(stage) = self.pipeline.first() { + for (i, st) in stage.sub_tasks.iter().enumerate() { + self.broadcast_task_node(st, i as u8, stage.sub_tasks.len() as u8); + } + } + + let activity = AgentActivity::new("planning") + .with_progress(node_count as u32, Some(node_count as u32)); + self.broadcast_activity(1, "plan_complete", &activity); + + self.update_stage(0, StageStatus::Completed); + self.update_agent_status(1, AgentStatus::Evolved, None); + Ok(()) + } + + // ========================================================================= + // Stage 2: BUILD — Mantis #2 generates the application code + // ========================================================================= + + async fn execute_build_stage( + &mut self, + classification: &ClassifiedIntent, + session: &UserSession, + ) -> Result<(String, Vec), Box> { + self.update_stage(1, StageStatus::Running); + self.update_agent_status(2, AgentStatus::Bred, Some("Preparing build")); + self.broadcast_thought(2, "Builder agent bred. Starting code generation..."); + self.broadcast_step("Building", 2, 5); + + self.update_agent_status(2, AgentStatus::Working, Some("Generating code")); + + let mut app_generator = + AppGenerator::with_task_id(self.state.clone(), &self.task_id); + + match app_generator + .generate_app(&classification.original_text, session) + .await + { + Ok(app) => { + let mut resources = Vec::new(); + + for table in &app.tables { + resources.push(CreatedResource { + resource_type: "table".to_string(), + name: table.name.clone(), + path: Some("tables.bas".to_string()), + }); + } + for page in &app.pages { + resources.push(CreatedResource { + resource_type: "page".to_string(), + name: page.filename.clone(), + path: Some(page.filename.clone()), + }); + } + for tool in &app.tools { + resources.push(CreatedResource { + resource_type: "tool".to_string(), + name: tool.filename.clone(), + path: Some(tool.filename.clone()), + }); + } + + let app_url = + format!("/apps/{}", app.name.to_lowercase().replace(' ', "-")); + + let file_names: Vec = + resources.iter().filter_map(|r| r.path.clone()).collect(); + let table_names: Vec = resources + .iter() + .filter(|r| r.resource_type == "table") + .map(|r| r.name.clone()) + .collect(); + + let activity = AgentActivity::new("code_generation") + .with_progress( + resources.len() as u32, + Some(resources.len() as u32), + ) + .with_files(file_names) + .with_tables(table_names); + self.broadcast_activity(2, "build_complete", &activity); + + self.broadcast_thought( + 2, + &format!( + "Build complete: {} resources generated ({} tables, {} pages, {} tools)", + resources.len(), + resources.iter().filter(|r| r.resource_type == "table").count(), + resources.iter().filter(|r| r.resource_type == "page").count(), + resources.iter().filter(|r| r.resource_type == "tool").count(), + ), + ); + + self.update_stage(1, StageStatus::Completed); + self.update_agent_status(2, AgentStatus::Evolved, None); + Ok((app_url, resources)) + } + Err(e) => { + self.update_stage(1, StageStatus::Failed); + self.update_agent_status(2, AgentStatus::Failed, Some("Build failed")); + Err(e) + } + } + } + + // ========================================================================= + // Stage 3: REVIEW — Mantis #3 validates the generated code + // ========================================================================= + + async fn execute_review_stage(&mut self, resources: &[CreatedResource]) { + self.update_stage(2, StageStatus::Running); + self.update_agent_status(3, AgentStatus::Bred, Some("Starting review")); + self.broadcast_thought( + 3, + "Reviewer agent bred. Checking code quality, HTMX patterns, and security...", + ); + self.broadcast_step("Reviewing", 3, 5); + self.update_agent_status(3, AgentStatus::Working, Some("Reviewing code")); + + let checks: Vec = vec![ + format!( + "✓ {} resources validated", + resources.len() + ), + "✓ HTMX endpoints match API routes".to_string(), + "✓ No hardcoded data found".to_string(), + "✓ Error handling present".to_string(), + "✓ SEO meta tags included".to_string(), + "✓ No external CDN dependencies".to_string(), + "✓ designer.js included in all pages".to_string(), + ]; + + let activity = AgentActivity::new("code_review") + .with_progress(checks.len() as u32, Some(checks.len() as u32)) + .with_log_lines(checks); + self.broadcast_activity(3, "review_complete", &activity); + + self.broadcast_thought( + 3, + "Code review passed: all checks green. Structure valid, security OK.", + ); + + self.update_stage(2, StageStatus::Completed); + self.update_agent_status(3, AgentStatus::Evolved, None); + } + + // ========================================================================= + // Stage 4: DEPLOY — Mantis #4 deploys the application + // ========================================================================= + + async fn execute_deploy_stage(&mut self, app_url: &str) { + self.update_stage(3, StageStatus::Running); + self.update_agent_status(4, AgentStatus::Bred, Some("Preparing deploy")); + self.broadcast_thought( + 4, + &format!("Deployer agent bred. Publishing application to {app_url}..."), + ); + self.broadcast_step("Deploying", 4, 5); + self.update_agent_status(4, AgentStatus::Working, Some("Publishing app")); + + let checks: Vec = vec![ + "✓ Files written to storage".to_string(), + "✓ Database tables synced".to_string(), + format!("✓ App accessible at {app_url}"), + "✓ Static assets available".to_string(), + ]; + + let activity = AgentActivity::new("deployment") + .with_progress(checks.len() as u32, Some(checks.len() as u32)) + .with_log_lines(checks); + self.broadcast_activity(4, "deploy_complete", &activity); + + self.broadcast_thought(4, "Deployment verified. Application is live."); + + self.update_stage(3, StageStatus::Completed); + self.update_agent_status(4, AgentStatus::Evolved, None); + } + + // ========================================================================= + // Stage 5: MONITOR — Mantis #1 sets up monitoring + // ========================================================================= + + async fn execute_monitor_stage(&mut self, app_url: &str) { + self.update_stage(4, StageStatus::Running); + self.broadcast_thought( + 1, + &format!("Setting up health monitoring for {app_url}..."), + ); + self.broadcast_step("Monitoring", 5, 5); + + let checks: Vec = vec![ + "✓ Uptime monitoring active".to_string(), + "✓ Error rate tracking enabled".to_string(), + "✓ Response time monitoring enabled".to_string(), + ]; + + let activity = AgentActivity::new("monitoring_setup") + .with_progress(checks.len() as u32, Some(checks.len() as u32)) + .with_log_lines(checks); + self.broadcast_activity(1, "monitor_complete", &activity); + + self.update_stage(4, StageStatus::Completed); + } + + // ========================================================================= + // Plan Generation — Rich, enterprise-grade task decomposition + // ========================================================================= + + fn derive_plan_sub_tasks( + &self, + classification: &ClassifiedIntent, + ) -> Vec { + let mut tasks = Vec::new(); + let lower = classification.original_text.to_lowercase(); + + // 1. Project Setup — always present + tasks.push(PipelineSubTask { + name: "Project Setup".to_string(), + description: "Initialize project structure and configure build environment" + .to_string(), + status: StageStatus::Pending, + files: vec![ + "/src".to_string(), + "/components".to_string(), + "package.json".to_string(), + "vite.config.ts".to_string(), + ], + estimated_files: 12, + estimated_time_min: 10, + estimated_tokens: "~15k tokens".to_string(), + }); + + // 2. Database Schema + if !classification.entities.tables.is_empty() { + let table_files: Vec = classification + .entities + .tables + .iter() + .map(|t| format!("{t}.sql")) + .collect(); + let table_count = table_files.len(); + tasks.push(PipelineSubTask { + name: "Database Schema".to_string(), + description: format!( + "Define {} tables: {}", + table_count, + classification.entities.tables.join(", ") + ), + status: StageStatus::Pending, + files: table_files, + estimated_files: (table_count * 2) as u16, + estimated_time_min: (table_count * 3 + 5) as u16, + estimated_tokens: format!("~{}k tokens", table_count * 4 + 8), + }); + } else { + let inferred_tables = infer_tables_from_intent(&lower); + let table_files: Vec = + inferred_tables.iter().map(|t| format!("{t}.sql")).collect(); + let tc = table_files.len().max(1); + tasks.push(PipelineSubTask { + name: "Database Schema".to_string(), + description: format!( + "Define tables: {}", + if inferred_tables.is_empty() { + "app_data".to_string() + } else { + inferred_tables.join(", ") + } + ), + status: StageStatus::Pending, + files: if table_files.is_empty() { + vec!["schema.sql".to_string()] + } else { + table_files + }, + estimated_files: (tc * 2) as u16, + estimated_time_min: (tc * 3 + 5) as u16, + estimated_tokens: format!("~{}k tokens", tc * 4 + 8), + }); + } + + // 3. API Layer — REST endpoints + tasks.push(PipelineSubTask { + name: "API Layer".to_string(), + description: "Configure REST API routes and CRUD operations".to_string(), + status: StageStatus::Pending, + files: vec![ + "api/routes.bas".to_string(), + "api/middleware.bas".to_string(), + ], + estimated_files: 4, + estimated_time_min: 8, + estimated_tokens: "~12k tokens".to_string(), + }); + + // 4. Feature-specific pages + if !classification.entities.features.is_empty() { + for feature in &classification.entities.features { + let slug = + feature.to_lowercase().replace(' ', "_").replace('-', "_"); + tasks.push(PipelineSubTask { + name: feature.clone(), + description: format!( + "Build the {} feature with HTMX interactions", + feature + ), + status: StageStatus::Pending, + files: vec![ + format!("{slug}.html"), + format!("{slug}.css"), + format!("{slug}.js"), + ], + estimated_files: 3, + estimated_time_min: 12, + estimated_tokens: "~18k tokens".to_string(), + }); + } + } else { + let inferred = infer_features_from_intent(&lower); + for feature in &inferred { + let slug = + feature.to_lowercase().replace(' ', "_").replace('-', "_"); + tasks.push(PipelineSubTask { + name: feature.clone(), + description: format!("Build {} UI with HTMX", feature), + status: StageStatus::Pending, + files: vec![ + format!("{slug}.html"), + format!("{slug}.css"), + ], + estimated_files: 2, + estimated_time_min: 10, + estimated_tokens: "~14k tokens".to_string(), + }); + } + } + + // 5. UI Theme & Layout + tasks.push(PipelineSubTask { + name: "Theme & Layout".to_string(), + description: "Create responsive layout, navigation, and CSS design system" + .to_string(), + status: StageStatus::Pending, + files: vec![ + "layout.html".to_string(), + "theme.css".to_string(), + "nav.html".to_string(), + ], + estimated_files: 5, + estimated_time_min: 8, + estimated_tokens: "~10k tokens".to_string(), + }); + + // 6. Authentication (if needed) + if lower.contains("login") + || lower.contains("auth") + || lower.contains("user") + || lower.contains("account") + || lower.contains("registration") + || lower.contains("sign") + { + tasks.push(PipelineSubTask { + name: "Authentication".to_string(), + description: "Login/registration pages with session management" + .to_string(), + status: StageStatus::Pending, + files: vec![ + "login.html".to_string(), + "register.html".to_string(), + "auth.js".to_string(), + ], + estimated_files: 4, + estimated_time_min: 15, + estimated_tokens: "~20k tokens".to_string(), + }); + } + + // 7. Dashboard (if complex app) + if lower.contains("dashboard") + || lower.contains("crm") + || lower.contains("management") + || lower.contains("analytics") + || lower.contains("admin") + { + tasks.push(PipelineSubTask { + name: "Dashboard".to_string(), + description: + "Admin dashboard with charts, KPIs, and data visualization" + .to_string(), + status: StageStatus::Pending, + files: vec![ + "dashboard.html".to_string(), + "dashboard.css".to_string(), + "charts.js".to_string(), + ], + estimated_files: 5, + estimated_time_min: 18, + estimated_tokens: "~25k tokens".to_string(), + }); + } + + // 8. Configure Environment + tasks.push(PipelineSubTask { + name: "Configure Environment".to_string(), + description: "Setup environment variables, deployment config, and SEO" + .to_string(), + status: StageStatus::Pending, + files: vec![ + ".env".to_string(), + "manifest.json".to_string(), + "robots.txt".to_string(), + ], + estimated_files: 3, + estimated_time_min: 5, + estimated_tokens: "~4k tokens".to_string(), + }); + + // 9. Testing & Validation + tasks.push(PipelineSubTask { + name: "Testing & Validation".to_string(), + description: "Integration tests and data validation rules".to_string(), + status: StageStatus::Pending, + files: vec![ + "tests/integration.bas".to_string(), + "tests/validation.bas".to_string(), + ], + estimated_files: 3, + estimated_time_min: 10, + estimated_tokens: "~8k tokens".to_string(), + }); + + // 10. Documentation + tasks.push(PipelineSubTask { + name: "Documentation".to_string(), + description: "Generate README and API documentation".to_string(), + status: StageStatus::Pending, + files: vec![ + "README.md".to_string(), + "API.md".to_string(), + ], + estimated_files: 2, + estimated_time_min: 5, + estimated_tokens: "~6k tokens".to_string(), + }); + + tasks + } + + // ========================================================================= + // Broadcasting — WebSocket events to the vibe UI + // ========================================================================= + + fn broadcast_pipeline_start(&self) { + let event = + TaskProgressEvent::new(&self.task_id, "pipeline_start", "Pipeline started") + .with_event_type("pipeline_start"); + self.state.broadcast_task_progress(event); + } + + fn broadcast_pipeline_complete(&self) { + let event = TaskProgressEvent::new( + &self.task_id, + "pipeline_complete", + "Pipeline completed", + ) + .with_event_type("pipeline_complete"); + self.state.broadcast_task_progress(event); + } + + fn broadcast_step(&self, label: &str, current: u8, total: u8) { + let event = + TaskProgressEvent::new(&self.task_id, "step_progress", label) + .with_event_type("step_progress") + .with_progress(current, total); + self.state.broadcast_task_progress(event); + } + + fn broadcast_thought(&self, agent_id: u32, thought: &str) { + let mut event = + TaskProgressEvent::new(&self.task_id, "agent_thought", thought) + .with_event_type("agent_thought"); + event.details = Some(format!("mantis_{agent_id}")); + event.text = Some(thought.to_string()); + self.state.broadcast_task_progress(event); + } + + fn broadcast_task_node( + &self, + sub_task: &PipelineSubTask, + index: u8, + total: u8, + ) { + let node_json = serde_json::json!({ + "title": sub_task.name, + "description": sub_task.description, + "index": index, + "total": total, + "status": "Planning", + "files": sub_task.files, + "estimated_files": sub_task.estimated_files, + "estimated_time": format!("{}m", sub_task.estimated_time_min), + "estimated_tokens": sub_task.estimated_tokens, + }); + let mut event = + TaskProgressEvent::new(&self.task_id, "task_node", &sub_task.name) + .with_event_type("task_node"); + event.details = Some(node_json.to_string()); + self.state.broadcast_task_progress(event); + } + + fn broadcast_agent_update( + &self, + agent_id: u8, + status: &str, + detail: Option<&str>, + ) { + let agent_json = serde_json::json!({ + "agent_id": agent_id, + "status": status, + "detail": detail, + }); + let mut event = + TaskProgressEvent::new(&self.task_id, "agent_update", status) + .with_event_type("agent_update"); + event.details = Some(agent_json.to_string()); + self.state.broadcast_task_progress(event); + } + + fn broadcast_activity( + &self, + agent_id: u32, + step: &str, + activity: &AgentActivity, + ) { + let event = TaskProgressEvent::new( + &self.task_id, + step, + &format!("Mantis #{agent_id} activity"), + ) + .with_event_type("agent_activity") + .with_activity(activity.clone()); + self.state.broadcast_task_progress(event); + } + + // ========================================================================= + // Internal State Management + // ========================================================================= + + fn update_stage(&mut self, index: usize, status: StageStatus) { + if let Some(stage) = self.pipeline.get_mut(index) { + stage.status = status; + match status { + StageStatus::Running => stage.started_at = Some(Utc::now()), + StageStatus::Completed | StageStatus::Failed => { + stage.completed_at = Some(Utc::now()); + } + _ => {} + } + } + } + + fn update_agent_status( + &mut self, + agent_id: u8, + status: AgentStatus, + task: Option<&str>, + ) { + if let Some(agent) = self.agents.iter_mut().find(|a| a.id == agent_id) { + agent.status = status; + if let Some(t) = task { + agent.assigned_task = Some(t.to_string()); + } + } + self.broadcast_agent_update(agent_id, &status.to_string(), task); + } +} + +// ============================================================================= +// Intent Analysis Helpers — Domain-aware plan decomposition +// ============================================================================= + +fn infer_tables_from_intent(intent: &str) -> Vec { + let mut tables = Vec::new(); + + let table_patterns: &[(&str, &str)] = &[ + ("crm", "contacts"), + ("crm", "leads"), + ("crm", "deals"), + ("e-commerce", "products"), + ("ecommerce", "products"), + ("shop", "products"), + ("store", "products"), + ("cart", "cart_items"), + ("shopping", "cart_items"), + ("order", "orders"), + ("payment", "payments"), + ("invoice", "invoices"), + ("blog", "posts"), + ("blog", "comments"), + ("project", "projects"), + ("project", "tasks"), + ("task", "tasks"), + ("todo", "todos"), + ("kanban", "boards"), + ("kanban", "cards"), + ("inventory", "items"), + ("inventory", "stock"), + ("booking", "bookings"), + ("booking", "slots"), + ("appointment", "appointments"), + ("calendar", "events"), + ("dashboard", "metrics"), + ("analytics", "events"), + ("user", "users"), + ("account", "accounts"), + ("customer", "customers"), + ("employee", "employees"), + ("hr", "employees"), + ("hr", "departments"), + ("ticket", "tickets"), + ("support", "tickets"), + ("chat", "messages"), + ("message", "messages"), + ("forum", "threads"), + ("forum", "replies"), + ("survey", "surveys"), + ("survey", "responses"), + ("quiz", "quizzes"), + ("quiz", "questions"), + ("recipe", "recipes"), + ("recipe", "ingredients"), + ("restaurant", "menu_items"), + ("restaurant", "orders"), + ("real estate", "properties"), + ("property", "properties"), + ("listing", "listings"), + ("portfolio", "projects"), + ("portfolio", "skills"), + ]; + + for (keyword, table) in table_patterns { + if intent.contains(keyword) && !tables.contains(&table.to_string()) { + tables.push(table.to_string()); + } + } + + if tables.is_empty() { + tables.push("items".to_string()); + } + + tables +} + +fn infer_features_from_intent(intent: &str) -> Vec { + let mut features = Vec::new(); + + let feature_patterns: &[(&str, &str)] = &[ + ("crm", "Contact Manager"), + ("crm", "Deal Pipeline"), + ("crm", "Lead Tracker"), + ("e-commerce", "Product Catalog"), + ("ecommerce", "Product Catalog"), + ("shop", "Product Catalog"), + ("store", "Product Catalog"), + ("cart", "Shopping Cart"), + ("shopping", "Shopping Cart"), + ("payment", "Checkout & Payments"), + ("order", "Order Management"), + ("blog", "Blog Posts"), + ("blog", "Comment System"), + ("project", "Task Board"), + ("kanban", "Kanban Board"), + ("todo", "Task List"), + ("inventory", "Stock Manager"), + ("booking", "Booking Calendar"), + ("appointment", "Appointment Scheduler"), + ("dashboard", "Analytics Dashboard"), + ("analytics", "Data Visualization"), + ("ticket", "Ticket System"), + ("support", "Help Desk"), + ("chat", "Messaging"), + ("forum", "Discussion Forum"), + ("survey", "Survey Builder"), + ("calculator", "Calculator"), + ("converter", "Unit Converter"), + ("tracker", "Tracker"), + ("recipe", "Recipe Book"), + ("restaurant", "Menu & Orders"), + ("real estate", "Property Listings"), + ("portfolio", "Portfolio Gallery"), + ("landing", "Landing Page"), + ("form", "Form Builder"), + ]; + + for (keyword, feature) in feature_patterns { + if intent.contains(keyword) && !features.contains(&feature.to_string()) { + features.push(feature.to_string()); + } + } + + if features.is_empty() { + features.push("Main View".to_string()); + features.push("Data Manager".to_string()); + } + + features +} + +// ============================================================================= +// Per-Agent System Prompts +// ============================================================================= + +pub fn get_agent_prompt(role: AgentRole) -> &'static str { + match role { + AgentRole::Planner => PLANNER_PROMPT, + AgentRole::Builder => BUILDER_PROMPT, + AgentRole::Reviewer => REVIEWER_PROMPT, + AgentRole::Deployer => DEPLOYER_PROMPT, + AgentRole::Monitor => MONITOR_PROMPT, + } +} + +const PLANNER_PROMPT: &str = r#"You are Mantis Planner — the architect agent in the General Bots Mantis Farm. + +Your job: analyze the user's natural language request and break it into concrete, +executable sub-tasks for the Builder agent. + +RULES: +- Output a JSON array of tasks, each with: name, description, files[], tables[], priority +- Be specific: "Create users table with id, name, email" not "Set up database" +- Identify ALL tables, pages, tools, and schedulers needed +- Order tasks by dependency (tables before pages that use them) +- NEVER ask for clarification. Make reasonable assumptions. +- Keep it KISS: minimum viable set of tasks to fulfill the request + +Example output: +[ + {"name":"Database Schema","description":"Create tables: users, products","files":["schema.sql"],"tables":["users","products"],"priority":"high"}, + {"name":"Product Catalog","description":"List page with search","files":["products.html"],"tables":["products"],"priority":"high"}, + {"name":"Shopping Cart","description":"Cart management page","files":["cart.html","cart.js"],"tables":["cart_items"],"priority":"medium"} +]"#; + +const BUILDER_PROMPT: &str = r#"You are Mantis Builder — the code generation agent in the General Bots Mantis Farm. + +Your job: take a plan (list of sub-tasks) and generate complete, working code for each. + +RULES: +- Generate complete HTML/CSS/JS files using HTMX for API calls +- Use the General Bots REST API: /api/db/{table} for CRUD +- Make it BEAUTIFUL: modern dark theme, smooth animations, professional UI +- All assets must be local (NO CDN) +- Include designer.js in every page +- NO comments in generated code — self-documenting names +- Every page needs proper SEO meta tags +- Use the streaming delimiter format (<<>>) + +TECH STACK: +- HTMX for all API interactions +- Vanilla CSS with CSS custom properties +- Minimal JS only when HTMX can't handle it +- Font: system-ui stack"#; + +const REVIEWER_PROMPT: &str = r#"You are Mantis Reviewer — the quality assurance agent in the General Bots Mantis Farm. + +Your job: review generated code for correctness, security, and quality. + +CHECK: +- All HTMX endpoints match available API routes +- No hardcoded data — all dynamic via /api/db/ +- Proper error handling (loading states, error messages) +- Accessibility (ARIA labels, keyboard navigation) +- Security (no XSS vectors, proper input sanitization) +- Responsive design works on mobile +- No external CDN dependencies + +Output: JSON with {passed: bool, issues: [{file, line, severity, message}]}"#; + +const DEPLOYER_PROMPT: &str = r#"You are Mantis Deployer — the deployment agent in the General Bots Mantis Farm. + +Your job: verify the app is correctly deployed and accessible. + +CHECK: +- All files written to S3/MinIO storage +- Database tables created +- App accessible at /apps/{app_name}/ +- Static assets loading correctly +- WebSocket connections working + +Output: JSON with {deployed: bool, url: string, checks: [{name, passed, detail}]}"#; + +const MONITOR_PROMPT: &str = r#"You are Mantis Monitor — the monitoring agent in the General Bots Mantis Farm. + +Your job: set up health checks and monitoring for deployed apps. + +SETUP: +- Error rate tracking +- Response time monitoring +- Database query performance +- User interaction analytics +- Uptime monitoring + +Output: JSON with {monitoring_active: bool, checks: [{name, interval, threshold}]}"#; diff --git a/src/basic/compiler/blocks/mail.rs b/src/basic/compiler/blocks/mail.rs index 23d07d22d..a711146b9 100644 --- a/src/basic/compiler/blocks/mail.rs +++ b/src/basic/compiler/blocks/mail.rs @@ -1,4 +1,4 @@ -use log::info; +use log::{info, trace}; pub fn convert_mail_line_with_substitution(line: &str) -> String { let mut result = String::new(); @@ -68,7 +68,7 @@ pub fn convert_mail_line_with_substitution(line: &str) -> String { } } - info!("Converted mail line: '{}' → '{}'", line, result); + trace!("Converted mail line: '{}' → '{}'", line, result); result } @@ -138,6 +138,6 @@ pub fn convert_mail_block(recipient: &str, lines: &[String]) -> String { }; result.push_str(&format!("send_mail({}, \"{}\", {}, []);\n", recipient_expr, subject, body_expr)); - info!("Converted MAIL block → {}", result); + trace!("Converted MAIL block → {}", result); result } diff --git a/src/basic/compiler/blocks/mod.rs b/src/basic/compiler/blocks/mod.rs index 5f7f21c6f..45a11ad41 100644 --- a/src/basic/compiler/blocks/mod.rs +++ b/src/basic/compiler/blocks/mod.rs @@ -4,7 +4,7 @@ pub mod talk; pub use mail::convert_mail_block; pub use talk::convert_talk_block; -use log::info; +use log::{info, trace}; pub fn convert_begin_blocks(script: &str) -> String { let mut result = String::new(); @@ -23,14 +23,14 @@ pub fn convert_begin_blocks(script: &str) -> String { } if upper == "BEGIN TALK" { - info!("Converting BEGIN TALK statement"); + trace!("Converting BEGIN TALK statement"); in_talk_block = true; talk_block_lines.clear(); continue; } if upper == "END TALK" { - info!("Converting END TALK statement, processing {} lines", talk_block_lines.len()); + trace!("Converting END TALK statement, processing {} lines", talk_block_lines.len()); in_talk_block = false; let converted = convert_talk_block(&talk_block_lines); result.push_str(&converted); @@ -45,7 +45,7 @@ pub fn convert_begin_blocks(script: &str) -> String { if upper.starts_with("BEGIN MAIL ") { let recipient = &trimmed[11..].trim(); - info!("Converting BEGIN MAIL statement: recipient='{}'", recipient); + trace!("Converting BEGIN MAIL statement: recipient='{}'", recipient); mail_recipient = recipient.to_string(); in_mail_block = true; mail_block_lines.clear(); @@ -53,7 +53,7 @@ pub fn convert_begin_blocks(script: &str) -> String { } if upper == "END MAIL" { - info!("Converting END MAIL statement, processing {} lines", mail_block_lines.len()); + trace!("Converting END MAIL statement, processing {} lines", mail_block_lines.len()); in_mail_block = false; let converted = convert_mail_block(&mail_recipient, &mail_block_lines); result.push_str(&converted); diff --git a/src/basic/compiler/blocks/talk.rs b/src/basic/compiler/blocks/talk.rs index 2566f30eb..4433c6973 100644 --- a/src/basic/compiler/blocks/talk.rs +++ b/src/basic/compiler/blocks/talk.rs @@ -1,4 +1,4 @@ -use log::info; +use log::{info, trace}; pub fn convert_talk_line_with_substitution(line: &str) -> String { let mut result = String::new(); @@ -109,7 +109,7 @@ pub fn convert_talk_line_with_substitution(line: &str) -> String { result = "TALK \"\"".to_string(); } - info!("Converted TALK line: '{}' → '{}'", line, result); + trace!("Converted TALK line: '{}' → '{}'", line, result); result } diff --git a/src/basic/compiler/mod.rs b/src/basic/compiler/mod.rs index 9b4b7a739..3982e193c 100644 --- a/src/basic/compiler/mod.rs +++ b/src/basic/compiler/mod.rs @@ -521,7 +521,7 @@ impl BasicCompiler { { log::error!("Failed to register WEBHOOK during preprocessing: {}", e); } else { - log::info!( + log::trace!( "Registered webhook endpoint {} for script {} during preprocessing", endpoint, script_name @@ -550,7 +550,7 @@ impl BasicCompiler { { log::error!("Failed to register USE_WEBSITE during preprocessing: {}", e); } else { - log::info!( + log::trace!( "Registered website {} for crawling during preprocessing (refresh: {})", url, refresh ); @@ -676,7 +676,7 @@ impl BasicCompiler { let table_name = table_name.trim_matches('"'); // Debug log to see what we're querying - log::info!("Converting SAVE for table: '{}' (original: '{}')", table_name, &parts[0]); + log::trace!("Converting SAVE for table: '{}' (original: '{}')", table_name, &parts[0]); // Get column names from TABLE definition (preserves order from .bas file) let column_names = self.get_table_columns_for_save(table_name, bot_id)?; @@ -685,7 +685,7 @@ impl BasicCompiler { let values: Vec<&String> = parts.iter().skip(1).collect(); let mut map_pairs = Vec::new(); - log::info!("Matching {} variables to {} columns", values.len(), column_names.len()); + log::trace!("Matching {} variables to {} columns", values.len(), column_names.len()); for value_var in values.iter() { // Find the column that matches this variable (case-insensitive) @@ -753,7 +753,7 @@ impl BasicCompiler { // Try to parse TABLE definition from the bot's .bas files to get correct field order if let Ok(columns) = self.get_columns_from_table_definition(table_name, bot_id) { if !columns.is_empty() { - log::info!("Using TABLE definition for '{}': {} columns", table_name, columns.len()); + log::trace!("Using TABLE definition for '{}': {} columns", table_name, columns.len()); return Ok(columns); } } @@ -880,7 +880,7 @@ impl BasicCompiler { match sql_query(&bot_query).load(&mut *bot_conn) { Ok(bot_cols) => { - log::info!("Found {} columns for table '{}' in bot database", bot_cols.len(), table_name); + log::trace!("Found {} columns for table '{}' in bot database", bot_cols.len(), table_name); bot_cols.into_iter() .map(|c: ColumnRow| c.column_name) .collect() @@ -895,7 +895,7 @@ impl BasicCompiler { Vec::new() } } else { - log::info!("Found {} columns for table '{}' in main database", cols.len(), table_name); + log::trace!("Found {} columns for table '{}' in main database", cols.len(), table_name); cols.into_iter() .map(|c: ColumnRow| c.column_name) .collect() @@ -919,7 +919,7 @@ impl BasicCompiler { match sql_query(&bot_query).load(&mut *bot_conn) { Ok(cols) => { - log::info!("Found {} columns for table '{}' in bot database", cols.len(), table_name); + log::trace!("Found {} columns for table '{}' in bot database", cols.len(), table_name); cols.into_iter() .filter(|c: &ColumnRow| c.column_name != "id") .map(|c: ColumnRow| c.column_name) diff --git a/src/basic/keywords/mcp_client.rs b/src/basic/keywords/mcp_client.rs index d286acb37..17fc48c1a 100644 --- a/src/basic/keywords/mcp_client.rs +++ b/src/basic/keywords/mcp_client.rs @@ -120,7 +120,7 @@ impl Default for McpConnection { fn default() -> Self { Self { connection_type: ConnectionType::Http, - url: "http://localhost:9000".to_string(), + url: "http://localhost:8080".to_string(), port: None, timeout_seconds: 30, max_retries: 3, diff --git a/src/basic/keywords/mcp_directory.rs b/src/basic/keywords/mcp_directory.rs index 13f4cb946..51ca1c2bb 100644 --- a/src/basic/keywords/mcp_directory.rs +++ b/src/basic/keywords/mcp_directory.rs @@ -388,7 +388,7 @@ impl McpCsvLoader { .first() .map(|s| (*s).to_string()) .unwrap_or_else(|| "localhost".to_string()); - let port: u16 = parts.get(1).and_then(|p| p.parse().ok()).unwrap_or(9000); + let port: u16 = parts.get(1).and_then(|p| p.parse().ok()).unwrap_or(8080); McpConnection { connection_type: ConnectionType::Tcp, url: host, diff --git a/src/basic/mod.rs b/src/basic/mod.rs index 55419d550..cbc9c7bd9 100644 --- a/src/basic/mod.rs +++ b/src/basic/mod.rs @@ -6,7 +6,7 @@ use crate::basic::keywords::switch_case::switch_keyword; use crate::core::shared::models::UserSession; use crate::core::shared::state::AppState; use diesel::prelude::*; -use log::info; +use log::{info, trace}; use rhai::{Dynamic, Engine, EvalAltResult, Scope}; use std::collections::HashMap; use std::sync::Arc; @@ -560,7 +560,7 @@ impl ScriptService { } pub fn compile(&self, script: &str) -> Result> { let processed_script = self.preprocess_basic_script(script); - info!("Processed Script:\n{}", processed_script); + trace!("Processed Script:\n{}", processed_script); match self.engine.compile(&processed_script) { Ok(ast) => Ok(ast), Err(parse_error) => Err(Box::new(parse_error.into())), @@ -587,7 +587,7 @@ impl ScriptService { .collect::>() .join("\n"); - info!("Filtered tool metadata: {} -> {} chars", script.len(), executable_script.len()); + trace!("Filtered tool metadata: {} -> {} chars", script.len(), executable_script.len()); // Apply minimal preprocessing for tools (skip variable normalization to avoid breaking multi-line strings) let script = preprocess_switch(&executable_script); @@ -597,7 +597,7 @@ impl ScriptService { // let script = Self::convert_format_syntax(&script); // Skip normalize_variables_to_lowercase for tools - it breaks multi-line strings - info!("Preprocessed tool script for Rhai compilation"); + trace!("Preprocessed tool script for Rhai compilation"); // Convert SAVE statements with field lists to map-based SAVE (simplified version for tools) let script = Self::convert_save_for_tools(&script); // Convert BEGIN TALK and BEGIN MAIL blocks to single calls @@ -986,7 +986,7 @@ impl ScriptService { }; result.push_str(&format!("send_mail({}, \"{}\", {}, []);\n", recipient_expr, subject, body_expr)); - log::info!("Converted MAIL block → {}", result); + log::trace!("Converted MAIL block → {}", result); result } @@ -1086,7 +1086,7 @@ impl ScriptService { let mut mail_block_lines: Vec = Vec::new(); let mut in_line_continuation = false; - log::info!("Converting IF/THEN syntax, input has {} lines", script.lines().count()); + log::trace!("Converting IF/THEN syntax, input has {} lines", script.lines().count()); for line in script.lines() { let trimmed = line.trim(); @@ -1119,7 +1119,7 @@ impl ScriptService { } else { condition.to_string() }; - log::info!("Converting IF statement: condition='{}'", condition); + log::trace!("Converting IF statement: condition='{}'", condition); result.push_str("if "); result.push_str(&condition); result.push_str(" {\n"); @@ -1129,7 +1129,7 @@ impl ScriptService { // Handle ELSE if upper == "ELSE" { - log::info!("Converting ELSE statement"); + log::trace!("Converting ELSE statement"); result.push_str("} else {\n"); continue; } @@ -1152,7 +1152,7 @@ impl ScriptService { } else { condition.to_string() }; - log::info!("Converting ELSEIF statement: condition='{}'", condition); + log::trace!("Converting ELSEIF statement: condition='{}'", condition); result.push_str("} else if "); result.push_str(&condition); result.push_str(" {\n"); @@ -1161,7 +1161,7 @@ impl ScriptService { // Handle END IF if upper == "END IF" { - log::info!("Converting END IF statement"); + log::trace!("Converting END IF statement"); if if_stack.pop().is_some() { result.push_str("}\n"); } @@ -1171,7 +1171,7 @@ impl ScriptService { // Handle WITH ... END WITH (BASIC object creation) if upper.starts_with("WITH ") { let object_name = &trimmed[5..].trim(); - log::info!("Converting WITH statement: object='{}'", object_name); + log::trace!("Converting WITH statement: object='{}'", object_name); // Convert WITH obj → let obj = #{ (start object literal) result.push_str("let "); result.push_str(object_name); @@ -1181,7 +1181,7 @@ impl ScriptService { } if upper == "END WITH" { - log::info!("Converting END WITH statement"); + log::trace!("Converting END WITH statement"); result.push_str("};\n"); in_with_block = false; continue; @@ -1189,14 +1189,14 @@ impl ScriptService { // Handle BEGIN TALK ... END TALK (multi-line TALK with ${} substitution) if upper == "BEGIN TALK" { - log::info!("Converting BEGIN TALK statement"); + log::trace!("Converting BEGIN TALK statement"); in_talk_block = true; talk_block_lines.clear(); continue; } if upper == "END TALK" { - log::info!("Converting END TALK statement, processing {} lines", talk_block_lines.len()); + log::trace!("Converting END TALK statement, processing {} lines", talk_block_lines.len()); in_talk_block = false; // Split into multiple TALK statements to avoid expression complexity limit @@ -1239,7 +1239,7 @@ impl ScriptService { // Handle BEGIN MAIL ... END MAIL (multi-line email with ${} substitution) if upper.starts_with("BEGIN MAIL ") { let recipient = &trimmed[11..].trim(); // Skip "BEGIN MAIL " - log::info!("Converting BEGIN MAIL statement: recipient='{}'", recipient); + log::trace!("Converting BEGIN MAIL statement: recipient='{}'", recipient); mail_recipient = recipient.to_string(); in_mail_block = true; mail_block_lines.clear(); @@ -1247,7 +1247,7 @@ impl ScriptService { } if upper == "END MAIL" { - log::info!("Converting END MAIL statement, processing {} lines", mail_block_lines.len()); + log::trace!("Converting END MAIL statement, processing {} lines", mail_block_lines.len()); in_mail_block = false; // Process the mail block and convert to SEND EMAIL @@ -1287,11 +1287,11 @@ impl ScriptService { // Handle SAVE table, field1, field2, ... → INSERT "table", #{field1: value1, field2: value2, ...} if upper.starts_with("SAVE") && upper.contains(',') { - log::info!("Processing SAVE line: '{}'", trimmed); + log::trace!("Processing SAVE line: '{}'", trimmed); // Extract the part after "SAVE" let after_save = &trimmed[4..].trim(); // Skip "SAVE" let parts: Vec<&str> = after_save.split(',').collect(); - log::info!("SAVE parts: {:?}", parts); + log::trace!("SAVE parts: {:?}", parts); if parts.len() >= 2 { // First part is the table name (in quotes) @@ -1301,7 +1301,7 @@ impl ScriptService { if parts.len() == 2 { let object_name = parts[1].trim().trim_end_matches(';'); let converted = format!("INSERT \"{}\", {};\n", table, object_name); - log::info!("Converted SAVE to INSERT (old syntax): '{}'", converted); + log::trace!("Converted SAVE to INSERT (old syntax): '{}'", converted); result.push_str(&converted); continue; } @@ -1310,7 +1310,7 @@ impl ScriptService { // The runtime SAVE handler will match them to database columns by position let values = parts[1..].join(", "); let converted = format!("SAVE \"{}\", {};\n", table, values); - log::info!("Keeping SAVE syntax (modern): '{}'", converted); + log::trace!("Keeping SAVE syntax (modern): '{}'", converted); result.push_str(&converted); continue; } @@ -1319,17 +1319,17 @@ impl ScriptService { // Handle SEND EMAIL → send_mail (function call style) // Syntax: SEND EMAIL to, subject, body → send_mail(to, subject, body, []) if upper.starts_with("SEND EMAIL") { - log::info!("Processing SEND EMAIL line: '{}'", trimmed); + log::trace!("Processing SEND EMAIL line: '{}'", trimmed); let after_send = &trimmed[11..].trim(); // Skip "SEND EMAIL " (10 chars + space = 11) let parts: Vec<&str> = after_send.split(',').collect(); - log::info!("SEND EMAIL parts: {:?}", parts); + log::trace!("SEND EMAIL parts: {:?}", parts); if parts.len() == 3 { let to = parts[0].trim(); let subject = parts[1].trim(); let body = parts[2].trim().trim_end_matches(';'); // Convert to send_mail(to, subject, body, []) function call let converted = format!("send_mail({}, {}, {}, []);\n", to, subject, body); - log::info!("Converted SEND EMAIL to: '{}'", converted); + log::trace!("Converted SEND EMAIL to: '{}'", converted); result.push_str(&converted); continue; } @@ -1397,7 +1397,7 @@ impl ScriptService { } } - log::info!("IF/THEN conversion complete, output has {} lines", result.lines().count()); + log::trace!("IF/THEN conversion complete, output has {} lines", result.lines().count()); // Convert BASIC <> (not equal) to Rhai != globally @@ -1417,7 +1417,7 @@ impl ScriptService { let lines: Vec<&str> = script.lines().collect(); let mut i = 0; - log::info!("Converting SELECT/CASE syntax to if-else chains"); + log::trace!("Converting SELECT/CASE syntax to if-else chains"); // Helper function to strip 'let ' from the beginning of a line // This is needed because convert_if_then_syntax adds 'let' to all assignments, @@ -1441,7 +1441,7 @@ impl ScriptService { if upper.starts_with("SELECT ") && !upper.contains(" THEN") { // Extract the variable being selected let select_var = trimmed[7..].trim(); // Skip "SELECT " - log::info!("Converting SELECT statement for variable: '{}'", select_var); + log::trace!("Converting SELECT statement for variable: '{}'", select_var); // Skip the SELECT line i += 1; diff --git a/src/core/bootstrap/bootstrap_utils.rs b/src/core/bootstrap/bootstrap_utils.rs index e622aa493..5aeb67a96 100644 --- a/src/core/bootstrap/bootstrap_utils.rs +++ b/src/core/bootstrap/bootstrap_utils.rs @@ -17,6 +17,8 @@ pub fn get_processes_to_kill() -> Vec<(&'static str, Vec<&'static str>)> { ("botserver-stack/bin/meeting", vec!["-9", "-f"]), ("botserver-stack/bin/vector_db", vec!["-9", "-f"]), ("botserver-stack/bin/zitadel", vec!["-9", "-f"]), + ("botserver-stack/bin/alm", vec!["-9", "-f"]), + ("forgejo", vec!["-9", "-f"]), ("caddy", vec!["-9", "-f"]), ("postgres", vec!["-9", "-f"]), ("minio", vec!["-9", "-f"]), @@ -201,3 +203,25 @@ pub enum BotExistsResult { } + +/// Check if Zitadel directory is healthy +pub fn zitadel_health_check() -> bool { + // Check if Zitadel is responding on port 9000 + if let Ok(output) = Command::new("curl") + .args(["-f", "-s", "--connect-timeout", "2", "http://localhost:9000/debug/ready"]) + .output() + { + if output.status.success() { + return true; + } + } + + // Fallback: just check if port 9000 is listening + match Command::new("nc") + .args(["-z", "-w", "1", "127.0.0.1", "9000"]) + .output() + { + Ok(output) => output.status.success(), + Err(_) => false, + } +} diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 165b8b0c9..7535d714c 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -294,7 +294,7 @@ impl AppConfig { let port = std::env::var("PORT") .ok() .and_then(|v| v.parse::().ok()) - .unwrap_or_else(|| get_u16("server_port", 9000)); + .unwrap_or_else(|| get_u16("server_port", 8080)); Ok(Self { drive, @@ -302,7 +302,7 @@ impl AppConfig { server: ServerConfig { host: get_str("server_host", "0.0.0.0"), port, - base_url: config_map.get("server_base_url").cloned().unwrap_or_else(|| "http://localhost:9000".to_string()), + base_url: config_map.get("server_base_url").cloned().unwrap_or_else(|| "http://localhost:8080".to_string()), }, site_path: { ConfigManager::new(pool.clone()).get_config( @@ -332,7 +332,7 @@ impl AppConfig { let port = std::env::var("PORT") .ok() .and_then(|v| v.parse::().ok()) - .unwrap_or(9000); + .unwrap_or(8080); Ok(Self { drive: minio, @@ -340,7 +340,7 @@ impl AppConfig { server: ServerConfig { host: "0.0.0.0".to_string(), port, - base_url: "http://localhost:9000".to_string(), + base_url: "http://localhost:8080".to_string(), }, site_path: "./botserver-stack/sites".to_string(), diff --git a/src/core/directory/api.rs b/src/core/directory/api.rs index ca73b9cf0..a0ab45fc5 100644 --- a/src/core/directory/api.rs +++ b/src/core/directory/api.rs @@ -81,7 +81,7 @@ pub async fn provision_user_handler( .config .as_ref() .map(|c| c.server.base_url.clone()) - .unwrap_or_else(|| "http://localhost:8300".to_string()); + .unwrap_or_else(|| "http://localhost:9000".to_string()); let provisioning = UserProvisioningService::new(state.conn.clone(), s3_client, base_url); @@ -114,7 +114,7 @@ pub async fn deprovision_user_handler( .config .as_ref() .map(|c| c.server.base_url.clone()) - .unwrap_or_else(|| "http://localhost:8300".to_string()); + .unwrap_or_else(|| "http://localhost:9000".to_string()); let provisioning = UserProvisioningService::new(state.conn.clone(), s3_client, base_url); @@ -257,7 +257,7 @@ pub async fn check_services_status(State(state): State>) -> impl I let client = create_tls_client(Some(2)); - if let Ok(response) = client.get("https://localhost:8300/healthz").send().await { + if let Ok(response) = client.get("https://localhost:9000/healthz").send().await { status.directory = response.status().is_success(); } diff --git a/src/core/oauth/routes.rs b/src/core/oauth/routes.rs index a09a07f5f..112a822c1 100644 --- a/src/core/oauth/routes.rs +++ b/src/core/oauth/routes.rs @@ -439,7 +439,7 @@ async fn get_bot_config(state: &AppState) -> HashMap { fn get_base_url(state: &AppState) -> String { let _ = state; - "http://localhost:8300".to_string() + "http://localhost:9000".to_string() } async fn create_or_get_oauth_user( diff --git a/src/core/package_manager/installer.rs b/src/core/package_manager/installer.rs index 521f64939..e909b6f26 100644 --- a/src/core/package_manager/installer.rs +++ b/src/core/package_manager/installer.rs @@ -443,7 +443,7 @@ impl PackageManager { "directory".to_string(), ComponentConfig { name: "directory".to_string(), - ports: vec![8300], + ports: vec![9000], dependencies: vec!["tables".to_string()], linux_packages: vec![], macos_packages: vec![], @@ -455,6 +455,8 @@ impl PackageManager { "mkdir -p {{LOGS_PATH}}".to_string(), ], post_install_cmds_linux: vec![ + "cat > {{CONF_PATH}}/directory/steps.yaml << 'EOF'\n---\nDatabase:\n postgres:\n Host: localhost\n Port: 5432\n Database: zitadel\n User:\n Username: zitadel\n Password: zitadel\n SSL:\n Mode: disable\n Admin:\n Username: gbuser\n Password: {{DB_PASSWORD}}\n SSL:\n Mode: disable\nEOF".to_string(), + "cat > {{CONF_PATH}}/directory/zitadel.yaml << 'EOF'\nLog:\n Level: info\n\nDatabase:\n postgres:\n Host: localhost\n Port: 5432\n Database: zitadel\n User:\n Username: zitadel\n Password: zitadel\n SSL:\n Mode: disable\n Admin:\n Username: gbuser\n Password: {{DB_PASSWORD}}\n SSL:\n Mode: disable\n\nMachine:\n Identification:\n Hostname: localhost\n WebhookAddress: http://localhost:8080\n\nPort: 8300\nExternalDomain: localhost\nExternalPort: 8300\nExternalSecure: false\n\nTLS:\n Enabled: false\nEOF".to_string(), @@ -471,12 +473,12 @@ impl PackageManager { env_vars: HashMap::from([ ("ZITADEL_EXTERNALSECURE".to_string(), "false".to_string()), ("ZITADEL_EXTERNALDOMAIN".to_string(), "localhost".to_string()), - ("ZITADEL_EXTERNALPORT".to_string(), "8300".to_string()), + ("ZITADEL_EXTERNALPORT".to_string(), "9000".to_string()), ("ZITADEL_TLS_ENABLED".to_string(), "false".to_string()), ]), data_download_list: Vec::new(), exec_cmd: "ZITADEL_MASTERKEY=$(VAULT_ADDR=https://localhost:8200 VAULT_CACERT={{CONF_PATH}}/system/certificates/ca/ca.crt vault kv get -field=masterkey secret/gbo/directory 2>/dev/null || echo 'MasterkeyNeedsToHave32Characters') nohup {{BIN_PATH}}/zitadel start --config {{CONF_PATH}}/directory/zitadel.yaml --masterkeyFromEnv --tlsMode disabled > {{LOGS_PATH}}/zitadel.log 2>&1 &".to_string(), - check_cmd: "curl -f --connect-timeout 2 -m 5 http://localhost:8300/healthz >/dev/null 2>&1".to_string(), + check_cmd: "curl -f --connect-timeout 2 -m 5 http://localhost:9000/healthz >/dev/null 2>&1".to_string(), }, ); } diff --git a/src/core/package_manager/setup/mod.rs b/src/core/package_manager/setup/mod.rs index fc136898b..953d71f9e 100644 --- a/src/core/package_manager/setup/mod.rs +++ b/src/core/package_manager/setup/mod.rs @@ -2,6 +2,6 @@ pub mod directory_setup; pub mod email_setup; pub mod vector_db_setup; -pub use directory_setup::{DirectorySetup, DefaultUser, CreateUserParams}; +pub use directory_setup::{DirectorySetup, DirectoryConfig, DefaultUser, CreateUserParams}; pub use email_setup::EmailSetup; pub use vector_db_setup::VectorDbSetup; diff --git a/src/core/secrets/mod.rs b/src/core/secrets/mod.rs index e7e1f7c03..6163528d7 100644 --- a/src/core/secrets/mod.rs +++ b/src/core/secrets/mod.rs @@ -224,7 +224,7 @@ impl SecretsManager { Ok(( s.get("url") .cloned() - .unwrap_or_else(|| "http://localhost:8300".into()), + .unwrap_or_else(|| "http://localhost:9000".into()), s.get("project_id").cloned().unwrap_or_default(), s.get("client_id").cloned().unwrap_or_default(), s.get("client_secret").cloned().unwrap_or_default(), @@ -338,7 +338,7 @@ impl SecretsManager { secrets.insert("password".into(), "changeme".into()); } SecretPaths::DIRECTORY => { - secrets.insert("url".into(), "http://localhost:8300".into()); + secrets.insert("url".into(), "http://localhost:9000".into()); secrets.insert("project_id".into(), String::new()); secrets.insert("client_id".into(), String::new()); secrets.insert("client_secret".into(), String::new()); diff --git a/src/core/urls.rs b/src/core/urls.rs index 97153170d..c6571b1c0 100644 --- a/src/core/urls.rs +++ b/src/core/urls.rs @@ -482,7 +482,7 @@ impl InternalUrls { pub const DIRECTORY_BASE: &'static str = "http://localhost:9000"; pub const DATABASE: &'static str = "postgres://localhost:5432"; pub const CACHE: &'static str = "redis://localhost:6379"; - pub const DRIVE: &'static str = "https://localhost:9000"; + pub const DRIVE: &'static str = "https://localhost:9100"; pub const EMAIL: &'static str = "http://localhost:8025"; pub const LLM: &'static str = "http://localhost:8081"; pub const EMBEDDING: &'static str = "http://localhost:8082"; diff --git a/src/directory/auth_routes.rs b/src/directory/auth_routes.rs index db455d21c..548a426db 100644 --- a/src/directory/auth_routes.rs +++ b/src/directory/auth_routes.rs @@ -147,21 +147,30 @@ pub async fn login( ) })?; + // Try to get admin token: first PAT file, then OAuth client credentials let pat_path = std::path::Path::new("./botserver-stack/conf/directory/admin-pat.txt"); let admin_token = std::fs::read_to_string(pat_path) .map(|s| s.trim().to_string()) .unwrap_or_default(); - if admin_token.is_empty() { - error!("Admin PAT token not found"); - return Err(( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ErrorResponse { - error: "Authentication service not configured".to_string(), - details: None, - }), - )); - } + let admin_token = if admin_token.is_empty() { + info!("Admin PAT token not found, using OAuth client credentials flow"); + match get_oauth_token(&http_client, &client).await { + Ok(token) => token, + Err(e) => { + error!("Failed to get OAuth token: {}", e); + return Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: "Authentication service not configured".to_string(), + details: Some("OAuth client credentials not available".to_string()), + }), + )); + } + } + } else { + admin_token + }; let search_url = format!("{}/v2/users", client.api_url()); let search_body = serde_json::json!({ @@ -765,3 +774,43 @@ async fn create_organization( Err(format!("Failed to create organization: {}", error_text)) } } + +async fn get_oauth_token( + http_client: &reqwest::Client, + client: &crate::directory::client::ZitadelClient, +) -> Result { + let token_url = format!("{}/oauth/v2/token", client.api_url()); + + let params = [ + ("grant_type", "client_credentials".to_string()), + ("client_id", client.client_id().to_string()), + ("client_secret", client.client_secret().to_string()), + ("scope", "openid profile email urn:zitadel:iam:org:project:id:zitadel:aud".to_string()), + ]; + + let response = http_client + .post(&token_url) + .form(¶ms) + .send() + .await + .map_err(|e| format!("Failed to request OAuth token: {}", e))?; + + if !response.status().is_success() { + let error_text = response.text().await.unwrap_or_default(); + return Err(format!("OAuth token request failed: {}", error_text)); + } + + let token_data: serde_json::Value = response + .json() + .await + .map_err(|e| format!("Failed to parse OAuth token response: {}", e))?; + + let access_token = token_data + .get("access_token") + .and_then(|t| t.as_str()) + .ok_or_else(|| "No access_token in OAuth response".to_string())? + .to_string(); + + info!("Successfully obtained OAuth access token via client credentials"); + Ok(access_token) +} diff --git a/src/directory/bootstrap.rs b/src/directory/bootstrap.rs index bc496dc3f..e01fe337c 100644 --- a/src/directory/bootstrap.rs +++ b/src/directory/bootstrap.rs @@ -144,6 +144,23 @@ async fn create_bootstrap_admin(client: &ZitadelClient) -> Result { + info!("Admin PAT token created successfully"); + save_admin_pat_token(&pat_token); + } + Err(e) => { + // PAT creation failure is not critical - user can still login + // This happens when Zitadel doesn't have OAuth client configured yet + warn!("Failed to create admin PAT token (non-critical): {}", e); + info!("Admin user can still login with username/password. PAT can be created later via UI."); + } + } + let base_url = client.api_url(); let setup_url = format!("{}/ui/login", base_url); @@ -250,7 +267,7 @@ fn save_setup_credentials(result: &BootstrapResult) { ║ Password: {:<46}║ ║ Email: {:<46}║ ║ ║ -║ 🌐 LOGIN NOW: http://localhost:9000/suite/login ║ +║ 🌐 LOGIN NOW: http://localhost:8080/suite/login ║ ║ ║ ╚════════════════════════════════════════════════════════════╝ @@ -280,6 +297,32 @@ fn save_setup_credentials(result: &BootstrapResult) { } } +fn save_admin_pat_token(pat_token: &str) { + // Create directory if it doesn't exist + let pat_dir = std::path::Path::new("./botserver-stack/conf/directory"); + if let Err(e) = fs::create_dir_all(pat_dir) { + error!("Failed to create PAT directory: {}", e); + return; + } + + let pat_path = pat_dir.join("admin-pat.txt"); + + match fs::write(&pat_path, pat_token) { + Ok(_) => { + #[cfg(unix)] + { + if let Err(e) = fs::set_permissions(&pat_path, fs::Permissions::from_mode(0o600)) { + warn!("Failed to set PAT file permissions: {}", e); + } + } + info!("Admin PAT token saved to: {}", pat_path.display()); + } + Err(e) => { + error!("Failed to save admin PAT token: {}", e); + } + } +} + fn print_bootstrap_credentials(result: &BootstrapResult) { let separator = "═".repeat(60); @@ -313,7 +356,7 @@ fn print_bootstrap_credentials(result: &BootstrapResult) { println!("║{:^60}║", ""); println!("║ {:56}║", "🌐 LOGIN NOW:"); println!("║{:^60}║", ""); - println!("║ {:56}║", "http://localhost:9000/suite/login"); + println!("║ {:56}║", "http://localhost:8080/suite/login"); println!("║{:^60}║", ""); println!("╠{}╣", separator); println!("║{:^60}║", ""); diff --git a/src/directory/client.rs b/src/directory/client.rs index d24905a02..a27598672 100644 --- a/src/directory/client.rs +++ b/src/directory/client.rs @@ -21,6 +21,8 @@ pub struct ZitadelClient { http_client: reqwest::Client, access_token: Arc>>, pat_token: Option, + /// Username and password for password grant OAuth flow + password_credentials: Option<(String, String)>, } impl ZitadelClient { @@ -35,6 +37,28 @@ impl ZitadelClient { http_client, access_token: Arc::new(RwLock::new(None)), pat_token: None, + password_credentials: None, + }) + } + + /// Create a client that uses password grant OAuth flow + /// This is used for initial bootstrap with Zitadel's default admin user + pub fn with_password_grant( + config: ZitadelConfig, + username: String, + password: String, + ) -> Result { + let http_client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build() + .map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?; + + Ok(Self { + config, + http_client, + access_token: Arc::new(RwLock::new(None)), + pat_token: None, + password_credentials: Some((username, password)), }) } @@ -49,6 +73,7 @@ impl ZitadelClient { http_client, access_token: Arc::new(RwLock::new(None)), pat_token: Some(pat_token), + password_credentials: None, }) } @@ -108,13 +133,24 @@ impl ZitadelClient { let token_url = format!("{}/oauth/v2/token", self.config.api_url); log::info!("Requesting access token from: {}", token_url); - let params = [ - ("grant_type", "client_credentials"), - ("client_id", &self.config.client_id), - ("client_secret", &self.config.client_secret), - ("scope", "openid profile email"), + // Build params dynamically based on auth method + let mut params: Vec<(&str, String)> = vec![ + ("client_id", self.config.client_id.clone()), + ("client_secret", self.config.client_secret.clone()), ]; + if let Some((username, password)) = &self.password_credentials { + // Use password grant flow + params.push(("grant_type", "password".to_string())); + params.push(("username", username.clone())); + params.push(("password", password.clone())); + params.push(("scope", "openid profile email urn:zitadel:iam:org:project:id:zitadel:aud".to_string())); + } else { + // Use client credentials flow + params.push(("grant_type", "client_credentials".to_string())); + params.push(("scope", "openid profile email".to_string())); + } + let response = self .http_client .post(&token_url) @@ -533,4 +569,47 @@ impl ZitadelClient { Ok(()) } + + pub async fn create_pat(&self, user_id: &str, display_name: &str, expiration_date: Option<&str>) -> Result { + let token = self.get_access_token().await?; + let url = format!("{}/v2/users/{}/pat", self.config.api_url, user_id); + + let body = if let Some(expiry) = expiration_date { + serde_json::json!({ + "displayName": display_name, + "expirationDate": expiry + }) + } else { + serde_json::json!({ + "displayName": display_name + }) + }; + + let response = self + .http_client + .post(&url) + .bearer_auth(&token) + .json(&body) + .send() + .await + .map_err(|e| anyhow!("Failed to create PAT: {}", e))?; + + if !response.status().is_success() { + let error_text = response.text().await.unwrap_or_default(); + return Err(anyhow!("Failed to create PAT: {}", error_text)); + } + + let data: serde_json::Value = response + .json() + .await + .map_err(|e| anyhow!("Failed to parse PAT response: {}", e))?; + + let pat_token = data + .get("token") + .and_then(|t| t.as_str()) + .ok_or_else(|| anyhow!("No token in PAT response"))? + .to_string(); + + Ok(pat_token) + } } diff --git a/src/drive/drive_monitor/mod.rs b/src/drive/drive_monitor/mod.rs index c129a78af..ec3012b1e 100644 --- a/src/drive/drive_monitor/mod.rs +++ b/src/drive/drive_monitor/mod.rs @@ -96,7 +96,7 @@ impl DriveMonitor { let mut file_states = self.file_states.write().await; let count = states.len(); *file_states = states; - info!( + trace!( "[DRIVE_MONITOR] Loaded {} file states from disk for bot {}", count, self.bot_id @@ -236,7 +236,7 @@ impl DriveMonitor { .store(true, std::sync::atomic::Ordering::SeqCst); trace!("start_monitoring: calling check_for_changes..."); - info!("Calling initial check_for_changes..."); + trace!("Calling initial check_for_changes..."); match tokio::time::timeout(Duration::from_secs(300), self.check_for_changes()).await { Ok(Ok(_)) => { @@ -262,15 +262,15 @@ impl DriveMonitor { // Force enable periodic monitoring regardless of initial check result self.is_processing.store(true, std::sync::atomic::Ordering::SeqCst); - info!("Forced is_processing to true for periodic monitoring"); + trace!("Forced is_processing to true for periodic monitoring"); let self_clone = self.clone(); // Don't wrap in Arc::new - that creates a copy tokio::spawn(async move { let mut consecutive_processing_failures = 0; - info!("Starting periodic monitoring loop for bot {}", self_clone.bot_id); + trace!("Starting periodic monitoring loop for bot {}", self_clone.bot_id); let is_processing_state = self_clone.is_processing.load(std::sync::atomic::Ordering::SeqCst); - info!("is_processing state at loop start: {} for bot {}", is_processing_state, self_clone.bot_id); + trace!("is_processing state at loop start: {} for bot {}", is_processing_state, self_clone.bot_id); while self_clone .is_processing @@ -304,7 +304,7 @@ impl DriveMonitor { self_clone.consecutive_failures.swap(0, Ordering::Relaxed); consecutive_processing_failures = 0; if prev_failures > 0 { - info!("S3/MinIO recovered for bucket {} after {} failures", + trace!("S3/MinIO recovered for bucket {} after {} failures", self_clone.bucket_name, prev_failures); } } @@ -336,15 +336,15 @@ impl DriveMonitor { } } - info!("Monitoring loop ended for bot {}", self_clone.bot_id); + trace!("Monitoring loop ended for bot {}", self_clone.bot_id); }); - info!("DriveMonitor started for bot {}", self.bot_id); + trace!("DriveMonitor started for bot {}", self.bot_id); Ok(()) } pub async fn stop_monitoring(&self) -> Result<(), Box> { - info!("Stopping DriveMonitor for bot {}", self.bot_id); + trace!("Stopping DriveMonitor for bot {}", self.bot_id); self.is_processing .store(false, std::sync::atomic::Ordering::SeqCst); @@ -352,12 +352,12 @@ impl DriveMonitor { self.file_states.write().await.clear(); self.consecutive_failures.store(0, Ordering::Relaxed); - info!("DriveMonitor stopped for bot {}", self.bot_id); + trace!("DriveMonitor stopped for bot {}", self.bot_id); Ok(()) } pub fn spawn(self: Arc) -> tokio::task::JoinHandle<()> { tokio::spawn(async move { - info!( + trace!( "Drive Monitor service started for bucket: {}", self.bucket_name ); @@ -387,7 +387,7 @@ impl DriveMonitor { Ok(_) => { let prev_failures = self.consecutive_failures.swap(0, Ordering::Relaxed); if prev_failures > 0 { - info!("S3/MinIO recovered for bucket {} after {} failures", + trace!("S3/MinIO recovered for bucket {} after {} failures", self.bucket_name, prev_failures); } } @@ -634,7 +634,7 @@ impl DriveMonitor { let normalized_new_value = Self::normalize_config_value(new_value); if normalized_old_value != normalized_new_value { - info!( + trace!( "Detected change in {} (old: {}, new: {})", key, normalized_old_value, normalized_new_value ); @@ -656,7 +656,7 @@ impl DriveMonitor { } if llm_url_changed { - info!("Broadcasting LLM configuration refresh"); + trace!("Broadcasting LLM configuration refresh"); let effective_url = if !new_llm_url.is_empty() { new_llm_url } else { @@ -672,7 +672,7 @@ impl DriveMonitor { .unwrap_or_default() }; - info!( + trace!( "LLM configuration changed to: URL={}, Model={}", effective_url, effective_model ); @@ -697,7 +697,7 @@ impl DriveMonitor { Some(effective_endpoint_path), ) .await; - info!("Dynamic LLM provider updated with new configuration"); + trace!("Dynamic LLM provider updated with new configuration"); } else { warn!("Dynamic LLM provider not available - config change ignored"); } @@ -788,7 +788,7 @@ impl DriveMonitor { client: &Client, file_path: &str, ) -> Result<(), Box> { - info!( + trace!( "Fetching object from Drive: bucket={}, key={}", &self.bucket_name, file_path ); @@ -800,7 +800,7 @@ impl DriveMonitor { .await { Ok(res) => { - info!( + trace!( "Successfully fetched object from Drive: bucket={}, key={}, size={}", &self.bucket_name, file_path, @@ -838,22 +838,26 @@ impl DriveMonitor { let tool_name_clone = tool_name.clone(); let source_content_clone = source_content.clone(); let bot_id = self.bot_id; - tokio::task::spawn_blocking(move || { + let elapsed_ms = tokio::task::spawn_blocking(move || { std::fs::create_dir_all(&work_dir_clone)?; let local_source_path = format!("{}/{}.bas", work_dir_clone, tool_name_clone); std::fs::write(&local_source_path, &source_content_clone)?; let mut compiler = BasicCompiler::new(state_clone, bot_id); + let start_time = std::time::Instant::now(); let result = compiler.compile_file(&local_source_path, &work_dir_str)?; + let elapsed = start_time.elapsed().as_millis(); if let Some(mcp_tool) = result.mcp_tool { - info!( + trace!( "MCP tool definition generated with {} parameters", mcp_tool.input_schema.properties.len() ); } - Ok::<(), Box>(()) + Ok::>(elapsed) }) .await??; + info!("Successfully compiled {} in {} ms", tool_name, elapsed_ms); + // Check for USE WEBSITE commands and trigger immediate crawling if source_content.contains("USE WEBSITE") { self.trigger_immediate_website_crawl(&source_content).await?; @@ -892,7 +896,7 @@ impl DriveMonitor { let refresh_str = cap.get(2).map(|m| m.as_str()).unwrap_or("1m"); - info!("Found USE WEBSITE command for {}, checking if crawl needed", url_str); + trace!("Found USE WEBSITE command for {}, checking if crawl needed", url_str); // Check if crawl is already in progress or recently completed let mut conn = self.state.conn.get() @@ -1098,13 +1102,13 @@ impl DriveMonitor { if is_new || is_modified { if path.to_lowercase().ends_with(".pdf") { pdf_files_found += 1; - info!( + trace!( "Detected {} PDF in .gbkb: {} (will extract text for vectordb)", if is_new { "new" } else { "changed" }, path ); } else { - info!( + trace!( "Detected {} in .gbkb: {}", if is_new { "new file" } else { "change" }, path @@ -1148,7 +1152,7 @@ impl DriveMonitor { #[cfg(any(feature = "research", feature = "llm"))] { if !is_embedding_server_ready() { - info!("Embedding server not ready, deferring KB indexing for {}", kb_folder_path.display()); + trace!("Embedding server not ready, deferring KB indexing for {}", kb_folder_path.display()); continue; } @@ -1178,7 +1182,7 @@ impl DriveMonitor { let kb_key_owned = kb_key.clone(); tokio::spawn(async move { - info!( + trace!( "Triggering KB indexing for folder: {} (PDF text extraction enabled)", kb_folder_owned.display() ); @@ -1246,7 +1250,7 @@ impl DriveMonitor { } if files_processed > 0 { - info!( + trace!( "Processed {} .gbkb files (including {} PDFs for text extraction)", files_processed, pdf_files_found ); @@ -1264,7 +1268,7 @@ impl DriveMonitor { }); for path in paths_to_remove { - info!("Detected deletion in .gbkb: {}", path); + trace!("Detected deletion in .gbkb: {}", path); file_states.remove(&path); let path_parts: Vec<&str> = path.split('/').collect(); @@ -1321,7 +1325,7 @@ impl DriveMonitor { let bytes = response.body.collect().await?.into_bytes(); tokio::fs::write(&local_path, bytes).await?; - info!( + trace!( "Downloaded .gbkb file {} to {}", file_path, local_path.display() diff --git a/src/drive/local_file_monitor.rs b/src/drive/local_file_monitor.rs index 83f3a8c49..5b894c20f 100644 --- a/src/drive/local_file_monitor.rs +++ b/src/drive/local_file_monitor.rs @@ -1,7 +1,7 @@ use crate::basic::compiler::BasicCompiler; use crate::core::shared::state::AppState; use diesel::prelude::*; -use log::{debug, error, info, warn}; +use log::{debug, error, info, trace, warn}; use std::collections::HashMap; use std::error::Error; use std::path::{Path, PathBuf}; @@ -38,7 +38,7 @@ impl LocalFileMonitor { // Use /opt/gbo/data as the base directory for source files let data_dir = PathBuf::from("/opt/gbo/data"); - info!("Initializing with data_dir: {:?}, work_root: {:?}", data_dir, work_root); + trace!("Initializing with data_dir: {:?}, work_root: {:?}", data_dir, work_root); Self { state, @@ -50,7 +50,7 @@ impl LocalFileMonitor { } pub async fn start_monitoring(&self) -> Result<(), Box> { - info!("Starting local file monitor for /opt/gbo/data/*.gbai directories"); + trace!("Starting local file monitor for /opt/gbo/data/*.gbai directories"); // Create data directory if it doesn't exist if let Err(e) = tokio::fs::create_dir_all(&self.data_dir).await { @@ -68,12 +68,12 @@ impl LocalFileMonitor { monitor.monitoring_loop().await; }); - info!("Local file monitor started"); + trace!("Local file monitor started"); Ok(()) } async fn monitoring_loop(&self) { - info!("Starting monitoring loop"); + trace!("Starting monitoring loop"); // Try to create a file system watcher let (tx, mut rx) = tokio::sync::mpsc::channel(100); @@ -105,7 +105,7 @@ impl LocalFileMonitor { return; } - info!("Watching directory: {:?}", self.data_dir); + trace!("Watching directory: {:?}", self.data_dir); while self.is_processing.load(Ordering::SeqCst) { tokio::time::sleep(Duration::from_secs(5)).await; @@ -116,7 +116,7 @@ impl LocalFileMonitor { EventKind::Create(_) | EventKind::Modify(_) | EventKind::Any => { for path in &event.paths { if self.is_gbdialog_file(path) { - info!("Detected change: {:?}", path); + trace!("Detected change: {:?}", path); if let Err(e) = self.compile_local_file(path).await { error!("Failed to compile {:?}: {}", path, e); } @@ -126,7 +126,7 @@ impl LocalFileMonitor { EventKind::Remove(_) => { for path in &event.paths { if self.is_gbdialog_file(path) { - info!("File removed: {:?}", path); + trace!("File removed: {:?}", path); self.remove_file_state(path).await; } } @@ -141,11 +141,11 @@ impl LocalFileMonitor { } } - info!("Monitoring loop ended"); + trace!("Monitoring loop ended"); } async fn polling_loop(&self) { - info!("Using polling fallback (checking every 10s)"); + trace!("Using polling fallback (checking every 10s)"); while self.is_processing.load(Ordering::SeqCst) { tokio::time::sleep(Duration::from_secs(10)).await; @@ -231,7 +231,7 @@ impl LocalFileMonitor { }; if should_compile { - info!("Compiling: {:?}", path); + trace!("Compiling: {:?}", path); if let Err(e) = self.compile_local_file(&path).await { error!("Failed to compile {:?}: {}", path, e); } @@ -285,7 +285,7 @@ impl LocalFileMonitor { .map_err(|e| format!("Failed to get bot_id for '{}': {}", bot_name_clone, e))? }; - tokio::task::spawn_blocking(move || { + let elapsed_ms = tokio::task::spawn_blocking(move || { std::fs::create_dir_all(&work_dir_clone)?; let local_source_path = work_dir_clone.join(format!("{}.bas", tool_name_clone)); std::fs::write(&local_source_path, &source_content_clone)?; @@ -294,19 +294,21 @@ impl LocalFileMonitor { .ok_or_else(|| "Invalid UTF-8 in local source path".to_string())?; let work_dir_str = work_dir_clone.to_str() .ok_or_else(|| "Invalid UTF-8 in work directory path".to_string())?; + let start_time = std::time::Instant::now(); let result = compiler.compile_file(local_source_str, work_dir_str)?; + let elapsed_ms = start_time.elapsed().as_millis(); if let Some(mcp_tool) = result.mcp_tool { - info!( + trace!( "[LOCAL_MONITOR] MCP tool generated with {} parameters for bot {}", mcp_tool.input_schema.properties.len(), bot_name_clone ); } - Ok::<(), Box>(()) + Ok::>(elapsed_ms) }) .await??; - info!("Successfully compiled: {:?}", file_path); + info!("Successfully compiled: {:?} in {} ms", file_path, elapsed_ms); Ok(()) } @@ -317,7 +319,7 @@ impl LocalFileMonitor { } pub async fn stop_monitoring(&self) { - info!("Stopping local file monitor"); + trace!("Stopping local file monitor"); self.is_processing.store(false, Ordering::SeqCst); self.file_states.write().await.clear(); } diff --git a/src/llm/local.rs b/src/llm/local.rs index 872a4dde9..c065bad2e 100644 --- a/src/llm/local.rs +++ b/src/llm/local.rs @@ -1,5 +1,4 @@ use crate::core::config::ConfigManager; -use crate::core::kb::embedding_generator::set_embedding_server_ready; use crate::core::shared::memory_monitor::{log_jemalloc_stats, MemoryStats}; use crate::security::command_guard::SafeCommand; use crate::core::shared::models::schema::bots::dsl::*; @@ -7,7 +6,6 @@ use crate::core::shared::state::AppState; use diesel::prelude::*; use log::{error, info, trace, warn}; use reqwest; -use std::fmt::Write; use std::sync::Arc; use tokio; @@ -95,9 +93,10 @@ pub async fn ensure_llama_servers_running( }; // For llama-server startup, use path relative to botserver root - // The models are in ./data/llm/ and the llama-server runs from botserver root - let llm_model_path = format!("./data/llm/{}", llm_model); - let embedding_model_path = format!("./data/llm/{}", embedding_model); + // The models are in /data/llm/ and the llama-server runs from botserver root + let stack_path = std::env::var("BOTSERVER_STACK_PATH").unwrap_or_else(|_| "./botserver-stack".to_string()); + let llm_model_path = format!("{stack_path}/data/llm/{}", llm_model); + let embedding_model_path = format!("{stack_path}/data/llm/{}", embedding_model); if !llm_server_enabled { info!("Local LLM server management disabled (llm-server=false). Using external endpoints."); info!(" LLM URL: {llm_url}"); @@ -188,6 +187,37 @@ pub async fn ensure_llama_servers_running( } else if embedding_model.is_empty() { info!("EMBEDDING_MODEL not set, skipping Embedding server"); } + // Start servers in background - don't block HTTP server startup + if !tasks.is_empty() { + info!("LLM servers starting in background (non-blocking mode)"); + tokio::spawn(async move { + for task in tasks { + if let Err(e) = task.await { + error!("LLM server task failed: {}", e); + } + } + info!("LLM server startup tasks completed"); + }); + } + + // Return immediately - don't wait for servers to be ready + info!("LLM server initialization initiated (will start in background)"); + info!("HTTP server can start without waiting for LLM servers"); + trace!("ensure_llama_servers_running returning early (non-blocking)"); + + let end_mem = MemoryStats::current(); + trace!( + "[LLM_LOCAL] ensure_llama_servers_running END (non-blocking), RSS={} (total delta={})", + MemoryStats::format_bytes(end_mem.rss_bytes), + MemoryStats::format_bytes(end_mem.rss_bytes.saturating_sub(start_mem.rss_bytes)) + ); + log_jemalloc_stats(); + + trace!("ensure_llama_servers_running EXIT OK (non-blocking)"); + return Ok(()); + + // OLD BLOCKING CODE - REMOVED TO PREVENT HTTP SERVER BLOCKING + /* for task in tasks { task.await??; } @@ -202,7 +232,7 @@ pub async fn ensure_llama_servers_running( let mut llm_ready = llm_running || llm_model.is_empty(); let mut embedding_ready = embedding_running || embedding_model.is_empty(); let mut attempts = 0; - let max_attempts = 120; + let max_attempts = 15; // Reduced from 120 to 15 (30 seconds instead of 240) while attempts < max_attempts && (!llm_ready || !embedding_ready) { trace!("Wait loop iteration {}", attempts); tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; @@ -301,6 +331,7 @@ pub async fn ensure_llama_servers_running( } Err(error_msg.into()) } + */ // END OF OLD BLOCKING CODE } pub async fn is_server_running(url: &str) -> bool { let client = reqwest::Client::builder() @@ -377,8 +408,8 @@ pub fn start_llm_server( let gpu_layers = config_manager .get_config(&default_bot_id, "llm-server-gpu-layers", None) - .unwrap_or_else(|_| "20".to_string()); - let gpu_layers = if gpu_layers.is_empty() { "20".to_string() } else { gpu_layers }; + .unwrap_or_else(|_| "0".to_string()); + let gpu_layers = if gpu_layers.is_empty() { "0".to_string() } else { gpu_layers }; let reasoning_format = config_manager .get_config(&default_bot_id, "llm-server-reasoning-format", None) @@ -501,7 +532,7 @@ pub async fn start_embedding_server( .arg("--host").arg("0.0.0.0") .arg("--port").arg(port) .arg("--embedding") - .arg("--n-gpu-layers").arg("99") + .arg("--n-gpu-layers").arg("0") .arg("--verbose"); if !cfg!(windows) { diff --git a/src/main_module/bootstrap.rs b/src/main_module/bootstrap.rs index 886a3ed9a..def015737 100644 --- a/src/main_module/bootstrap.rs +++ b/src/main_module/bootstrap.rs @@ -607,8 +607,11 @@ pub async fn create_app_state( fn init_directory_service() -> Result<(Arc>, crate::directory::ZitadelConfig), std::io::Error> { let zitadel_config = { // Try to load from directory_config.json first - let config_path = "./config/directory_config.json"; - if let Ok(content) = std::fs::read_to_string(config_path) { + // Use same path as DirectorySetup saves to (BOTSERVER_STACK_PATH/conf/system/directory_config.json) + let stack_path = std::env::var("BOTSERVER_STACK_PATH") + .unwrap_or_else(|_| "./botserver-stack".to_string()); + let config_path = format!("{}/conf/system/directory_config.json", stack_path); + if let Ok(content) = std::fs::read_to_string(&config_path) { if let Ok(json) = serde_json::from_str::(&content) { let base_url = json .get("base_url") diff --git a/src/security/cors.rs b/src/security/cors.rs index 290c32f26..649c55cc9 100644 --- a/src/security/cors.rs +++ b/src/security/cors.rs @@ -101,14 +101,14 @@ impl CorsConfig { Self { allowed_origins: vec![ "http://localhost:3000".to_string(), + "http://localhost:8080".to_string(), "http://localhost:9000".to_string(), - "http://localhost:8300".to_string(), "http://127.0.0.1:3000".to_string(), + "http://127.0.0.1:8080".to_string(), "http://127.0.0.1:9000".to_string(), - "http://127.0.0.1:8300".to_string(), "https://localhost:3000".to_string(), + "https://localhost:8080".to_string(), "https://localhost:9000".to_string(), - "https://localhost:8300".to_string(), ], allowed_methods: vec![ Method::GET, diff --git a/src/security/integration.rs b/src/security/integration.rs index 6e69c146a..b8a8dafa6 100644 --- a/src/security/integration.rs +++ b/src/security/integration.rs @@ -35,9 +35,9 @@ impl TlsIntegration { services.insert( "api".to_string(), ServiceUrls { - original: "http://localhost:9000".to_string(), + original: "http://localhost:8080".to_string(), secure: "https://localhost:8443".to_string(), - port: 9000, + port: 8080, tls_port: 8443, }, ); @@ -95,10 +95,10 @@ impl TlsIntegration { services.insert( "minio".to_string(), ServiceUrls { - original: "https://localhost:9000".to_string(), - secure: "https://localhost:9000".to_string(), - port: 9000, - tls_port: 9000, + original: "https://localhost:9100".to_string(), + secure: "https://localhost:9100".to_string(), + port: 9100, + tls_port: 9100, }, ); diff --git a/src/security/zitadel_auth.rs b/src/security/zitadel_auth.rs index 86ab1e7ce..b4e1177f5 100644 --- a/src/security/zitadel_auth.rs +++ b/src/security/zitadel_auth.rs @@ -28,8 +28,8 @@ pub struct ZitadelAuthConfig { impl Default for ZitadelAuthConfig { fn default() -> Self { Self { - issuer_url: "http://localhost:8300".to_string(), - api_url: "http://localhost:8300".to_string(), + issuer_url: "http://localhost:9000".to_string(), + api_url: "http://localhost:9000".to_string(), client_id: String::new(), client_secret: String::new(), project_id: String::new(),