use crate::config::AppConfig;
use crate::package_manager::setup::{DirectorySetup, EmailSetup};
use crate::package_manager::{InstallMode, PackageManager};
use crate::shared::utils::{establish_pg_connection, init_secrets_manager};
use anyhow::Result;
use aws_config::BehaviorVersion;
use aws_sdk_s3::Client;
use diesel::{Connection, RunQueryDsl};
use log::{debug, error, info, trace, warn};
use rand::distr::Alphanumeric;
use rcgen::{
    BasicConstraints, CertificateParams, DistinguishedName, DnType, IsCa, Issuer, KeyPair,
};
use std::fs;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process::Command;

#[derive(Debug)]
pub struct ComponentInfo {
    pub name: &'static str,
}

#[derive(Debug)]
pub struct BootstrapManager {
    pub install_mode: InstallMode,
    pub tenant: Option<String>,
}

impl BootstrapManager {
    pub async fn new(mode: InstallMode, tenant: Option<String>) -> Self {
        trace!(
            "Initializing BootstrapManager with mode {:?} and tenant {:?}",
            mode,
            tenant
        );
        Self {
            install_mode: mode,
            tenant,
        }
    }

    /// Kill all processes running from the botserver-stack directory
    /// This ensures a clean startup when bootstrapping fresh
    pub fn kill_stack_processes() {
        info!("Killing any existing stack processes...");

        // Kill processes by pattern matching on botserver-stack path
        let patterns = vec![
            "botserver-stack/bin/vault",
            "botserver-stack/bin/tables",
            "botserver-stack/bin/drive",
            "botserver-stack/bin/cache",
            "botserver-stack/bin/directory",
            "botserver-stack/bin/llm",
            "botserver-stack/bin/email",
            "botserver-stack/bin/proxy",
            "botserver-stack/bin/dns",
            "botserver-stack/bin/meeting",
            "botserver-stack/bin/vector_db",
        ];

        for pattern in patterns {
            let _ = Command::new("pkill").args(["-9", "-f", pattern]).output();
        }

        // Also kill by specific process names
        let process_names = vec![
            "vault",
            "postgres",
            "minio",
            "redis-server",
            "zitadel",
            "ollama",
            "stalwart",
            "caddy",
            "coredns",
            "livekit",
            "qdrant",
        ];

        for name in process_names {
            let _ = Command::new("pkill").args(["-9", "-x", name]).output();
        }

        // Give processes time to die
        std::thread::sleep(std::time::Duration::from_millis(500));
        info!("Stack processes terminated");
    }

    /// Clean up the entire stack directory for a fresh bootstrap
    pub fn clean_stack_directory() -> Result<()> {
        let stack_dir = PathBuf::from("./botserver-stack");
        let env_file = PathBuf::from("./.env");

        if stack_dir.exists() {
            info!("Removing existing stack directory...");
            fs::remove_dir_all(&stack_dir)?;
            info!("Stack directory removed");
        }

        if env_file.exists() {
            info!("Removing existing .env file...");
            fs::remove_file(&env_file)?;
            info!(".env file removed");
        }

        Ok(())
    }
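
    // A typical fresh-start sequence built from the two helpers above (sketch;
    // this mirrors what `ensure_services_running` does when Vault is missing):
    //
    //     BootstrapManager::kill_stack_processes();
    //     BootstrapManager::clean_stack_directory()?;
    //     // ...then call `bootstrap()` on a manager instance to rebuild the stack.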

    pub async fn start_all(&mut self) -> Result<()> {
        let pm = PackageManager::new(self.install_mode.clone(), self.tenant.clone())?;

        // VAULT MUST START FIRST - all other services depend on it for secrets
        if pm.is_installed("vault") {
            // Check if Vault is already running before trying to start.
            // The health URL is single-quoted so the shell does not treat the
            // `&` separators in the query string as background operators.
            let vault_already_running = Command::new("sh")
                .arg("-c")
                .arg("curl -f -s 'http://localhost:8200/v1/sys/health?standbyok=true&uninitcode=200&sealedcode=200' >/dev/null 2>&1")
                .stdout(std::process::Stdio::null())
                .stderr(std::process::Stdio::null())
                .status()
                .map(|s| s.success())
                .unwrap_or(false);

            if vault_already_running {
                info!("Vault is already running");
            } else {
                info!("Starting Vault secrets service...");
                match pm.start("vault") {
                    Ok(_child) => {
                        info!("Vault process started, waiting for initialization...");
                    }
                    Err(e) => {
                        warn!("Vault might already be running: {}", e);
                    }
                }

                // Wait for Vault to be ready (up to 10 seconds)
                for i in 0..10 {
                    let vault_ready = Command::new("sh")
                        .arg("-c")
                        .arg("curl -f -s 'http://localhost:8200/v1/sys/health?standbyok=true&uninitcode=200&sealedcode=200' >/dev/null 2>&1")
                        .stdout(std::process::Stdio::null())
                        .stderr(std::process::Stdio::null())
                        .status()
                        .map(|s| s.success())
                        .unwrap_or(false);

                    if vault_ready {
                        info!("Vault is responding");
                        break;
                    }
                    if i < 9 {
                        tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
                    }
                }
            }

            // Try to unseal Vault
            if let Err(e) = self.ensure_vault_unsealed().await {
                warn!("Vault unseal check: {}", e);
            }

            // Initialize SecretsManager so other code can use Vault
            info!("Initializing SecretsManager...");
            match init_secrets_manager().await {
                Ok(_) => info!("SecretsManager initialized successfully"),
                Err(e) => {
                    warn!("Failed to initialize SecretsManager: {}", e);
                }
            }
        }

        // Start tables (PostgreSQL) - needed for database operations
        if pm.is_installed("tables") {
            info!("Starting PostgreSQL database...");
            match pm.start("tables") {
                Ok(_child) => {
                    // Give PostgreSQL time to initialize
                    tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
                    info!("PostgreSQL started");
                }
                Err(e) => {
                    warn!("PostgreSQL might already be running: {}", e);
                }
            }
        }

        // Start other components (order matters less for these)
        let other_components = vec![
            ComponentInfo { name: "cache" },
            ComponentInfo { name: "drive" },
            ComponentInfo { name: "llm" },
            ComponentInfo { name: "email" },
            ComponentInfo { name: "proxy" },
            ComponentInfo { name: "directory" },
            ComponentInfo { name: "alm" },
            ComponentInfo { name: "alm_ci" },
            ComponentInfo { name: "dns" },
            ComponentInfo { name: "meeting" },
            ComponentInfo {
                name: "remote_terminal",
            },
            ComponentInfo { name: "vector_db" },
            ComponentInfo { name: "host" },
        ];

        for component in other_components {
            if pm.is_installed(component.name) {
                match pm.start(component.name) {
                    Ok(_child) => {
                        trace!("Started component: {}", component.name);
                    }
                    Err(e) => {
                        trace!(
                            "Component {} might already be running: {}",
                            component.name,
                            e
                        );
                    }
                }
            }
        }

        Ok(())
    }

    fn generate_secure_password(&self, length: usize) -> String {
        let mut rng = rand::rng();
        let base: String = (0..length.saturating_sub(4))
            .map(|_| {
                let byte = rand::Rng::sample(&mut rng, Alphanumeric);
                char::from(byte)
            })
            .collect();
        // Add required symbols/complexity for Zitadel password policy
        // Use ! instead of @ to avoid breaking database connection strings
        format!("{}!1Aa", base)
    }
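
    // Illustrative sketch (not wired into anything): the "!1Aa" suffix above is
    // what guarantees the lower/upper/digit/symbol character classes that
    // Zitadel's default password policy expects. A check like this hypothetical
    // helper holds for every value returned by `generate_secure_password`.
    #[allow(dead_code)]
    fn password_meets_policy(candidate: &str) -> bool {
        candidate.len() >= 8
            && candidate.chars().any(|c| c.is_ascii_lowercase())
            && candidate.chars().any(|c| c.is_ascii_uppercase())
            && candidate.chars().any(|c| c.is_ascii_digit())
            && candidate.chars().any(|c| !c.is_ascii_alphanumeric())
    }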

    /// Ensure critical services are running - Vault MUST be first
    /// Order: vault -> tables -> drive
    /// If the stack is not yet bootstrapped, kills leftover processes and runs bootstrap first
    pub async fn ensure_services_running(&mut self) -> Result<()> {
        info!("Ensuring critical services are running...");

        let installer = PackageManager::new(self.install_mode.clone(), self.tenant.clone())?;

        // Check if we need to bootstrap first
        let vault_installed = installer.is_installed("vault");
        let vault_initialized = PathBuf::from("./botserver-stack/conf/vault/init.json").exists();

        if !vault_installed || !vault_initialized {
            info!("Stack not fully bootstrapped, running bootstrap first...");
            // Kill any leftover processes
            Self::kill_stack_processes();

            // Run bootstrap - this will start all services
            self.bootstrap().await?;

            // After bootstrap, services are already running; just ensure Vault is unsealed and env vars are set
            info!("Bootstrap complete, verifying Vault is ready...");
            tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;

            if let Err(e) = self.ensure_vault_unsealed().await {
                warn!("Failed to unseal Vault after bootstrap: {}", e);
            }

            // Services were started by bootstrap, no need to restart them
            return Ok(());
        }

        // If we get here, bootstrap was already done previously - just start services
        // VAULT MUST BE FIRST - it provides all secrets
        if installer.is_installed("vault") {
            // Check if Vault is already running; the health URL is single-quoted
            // so the shell does not split the query string on `&`.
            let vault_running = Command::new("sh")
                .arg("-c")
                .arg("curl -f -s 'http://localhost:8200/v1/sys/health?standbyok=true&uninitcode=200&sealedcode=200' >/dev/null 2>&1")
                .stdout(std::process::Stdio::null())
                .stderr(std::process::Stdio::null())
                .status()
                .map(|s| s.success())
                .unwrap_or(false);

            if !vault_running {
                info!("Starting Vault secrets service...");
                match installer.start("vault") {
                    Ok(_child) => {
                        info!("Vault started successfully");
                        // Give Vault time to initialize
                        tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
                    }
                    Err(e) => {
                        warn!("Vault might already be running or failed to start: {}", e);
                    }
                }
            } else {
                info!("Vault is already running");
            }

            // Always try to unseal Vault (it may have restarted)
            // If unseal fails, Vault may need re-initialization (data deleted)
            if let Err(e) = self.ensure_vault_unsealed().await {
                warn!("Vault unseal failed: {} - running re-bootstrap", e);

                // Kill all processes and run fresh bootstrap
                Self::kill_stack_processes();
                Self::clean_stack_directory()?;

                // Run bootstrap from scratch
                self.bootstrap().await?;

                // After bootstrap, services are already running
                info!("Re-bootstrap complete, verifying Vault is ready...");
                tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;

                if let Err(e) = self.ensure_vault_unsealed().await {
                    return Err(anyhow::anyhow!(
                        "Failed to configure Vault after re-bootstrap: {}",
                        e
                    ));
                }

                // Services were started by bootstrap, no need to restart them
                return Ok(());
            }

            // Initialize SecretsManager so other code can use Vault
            info!("Initializing SecretsManager...");
            match init_secrets_manager().await {
                Ok(_) => info!("SecretsManager initialized successfully"),
                Err(e) => {
                    error!("Failed to initialize SecretsManager: {}", e);
                    return Err(anyhow::anyhow!(
                        "SecretsManager initialization failed: {}",
                        e
                    ));
                }
            }
        } else {
            // Vault not installed - cannot proceed, need to run bootstrap
            warn!("Vault (secrets) component not installed - run bootstrap first");
            return Err(anyhow::anyhow!(
                "Vault not installed. Run bootstrap command first."
            ));
        }

        // Check and start PostgreSQL (after Vault is running)
        if installer.is_installed("tables") {
            info!("Starting PostgreSQL database service...");
            match installer.start("tables") {
                Ok(_child) => {
                    info!("PostgreSQL started successfully");
                    // Give PostgreSQL time to initialize
                    tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
                }
                Err(e) => {
                    warn!(
                        "PostgreSQL might already be running or failed to start: {}",
                        e
                    );
                }
            }
        } else {
            warn!("PostgreSQL (tables) component not installed");
        }

        // Check and start MinIO
        if installer.is_installed("drive") {
            info!("Starting MinIO drive service...");
            match installer.start("drive") {
                Ok(_child) => {
                    info!("MinIO started successfully");
                    // Give MinIO time to initialize
                    tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
                }
                Err(e) => {
                    warn!("MinIO might already be running or failed to start: {}", e);
                }
            }
        } else {
            warn!("MinIO (drive) component not installed");
        }

        Ok(())
    }
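
    // A minimal usage sketch (assumes an `InstallMode::Local` variant exists in
    // this crate; adjust to the real enum). This is how the server's startup
    // path is expected to drive this type:
    //
    //     let mut manager = BootstrapManager::new(InstallMode::Local, None).await;
    //     manager.ensure_services_running().await?;
    //
    // `ensure_services_running` either starts an already-bootstrapped stack or
    // falls back to a full `bootstrap()` run when Vault is missing or uninitialized.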

    /// Ensure Vault is unsealed (required after restart)
    /// Returns Ok(()) if Vault is ready, Err if it needs re-initialization
    async fn ensure_vault_unsealed(&self) -> Result<()> {
        let vault_init_path = PathBuf::from("./botserver-stack/conf/vault/init.json");
        let vault_addr = "http://localhost:8200";

        if !vault_init_path.exists() {
            return Err(anyhow::anyhow!(
                "Vault init.json not found - needs re-initialization"
            ));
        }

        // Read unseal key from init.json
        let init_json = fs::read_to_string(&vault_init_path)?;
        let init_data: serde_json::Value = serde_json::from_str(&init_json)?;
        let unseal_key = init_data["unseal_keys_b64"]
            .as_array()
            .and_then(|arr| arr.first())
            .and_then(|v| v.as_str())
            .unwrap_or("")
            .to_string();
        let root_token = init_data["root_token"].as_str().unwrap_or("").to_string();

        if unseal_key.is_empty() || root_token.is_empty() {
            return Err(anyhow::anyhow!(
                "Invalid Vault init.json - needs re-initialization"
            ));
        }

        // First check if Vault is initialized (not just running)
        let status_output = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "VAULT_ADDR={} ./botserver-stack/bin/vault/vault status -format=json 2>/dev/null",
                vault_addr
            ))
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::null())
            .output()?;

        let status_str = String::from_utf8_lossy(&status_output.stdout);

        // Parse status - handle both success and error cases
        if let Ok(status) = serde_json::from_str::<serde_json::Value>(&status_str) {
            let initialized = status["initialized"].as_bool().unwrap_or(false);
            let sealed = status["sealed"].as_bool().unwrap_or(true);

            if !initialized {
                // Vault is running but not initialized - this means data was deleted
                // We need to re-run bootstrap
                warn!("Vault is running but not initialized - data may have been deleted");
                return Err(anyhow::anyhow!(
                    "Vault not initialized - needs re-bootstrap"
                ));
            }

            if sealed {
                info!("Unsealing Vault...");
                let unseal_output = std::process::Command::new("sh")
                    .arg("-c")
                    .arg(format!(
                        "VAULT_ADDR={} ./botserver-stack/bin/vault/vault operator unseal {} >/dev/null 2>&1",
                        vault_addr, unseal_key
                    ))
                    .stdout(std::process::Stdio::null())
                    .stderr(std::process::Stdio::null())
                    .output()?;

                if !unseal_output.status.success() {
                    let stderr = String::from_utf8_lossy(&unseal_output.stderr);
                    warn!("Vault unseal may have failed: {}", stderr);
                }

                // Verify unseal succeeded
                tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                let verify_output = std::process::Command::new("sh")
                    .arg("-c")
                    .arg(format!(
                        "VAULT_ADDR={} ./botserver-stack/bin/vault/vault status -format=json 2>/dev/null",
                        vault_addr
                    ))
                    .stdout(std::process::Stdio::piped())
                    .stderr(std::process::Stdio::null())
                    .output()?;

                let verify_str = String::from_utf8_lossy(&verify_output.stdout);
                if let Ok(verify_status) = serde_json::from_str::<serde_json::Value>(&verify_str) {
                    if verify_status["sealed"].as_bool().unwrap_or(true) {
                        return Err(anyhow::anyhow!(
                            "Failed to unseal Vault - may need re-initialization"
                        ));
                    }
                }
                info!("Vault unsealed successfully");
            }
        } else {
            // Could not parse status - Vault might not be responding properly
            warn!("Could not get Vault status: {}", status_str);
            return Err(anyhow::anyhow!("Vault not responding properly"));
        }

        // Set environment variables for other components
        std::env::set_var("VAULT_ADDR", vault_addr);
        std::env::set_var("VAULT_TOKEN", &root_token);
        std::env::set_var("VAULT_SKIP_VERIFY", "true");

        // Also set mTLS cert paths
        std::env::set_var(
            "VAULT_CACERT",
            "./botserver-stack/conf/system/certificates/ca/ca.crt",
        );
        std::env::set_var(
            "VAULT_CLIENT_CERT",
            "./botserver-stack/conf/system/certificates/botserver/client.crt",
        );
        std::env::set_var(
            "VAULT_CLIENT_KEY",
            "./botserver-stack/conf/system/certificates/botserver/client.key",
        );

        info!("Vault environment configured");
        Ok(())
    }
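
    // Background for the curl probes above and in `start_all` (hedged: this is
    // Vault's documented behavior, not something this crate controls):
    // GET /v1/sys/health returns 200 for an initialized, unsealed, active node,
    // 501 when uninitialized and 503 when sealed. Passing
    // `standbyok=true&uninitcode=200&sealedcode=200` remaps those cases to 200,
    // so the probe only answers "is the Vault process reachable?"; the actual
    // seal/init state is then inspected separately via `vault status -format=json`.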

    pub async fn bootstrap(&mut self) -> Result<()> {
        // Generate certificates first (including for Vault)
        info!("Generating TLS certificates...");
        if let Err(e) = self.generate_certificates().await {
            error!("Failed to generate certificates: {}", e);
        }

        // Create Vault configuration with mTLS
        info!("Creating Vault configuration...");
        if let Err(e) = self.create_vault_config().await {
            error!("Failed to create Vault config: {}", e);
        }

        // Generate secure passwords for all services - these are ONLY used during bootstrap
        // and immediately stored in Vault. NO LEGACY ENV VARS.
        let db_password = self.generate_secure_password(24);
        let drive_accesskey = self.generate_secure_password(20);
        let drive_secret = self.generate_secure_password(40);
        let cache_password = self.generate_secure_password(24);

        // Configuration is stored in Vault, not .env files
        info!("Configuring services through Vault...");

        let pm = PackageManager::new(self.install_mode.clone(), self.tenant.clone())?;

        // Vault MUST be installed first - it stores all secrets
        // Order: vault -> tables -> directory -> drive -> cache -> llm
        let required_components = vec![
            "vault",     // Secrets management - MUST BE FIRST
            "tables",    // Database - required by Directory
            "directory", // Identity service - manages users
            "drive",     // S3 storage - credentials in Vault
            "cache",     // Redis cache
            "llm",       // LLM service
        ];

        // Special check: Vault needs setup even if binary exists but not initialized
        let vault_needs_setup = !PathBuf::from("./botserver-stack/conf/vault/init.json").exists();

        for component in required_components {
            // For vault, also check if it needs initialization
            let needs_install = if component == "vault" {
                !pm.is_installed(component) || vault_needs_setup
            } else {
                !pm.is_installed(component)
            };

            if needs_install {
                // Quick check if component might be running - don't hang on this
                let bin_path = pm.base_path.join("bin").join(component);
                let binary_name = pm
                    .components
                    .get(component)
                    .and_then(|cfg| cfg.binary_name.clone())
                    .unwrap_or_else(|| component.to_string());

                // Only terminate for services that are known to conflict
                // Use simple, fast commands with timeout
                if component == "vault" || component == "tables" || component == "directory" {
                    let _ = Command::new("sh")
                        .arg("-c")
                        .arg(format!(
                            "pkill -9 -f '{}/{}' 2>/dev/null; true",
                            bin_path.display(),
                            binary_name
                        ))
                        .status();
                    std::thread::sleep(std::time::Duration::from_millis(200));
                }

                let _ = pm.install(component).await;

                // After tables is installed, START PostgreSQL and create Zitadel config files
                // before installing directory
                if component == "tables" {
                    info!("Starting PostgreSQL database...");
                    match pm.start("tables") {
                        Ok(_) => {
                            info!("PostgreSQL started successfully");
                            // Give PostgreSQL time to initialize
                            tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
                        }
                        Err(e) => {
                            warn!("Failed to start PostgreSQL: {}", e);
                        }
                    }

                    // Run migrations using direct connection (Vault not set up yet)
                    info!("Running database migrations...");
                    let database_url =
                        format!("postgres://gbuser:{}@localhost:5432/botserver", db_password);
                    match diesel::PgConnection::establish(&database_url) {
                        Ok(mut conn) => {
                            if let Err(e) = self.apply_migrations(&mut conn) {
                                error!("Failed to apply migrations: {}", e);
                            } else {
                                info!("Database migrations applied");
                            }
                        }
                        Err(e) => {
                            error!("Failed to connect to database for migrations: {}", e);
                        }
                    }

                    info!("Creating Directory configuration files...");
                    if let Err(e) = self.configure_services_in_directory(&db_password).await {
                        error!("Failed to create Directory config files: {}", e);
                    }
                }

                // Directory configuration - setup happens after install starts Zitadel
                if component == "directory" {
                    info!("Waiting for Directory to be ready...");
                    if let Err(e) = self.setup_directory().await {
                        // Don't fail completely - Zitadel may still be usable with first instance setup
                        warn!("Directory additional setup had issues: {}", e);
                    }
                }

                // After Vault is installed, START the server then initialize it
                if component == "vault" {
                    info!("Starting Vault server...");
                    match pm.start("vault") {
                        Ok(_) => {
                            info!("Vault server started");
                            // Give Vault time to start
                            tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
                        }
                        Err(e) => {
                            warn!("Failed to start Vault server: {}", e);
                        }
                    }

                    info!("Initializing Vault with secrets...");
                    if let Err(e) = self
                        .setup_vault(
                            &db_password,
                            &drive_accesskey,
                            &drive_secret,
                            &cache_password,
                        )
                        .await
                    {
                        error!("Failed to setup Vault: {}", e);
                    }

                    // Initialize the global SecretsManager so other components can use Vault
                    info!("Initializing SecretsManager...");
                    debug!(
                        "VAULT_ADDR={:?}, VAULT_TOKEN set={}",
                        std::env::var("VAULT_ADDR").ok(),
                        std::env::var("VAULT_TOKEN").is_ok()
                    );
                    match init_secrets_manager().await {
                        Ok(_) => info!("SecretsManager initialized successfully"),
                        Err(e) => {
                            error!("Failed to initialize SecretsManager: {}", e);
                            // Don't continue if SecretsManager fails - it's required for DB connection
                            return Err(anyhow::anyhow!(
                                "SecretsManager initialization failed: {}",
                                e
                            ));
                        }
                    }
                }

                if component == "email" {
                    info!("Auto-configuring Email (Stalwart)...");
                    if let Err(e) = self.setup_email().await {
                        error!("Failed to setup Email: {}", e);
                    }
                }

                if component == "proxy" {
                    info!("Configuring Caddy reverse proxy...");
                    if let Err(e) = self.setup_caddy_proxy().await {
                        error!("Failed to setup Caddy: {}", e);
                    }
                }

                if component == "dns" {
                    info!("Configuring CoreDNS for dynamic DNS...");
                    if let Err(e) = self.setup_coredns().await {
                        error!("Failed to setup CoreDNS: {}", e);
                    }
                }
            }
        }

        Ok(())
    }

    /// Configure database and drive credentials in Directory
    /// This creates the Zitadel config files BEFORE Zitadel is installed
    /// db_password is passed directly from bootstrap - NO ENV VARS
    async fn configure_services_in_directory(&self, db_password: &str) -> Result<()> {
        info!("Creating Zitadel configuration files...");

        let zitadel_config_path = PathBuf::from("./botserver-stack/conf/directory/zitadel.yaml");
        let steps_config_path = PathBuf::from("./botserver-stack/conf/directory/steps.yaml");
        // Use absolute path for PAT file since zitadel runs from bin/directory/
        let pat_path =
            std::env::current_dir()?.join("botserver-stack/conf/directory/admin-pat.txt");

        fs::create_dir_all(zitadel_config_path.parent().unwrap())?;

        // Generate Zitadel database password
        let zitadel_db_password = self.generate_secure_password(24);

        // Create zitadel.yaml - main configuration
        // Note: Zitadel uses lowercase 'postgres' and nested User/Admin with Username field
        let zitadel_config = format!(
            r#"Log:
  Level: info
  Formatter:
    Format: text

Database:
  postgres:
    Host: localhost
    Port: 5432
    Database: zitadel
    User:
      Username: zitadel
      Password: "{}"
      SSL:
        Mode: disable
    Admin:
      Username: gbuser
      Password: "{}"
      SSL:
        Mode: disable

Machine:
  Identification:
    Hostname:
      Enabled: true

ExternalSecure: false
ExternalDomain: localhost
ExternalPort: 8080

DefaultInstance:
  OIDCSettings:
    AccessTokenLifetime: 12h
    IdTokenLifetime: 12h
    RefreshTokenIdleExpiration: 720h
    RefreshTokenExpiration: 2160h
"#,
            zitadel_db_password,
            db_password, // Use the password passed directly from bootstrap
        );

        fs::write(&zitadel_config_path, zitadel_config)?;
        info!("Created zitadel.yaml configuration");

        // Create steps.yaml - first instance setup that generates admin PAT
        // Use Machine user with PAT for API access (Human users don't generate PAT files)
        let steps_config = format!(
            r#"FirstInstance:
  InstanceName: "BotServer"
  DefaultLanguage: "en"
  PatPath: "{}"
  Org:
    Name: "BotServer"
    Machine:
      Machine:
        Username: "admin-sa"
        Name: "Admin Service Account"
      Pat:
        ExpirationDate: "2099-12-31T23:59:59Z"
    Human:
      UserName: "admin"
      FirstName: "Admin"
      LastName: "User"
      Email:
        Address: "admin@localhost"
        Verified: true
      Password: "{}"
      PasswordChangeRequired: false
"#,
            pat_path.to_string_lossy(),
            self.generate_secure_password(16),
        );

        fs::write(&steps_config_path, steps_config)?;
        info!("Created steps.yaml for first instance setup");

        // Create zitadel database in PostgreSQL
        info!("Creating zitadel database...");
        let create_db_result = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "PGPASSWORD='{}' psql -h localhost -p 5432 -U gbuser -d postgres -c \"CREATE DATABASE zitadel\" 2>&1 || true",
                db_password
            ))
            .output();

        if let Ok(output) = create_db_result {
            let stdout = String::from_utf8_lossy(&output.stdout);
            if !stdout.contains("already exists") {
                info!("Created zitadel database");
            }
        }

        // Create zitadel user
        let create_user_result = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "PGPASSWORD='{}' psql -h localhost -p 5432 -U gbuser -d postgres -c \"CREATE USER zitadel WITH PASSWORD '{}' SUPERUSER\" 2>&1 || true",
                db_password,
                zitadel_db_password
            ))
            .output();

        if let Ok(output) = create_user_result {
            let stdout = String::from_utf8_lossy(&output.stdout);
            if !stdout.contains("already exists") {
                info!("Created zitadel database user");
            }
        }

        info!("Zitadel configuration files created");
        Ok(())
    }
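
    // For reference, the artifacts this step produces (as written above, paths
    // relative to the working directory):
    //   botserver-stack/conf/directory/zitadel.yaml   - runtime config (database, external domain)
    //   botserver-stack/conf/directory/steps.yaml     - first-instance setup; tells Zitadel to write the admin PAT
    //   botserver-stack/conf/directory/admin-pat.txt  - PAT written by Zitadel, later read by setup_directory()
    // plus a `zitadel` database and a `zitadel` superuser role in PostgreSQL.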

    /// Setup Caddy as reverse proxy for all services
    async fn setup_caddy_proxy(&self) -> Result<()> {
        let caddy_config = PathBuf::from("./botserver-stack/conf/proxy/Caddyfile");
        fs::create_dir_all(caddy_config.parent().unwrap())?;

        let config = format!(
            r#"{{
    admin off
    auto_https disable_redirects
}}

# Main API
api.botserver.local {{
    tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
    reverse_proxy {}
}}

# Directory/Auth service
auth.botserver.local {{
    tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
    reverse_proxy {}
}}

# LLM service
llm.botserver.local {{
    tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
    reverse_proxy {}
}}

# Mail service
mail.botserver.local {{
    tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
    reverse_proxy {}
}}

# Meet service
meet.botserver.local {{
    tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
    reverse_proxy {}
}}
"#,
            crate::core::urls::InternalUrls::DIRECTORY_BASE.replace("https://", ""),
            crate::core::urls::InternalUrls::DIRECTORY_BASE.replace("https://", ""),
            crate::core::urls::InternalUrls::LLM.replace("https://", ""),
            crate::core::urls::InternalUrls::EMAIL.replace("https://", ""),
            crate::core::urls::InternalUrls::LIVEKIT.replace("https://", "")
        );

        fs::write(caddy_config, config)?;
        info!("Caddy proxy configured");
        Ok(())
    }
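
    // A sketch of one rendered site block after substitution, assuming
    // DIRECTORY_BASE were "https://localhost:8080" (illustrative value only;
    // the real constant lives in crate::core::urls):
    //
    //     auth.botserver.local {
    //         tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
    //         reverse_proxy localhost:8080
    //     }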

    /// Setup CoreDNS for dynamic DNS service
    async fn setup_coredns(&self) -> Result<()> {
        let dns_config = PathBuf::from("./botserver-stack/conf/dns/Corefile");
        fs::create_dir_all(dns_config.parent().unwrap())?;

        let zone_file = PathBuf::from("./botserver-stack/conf/dns/botserver.local.zone");

        // Create Corefile
        let corefile = r#"botserver.local:53 {
    file /botserver-stack/conf/dns/botserver.local.zone
    reload 10s
    log
}

.:53 {
    forward . 8.8.8.8 8.8.4.4
    cache 30
    log
}
"#;

        fs::write(dns_config, corefile)?;

        // Create initial zone file with component names
        let zone = r#"$ORIGIN botserver.local.
$TTL 60
@   IN  SOA ns1.botserver.local. admin.botserver.local. (
    2024010101 ; Serial
    3600       ; Refresh
    1800       ; Retry
    604800     ; Expire
    60         ; Minimum TTL
)
    IN  NS  ns1.botserver.local.
ns1 IN  A   127.0.0.1

; Core services
api       IN  A   127.0.0.1
tables    IN  A   127.0.0.1
drive     IN  A   127.0.0.1
cache     IN  A   127.0.0.1
vectordb  IN  A   127.0.0.1
vault     IN  A   127.0.0.1

; Application services
llm       IN  A   127.0.0.1
embedding IN  A   127.0.0.1
directory IN  A   127.0.0.1
auth      IN  A   127.0.0.1
email     IN  A   127.0.0.1
meet      IN  A   127.0.0.1

; Dynamic entries will be added below
"#;

        fs::write(zone_file, zone)?;
        info!("CoreDNS configured for dynamic DNS");
        Ok(())
    }
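
    // Quick manual check once CoreDNS is running (sketch; assumes the `dig`
    // utility is installed and the dns component binds 127.0.0.1:53):
    //
    //     dig @127.0.0.1 api.botserver.local +short
    //     # expected output: 127.0.0.1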

    /// Setup Directory (Zitadel) with default organization and user
    async fn setup_directory(&self) -> Result<()> {
        let config_path = PathBuf::from("./config/directory_config.json");
        let pat_path = PathBuf::from("./botserver-stack/conf/directory/admin-pat.txt");

        // Ensure config directory exists
        tokio::fs::create_dir_all("./config").await?;

        // Wait for Directory to be ready and check for PAT file
        info!("Waiting for Zitadel to be ready...");
        let mut attempts = 0;
        let max_attempts = 60; // 60 seconds max wait

        while attempts < max_attempts {
            // Check if Zitadel is healthy
            let health_check = std::process::Command::new("curl")
                .args(["-f", "-s", "http://localhost:8080/healthz"])
                .output();

            if let Ok(output) = health_check {
                if output.status.success() {
                    info!("Zitadel is healthy");
                    break;
                }
            }

            attempts += 1;
            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
        }

        if attempts >= max_attempts {
            warn!("Zitadel health check timed out, continuing anyway...");
        }

        // Wait a bit more for PAT file to be generated
        tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;

        // Read the admin PAT generated by Zitadel first instance setup
        let admin_token = if pat_path.exists() {
            let token = fs::read_to_string(&pat_path)?;
            let token = token.trim().to_string();
            info!("Loaded admin PAT from {}", pat_path.display());
            Some(token)
        } else {
            warn!("Admin PAT file not found at {}", pat_path.display());
            warn!("Zitadel first instance setup may not have completed");
            None
        };

        let mut setup = DirectorySetup::new(
            "http://localhost:8080".to_string(), // Use HTTP since TLS is disabled
            config_path,
        );

        // Set the admin token if we have it
        if let Some(token) = admin_token {
            setup.set_admin_token(token);
        } else {
            // If no PAT, we can't proceed with API calls
            info!("Directory setup skipped - no admin token available");
            info!("First instance setup created initial admin user via steps.yaml");
            return Ok(());
        }

        // Wait a bit more for Zitadel to be fully ready
        tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;

        // Try to create additional organization for bot users
        let org_name = "default";
        match setup
            .create_organization(org_name, "Default Organization")
            .await
        {
            Ok(org_id) => {
                info!("Created default organization: {}", org_name);

                // Generate secure passwords
                let user_password = self.generate_secure_password(16);

                // Create user@default account for regular bot usage
                match setup
                    .create_user(
                        &org_id,
                        "user",
                        "user@default",
                        &user_password,
                        "User",
                        "Default",
                        false,
                    )
                    .await
                {
                    Ok(regular_user) => {
                        info!("Created regular user: user@default");
                        info!("  Regular user ID: {}", regular_user.id);
                    }
                    Err(e) => {
                        warn!("Failed to create regular user: {}", e);
                    }
                }

                // Create OAuth2 application for BotServer
                match setup.create_oauth_application(&org_id).await {
                    Ok((project_id, client_id, client_secret)) => {
                        info!("Created OAuth2 application in project: {}", project_id);

                        // Save configuration
                        let admin_user = crate::package_manager::setup::DefaultUser {
                            id: "admin".to_string(),
                            username: "admin".to_string(),
                            email: "admin@localhost".to_string(),
                            password: "".to_string(), // Don't store password
                            first_name: "Admin".to_string(),
                            last_name: "User".to_string(),
                        };

                        if let Ok(config) = setup
                            .save_config(
                                org_id.clone(),
                                org_name.to_string(),
                                admin_user,
                                client_id.clone(),
                                client_secret,
                            )
                            .await
                        {
                            info!("Directory initialized successfully!");
                            info!("  Organization: default");
                            info!("  Client ID: {}", client_id);
                            info!("  Login URL: {}", config.base_url);
                        }
                    }
                    Err(e) => {
                        warn!("Failed to create OAuth2 application: {}", e);
                    }
                }
            }
            Err(e) => {
                warn!("Failed to create organization: {}", e);
                info!("Using Zitadel's default organization from first instance setup");
            }
        }

        info!("Directory setup complete");
        Ok(())
    }
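
    // Note on the PAT read above (hedged: general Zitadel behavior, not specific
    // to this crate): the token in admin-pat.txt belongs to the "admin-sa"
    // machine user from steps.yaml, and API calls authenticate by sending it as
    // a bearer token, roughly:
    //
    //     curl -H "Authorization: Bearer $(cat botserver-stack/conf/directory/admin-pat.txt)" \
    //          http://localhost:8080/management/v1/orgs/me
    //
    // (the endpoint is illustrative; DirectorySetup wraps these calls internally).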

    /// Setup Vault with all service secrets and write .env file with VAULT_* variables
    async fn setup_vault(
        &self,
        db_password: &str,
        drive_accesskey: &str,
        drive_secret: &str,
        cache_password: &str,
    ) -> Result<()> {
        let vault_conf_path = PathBuf::from("./botserver-stack/conf/vault");
        let vault_init_path = vault_conf_path.join("init.json");
        let env_file_path = PathBuf::from("./.env");

        // Wait for Vault to be ready
        info!("Waiting for Vault to be ready...");
        let mut attempts = 0;
        let max_attempts = 30;

        while attempts < max_attempts {
            let health_check = std::process::Command::new("curl")
                .args(["-f", "-s", "http://localhost:8200/v1/sys/health?standbyok=true&uninitcode=200&sealedcode=200"])
                .output();

            if let Ok(output) = health_check {
                if output.status.success() {
                    info!("Vault is responding");
                    break;
                }
            }

            attempts += 1;
            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
        }

        if attempts >= max_attempts {
            warn!("Vault health check timed out");
            return Err(anyhow::anyhow!(
                "Vault not ready after {} seconds",
                max_attempts
            ));
        }

        // Check if Vault is already initialized
        let vault_addr = "http://localhost:8200";
        std::env::set_var("VAULT_ADDR", vault_addr);
        std::env::set_var("VAULT_SKIP_VERIFY", "true");

        // Read init.json if it exists (from post_install_cmds)
        let (unseal_key, root_token) = if vault_init_path.exists() {
            info!("Reading Vault initialization from init.json...");
            let init_json = fs::read_to_string(&vault_init_path)?;
            let init_data: serde_json::Value = serde_json::from_str(&init_json)?;

            let unseal_key = init_data["unseal_keys_b64"]
                .as_array()
                .and_then(|arr| arr.first())
                .and_then(|v| v.as_str())
                .unwrap_or("")
                .to_string();

            let root_token = init_data["root_token"].as_str().unwrap_or("").to_string();

            (unseal_key, root_token)
        } else {
            // Initialize Vault if not already done
            info!("Initializing Vault...");
            // Clear any mTLS env vars that might interfere with CLI
            let init_output = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} ./botserver-stack/bin/vault/vault operator init -key-shares=1 -key-threshold=1 -format=json",
                    vault_addr
                ))
                .output()?;

            if !init_output.status.success() {
                let stderr = String::from_utf8_lossy(&init_output.stderr);
                if stderr.contains("already initialized") {
                    warn!("Vault already initialized but init.json not found");
                    return Err(anyhow::anyhow!("Vault initialized but credentials lost"));
                }
                return Err(anyhow::anyhow!("Vault init failed: {}", stderr));
            }

            let init_json = String::from_utf8_lossy(&init_output.stdout);
            fs::write(&vault_init_path, init_json.as_ref())?;
            fs::set_permissions(&vault_init_path, std::fs::Permissions::from_mode(0o600))?;

            let init_data: serde_json::Value = serde_json::from_str(&init_json)?;
            let unseal_key = init_data["unseal_keys_b64"]
                .as_array()
                .and_then(|arr| arr.first())
                .and_then(|v| v.as_str())
                .unwrap_or("")
                .to_string();
            let root_token = init_data["root_token"].as_str().unwrap_or("").to_string();

            (unseal_key, root_token)
        };

        if root_token.is_empty() {
            return Err(anyhow::anyhow!("Failed to get Vault root token"));
        }

        // Unseal Vault
        info!("Unsealing Vault...");
        // Clear any mTLS env vars that might interfere with CLI
        let unseal_output = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} ./botserver-stack/bin/vault/vault operator unseal {}",
                vault_addr, unseal_key
            ))
            .output()?;

        if !unseal_output.status.success() {
            let stderr = String::from_utf8_lossy(&unseal_output.stderr);
            if !stderr.contains("already unsealed") {
                warn!("Vault unseal warning: {}", stderr);
            }
        }

        // Set VAULT_TOKEN for subsequent commands
        std::env::set_var("VAULT_TOKEN", &root_token);

        // Enable the KV v2 secrets engine at the secret/ mount (the gbo/* keys below live under it)
        info!("Enabling KV secrets engine...");
        let _ = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} ./botserver-stack/bin/vault/vault secrets enable -path=secret kv-v2 2>&1 || true",
                vault_addr, root_token
            ))
            .output();

        // Store all secrets in Vault
        info!("Storing secrets in Vault...");

        // Database credentials
        let _ = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} ./botserver-stack/bin/vault/vault kv put secret/gbo/tables host=localhost port=5432 database=botserver username=gbuser password='{}'",
                vault_addr, root_token, db_password
            ))
            .output()?;
        info!("  Stored database credentials");

        // Drive credentials
        let _ = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} ./botserver-stack/bin/vault/vault kv put secret/gbo/drive accesskey='{}' secret='{}'",
                vault_addr, root_token, drive_accesskey, drive_secret
            ))
            .output()?;
        info!("  Stored drive credentials");

        // Cache credentials
        let _ = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} ./botserver-stack/bin/vault/vault kv put secret/gbo/cache password='{}'",
                vault_addr, root_token, cache_password
            ))
            .output()?;
        info!("  Stored cache credentials");
|
2025-12-07 02:13:28 -03:00
|
|
|
|
|
|
|
|
// Directory placeholder (will be updated after Zitadel setup)
|
|
|
|
|
let _ = std::process::Command::new("sh")
|
|
|
|
|
.arg("-c")
|
|
|
|
|
.arg(format!(
|
2025-12-08 09:14:31 -03:00
|
|
|
"unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} ./botserver-stack/bin/vault/vault kv put secret/gbo/directory url=https://localhost:8080 project_id= client_id= client_secret=",
|
2025-12-07 02:13:28 -03:00
|
|
|
vault_addr, root_token
|
|
|
|
|
))
|
|
|
|
|
.output()?;
|
2025-12-09 07:55:11 -03:00
|
|
|
info!(" Created directory placeholder");
|
2025-12-07 02:13:28 -03:00
|
|
|
|
|
|
|
|
// LLM placeholder
|
|
|
|
|
let _ = std::process::Command::new("sh")
|
|
|
|
|
.arg("-c")
|
|
|
|
|
.arg(format!(
|
2025-12-08 09:14:31 -03:00
|
|
|
"unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} ./botserver-stack/bin/vault/vault kv put secret/gbo/llm openai_key= anthropic_key= groq_key=",
|
2025-12-07 02:13:28 -03:00
|
|
|
vault_addr, root_token
|
|
|
|
|
))
|
|
|
|
|
.output()?;
|
2025-12-09 07:55:11 -03:00
|
|
|
info!(" Created LLM placeholder");
|
2025-12-07 02:13:28 -03:00
|
|
|
|
|
|
|
|
        // Email placeholder
        let _ = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} ./botserver-stack/bin/vault/vault kv put secret/gbo/email username= password=",
                vault_addr, root_token
            ))
            .output()?;
        info!(" Created email placeholder");

        // Encryption key
        let encryption_key = self.generate_secure_password(32);
        let _ = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} ./botserver-stack/bin/vault/vault kv put secret/gbo/encryption master_key='{}'",
                vault_addr, root_token, encryption_key
            ))
            .output()?;
        info!(" Generated and stored encryption key");

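        // Note: each secret written above can be read back with the Vault CLI,
        // e.g. `vault kv get secret/gbo/tables` (illustrative; assumes VAULT_ADDR
        // and VAULT_TOKEN are exported as in the commands above).
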
        // Write .env file with ONLY Vault variables - NO LEGACY FALLBACK
        info!("Writing .env file with Vault configuration...");
        let env_content = format!(
            r#"# BotServer Environment Configuration
# Generated by bootstrap - DO NOT ADD OTHER SECRETS HERE
# All secrets are stored in Vault at the paths below:
# - gbo/tables - PostgreSQL credentials
# - gbo/drive - MinIO/S3 credentials
# - gbo/cache - Redis credentials
# - gbo/directory - Zitadel credentials
# - gbo/email - Email credentials
# - gbo/llm - LLM API keys
# - gbo/encryption - Encryption keys

# Vault Configuration - THESE ARE THE ONLY ALLOWED ENV VARS
VAULT_ADDR={}
VAULT_TOKEN={}

# Vault uses HTTP for local development (TLS disabled in config.hcl)
# In production, enable TLS and set VAULT_CACERT, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY

# Cache TTL for secrets (seconds)
VAULT_CACHE_TTL=300
"#,
            vault_addr, root_token
        );

        fs::write(&env_file_path, env_content)?;
        info!(" Created .env file with Vault configuration");

        info!("Vault setup complete!");
        info!(" Vault UI: {}/ui", vault_addr);
        info!(" Root token saved to: {}", vault_init_path.display());

        Ok(())
    }

    /// Setup Email (Stalwart) with Directory integration
    pub async fn setup_email(&self) -> Result<()> {
        let config_path = PathBuf::from("./config/email_config.json");
        let directory_config_path = PathBuf::from("./config/directory_config.json");

        let mut setup = EmailSetup::new(
            crate::core::urls::InternalUrls::DIRECTORY_BASE.to_string(),
            config_path,
        );

        // Try to integrate with Directory if it exists
        let directory_config = if directory_config_path.exists() {
            Some(directory_config_path)
        } else {
            None
        };

        let config = setup.initialize(directory_config).await?;

        info!("Email server initialized successfully!");
        info!(" SMTP: {}:{}", config.smtp_host, config.smtp_port);
        info!(" IMAP: {}:{}", config.imap_host, config.imap_port);
        info!(" Admin: {} / {}", config.admin_user, config.admin_pass);
        if config.directory_integration {
            info!(" Integrated with Directory for authentication");
        }

        Ok(())
    }

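    /// Build an S3-compatible client for the Drive (MinIO) endpoint, pulling
    /// credentials from Vault when they are not present in the config.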
    async fn get_drive_client(config: &AppConfig) -> Client {
        let endpoint = if config.drive.server.ends_with('/') {
            config.drive.server.clone()
        } else {
            format!("{}/", config.drive.server)
        };

        // Get credentials from config, or fetch from Vault if empty
        let (access_key, secret_key) =
            if config.drive.access_key.is_empty() || config.drive.secret_key.is_empty() {
                // Try to get from Vault using the global SecretsManager
                match crate::shared::utils::get_secrets_manager().await {
                    Some(manager) if manager.is_enabled() => {
                        match manager.get_drive_credentials().await {
                            Ok((ak, sk)) => (ak, sk),
                            Err(e) => {
                                warn!("Failed to get drive credentials from Vault: {}", e);
                                (
                                    config.drive.access_key.clone(),
                                    config.drive.secret_key.clone(),
                                )
                            }
                        }
                    }
                    _ => (
                        config.drive.access_key.clone(),
                        config.drive.secret_key.clone(),
                    ),
                }
            } else {
                (
                    config.drive.access_key.clone(),
                    config.drive.secret_key.clone(),
                )
            };

        let base_config = aws_config::defaults(BehaviorVersion::latest())
            .endpoint_url(endpoint)
            .region("auto")
            .credentials_provider(aws_sdk_s3::config::Credentials::new(
                access_key, secret_key, None, None, "static",
            ))
            .load()
            .await;
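        // MinIO-style endpoints address buckets as http://host/bucket rather than
        // as virtual-hosted subdomains, hence force_path_style(true) below.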
        let s3_config = aws_sdk_s3::config::Builder::from(&base_config)
            .force_path_style(true)
            .build();
        aws_sdk_s3::Client::from_conf(s3_config)
    }

    /// Sync bot configurations from template config.csv files to the database.
    /// This is separate from drive upload and does not require an S3 connection.
    pub fn sync_templates_to_database(&self) -> Result<()> {
        let mut conn = establish_pg_connection()?;
        self.create_bots_from_templates(&mut conn)?;
        Ok(())
    }

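    /// Upload every templates/*.gbai folder to its own Drive bucket, creating
    /// the bucket on first run and skipping folders whose bucket already exists.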
    pub async fn upload_templates_to_drive(&self, _config: &AppConfig) -> Result<()> {
        let templates_dir = Path::new("templates");
        if !templates_dir.exists() {
            return Ok(());
        }
        let client = Self::get_drive_client(_config).await;
        let mut read_dir = tokio::fs::read_dir(templates_dir).await?;
        while let Some(entry) = read_dir.next_entry().await? {
            let path = entry.path();
            if path.is_dir()
                && path
                    .file_name()
                    .unwrap()
                    .to_string_lossy()
                    .ends_with(".gbai")
            {
                let bot_name = path.file_name().unwrap().to_string_lossy().to_string();
                let bucket = bot_name.trim_start_matches('/').to_string();
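                // e.g. templates/default.gbai/ is mirrored into a bucket named "default.gbai"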
                if client.head_bucket().bucket(&bucket).send().await.is_err() {
                    match client.create_bucket().bucket(&bucket).send().await {
                        Ok(_) => {
                            self.upload_directory_recursive(&client, &path, &bucket, "/")
                                .await?;
                        }
                        Err(e) => {
                            error!("Failed to create bucket {}: {:?}", bucket, e);
                            return Err(anyhow::anyhow!("Failed to create bucket {}: {}. Check S3 credentials and endpoint configuration", bucket, e));
                        }
                    }
                } else {
                    trace!("Bucket {} already exists", bucket);
                }
            }
        }
        Ok(())
    }
    fn create_bots_from_templates(&self, conn: &mut diesel::PgConnection) -> Result<()> {
        use crate::shared::models::schema::bots;
        use diesel::prelude::*;

        let templates_dir = Path::new("templates");
        if !templates_dir.exists() {
            warn!("Templates directory does not exist");
            return Ok(());
        }

        // Get the default bot (created by migrations) - we'll sync all template configs to it
        let default_bot: Option<(uuid::Uuid, String)> = bots::table
            .filter(bots::is_active.eq(true))
            .select((bots::id, bots::name))
            .first(conn)
            .optional()?;

        let (default_bot_id, default_bot_name) = match default_bot {
            Some((id, name)) => (id, name),
            None => {
                error!("No active bot found in database - cannot sync template configs");
                return Ok(());
            }
        };

        info!(
            "Syncing template configs to bot '{}' ({})",
            default_bot_name, default_bot_id
        );

        // Only sync the default.gbai template config (main config for the system)
        let default_template = templates_dir.join("default.gbai");
        if default_template.exists() {
            let config_path = default_template.join("default.gbot").join("config.csv");

            if config_path.exists() {
                match std::fs::read_to_string(&config_path) {
                    Ok(csv_content) => {
                        info!("Syncing config.csv from {:?}", config_path);
                        if let Err(e) =
                            self.sync_config_csv_to_db(conn, &default_bot_id, &csv_content)
                        {
                            error!("Failed to sync config.csv: {}", e);
                        }
                    }
                    Err(e) => {
                        warn!("Could not read config.csv: {}", e);
                    }
                }
            } else {
                warn!("No config.csv found at {:?}", config_path);
            }
        } else {
            warn!("default.gbai template not found");
        }

        Ok(())
    }

    /// Sync config.csv content to the bot_configuration table
    /// This is critical for loading LLM settings on fresh starts
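    ///
    /// Expected config.csv shape (illustrative; the first row is a header and the
    /// keys shown here are hypothetical):
    ///
    /// ```text
    /// name,value
    /// llm-provider,openai
    /// welcome-message,Hello from the default bot
    /// ```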
    fn sync_config_csv_to_db(
        &self,
        conn: &mut diesel::PgConnection,
        bot_id: &uuid::Uuid,
        content: &str,
    ) -> Result<()> {
        let mut synced = 0;
        let mut skipped = 0;
        let lines: Vec<&str> = content.lines().collect();

        debug!(
            "Parsing config.csv with {} lines for bot {}",
            lines.len(),
            bot_id
        );

        for (line_num, line) in lines.iter().enumerate().skip(1) {
            // Skip header line (name,value)
            let line = line.trim();
            if line.is_empty() || line.starts_with('#') {
                continue;
            }

            let parts: Vec<&str> = line.splitn(2, ',').collect();
            if parts.len() >= 2 {
                let key = parts[0].trim();
                let value = parts[1].trim();

                if key.is_empty() {
                    skipped += 1;
                    continue;
                }

                // Use UUID type since migration 6.1.1 converted column to UUID
                let new_id = uuid::Uuid::new_v4();

                match diesel::sql_query(
                    "INSERT INTO bot_configuration (id, bot_id, config_key, config_value, config_type, created_at, updated_at) \
                     VALUES ($1, $2, $3, $4, 'string', NOW(), NOW()) \
                     ON CONFLICT (bot_id, config_key) DO UPDATE SET config_value = EXCLUDED.config_value, updated_at = NOW()"
                )
                .bind::<diesel::sql_types::Uuid, _>(new_id)
                .bind::<diesel::sql_types::Uuid, _>(bot_id)
                .bind::<diesel::sql_types::Text, _>(key)
                .bind::<diesel::sql_types::Text, _>(value)
                .execute(conn) {
                    Ok(_) => {
                        trace!(
                            " Synced config: {} = {}",
                            key,
                            if key.contains("pass") || key.contains("secret") || key.contains("key") {
                                "***"
                            } else {
                                value
                            }
                        );
                        synced += 1;
                    }
                    Err(e) => {
                        error!(
                            "Failed to sync config key '{}' at line {}: {}",
                            key,
                            line_num + 1,
                            e
                        );
                        // Continue with other keys instead of failing completely
                    }
                }
            }
        }

        if synced > 0 {
            info!(
                "Synced {} config values for bot {} (skipped {} empty lines)",
                synced, bot_id, skipped
            );
        } else {
            warn!(
                "No config values synced for bot {} - check config.csv format",
                bot_id
            );
        }
        Ok(())
    }
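    /// Recursively upload a local directory into a Drive bucket. Returns a boxed
    /// future because async recursion needs an indirection with a sized return type.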
    fn upload_directory_recursive<'a>(
        &'a self,
        client: &'a Client,
        local_path: &'a Path,
        bucket: &'a str,
        prefix: &'a str,
    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<()>> + 'a>> {
        Box::pin(async move {
            let _normalized_path = if local_path.to_string_lossy().ends_with('/') {
                local_path.to_string_lossy().to_string()
            } else {
                format!("{}/", local_path.display())
            };
            let mut read_dir = tokio::fs::read_dir(local_path).await?;
            while let Some(entry) = read_dir.next_entry().await? {
                let path = entry.path();
                let file_name = path.file_name().unwrap().to_string_lossy().to_string();
                let mut key = prefix.trim_matches('/').to_string();
                if !key.is_empty() {
                    key.push('/');
                }
                key.push_str(&file_name);
                if path.is_file() {
                    trace!(
                        "Uploading file {} to bucket {} with key {}",
                        path.display(),
                        bucket,
                        key
                    );
                    let content = tokio::fs::read(&path).await?;
                    client
                        .put_object()
                        .bucket(bucket)
                        .key(&key)
                        .body(content.into())
                        .send()
                        .await?;
                } else if path.is_dir() {
                    self.upload_directory_recursive(client, &path, bucket, &key)
                        .await?;
                }
            }
            Ok(())
        })
    }
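    /// Apply all pending Diesel migrations. The migrations are embedded into the
    /// binary at compile time, so no `migrations/` files are needed at runtime.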
    pub fn apply_migrations(&self, conn: &mut diesel::PgConnection) -> Result<()> {
        use diesel_migrations::HarnessWithOutput;
        use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};

        const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");

        let mut harness = HarnessWithOutput::write_to_stdout(conn);
        if let Err(e) = harness.run_pending_migrations(MIGRATIONS) {
            error!("Failed to apply migrations: {}", e);
            return Err(anyhow::anyhow!("Migration error: {}", e));
        }

        Ok(())
    }

    /// Create the Vault server configuration (TLS disabled for local development;
    /// enable TLS/mTLS with proper certificates in production)
    async fn create_vault_config(&self) -> Result<()> {
        let vault_conf_dir = PathBuf::from("./botserver-stack/conf/vault");
        let config_path = vault_conf_dir.join("config.hcl");

        fs::create_dir_all(&vault_conf_dir)?;

        // Vault is started from botserver-stack/bin/vault/, so paths must be relative to that
        // From bin/vault/ to conf/ is ../../conf/
        // From bin/vault/ to data/ is ../../data/
        let config = r#"# Vault Configuration
# Generated by BotServer bootstrap
# Note: Paths are relative to botserver-stack/bin/vault/ (Vault's working directory)

# Storage backend - file-based for single instance
storage "file" {
  path = "../../data/vault"
}

# Listener with TLS DISABLED for local development
# In production, enable TLS with proper certificates
listener "tcp" {
  address = "0.0.0.0:8200"
  tls_disable = true
}

# API settings - use HTTP for local dev
api_addr = "http://localhost:8200"
cluster_addr = "http://localhost:8201"

# UI enabled for administration
ui = true

# Disable memory locking (for development - enable in production)
disable_mlock = true

# Telemetry
telemetry {
  disable_hostname = true
}

# Log level
log_level = "info"
"#;

        fs::write(&config_path, config)?;

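        // The stack launcher is expected to point Vault at this file, e.g.
        // `vault server -config=../../conf/vault/config.hcl` run from
        // botserver-stack/bin/vault/ (illustrative invocation).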
        // Create data directory for Vault storage
        fs::create_dir_all("./botserver-stack/data/vault")?;

        info!(
            "Created Vault config (TLS disabled for local dev) at {}",
            config_path.display()
        );
        Ok(())
    }

    /// Generate TLS certificates for all services
    async fn generate_certificates(&self) -> Result<()> {
        let cert_dir = PathBuf::from("./botserver-stack/conf/system/certificates");

        // Create certificate directory structure
        fs::create_dir_all(&cert_dir)?;
        fs::create_dir_all(cert_dir.join("ca"))?;

        // Check if CA already exists
        let ca_cert_path = cert_dir.join("ca/ca.crt");
        let ca_key_path = cert_dir.join("ca/ca.key");

        // CA params for issuer creation
        let mut ca_params = CertificateParams::default();
        ca_params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);

        let mut dn = DistinguishedName::new();
        dn.push(DnType::CountryName, "BR");
        dn.push(DnType::OrganizationName, "BotServer");
        dn.push(DnType::CommonName, "BotServer CA");
        ca_params.distinguished_name = dn;

        ca_params.not_before = time::OffsetDateTime::now_utc();
        ca_params.not_after = time::OffsetDateTime::now_utc() + time::Duration::days(3650);

        let ca_key_pair: KeyPair = if ca_cert_path.exists() && ca_key_path.exists() {
            info!("Using existing CA certificate");
            // Load existing CA key
            let key_pem = fs::read_to_string(&ca_key_path)?;
            KeyPair::from_pem(&key_pem)?
        } else {
            info!("Generating new CA certificate");
            let key_pair = KeyPair::generate()?;
            let cert = ca_params.self_signed(&key_pair)?;

            // Save CA certificate and key
            fs::write(&ca_cert_path, cert.pem())?;
            fs::write(&ca_key_path, key_pair.serialize_pem())?;

            key_pair
        };

        // Create issuer from CA params and key
        let ca_issuer = Issuer::from_params(&ca_params, &ca_key_pair);

        // Generate client certificate for botserver (for mTLS to all services)
        let botserver_dir = cert_dir.join("botserver");
        fs::create_dir_all(&botserver_dir)?;

        let client_cert_path = botserver_dir.join("client.crt");
        let client_key_path = botserver_dir.join("client.key");

        if !client_cert_path.exists() || !client_key_path.exists() {
            info!("Generating mTLS client certificate for botserver");

            let mut client_params = CertificateParams::default();
            client_params.not_before = time::OffsetDateTime::now_utc();
            client_params.not_after = time::OffsetDateTime::now_utc() + time::Duration::days(365);

            let mut client_dn = DistinguishedName::new();
            client_dn.push(DnType::CountryName, "BR");
            client_dn.push(DnType::OrganizationName, "BotServer");
            client_dn.push(DnType::CommonName, "botserver-client");
            client_params.distinguished_name = client_dn;

            // Add a SAN so the client certificate can also be matched by name
            client_params
                .subject_alt_names
                .push(rcgen::SanType::DnsName("botserver".to_string().try_into()?));

            let client_key = KeyPair::generate()?;
            let client_cert = client_params.signed_by(&client_key, &ca_issuer)?;

            fs::write(&client_cert_path, client_cert.pem())?;
            fs::write(&client_key_path, client_key.serialize_pem())?;
            fs::copy(&ca_cert_path, botserver_dir.join("ca.crt"))?;

            info!(
                "Generated mTLS client certificate at {}",
                client_cert_path.display()
            );
        }

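        // Services that enable mTLS can verify this client certificate against
        // the shared ca.crt copied alongside their own server certificates below.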
        // Services that need certificates - Vault FIRST
        // Using component names: tables (postgres), drive (minio), cache (redis), vectordb (qdrant)
        let services = vec![
            (
                "vault",
                vec!["localhost", "127.0.0.1", "vault.botserver.local"],
            ),
            ("api", vec!["localhost", "127.0.0.1", "api.botserver.local"]),
            ("llm", vec!["localhost", "127.0.0.1", "llm.botserver.local"]),
            (
                "embedding",
                vec!["localhost", "127.0.0.1", "embedding.botserver.local"],
            ),
            (
                "vectordb",
                vec!["localhost", "127.0.0.1", "vectordb.botserver.local"],
            ),
            (
                "tables",
                vec!["localhost", "127.0.0.1", "tables.botserver.local"],
            ),
            (
                "cache",
                vec!["localhost", "127.0.0.1", "cache.botserver.local"],
            ),
            (
                "drive",
                vec!["localhost", "127.0.0.1", "drive.botserver.local"],
            ),
            (
                "directory",
                vec![
                    "localhost",
                    "127.0.0.1",
                    "directory.botserver.local",
                    "auth.botserver.local",
                ],
            ),
            (
                "email",
                vec![
                    "localhost",
                    "127.0.0.1",
                    "email.botserver.local",
                    "smtp.botserver.local",
                    "imap.botserver.local",
                ],
            ),
            (
                "meet",
                vec![
                    "localhost",
                    "127.0.0.1",
                    "meet.botserver.local",
                    "turn.botserver.local",
                ],
            ),
            (
                "caddy",
                vec![
                    "localhost",
                    "127.0.0.1",
                    "*.botserver.local",
                    "botserver.local",
                ],
            ),
        ];

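        // Note: the caddy entry above carries a wildcard SAN (*.botserver.local),
        // so the reverse proxy can terminate TLS for any stack subdomain.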
        for (service, sans) in services {
            let service_dir = cert_dir.join(service);
            fs::create_dir_all(&service_dir)?;

            let cert_path = service_dir.join("server.crt");
            let key_path = service_dir.join("server.key");

            // Skip if certificate already exists
            if cert_path.exists() && key_path.exists() {
                trace!("Certificate for {} already exists", service);
                continue;
            }

            info!("Generating certificate for {}", service);

            // Generate service certificate
            let mut params = CertificateParams::default();
            params.not_before = time::OffsetDateTime::now_utc();
            params.not_after = time::OffsetDateTime::now_utc() + time::Duration::days(365);

            let mut dn = DistinguishedName::new();
            dn.push(DnType::CountryName, "BR");
            dn.push(DnType::OrganizationName, "BotServer");
            dn.push(DnType::CommonName, &format!("{}.botserver.local", service));
            params.distinguished_name = dn;

            // Add SANs
            for san in sans {
                params
                    .subject_alt_names
                    .push(rcgen::SanType::DnsName(san.to_string().try_into()?));
            }

            let key_pair = KeyPair::generate()?;
            let cert = params.signed_by(&key_pair, &ca_issuer)?;

            // Save certificate and key
            fs::write(cert_path, cert.pem())?;
            fs::write(key_path, key_pair.serialize_pem())?;

            // Copy CA cert to service directory for easy access
            fs::copy(&ca_cert_path, service_dir.join("ca.crt"))?;
        }

        info!("TLS certificates generated successfully");
        Ok(())
    }
}