2025-11-22 22:55:35 -03:00
|
|
|
use crate::config::AppConfig;
|
|
|
|
|
use crate::package_manager::setup::{DirectorySetup, EmailSetup};
|
|
|
|
|
use crate::package_manager::{InstallMode, PackageManager};
|
2025-12-08 00:19:29 -03:00
|
|
|
use crate::shared::utils::{establish_pg_connection, init_secrets_manager};
|
2025-11-22 22:55:35 -03:00
|
|
|
use anyhow::Result;
|
|
|
|
|
use aws_config::BehaviorVersion;
|
|
|
|
|
use aws_sdk_s3::Client;
|
2025-12-09 07:49:01 -03:00
|
|
|
use diesel::{Connection, RunQueryDsl};
|
2025-12-12 12:33:17 -03:00
|
|
|
use log::{debug, error, info, warn};
|
2025-11-22 22:55:35 -03:00
|
|
|
use rand::distr::Alphanumeric;
|
2025-12-03 16:05:30 -03:00
|
|
|
use rcgen::{
|
|
|
|
|
BasicConstraints, CertificateParams, DistinguishedName, DnType, IsCa, Issuer, KeyPair,
|
|
|
|
|
};
|
2025-11-29 16:29:28 -03:00
|
|
|
use std::fs;
|
2025-12-07 02:13:28 -03:00
|
|
|
#[cfg(unix)]
|
|
|
|
|
use std::os::unix::fs::PermissionsExt;
|
2025-11-22 22:55:35 -03:00
|
|
|
use std::path::{Path, PathBuf};
|
|
|
|
|
use std::process::Command;
|
|
|
|
|
/// Metadata describing a single stack component that the
/// `PackageManager` can install and start (e.g. "cache", "drive").
///
/// The type is a single `&'static str`, so it is trivially copyable;
/// the added derives let callers copy and compare component entries
/// without cloning.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ComponentInfo {
    /// Component identifier as understood by the package manager.
    pub name: &'static str,
}
|
|
|
|
|
/// Orchestrates installation, startup, and recovery of the botserver
/// stack: Vault first (secrets), then PostgreSQL, MinIO, and the
/// remaining service components.
#[derive(Debug)]
pub struct BootstrapManager {
    /// How components are installed/started (passed through to
    /// `PackageManager::new`).
    pub install_mode: InstallMode,
    /// Optional tenant identifier (passed through to `PackageManager::new`).
    pub tenant: Option<String>,
    /// Root directory of the stack, taken from `BOTSERVER_STACK_PATH`
    /// or defaulting to `./botserver-stack` (see `new`).
    pub stack_path: PathBuf,
}
|
|
|
|
|
impl BootstrapManager {
|
2025-11-28 13:50:28 -03:00
|
|
|
pub async fn new(mode: InstallMode, tenant: Option<String>) -> Self {
|
2025-12-14 15:58:54 -03:00
|
|
|
// Get stack path from env var or use default
|
|
|
|
|
let stack_path = std::env::var("BOTSERVER_STACK_PATH")
|
|
|
|
|
.map(PathBuf::from)
|
|
|
|
|
.unwrap_or_else(|_| PathBuf::from("./botserver-stack"));
|
|
|
|
|
|
2025-11-22 22:55:35 -03:00
|
|
|
Self {
|
2025-11-28 13:50:28 -03:00
|
|
|
install_mode: mode,
|
2025-11-22 22:55:35 -03:00
|
|
|
tenant,
|
2025-12-14 15:58:54 -03:00
|
|
|
stack_path,
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-14 15:58:54 -03:00
|
|
|
/// Get a path relative to the stack directory
|
|
|
|
|
fn stack_dir(&self, subpath: &str) -> PathBuf {
|
|
|
|
|
self.stack_path.join(subpath)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Get the vault binary path as a string for shell commands
|
|
|
|
|
fn vault_bin(&self) -> String {
|
|
|
|
|
self.stack_dir("bin/vault/vault")
|
|
|
|
|
.to_str()
|
|
|
|
|
.unwrap_or("./botserver-stack/bin/vault/vault")
|
|
|
|
|
.to_string()
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
/// Kill all processes running from the botserver-stack directory
|
|
|
|
|
/// This ensures a clean startup when bootstrapping fresh
|
|
|
|
|
pub fn kill_stack_processes() {
|
|
|
|
|
info!("Killing any existing stack processes...");
|
|
|
|
|
|
|
|
|
|
// Kill processes by pattern matching on botserver-stack path
|
|
|
|
|
let patterns = vec![
|
|
|
|
|
"botserver-stack/bin/vault",
|
|
|
|
|
"botserver-stack/bin/tables",
|
|
|
|
|
"botserver-stack/bin/drive",
|
|
|
|
|
"botserver-stack/bin/cache",
|
|
|
|
|
"botserver-stack/bin/directory",
|
|
|
|
|
"botserver-stack/bin/llm",
|
|
|
|
|
"botserver-stack/bin/email",
|
|
|
|
|
"botserver-stack/bin/proxy",
|
|
|
|
|
"botserver-stack/bin/dns",
|
|
|
|
|
"botserver-stack/bin/meeting",
|
|
|
|
|
"botserver-stack/bin/vector_db",
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
for pattern in patterns {
|
|
|
|
|
let _ = Command::new("pkill").args(["-9", "-f", pattern]).output();
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-10 08:54:51 -03:00
|
|
|
// Also kill by specific process names (use -f for pattern match, not -x for exact)
|
2025-12-08 00:19:29 -03:00
|
|
|
let process_names = vec![
|
2025-12-10 08:54:51 -03:00
|
|
|
"vault server",
|
2025-12-08 00:19:29 -03:00
|
|
|
"postgres",
|
|
|
|
|
"minio",
|
|
|
|
|
"redis-server",
|
|
|
|
|
"zitadel",
|
2025-12-10 08:54:51 -03:00
|
|
|
"llama-server",
|
2025-12-08 00:19:29 -03:00
|
|
|
"stalwart",
|
|
|
|
|
"caddy",
|
|
|
|
|
"coredns",
|
|
|
|
|
"livekit",
|
|
|
|
|
"qdrant",
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
for name in process_names {
|
2025-12-10 08:54:51 -03:00
|
|
|
let _ = Command::new("pkill").args(["-9", "-f", name]).output();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Kill processes by port - this catches any process using our ports
|
|
|
|
|
// even if started from a different path
|
|
|
|
|
let ports = vec![
|
|
|
|
|
8200, // Vault
|
|
|
|
|
5432, // PostgreSQL
|
|
|
|
|
9000, // MinIO
|
|
|
|
|
6379, // Redis
|
2025-12-11 08:43:28 -03:00
|
|
|
8300, // Zitadel / Main API
|
2025-12-10 08:54:51 -03:00
|
|
|
8081, // LLM server
|
|
|
|
|
8082, // Embedding server
|
|
|
|
|
25, // Email SMTP
|
|
|
|
|
443, // HTTPS proxy
|
|
|
|
|
53, // DNS
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
for port in ports {
|
|
|
|
|
// Use fuser to kill processes on specific ports
|
|
|
|
|
let _ = Command::new("fuser")
|
|
|
|
|
.args(["-k", "-9", &format!("{}/tcp", port)])
|
|
|
|
|
.output();
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Give processes time to die
|
2025-12-10 08:54:51 -03:00
|
|
|
std::thread::sleep(std::time::Duration::from_millis(1000));
|
2025-12-08 00:19:29 -03:00
|
|
|
info!("Stack processes terminated");
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-09 09:04:56 -03:00
|
|
|
/// Check if another botserver process is already running on this stack
|
|
|
|
|
pub fn check_single_instance() -> Result<bool> {
|
2025-12-14 15:58:54 -03:00
|
|
|
let stack_path = std::env::var("BOTSERVER_STACK_PATH")
|
|
|
|
|
.unwrap_or_else(|_| "./botserver-stack".to_string());
|
|
|
|
|
let lock_file = PathBuf::from(&stack_path).join(".lock");
|
2025-12-09 09:04:56 -03:00
|
|
|
if lock_file.exists() {
|
|
|
|
|
// Check if the PID in the lock file is still running
|
|
|
|
|
if let Ok(pid_str) = fs::read_to_string(&lock_file) {
|
|
|
|
|
if let Ok(pid) = pid_str.trim().parse::<i32>() {
|
|
|
|
|
let check = Command::new("kill").args(["-0", &pid.to_string()]).output();
|
|
|
|
|
if let Ok(output) = check {
|
|
|
|
|
if output.status.success() {
|
|
|
|
|
warn!("Another botserver process (PID {}) is already running on this stack", pid);
|
|
|
|
|
return Ok(false);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Write our PID to the lock file
|
|
|
|
|
let pid = std::process::id();
|
|
|
|
|
if let Some(parent) = lock_file.parent() {
|
|
|
|
|
fs::create_dir_all(parent).ok();
|
|
|
|
|
}
|
|
|
|
|
fs::write(&lock_file, pid.to_string()).ok();
|
|
|
|
|
Ok(true)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Release the instance lock on shutdown.
///
/// Best-effort: a missing lock file (or a failed removal) is silently
/// ignored, matching the original behavior.
pub fn release_instance_lock() {
    let stack_path =
        std::env::var("BOTSERVER_STACK_PATH").unwrap_or_else(|_| "./botserver-stack".to_string());
    let lock_file = PathBuf::from(&stack_path).join(".lock");
    // Removing directly (instead of exists-then-remove) avoids the
    // check/use race; the outcome is observably identical.
    let _ = fs::remove_file(&lock_file);
}
|
|
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
/// Check if botserver-stack has installed components (indicating a working installation).
/// This is used to prevent accidental re-initialization of existing installations.
fn has_installed_stack() -> bool {
    let stack_root = PathBuf::from(
        std::env::var("BOTSERVER_STACK_PATH").unwrap_or_else(|_| "./botserver-stack".to_string()),
    );

    if !stack_root.exists() {
        return false;
    }

    // Any one of these artifacts marks the stack as installed.
    ["bin/vault/vault", "data/vault", "conf/vault/config.hcl"]
        .iter()
        .any(|marker| stack_root.join(marker).exists())
}
|
|
|
|
|
|
2025-12-09 09:04:56 -03:00
|
|
|
/// Reset only Vault credentials (when re-initialization is needed)
|
2025-12-12 12:33:17 -03:00
|
|
|
/// CRITICAL: This should NEVER be called if botserver-stack exists with installed components!
|
2025-12-09 09:04:56 -03:00
|
|
|
/// NEVER deletes user data in botserver-stack
|
|
|
|
|
fn reset_vault_only() -> Result<()> {
|
2025-12-12 12:33:17 -03:00
|
|
|
// SAFETY CHECK: NEVER reset if stack is installed
|
|
|
|
|
if Self::has_installed_stack() {
|
|
|
|
|
error!("REFUSING to reset Vault credentials - botserver-stack is installed!");
|
|
|
|
|
error!("If you need to re-initialize, manually delete botserver-stack directory first");
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Cannot reset Vault - existing installation detected. Manual intervention required."
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-14 15:58:54 -03:00
|
|
|
let stack_path = std::env::var("BOTSERVER_STACK_PATH")
|
|
|
|
|
.unwrap_or_else(|_| "./botserver-stack".to_string());
|
|
|
|
|
let vault_init = PathBuf::from(&stack_path).join("conf/vault/init.json");
|
2025-12-08 00:19:29 -03:00
|
|
|
let env_file = PathBuf::from("./.env");
|
|
|
|
|
|
2025-12-09 09:04:56 -03:00
|
|
|
// Only remove vault init.json and .env - NEVER touch data/
|
|
|
|
|
if vault_init.exists() {
|
|
|
|
|
info!("Removing vault init.json for re-initialization...");
|
|
|
|
|
fs::remove_file(&vault_init)?;
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if env_file.exists() {
|
2025-12-09 09:04:56 -03:00
|
|
|
info!("Removing .env file for re-initialization...");
|
2025-12-08 00:19:29 -03:00
|
|
|
fs::remove_file(&env_file)?;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
pub async fn start_all(&mut self) -> Result<()> {
|
2025-11-22 22:55:35 -03:00
|
|
|
let pm = PackageManager::new(self.install_mode.clone(), self.tenant.clone())?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
// VAULT MUST START FIRST - all other services depend on it for secrets
|
|
|
|
|
if pm.is_installed("vault") {
|
2025-12-08 23:35:33 -03:00
|
|
|
// Check if Vault is already running before trying to start
|
|
|
|
|
let vault_already_running = Command::new("sh")
|
|
|
|
|
.arg("-c")
|
|
|
|
|
.arg("curl -f -s http://localhost:8200/v1/sys/health?standbyok=true&uninitcode=200&sealedcode=200 >/dev/null 2>&1")
|
|
|
|
|
.stdout(std::process::Stdio::null())
|
|
|
|
|
.stderr(std::process::Stdio::null())
|
|
|
|
|
.status()
|
|
|
|
|
.map(|s| s.success())
|
|
|
|
|
.unwrap_or(false);
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-08 23:35:33 -03:00
|
|
|
if vault_already_running {
|
|
|
|
|
info!("Vault is already running");
|
|
|
|
|
} else {
|
|
|
|
|
info!("Starting Vault secrets service...");
|
|
|
|
|
match pm.start("vault") {
|
|
|
|
|
Ok(_child) => {
|
|
|
|
|
info!("Vault process started, waiting for initialization...");
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!("Vault might already be running: {}", e);
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
2025-12-08 23:35:33 -03:00
|
|
|
|
|
|
|
|
// Wait for Vault to be ready (up to 10 seconds)
|
|
|
|
|
for i in 0..10 {
|
|
|
|
|
let vault_ready = Command::new("sh")
|
|
|
|
|
.arg("-c")
|
|
|
|
|
.arg("curl -f -s http://localhost:8200/v1/sys/health?standbyok=true&uninitcode=200&sealedcode=200 >/dev/null 2>&1")
|
|
|
|
|
.stdout(std::process::Stdio::null())
|
|
|
|
|
.stderr(std::process::Stdio::null())
|
|
|
|
|
.status()
|
|
|
|
|
.map(|s| s.success())
|
|
|
|
|
.unwrap_or(false);
|
|
|
|
|
|
|
|
|
|
if vault_ready {
|
|
|
|
|
info!("Vault is responding");
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
if i < 9 {
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// Try to unseal Vault - if this fails, we need to handle carefully
|
2025-12-08 00:19:29 -03:00
|
|
|
if let Err(e) = self.ensure_vault_unsealed().await {
|
2025-12-12 12:33:17 -03:00
|
|
|
warn!("Vault unseal failed: {}", e);
|
|
|
|
|
|
|
|
|
|
// CRITICAL: If stack is installed, NEVER try to re-initialize
|
|
|
|
|
// Just try restarting Vault a few more times
|
|
|
|
|
if Self::has_installed_stack() {
|
|
|
|
|
error!("Vault failed to unseal but stack is installed - NOT re-initializing");
|
|
|
|
|
error!("Try manually restarting Vault or check ./botserver-stack/logs/vault/vault.log");
|
2025-12-14 15:58:54 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// Kill only Vault process and try to restart
|
|
|
|
|
let _ = Command::new("pkill")
|
|
|
|
|
.args(["-9", "-f", "botserver-stack/bin/vault"])
|
|
|
|
|
.output();
|
2025-12-14 15:58:54 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
|
2025-12-14 15:58:54 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// Try to restart Vault
|
|
|
|
|
if let Err(e) = pm.start("vault") {
|
|
|
|
|
warn!("Failed to restart Vault: {}", e);
|
|
|
|
|
}
|
2025-12-14 15:58:54 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
2025-12-14 15:58:54 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// Final attempt to unseal
|
|
|
|
|
if let Err(e) = self.ensure_vault_unsealed().await {
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Vault failed to start/unseal after restart: {}. Manual intervention required.", e
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
// No installed stack, safe to re-initialize
|
|
|
|
|
warn!("No installed stack detected - proceeding with re-initialization");
|
2025-12-09 09:04:56 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// Kill only Vault process, reset only Vault credentials
|
|
|
|
|
let _ = Command::new("pkill")
|
|
|
|
|
.args(["-9", "-f", "botserver-stack/bin/vault"])
|
|
|
|
|
.output();
|
2025-12-09 08:56:58 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
if let Err(e) = Self::reset_vault_only() {
|
|
|
|
|
error!("Failed to reset Vault: {}", e);
|
|
|
|
|
return Err(e);
|
|
|
|
|
}
|
2025-12-09 08:56:58 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// Run bootstrap to re-initialize Vault
|
|
|
|
|
self.bootstrap().await?;
|
2025-12-09 08:56:58 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// After bootstrap, services are already running
|
|
|
|
|
info!("Vault re-initialization complete");
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
2025-12-09 07:58:39 -03:00
|
|
|
|
|
|
|
|
// Initialize SecretsManager so other code can use Vault
|
|
|
|
|
info!("Initializing SecretsManager...");
|
|
|
|
|
match init_secrets_manager().await {
|
|
|
|
|
Ok(_) => info!("SecretsManager initialized successfully"),
|
|
|
|
|
Err(e) => {
|
2025-12-09 08:56:58 -03:00
|
|
|
error!("Failed to initialize SecretsManager: {}", e);
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"SecretsManager initialization failed: {}",
|
|
|
|
|
e
|
|
|
|
|
));
|
2025-12-09 07:58:39 -03:00
|
|
|
}
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Start tables (PostgreSQL) - needed for database operations
|
|
|
|
|
if pm.is_installed("tables") {
|
|
|
|
|
info!("Starting PostgreSQL database...");
|
|
|
|
|
match pm.start("tables") {
|
|
|
|
|
Ok(_child) => {
|
|
|
|
|
// Give PostgreSQL time to initialize
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
|
|
|
|
|
info!("PostgreSQL started");
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!("PostgreSQL might already be running: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Start other components (order matters less for these)
|
|
|
|
|
let other_components = vec![
|
2025-11-22 22:55:35 -03:00
|
|
|
ComponentInfo { name: "cache" },
|
|
|
|
|
ComponentInfo { name: "drive" },
|
|
|
|
|
ComponentInfo { name: "llm" },
|
|
|
|
|
ComponentInfo { name: "email" },
|
|
|
|
|
ComponentInfo { name: "proxy" },
|
|
|
|
|
ComponentInfo { name: "directory" },
|
|
|
|
|
ComponentInfo { name: "alm" },
|
|
|
|
|
ComponentInfo { name: "alm_ci" },
|
|
|
|
|
ComponentInfo { name: "dns" },
|
|
|
|
|
ComponentInfo { name: "meeting" },
|
2025-12-02 21:09:43 -03:00
|
|
|
ComponentInfo {
|
|
|
|
|
name: "remote_terminal",
|
|
|
|
|
},
|
2025-11-22 22:55:35 -03:00
|
|
|
ComponentInfo { name: "vector_db" },
|
|
|
|
|
ComponentInfo { name: "host" },
|
|
|
|
|
];
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
for component in other_components {
|
2025-11-22 22:55:35 -03:00
|
|
|
if pm.is_installed(component.name) {
|
2025-11-28 13:50:28 -03:00
|
|
|
match pm.start(component.name) {
|
|
|
|
|
Ok(_child) => {
|
2025-12-12 12:33:17 -03:00
|
|
|
info!("Started component: {}", component.name);
|
2025-11-28 13:50:28 -03:00
|
|
|
}
|
|
|
|
|
Err(e) => {
|
2025-12-12 12:33:17 -03:00
|
|
|
debug!(
|
2025-11-28 13:50:28 -03:00
|
|
|
"Component {} might already be running: {}",
|
2025-12-14 15:58:54 -03:00
|
|
|
component.name, e
|
2025-11-28 13:50:28 -03:00
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-11-22 22:55:35 -03:00
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fn generate_secure_password(&self, length: usize) -> String {
|
|
|
|
|
let mut rng = rand::rng();
|
2025-12-08 00:19:29 -03:00
|
|
|
let base: String = (0..length.saturating_sub(4))
|
2025-11-22 22:55:35 -03:00
|
|
|
.map(|_| {
|
|
|
|
|
let byte = rand::Rng::sample(&mut rng, Alphanumeric);
|
|
|
|
|
char::from(byte)
|
|
|
|
|
})
|
2025-12-08 00:19:29 -03:00
|
|
|
.collect();
|
|
|
|
|
// Add required symbols/complexity for Zitadel password policy
|
|
|
|
|
// Use ! instead of @ to avoid breaking database connection strings
|
|
|
|
|
format!("{}!1Aa", base)
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|
|
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
/// Ensure critical services are running - Vault MUST be first
|
|
|
|
|
/// Order: vault -> tables -> drive
|
2025-12-08 00:19:29 -03:00
|
|
|
/// If fresh_start is true, kills existing processes first
|
2025-11-28 13:50:28 -03:00
|
|
|
pub async fn ensure_services_running(&mut self) -> Result<()> {
|
|
|
|
|
info!("Ensuring critical services are running...");
|
|
|
|
|
|
|
|
|
|
let installer = PackageManager::new(self.install_mode.clone(), self.tenant.clone())?;
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
// Check if we need to bootstrap first
|
|
|
|
|
let vault_installed = installer.is_installed("vault");
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_initialized = self.stack_dir("conf/vault/init.json").exists();
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
if !vault_installed || !vault_initialized {
|
|
|
|
|
info!("Stack not fully bootstrapped, running bootstrap first...");
|
|
|
|
|
// Kill any leftover processes
|
|
|
|
|
Self::kill_stack_processes();
|
|
|
|
|
|
|
|
|
|
// Run bootstrap - this will start all services
|
|
|
|
|
self.bootstrap().await?;
|
|
|
|
|
|
|
|
|
|
// After bootstrap, services are already running, just ensure Vault is unsealed and env vars set
|
|
|
|
|
info!("Bootstrap complete, verifying Vault is ready...");
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
|
|
|
|
|
|
|
|
|
|
if let Err(e) = self.ensure_vault_unsealed().await {
|
|
|
|
|
warn!("Failed to unseal Vault after bootstrap: {}", e);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Services were started by bootstrap, no need to restart them
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// If we get here, bootstrap was already done previously - just start services
|
2025-12-07 02:13:28 -03:00
|
|
|
// VAULT MUST BE FIRST - it provides all secrets
|
|
|
|
|
if installer.is_installed("vault") {
|
2025-12-08 00:19:29 -03:00
|
|
|
// Check if Vault is already running
|
|
|
|
|
let vault_running = Command::new("sh")
|
|
|
|
|
.arg("-c")
|
|
|
|
|
.arg("curl -f -s http://localhost:8200/v1/sys/health?standbyok=true&uninitcode=200&sealedcode=200 >/dev/null 2>&1")
|
2025-12-08 23:35:33 -03:00
|
|
|
.stdout(std::process::Stdio::null())
|
|
|
|
|
.stderr(std::process::Stdio::null())
|
2025-12-08 00:19:29 -03:00
|
|
|
.status()
|
|
|
|
|
.map(|s| s.success())
|
|
|
|
|
.unwrap_or(false);
|
|
|
|
|
|
|
|
|
|
if !vault_running {
|
|
|
|
|
info!("Starting Vault secrets service...");
|
|
|
|
|
match installer.start("vault") {
|
|
|
|
|
Ok(_child) => {
|
|
|
|
|
info!("Vault started successfully");
|
|
|
|
|
// Give Vault time to initialize
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!("Vault might already be running or failed to start: {}", e);
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
} else {
|
|
|
|
|
info!("Vault is already running");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Always try to unseal Vault (it may have restarted)
|
2025-12-10 18:41:45 -03:00
|
|
|
// If unseal fails, try to restart Vault process only - NEVER delete other services
|
2025-12-08 00:19:29 -03:00
|
|
|
if let Err(e) = self.ensure_vault_unsealed().await {
|
2025-12-10 18:41:45 -03:00
|
|
|
warn!("Vault unseal failed: {} - attempting Vault restart only", e);
|
2025-12-09 09:04:56 -03:00
|
|
|
|
2025-12-10 18:41:45 -03:00
|
|
|
// Kill ONLY Vault process - preserve all other services
|
2025-12-09 09:04:56 -03:00
|
|
|
let _ = Command::new("pkill")
|
|
|
|
|
.args(["-9", "-f", "botserver-stack/bin/vault"])
|
|
|
|
|
.output();
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-10 18:41:45 -03:00
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-10 18:41:45 -03:00
|
|
|
// Try to restart Vault without full bootstrap
|
|
|
|
|
let pm = PackageManager::new(self.install_mode.clone(), self.tenant.clone())?;
|
|
|
|
|
if let Err(e) = pm.start("vault") {
|
|
|
|
|
warn!("Failed to restart Vault: {}", e);
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-10 18:41:45 -03:00
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-10 18:41:45 -03:00
|
|
|
// Try unseal again
|
2025-12-08 00:19:29 -03:00
|
|
|
if let Err(e) = self.ensure_vault_unsealed().await {
|
2025-12-10 18:41:45 -03:00
|
|
|
warn!("Vault still not responding after restart: {}", e);
|
|
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// CRITICAL: If stack is installed, NEVER try to re-initialize
|
|
|
|
|
// This protects existing installations from being destroyed
|
|
|
|
|
if Self::has_installed_stack() {
|
|
|
|
|
error!("CRITICAL: Vault failed but botserver-stack is installed!");
|
|
|
|
|
error!("REFUSING to delete init.json or .env - this would destroy your installation");
|
|
|
|
|
error!("Please check ./botserver-stack/logs/vault/vault.log for errors");
|
|
|
|
|
error!("You may need to manually restart Vault or check its configuration");
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Vault failed to start. Manual intervention required. Check logs at ./botserver-stack/logs/vault/vault.log"
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Only reset if NO installed stack (fresh/broken install)
|
|
|
|
|
warn!("No installed stack detected - attempting Vault re-initialization");
|
|
|
|
|
if let Err(reset_err) = Self::reset_vault_only() {
|
|
|
|
|
error!("Failed to reset Vault: {}", reset_err);
|
|
|
|
|
return Err(reset_err);
|
|
|
|
|
}
|
2025-12-10 18:41:45 -03:00
|
|
|
|
|
|
|
|
// Install/configure ONLY Vault - NOT full bootstrap
|
|
|
|
|
info!("Re-initializing Vault only (preserving other services)...");
|
|
|
|
|
if let Err(e) = pm.install("vault").await {
|
|
|
|
|
return Err(anyhow::anyhow!("Failed to re-initialize Vault: {}", e));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
|
|
|
|
|
|
|
|
|
|
if let Err(e) = self.ensure_vault_unsealed().await {
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Failed to configure Vault after re-initialization: {}",
|
|
|
|
|
e
|
|
|
|
|
));
|
|
|
|
|
}
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
info!("Vault recovery complete");
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
2025-12-09 07:58:39 -03:00
|
|
|
|
|
|
|
|
// Initialize SecretsManager so other code can use Vault
|
|
|
|
|
info!("Initializing SecretsManager...");
|
|
|
|
|
match init_secrets_manager().await {
|
|
|
|
|
Ok(_) => info!("SecretsManager initialized successfully"),
|
|
|
|
|
Err(e) => {
|
|
|
|
|
error!("Failed to initialize SecretsManager: {}", e);
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"SecretsManager initialization failed: {}",
|
|
|
|
|
e
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-12-07 02:13:28 -03:00
|
|
|
} else {
|
|
|
|
|
// Vault not installed - cannot proceed, need to run bootstrap
|
|
|
|
|
warn!("Vault (secrets) component not installed - run bootstrap first");
|
2025-12-08 00:19:29 -03:00
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Vault not installed. Run bootstrap command first."
|
|
|
|
|
));
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check and start PostgreSQL (after Vault is running)
|
2025-11-28 13:50:28 -03:00
|
|
|
if installer.is_installed("tables") {
|
|
|
|
|
info!("Starting PostgreSQL database service...");
|
|
|
|
|
match installer.start("tables") {
|
|
|
|
|
Ok(_child) => {
|
|
|
|
|
info!("PostgreSQL started successfully");
|
|
|
|
|
// Give PostgreSQL time to initialize
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!(
|
|
|
|
|
"PostgreSQL might already be running or failed to start: {}",
|
|
|
|
|
e
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
warn!("PostgreSQL (tables) component not installed");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check and start MinIO
|
|
|
|
|
if installer.is_installed("drive") {
|
|
|
|
|
info!("Starting MinIO drive service...");
|
|
|
|
|
match installer.start("drive") {
|
|
|
|
|
Ok(_child) => {
|
|
|
|
|
info!("MinIO started successfully");
|
|
|
|
|
// Give MinIO time to initialize
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!("MinIO might already be running or failed to start: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
warn!("MinIO (drive) component not installed");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
/// Ensure Vault is unsealed (required after restart)
|
2025-12-08 00:19:29 -03:00
|
|
|
/// Returns Ok(()) if Vault is ready, Err if it needs re-initialization
|
2025-12-07 02:13:28 -03:00
|
|
|
async fn ensure_vault_unsealed(&self) -> Result<()> {
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_init_path = self.stack_dir("conf/vault/init.json");
|
2025-12-08 00:19:29 -03:00
|
|
|
let vault_addr = "http://localhost:8200";
|
|
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
if !vault_init_path.exists() {
|
2025-12-08 00:19:29 -03:00
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Vault init.json not found - needs re-initialization"
|
|
|
|
|
));
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Read unseal key from init.json
|
|
|
|
|
let init_json = fs::read_to_string(&vault_init_path)?;
|
|
|
|
|
let init_data: serde_json::Value = serde_json::from_str(&init_json)?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
let unseal_key = init_data["unseal_keys_b64"]
|
|
|
|
|
.as_array()
|
|
|
|
|
.and_then(|arr| arr.first())
|
|
|
|
|
.and_then(|v| v.as_str())
|
|
|
|
|
.unwrap_or("")
|
|
|
|
|
.to_string();
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
let root_token = init_data["root_token"].as_str().unwrap_or("").to_string();
|
2025-12-07 02:13:28 -03:00
|
|
|
|
|
|
|
|
if unseal_key.is_empty() || root_token.is_empty() {
|
2025-12-08 00:19:29 -03:00
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Invalid Vault init.json - needs re-initialization"
|
|
|
|
|
));
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
// First check if Vault is initialized (not just running)
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_bin = self.vault_bin();
|
2025-12-07 02:13:28 -03:00
|
|
|
let status_output = std::process::Command::new("sh")
|
|
|
|
|
.arg("-c")
|
|
|
|
|
.arg(format!(
|
2025-12-14 15:58:54 -03:00
|
|
|
"VAULT_ADDR={} {} status -format=json 2>/dev/null",
|
|
|
|
|
vault_addr, vault_bin
|
2025-12-07 02:13:28 -03:00
|
|
|
))
|
2025-12-08 23:35:33 -03:00
|
|
|
.stdout(std::process::Stdio::piped())
|
|
|
|
|
.stderr(std::process::Stdio::null())
|
2025-12-07 02:13:28 -03:00
|
|
|
.output()?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
let status_str = String::from_utf8_lossy(&status_output.stdout);
|
|
|
|
|
|
|
|
|
|
// Parse status - handle both success and error cases
|
|
|
|
|
if let Ok(status) = serde_json::from_str::<serde_json::Value>(&status_str) {
|
|
|
|
|
let initialized = status["initialized"].as_bool().unwrap_or(false);
|
|
|
|
|
let sealed = status["sealed"].as_bool().unwrap_or(true);
|
|
|
|
|
|
|
|
|
|
if !initialized {
|
|
|
|
|
// Vault is running but not initialized - this means data was deleted
|
|
|
|
|
// We need to re-run bootstrap
|
|
|
|
|
warn!("Vault is running but not initialized - data may have been deleted");
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Vault not initialized - needs re-bootstrap"
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if sealed {
|
2025-12-07 02:13:28 -03:00
|
|
|
info!("Unsealing Vault...");
|
2025-12-08 00:19:29 -03:00
|
|
|
let unseal_output = std::process::Command::new("sh")
|
2025-12-07 02:13:28 -03:00
|
|
|
.arg("-c")
|
|
|
|
|
.arg(format!(
|
2025-12-14 15:58:54 -03:00
|
|
|
"VAULT_ADDR={} {} operator unseal {} >/dev/null 2>&1",
|
|
|
|
|
vault_addr, vault_bin, unseal_key
|
2025-12-07 02:13:28 -03:00
|
|
|
))
|
2025-12-08 23:35:33 -03:00
|
|
|
.stdout(std::process::Stdio::null())
|
|
|
|
|
.stderr(std::process::Stdio::null())
|
2025-12-07 02:13:28 -03:00
|
|
|
.output()?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
if !unseal_output.status.success() {
|
|
|
|
|
let stderr = String::from_utf8_lossy(&unseal_output.stderr);
|
|
|
|
|
warn!("Vault unseal may have failed: {}", stderr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Verify unseal succeeded
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
|
|
|
|
|
let verify_output = std::process::Command::new("sh")
|
|
|
|
|
.arg("-c")
|
|
|
|
|
.arg(format!(
|
2025-12-14 15:58:54 -03:00
|
|
|
"VAULT_ADDR={} {} status -format=json 2>/dev/null",
|
|
|
|
|
vault_addr, vault_bin
|
2025-12-08 00:19:29 -03:00
|
|
|
))
|
2025-12-08 23:35:33 -03:00
|
|
|
.stdout(std::process::Stdio::piped())
|
|
|
|
|
.stderr(std::process::Stdio::null())
|
2025-12-08 00:19:29 -03:00
|
|
|
.output()?;
|
|
|
|
|
|
|
|
|
|
let verify_str = String::from_utf8_lossy(&verify_output.stdout);
|
|
|
|
|
if let Ok(verify_status) = serde_json::from_str::<serde_json::Value>(&verify_str) {
|
|
|
|
|
if verify_status["sealed"].as_bool().unwrap_or(true) {
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Failed to unseal Vault - may need re-initialization"
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
info!("Vault unsealed successfully");
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
} else {
|
|
|
|
|
// Could not parse status - Vault might not be responding properly
|
|
|
|
|
warn!("Could not get Vault status: {}", status_str);
|
|
|
|
|
return Err(anyhow::anyhow!("Vault not responding properly"));
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Set environment variables for other components
|
|
|
|
|
std::env::set_var("VAULT_ADDR", vault_addr);
|
|
|
|
|
std::env::set_var("VAULT_TOKEN", &root_token);
|
|
|
|
|
std::env::set_var("VAULT_SKIP_VERIFY", "true");
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
// Also set mTLS cert paths
|
|
|
|
|
std::env::set_var(
|
|
|
|
|
"VAULT_CACERT",
|
2025-12-14 15:58:54 -03:00
|
|
|
self.stack_dir("conf/system/certificates/ca/ca.crt")
|
|
|
|
|
.to_str()
|
|
|
|
|
.unwrap_or(""),
|
2025-12-08 00:19:29 -03:00
|
|
|
);
|
|
|
|
|
std::env::set_var(
|
|
|
|
|
"VAULT_CLIENT_CERT",
|
2025-12-14 15:58:54 -03:00
|
|
|
self.stack_dir("conf/system/certificates/botserver/client.crt")
|
|
|
|
|
.to_str()
|
|
|
|
|
.unwrap_or(""),
|
2025-12-08 00:19:29 -03:00
|
|
|
);
|
|
|
|
|
std::env::set_var(
|
|
|
|
|
"VAULT_CLIENT_KEY",
|
2025-12-14 15:58:54 -03:00
|
|
|
self.stack_dir("conf/system/certificates/botserver/client.key")
|
|
|
|
|
.to_str()
|
|
|
|
|
.unwrap_or(""),
|
2025-12-08 00:19:29 -03:00
|
|
|
);
|
|
|
|
|
|
|
|
|
|
info!("Vault environment configured");
|
2025-12-07 02:13:28 -03:00
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2025-11-22 22:55:35 -03:00
|
|
|
pub async fn bootstrap(&mut self) -> Result<()> {
|
2025-12-09 08:10:47 -03:00
|
|
|
info!("=== BOOTSTRAP STARTING ===");
|
|
|
|
|
|
2025-12-10 08:54:51 -03:00
|
|
|
// Kill any existing stack processes first - critical for dev machines
|
|
|
|
|
// where old processes may be running from a deleted/recreated stack
|
|
|
|
|
info!("Cleaning up any existing stack processes...");
|
|
|
|
|
Self::kill_stack_processes();
|
|
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
// Generate certificates first (including for Vault)
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Generating TLS certificates...");
|
2025-11-29 16:29:28 -03:00
|
|
|
if let Err(e) = self.generate_certificates().await {
|
|
|
|
|
error!("Failed to generate certificates: {}", e);
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
// Create Vault configuration with mTLS
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Creating Vault configuration...");
|
2025-12-07 02:13:28 -03:00
|
|
|
if let Err(e) = self.create_vault_config().await {
|
|
|
|
|
error!("Failed to create Vault config: {}", e);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Generate secure passwords for all services - these are ONLY used during bootstrap
|
|
|
|
|
// and immediately stored in Vault. NO LEGACY ENV VARS.
|
|
|
|
|
let db_password = self.generate_secure_password(24);
|
|
|
|
|
let drive_accesskey = self.generate_secure_password(20);
|
|
|
|
|
let drive_secret = self.generate_secure_password(40);
|
|
|
|
|
let cache_password = self.generate_secure_password(24);
|
2025-11-29 17:27:13 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
// Configuration is stored in Vault, not .env files
|
|
|
|
|
info!("Configuring services through Vault...");
|
2025-11-22 22:55:35 -03:00
|
|
|
|
|
|
|
|
let pm = PackageManager::new(self.install_mode.clone(), self.tenant.clone()).unwrap();
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
// Vault MUST be installed first - it stores all secrets
|
|
|
|
|
// Order: vault -> tables -> directory -> drive -> cache -> llm
|
2025-11-29 16:29:28 -03:00
|
|
|
let required_components = vec![
|
2025-12-07 02:13:28 -03:00
|
|
|
"vault", // Secrets management - MUST BE FIRST
|
|
|
|
|
"tables", // Database - required by Directory
|
|
|
|
|
"directory", // Identity service - manages users
|
|
|
|
|
"drive", // S3 storage - credentials in Vault
|
2025-11-29 16:29:28 -03:00
|
|
|
"cache", // Redis cache
|
|
|
|
|
"llm", // LLM service
|
|
|
|
|
];
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
// Special check: Vault needs setup even if binary exists but not initialized
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_needs_setup = !self.stack_dir("conf/vault/init.json").exists();
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-11-22 22:55:35 -03:00
|
|
|
for component in required_components {
|
2025-12-08 00:19:29 -03:00
|
|
|
// For vault, also check if it needs initialization
|
2025-12-09 08:10:47 -03:00
|
|
|
let is_installed = pm.is_installed(component);
|
2025-12-08 00:19:29 -03:00
|
|
|
let needs_install = if component == "vault" {
|
2025-12-09 08:10:47 -03:00
|
|
|
!is_installed || vault_needs_setup
|
2025-12-08 00:19:29 -03:00
|
|
|
} else {
|
2025-12-09 08:10:47 -03:00
|
|
|
!is_installed
|
2025-12-08 00:19:29 -03:00
|
|
|
};
|
|
|
|
|
|
2025-12-09 08:10:47 -03:00
|
|
|
info!(
|
|
|
|
|
"Component {}: installed={}, needs_install={}, vault_needs_setup={}",
|
|
|
|
|
component, is_installed, needs_install, vault_needs_setup
|
|
|
|
|
);
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
if needs_install {
|
2025-12-09 08:10:47 -03:00
|
|
|
info!("Installing/configuring component: {}", component);
|
2025-12-08 00:19:29 -03:00
|
|
|
// Quick check if component might be running - don't hang on this
|
|
|
|
|
let bin_path = pm.base_path.join("bin").join(component);
|
|
|
|
|
let binary_name = pm
|
2025-11-22 22:55:35 -03:00
|
|
|
.components
|
|
|
|
|
.get(component)
|
|
|
|
|
.and_then(|cfg| cfg.binary_name.clone())
|
|
|
|
|
.unwrap_or_else(|| component.to_string());
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
// Only terminate for services that are known to conflict
|
|
|
|
|
// Use simple, fast commands with timeout
|
|
|
|
|
if component == "vault" || component == "tables" || component == "directory" {
|
|
|
|
|
let _ = Command::new("sh")
|
|
|
|
|
.arg("-c")
|
|
|
|
|
.arg(format!(
|
|
|
|
|
"pkill -9 -f '{}/{}' 2>/dev/null; true",
|
|
|
|
|
bin_path.display(),
|
|
|
|
|
binary_name
|
|
|
|
|
))
|
|
|
|
|
.status();
|
|
|
|
|
std::thread::sleep(std::time::Duration::from_millis(200));
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|
2025-12-10 08:30:49 -03:00
|
|
|
|
|
|
|
|
info!("Installing component: {}", component);
|
|
|
|
|
let install_result = pm.install(component).await;
|
|
|
|
|
if let Err(e) = install_result {
|
|
|
|
|
error!("Failed to install component {}: {}", component, e);
|
|
|
|
|
if component == "vault" {
|
|
|
|
|
return Err(anyhow::anyhow!("Failed to install Vault: {}", e));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
info!("Component {} installed successfully", component);
|
2025-11-22 22:55:35 -03:00
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
// After tables is installed, START PostgreSQL and create Zitadel config files before installing directory
|
2025-12-07 02:13:28 -03:00
|
|
|
if component == "tables" {
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Starting PostgreSQL database...");
|
2025-12-08 00:19:29 -03:00
|
|
|
match pm.start("tables") {
|
|
|
|
|
Ok(_) => {
|
|
|
|
|
info!("PostgreSQL started successfully");
|
|
|
|
|
// Give PostgreSQL time to initialize
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!("Failed to start PostgreSQL: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-09 07:49:01 -03:00
|
|
|
// Run migrations using direct connection (Vault not set up yet)
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Running database migrations...");
|
2025-12-09 07:49:01 -03:00
|
|
|
let database_url =
|
|
|
|
|
format!("postgres://gbuser:{}@localhost:5432/botserver", db_password);
|
|
|
|
|
match diesel::PgConnection::establish(&database_url) {
|
|
|
|
|
Ok(mut conn) => {
|
|
|
|
|
if let Err(e) = self.apply_migrations(&mut conn) {
|
|
|
|
|
error!("Failed to apply migrations: {}", e);
|
|
|
|
|
} else {
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Database migrations applied");
|
2025-12-09 07:49:01 -03:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
error!("Failed to connect to database for migrations: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Creating Directory configuration files...");
|
2025-12-07 02:13:28 -03:00
|
|
|
if let Err(e) = self.configure_services_in_directory(&db_password).await {
|
|
|
|
|
error!("Failed to create Directory config files: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Directory configuration - setup happens after install starts Zitadel
|
2025-11-22 22:55:35 -03:00
|
|
|
if component == "directory" {
|
2025-12-14 15:58:54 -03:00
|
|
|
info!("Starting Directory (Zitadel) service...");
|
|
|
|
|
match pm.start("directory") {
|
|
|
|
|
Ok(_) => {
|
|
|
|
|
info!("Directory service started successfully");
|
|
|
|
|
// Give Zitadel time to initialize before health checks
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!("Failed to start Directory service: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Waiting for Directory to be ready...");
|
2025-11-22 22:55:35 -03:00
|
|
|
if let Err(e) = self.setup_directory().await {
|
2025-12-07 02:13:28 -03:00
|
|
|
// Don't fail completely - Zitadel may still be usable with first instance setup
|
|
|
|
|
warn!("Directory additional setup had issues: {}", e);
|
2025-11-29 16:29:28 -03:00
|
|
|
}
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
2025-11-29 16:29:28 -03:00
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
// After Vault is installed, START the server then initialize it
|
2025-12-07 02:13:28 -03:00
|
|
|
if component == "vault" {
|
2025-12-12 12:33:17 -03:00
|
|
|
info!("Setting up Vault secrets service...");
|
2025-12-10 08:30:49 -03:00
|
|
|
|
|
|
|
|
// Verify vault binary exists and is executable
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_bin = self.stack_dir("bin/vault/vault");
|
2025-12-10 08:30:49 -03:00
|
|
|
if !vault_bin.exists() {
|
|
|
|
|
error!("Vault binary not found at {:?}", vault_bin);
|
|
|
|
|
return Err(anyhow::anyhow!("Vault binary not found after installation"));
|
|
|
|
|
}
|
2025-12-12 12:33:17 -03:00
|
|
|
info!("Vault binary verified at {:?}", vault_bin);
|
2025-12-10 08:30:49 -03:00
|
|
|
|
|
|
|
|
// Ensure logs directory exists
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_log_path = self.stack_dir("logs/vault/vault.log");
|
2025-12-10 08:30:49 -03:00
|
|
|
if let Some(parent) = vault_log_path.parent() {
|
|
|
|
|
if let Err(e) = fs::create_dir_all(parent) {
|
|
|
|
|
error!("Failed to create vault logs directory: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Ensure data directory exists
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_data_path = self.stack_dir("data/vault");
|
2025-12-10 08:30:49 -03:00
|
|
|
if let Err(e) = fs::create_dir_all(&vault_data_path) {
|
|
|
|
|
error!("Failed to create vault data directory: {}", e);
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Starting Vault server...");
|
2025-12-10 08:30:49 -03:00
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// Try starting vault directly first
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_bin_dir = self.stack_dir("bin/vault");
|
|
|
|
|
let vault_start_cmd = format!(
|
|
|
|
|
"cd {} && nohup ./vault server -config=../../conf/vault/config.hcl > ../../logs/vault/vault.log 2>&1 &",
|
|
|
|
|
vault_bin_dir.display()
|
|
|
|
|
);
|
2025-12-12 12:33:17 -03:00
|
|
|
let _ = std::process::Command::new("sh")
|
2025-12-10 08:30:49 -03:00
|
|
|
.arg("-c")
|
2025-12-14 15:58:54 -03:00
|
|
|
.arg(&vault_start_cmd)
|
2025-12-10 08:30:49 -03:00
|
|
|
.status();
|
|
|
|
|
std::thread::sleep(std::time::Duration::from_secs(2));
|
|
|
|
|
|
|
|
|
|
// Check if it's running now
|
|
|
|
|
let check = std::process::Command::new("pgrep")
|
|
|
|
|
.args(["-f", "vault server"])
|
|
|
|
|
.output();
|
|
|
|
|
if let Ok(output) = &check {
|
|
|
|
|
let pids = String::from_utf8_lossy(&output.stdout);
|
|
|
|
|
if !pids.trim().is_empty() {
|
2025-12-08 00:19:29 -03:00
|
|
|
info!("Vault server started");
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
|
2025-12-10 08:30:49 -03:00
|
|
|
} else {
|
2025-12-12 12:33:17 -03:00
|
|
|
debug!("Direct start failed, trying pm.start...");
|
2025-12-10 08:30:49 -03:00
|
|
|
match pm.start("vault") {
|
|
|
|
|
Ok(_) => {
|
|
|
|
|
info!("Vault server started");
|
|
|
|
|
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
error!("Failed to start Vault server: {}", e);
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"Failed to start Vault server: {}",
|
|
|
|
|
e
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
2025-12-10 08:30:49 -03:00
|
|
|
}
|
|
|
|
|
|
2025-12-12 12:33:17 -03:00
|
|
|
// Verify vault is running
|
2025-12-10 08:30:49 -03:00
|
|
|
let final_check = std::process::Command::new("pgrep")
|
|
|
|
|
.args(["-f", "vault server"])
|
|
|
|
|
.output();
|
|
|
|
|
if let Ok(output) = final_check {
|
|
|
|
|
let pids = String::from_utf8_lossy(&output.stdout);
|
|
|
|
|
if pids.trim().is_empty() {
|
2025-12-12 12:33:17 -03:00
|
|
|
error!("Vault is not running after all start attempts");
|
2025-12-10 08:30:49 -03:00
|
|
|
return Err(anyhow::anyhow!("Failed to start Vault server"));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Initializing Vault with secrets...");
|
2025-12-08 00:19:29 -03:00
|
|
|
if let Err(e) = self
|
|
|
|
|
.setup_vault(
|
|
|
|
|
&db_password,
|
|
|
|
|
&drive_accesskey,
|
|
|
|
|
&drive_secret,
|
|
|
|
|
&cache_password,
|
|
|
|
|
)
|
|
|
|
|
.await
|
|
|
|
|
{
|
2025-12-07 02:13:28 -03:00
|
|
|
error!("Failed to setup Vault: {}", e);
|
2025-12-10 08:30:49 -03:00
|
|
|
// Check vault.log for more details
|
|
|
|
|
if vault_log_path.exists() {
|
|
|
|
|
if let Ok(log_content) = fs::read_to_string(&vault_log_path) {
|
|
|
|
|
let last_lines: Vec<&str> =
|
|
|
|
|
log_content.lines().rev().take(20).collect();
|
|
|
|
|
error!("Vault log (last 20 lines):");
|
|
|
|
|
for line in last_lines.iter().rev() {
|
|
|
|
|
error!(" {}", line);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Vault is critical - fail the bootstrap
|
|
|
|
|
return Err(anyhow::anyhow!("Vault setup failed: {}. Check ./botserver-stack/logs/vault/vault.log for details.", e));
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
// Initialize the global SecretsManager so other components can use Vault
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Initializing SecretsManager...");
|
2025-12-08 00:19:29 -03:00
|
|
|
debug!(
|
|
|
|
|
"VAULT_ADDR={:?}, VAULT_TOKEN set={}",
|
|
|
|
|
std::env::var("VAULT_ADDR").ok(),
|
|
|
|
|
std::env::var("VAULT_TOKEN").is_ok()
|
|
|
|
|
);
|
|
|
|
|
match init_secrets_manager().await {
|
2025-12-09 07:55:11 -03:00
|
|
|
Ok(_) => info!("SecretsManager initialized successfully"),
|
2025-12-08 00:19:29 -03:00
|
|
|
Err(e) => {
|
|
|
|
|
error!("Failed to initialize SecretsManager: {}", e);
|
|
|
|
|
// Don't continue if SecretsManager fails - it's required for DB connection
|
|
|
|
|
return Err(anyhow::anyhow!(
|
|
|
|
|
"SecretsManager initialization failed: {}",
|
|
|
|
|
e
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|
2025-11-26 22:54:22 -03:00
|
|
|
|
|
|
|
|
if component == "email" {
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Auto-configuring Email (Stalwart)...");
|
2025-11-26 22:54:22 -03:00
|
|
|
if let Err(e) = self.setup_email().await {
|
|
|
|
|
error!("Failed to setup Email: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
if component == "proxy" {
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Configuring Caddy reverse proxy...");
|
2025-11-29 16:29:28 -03:00
|
|
|
if let Err(e) = self.setup_caddy_proxy().await {
|
|
|
|
|
error!("Failed to setup Caddy: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if component == "dns" {
|
2025-12-09 07:55:11 -03:00
|
|
|
info!("Configuring CoreDNS for dynamic DNS...");
|
2025-11-29 16:29:28 -03:00
|
|
|
if let Err(e) = self.setup_coredns().await {
|
|
|
|
|
error!("Failed to setup CoreDNS: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|
|
|
|
|
}
|
2025-12-09 08:10:47 -03:00
|
|
|
info!("=== BOOTSTRAP COMPLETED SUCCESSFULLY ===");
|
2025-11-22 22:55:35 -03:00
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2025-11-29 16:29:28 -03:00
|
|
|
    /// Configure database and drive credentials in Directory.
    ///
    /// Creates the Zitadel config files BEFORE Zitadel is installed:
    /// `zitadel.yaml` (runtime configuration) and `steps.yaml` (first-instance
    /// setup that writes an admin PAT file), then provisions the `zitadel`
    /// database and database user in PostgreSQL via `psql`.
    ///
    /// `db_password` is the gbuser password passed directly from bootstrap -
    /// NO ENV VARS.
    ///
    /// # Errors
    /// Returns an error if the current directory cannot be resolved or the
    /// config files cannot be written; `psql` failures are tolerated
    /// (best-effort, `|| true`).
    async fn configure_services_in_directory(&self, db_password: &str) -> Result<()> {
        info!("Creating Zitadel configuration files...");

        let zitadel_config_path = self.stack_dir("conf/directory/zitadel.yaml");
        let steps_config_path = self.stack_dir("conf/directory/steps.yaml");
        // Use absolute path for PAT file since zitadel runs from bin/directory/
        // (a relative PatPath would resolve against Zitadel's working dir).
        let pat_path = if self.stack_path.is_absolute() {
            self.stack_dir("conf/directory/admin-pat.txt")
        } else {
            std::env::current_dir()?.join(self.stack_dir("conf/directory/admin-pat.txt"))
        };

        // conf/directory/ is shared by both YAML files, so one create suffices.
        fs::create_dir_all(zitadel_config_path.parent().unwrap())?;

        // Generate Zitadel database password; it is stored only inside
        // zitadel.yaml and the CREATE USER statement below.
        let zitadel_db_password = self.generate_secure_password(24);

        // Create zitadel.yaml - main configuration.
        // Note: Zitadel uses lowercase 'postgres' and nested User/Admin with
        // Username field. (YAML nesting below follows Zitadel's documented
        // schema - indentation reconstructed, verify against upstream docs.)
        let zitadel_config = format!(
            r#"Log:
  Level: info
  Formatter:
    Format: text

Port: 8300

Database:
  postgres:
    Host: localhost
    Port: 5432
    Database: zitadel
    User:
      Username: zitadel
      Password: "{}"
      SSL:
        Mode: disable
    Admin:
      Username: gbuser
      Password: "{}"
      SSL:
        Mode: disable

Machine:
  Identification:
    Hostname:
      Enabled: true

ExternalSecure: false
ExternalDomain: localhost
ExternalPort: 8300

DefaultInstance:
  OIDCSettings:
    AccessTokenLifetime: 12h
    IdTokenLifetime: 12h
    RefreshTokenIdleExpiration: 720h
    RefreshTokenExpiration: 2160h
"#,
            zitadel_db_password,
            db_password, // Use the password passed directly from bootstrap
        );

        fs::write(&zitadel_config_path, zitadel_config)?;
        info!("Created zitadel.yaml configuration");

        // Create steps.yaml - first instance setup that generates the admin PAT.
        // Use a Machine user with PAT for API access (Human users don't
        // generate PAT files).
        let steps_config = format!(
            r#"FirstInstance:
  InstanceName: "BotServer"
  DefaultLanguage: "en"
  PatPath: "{}"
  Org:
    Name: "BotServer"
    Machine:
      Machine:
        Username: "admin-sa"
        Name: "Admin Service Account"
      Pat:
        ExpirationDate: "2099-12-31T23:59:59Z"
    Human:
      UserName: "admin"
      FirstName: "Admin"
      LastName: "User"
      Email:
        Address: "admin@localhost"
        Verified: true
      Password: "{}"
      PasswordChangeRequired: false
"#,
            pat_path.to_string_lossy(),
            // Human admin password is generated and intentionally not retained.
            self.generate_secure_password(16),
        );

        fs::write(&steps_config_path, steps_config)?;
        info!("Created steps.yaml for first instance setup");

        // Create zitadel database in PostgreSQL. Best-effort: "already exists"
        // is fine, and `|| true` swallows connection errors.
        // NOTE(review): the password is interpolated into a shell command and
        // may be visible in process listings; passwords here are generated
        // alphanumeric so the single-quoting is safe, but consider passing
        // PGPASSWORD via Command::env instead.
        info!("Creating zitadel database...");
        let create_db_result = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "PGPASSWORD='{}' psql -h localhost -p 5432 -U gbuser -d postgres -c \"CREATE DATABASE zitadel\" 2>&1 || true",
                db_password
            ))
            .output();

        if let Ok(output) = create_db_result {
            let stdout = String::from_utf8_lossy(&output.stdout);
            // Only log creation when the database did not already exist.
            if !stdout.contains("already exists") {
                info!("Created zitadel database");
            }
        }

        // Create zitadel user (SUPERUSER so Zitadel can manage its own schema).
        let create_user_result = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "PGPASSWORD='{}' psql -h localhost -p 5432 -U gbuser -d postgres -c \"CREATE USER zitadel WITH PASSWORD '{}' SUPERUSER\" 2>&1 || true",
                db_password,
                zitadel_db_password
            ))
            .output();

        if let Ok(output) = create_user_result {
            let stdout = String::from_utf8_lossy(&output.stdout);
            if !stdout.contains("already exists") {
                info!("Created zitadel database user");
            }
        }

        info!("Zitadel configuration files created");
        Ok(())
    }
|
|
|
|
|
|
|
|
|
|
/// Setup Caddy as reverse proxy for all services
|
|
|
|
|
async fn setup_caddy_proxy(&self) -> Result<()> {
|
2025-12-14 15:58:54 -03:00
|
|
|
let caddy_config = self.stack_dir("conf/proxy/Caddyfile");
|
2025-11-29 16:29:28 -03:00
|
|
|
fs::create_dir_all(caddy_config.parent().unwrap())?;
|
|
|
|
|
|
2025-11-29 17:27:13 -03:00
|
|
|
let config = format!(
|
|
|
|
|
r#"{{
|
2025-11-29 16:29:28 -03:00
|
|
|
admin off
|
|
|
|
|
auto_https disable_redirects
|
2025-11-29 17:27:13 -03:00
|
|
|
}}
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
# Main API
|
2025-11-29 17:27:13 -03:00
|
|
|
api.botserver.local {{
|
2025-11-29 16:29:28 -03:00
|
|
|
tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
|
2025-11-29 17:27:13 -03:00
|
|
|
reverse_proxy {}
|
|
|
|
|
}}
|
2025-11-29 16:29:28 -03:00
|
|
|
|
2025-11-29 17:27:13 -03:00
|
|
|
# Directory/Auth service
|
|
|
|
|
auth.botserver.local {{
|
2025-11-29 16:29:28 -03:00
|
|
|
tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
|
2025-11-29 17:27:13 -03:00
|
|
|
reverse_proxy {}
|
|
|
|
|
}}
|
2025-11-29 16:29:28 -03:00
|
|
|
|
2025-11-29 17:27:13 -03:00
|
|
|
# LLM service
|
|
|
|
|
llm.botserver.local {{
|
2025-11-29 16:29:28 -03:00
|
|
|
tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
|
2025-11-29 17:27:13 -03:00
|
|
|
reverse_proxy {}
|
|
|
|
|
}}
|
2025-11-29 16:29:28 -03:00
|
|
|
|
2025-11-29 17:27:13 -03:00
|
|
|
# Mail service
|
|
|
|
|
mail.botserver.local {{
|
2025-11-29 16:29:28 -03:00
|
|
|
tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
|
2025-11-29 17:27:13 -03:00
|
|
|
reverse_proxy {}
|
|
|
|
|
}}
|
2025-11-29 16:29:28 -03:00
|
|
|
|
2025-11-29 17:27:13 -03:00
|
|
|
# Meet service
|
|
|
|
|
meet.botserver.local {{
|
2025-11-29 16:29:28 -03:00
|
|
|
tls /botserver-stack/conf/system/certificates/caddy/server.crt /botserver-stack/conf/system/certificates/caddy/server.key
|
2025-11-29 17:27:13 -03:00
|
|
|
reverse_proxy {}
|
|
|
|
|
}}
|
|
|
|
|
"#,
|
|
|
|
|
crate::core::urls::InternalUrls::DIRECTORY_BASE.replace("https://", ""),
|
|
|
|
|
crate::core::urls::InternalUrls::DIRECTORY_BASE.replace("https://", ""),
|
|
|
|
|
crate::core::urls::InternalUrls::LLM.replace("https://", ""),
|
|
|
|
|
crate::core::urls::InternalUrls::EMAIL.replace("https://", ""),
|
|
|
|
|
crate::core::urls::InternalUrls::LIVEKIT.replace("https://", "")
|
|
|
|
|
);
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
fs::write(caddy_config, config)?;
|
2025-11-29 17:27:13 -03:00
|
|
|
info!("Caddy proxy configured");
|
2025-11-29 16:29:28 -03:00
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Setup CoreDNS for dynamic DNS service
|
|
|
|
|
async fn setup_coredns(&self) -> Result<()> {
|
2025-12-14 15:58:54 -03:00
|
|
|
let dns_config = self.stack_dir("conf/dns/Corefile");
|
2025-11-29 16:29:28 -03:00
|
|
|
fs::create_dir_all(dns_config.parent().unwrap())?;
|
|
|
|
|
|
2025-12-14 15:58:54 -03:00
|
|
|
let zone_file = self.stack_dir("conf/dns/botserver.local.zone");
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
// Create Corefile
|
|
|
|
|
let corefile = r#"botserver.local:53 {
|
|
|
|
|
file /botserver-stack/conf/dns/botserver.local.zone
|
|
|
|
|
reload 10s
|
|
|
|
|
log
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
.:53 {
|
|
|
|
|
forward . 8.8.8.8 8.8.4.4
|
|
|
|
|
cache 30
|
|
|
|
|
log
|
|
|
|
|
}
|
|
|
|
|
"#;
|
|
|
|
|
|
|
|
|
|
fs::write(dns_config, corefile)?;
|
|
|
|
|
|
2025-12-07 10:42:02 -03:00
|
|
|
// Create initial zone file with component names
|
2025-11-29 16:29:28 -03:00
|
|
|
let zone = r#"$ORIGIN botserver.local.
|
|
|
|
|
$TTL 60
|
|
|
|
|
@ IN SOA ns1.botserver.local. admin.botserver.local. (
|
|
|
|
|
2024010101 ; Serial
|
|
|
|
|
3600 ; Refresh
|
|
|
|
|
1800 ; Retry
|
|
|
|
|
604800 ; Expire
|
|
|
|
|
60 ; Minimum TTL
|
|
|
|
|
)
|
|
|
|
|
IN NS ns1.botserver.local.
|
|
|
|
|
ns1 IN A 127.0.0.1
|
|
|
|
|
|
2025-12-07 10:42:02 -03:00
|
|
|
; Core services
|
|
|
|
|
api IN A 127.0.0.1
|
|
|
|
|
tables IN A 127.0.0.1
|
|
|
|
|
drive IN A 127.0.0.1
|
|
|
|
|
cache IN A 127.0.0.1
|
|
|
|
|
vectordb IN A 127.0.0.1
|
|
|
|
|
vault IN A 127.0.0.1
|
|
|
|
|
|
|
|
|
|
; Application services
|
|
|
|
|
llm IN A 127.0.0.1
|
|
|
|
|
embedding IN A 127.0.0.1
|
|
|
|
|
directory IN A 127.0.0.1
|
|
|
|
|
auth IN A 127.0.0.1
|
|
|
|
|
email IN A 127.0.0.1
|
|
|
|
|
meet IN A 127.0.0.1
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
; Dynamic entries will be added below
|
|
|
|
|
"#;
|
|
|
|
|
|
|
|
|
|
fs::write(zone_file, zone)?;
|
2025-11-29 17:27:13 -03:00
|
|
|
info!("CoreDNS configured for dynamic DNS");
|
2025-11-29 16:29:28 -03:00
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2025-11-22 22:55:35 -03:00
|
|
|
    /// Setup Directory (Zitadel) with default organization and user.
    ///
    /// Polls the Zitadel health endpoint (up to 60s), loads the admin PAT
    /// written by the first-instance setup, then (best-effort) creates a
    /// "default" organization, a regular user, and an OAuth2 application,
    /// persisting the result via `DirectorySetup::save_config`.
    ///
    /// # Errors
    /// Returns an error if the local `./config` directory cannot be created
    /// or the PAT file exists but cannot be read; API-level failures are
    /// logged as warnings and do not fail the call.
    async fn setup_directory(&self) -> Result<()> {
        let config_path = PathBuf::from("./config/directory_config.json");
        let pat_path = self.stack_dir("conf/directory/admin-pat.txt");

        // Ensure config directory exists
        tokio::fs::create_dir_all("./config").await?;

        // Wait for Directory to be ready and check for PAT file
        info!("Waiting for Zitadel to be ready...");
        let mut attempts = 0;
        let max_attempts = 60; // 60 seconds max wait

        while attempts < max_attempts {
            // Check if Zitadel is healthy (curl -f fails on HTTP errors,
            // so success status means a 2xx from /healthz).
            let health_check = std::process::Command::new("curl")
                .args(["-f", "-s", "http://localhost:8300/healthz"])
                .output();

            if let Ok(output) = health_check {
                if output.status.success() {
                    info!("Zitadel is healthy");
                    break;
                }
            }

            attempts += 1;
            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
        }

        if attempts >= max_attempts {
            // Deliberately non-fatal: the PAT check below decides whether
            // we can actually proceed.
            warn!("Zitadel health check timed out, continuing anyway...");
        }

        // Wait a bit more for PAT file to be generated
        tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;

        // Read the admin PAT generated by Zitadel first instance setup
        let admin_token = if pat_path.exists() {
            let token = fs::read_to_string(&pat_path)?;
            let token = token.trim().to_string();
            info!("Loaded admin PAT from {}", pat_path.display());
            Some(token)
        } else {
            warn!("Admin PAT file not found at {}", pat_path.display());
            warn!("Zitadel first instance setup may not have completed");
            None
        };

        let mut setup = DirectorySetup::new(
            "http://localhost:8300".to_string(), // Use HTTP since TLS is disabled
            config_path,
        );

        // Set the admin token if we have it
        if let Some(token) = admin_token {
            setup.set_admin_token(token);
        } else {
            // If no PAT, we can't proceed with API calls
            info!("Directory setup skipped - no admin token available");
            info!("First instance setup created initial admin user via steps.yaml");
            return Ok(());
        }

        // Wait a bit more for Zitadel to be fully ready
        tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;

        // Try to create additional organization for bot users
        let org_name = "default";
        match setup
            .create_organization(org_name, "Default Organization")
            .await
        {
            Ok(org_id) => {
                info!("Created default organization: {}", org_name);

                // Generate secure passwords
                let user_password = self.generate_secure_password(16);

                // Create user@default account for regular bot usage
                match setup
                    .create_user(
                        &org_id,
                        "user",
                        "user@default",
                        &user_password,
                        "User",
                        "Default",
                        false,
                    )
                    .await
                {
                    Ok(regular_user) => {
                        info!("Created regular user: user@default");
                        info!(" Regular user ID: {}", regular_user.id);
                    }
                    Err(e) => {
                        warn!("Failed to create regular user: {}", e);
                    }
                }

                // Create OAuth2 application for BotServer
                match setup.create_oauth_application(&org_id).await {
                    Ok((project_id, client_id, client_secret)) => {
                        info!("Created OAuth2 application in project: {}", project_id);

                        // Save configuration
                        let admin_user = crate::package_manager::setup::DefaultUser {
                            id: "admin".to_string(),
                            username: "admin".to_string(),
                            email: "admin@localhost".to_string(),
                            password: "".to_string(), // Don't store password
                            first_name: "Admin".to_string(),
                            last_name: "User".to_string(),
                        };

                        // Save failures are silently ignored here (Ok-only
                        // branch); NOTE(review): consider logging the Err case.
                        if let Ok(config) = setup
                            .save_config(
                                org_id.clone(),
                                org_name.to_string(),
                                admin_user,
                                client_id.clone(),
                                client_secret,
                            )
                            .await
                        {
                            info!("Directory initialized successfully!");
                            info!(" Organization: default");
                            info!(" Client ID: {}", client_id);
                            info!(" Login URL: {}", config.base_url);
                        }
                    }
                    Err(e) => {
                        warn!("Failed to create OAuth2 application: {}", e);
                    }
                }
            }
            Err(e) => {
                warn!("Failed to create organization: {}", e);
                info!("Using Zitadel's default organization from first instance setup");
            }
        }

        info!("Directory setup complete");
        Ok(())
    }
|
|
|
|
|
|
|
|
|
|
    /// Setup Vault with all service secrets and write .env file with VAULT_* variables.
    ///
    /// Flow:
    /// 1. Wait (up to `max_attempts` seconds) for the local Vault server to answer
    ///    its health endpoint, dumping `vault.log` tails when it looks dead.
    /// 2. Obtain the unseal key + root token: either from a previously written
    ///    `init.json`, or by running `vault operator init`. If Vault says it is
    ///    already initialized but `init.json` is gone, attempt recovery via a
    ///    `VAULT_TOKEN` found in `.env`; otherwise fail with recovery guidance.
    /// 3. Unseal Vault, write `.env` (VAULT_ADDR/VAULT_TOKEN only), re-init the
    ///    global SecretsManager, enable the KV v2 engine, and seed each
    ///    `secret/gbo/*` path — only when it does not already exist, so existing
    ///    customer secrets are never overwritten.
    ///
    /// All Vault interaction shells out to the vault CLI binary (`self.vault_bin()`);
    /// mTLS env vars are unset per command because they would break plain-HTTP calls.
    async fn setup_vault(
        &self,
        db_password: &str,
        drive_accesskey: &str,
        drive_secret: &str,
        cache_password: &str,
    ) -> Result<()> {
        let vault_conf_path = self.stack_dir("conf/vault");
        let vault_init_path = vault_conf_path.join("init.json");
        let env_file_path = PathBuf::from("./.env");

        // Wait for Vault to be ready
        info!("Waiting for Vault to be ready...");
        let mut attempts = 0;
        let max_attempts = 30;

        while attempts < max_attempts {
            // First check if Vault process is running
            let ps_check = std::process::Command::new("sh")
                .arg("-c")
                .arg("pgrep -f 'vault server' || echo 'NOT_RUNNING'")
                .output();

            if let Ok(ps_output) = ps_check {
                let ps_result = String::from_utf8_lossy(&ps_output.stdout);
                if ps_result.contains("NOT_RUNNING") {
                    warn!("Vault process is not running (attempt {})", attempts + 1);
                    // Check vault.log for crash info
                    let vault_log_path = self.stack_dir("logs/vault/vault.log");
                    if vault_log_path.exists() {
                        if let Ok(log_content) = fs::read_to_string(&vault_log_path) {
                            let last_lines: Vec<&str> =
                                log_content.lines().rev().take(10).collect();
                            warn!("Vault log (last 10 lines):");
                            for line in last_lines.iter().rev() {
                                warn!(" {}", line);
                            }
                        }
                    }
                }
            }

            // uninitcode/sealedcode=200 make curl treat "up but not ready"
            // states as success — we only need the server to be reachable here.
            let health_check = std::process::Command::new("curl")
                .args(["-f", "-s", "http://localhost:8200/v1/sys/health?standbyok=true&uninitcode=200&sealedcode=200"])
                .output();

            if let Ok(output) = health_check {
                if output.status.success() {
                    info!("Vault is responding");
                    break;
                } else {
                    // Log the HTTP response for debugging
                    let stderr = String::from_utf8_lossy(&output.stderr);
                    if !stderr.is_empty() && attempts % 5 == 0 {
                        debug!("Vault health check attempt {}: {}", attempts + 1, stderr);
                    }
                }
            } else if attempts % 5 == 0 {
                warn!("Vault health check curl failed (attempt {})", attempts + 1);
            }

            attempts += 1;
            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
        }

        if attempts >= max_attempts {
            warn!(
                "Vault health check timed out after {} attempts",
                max_attempts
            );
            // Final check of vault.log
            let vault_log_path = self.stack_dir("logs/vault/vault.log");
            if vault_log_path.exists() {
                if let Ok(log_content) = fs::read_to_string(&vault_log_path) {
                    let last_lines: Vec<&str> = log_content.lines().rev().take(20).collect();
                    error!("Vault log (last 20 lines):");
                    for line in last_lines.iter().rev() {
                        error!(" {}", line);
                    }
                }
            } else {
                error!("Vault log file does not exist at {:?}", vault_log_path);
            }
            return Err(anyhow::anyhow!(
                "Vault not ready after {} seconds. Check ./botserver-stack/logs/vault/vault.log for details.",
                max_attempts
            ));
        }

        // Check if Vault is already initialized
        let vault_addr = "http://localhost:8200";
        std::env::set_var("VAULT_ADDR", vault_addr);
        std::env::set_var("VAULT_SKIP_VERIFY", "true");

        // Read init.json if it exists (from post_install_cmds)
        let (unseal_key, root_token) = if vault_init_path.exists() {
            info!("Reading Vault initialization from init.json...");
            let init_json = fs::read_to_string(&vault_init_path)?;
            let init_data: serde_json::Value = serde_json::from_str(&init_json)?;

            // init was run with -key-shares=1, so the first entry is the only key.
            let unseal_key = init_data["unseal_keys_b64"]
                .as_array()
                .and_then(|arr| arr.first())
                .and_then(|v| v.as_str())
                .unwrap_or("")
                .to_string();

            let root_token = init_data["root_token"].as_str().unwrap_or("").to_string();

            (unseal_key, root_token)
        } else {
            // Check if .env exists with VAULT_TOKEN - try to recover from that
            let env_token = if env_file_path.exists() {
                if let Ok(env_content) = fs::read_to_string(&env_file_path) {
                    env_content
                        .lines()
                        .find(|line| line.starts_with("VAULT_TOKEN="))
                        .map(|line| line.trim_start_matches("VAULT_TOKEN=").to_string())
                } else {
                    None
                }
            } else {
                None
            };

            // Initialize Vault if not already done
            info!("Initializing Vault...");
            let vault_bin = self.vault_bin();
            // Clear any mTLS env vars that might interfere with CLI
            let init_output = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} {} operator init -key-shares=1 -key-threshold=1 -format=json",
                    vault_addr, vault_bin
                ))
                .output()?;

            if !init_output.status.success() {
                let stderr = String::from_utf8_lossy(&init_output.stderr);
                if stderr.contains("already initialized") {
                    warn!("Vault already initialized but init.json not found");

                    // If we have a token from .env, check if Vault is already unsealed
                    // and we can continue (maybe it was manually unsealed)
                    if let Some(_token) = env_token {
                        info!("Found VAULT_TOKEN in .env, checking if Vault is unsealed...");

                        // Check Vault status
                        let status_check = std::process::Command::new("sh")
                            .arg("-c")
                            .arg(format!(
                                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} {} status -format=json 2>/dev/null",
                                vault_addr, vault_bin
                            ))
                            .output();

                        if let Ok(status_output) = status_check {
                            let status_str = String::from_utf8_lossy(&status_output.stdout);
                            if let Ok(status) =
                                serde_json::from_str::<serde_json::Value>(&status_str)
                            {
                                let sealed = status["sealed"].as_bool().unwrap_or(true);
                                if !sealed {
                                    // Vault is unsealed! We can continue with the token from .env
                                    warn!("Vault is already unsealed - continuing with existing token");
                                    warn!("NOTE: Unseal key is lost - Vault will need manual unseal after restart");
                                    return Ok(()); // Skip rest of setup, Vault is already working
                                }
                            }
                        }

                        // Vault is sealed but we don't have unseal key
                        error!("Vault is sealed and unseal key is lost (init.json missing)");
                        error!("Options:");
                        error!(" 1. If you have a backup of init.json, restore it to ./botserver-stack/conf/vault/init.json");
                        error!(
                            " 2. To start fresh, delete ./botserver-stack/data/vault/ and restart"
                        );
                        return Err(anyhow::anyhow!(
                            "Vault is sealed but unseal key is lost. See error messages above for recovery options."
                        ));
                    }

                    // No token in .env either
                    error!("Vault already initialized but credentials are lost");
                    error!("Options:");
                    error!(" 1. If you have a backup of init.json, restore it to ./botserver-stack/conf/vault/init.json");
                    error!(" 2. To start fresh, delete ./botserver-stack/data/vault/ and ./botserver-stack/conf/vault/init.json and restart");
                    return Err(anyhow::anyhow!(
                        "Vault initialized but credentials lost. See error messages above for recovery options."
                    ));
                }
                return Err(anyhow::anyhow!("Vault init failed: {}", stderr));
            }

            // Persist init output (root token + unseal key) with owner-only perms.
            let init_json = String::from_utf8_lossy(&init_output.stdout);
            fs::write(&vault_init_path, init_json.as_ref())?;
            fs::set_permissions(&vault_init_path, std::fs::Permissions::from_mode(0o600))?;

            let init_data: serde_json::Value = serde_json::from_str(&init_json)?;
            let unseal_key = init_data["unseal_keys_b64"]
                .as_array()
                .and_then(|arr| arr.first())
                .and_then(|v| v.as_str())
                .unwrap_or("")
                .to_string();
            let root_token = init_data["root_token"].as_str().unwrap_or("").to_string();

            (unseal_key, root_token)
        };

        if root_token.is_empty() {
            return Err(anyhow::anyhow!("Failed to get Vault root token"));
        }

        // Unseal Vault
        info!("Unsealing Vault...");
        let vault_bin = self.vault_bin();
        // Clear any mTLS env vars that might interfere with CLI
        let unseal_output = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} {} operator unseal {}",
                vault_addr, vault_bin, unseal_key
            ))
            .output()?;

        if !unseal_output.status.success() {
            let stderr = String::from_utf8_lossy(&unseal_output.stderr);
            if !stderr.contains("already unsealed") {
                warn!("Vault unseal warning: {}", stderr);
            }
        }

        // Set VAULT_TOKEN for subsequent commands
        std::env::set_var("VAULT_TOKEN", &root_token);

        // WRITE .env IMMEDIATELY so SecretsManager can work
        info!("Writing .env file with Vault configuration...");
        let env_content = format!(
            r#"# BotServer Environment Configuration
# Generated by bootstrap - DO NOT ADD OTHER SECRETS HERE
# All secrets are stored in Vault at the paths below:
# - gbo/tables - PostgreSQL credentials
# - gbo/drive - MinIO/S3 credentials
# - gbo/cache - Redis credentials
# - gbo/directory - Zitadel credentials
# - gbo/email - Email credentials
# - gbo/llm - LLM API keys
# - gbo/encryption - Encryption keys

# Vault Configuration - THESE ARE THE ONLY ALLOWED ENV VARS
VAULT_ADDR={}
VAULT_TOKEN={}

# Vault uses HTTP for local development (TLS disabled in config.hcl)
# In production, enable TLS and set VAULT_CACERT, VAULT_CLIENT_CERT, VAULT_CLIENT_KEY

# Cache TTL for secrets (seconds)
VAULT_CACHE_TTL=300
"#,
            vault_addr, root_token
        );
        fs::write(&env_file_path, &env_content)?;
        info!(" * Created .env file with Vault configuration");

        // Re-initialize SecretsManager now that .env exists
        info!("Re-initializing SecretsManager with Vault credentials...");
        match init_secrets_manager().await {
            Ok(_) => info!(" * SecretsManager now connected to Vault"),
            Err(e) => warn!("SecretsManager re-init warning: {}", e),
        }

        // Enable KV secrets engine at gbo/ path
        // ("|| true" because re-enabling an existing mount is a benign error)
        info!("Enabling KV secrets engine...");
        let _ = std::process::Command::new("sh")
            .arg("-c")
            .arg(format!(
                "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} secrets enable -path=secret kv-v2 2>&1 || true",
                vault_addr, root_token, vault_bin
            ))
            .output();

        // Store secrets in Vault - ONLY if they don't already exist
        // This protects existing customer data in distributed environments
        info!("Storing secrets in Vault (only if not existing)...");

        // Helper to check if a secret path exists
        let vault_bin_clone = vault_bin.clone();
        let secret_exists = |path: &str| -> bool {
            let output = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} kv get {} 2>/dev/null",
                    vault_addr, root_token, vault_bin_clone, path
                ))
                .output();
            output.map(|o| o.status.success()).unwrap_or(false)
        };

        // Database credentials - only create if not existing
        if !secret_exists("secret/gbo/tables") {
            let _ = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} kv put secret/gbo/tables host=localhost port=5432 database=botserver username=gbuser password='{}'",
                    vault_addr, root_token, vault_bin, db_password
                ))
                .output()?;
            info!(" Stored database credentials");
        } else {
            info!(" Database credentials already exist - preserving");
        }

        // Drive credentials - only create if not existing
        if !secret_exists("secret/gbo/drive") {
            let _ = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} kv put secret/gbo/drive accesskey='{}' secret='{}'",
                    vault_addr, root_token, vault_bin, drive_accesskey, drive_secret
                ))
                .output()?;
            info!(" Stored drive credentials");
        } else {
            info!(" Drive credentials already exist - preserving");
        }

        // Cache credentials - only create if not existing
        if !secret_exists("secret/gbo/cache") {
            let _ = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} kv put secret/gbo/cache password='{}'",
                    vault_addr, root_token, vault_bin, cache_password
                ))
                .output()?;
            info!(" Stored cache credentials");
        } else {
            info!(" Cache credentials already exist - preserving");
        }

        // Directory placeholder - only create if not existing
        // (empty fields are filled in later by the Directory/OAuth setup)
        if !secret_exists("secret/gbo/directory") {
            let _ = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} kv put secret/gbo/directory url=https://localhost:8300 project_id= client_id= client_secret=",
                    vault_addr, root_token, vault_bin
                ))
                .output()?;
            info!(" Created directory placeholder");
        } else {
            info!(" Directory credentials already exist - preserving");
        }

        // LLM placeholder - only create if not existing
        if !secret_exists("secret/gbo/llm") {
            let _ = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} kv put secret/gbo/llm openai_key= anthropic_key= groq_key=",
                    vault_addr, root_token, vault_bin
                ))
                .output()?;
            info!(" Created LLM placeholder");
        } else {
            info!(" LLM credentials already exist - preserving");
        }

        // Email placeholder - only create if not existing
        if !secret_exists("secret/gbo/email") {
            let _ = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} kv put secret/gbo/email username= password=",
                    vault_addr, root_token, vault_bin
                ))
                .output()?;
            info!(" Created email placeholder");
        } else {
            info!(" Email credentials already exist - preserving");
        }

        // Encryption key - only create if not existing (CRITICAL - never overwrite!)
        if !secret_exists("secret/gbo/encryption") {
            let encryption_key = self.generate_secure_password(32);
            let _ = std::process::Command::new("sh")
                .arg("-c")
                .arg(format!(
                    "unset VAULT_CLIENT_CERT VAULT_CLIENT_KEY VAULT_CACERT; VAULT_ADDR={} VAULT_TOKEN={} {} kv put secret/gbo/encryption master_key='{}'",
                    vault_addr, root_token, vault_bin, encryption_key
                ))
                .output()?;
            info!(" Generated and stored encryption key");
        } else {
            info!(" Encryption key already exists - preserving (CRITICAL)");
        }

        info!("Vault setup complete!");
        info!(" Vault UI: {}/ui", vault_addr);
        info!(" Root token saved to: {}", vault_init_path.display());

        Ok(())
    }
|
|
|
|
|
|
|
|
|
|
/// Setup Email (Stalwart) with Directory integration
|
2025-11-26 22:54:22 -03:00
|
|
|
pub async fn setup_email(&self) -> Result<()> {
|
2025-11-22 22:55:35 -03:00
|
|
|
let config_path = PathBuf::from("./config/email_config.json");
|
|
|
|
|
let directory_config_path = PathBuf::from("./config/directory_config.json");
|
|
|
|
|
|
2025-11-29 17:27:13 -03:00
|
|
|
let mut setup = EmailSetup::new(
|
|
|
|
|
crate::core::urls::InternalUrls::DIRECTORY_BASE.to_string(),
|
|
|
|
|
config_path,
|
|
|
|
|
);
|
2025-11-22 22:55:35 -03:00
|
|
|
|
|
|
|
|
// Try to integrate with Directory if it exists
|
|
|
|
|
let directory_config = if directory_config_path.exists() {
|
|
|
|
|
Some(directory_config_path)
|
|
|
|
|
} else {
|
|
|
|
|
None
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let config = setup.initialize(directory_config).await?;
|
|
|
|
|
|
2025-11-29 17:27:13 -03:00
|
|
|
info!("Email server initialized successfully!");
|
2025-11-22 22:55:35 -03:00
|
|
|
info!(" SMTP: {}:{}", config.smtp_host, config.smtp_port);
|
|
|
|
|
info!(" IMAP: {}:{}", config.imap_host, config.imap_port);
|
|
|
|
|
info!(" Admin: {} / {}", config.admin_user, config.admin_pass);
|
|
|
|
|
if config.directory_integration {
|
2025-12-09 07:55:11 -03:00
|
|
|
info!(" Integrated with Directory for authentication");
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async fn get_drive_client(config: &AppConfig) -> Client {
|
2025-11-27 15:19:17 -03:00
|
|
|
let endpoint = if config.drive.server.ends_with('/') {
|
2025-11-22 22:55:35 -03:00
|
|
|
config.drive.server.clone()
|
2025-11-27 15:19:17 -03:00
|
|
|
} else {
|
|
|
|
|
format!("{}/", config.drive.server)
|
2025-11-22 22:55:35 -03:00
|
|
|
};
|
2025-12-08 23:35:33 -03:00
|
|
|
|
|
|
|
|
// Get credentials from config, or fetch from Vault if empty
|
|
|
|
|
let (access_key, secret_key) =
|
|
|
|
|
if config.drive.access_key.is_empty() || config.drive.secret_key.is_empty() {
|
|
|
|
|
// Try to get from Vault using the global SecretsManager
|
|
|
|
|
match crate::shared::utils::get_secrets_manager().await {
|
|
|
|
|
Some(manager) if manager.is_enabled() => {
|
|
|
|
|
match manager.get_drive_credentials().await {
|
|
|
|
|
Ok((ak, sk)) => (ak, sk),
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!("Failed to get drive credentials from Vault: {}", e);
|
|
|
|
|
(
|
|
|
|
|
config.drive.access_key.clone(),
|
|
|
|
|
config.drive.secret_key.clone(),
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
_ => (
|
|
|
|
|
config.drive.access_key.clone(),
|
|
|
|
|
config.drive.secret_key.clone(),
|
|
|
|
|
),
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
(
|
|
|
|
|
config.drive.access_key.clone(),
|
|
|
|
|
config.drive.secret_key.clone(),
|
|
|
|
|
)
|
|
|
|
|
};
|
|
|
|
|
|
2025-11-22 22:55:35 -03:00
|
|
|
let base_config = aws_config::defaults(BehaviorVersion::latest())
|
|
|
|
|
.endpoint_url(endpoint)
|
|
|
|
|
.region("auto")
|
|
|
|
|
.credentials_provider(aws_sdk_s3::config::Credentials::new(
|
2025-12-08 23:35:33 -03:00
|
|
|
access_key, secret_key, None, None, "static",
|
2025-11-22 22:55:35 -03:00
|
|
|
))
|
|
|
|
|
.load()
|
|
|
|
|
.await;
|
|
|
|
|
let s3_config = aws_sdk_s3::config::Builder::from(&base_config)
|
|
|
|
|
.force_path_style(true)
|
|
|
|
|
.build();
|
|
|
|
|
aws_sdk_s3::Client::from_conf(s3_config)
|
|
|
|
|
}
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
/// Sync bot configurations from template config.csv files to database
|
|
|
|
|
/// This is separate from drive upload and does not require S3 connection
|
|
|
|
|
pub fn sync_templates_to_database(&self) -> Result<()> {
|
2025-11-22 22:55:35 -03:00
|
|
|
let mut conn = establish_pg_connection()?;
|
|
|
|
|
self.create_bots_from_templates(&mut conn)?;
|
2025-12-08 00:19:29 -03:00
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub async fn upload_templates_to_drive(&self, _config: &AppConfig) -> Result<()> {
|
2025-12-14 15:58:54 -03:00
|
|
|
// Check multiple possible template locations
|
|
|
|
|
let possible_paths = [
|
|
|
|
|
"../bottemplates", // Development: sibling directory
|
|
|
|
|
"bottemplates", // In current directory
|
|
|
|
|
"botserver-templates", // Installation bundle subdirectory
|
|
|
|
|
"templates", // Legacy path
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
let templates_dir = possible_paths.iter().map(Path::new).find(|p| p.exists());
|
|
|
|
|
|
|
|
|
|
let templates_dir = match templates_dir {
|
|
|
|
|
Some(dir) => {
|
|
|
|
|
info!("Using templates from: {:?}", dir);
|
|
|
|
|
dir
|
|
|
|
|
}
|
|
|
|
|
None => {
|
|
|
|
|
info!("No templates directory found, skipping template upload");
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
};
|
2025-11-22 22:55:35 -03:00
|
|
|
let client = Self::get_drive_client(_config).await;
|
|
|
|
|
let mut read_dir = tokio::fs::read_dir(templates_dir).await?;
|
|
|
|
|
while let Some(entry) = read_dir.next_entry().await? {
|
|
|
|
|
let path = entry.path();
|
|
|
|
|
if path.is_dir()
|
|
|
|
|
&& path
|
|
|
|
|
.file_name()
|
|
|
|
|
.unwrap()
|
|
|
|
|
.to_string_lossy()
|
|
|
|
|
.ends_with(".gbai")
|
|
|
|
|
{
|
|
|
|
|
let bot_name = path.file_name().unwrap().to_string_lossy().to_string();
|
|
|
|
|
let bucket = bot_name.trim_start_matches('/').to_string();
|
|
|
|
|
if client.head_bucket().bucket(&bucket).send().await.is_err() {
|
|
|
|
|
match client.create_bucket().bucket(&bucket).send().await {
|
|
|
|
|
Ok(_) => {
|
|
|
|
|
self.upload_directory_recursive(&client, &path, &bucket, "/")
|
|
|
|
|
.await?;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
error!("Failed to create bucket {}: {:?}", bucket, e);
|
|
|
|
|
return Err(anyhow::anyhow!("Failed to create bucket {}: {}. Check S3 credentials and endpoint configuration", bucket, e));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
fn create_bots_from_templates(&self, conn: &mut diesel::PgConnection) -> Result<()> {
|
|
|
|
|
use crate::shared::models::schema::bots;
|
|
|
|
|
use diesel::prelude::*;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-14 15:58:54 -03:00
|
|
|
// Check multiple possible template locations
|
|
|
|
|
let possible_paths = [
|
|
|
|
|
"../bottemplates", // Development: sibling directory
|
|
|
|
|
"bottemplates", // In current directory
|
|
|
|
|
"botserver-templates", // Installation bundle subdirectory
|
|
|
|
|
"templates", // Legacy path
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
let templates_dir = possible_paths
|
|
|
|
|
.iter()
|
|
|
|
|
.map(|p| PathBuf::from(p))
|
|
|
|
|
.find(|p| p.exists());
|
|
|
|
|
|
|
|
|
|
let templates_dir = match templates_dir {
|
|
|
|
|
Some(dir) => {
|
|
|
|
|
info!("Loading templates from: {:?}", dir);
|
|
|
|
|
dir
|
|
|
|
|
}
|
|
|
|
|
None => {
|
|
|
|
|
warn!(
|
|
|
|
|
"Templates directory does not exist (checked: {:?})",
|
|
|
|
|
possible_paths
|
|
|
|
|
);
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
};
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
// Get the default bot (created by migrations) - we'll sync all template configs to it
|
|
|
|
|
let default_bot: Option<(uuid::Uuid, String)> = bots::table
|
|
|
|
|
.filter(bots::is_active.eq(true))
|
|
|
|
|
.select((bots::id, bots::name))
|
|
|
|
|
.first(conn)
|
|
|
|
|
.optional()?;
|
|
|
|
|
|
|
|
|
|
let (default_bot_id, default_bot_name) = match default_bot {
|
|
|
|
|
Some((id, name)) => (id, name),
|
|
|
|
|
None => {
|
|
|
|
|
error!("No active bot found in database - cannot sync template configs");
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
info!(
|
|
|
|
|
"Syncing template configs to bot '{}' ({})",
|
|
|
|
|
default_bot_name, default_bot_id
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
// Only sync the default.gbai template config (main config for the system)
|
|
|
|
|
let default_template = templates_dir.join("default.gbai");
|
2025-12-14 15:58:54 -03:00
|
|
|
info!("Looking for default template at: {:?}", default_template);
|
2025-12-08 00:19:29 -03:00
|
|
|
if default_template.exists() {
|
|
|
|
|
let config_path = default_template.join("default.gbot").join("config.csv");
|
|
|
|
|
|
|
|
|
|
if config_path.exists() {
|
|
|
|
|
match std::fs::read_to_string(&config_path) {
|
|
|
|
|
Ok(csv_content) => {
|
2025-12-12 12:33:17 -03:00
|
|
|
debug!("Syncing config.csv from {:?}", config_path);
|
2025-12-08 00:19:29 -03:00
|
|
|
if let Err(e) =
|
|
|
|
|
self.sync_config_csv_to_db(conn, &default_bot_id, &csv_content)
|
|
|
|
|
{
|
|
|
|
|
error!("Failed to sync config.csv: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
warn!("Could not read config.csv: {}", e);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
2025-12-12 12:33:17 -03:00
|
|
|
debug!("No config.csv found at {:?}", config_path);
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
|
|
|
|
} else {
|
2025-12-12 12:33:17 -03:00
|
|
|
debug!("default.gbai template not found");
|
2025-12-08 00:19:29 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    /// Sync config.csv content to the bot_configuration table.
    /// This is critical for loading LLM settings on fresh starts.
    ///
    /// Expected CSV layout: a `name,value` header line followed by
    /// `key,value` rows; blank lines and lines starting with `#` are ignored.
    /// Each row is upserted (`ON CONFLICT (bot_id, config_key)`) so re-running
    /// bootstrap refreshes values without duplicating rows. Individual row
    /// failures are logged and skipped rather than aborting the whole sync.
    fn sync_config_csv_to_db(
        &self,
        conn: &mut diesel::PgConnection,
        bot_id: &uuid::Uuid,
        content: &str,
    ) -> Result<()> {
        // Counters for the summary log at the end.
        let mut synced = 0;
        let mut skipped = 0;
        let lines: Vec<&str> = content.lines().collect();

        debug!(
            "Parsing config.csv with {} lines for bot {}",
            lines.len(),
            bot_id
        );

        for (line_num, line) in lines.iter().enumerate().skip(1) {
            // Skip header line (name,value)
            let line = line.trim();
            if line.is_empty() || line.starts_with('#') {
                continue;
            }

            // splitn(2, ',') keeps commas inside the value intact.
            let parts: Vec<&str> = line.splitn(2, ',').collect();
            if parts.len() >= 2 {
                let key = parts[0].trim();
                let value = parts[1].trim();

                if key.is_empty() {
                    skipped += 1;
                    continue;
                }

                // Use UUID type since migration 6.1.1 converted column to UUID
                let new_id = uuid::Uuid::new_v4();

                match diesel::sql_query(
                    "INSERT INTO bot_configuration (id, bot_id, config_key, config_value, config_type, created_at, updated_at) \
                    VALUES ($1, $2, $3, $4, 'string', NOW(), NOW()) \
                    ON CONFLICT (bot_id, config_key) DO UPDATE SET config_value = EXCLUDED.config_value, updated_at = NOW()"
                )
                .bind::<diesel::sql_types::Uuid, _>(new_id)
                .bind::<diesel::sql_types::Uuid, _>(bot_id)
                .bind::<diesel::sql_types::Text, _>(key)
                .bind::<diesel::sql_types::Text, _>(value)
                .execute(conn) {
                    Ok(_) => {
                        synced += 1;
                    }
                    Err(e) => {
                        // line_num is 0-based over physical lines; +1 for humans.
                        error!("Failed to sync config key '{}' at line {}: {}", key, line_num + 1, e);
                        // Continue with other keys instead of failing completely
                    }
                }
            }
        }

        if synced > 0 {
            info!(
                "Synced {} config values for bot {} (skipped {} empty lines)",
                synced, bot_id, skipped
            );
        } else {
            warn!(
                "No config values synced for bot {} - check config.csv format",
                bot_id
            );
        }
        Ok(())
    }
|
|
|
|
|
fn upload_directory_recursive<'a>(
|
|
|
|
|
&'a self,
|
|
|
|
|
client: &'a Client,
|
|
|
|
|
local_path: &'a Path,
|
|
|
|
|
bucket: &'a str,
|
|
|
|
|
prefix: &'a str,
|
|
|
|
|
) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<()>> + 'a>> {
|
|
|
|
|
Box::pin(async move {
|
2025-11-27 15:19:17 -03:00
|
|
|
let _normalized_path = if local_path.to_string_lossy().ends_with('/') {
|
2025-11-22 22:55:35 -03:00
|
|
|
local_path.to_string_lossy().to_string()
|
2025-11-27 15:19:17 -03:00
|
|
|
} else {
|
|
|
|
|
format!("{}/", local_path.display())
|
2025-11-22 22:55:35 -03:00
|
|
|
};
|
|
|
|
|
let mut read_dir = tokio::fs::read_dir(local_path).await?;
|
|
|
|
|
while let Some(entry) = read_dir.next_entry().await? {
|
|
|
|
|
let path = entry.path();
|
|
|
|
|
let file_name = path.file_name().unwrap().to_string_lossy().to_string();
|
|
|
|
|
let mut key = prefix.trim_matches('/').to_string();
|
|
|
|
|
if !key.is_empty() {
|
|
|
|
|
key.push('/');
|
|
|
|
|
}
|
|
|
|
|
key.push_str(&file_name);
|
|
|
|
|
if path.is_file() {
|
|
|
|
|
let content = tokio::fs::read(&path).await?;
|
|
|
|
|
client
|
|
|
|
|
.put_object()
|
|
|
|
|
.bucket(bucket)
|
|
|
|
|
.key(&key)
|
|
|
|
|
.body(content.into())
|
|
|
|
|
.send()
|
|
|
|
|
.await?;
|
|
|
|
|
} else if path.is_dir() {
|
|
|
|
|
self.upload_directory_recursive(client, &path, bucket, &key)
|
|
|
|
|
.await?;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
Ok(())
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
pub fn apply_migrations(&self, conn: &mut diesel::PgConnection) -> Result<()> {
|
|
|
|
|
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
|
|
|
|
|
|
|
|
|
|
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
|
|
|
|
|
|
2025-12-09 09:04:56 -03:00
|
|
|
// Run migrations silently - don't output to console
|
|
|
|
|
if let Err(e) = conn.run_pending_migrations(MIGRATIONS) {
|
2025-11-22 22:55:35 -03:00
|
|
|
error!("Failed to apply migrations: {}", e);
|
|
|
|
|
return Err(anyhow::anyhow!("Migration error: {}", e));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
2025-11-29 16:29:28 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
/// Create Vault configuration with mTLS settings
|
|
|
|
|
async fn create_vault_config(&self) -> Result<()> {
|
2025-12-14 15:58:54 -03:00
|
|
|
let vault_conf_dir = self.stack_dir("conf/vault");
|
2025-12-07 02:13:28 -03:00
|
|
|
let config_path = vault_conf_dir.join("config.hcl");
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
fs::create_dir_all(&vault_conf_dir)?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
// Vault is started from botserver-stack/bin/vault/, so paths must be relative to that
|
|
|
|
|
// From bin/vault/ to conf/ is ../../conf/
|
|
|
|
|
// From bin/vault/ to data/ is ../../data/
|
|
|
|
|
let config = r#"# Vault Configuration
|
2025-12-07 02:13:28 -03:00
|
|
|
# Generated by BotServer bootstrap
|
2025-12-08 00:19:29 -03:00
|
|
|
# Note: Paths are relative to botserver-stack/bin/vault/ (Vault's working directory)
|
2025-12-07 02:13:28 -03:00
|
|
|
|
|
|
|
|
# Storage backend - file-based for single instance
|
|
|
|
|
storage "file" {
|
2025-12-08 00:19:29 -03:00
|
|
|
path = "../../data/vault"
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
# Listener with TLS DISABLED for local development
|
|
|
|
|
# In production, enable TLS with proper certificates
|
2025-12-07 02:13:28 -03:00
|
|
|
listener "tcp" {
|
2025-12-08 00:19:29 -03:00
|
|
|
address = "0.0.0.0:8200"
|
|
|
|
|
tls_disable = true
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
|
|
|
|
|
2025-12-08 00:19:29 -03:00
|
|
|
# API settings - use HTTP for local dev
|
|
|
|
|
api_addr = "http://localhost:8200"
|
|
|
|
|
cluster_addr = "http://localhost:8201"
|
2025-12-07 02:13:28 -03:00
|
|
|
|
|
|
|
|
# UI enabled for administration
|
|
|
|
|
ui = true
|
|
|
|
|
|
|
|
|
|
# Disable memory locking (for development - enable in production)
|
|
|
|
|
disable_mlock = true
|
|
|
|
|
|
|
|
|
|
# Telemetry
|
|
|
|
|
telemetry {
|
|
|
|
|
disable_hostname = true
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
# Log level
|
|
|
|
|
log_level = "info"
|
|
|
|
|
"#;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
fs::write(&config_path, config)?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
// Create data directory for Vault storage
|
2025-12-14 15:58:54 -03:00
|
|
|
fs::create_dir_all(self.stack_dir("data/vault"))?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
info!(
|
|
|
|
|
"Created Vault config with mTLS at {}",
|
|
|
|
|
config_path.display()
|
|
|
|
|
);
|
2025-12-07 02:13:28 -03:00
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2025-11-29 16:29:28 -03:00
|
|
|
/// Generate TLS certificates for all services
|
|
|
|
|
async fn generate_certificates(&self) -> Result<()> {
|
2025-12-14 15:58:54 -03:00
|
|
|
let cert_dir = self.stack_dir("conf/system/certificates");
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
// Create certificate directory structure
|
|
|
|
|
fs::create_dir_all(&cert_dir)?;
|
|
|
|
|
fs::create_dir_all(cert_dir.join("ca"))?;
|
|
|
|
|
|
|
|
|
|
// Check if CA already exists
|
|
|
|
|
let ca_cert_path = cert_dir.join("ca/ca.crt");
|
|
|
|
|
let ca_key_path = cert_dir.join("ca/ca.key");
|
|
|
|
|
|
2025-12-03 16:05:30 -03:00
|
|
|
// CA params for issuer creation
|
|
|
|
|
let mut ca_params = CertificateParams::default();
|
|
|
|
|
ca_params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
|
2025-11-30 23:48:08 -03:00
|
|
|
|
2025-12-03 16:05:30 -03:00
|
|
|
let mut dn = DistinguishedName::new();
|
|
|
|
|
dn.push(DnType::CountryName, "BR");
|
|
|
|
|
dn.push(DnType::OrganizationName, "BotServer");
|
|
|
|
|
dn.push(DnType::CommonName, "BotServer CA");
|
|
|
|
|
ca_params.distinguished_name = dn;
|
2025-11-30 23:48:08 -03:00
|
|
|
|
2025-12-03 16:05:30 -03:00
|
|
|
ca_params.not_before = time::OffsetDateTime::now_utc();
|
|
|
|
|
ca_params.not_after = time::OffsetDateTime::now_utc() + time::Duration::days(3650);
|
2025-11-30 23:48:08 -03:00
|
|
|
|
2025-12-03 16:05:30 -03:00
|
|
|
let ca_key_pair: KeyPair = if ca_cert_path.exists() && ca_key_path.exists() {
|
|
|
|
|
info!("Using existing CA certificate");
|
|
|
|
|
// Load existing CA key
|
|
|
|
|
let key_pem = fs::read_to_string(&ca_key_path)?;
|
|
|
|
|
KeyPair::from_pem(&key_pem)?
|
2025-11-29 16:29:28 -03:00
|
|
|
} else {
|
|
|
|
|
info!("Generating new CA certificate");
|
2025-12-03 16:05:30 -03:00
|
|
|
let key_pair = KeyPair::generate()?;
|
|
|
|
|
let cert = ca_params.self_signed(&key_pair)?;
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
// Save CA certificate and key
|
2025-12-03 16:05:30 -03:00
|
|
|
fs::write(&ca_cert_path, cert.pem())?;
|
|
|
|
|
fs::write(&ca_key_path, key_pair.serialize_pem())?;
|
2025-11-29 16:29:28 -03:00
|
|
|
|
2025-12-03 16:05:30 -03:00
|
|
|
key_pair
|
2025-11-29 16:29:28 -03:00
|
|
|
};
|
|
|
|
|
|
2025-12-03 16:05:30 -03:00
|
|
|
// Create issuer from CA params and key
|
|
|
|
|
let ca_issuer = Issuer::from_params(&ca_params, &ca_key_pair);
|
|
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
// Generate client certificate for botserver (for mTLS to all services)
|
|
|
|
|
let botserver_dir = cert_dir.join("botserver");
|
|
|
|
|
fs::create_dir_all(&botserver_dir)?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
let client_cert_path = botserver_dir.join("client.crt");
|
|
|
|
|
let client_key_path = botserver_dir.join("client.key");
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
if !client_cert_path.exists() || !client_key_path.exists() {
|
|
|
|
|
info!("Generating mTLS client certificate for botserver");
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
let mut client_params = CertificateParams::default();
|
|
|
|
|
client_params.not_before = time::OffsetDateTime::now_utc();
|
|
|
|
|
client_params.not_after = time::OffsetDateTime::now_utc() + time::Duration::days(365);
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
let mut client_dn = DistinguishedName::new();
|
|
|
|
|
client_dn.push(DnType::CountryName, "BR");
|
|
|
|
|
client_dn.push(DnType::OrganizationName, "BotServer");
|
|
|
|
|
client_dn.push(DnType::CommonName, "botserver-client");
|
|
|
|
|
client_params.distinguished_name = client_dn;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
// Add client auth extended key usage
|
2025-12-08 00:19:29 -03:00
|
|
|
client_params
|
|
|
|
|
.subject_alt_names
|
|
|
|
|
.push(rcgen::SanType::DnsName("botserver".to_string().try_into()?));
|
|
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
let client_key = KeyPair::generate()?;
|
|
|
|
|
let client_cert = client_params.signed_by(&client_key, &ca_issuer)?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
2025-12-07 02:13:28 -03:00
|
|
|
fs::write(&client_cert_path, client_cert.pem())?;
|
|
|
|
|
fs::write(&client_key_path, client_key.serialize_pem())?;
|
|
|
|
|
fs::copy(&ca_cert_path, botserver_dir.join("ca.crt"))?;
|
2025-12-08 00:19:29 -03:00
|
|
|
|
|
|
|
|
info!(
|
|
|
|
|
"Generated mTLS client certificate at {}",
|
|
|
|
|
client_cert_path.display()
|
|
|
|
|
);
|
2025-12-07 02:13:28 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Services that need certificates - Vault FIRST
|
2025-12-07 10:42:02 -03:00
|
|
|
// Using component names: tables (postgres), drive (minio), cache (redis), vectordb (qdrant)
|
2025-11-29 16:29:28 -03:00
|
|
|
let services = vec![
|
2025-12-08 00:19:29 -03:00
|
|
|
(
|
|
|
|
|
"vault",
|
|
|
|
|
vec!["localhost", "127.0.0.1", "vault.botserver.local"],
|
|
|
|
|
),
|
2025-11-29 16:29:28 -03:00
|
|
|
("api", vec!["localhost", "127.0.0.1", "api.botserver.local"]),
|
|
|
|
|
("llm", vec!["localhost", "127.0.0.1", "llm.botserver.local"]),
|
|
|
|
|
(
|
|
|
|
|
"embedding",
|
|
|
|
|
vec!["localhost", "127.0.0.1", "embedding.botserver.local"],
|
|
|
|
|
),
|
|
|
|
|
(
|
2025-12-07 10:42:02 -03:00
|
|
|
"vectordb",
|
|
|
|
|
vec!["localhost", "127.0.0.1", "vectordb.botserver.local"],
|
2025-11-29 16:29:28 -03:00
|
|
|
),
|
|
|
|
|
(
|
2025-12-07 10:42:02 -03:00
|
|
|
"tables",
|
|
|
|
|
vec!["localhost", "127.0.0.1", "tables.botserver.local"],
|
2025-11-29 16:29:28 -03:00
|
|
|
),
|
|
|
|
|
(
|
2025-12-07 10:42:02 -03:00
|
|
|
"cache",
|
|
|
|
|
vec!["localhost", "127.0.0.1", "cache.botserver.local"],
|
2025-11-29 16:29:28 -03:00
|
|
|
),
|
|
|
|
|
(
|
2025-12-07 10:42:02 -03:00
|
|
|
"drive",
|
|
|
|
|
vec!["localhost", "127.0.0.1", "drive.botserver.local"],
|
2025-11-29 16:29:28 -03:00
|
|
|
),
|
|
|
|
|
(
|
|
|
|
|
"directory",
|
|
|
|
|
vec![
|
|
|
|
|
"localhost",
|
|
|
|
|
"127.0.0.1",
|
|
|
|
|
"directory.botserver.local",
|
|
|
|
|
"auth.botserver.local",
|
|
|
|
|
],
|
|
|
|
|
),
|
|
|
|
|
(
|
|
|
|
|
"email",
|
|
|
|
|
vec![
|
|
|
|
|
"localhost",
|
|
|
|
|
"127.0.0.1",
|
2025-12-07 10:42:02 -03:00
|
|
|
"email.botserver.local",
|
2025-11-29 16:29:28 -03:00
|
|
|
"smtp.botserver.local",
|
|
|
|
|
"imap.botserver.local",
|
|
|
|
|
],
|
|
|
|
|
),
|
|
|
|
|
(
|
|
|
|
|
"meet",
|
|
|
|
|
vec![
|
|
|
|
|
"localhost",
|
|
|
|
|
"127.0.0.1",
|
|
|
|
|
"meet.botserver.local",
|
|
|
|
|
"turn.botserver.local",
|
|
|
|
|
],
|
|
|
|
|
),
|
|
|
|
|
(
|
|
|
|
|
"caddy",
|
|
|
|
|
vec![
|
|
|
|
|
"localhost",
|
|
|
|
|
"127.0.0.1",
|
|
|
|
|
"*.botserver.local",
|
|
|
|
|
"botserver.local",
|
|
|
|
|
],
|
|
|
|
|
),
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
for (service, sans) in services {
|
|
|
|
|
let service_dir = cert_dir.join(service);
|
|
|
|
|
fs::create_dir_all(&service_dir)?;
|
|
|
|
|
|
|
|
|
|
let cert_path = service_dir.join("server.crt");
|
|
|
|
|
let key_path = service_dir.join("server.key");
|
|
|
|
|
|
|
|
|
|
// Skip if certificate already exists
|
|
|
|
|
if cert_path.exists() && key_path.exists() {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
info!("Generating certificate for {}", service);
|
|
|
|
|
|
|
|
|
|
// Generate service certificate
|
|
|
|
|
let mut params = CertificateParams::default();
|
|
|
|
|
params.not_before = time::OffsetDateTime::now_utc();
|
|
|
|
|
params.not_after = time::OffsetDateTime::now_utc() + time::Duration::days(365);
|
|
|
|
|
|
|
|
|
|
let mut dn = DistinguishedName::new();
|
|
|
|
|
dn.push(DnType::CountryName, "BR");
|
|
|
|
|
dn.push(DnType::OrganizationName, "BotServer");
|
|
|
|
|
dn.push(DnType::CommonName, &format!("{}.botserver.local", service));
|
|
|
|
|
params.distinguished_name = dn;
|
|
|
|
|
|
|
|
|
|
// Add SANs
|
|
|
|
|
for san in sans {
|
|
|
|
|
params
|
|
|
|
|
.subject_alt_names
|
2025-12-03 16:05:30 -03:00
|
|
|
.push(rcgen::SanType::DnsName(san.to_string().try_into()?));
|
2025-11-29 16:29:28 -03:00
|
|
|
}
|
|
|
|
|
|
2025-12-03 16:05:30 -03:00
|
|
|
let key_pair = KeyPair::generate()?;
|
|
|
|
|
let cert = params.signed_by(&key_pair, &ca_issuer)?;
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
// Save certificate and key
|
2025-12-03 16:05:30 -03:00
|
|
|
fs::write(cert_path, cert.pem())?;
|
|
|
|
|
fs::write(key_path, key_pair.serialize_pem())?;
|
2025-11-29 16:29:28 -03:00
|
|
|
|
|
|
|
|
// Copy CA cert to service directory for easy access
|
|
|
|
|
fs::copy(&ca_cert_path, service_dir.join("ca.crt"))?;
|
|
|
|
|
}
|
|
|
|
|
|
2025-11-29 17:27:13 -03:00
|
|
|
info!("TLS certificates generated successfully");
|
2025-11-29 16:29:28 -03:00
|
|
|
Ok(())
|
|
|
|
|
}
|
2025-11-22 22:55:35 -03:00
|
|
|
}
|