Fix migration errors and reorganize migration files

- Fixed 'relation session_kb_associations does not exist' error in core consolidated migration.
- Renamed migration directories from timestamp-based to version-based (6.0.x, 6.1.x, 6.2.x).
- Reorganized migrations into dedicated feature folders (products, dashboards, learn, video).
- Updated migration execution order in core/shared/utils.rs.
- Moved legacy migrations to 6.0.x/6.1.x and the workflow migration to 6.2.0.
This commit is contained in:
Rodrigo Rodriguez (Pragmatismo) 2026-01-27 13:45:54 -03:00
parent 3c279f43e5
commit b103c07248
75 changed files with 633 additions and 60 deletions

View file

@ -713,12 +713,12 @@ CREATE INDEX idx_user_login_tokens_expires ON public.user_login_tokens USING btr
-- Session KB Associations moved to migrations/research
-- Comments
COMMENT ON TABLE session_kb_associations IS 'Tracks which Knowledge Base collections are active in each conversation session';
COMMENT ON COLUMN session_kb_associations.kb_name IS 'Name of the KB folder (e.g., "circular", "comunicado", "geral")';
COMMENT ON COLUMN session_kb_associations.kb_folder_path IS 'Full path to KB folder: work/{bot}/{bot}.gbkb/{kb_name}';
COMMENT ON COLUMN session_kb_associations.qdrant_collection IS 'Qdrant collection name for this KB';
COMMENT ON COLUMN session_kb_associations.added_by_tool IS 'Name of the .bas tool that added this KB (e.g., "change-subject.bas")';
COMMENT ON COLUMN session_kb_associations.is_active IS 'Whether this KB is currently active in the session';
-- COMMENT ON TABLE session_kb_associations IS 'Tracks which Knowledge Base collections are active in each conversation session';
-- COMMENT ON COLUMN session_kb_associations.kb_name IS 'Name of the KB folder (e.g., "circular", "comunicado", "geral")';
-- COMMENT ON COLUMN session_kb_associations.kb_folder_path IS 'Full path to KB folder: work/{bot}/{bot}.gbkb/{kb_name}';
-- COMMENT ON COLUMN session_kb_associations.qdrant_collection IS 'Qdrant collection name for this KB';
-- COMMENT ON COLUMN session_kb_associations.added_by_tool IS 'Name of the .bas tool that added this KB (e.g., "change-subject.bas")';
-- COMMENT ON COLUMN session_kb_associations.is_active IS 'Whether this KB is currently active in the session';
-- Add organization relationship to bots
ALTER TABLE public.bots
ADD COLUMN IF NOT EXISTS org_id UUID,

View file

@ -6,14 +6,16 @@ declare -A container_limits=(
["*tables*"]="4096MB:100ms/100ms"
["*postgre*"]="4096MB:100ms/100ms" # PostgreSQL alternative
["*dns*"]="2048MB:100ms/100ms"
["*table-editor*"]="2048MB:25s/100ms"
["*oppbot*"]="4048MB:100ms/100ms"
["*table-editor*"]="2048MB:25ms/100ms"
["*proxy*"]="2048MB:100ms/100ms"
["*directory*"]="1024MB:50ms/100ms"
["*drive*"]="4096MB:100ms/100ms"
["*minio*"]="4096MB:100ms/100ms" # MinIO alternative
["*email*"]="4096MB:100ms/100ms"
["*webmail*"]="2096MB:100ms/100ms"
["*bot*"]="2048MB:5ms/100ms"
["*bot*"]="2048MB:25ms/100ms"
["*oppbot*"]="2048MB:50ms/100ms"
["*meeting*"]="4096MB:100ms/100ms"
["*alm*"]="512MB:50ms/100ms"
["*vault*"]="512MB:50ms/100ms"

View file

@ -1,5 +1,5 @@
lxc config device override $CONTAINER_NAME root
lxc config device set $CONTAINER_NAME root size 6GB
lxc config device set $CONTAINER_NAME root size 12GB
zpool set autoexpand=on default
zpool online -e default /var/snap/lxd/common/lxd/disks/default.img

374
src/core/incus/cloud.rs Normal file
View file

@ -0,0 +1,374 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::process::Command;
use tokio::process::Command as AsyncCommand;
/// Full description of an Incus cluster deployment: its member nodes,
/// storage pools, networks, and instance profiles.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IncusCloudConfig {
    // Cluster identifier (not read by the bootstrap code visible here).
    pub cluster_name: String,
    // Member nodes; the first entry is treated as the bootstrap node.
    pub nodes: Vec<IncusNode>,
    pub storage_pools: Vec<StoragePool>,
    pub networks: Vec<NetworkConfig>,
    pub profiles: Vec<ProfileConfig>,
}
/// One cluster member and its declared hardware capacity.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IncusNode {
    pub name: String,
    // Host address; used both as the HTTPS listen address and as the SSH
    // target when joining nodes (see IncusCloudManager).
    pub address: String,
    pub role: NodeRole,
    pub resources: NodeResources,
}
/// Role a node plays in the cluster.
/// NOTE(review): the bootstrap code visible here never branches on this
/// value — confirm it is consumed elsewhere.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum NodeRole {
    Controller,
    Worker,
    Storage,
}
/// Declared capacity of a node (units encoded in the field names).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeResources {
    pub cpu_cores: u32,
    pub memory_gb: u32,
    pub storage_gb: u32,
}
/// Storage pool definition passed to `incus storage create`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoragePool {
    pub name: String,
    // Storage driver name, e.g. "zfs" (see create_default_cloud_config).
    pub driver: String,
    // Driver options as key/value pairs, e.g. "size" => "100GB".
    pub config: HashMap<String, String>,
}
/// Network definition passed to `incus network create`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetworkConfig {
    pub name: String,
    // Network type, e.g. "bridge"; trailing underscore avoids the `type`
    // keyword.
    pub type_: String,
    // Network options, e.g. "ipv4.address" / "ipv4.nat".
    pub config: HashMap<String, String>,
}
/// Instance profile: per-device option maps plus profile-level config keys
/// (limits, security flags).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfileConfig {
    pub name: String,
    // device name -> (option key -> option value)
    pub devices: HashMap<String, HashMap<String, String>>,
    pub config: HashMap<String, String>,
}
/// Drives an Incus cluster by shelling out to the `incus` CLI (and `ssh`
/// for remote joins), according to a fixed [`IncusCloudConfig`].
pub struct IncusCloudManager {
    config: IncusCloudConfig,
}
impl IncusCloudManager {
    /// Creates a manager that drives the cluster described by `config`.
    pub fn new(config: IncusCloudConfig) -> Self {
        Self { config }
    }

    /// Bootstraps the cluster end-to-end: local `incus admin init`, then
    /// storage pools, networks, profiles, and finally joins remaining nodes.
    pub async fn bootstrap_cluster(&self) -> Result<()> {
        self.init_first_node().await?;
        self.setup_storage_pools().await?;
        self.setup_networks().await?;
        self.setup_profiles().await?;
        self.join_additional_nodes().await?;
        Ok(())
    }

    /// Initializes Incus on the local host (`incus admin init --auto`) and
    /// points the cluster/core HTTPS addresses at the first configured node.
    ///
    /// Returns an error if no nodes are configured or the init command fails.
    async fn init_first_node(&self) -> Result<()> {
        // Guard instead of panicking on `nodes[0]` when the node list is empty.
        let first_node = self
            .config
            .nodes
            .first()
            .ok_or_else(|| anyhow::anyhow!("IncusCloudConfig has no nodes configured"))?;
        let output = AsyncCommand::new("incus")
            .args(&["admin", "init", "--auto"])
            .output()
            .await?;
        if !output.status.success() {
            return Err(anyhow::anyhow!(
                "Failed to initialize Incus: {}",
                String::from_utf8_lossy(&output.stderr)
            ));
        }
        // NOTE(review): exit status of the two `config set` calls below is not
        // checked, so address-configuration failures are silently ignored.
        AsyncCommand::new("incus")
            .args(&["config", "set", "cluster.https_address", &first_node.address])
            .output()
            .await?;
        AsyncCommand::new("incus")
            .args(&["config", "set", "core.https_address", &first_node.address])
            .output()
            .await?;
        Ok(())
    }

    /// Creates every configured storage pool via `incus storage create`.
    async fn setup_storage_pools(&self) -> Result<()> {
        for pool in &self.config.storage_pools {
            // Pool options are passed as `key=value` pairs; the previous code
            // pushed key and value as two separate arguments, which the incus
            // CLI does not accept.
            let mut args: Vec<String> = vec![
                "storage".to_string(),
                "create".to_string(),
                pool.name.clone(),
                pool.driver.clone(),
            ];
            for (key, value) in &pool.config {
                args.push(format!("{}={}", key, value));
            }
            AsyncCommand::new("incus").args(&args).output().await?;
        }
        Ok(())
    }

    /// Creates every configured network via `incus network create`.
    async fn setup_networks(&self) -> Result<()> {
        for network in &self.config.networks {
            // Build owned argument strings: the previous code pushed
            // `&format!(...)` into a `Vec<&str>`, borrowing a temporary that
            // is dropped at the end of the statement — a compile error.
            // The `--config` flag was also dropped: `incus network create`
            // takes options as positional `key=value` pairs.
            let mut args: Vec<String> = vec![
                "network".to_string(),
                "create".to_string(),
                network.name.clone(),
                "--type".to_string(),
                network.type_.clone(),
            ];
            for (key, value) in &network.config {
                args.push(format!("{}={}", key, value));
            }
            AsyncCommand::new("incus").args(&args).output().await?;
        }
        Ok(())
    }

    /// Creates profiles, applies their config keys, and attaches devices.
    async fn setup_profiles(&self) -> Result<()> {
        for profile in &self.config.profiles {
            // NOTE(review): create/set/device-add statuses are not checked, so
            // e.g. an already-existing profile is silently tolerated.
            AsyncCommand::new("incus")
                .args(&["profile", "create", &profile.name])
                .output()
                .await?;
            for (key, value) in &profile.config {
                AsyncCommand::new("incus")
                    .args(&["profile", "set", &profile.name, key, value])
                    .output()
                    .await?;
            }
            for (device_name, device_config) in &profile.devices {
                // Device options are `key=value` pairs (the previous code
                // passed key and value as separate arguments).
                // NOTE(review): `incus profile device add` also expects the
                // device *type* as a positional argument after the device
                // name; here it is supplied as a `type=...` pair — confirm
                // against the incus CLI.
                let mut args: Vec<String> = vec![
                    "profile".to_string(),
                    "device".to_string(),
                    "add".to_string(),
                    profile.name.clone(),
                    device_name.clone(),
                ];
                for (key, value) in device_config {
                    args.push(format!("{}={}", key, value));
                }
                AsyncCommand::new("incus").args(&args).output().await?;
            }
        }
        Ok(())
    }

    /// Generates a join token and joins every node after the first.
    async fn join_additional_nodes(&self) -> Result<()> {
        if self.config.nodes.len() <= 1 {
            return Ok(());
        }
        // NOTE(review): a single token (for a member literally named
        // "new-node") is reused for every joining node; cluster join tokens
        // are normally per-member — confirm this is intended.
        let token_output = AsyncCommand::new("incus")
            .args(&["cluster", "add", "new-node"])
            .output()
            .await?;
        let token = String::from_utf8_lossy(&token_output.stdout)
            .trim()
            .to_string();
        for node in &self.config.nodes[1..] {
            self.join_node_to_cluster(&node.address, &token).await?;
        }
        Ok(())
    }

    /// Runs `incus admin init --join-token <token>` on the remote host via SSH.
    async fn join_node_to_cluster(&self, node_address: &str, token: &str) -> Result<()> {
        AsyncCommand::new("ssh")
            .args(&[
                node_address,
                &format!("incus admin init --join-token {}", token),
            ])
            .output()
            .await?;
        Ok(())
    }

    /// Launches a fresh Ubuntu container for `component_name` (optionally
    /// targeted at a specific cluster node), waits for cloud-init to finish,
    /// and provisions the base layout inside it.
    ///
    /// Returns the generated instance name on success.
    pub async fn deploy_component(
        &self,
        component_name: &str,
        node_name: Option<&str>,
    ) -> Result<String> {
        // First 8 chars of a v4 UUID (ASCII hex, so byte slicing is safe).
        let uid = uuid::Uuid::new_v4().to_string();
        let instance_name = format!("gb-{}-{}", component_name, &uid[..8]);
        let mut args = vec!["launch", "ubuntu:24.04", &instance_name, "--profile", "gbo"];
        if let Some(node) = node_name {
            args.extend(&["--target", node]);
        }
        let output = AsyncCommand::new("incus").args(&args).output().await?;
        if !output.status.success() {
            return Err(anyhow::anyhow!(
                "Failed to launch instance: {}",
                String::from_utf8_lossy(&output.stderr)
            ));
        }
        // Block until cloud-init completes inside the new instance.
        AsyncCommand::new("incus")
            .args(&["exec", &instance_name, "--", "cloud-init", "status", "--wait"])
            .output()
            .await?;
        self.setup_component_in_instance(&instance_name, component_name)
            .await?;
        Ok(instance_name)
    }

    /// Runs the base provisioning script (packages, /opt/gbo layout, gbuser)
    /// inside the instance via `incus exec`.
    async fn setup_component_in_instance(
        &self,
        instance_name: &str,
        component_name: &str,
    ) -> Result<()> {
        let setup_script = format!(
            r#"
#!/bin/bash
set -e
# Update system
apt-get update -qq
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq wget curl unzip ca-certificates
# Create gbo directories
mkdir -p /opt/gbo/{{bin,data,conf,logs}}
# Create gbo user
useradd --system --no-create-home --shell /bin/false gbuser
chown -R gbuser:gbuser /opt/gbo
# Install component: {}
echo "Component {} setup complete"
"#,
            component_name, component_name
        );
        AsyncCommand::new("incus")
            .args(&["exec", instance_name, "--", "bash", "-c", &setup_script])
            .output()
            .await?;
        Ok(())
    }

    /// Launches a virtual machine from `template` using the "gbo-vm" profile.
    pub async fn create_vm(&self, vm_name: &str, template: &str) -> Result<String> {
        let output = AsyncCommand::new("incus")
            .args(&["launch", template, vm_name, "--vm", "--profile", "gbo-vm"])
            .output()
            .await?;
        if !output.status.success() {
            return Err(anyhow::anyhow!(
                "Failed to create VM: {}",
                String::from_utf8_lossy(&output.stderr)
            ));
        }
        Ok(vm_name.to_string())
    }

    /// Returns `incus cluster list` parsed as JSON.
    pub async fn get_cluster_status(&self) -> Result<serde_json::Value> {
        let output = AsyncCommand::new("incus")
            .args(&["cluster", "list", "--format", "json"])
            .output()
            .await?;
        let status: serde_json::Value = serde_json::from_slice(&output.stdout)?;
        Ok(status)
    }

    /// Returns `incus list` (all instances) parsed as JSON.
    pub async fn get_instances(&self) -> Result<serde_json::Value> {
        let output = AsyncCommand::new("incus")
            .args(&["list", "--format", "json"])
            .output()
            .await?;
        let instances: serde_json::Value = serde_json::from_slice(&output.stdout)?;
        Ok(instances)
    }

    /// Queries the raw metrics endpoint.
    ///
    /// NOTE(review): `/1.0/metrics` normally serves Prometheus text format,
    /// not JSON, so `serde_json::from_slice` may fail here — confirm.
    pub async fn get_metrics(&self) -> Result<serde_json::Value> {
        let output = AsyncCommand::new("incus")
            .args(&["query", "/1.0/metrics"])
            .output()
            .await?;
        let metrics: serde_json::Value = serde_json::from_slice(&output.stdout)?;
        Ok(metrics)
    }
}
/// Builds the default single-controller cloud configuration used by the
/// bootstrap path: one controller node, one ZFS pool ("gbo-pool"), one
/// NAT-ed bridge network ("gbo-net"), and two profiles — "gbo" for
/// containers and "gbo-vm" for virtual machines.
pub fn create_default_cloud_config() -> IncusCloudConfig {
    // Small helper: turn a slice of (&str, &str) pairs into an owned map.
    let kv = |pairs: &[(&str, &str)]| -> HashMap<String, String> {
        pairs
            .iter()
            .map(|(k, v)| (k.to_string(), v.to_string()))
            .collect()
    };

    let controller = IncusNode {
        name: "controller-1".to_string(),
        address: "10.0.0.10:8443".to_string(),
        role: NodeRole::Controller,
        resources: NodeResources {
            cpu_cores: 8,
            memory_gb: 16,
            storage_gb: 500,
        },
    };

    let pool = StoragePool {
        name: "gbo-pool".to_string(),
        driver: "zfs".to_string(),
        config: kv(&[("size", "100GB")]),
    };

    let net = NetworkConfig {
        name: "gbo-net".to_string(),
        type_: "bridge".to_string(),
        config: kv(&[("ipv4.address", "10.10.10.1/24"), ("ipv4.nat", "true")]),
    };

    // Both profiles share the same NIC device attached to "gbo-net".
    let nic = kv(&[("type", "nic"), ("network", "gbo-net")]);

    let container_profile = ProfileConfig {
        name: "gbo".to_string(),
        devices: HashMap::from([
            ("eth0".to_string(), nic.clone()),
            (
                "root".to_string(),
                kv(&[("type", "disk"), ("pool", "gbo-pool"), ("path", "/")]),
            ),
        ]),
        config: kv(&[
            ("security.privileged", "true"),
            ("limits.cpu", "2"),
            ("limits.memory", "4GB"),
        ]),
    };

    let vm_profile = ProfileConfig {
        name: "gbo-vm".to_string(),
        devices: HashMap::from([
            ("eth0".to_string(), nic),
            (
                "root".to_string(),
                kv(&[
                    ("type", "disk"),
                    ("pool", "gbo-pool"),
                    ("path", "/"),
                    ("size", "20GB"),
                ]),
            ),
        ]),
        config: kv(&[("limits.cpu", "4"), ("limits.memory", "8GB")]),
    };

    IncusCloudConfig {
        cluster_name: "gbo-cloud".to_string(),
        nodes: vec![controller],
        storage_pools: vec![pool],
        networks: vec![net],
        profiles: vec![container_profile, vm_profile],
    }
}

View file

@ -12,8 +12,6 @@ use std::path::PathBuf;
#[derive(Deserialize, Debug)]
struct ComponentEntry {
url: String,
filename: String,
sha256: String,
}
#[derive(Deserialize, Debug)]
@ -33,6 +31,7 @@ fn get_component_url(name: &str) -> Option<String> {
.map(|c| c.url.clone())
}
#[cfg(target_os = "windows")]
fn safe_nvcc_version() -> Option<std::process::Output> {
SafeCommand::new("nvcc")
.and_then(|c| c.arg("--version"))

View file

@ -2,12 +2,12 @@ use crate::core::config::DriveConfig;
use crate::core::secrets::SecretsManager;
use anyhow::{Context, Result};
#[cfg(feature = "drive")]
use aws_config::BehaviorVersion;
#[cfg(feature = "drive")]
use aws_config::retry::RetryConfig;
#[cfg(feature = "drive")]
use aws_config::timeout::TimeoutConfig;
#[cfg(feature = "drive")]
use aws_config::BehaviorVersion;
#[cfg(feature = "drive")]
use aws_sdk_s3::{config::Builder as S3ConfigBuilder, Client as S3Client};
use diesel::Connection;
use diesel::{
@ -112,7 +112,10 @@ pub async fn create_s3_operator(
if std::path::Path::new(CA_CERT_PATH).exists() {
std::env::set_var("AWS_CA_BUNDLE", CA_CERT_PATH);
std::env::set_var("SSL_CERT_FILE", CA_CERT_PATH);
debug!("Set AWS_CA_BUNDLE and SSL_CERT_FILE to {} for S3 client", CA_CERT_PATH);
debug!(
"Set AWS_CA_BUNDLE and SSL_CERT_FILE to {} for S3 client",
CA_CERT_PATH
);
}
// Configure timeouts to prevent memory leaks on connection failures
@ -124,8 +127,7 @@ pub async fn create_s3_operator(
.build();
// Limit retries to prevent 100% CPU on connection failures
let retry_config = RetryConfig::standard()
.with_max_attempts(2);
let retry_config = RetryConfig::standard().with_max_attempts(2);
let base_config = aws_config::defaults(BehaviorVersion::latest())
.endpoint_url(endpoint)
@ -330,145 +332,316 @@ pub fn run_migrations(pool: &DbPool) -> Result<(), Box<dyn std::error::Error + S
run_migrations_on_conn(&mut conn)
}
pub fn run_migrations_on_conn(conn: &mut diesel::PgConnection) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
pub fn run_migrations_on_conn(
conn: &mut diesel::PgConnection,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
// Core migrations (Always run)
const CORE_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/core");
conn.run_pending_migrations(CORE_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Core migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(CORE_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Core migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
// Calendar
#[cfg(feature = "calendar")]
{
const CALENDAR_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/calendar");
conn.run_pending_migrations(CALENDAR_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Calendar migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(CALENDAR_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Calendar migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// People (CRM)
#[cfg(feature = "people")]
{
const PEOPLE_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/people");
conn.run_pending_migrations(PEOPLE_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("People migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(PEOPLE_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"People migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Mail
#[cfg(feature = "mail")]
{
const MAIL_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/mail");
conn.run_pending_migrations(MAIL_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Mail migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(MAIL_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Mail migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Tasks
#[cfg(feature = "tasks")]
{
const TASKS_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/tasks");
conn.run_pending_migrations(TASKS_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Tasks migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(TASKS_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Tasks migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Drive
#[cfg(feature = "drive")]
{
const DRIVE_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/drive");
conn.run_pending_migrations(DRIVE_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Drive migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(DRIVE_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Drive migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Automation
#[cfg(feature = "automation")]
{
const AUTOMATION_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/automation");
conn.run_pending_migrations(AUTOMATION_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Automation migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
const AUTOMATION_MIGRATIONS: EmbeddedMigrations =
embed_migrations!("migrations/automation");
conn.run_pending_migrations(AUTOMATION_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Automation migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Paper
#[cfg(feature = "paper")]
{
const PAPER_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/paper");
conn.run_pending_migrations(PAPER_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Paper migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(PAPER_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Paper migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Designer
#[cfg(feature = "designer")]
{
const DESIGNER_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/designer");
conn.run_pending_migrations(DESIGNER_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Designer migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(DESIGNER_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Designer migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Learn
#[cfg(feature = "learn")]
{
const LEARN_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/learn");
conn.run_pending_migrations(LEARN_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Learn migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Video
#[cfg(feature = "video")]
{
const VIDEO_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/video");
conn.run_pending_migrations(VIDEO_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Video migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// LLM
#[cfg(feature = "llm")]
{
const LLM_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/llm");
conn.run_pending_migrations(LLM_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("LLM migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(LLM_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!("LLM migration error: {}", e)))
as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Products
#[cfg(feature = "billing")]
{
const PRODUCTS_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/products");
conn.run_pending_migrations(PRODUCTS_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Products migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Billing
const BILLING_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/billing");
conn.run_pending_migrations(BILLING_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Billing migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(BILLING_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Billing migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
// Attendant
#[cfg(feature = "attendant")]
{
const ATTENDANT_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/attendant");
conn.run_pending_migrations(ATTENDANT_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Attendant migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(ATTENDANT_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Attendant migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Analytics
#[cfg(feature = "analytics")]
{
const ANALYTICS_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/analytics");
conn.run_pending_migrations(ANALYTICS_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Analytics migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(ANALYTICS_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Analytics migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Dashboards
#[cfg(feature = "dashboards")]
{
const DASHBOARDS_MIGRATIONS: EmbeddedMigrations =
embed_migrations!("migrations/dashboards");
conn.run_pending_migrations(DASHBOARDS_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Dashboards migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Meet
#[cfg(feature = "meet")]
{
const MEET_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/meet");
conn.run_pending_migrations(MEET_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Meet migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(MEET_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Meet migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Tickets (Feedback)
const TICKETS_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/tickets");
conn.run_pending_migrations(TICKETS_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Tickets migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(TICKETS_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Tickets migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
// Compliance
#[cfg(feature = "compliance")]
{
const COMPLIANCE_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/compliance");
conn.run_pending_migrations(COMPLIANCE_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Compliance migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
const COMPLIANCE_MIGRATIONS: EmbeddedMigrations =
embed_migrations!("migrations/compliance");
conn.run_pending_migrations(COMPLIANCE_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Compliance migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Canvas
#[cfg(feature = "canvas")]
{
const CANVAS_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/canvas");
conn.run_pending_migrations(CANVAS_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Canvas migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(CANVAS_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Canvas migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Social
#[cfg(feature = "social")]
{
const SOCIAL_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/social");
conn.run_pending_migrations(SOCIAL_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Social migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(SOCIAL_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Social migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Workspaces
#[cfg(feature = "workspaces")]
{
const WORKSPACE_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/workspaces");
conn.run_pending_migrations(WORKSPACE_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Workspace migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(WORKSPACE_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Workspace migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Goals
#[cfg(feature = "goals")]
{
const GOALS_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/goals");
conn.run_pending_migrations(GOALS_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Goals migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(GOALS_MIGRATIONS).map_err(|e| {
Box::new(std::io::Error::other(format!(
"Goals migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
// Research
#[cfg(feature = "research")]
{
const RESEARCH_MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/research");
conn.run_pending_migrations(RESEARCH_MIGRATIONS).map_err(|e| Box::new(std::io::Error::other(format!("Research migration error: {}", e))) as Box<dyn std::error::Error + Send + Sync>)?;
conn.run_pending_migrations(RESEARCH_MIGRATIONS)
.map_err(|e| {
Box::new(std::io::Error::other(format!(
"Research migration error: {}",
e
))) as Box<dyn std::error::Error + Send + Sync>
})?;
}
Ok(())
@ -487,7 +660,13 @@ pub fn sanitize_path_component(component: &str) -> String {
pub fn sanitize_path_for_filename(path: &str) -> String {
path.chars()
.map(|c| if c.is_alphanumeric() || c == '_' || c == '-' { c } else { '_' })
.map(|c| {
if c.is_alphanumeric() || c == '_' || c == '-' {
c
} else {
'_'
}
})
.collect()
}
@ -569,23 +748,30 @@ pub fn create_tls_client_with_ca(ca_cert_path: &str, timeout_secs: Option<u64>)
// If it doesn't exist, we use system CA store (production with public certs)
if std::path::Path::new(ca_cert_path).exists() {
match std::fs::read(ca_cert_path) {
Ok(ca_cert_pem) => {
match Certificate::from_pem(&ca_cert_pem) {
Ok(ca_cert_pem) => match Certificate::from_pem(&ca_cert_pem) {
Ok(ca_cert) => {
builder = builder.add_root_certificate(ca_cert);
debug!("Using local CA certificate from {} (dev stack mode)", ca_cert_path);
debug!(
"Using local CA certificate from {} (dev stack mode)",
ca_cert_path
);
}
Err(e) => {
warn!("Failed to parse CA certificate from {}: {}", ca_cert_path, e);
}
}
warn!(
"Failed to parse CA certificate from {}: {}",
ca_cert_path, e
);
}
},
Err(e) => {
warn!("Failed to read CA certificate from {}: {}", ca_cert_path, e);
}
}
} else {
debug!("Local CA cert not found at {}, using system CA store (production mode)", ca_cert_path);
debug!(
"Local CA cert not found at {}, using system CA store (production mode)",
ca_cert_path
);
}
builder.build().unwrap_or_else(|e| {
@ -606,7 +792,13 @@ pub fn format_timestamp_vtt(ms: i64) -> String {
let mins = secs / 60;
let hours = mins / 60;
let millis = ms % 1000;
format!("{:02}:{:02}:{:02}.{:03}", hours, mins % 60, secs % 60, millis)
format!(
"{:02}:{:02}:{:02}.{:03}",
hours,
mins % 60,
secs % 60,
millis
)
}
pub fn format_timestamp_srt(ms: i64) -> String {
@ -614,7 +806,13 @@ pub fn format_timestamp_srt(ms: i64) -> String {
let mins = secs / 60;
let hours = mins / 60;
let millis = ms % 1000;
format!("{:02}:{:02}:{:02},{:03}", hours, mins % 60, secs % 60, millis)
format!(
"{:02}:{:02}:{:02},{:03}",
hours,
mins % 60,
secs % 60,
millis
)
}
pub fn parse_hex_color(hex: &str) -> Option<(u8, u8, u8)> {