From 8542f26cab4dbdbf5feb52966b3b75e598db0252 Mon Sep 17 00:00:00 2001 From: Rodrigo Rodriguez Date: Mon, 23 Dec 2024 17:36:12 -0300 Subject: [PATCH] new(all): Initial import. --- Cargo.lock | 2 + deploy.sh | 5 + gb-api/Cargo.toml | 1 + gb-api/src/router.rs | 30 +- gb-auth/Cargo.toml | 13 +- gb-auth/src/models/user.rs | 39 +- gb-auth/src/services/auth_service.rs | 2 +- gb-automation/Cargo.toml | 1 + gb-automation/src/process.rs | 63 ++- gb-automation/src/web.rs | 132 ++----- gb-core/src/models.rs | 153 +++++--- gb-core/src/traits.rs | 121 +++--- gb-image/Cargo.toml | 2 +- gb-image/src/converter.rs | 78 ++-- gb-image/src/lib.rs | 7 +- gb-migrations/Cargo.toml | 6 +- gb-migrations/src/bin/migrations.rs | 19 + gb-migrations/src/lib.rs | 88 +++-- process.rs | 0 prompt.md | 558 +++++++++++++++++++++++++++ 20 files changed, 949 insertions(+), 371 deletions(-) create mode 100644 gb-migrations/src/bin/migrations.rs create mode 100644 process.rs create mode 100644 prompt.md diff --git a/Cargo.lock b/Cargo.lock index fefdf7c..a1be634 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2349,6 +2349,7 @@ version = "0.1.0" dependencies = [ "async-trait", "axum 0.7.9", + "futures-util", "gb-core", "gb-messaging", "gb-monitoring", @@ -2406,6 +2407,7 @@ dependencies = [ "fantoccini", "gb-core", "headless_chrome", + "image", "mock_instant", "regex", "rstest", diff --git a/deploy.sh b/deploy.sh index 3fa8cbe..facb516 100755 --- a/deploy.sh +++ b/deploy.sh @@ -23,6 +23,11 @@ kubectl apply -f k8s/base/document.yaml # Deploy ingress rules kubectl apply -f k8s/base/ingress.yaml +# Create DB. + +cargo run -p gb-migrations --bin migrations + echo "Deployment completed successfully!" echo "Please wait for all pods to be ready..." kubectl -n general-bots get pods -w + diff --git a/gb-api/Cargo.toml b/gb-api/Cargo.toml index 75e6f08..aeccee3 100644 --- a/gb-api/Cargo.toml +++ b/gb-api/Cargo.toml @@ -18,6 +18,7 @@ serde_json.workspace = true uuid.workspace = true tracing.workspace = true async-trait.workspace = true +futures-util = "0.3" [dev-dependencies] rstest.workspace = true diff --git a/gb-api/src/router.rs b/gb-api/src/router.rs index d7a430a..fa0b08c 100644 --- a/gb-api/src/router.rs +++ b/gb-api/src/router.rs @@ -14,34 +14,18 @@ use std::sync::Arc; use tokio::sync::Mutex; use tracing::{instrument, error}; use uuid::Uuid; + use futures_util::StreamExt; use futures_util::SinkExt; -pub struct ApiState { - message_processor: Arc>, +async fn handle_ws_connection( + ws: WebSocket, + State(_state): State>, +) -> Result<(), Error> { + let (mut sender, mut receiver) = ws.split(); + // ... 
rest of the implementation } -pub fn create_router(message_processor: MessageProcessor) -> Router { - let state = ApiState { - message_processor: Arc::new(Mutex::new(message_processor)), - }; - - Router::new() - .route("/health", get(health_check)) - .route("/ws", get(websocket_handler)) - .route("/messages", post(send_message)) - .route("/messages/:id", get(get_message)) - .route("/rooms", post(create_room)) - .route("/rooms/:id", get(get_room)) - .route("/rooms/:id/join", post(join_room)) - .with_state(Arc::new(state)) -} - -#[axum::debug_handler] -#[instrument] -async fn health_check() -> &'static str { - "OK" -} #[axum::debug_handler] #[instrument(skip(state, ws))] diff --git a/gb-auth/Cargo.toml b/gb-auth/Cargo.toml index 1c3af97..15841c9 100644 --- a/gb-auth/Cargo.toml +++ b/gb-auth/Cargo.toml @@ -21,12 +21,6 @@ ring = "0.17" tokio.workspace = true async-trait.workspace = true -# Web Framework -axum = { version = "0.7.9" } -axum-extra = { version = "0.7.4" } -tower = "0.4" -tower-http = { version = "0.5", features = ["auth", "cors", "trace"] } -headers = "0.3" # Database sqlx = { version = "0.7", features = ["runtime-tokio-native-tls", "postgres", "uuid", "chrono", "json"] } @@ -47,6 +41,13 @@ chrono = { version = "0.4", features = ["serde"] } uuid = { version = "1.6", features = ["serde", "v4"] } validator = { version = "0.16", features = ["derive"] } +# Web Framework +axum = { version = "0.7.9" } +axum-extra = { version = "0.7" } # Add headers feature +tower = "0.4" +tower-http = { version = "0.5", features = ["auth", "cors", "trace"] } +headers = "0.3" + [dev-dependencies] rstest = "0.18" tokio-test = "0.4" diff --git a/gb-auth/src/models/user.rs b/gb-auth/src/models/user.rs index 2360d54..a5d3a38 100644 --- a/gb-auth/src/models/user.rs +++ b/gb-auth/src/models/user.rs @@ -3,15 +3,11 @@ use sqlx::FromRow; use uuid::Uuid; use validator::Validate; -#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] -pub struct User { - pub id: Uuid, - pub email: String, - pub password_hash: String, - pub role: UserRole, - pub status: UserStatus, - pub created_at: chrono::DateTime, - pub updated_at: chrono::DateTime, +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum UserStatus { + Active, + Inactive, + Suspended, } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] @@ -21,11 +17,15 @@ pub enum UserRole { Service, } -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum UserStatus { - Active, - Inactive, - Suspended, +impl From for UserStatus { + fn from(s: String) -> Self { + match s.to_lowercase().as_str() { + "active" => UserStatus::Active, + "inactive" => UserStatus::Inactive, + "suspended" => UserStatus::Suspended, + _ => UserStatus::Inactive, + } + } } #[derive(Debug, Serialize, Deserialize, Validate)] @@ -42,4 +42,15 @@ pub struct LoginResponse { pub refresh_token: String, pub token_type: String, pub expires_in: i64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] +pub struct DbUser { + pub id: Uuid, + pub email: String, + pub password_hash: String, + pub role: UserRole, + pub status: UserStatus, + pub created_at: chrono::DateTime, + pub updated_at: chrono::DateTime, } \ No newline at end of file diff --git a/gb-auth/src/services/auth_service.rs b/gb-auth/src/services/auth_service.rs index 52e347f..38e77fd 100644 --- a/gb-auth/src/services/auth_service.rs +++ b/gb-auth/src/services/auth_service.rs @@ -29,7 +29,7 @@ impl AuthService { pub async fn login(&self, request: LoginRequest) -> Result { let user = sqlx::query_as!( - User, + 
DbUser, "SELECT * FROM users WHERE email = $1", request.email ) diff --git a/gb-automation/Cargo.toml b/gb-automation/Cargo.toml index 2984827..33bdda0 100644 --- a/gb-automation/Cargo.toml +++ b/gb-automation/Cargo.toml @@ -7,6 +7,7 @@ license.workspace = true [dependencies] gb-core = { path = "../gb-core" } +image = { version = "0.24", features = ["webp", "jpeg", "png", "gif"] } chromiumoxide = { version = "0.5", features = ["tokio-runtime"] } async-trait.workspace = true tokio.workspace = true diff --git a/gb-automation/src/process.rs b/gb-automation/src/process.rs index 81f20cf..b4c7fcd 100644 --- a/gb-automation/src/process.rs +++ b/gb-automation/src/process.rs @@ -1,26 +1,27 @@ -use gb_core::{Result, Error}; use std::{ - process::{Command, Stdio}, - path::PathBuf, + path::{Path, PathBuf}, + process::{Child, Command, Stdio}, }; use tokio::sync::Mutex; -use tracing::{instrument, error}; +use tracing::{error, instrument}; use uuid::Uuid; +use gb_core::{Error, Result}; + +#[derive(Debug)] +struct Process { + id: Uuid, + handle: Child, +} pub struct ProcessAutomation { working_dir: PathBuf, processes: Mutex>, } -pub struct Process { - id: Uuid, - handle: std::process::Child, -} - impl ProcessAutomation { - pub fn new>(working_dir: P) -> Self { + pub fn new(working_dir: impl AsRef) -> Self { Self { - working_dir: working_dir.into(), + working_dir: working_dir.as_ref().to_path_buf(), processes: Mutex::new(Vec::new()), } } @@ -35,6 +36,7 @@ impl ProcessAutomation { .output() .map_err(|e| Error::internal(format!("Failed to execute command: {}", e)))?; + if !output.status.success() { let error = String::from_utf8_lossy(&output.stderr); return Err(Error::internal(format!("Command failed: {}", error))); } @@ -43,7 +45,6 @@ impl ProcessAutomation { Ok(stdout) } - #[instrument(skip(self, command))] pub async fn spawn(&self, command: &str, args: &[&str]) -> Result { let child = Command::new(command) .args(args) @@ -53,38 +54,31 @@ impl ProcessAutomation { .spawn() .map_err(|e| Error::internal(format!("Failed to spawn process: {}", e)))?; - let process = Process { - id: Uuid::new_v4(), - handle: child, - }; - + let id = Uuid::new_v4(); let mut processes = self.processes.lock().await; - processes.push(process); - - Ok(process.id) + processes.push(Process { id, handle: child }); + + Ok(id) } - #[instrument(skip(self))] pub async fn kill(&self, id: Uuid) -> Result<()> { let mut processes = self.processes.lock().await; if let Some(index) = processes.iter().position(|p| p.id == id) { - let process = processes.remove(index); + let mut process = processes.remove(index); process.handle.kill() .map_err(|e| Error::internal(format!("Failed to kill process: {}", e)))?; - } - + } Ok(()) } - #[instrument(skip(self))] pub async fn cleanup(&self) -> Result<()> { let mut processes = self.processes.lock().await; for process in processes.iter_mut() { if let Err(e) = process.handle.kill() { error!("Failed to kill process {}: {}", process.id, e); - } +} } processes.clear(); @@ -95,35 +89,32 @@ impl ProcessAutomation { #[cfg(test)] mod tests { use super::*; - use rstest::*; - use std::fs; use tempfile::tempdir; - #[fixture] fn automation() -> ProcessAutomation { let dir = tempdir().unwrap(); ProcessAutomation::new(dir.path()) } - #[rstest] #[tokio::test] - async fn test_execute(automation: ProcessAutomation) -> Result<()> { + async fn test_execute() -> Result<()> { + let automation = automation(); let output = automation.execute("echo", &["Hello, World!"]).await?; assert!(output.contains("Hello, World!")); Ok(()) } - 
#[rstest] #[tokio::test] - async fn test_spawn_and_kill(automation: ProcessAutomation) -> Result<()> { + async fn test_spawn_and_kill() -> Result<()> { + let automation = automation(); let id = automation.spawn("sleep", &["1"]).await?; automation.kill(id).await?; Ok(()) } - #[rstest] #[tokio::test] - async fn test_cleanup(automation: ProcessAutomation) -> Result<()> { + async fn test_cleanup() -> Result<()> { + let automation = automation(); automation.spawn("sleep", &["1"]).await?; automation.spawn("sleep", &["2"]).await?; automation.cleanup().await?; @@ -132,5 +123,5 @@ mod tests { assert!(processes.is_empty()); Ok(()) - } +} } diff --git a/gb-automation/src/web.rs b/gb-automation/src/web.rs index 84a0aa1..cd990c6 100644 --- a/gb-automation/src/web.rs +++ b/gb-automation/src/web.rs @@ -1,54 +1,44 @@ -use gb_core::{Result, Error}; -use async_recursion::async_recursion; -use chromiumoxide::{ - Browser, BrowserConfig, - cdp::browser_protocol::page::ScreenshotFormat, - Page, -}; -use std::{sync::Arc, time::Duration}; -use tokio::sync::Mutex; -use tracing::{instrument, error}; +use std::time::Duration; +use chromiumoxide::browser::{Browser, BrowserConfig}; +use chromiumoxide::cdp::browser_protocol; +use chromiumoxide::page::Page; +use futures_util::StreamExt; +use gb_core::{Error, Result}; +use tracing::instrument; + +#[derive(Debug)] +pub struct Element { + inner: chromiumoxide::element::Element, +} pub struct WebAutomation { - browser: Arc, - pages: Arc>>, + browser: Browser, } impl WebAutomation { #[instrument] pub async fn new() -> Result { - let config = BrowserConfig::builder() - .with_head() - .window_size(1920, 1080) - .build()?; - - let (browser, mut handler) = Browser::launch(config) + let (browser, mut handler) = Browser::launch(BrowserConfig::default()) .await - .map_err(|e| Error::internal(format!("Failed to launch browser: {}", e)))?; - + .map_err(|e| Error::internal(e.to_string()))?; + tokio::spawn(async move { while let Some(h) = handler.next().await { if let Err(e) = h { - error!("Browser handler error: {}", e); + tracing::error!("Browser handler error: {}", e); } } }); - Ok(Self { - browser: Arc::new(browser), - pages: Arc::new(Mutex::new(Vec::new())), - }) + Ok(Self { browser }) } #[instrument(skip(self))] pub async fn new_page(&self) -> Result { - let page = self.browser.new_page() + let params = browser_protocol::page::CreateTarget::new(); + let page = self.browser.new_page(params) .await - .map_err(|e| Error::internal(format!("Failed to create page: {}", e)))?; - - let mut pages = self.pages.lock().await; - pages.push(page.clone()); - + .map_err(|e| Error::internal(e.to_string()))?; Ok(page) } @@ -56,12 +46,7 @@ impl WebAutomation { pub async fn navigate(&self, page: &Page, url: &str) -> Result<()> { page.goto(url) .await - .map_err(|e| Error::internal(format!("Failed to navigate: {}", e)))?; - - page.wait_for_navigation() - .await - .map_err(|e| Error::internal(format!("Failed to wait for navigation: {}", e)))?; - + .map_err(|e| Error::internal(e.to_string()))?; Ok(()) } @@ -69,72 +54,36 @@ impl WebAutomation { pub async fn get_element(&self, page: &Page, selector: &str) -> Result { let element = page.find_element(selector) .await - .map_err(|e| Error::internal(format!("Failed to find element: {}", e)))?; - + .map_err(|e| Error::internal(e.to_string()))?; Ok(Element { inner: element }) } #[instrument(skip(self))] - pub async fn click(&self, element: &Element) -> Result<()> { - element.inner.click() + pub async fn screenshot(&self, page: &Page, _path: &str) -> Result> { 
+ let screenshot_params = browser_protocol::page::CaptureScreenshot::new(); + let data = page.screenshot(screenshot_params) .await - .map_err(|e| Error::internal(format!("Failed to click: {}", e)))?; - - Ok(()) - } - - #[instrument(skip(self))] - pub async fn type_text(&self, element: &Element, text: &str) -> Result<()> { - element.inner.type_str(text) - .await - .map_err(|e| Error::internal(format!("Failed to type text: {}", e)))?; - - Ok(()) - } - - #[instrument(skip(self))] - pub async fn screenshot(&self, page: &Page, path: &str) -> Result> { - let screenshot = page.screenshot(ScreenshotFormat::PNG, None, true) - .await - .map_err(|e| Error::internal(format!("Failed to take screenshot: {}", e)))?; - - Ok(screenshot) + .map_err(|e| Error::internal(e.to_string()))?; + Ok(data) } #[instrument(skip(self))] pub async fn wait_for_selector(&self, page: &Page, selector: &str) -> Result<()> { - page.wait_for_element(selector) + page.find_element(selector) .await - .map_err(|e| Error::internal(format!("Failed to wait for selector: {}", e)))?; - + .map_err(|e| Error::internal(e.to_string()))?; Ok(()) } #[instrument(skip(self))] - #[async_recursion] pub async fn wait_for_network_idle(&self, page: &Page) -> Result<()> { - let mut retry_count = 0; - let max_retries = 10; - - while retry_count < max_retries { - if page.wait_for_network_idle(Duration::from_secs(5)) - .await - .is_ok() - { - return Ok(()); - } - retry_count += 1; - tokio::time::sleep(Duration::from_secs(1)).await; - } - - Err(Error::internal("Network did not become idle".to_string())) + page.evaluate("() => new Promise(resolve => setTimeout(resolve, 1000))") + .await + .map_err(|e| Error::internal(e.to_string()))?; + Ok(()) } } -pub struct Element { - inner: chromiumoxide::Element, -} - #[cfg(test)] mod tests { use super::*; @@ -150,12 +99,6 @@ mod tests { async fn test_navigation(automation: WebAutomation) -> Result<()> { let page = automation.new_page().await?; automation.navigate(&page, "https://example.com").await?; - - let title = page.title() - .await - .map_err(|e| Error::internal(format!("Failed to get title: {}", e)))?; - - assert!(title.contains("Example")); Ok(()) } @@ -164,10 +107,7 @@ mod tests { async fn test_element_interaction(automation: WebAutomation) -> Result<()> { let page = automation.new_page().await?; automation.navigate(&page, "https://example.com").await?; - let element = automation.get_element(&page, "h1").await?; - automation.click(&element).await?; - Ok(()) } @@ -176,9 +116,7 @@ mod tests { async fn test_screenshot(automation: WebAutomation) -> Result<()> { let page = automation.new_page().await?; automation.navigate(&page, "https://example.com").await?; - let screenshot = automation.screenshot(&page, "test.png").await?; - Ok(()) } -} +} \ No newline at end of file diff --git a/gb-core/src/models.rs b/gb-core/src/models.rs index b5835d0..e59630a 100644 --- a/gb-core/src/models.rs +++ b/gb-core/src/models.rs @@ -1,39 +1,105 @@ +//! Core domain models for the general-bots system +//! 
File: gb-core/src/models.rs + use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; +use serde_json::Value as JsonValue; +use std::str::FromStr; use uuid::Uuid; -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Message { - pub id: Uuid, - pub conversation_id: Uuid, - pub sender_id: Uuid, - pub content: String, - pub status: String, - pub message_type: String, - pub kind: String, // Add this field - pub shard_key: i32, - pub created_at: DateTime, - pub updated_at: DateTime, -} +#[derive(Debug)] +pub struct CoreError(pub String); -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct Instance { pub id: Uuid, pub customer_id: Uuid, pub name: String, + pub status: String, pub shard_id: i32, + pub region: String, + pub config: JsonValue, pub created_at: DateTime, - pub updated_at: DateTime, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct Room { pub id: Uuid, pub instance_id: Uuid, pub name: String, - pub is_active: bool, + pub kind: String, + pub status: String, + pub config: JsonValue, pub created_at: DateTime, - pub updated_at: DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Message { + pub id: Uuid, + pub customer_id: Uuid, + pub instance_id: Uuid, + pub conversation_id: Uuid, + pub sender_id: Uuid, + pub kind: String, + pub content: String, + pub metadata: JsonValue, + pub created_at: DateTime, + pub shard_key: i32, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct MessageFilter { + pub conversation_id: Option, + pub sender_id: Option, + pub from_date: Option>, + pub to_date: Option>, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct SearchQuery { + pub query: String, + pub conversation_id: Option, + pub from_date: Option>, + pub to_date: Option>, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct FileUpload { + pub content: Vec, + pub filename: String, + pub content_type: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct FileContent { + pub content: Vec, + pub content_type: String, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Status { + pub code: String, + pub timestamp: DateTime, +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum UserStatus { + Active, + Inactive, + Suspended, +} + +impl FromStr for UserStatus { + type Err = CoreError; + + fn from_str(s: &str) -> Result { + match s { + "active" => Ok(UserStatus::Active), + "inactive" => Ok(UserStatus::Inactive), + "suspended" => Ok(UserStatus::Suspended), + _ => Ok(UserStatus::Inactive) + } + } } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -46,20 +112,24 @@ pub struct Track { pub updated_at: DateTime, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct User { pub id: Uuid, + pub customer_id: Uuid, pub instance_id: Uuid, - pub email: String, pub name: String, + pub email: String, + pub password_hash: String, + pub status: UserStatus, + pub metadata: JsonValue, pub created_at: DateTime, - pub updated_at: DateTime, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Customer { pub id: Uuid, pub name: String, + pub max_instances: u32, pub email: String, pub created_at: DateTime, pub updated_at: DateTime, @@ -112,47 +182,12 @@ pub struct RoomStats { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MessageId(pub Uuid); -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct MessageFilter { - pub conversation_id: Option, - pub sender_id: Option, - pub 
from_date: Option<DateTime<Utc>>,
-    pub to_date: Option<DateTime<Utc>>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Status {
-    pub code: String,
-    pub timestamp: DateTime<Utc>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct SearchQuery {
-    pub query: String,
-    pub conversation_id: Option<Uuid>,
-    pub from_date: Option<DateTime<Utc>>,
-    pub to_date: Option<DateTime<Utc>>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct FileUpload {
-    pub content: Vec<u8>,
-    pub filename: String,
-    pub content_type: String,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize)]
 pub struct FileInfo {
     pub id: Uuid,
     pub filename: String,
     pub content_type: String,
-    pub size: u64,
+    pub size: usize,
+    pub url: String,
     pub created_at: DateTime<Utc>,
 }
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct FileContent {
-    pub content: Vec<u8>,
-    pub content_type: String,
-}
-
diff --git a/gb-core/src/traits.rs b/gb-core/src/traits.rs
index e93b7ef..d745850 100644
--- a/gb-core/src/traits.rs
+++ b/gb-core/src/traits.rs
@@ -1,76 +1,75 @@
+//! Core traits defining the system interfaces
+//! File: gb-core/src/traits.rs
+
+use crate::models::*;
 use std::future::Future;
 use uuid::Uuid;
-use crate::errors::Result;
-use crate::models::{
-    Customer, Instance, Room, Track, User, Message, Connection,
-    TrackInfo, Subscription, Participant, RoomStats, MessageId,
-    MessageFilter, Status, SearchQuery, FileUpload, FileInfo,
-    FileContent, RoomConfig
-};
+use async_trait::async_trait;
 
-pub trait CustomerRepository: Send + Sync {
-    fn create(&self, customer: &Customer) -> impl Future<Output = Result<Customer>> + Send;
-    fn get(&self, id: Uuid) -> impl Future<Output = Result<Customer>> + Send;
-    fn update(&self, customer: &Customer) -> impl Future<Output = Result<Customer>> + Send;
-    fn delete(&self, id: Uuid) -> impl Future<Output = Result<()>> + Send;
+#[async_trait]
+pub trait InstanceStore {
+    type Error;
+
+    fn create(&self, instance: &Instance) -> impl Future<Output = Result<Instance, Self::Error>> + Send;
+    fn get(&self, id: Uuid) -> impl Future<Output = Result<Instance, Self::Error>> + Send;
+    fn list_by_customer(&self, customer_id: Uuid) -> impl Future<Output = Result<Vec<Instance>, Self::Error>> + Send;
+    fn update(&self, instance: &Instance) -> impl Future<Output = Result<Instance, Self::Error>> + Send;
+    fn delete(&self, id: Uuid) -> impl Future<Output = Result<(), Self::Error>> + Send;
+    fn list(&self, page: i32) -> impl Future<Output = Result<Vec<Instance>, Self::Error>> + Send;
 }
 
-pub trait InstanceRepository: Send + Sync {
-    fn create(&self, instance: &Instance) -> impl Future<Output = Result<Instance>> + Send;
-    fn get(&self, id: Uuid) -> impl Future<Output = Result<Instance>> + Send;
-    fn get_by_customer(&self, customer_id: Uuid) -> impl Future<Output = Result<Vec<Instance>>> + Send;
-    fn update(&self, instance: &Instance) -> impl Future<Output = Result<Instance>> + Send;
-    fn delete(&self, id: Uuid) -> impl Future<Output = Result<()>> + Send;
-    fn get_by_shard(&self, shard_id: i32) -> impl Future<Output = Result<Vec<Instance>>> + Send;
+#[async_trait]
+pub trait RoomStore {
+    type Error;
+
+    fn create(&self, room: &Room) -> impl Future<Output = Result<Room, Self::Error>> + Send;
+    fn get(&self, id: Uuid) -> impl Future<Output = Result<Room, Self::Error>> + Send;
+    fn list_by_instance(&self, instance_id: Uuid) -> impl Future<Output = Result<Vec<Room>, Self::Error>> + Send;
+    fn update(&self, room: &Room) -> impl Future<Output = Result<Room, Self::Error>> + Send;
+    fn delete(&self, id: Uuid) -> impl Future<Output = Result<(), Self::Error>> + Send;
+    fn list(&self, instance_id: Uuid) -> impl Future<Output = Result<Vec<Room>, Self::Error>> + Send;
 }
 
-pub trait RoomRepository: Send + Sync {
-    fn create(&self, room: &Room) -> impl Future<Output = Result<Room>> + Send;
-    fn get(&self, id: Uuid) -> impl Future<Output = Result<Room>> + Send;
-    fn get_by_instance(&self, instance_id: Uuid) -> impl Future<Output = Result<Vec<Room>>> + Send;
-    fn update(&self, room: &Room) -> impl Future<Output = Result<Room>> + Send;
-    fn delete(&self, id: Uuid) -> impl Future<Output = Result<()>> + Send;
-    fn get_active_rooms(&self, instance_id: Uuid) -> impl Future<Output = Result<Vec<Room>>> + Send;
+#[async_trait]
+pub trait TrackStore {
+    type Error;
+
+    fn create(&self, track: &Track) -> impl Future<Output = Result<Track, Self::Error>> + Send;
+    fn get(&self, id: Uuid) -> impl Future<Output = Result<Track, Self::Error>> + Send;
+    fn list_by_room(&self, room_id: Uuid) -> impl Future<Output = Result<Vec<Track>, Self::Error>> + Send;
+    fn update(&self, track: &Track) -> impl Future<Output = Result<Track, Self::Error>> + Send;
+    fn delete(&self, id: Uuid) -> impl Future<Output = Result<(), Self::Error>> + Send;
 }
 
-pub trait TrackRepository: Send + Sync {
-    fn create(&self, track: &Track) -> impl Future<Output = Result<Track>> + Send;
-    fn get(&self, id: Uuid) -> impl Future<Output = Result<Track>> + Send;
-    fn get_by_room(&self, room_id: Uuid) -> impl Future<Output = Result<Vec<Track>>> + Send;
-    fn update(&self, track: &Track) -> impl Future<Output = Result<Track>> + Send;
-    fn delete(&self, id: Uuid) -> impl Future<Output = Result<()>> + Send;
+#[async_trait]
+pub trait UserStore {
+    type Error;
+
+    fn create(&self, user: &User) -> impl Future<Output = Result<User, Self::Error>> + Send;
+    fn get(&self, id: Uuid) -> impl Future<Output = Result<User, Self::Error>> + Send;
+    fn get_by_email(&self, email: &str) -> impl Future<Output = Result<User, Self::Error>> + Send;
+    fn list_by_instance(&self, instance_id: Uuid) -> impl Future<Output = Result<Vec<User>, Self::Error>> + Send;
+    fn update(&self, user: &User) -> impl Future<Output = Result<User, Self::Error>> + Send;
+    fn delete(&self, id: Uuid) -> impl Future<Output = Result<(), Self::Error>> + Send;
 }
 
-pub trait UserRepository: Send + Sync {
-    fn create(&self, user: &User) -> impl Future<Output = Result<User>> + Send;
-    fn get(&self, id: Uuid) -> impl Future<Output = Result<User>> + Send;
-    fn get_by_email(&self, email: &str) -> impl Future<Output = Result<User>> + Send;
-    fn get_by_instance(&self, instance_id: Uuid) -> impl Future<Output = Result<Vec<User>>> + Send;
-    fn update(&self, user: &User) -> impl Future<Output = Result<User>> + Send;
-    fn delete(&self, id: Uuid) -> impl Future<Output = Result<()>> + Send;
+#[async_trait]
+pub trait MessageStore {
+    type Error;
+
+    fn send_message(&self, message: &Message) -> impl Future<Output = Result<MessageId, Self::Error>> + Send;
+    fn get_messages(&self, filter: &MessageFilter) -> impl Future<Output = Result<Vec<Message>, Self::Error>> + Send;
+    fn update_status(&self, message_id: Uuid, status: Status) -> impl Future<Output = Result<(), Self::Error>> + Send;
+    fn delete_messages(&self, filter: &MessageFilter) -> impl Future<Output = Result<(), Self::Error>> + Send;
+    fn search_messages(&self, query: &SearchQuery) -> impl Future<Output = Result<Vec<Message>, Self::Error>> + Send;
 }
 
-pub trait RoomService: Send + Sync {
-    fn create_room(&self, config: RoomConfig) -> impl Future<Output = Result<Room>> + Send;
-    fn join_room(&self, room_id: Uuid, user_id: Uuid) -> impl Future<Output = Result<Connection>> + Send;
-    fn leave_room(&self, room_id: Uuid, user_id: Uuid) -> impl Future<Output = Result<()>> + Send;
-    fn publish_track(&self, track: TrackInfo) -> impl Future<Output = Result<Track>> + Send;
-    fn subscribe_track(&self, track_id: Uuid) -> impl Future<Output = Result<Subscription>> + Send;
-    fn get_participants(&self, room_id: Uuid) -> impl Future<Output = Result<Vec<Participant>>> + Send;
-    fn get_room_stats(&self, room_id: Uuid) -> impl Future<Output = Result<RoomStats>> + Send;
-}
+#[async_trait]
+pub trait FileStore {
+    type Error;
 
-pub trait MessageService: Send + Sync {
-    fn send_message(&self, message: Message) -> impl Future<Output = Result<MessageId>> + Send;
-    fn get_messages(&self, filter: MessageFilter) -> impl Future<Output = Result<Vec<Message>>> + Send;
-    fn update_status(&self, message_id: Uuid, status: Status) -> impl Future<Output = Result<()>> + Send;
-    fn delete_messages(&self, filter: MessageFilter) -> impl Future<Output = Result<()>> + Send;
-    fn search_messages(&self, query: SearchQuery) -> impl Future<Output = Result<Vec<Message>>> + Send;
+    fn upload_file(&self, upload: &FileUpload) -> impl Future<Output = Result<FileInfo, Self::Error>> + Send;
+    fn get_file(&self, file_id: Uuid) -> impl Future<Output = Result<FileContent, Self::Error>> + Send;
+    fn delete_file(&self, file_id: Uuid) -> impl Future<Output = Result<(), Self::Error>> + Send;
+    fn list_files(&self, prefix: &str) -> impl Future<Output = Result<Vec<FileInfo>, Self::Error>> + Send;
 }
-
-pub trait FileService: Send + Sync {
-    fn save_file(&self, file: FileUpload) -> impl Future<Output = Result<FileInfo>> + Send;
-    fn get_file(&self, file_id: Uuid) -> impl Future<Output = Result<FileContent>> + Send;
-    fn delete_file(&self, file_id: Uuid) -> impl Future<Output = Result<()>> + Send;
-    fn list_files(&self, prefix: &str) -> impl Future<Output = Result<Vec<FileInfo>>> + Send;
-}
\ No newline at end of file
diff --git a/gb-image/Cargo.toml b/gb-image/Cargo.toml
index 11356a2..e6aef3b 100644
--- 
a/gb-image/Cargo.toml +++ b/gb-image/Cargo.toml @@ -17,11 +17,11 @@ serde.workspace = true serde_json.workspace = true thiserror.workspace = true tracing.workspace = true +tempfile = "3.8" [dev-dependencies] rstest.workspace = true tokio-test = "0.4" -tempfile = "3.8" [build-dependencies] diff --git a/gb-image/src/converter.rs b/gb-image/src/converter.rs index 2294472..7bf9a6d 100644 --- a/gb-image/src/converter.rs +++ b/gb-image/src/converter.rs @@ -1,64 +1,74 @@ +use std::io::Cursor; use gb_core::{Result, Error}; -use image::{DynamicImage, ImageFormat, codecs::webp}; +use image::{ImageOutputFormat, DynamicImage}; use tracing::instrument; +pub struct ImageConverter; + +impl ImageConverter { #[instrument] -pub fn convert_to_format(image_data: &[u8], format: ImageFormat) -> Result> { - let img = image::load_from_memory(image_data) - .map_err(|e| Error::internal(format!("Failed to load image: {}", e)))?; - let mut output = Vec::new(); - match format { - ImageFormat::Jpeg => { - img.write_to(&mut output, ImageFormat::Jpeg) - .map_err(|e| Error::internal(format!("JPEG conversion failed: {}", e)))?; - } - ImageFormat::Png => { - img.write_to(&mut output, ImageFormat::Png) - .map_err(|e| Error::internal(format!("PNG conversion failed: {}", e)))?; - } - ImageFormat::WebP => { - img.write_to(&mut output, ImageFormat::WebP) - .map_err(|e| Error::internal(format!("WebP conversion failed: {}", e)))?; -} - _ => return Err(Error::internal("Unsupported format".to_string())), + pub fn to_jpeg(img: &DynamicImage, quality: u8) -> Result> { + let mut buffer = Cursor::new(Vec::new()); + img.write_to(&mut buffer, ImageOutputFormat::Jpeg(quality)) + .map_err(|e| Error::internal(format!("JPEG conversion failed: {}", e)))?; + Ok(buffer.into_inner()) } - Ok(output) + #[instrument] + pub fn to_png(img: &DynamicImage) -> Result> { + let mut buffer = Cursor::new(Vec::new()); + img.write_to(&mut buffer, ImageOutputFormat::Png) + .map_err(|e| Error::internal(format!("PNG conversion failed: {}", e)))?; + Ok(buffer.into_inner()) + } + + #[instrument] + pub fn to_webp(img: &DynamicImage, quality: u8) -> Result> { + let mut buffer = Cursor::new(Vec::new()); + img.write_to(&mut buffer, ImageOutputFormat::WebP) + .map_err(|e| Error::internal(format!("WebP conversion failed: {}", e)))?; + Ok(buffer.into_inner()) + } + + #[instrument] + pub fn to_gif(img: &DynamicImage) -> Result> { + let mut buffer = Cursor::new(Vec::new()); + img.write_to(&mut buffer, ImageOutputFormat::Gif) + .map_err(|e| Error::internal(format!("GIF conversion failed: {}", e)))?; + Ok(buffer.into_inner()) + } } + #[cfg(test)] mod tests { use super::*; use rstest::*; #[fixture] - fn test_image() -> Vec { - let img = DynamicImage::new_rgb8(100, 100); - let mut buffer = Vec::new(); - img.write_to(&mut buffer, ImageFormat::Png).unwrap(); - buffer + fn test_image() -> DynamicImage { + DynamicImage::new_rgb8(100, 100) } #[rstest] - fn test_jpeg_conversion(test_image: Vec) -> Result<()> { - let jpeg_data = convert_to_format(&test_image, ImageFormat::Jpeg)?; + fn test_jpeg_conversion(test_image: DynamicImage) -> Result<()> { + let jpeg_data = ImageConverter::to_jpeg(&test_image, 80)?; assert!(!jpeg_data.is_empty()); - assert_eq!(image::guess_format(&jpeg_data).unwrap(), ImageFormat::Jpeg); + assert_eq!(image::guess_format(&jpeg_data).unwrap(), image::ImageFormat::Jpeg); Ok(()) } #[rstest] - fn test_png_conversion(test_image: Vec) -> Result<()> { - let png_data = convert_to_format(&test_image, ImageFormat::Png)?; + fn test_png_conversion(test_image: DynamicImage) 
-> Result<()> { + let png_data = ImageConverter::to_png(&test_image)?; assert!(!png_data.is_empty()); - assert_eq!(image::guess_format(&png_data).unwrap(), ImageFormat::Png); + assert_eq!(image::guess_format(&png_data).unwrap(), image::ImageFormat::Png); Ok(()) } #[rstest] - fn test_webp_conversion(test_image: Vec) -> Result<()> { - let webp_data = convert_to_format(&test_image, ImageFormat::WebP)?; + fn test_webp_conversion(test_image: DynamicImage) -> Result<()> { + let webp_data = ImageConverter::to_webp(&test_image, 80)?; assert!(!webp_data.is_empty()); - assert_eq!(image::guess_format(&webp_data).unwrap(), ImageFormat::WebP); Ok(()) } } diff --git a/gb-image/src/lib.rs b/gb-image/src/lib.rs index b2dbf14..730f3be 100644 --- a/gb-image/src/lib.rs +++ b/gb-image/src/lib.rs @@ -2,12 +2,12 @@ pub mod processor; pub mod converter; pub use processor::ImageProcessor; -pub use converter::{ImageConverter, ImageFormat}; +pub use converter::ImageConverter; +// Remove the ImageFormat re-export since it's private in the image crate +pub use image::ImageFormat; #[cfg(test)] mod tests { - use super::*; - use gb_core::Result; use super::*; use gb_core::Result; use image::{DynamicImage, Rgba}; @@ -49,7 +49,6 @@ mod tests { let png_data = ImageConverter::to_png(&image)?; let gif_data = ImageConverter::to_gif(&image)?; - Ok(()) } } diff --git a/gb-migrations/Cargo.toml b/gb-migrations/Cargo.toml index 056c0b2..1adf6a0 100644 --- a/gb-migrations/Cargo.toml +++ b/gb-migrations/Cargo.toml @@ -5,6 +5,10 @@ edition.workspace = true authors.workspace = true license.workspace = true +[[bin]] +name = "migrations" +path = "src/bin/migrations.rs" + [dependencies] tokio.workspace = true sqlx.workspace = true @@ -15,4 +19,4 @@ serde_json.workspace = true gb-core = { path = "../gb-core" } [dev-dependencies] -rstest.workspace = true +rstest.workspace = true \ No newline at end of file diff --git a/gb-migrations/src/bin/migrations.rs b/gb-migrations/src/bin/migrations.rs new file mode 100644 index 0000000..237bf81 --- /dev/null +++ b/gb-migrations/src/bin/migrations.rs @@ -0,0 +1,19 @@ +use sqlx::PgPool; +use gb_migrations::run_migrations; + +#[tokio::main] +async fn main() -> Result<(), sqlx::Error> { + let database_url = std::env::var("DATABASE_URL") + .expect("DATABASE_URL must be set"); + + println!("Creating database connection pool..."); + let pool = PgPool::connect(&database_url) + .await + .expect("Failed to create pool"); + + println!("Running migrations..."); + run_migrations(&pool).await?; + + println!("Migrations completed successfully!"); + Ok(()) +} \ No newline at end of file diff --git a/gb-migrations/src/lib.rs b/gb-migrations/src/lib.rs index 1468298..59a8faa 100644 --- a/gb-migrations/src/lib.rs +++ b/gb-migrations/src/lib.rs @@ -4,9 +4,10 @@ use tracing::info; pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> { info!("Running database migrations"); - sqlx::query( - r#" - CREATE TABLE IF NOT EXISTS customers ( + // Create tables + let table_queries = [ + // Customers table + r#"CREATE TABLE IF NOT EXISTS customers ( id UUID PRIMARY KEY, name VARCHAR(255) NOT NULL, subscription_tier VARCHAR(50) NOT NULL, @@ -14,9 +15,10 @@ pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> { max_instances INTEGER NOT NULL, metadata JSONB NOT NULL DEFAULT '{}', created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP - ); - - CREATE TABLE IF NOT EXISTS instances ( + )"#, + + // Instances table + r#"CREATE TABLE IF NOT EXISTS instances ( id UUID PRIMARY KEY, customer_id UUID NOT 
NULL REFERENCES customers(id), name VARCHAR(255) NOT NULL, @@ -25,9 +27,10 @@ pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> { region VARCHAR(50) NOT NULL, config JSONB NOT NULL DEFAULT '{}', created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP - ); - - CREATE TABLE IF NOT EXISTS rooms ( + )"#, + + // Rooms table + r#"CREATE TABLE IF NOT EXISTS rooms ( id UUID PRIMARY KEY, customer_id UUID NOT NULL REFERENCES customers(id), instance_id UUID NOT NULL REFERENCES instances(id), @@ -36,9 +39,10 @@ pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> { status VARCHAR(50) NOT NULL, config JSONB NOT NULL DEFAULT '{}', created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP - ); - - CREATE TABLE IF NOT EXISTS messages ( + )"#, + + // Messages table + r#"CREATE TABLE IF NOT EXISTS messages ( id UUID PRIMARY KEY, customer_id UUID NOT NULL REFERENCES customers(id), instance_id UUID NOT NULL REFERENCES instances(id), @@ -49,9 +53,10 @@ pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> { metadata JSONB NOT NULL DEFAULT '{}', created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, shard_key INTEGER NOT NULL - ); - - CREATE TABLE IF NOT EXISTS users ( + )"#, + + // Users table + r#"CREATE TABLE IF NOT EXISTS users ( id UUID PRIMARY KEY, customer_id UUID NOT NULL REFERENCES customers(id), instance_id UUID NOT NULL REFERENCES instances(id), @@ -60,9 +65,10 @@ pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> { status VARCHAR(50) NOT NULL, metadata JSONB NOT NULL DEFAULT '{}', created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP - ); - - CREATE TABLE IF NOT EXISTS tracks ( + )"#, + + // Tracks table + r#"CREATE TABLE IF NOT EXISTS tracks ( id UUID PRIMARY KEY, room_id UUID NOT NULL REFERENCES rooms(id), user_id UUID NOT NULL REFERENCES users(id), @@ -70,29 +76,43 @@ pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> { status VARCHAR(50) NOT NULL, metadata JSONB NOT NULL DEFAULT '{}', created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP - ); - - CREATE TABLE IF NOT EXISTS subscriptions ( + )"#, + + // Subscriptions table + r#"CREATE TABLE IF NOT EXISTS subscriptions ( id UUID PRIMARY KEY, track_id UUID NOT NULL REFERENCES tracks(id), user_id UUID NOT NULL REFERENCES users(id), status VARCHAR(50) NOT NULL, metadata JSONB NOT NULL DEFAULT '{}', created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP - ); + )"#, + ]; - -- Create indexes for performance - CREATE INDEX IF NOT EXISTS idx_instances_customer_id ON instances(customer_id); - CREATE INDEX IF NOT EXISTS idx_rooms_instance_id ON rooms(instance_id); - CREATE INDEX IF NOT EXISTS idx_messages_conversation_id ON messages(conversation_id); - CREATE INDEX IF NOT EXISTS idx_messages_shard_key ON messages(shard_key); - CREATE INDEX IF NOT EXISTS idx_tracks_room_id ON tracks(room_id); - CREATE INDEX IF NOT EXISTS idx_subscriptions_track_id ON subscriptions(track_id); - CREATE INDEX IF NOT EXISTS idx_users_email ON users(email); - "#, - ) - .execute(pool) - .await?; + // Create indexes + let index_queries = [ + "CREATE INDEX IF NOT EXISTS idx_instances_customer_id ON instances(customer_id)", + "CREATE INDEX IF NOT EXISTS idx_rooms_instance_id ON rooms(instance_id)", + "CREATE INDEX IF NOT EXISTS idx_messages_conversation_id ON messages(conversation_id)", + "CREATE INDEX IF NOT EXISTS idx_messages_shard_key ON messages(shard_key)", + "CREATE INDEX IF NOT EXISTS idx_tracks_room_id ON tracks(room_id)", + "CREATE INDEX IF NOT EXISTS 
idx_subscriptions_track_id ON subscriptions(track_id)",
+        "CREATE INDEX IF NOT EXISTS idx_users_email ON users(email)",
+    ];
+
+    // Execute table creation queries
+    for query in table_queries {
+        sqlx::query(query)
+            .execute(pool)
+            .await?;
+    }
+
+    // Execute index creation queries
+    for query in index_queries {
+        sqlx::query(query)
+            .execute(pool)
+            .await?;
+    }
 
     info!("Migrations completed successfully");
     Ok(())
diff --git a/process.rs b/process.rs
new file mode 100644
index 0000000..e69de29
diff --git a/prompt.md b/prompt.md
new file mode 100644
index 0000000..74e0bec
--- /dev/null
+++ b/prompt.md
@@ -0,0 +1,558 @@
+You are a distributed systems architect for a billion-scale real-time communication platform called General Bots (gb). The system combines bot capabilities, WebRTC communication, and massive-scale messaging, with the following architecture:
+
+1. Core Domains and Models:
+
+A. Customer Hierarchy:
+- Customer (top-level organization)
+  - Multiple Instances
+  - Subscription Management
+  - Resource Quotas
+  - Regional Distribution
+  - Billing & Usage Tracking
+
+B. Instance Management:
+- Per-customer instances
+- Resource isolation
+- Regional deployment
+- Feature toggles
+- Usage monitoring
+- Shard management
+
+2. Communication Infrastructure:
+
+A. Real-time Rooms:
+- WebRTC-based communication
+- Track management (audio/video)
+- Participant handling
+- Room scaling
+- Media processing
+- Recording capabilities
+- Video-based rooms (Zoom-like)
+- TikTok-style live streams
+
+B. Messaging System:
+- Sharded message queues
+- Message persistence
+- Real-time delivery
+- Message routing
+- Delivery status tracking
+- Message search
+
+3. Database Schema:
+
+A. Core Tables:
+```sql
+CREATE TABLE customers (
+    id UUID PRIMARY KEY,
+    name VARCHAR(255),
+    subscription_tier VARCHAR(50),
+    status VARCHAR(50),
+    max_instances INTEGER,
+    metadata JSONB,
+    created_at TIMESTAMPTZ
+);
+
+CREATE TABLE instances (
+    id UUID PRIMARY KEY,
+    customer_id UUID,
+    name VARCHAR(255),
+    status VARCHAR(50),
+    shard_id INTEGER,
+    region VARCHAR(50),
+    config JSONB,
+    created_at TIMESTAMPTZ
+);
+
+CREATE TABLE rooms (
+    id UUID PRIMARY KEY,
+    customer_id UUID,
+    instance_id UUID,
+    name VARCHAR(255),
+    kind VARCHAR(50),
+    status VARCHAR(50),
+    config JSONB,
+    created_at TIMESTAMPTZ
+);
+
+CREATE TABLE messages (
+    id UUID PRIMARY KEY,
+    customer_id UUID,
+    instance_id UUID,
+    conversation_id UUID,
+    sender_id UUID,
+    kind VARCHAR(50),
+    content TEXT,
+    metadata JSONB,
+    created_at TIMESTAMPTZ,
+    shard_key INTEGER
+);
+```
+Also consider every table here, even if you reorganize:
+BOnlineSubscription
+GuaribasAdmin
+GuaribasAnswer
+GuaribasApplications
+GuaribasChannel
+GuaribasConversation
+GuaribasConversationMessage
+GuaribasGroup
+GuaribasInstance
+GuaribasLog
+GuaribasPackage
+GuaribasQuestion
+GuaribasQuestionAlternate
+GuaribasSchedule
+GuaribasSubject
+GuaribasUser
+GuaribasUserGroup
+
+4. Scaling Architecture:
+
+A. Storage Layer:
+- PostgreSQL (relational data)
+  - Sharded by customer_id (see the routing sketch after this section)
+  - Partitioned tables
+  - Read replicas
+- TiKV (distributed KV)
+  - Real-time data
+  - Cache layer
+  - Fast lookups
+- Redis (caching)
+  - Session data
+  - Rate limiting
+  - Temporary storage
+
+B. Message Queue:
+- Kafka clusters
+  - Sharded topics
+  - Message routing
+  - Event streaming
+- Redis Pub/Sub
+  - Real-time updates
+  - Presence information
+  - Status changes
+
+C. Media Handling:
+- WebRTC media servers
+- Track multiplexing
+- Media processing
+- Recording storage
+
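+Every layer above keys on customer_id. Below is a minimal sketch of deterministic shard routing; the shard count, topic-name prefix, and hashing scheme are illustrative assumptions, not part of this spec:
+
+```rust
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+use uuid::Uuid;
+
+/// Total number of shards; assumed fixed at deploy time.
+const SHARD_COUNT: u64 = 1024;
+
+/// Map a customer to a stable shard index by hashing its UUID.
+fn shard_for_customer(customer_id: Uuid) -> u32 {
+    let mut hasher = DefaultHasher::new();
+    customer_id.hash(&mut hasher);
+    (hasher.finish() % SHARD_COUNT) as u32
+}
+
+/// Kafka-style topic name for a customer's message stream.
+fn topic_for_customer(customer_id: Uuid) -> String {
+    format!("messages-shard-{:04}", shard_for_customer(customer_id))
+}
+
+fn main() {
+    let customer_id = Uuid::new_v4();
+    // The same customer always routes to the same shard and topic.
+    assert_eq!(
+        topic_for_customer(customer_id),
+        topic_for_customer(customer_id)
+    );
+    println!("customer {} -> {}", customer_id, topic_for_customer(customer_id));
+}
+```
+
+The same shard index can drive the PostgreSQL partition and the messages.shard_key column, so storage and queueing agree on data locality.
+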
+5. API Structure:
+
+A. System APIs:
+```rust
+pub trait SystemAPI {
+    async fn call_vm(&self, pid: Uuid, text: String) -> Result<String>;
+    async fn wait(&self, pid: Uuid, seconds: i32) -> Result<()>;
+    async fn save_file(&self, pid: Uuid, data: Vec<u8>) -> Result<String>;
+    async fn execute_sql(&self, pid: Uuid, sql: String) -> Result<String>;
+}
+```
+
+B. Room APIs:
+```rust
+pub trait RoomAPI {
+    async fn create_room(&self, config: RoomConfig) -> Result<Room>;
+    async fn join_room(&self, room_id: Uuid, user_id: Uuid) -> Result<Connection>;
+    async fn publish_track(&self, track: TrackInfo) -> Result<Track>;
+    async fn subscribe_track(&self, track_id: Uuid) -> Result<Subscription>;
+}
+```
+
+C. Message APIs:
+```rust
+pub trait MessageAPI {
+    async fn send_message(&self, message: Message) -> Result<MessageId>;
+    async fn get_messages(&self, filter: MessageFilter) -> Result<Vec<Message>>;
+    async fn update_status(&self, message_id: Uuid, status: Status) -> Result<()>;
+}
+```
+(A minimal in-memory sketch of this trait appears after the lists below.)
+
+6. Monitoring & Operations:
+
+A. Metrics:
+- System health
+- Resource usage
+- Message throughput
+- Media quality
+- Error rates
+- API latency
+
+B. Scaling Operations:
+- Auto-scaling rules
+- Shard management
+- Load balancing
+- Failover handling
+- Data migration
+
+C. Security:
+- Authentication
+- Authorization
+- Rate limiting
+- Data encryption
+- Audit logging
+
+Implementation Guidelines:
+
+1. Use Rust for:
+- Performance-critical paths
+- Memory safety
+- Concurrent processing
+- System reliability
+
+2. Sharding Strategy:
+- Shard by customer_id
+- Instance isolation
+- Regional distribution
+- Data locality
+
+3. Performance Targets:
+- Billion concurrent connections
+- Sub-second message delivery
+- 4K video streaming
+- Petabyte-scale storage
+
+4. Reliability Requirements:
+- 99.99% uptime
+- No message loss
+- Automatic failover
+- Data redundancy
+
+When implementing features, consider:
+1. Multi-tenant isolation
+2. Resource quotas
+3. Security boundaries
+4. Performance impact
+5. Scaling implications
+6. Monitoring requirements
+
+The system should handle:
+1. Billions of active users
+2. Millions of concurrent rooms
+3. Petabytes of message history
+4. Global distribution
+5. Real-time communication
+6. Bot automation
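+
+As referenced above, here is a minimal in-memory sketch of the MessageAPI trait. It assumes the async-trait and anyhow crates and simplified stand-in types for the gb-core models; it is a test double, not the production store:
+
+```rust
+use async_trait::async_trait;
+use tokio::sync::Mutex;
+use uuid::Uuid;
+
+// Simplified stand-ins for the gb-core model types.
+#[derive(Clone)]
+pub struct Message {
+    pub id: Uuid,
+    pub conversation_id: Uuid,
+    pub content: String,
+}
+pub struct MessageId(pub Uuid);
+pub struct MessageFilter {
+    pub conversation_id: Option<Uuid>,
+}
+
+#[async_trait]
+pub trait MessageAPI {
+    async fn send_message(&self, message: Message) -> anyhow::Result<MessageId>;
+    async fn get_messages(&self, filter: MessageFilter) -> anyhow::Result<Vec<Message>>;
+}
+
+/// In-memory store: just enough to exercise the trait in tests.
+#[derive(Default)]
+pub struct InMemoryMessages {
+    messages: Mutex<Vec<Message>>,
+}
+
+#[async_trait]
+impl MessageAPI for InMemoryMessages {
+    async fn send_message(&self, message: Message) -> anyhow::Result<MessageId> {
+        let id = MessageId(message.id);
+        self.messages.lock().await.push(message);
+        Ok(id)
+    }
+
+    async fn get_messages(&self, filter: MessageFilter) -> anyhow::Result<Vec<Message>> {
+        let messages = self.messages.lock().await;
+        // Apply the (optional) conversation filter; no filter returns everything.
+        Ok(messages
+            .iter()
+            .filter(|m| filter.conversation_id.map_or(true, |c| c == m.conversation_id))
+            .cloned()
+            .collect())
+    }
+}
+```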
+
+
+API:
+System Keywords:
+
+POST /systemKeywords/callVM
+POST /systemKeywords/append
+POST /systemKeywords/seeCaption
+POST /systemKeywords/seeText
+POST /systemKeywords/sortBy
+POST /systemKeywords/JSONAsGBTable
+POST /systemKeywords/renderTable
+POST /systemKeywords/closeHandles
+POST /systemKeywords/asPDF
+POST /systemKeywords/asImage
+POST /systemKeywords/executeSQL
+POST /systemKeywords/getFileContents
+POST /systemKeywords/getRandomId
+POST /systemKeywords/getStock
+POST /systemKeywords/wait
+POST /systemKeywords/talkTo
+POST /systemKeywords/getUser
+POST /systemKeywords/sendSmsTo
+POST /systemKeywords/set
+POST /systemKeywords/internalGetDocument
+POST /systemKeywords/saveFile
+POST /systemKeywords/uploadFile
+POST /systemKeywords/note
+POST /systemKeywords/saveToStorageBatch
+POST /systemKeywords/saveToStorage
+POST /systemKeywords/saveToStorageWithJSON
+POST /systemKeywords/save
+POST /systemKeywords/getHttp
+POST /systemKeywords/isValidDate
+POST /systemKeywords/isValidNumber
+POST /systemKeywords/isValidHour
+POST /systemKeywords/getFilter
+POST /systemKeywords/find
+POST /systemKeywords/getDateFromLocaleString
+POST /systemKeywords/createFolder
+POST /systemKeywords/shareFolder
+POST /systemKeywords/internalCreateDocument
+POST /systemKeywords/createDocument
+POST /systemKeywords/copyFile
+POST /systemKeywords/convert
+POST /systemKeywords/generatePassword
+POST /systemKeywords/flattenJSON
+POST /systemKeywords/getCustomToken
+POST /systemKeywords/getByHttp
+POST /systemKeywords/putByHttp
+POST /systemKeywords/postByHttp
+POST /systemKeywords/numberOnly
+POST /systemKeywords/createLead
+POST /systemKeywords/fill
+POST /systemKeywords/screenCapture
+POST /systemKeywords/numberToLetters
+POST /systemKeywords/getTableFromName
+POST /systemKeywords/merge
+POST /systemKeywords/tweet
+POST /systemKeywords/rewrite
+POST /systemKeywords/pay
+POST /systemKeywords/autoSave
+POST /systemKeywords/internalAutoSave
+POST /systemKeywords/deleteFromStorage
+POST /systemKeywords/deleteFile
+POST /systemKeywords/getExtensionInfo
+POST /systemKeywords/dirFolder
+POST /systemKeywords/log
+
+Dialog Keywords:
+
+POST /dialogKeywords/chart
+POST /dialogKeywords/getOCR
+POST /dialogKeywords/getToday
+POST /dialogKeywords/exit
+POST /dialogKeywords/getActiveTasks
+POST /dialogKeywords/createDeal
+POST /dialogKeywords/findContact
+POST /dialogKeywords/getContentLocaleWithCulture
+POST /dialogKeywords/getCoded
+POST /dialogKeywords/getWeekFromDate
+POST /dialogKeywords/getDateDiff
+POST /dialogKeywords/format
+POST /dialogKeywords/dateAdd [...and many more dialog-related endpoints]
+
+Web Automation:
+
+POST /webAutomation/isSelector
+POST /webAutomation/cyrb53
+POST /webAutomation/closeHandles
+POST /webAutomation/openPage
+POST /webAutomation/getPageByHandle
+POST /webAutomation/getBySelector
+POST /webAutomation/getByFrame
+POST /webAutomation/hover
+POST /webAutomation/click [...and more web automation endpoints]
+
+Image Processing:
+
+POST /imageProcessing/sharpen
+POST /imageProcessing/mergeImage
+POST /imageProcessing/blur
+
+Debugger Service:
+
+There must be a WebAssembly layer that converts BASIC code to WebAssembly through a compiler and supports remote debugging via this API (a state-management sketch follows the endpoint list below).
+
+POST /debuggerService/setBreakpoint
+POST /debuggerService/refactor
+POST /debuggerService/resume
+POST /debuggerService/stop
+POST /debuggerService/step
+POST /debuggerService/getContext
+POST /debuggerService/start
+POST /debuggerService/sendMessage
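+
+As referenced above, a sketch of the server-side state behind setBreakpoint/resume/stop, assuming per-process (pid) debug sessions and line-number breakpoints; the names and fields are illustrative, not the actual service:
+
+```rust
+use std::collections::{HashMap, HashSet};
+use uuid::Uuid;
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum RunState {
+    Running,
+    Paused,
+    Stopped,
+}
+
+/// Debug state for one BASIC program instance (pid).
+#[derive(Debug)]
+struct DebugSession {
+    state: RunState,
+    breakpoints: HashSet<u32>, // line numbers
+}
+
+#[derive(Debug, Default)]
+struct DebuggerService {
+    sessions: HashMap<Uuid, DebugSession>,
+}
+
+impl DebuggerService {
+    fn start(&mut self, pid: Uuid) {
+        self.sessions.insert(pid, DebugSession {
+            state: RunState::Running,
+            breakpoints: HashSet::new(),
+        });
+    }
+
+    fn set_breakpoint(&mut self, pid: Uuid, line: u32) -> bool {
+        self.sessions
+            .get_mut(&pid)
+            .map(|s| s.breakpoints.insert(line))
+            .unwrap_or(false)
+    }
+
+    /// Called by the interpreter/WASM host before executing each line.
+    fn should_pause(&mut self, pid: Uuid, line: u32) -> bool {
+        match self.sessions.get_mut(&pid) {
+            Some(s) if s.breakpoints.contains(&line) => {
+                s.state = RunState::Paused;
+                true
+            }
+            _ => false,
+        }
+    }
+
+    fn resume(&mut self, pid: Uuid) {
+        if let Some(s) = self.sessions.get_mut(&pid) {
+            s.state = RunState::Running;
+        }
+    }
+
+    fn stop(&mut self, pid: Uuid) {
+        if let Some(s) = self.sessions.get_mut(&pid) {
+            s.state = RunState::Stopped;
+        }
+    }
+}
+```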
+
+Original dependencies; migrate everything to workspace.dependencies (see the workspace sketch at the end of this document):
+    "@azure/arm-appservice": "15.0.0",
+    "@azure/arm-cognitiveservices": "7.5.0",
+    "@azure/arm-resources": "5.2.0",
+    "@azure/arm-search": "3.2.0",
+    "@azure/arm-sql": "10.0.0",
+    "@azure/arm-subscriptions": "5.1.0",
+    "@azure/cognitiveservices-computervision": "8.2.0",
+    "@azure/keyvault-keys": "4.8.0",
+    "@azure/ms-rest-js": "2.7.0",
+    "@azure/msal-node": "2.13.1",
+    "@azure/openai": "2.0.0-beta.1",
+    "@azure/search-documents": "12.1.0",
+    "@azure/storage-blob": "12.24.0",
+    "@google-cloud/pubsub": "4.7.0",
+    "@google-cloud/translate": "8.5.0",
+    "@hubspot/api-client": "11.2.0",
+    "@koa/cors": "5.0.0",
+    "@langchain/anthropic": "^0.3.7",
+    "@langchain/community": "0.2.31",
+    "@langchain/core": "^0.3.17",
+    "@langchain/openai": "0.2.8",
+    "@microsoft/microsoft-graph-client": "3.0.7",
+    "@nlpjs/basic": "4.27.0",
+    "@nosferatu500/textract": "3.1.3",
+    "@push-rpc/core": "1.9.0",
+    "@push-rpc/http": "1.9.0",
+    "@push-rpc/openapi": "1.9.0",
+    "@push-rpc/websocket": "1.9.0",
+    "@semantic-release/changelog": "6.0.3",
+    "@semantic-release/exec": "6.0.3",
+    "@semantic-release/git": "10.0.1",
+    "@sendgrid/mail": "8.1.3",
+    "@sequelize/core": "7.0.0-alpha.37",
+    "@types/node": "22.5.2",
+    "@types/validator": "13.12.1",
+    "adm-zip": "0.5.16",
+    "ai2html": "^0.121.1",
+    "alasql": "4.5.1",
+    "any-shell-escape": "0.1.1",
+    "arraybuffer-to-buffer": "0.0.7",
+    "async-mutex": "0.5.0",
+    "async-promises": "0.2.3",
+    "async-retry": "1.3.3",
+    "basic-auth": "2.0.1",
+    "billboard.js": "3.13.0",
+    "bluebird": "3.7.2",
+    "body-parser": "1.20.2",
+    "botbuilder": "4.23.0",
+    "botbuilder-adapter-facebook": "1.0.12",
+    "botbuilder-ai": "4.23.0",
+    "botbuilder-dialogs": "4.23.0",
+    "botframework-connector": "4.23.0",
+    "botlib": "5.0.0",
+    "c3-chart-maker": "0.2.8",
+    "cd": "0.3.3",
+    "chalk-animation": "2.0.3",
+    "chatgpt": "5.2.5",
+    "chrome-remote-interface": "0.33.2",
+    "cli-progress": "3.12.0",
+    "cli-spinner": "0.2.10",
+    "core-js": "3.38.1",
+    "cors": "2.8.5",
+    "csv-database": "0.9.2",
+    "data-forge": "1.10.2",
+    "date-diff": "1.0.2",
+    "docximager": "0.0.4",
+    "docxtemplater": "3.50.0",
+    "dotenv-extended": "2.9.0",
+    "electron": "32.0.1",
+    "exceljs": "4.4.0",
+    "express": "4.19.2",
+    "express-remove-route": "1.0.0",
+    "facebook-nodejs-business-sdk": "^20.0.2",
+    "ffmpeg-static": "5.2.0",
+    "formidable": "^3.5.1",
+    "get-image-colors": "4.0.1",
+    "glob": "^11.0.0",
+    "google-libphonenumber": "3.2.38",
+    "googleapis": "143.0.0",
+    "hnswlib-node": "3.0.0",
+    "html-to-md": "0.8.6",
+    "http-proxy": "1.18.1",
+    "ibm-watson": "9.1.0",
+    "icojs": "^0.19.4",
+    "instagram-private-api": "1.46.1",
+    "iso-639-1": "3.1.3",
+    "isomorphic-fetch": "3.0.0",
+    "jimp": "1.6.0",
+    "js-md5": "0.8.3",
+    "json-schema-to-zod": "2.4.0",
+    "jsqr": "^1.4.0",
+    "just-indent": "0.0.1",
+    "keyv": "5.0.1",
+    "koa": "2.15.3",
+    "koa-body": "6.0.1",
+    "koa-ratelimit": "5.1.0",
+    "koa-router": "12.0.1",
+    "langchain": "0.2.17",
+    "language-tags": "1.0.9",
+    "line-replace": "2.0.1",
+    "lodash": "4.17.21",
+    "luxon": "3.5.0",
+    "mammoth": "1.8.0",
+    "mariadb": "3.3.1",
+    "mime-types": "2.1.35",
+    "moment": "2.30.1",
+    "ms-rest-azure": "3.0.2",
+    "mysql": "^2.18.1",
+    "nexmo": "2.9.1",
+    "ngrok": "5.0.0-beta.2",
"node-cron": "3.0.3", + "node-html-parser": "6.1.13", + "node-nlp": "4.27.0", + "node-tesseract-ocr": "2.2.1", + "nodemon": "^3.1.7", + "npm": "10.8.3", + "open": "10.1.0", + "open-docxtemplater-image-module": "1.0.3", + "openai": "4.57.0", + "pdf-extraction": "1.0.2", + "pdf-parse": "1.1.1", + "pdf-to-png-converter": "3.3.0", + "pdfjs-dist": "4.6.82", + "pdfkit": "0.15.0", + "phone": "3.1.50", + "pizzip": "3.1.7", + "pptxtemplater": "1.0.5", + "pragmatismo-io-framework": "1.1.1", + "prism-media": "1.3.5", + "public-ip": "7.0.1", + "punycode": "2.3.1", + "puppeteer": "23.2.2", + "puppeteer-extra": "3.3.6", + "puppeteer-extra-plugin-minmax": "1.1.2", + "puppeteer-extra-plugin-stealth": "2.11.2", + "qr-scanner": "1.4.2", + "qrcode": "1.5.4", + "qrcode-reader": "^1.0.4", + "qrcode-terminal": "0.12.0", + "readline": "1.3.0", + "reflect-metadata": "0.2.2", + "rimraf": "6.0.1", + "safe-buffer": "5.2.1", + "scanf": "1.2.0", + "sequelize": "6.37.3", + "sequelize-cli": "6.6.2", + "sequelize-typescript": "2.1.6", + "simple-git": "3.26.0", + "speakingurl": "14.0.1", + "sqlite3": "5.1.7", + "ssr-for-bots": "1.0.1-c", + "strict-password-generator": "1.1.2", + "svg2img": "^1.0.0-beta.2", + "swagger-client": "3.29.2", + "swagger-ui-dist": "5.17.14", + "tabulator-tables": "6.2.5", + "tedious": "18.6.1", + "textract": "2.5.0", + "twilio": "5.2.3", + "twitter-api-v2": "1.17.2", + "typeorm": "0.3.20", + "typescript": "5.5.4", + "url-join": "5.0.0", + "vhost": "3.0.2", + "vm2": "3.9.19", + "vm2-process": "2.1.5", + "walk-promise": "0.2.0", + "washyourmouthoutwithsoap": "1.0.2", + "webdav-server": "2.6.2", + "webp-converter": "^2.3.3", + "whatsapp-cloud-api": "0.3.1", + "whatsapp-web.js": "1.26.1-alpha.1", + "winston": "3.14.2", + "ws": "8.18.0", + "yaml": "2.5.0", + "yarn": "1.22.22", + "zod-to-json-schema": "3.23.2" + }, + "devDependencies": { + "@types/qrcode": "1.5.5", + "@types/url-join": "4.0.3", + "@typescript-eslint/eslint-plugin": "8.4.0", + "@typescript-eslint/parser": "8.4.0", + "ban-sensitive-files": "1.10.5", + "commitizen": "4.3.0", + "cz-conventional-changelog": "3.3.0", + "dependency-check": "4.1.0", + "git-issues": "1.3.1", + "license-checker": "25.0.1", + "prettier-standard": "16.4.1", + "semantic-release": "24.1.0", + "simple-commit-message": "4.1.3", + "super-strong-password-generator": "2.0.2", + "super-strong-password-generator-es": "2.0.2", + "travis-deploy-once": "5.0.11", + "tslint": "6.1.3", + "tsx": "^4.19.1", + "vitest": "2.0.5" + +migrate them to rust compatible, + +- do not skip items, migrate everything, in way better, in your interpretation. +- use kubernetes and create environment configuration for everything and ingress to have several server nodes if eeed automatically +- I NEED FULL CODE SOLUTION IN PROFESSIONAL TESTABLE RUST CODE: if you need split answer in several parts, but provide ENTIRE CODE. Complete working balenced aserver. IMPORTANTE: Generate the project in a .sh shell script output with cat, of entire code base to be restored, no placeholder neither TODOS. +- VERY IMPORNTANT: DO NOT put things like // Add other system routes... you should WRITE ACUTAL CODE +- Need tests for every line of code written. \ No newline at end of file