new(all): Initial import.

This commit is contained in:
Rodrigo Rodriguez 2024-12-26 16:16:03 -03:00
parent ced0bc3f0f
commit 3798976340
9 changed files with 46 additions and 226 deletions

13
.vscode/launch.json vendored
View file

@@ -1,19 +1,6 @@
{ {
"version": "0.2.0", "version": "0.2.0",
"configurations": [ "configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Cargo test",
"cargo": {
"args": [
"test",
"--no-run",
"--lib"
]
},
"args": []
},
{ {
"type": "lldb", "type": "lldb",
"request": "launch", "request": "launch",

View file

@ -1,7 +1,7 @@
use gb_core::{Error, Result}; use gb_core::{Error, Result};
use tracing::{info, error}; use tracing::{info, error};
use axum::Router;
use std::net::SocketAddr; use std::net::SocketAddr;
use gb_messaging::MessageProcessor;
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
@ -16,21 +16,13 @@ async fn main() -> Result<()> {
} }
async fn initialize_bot_server() -> Result<axum::Router> { async fn initialize_bot_server() -> Result<axum::Router> {
info!("Initializing General Bots..."); info!("Initializing General Bots server...");
// Initialize database connections // Initialize the MessageProcessor
let db_pool = initialize_database().await?; let message_processor = MessageProcessor::new();
// Initialize Redis // Build the Axum router using our router module
let redis_client = initialize_redis().await?; let app = gb_api::create_router(message_processor)
// Build the Axum router with our routes
let app = axum::Router::new()
.with_state(AppState {
db: db_pool,
redis: redis_client,
})
// Add your route handlers here
.layer(tower_http::trace::TraceLayer::new_for_http()); .layer(tower_http::trace::TraceLayer::new_for_http());
Ok(app) Ok(app)
@ -77,8 +69,8 @@ struct AppState {
} }
async fn start_server(app: Router) -> Result<()> { async fn start_server(app: axum::Router) -> Result<()> {
let addr = SocketAddr::from(([0, 0, 0, 0], 3000)); let addr = SocketAddr::from(([0, 0, 0, 0], 3001));
info!("Starting server on {}", addr); info!("Starting server on {}", addr);
match tokio::net::TcpListener::bind(addr).await { match tokio::net::TcpListener::bind(addr).await {
@ -91,6 +83,7 @@ async fn start_server(app: Router) -> Result<()> {
Err(e) => { Err(e) => {
error!("Failed to bind to address: {}", e); error!("Failed to bind to address: {}", e);
Err(Error::internal(format!("Failed to bind to address: {}", e))) Err(Error::internal(format!("Failed to bind to address: {}", e)))
} }
} }
} }

View file

@ -70,16 +70,18 @@ async fn send_message(
State(state): State<Arc<ApiState>>, State(state): State<Arc<ApiState>>,
Json(message): Json<Message>, Json(message): Json<Message>,
) -> Result<Json<MessageId>> { ) -> Result<Json<MessageId>> {
// Clone the message before using it in envelope
let envelope = MessageEnvelope { let envelope = MessageEnvelope {
id: Uuid::new_v4(), id: Uuid::new_v4(),
message, message: message.clone(), // Clone here
metadata: HashMap::new(), metadata: HashMap::new(),
}; };
let mut processor = state.message_processor.lock().await; let mut processor = state.message_processor.lock().await;
processor.process_messages().await processor.add_message(message) // Use original message here
.map_err(|e| Error::internal(format!("Failed to process message: {}", e)))?; .await
.map_err(|e| Error::internal(format!("Failed to add message: {}", e)))?;
Ok(Json(MessageId(envelope.id))) Ok(Json(MessageId(envelope.id)))
} }

View file

@ -60,8 +60,8 @@ pub struct Message {
pub kind: String, pub kind: String,
pub content: String, pub content: String,
pub metadata: JsonValue, pub metadata: JsonValue,
pub created_at: DateTime<Utc>, pub created_at: Option<DateTime<Utc>>,
pub shard_key: i32, pub shard_key: Option<i32>,
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]

View file

@@ -1,22 +0,0 @@
# Crate manifest for gb-migrations: owns the database schema migrations.
[package]
name = "gb-migrations"
version = { workspace = true }
edition = { workspace = true }
authors = { workspace = true }
license = { workspace = true }
# Standalone binary entry point so migrations can be run from the CLI
# (reads DATABASE_URL; see src/bin/migrations.rs).
[[bin]]
name = "migrations"
path = "src/bin/migrations.rs"
# All versions are pinned at the workspace root to keep crates in lockstep.
[dependencies]
tokio= { workspace = true }
sqlx= { workspace = true }
tracing= { workspace = true }
uuid= { workspace = true }
chrono= { workspace = true }
serde_json= { workspace = true }
# Local core crate providing shared models/error types.
gb-core = { path = "../gb-core" }
[dev-dependencies]
rstest= { workspace = true }

View file

@@ -1,19 +0,0 @@
use sqlx::PgPool;
use gb_migrations::run_migrations;
#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // DATABASE_URL is mandatory configuration; a missing value is an
    // operator/deployment error, so failing fast with a clear panic message
    // is intentional (std::env::VarError cannot convert into sqlx::Error).
    let database_url = std::env::var("DATABASE_URL")
        .expect("DATABASE_URL must be set");

    println!("Creating database connection pool...");
    // Propagate connection failures through the Result return type instead
    // of panicking: the function already returns Result<(), sqlx::Error>,
    // so `?` gives callers a proper error value rather than an abort.
    let pool = PgPool::connect(&database_url).await?;

    println!("Running migrations...");
    run_migrations(&pool).await?;

    println!("Migrations completed successfully!");
    Ok(())
}

View file

@@ -1,144 +0,0 @@
use sqlx::PgPool;
use tracing::info;
/// Applies the full database schema against `pool`: creates every table,
/// then every supporting index.
///
/// All statements use `IF NOT EXISTS`, so the function is idempotent and
/// safe to run repeatedly against the same database.
///
/// # Errors
/// Returns the first `sqlx::Error` produced by any statement; later
/// statements are not attempted after a failure.
pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> {
    info!("Running database migrations");

    // DDL for every table, ordered so that referenced tables (customers,
    // instances, rooms, users, tracks) exist before the tables whose
    // foreign keys point at them.
    let table_queries = [
        // Customers table
        r#"CREATE TABLE IF NOT EXISTS customers (
            id UUID PRIMARY KEY,
            name VARCHAR(255) NOT NULL,
            subscription_tier VARCHAR(50) NOT NULL,
            status VARCHAR(50) NOT NULL,
            max_instances INTEGER NOT NULL,
            metadata JSONB NOT NULL DEFAULT '{}',
            created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
        )"#,
        // Instances table
        r#"CREATE TABLE IF NOT EXISTS instances (
            id UUID PRIMARY KEY,
            customer_id UUID NOT NULL REFERENCES customers(id),
            name VARCHAR(255) NOT NULL,
            status VARCHAR(50) NOT NULL,
            shard_id INTEGER NOT NULL,
            region VARCHAR(50) NOT NULL,
            config JSONB NOT NULL DEFAULT '{}',
            created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
        )"#,
        // Rooms table
        r#"CREATE TABLE IF NOT EXISTS rooms (
            id UUID PRIMARY KEY,
            customer_id UUID NOT NULL REFERENCES customers(id),
            instance_id UUID NOT NULL REFERENCES instances(id),
            name VARCHAR(255) NOT NULL,
            kind VARCHAR(50) NOT NULL,
            status VARCHAR(50) NOT NULL,
            config JSONB NOT NULL DEFAULT '{}',
            created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
        )"#,
        // Messages table
        r#"CREATE TABLE IF NOT EXISTS messages (
            id UUID PRIMARY KEY,
            customer_id UUID NOT NULL REFERENCES customers(id),
            instance_id UUID NOT NULL REFERENCES instances(id),
            conversation_id UUID NOT NULL,
            sender_id UUID NOT NULL,
            kind VARCHAR(50) NOT NULL,
            content TEXT NOT NULL,
            metadata JSONB NOT NULL DEFAULT '{}',
            created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
            shard_key INTEGER NOT NULL
        )"#,
        // Users table
        r#"CREATE TABLE IF NOT EXISTS users (
            id UUID PRIMARY KEY,
            customer_id UUID NOT NULL REFERENCES customers(id),
            instance_id UUID NOT NULL REFERENCES instances(id),
            name VARCHAR(255) NOT NULL,
            email VARCHAR(255) NOT NULL UNIQUE,
            status VARCHAR(50) NOT NULL,
            metadata JSONB NOT NULL DEFAULT '{}',
            created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
        )"#,
        // Tracks table
        r#"CREATE TABLE IF NOT EXISTS tracks (
            id UUID PRIMARY KEY,
            room_id UUID NOT NULL REFERENCES rooms(id),
            user_id UUID NOT NULL REFERENCES users(id),
            kind VARCHAR(50) NOT NULL,
            status VARCHAR(50) NOT NULL,
            metadata JSONB NOT NULL DEFAULT '{}',
            created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
        )"#,
        // Subscriptions table
        r#"CREATE TABLE IF NOT EXISTS subscriptions (
            id UUID PRIMARY KEY,
            track_id UUID NOT NULL REFERENCES tracks(id),
            user_id UUID NOT NULL REFERENCES users(id),
            status VARCHAR(50) NOT NULL,
            metadata JSONB NOT NULL DEFAULT '{}',
            created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
        )"#,
    ];

    // Secondary indexes covering the hot lookup paths (FK joins, shard
    // routing, email lookup).
    let index_queries = [
        "CREATE INDEX IF NOT EXISTS idx_instances_customer_id ON instances(customer_id)",
        "CREATE INDEX IF NOT EXISTS idx_rooms_instance_id ON rooms(instance_id)",
        "CREATE INDEX IF NOT EXISTS idx_messages_conversation_id ON messages(conversation_id)",
        "CREATE INDEX IF NOT EXISTS idx_messages_shard_key ON messages(shard_key)",
        "CREATE INDEX IF NOT EXISTS idx_tracks_room_id ON tracks(room_id)",
        "CREATE INDEX IF NOT EXISTS idx_subscriptions_track_id ON subscriptions(track_id)",
        "CREATE INDEX IF NOT EXISTS idx_users_email ON users(email)",
    ];

    // Tables must exist before their indexes, so run the two batches in
    // sequence: all table DDL first, then all index DDL.
    for statement in table_queries.iter().chain(index_queries.iter()) {
        sqlx::query(statement).execute(pool).await?;
    }

    info!("Migrations completed successfully");
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use sqlx::postgres::{PgPoolOptions, PgPool};
    use rstest::*;

    /// Connection string used when DATABASE_URL is not set in the test
    /// environment.
    const FALLBACK_DB_URL: &str = "postgres://postgres:postgres@localhost/gb_test";

    /// Builds a small (5-connection) pool against the test database,
    /// panicking if the database is unreachable.
    async fn create_test_pool() -> PgPool {
        let url = std::env::var("DATABASE_URL")
            .unwrap_or_else(|_| FALLBACK_DB_URL.to_string());
        PgPoolOptions::new()
            .max_connections(5)
            .connect(&url)
            .await
            .expect("Failed to create test pool")
    }

    /// Smoke test: the migration set applies cleanly end-to-end.
    #[rstest]
    #[tokio::test]
    async fn test_migrations() {
        let pool = create_test_pool().await;
        assert!(run_migrations(&pool).await.is_ok());
    }
}

View file

@ -1,11 +1,14 @@
use gb_core::Result; use gb_core::{Result, models::*}; // This will import both Message and MessageId
use tracing::{error, instrument}; use gb_core::Error;
use std::sync::Arc; use uuid::Uuid;
use tokio::sync::broadcast;
use std::collections::HashMap; use std::collections::HashMap;
use tracing::instrument;
use crate::MessageEnvelope; use crate::MessageEnvelope;
use tokio::sync::broadcast; // Add this import
use std::sync::Arc;
use tracing::{error, info}; // Add error and info macros here
pub struct MessageProcessor { pub struct MessageProcessor {
tx: broadcast::Sender<MessageEnvelope>, tx: broadcast::Sender<MessageEnvelope>,
@ -51,6 +54,25 @@ impl MessageProcessor {
.insert(kind.to_string(), Box::new(handler)); .insert(kind.to_string(), Box::new(handler));
} }
#[instrument(skip(self))]
pub async fn add_message(&mut self, message: Message) -> Result<MessageId> {
let envelope = MessageEnvelope {
id: Uuid::new_v4(),
message,
metadata: HashMap::new(),
};
self.tx.send(envelope.clone())
.map_err(|e| Error::internal(format!("Failed to queue message: {}", e)))?;
// Start processing immediately
if let Some(handler) = self.handlers.get(&envelope.message.kind) {
handler(envelope.clone())
.map_err(|e| Error::internal(format!("Handler error: {}", e)))?;
}
Ok(MessageId(envelope.id))
}
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn process_messages(&mut self) -> Result<()> { pub async fn process_messages(&mut self) -> Result<()> {
while let Ok(envelope) = self.rx.recv().await { while let Ok(envelope) = self.rx.recv().await {
@ -58,6 +80,7 @@ impl MessageProcessor {
if let Err(e) = handler(envelope.clone()) { if let Err(e) = handler(envelope.clone()) {
error!("Handler error for message {}: {}", envelope.id, e); error!("Handler error for message {}: {}", envelope.id, e);
} }
tracing::info!("Processing message: {:?}", &envelope.message.id);
} else { } else {
error!("No handler registered for message kind: {}", envelope.message.kind); error!("No handler registered for message kind: {}", envelope.message.kind);
} }

0
processor.rs Normal file
View file