new(all): Initial import.
commit f7734f2d62
98 changed files with 16309 additions and 0 deletions
1 .gitignore vendored Normal file
@@ -0,0 +1 @@
target
9129 Cargo.lock generated Normal file
File diff suppressed because it is too large
115 Cargo.toml Normal file
@@ -0,0 +1,115 @@
[workspace]
|
||||
members = [
|
||||
"gb-core", # Core domain models and traits
|
||||
"gb-api", # API layer and server implementation
|
||||
"gb-media", # Media processing and WebRTC handling
|
||||
"gb-messaging", # Message queue and real-time communication
|
||||
"gb-storage", # Database and storage implementations
|
||||
"gb-monitoring", # Metrics, logging and monitoring
|
||||
"gb-auth", # Authentication and authorization
|
||||
"gb-testing", # Integration and load testing
|
||||
"gb-migrations", # Database migrations
|
||||
#"gb-cloud", # Cloud provider integrations
|
||||
#"gb-vm", # Virtual machine and BASIC compiler
|
||||
"gb-automation", # Web and process automation
|
||||
"gb-nlp", # Natural language processing
|
||||
"gb-image", # Image processing capabilities
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["GeneralBots Team"]
|
||||
license = "MIT"
|
||||
|
||||
[workspace.dependencies]
|
||||
# Core async runtime and utilities
|
||||
tokio = { version = "1.34", features = ["full"] }
|
||||
futures = "0.3"
|
||||
async-trait = "0.1"
|
||||
parking_lot = "0.12"
|
||||
|
||||
# Web framework and servers
|
||||
axum = { version = "0.7.9", features = ["ws", "multipart"] }
|
||||
tower = "0.4"
|
||||
tower-http = { version = "0.5", features = ["cors", "trace", "fs"] }
|
||||
hyper = { version = "1.1", features = ["full"] }
|
||||
tonic = { version = "0.10", features = ["tls", "transport"] }
|
||||
|
||||
# Database and storage
|
||||
sqlx = { version = "0.7", features = ["runtime-tokio-native-tls", "postgres", "mysql", "sqlite", "uuid", "time", "json"] }
|
||||
redis = { version = "0.24", features = ["tokio-comp", "connection-manager"] }
|
||||
tikv-client = "0.3"
|
||||
sea-orm = { version = "0.12", features = ["sqlx-postgres", "runtime-tokio-native-tls", "macros"] }
|
||||
|
||||
# Message queues
|
||||
rdkafka = { version = "0.36", features = ["cmake-build", "ssl"] }
|
||||
lapin = "2.3"
|
||||
|
||||
# Serialization and data formats
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
protobuf = "3.3"
|
||||
prost = "0.12"
|
||||
csv = "1.3"
|
||||
|
||||
# WebRTC and media processing
|
||||
webrtc = "0.9"
|
||||
gstreamer = "0.21"
|
||||
opus = "0.3"
|
||||
image = "0.24"
|
||||
|
||||
# Authentication and security
|
||||
jsonwebtoken = "9.2"
|
||||
argon2 = "0.5"
|
||||
ring = "0.17"
|
||||
reqwest = { version = "0.11", features = ["json", "stream"] }
|
||||
|
||||
# Cloud services
|
||||
aws-sdk-core = "1.1"
|
||||
azure_core = "0.15"
|
||||
azure_identity = "0.15"
|
||||
google-cloud-storage = "0.16"
|
||||
|
||||
# Monitoring and metrics
|
||||
prometheus = "0.13"
|
||||
opentelemetry = { version = "0.20", features = ["rt-tokio"] }
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
# Testing
|
||||
criterion = "0.5"
|
||||
mockall = "0.12"
|
||||
fake = { version = "2.9", features = ["derive"] }
|
||||
rstest = "0.18"
|
||||
|
||||
# Utilities
|
||||
uuid = { version = "1.6", features = ["serde", "v4"] }
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
thiserror = "1.0"
|
||||
anyhow = "1.0"
|
||||
regex = "1.10"
|
||||
url = "2.5"
|
||||
rand = "0.8"
|
||||
base64 = "0.21"
|
||||
semver = "1.0"
|
||||
walkdir = "2.4"
|
||||
tempfile = "3.9"
|
||||
|
||||
# Web assembly
|
||||
wasm-bindgen = "0.2"
|
||||
js-sys = "0.3"
|
||||
web-sys = { version = "0.3", features = ["WebSocket", "WebRtcPeerConnection"] }
|
||||
|
||||
# Natural language processing
|
||||
rust-bert = "0.21"
|
||||
tokenizers = "0.15"
|
||||
whatlang = "0.16"
|
||||
|
||||
# PDF and document processing
|
||||
pdf = "0.8"
|
||||
docx = "1.1"
|
||||
zip = "0.6"
|
||||
|
||||
[workspace.metadata]
|
||||
msrv = "1.70.0"
|
28 deploy.sh Executable file
@@ -0,0 +1,28 @@
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "Deploying General Bots platform..."
|
||||
|
||||
# Create namespace
|
||||
kubectl apply -f k8s/base/namespace.yaml
|
||||
|
||||
# Deploy infrastructure components
|
||||
kubectl apply -f k8s/base/postgres.yaml
|
||||
kubectl apply -f k8s/base/redis.yaml
|
||||
kubectl apply -f k8s/base/kafka.yaml
|
||||
kubectl apply -f k8s/base/monitoring.yaml
|
||||
|
||||
# Deploy application components
|
||||
kubectl apply -f k8s/base/api.yaml
|
||||
kubectl apply -f k8s/base/webrtc.yaml
|
||||
kubectl apply -f k8s/base/vm.yaml
|
||||
kubectl apply -f k8s/base/nlp.yaml
|
||||
kubectl apply -f k8s/base/image.yaml
|
||||
kubectl apply -f k8s/base/document.yaml
|
||||
|
||||
# Deploy ingress rules
|
||||
kubectl apply -f k8s/base/ingress.yaml
|
||||
|
||||
echo "Deployment completed successfully!"
|
||||
echo "Please wait for all pods to be ready..."
|
||||
kubectl -n general-bots get pods -w
|
24 gb-api/Cargo.toml Normal file
@@ -0,0 +1,24 @@
[package]
|
||||
name = "gb-api"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
gb-messaging = { path = "../gb-messaging" }
|
||||
gb-monitoring = { path = "../gb-monitoring" }
|
||||
tokio.workspace = true
|
||||
axum = { version = "0.7.9", features = ["ws", "multipart"] }
|
||||
tower.workspace = true
|
||||
tower-http = { version = "0.5", features = ["cors", "trace"] }
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
uuid.workspace = true
|
||||
tracing.workspace = true
|
||||
async-trait.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
65 gb-api/src/lib.rs Normal file
@@ -0,0 +1,65 @@
pub mod router;
|
||||
|
||||
pub use router::{create_router, ApiState};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use gb_messaging::MessageProcessor;
|
||||
use axum::Router;
|
||||
use tower::ServiceExt;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_api_integration() {
|
||||
// Initialize message processor
|
||||
let processor = MessageProcessor::new(100);
|
||||
|
||||
// Create router
|
||||
let app: Router = create_router(processor);
|
||||
|
||||
// Test health endpoint
|
||||
let response = app
|
||||
.clone()
|
||||
.oneshot(
|
||||
axum::http::Request::builder()
|
||||
.uri("/health")
|
||||
.body(axum::body::Body::empty())
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(response.status(), axum::http::StatusCode::OK);
|
||||
|
||||
// Test message sending
|
||||
let message = gb_core::models::Message {
|
||||
id: Uuid::new_v4(),
|
||||
customer_id: Uuid::new_v4(),
|
||||
instance_id: Uuid::new_v4(),
|
||||
conversation_id: Uuid::new_v4(),
|
||||
sender_id: Uuid::new_v4(),
|
||||
kind: "test".to_string(),
|
||||
content: "integration test".to_string(),
|
||||
metadata: serde_json::Value::Object(serde_json::Map::new()),
|
||||
created_at: chrono::Utc::now(),
|
||||
shard_key: 0,
|
||||
};
|
||||
|
||||
let response = app
|
||||
.oneshot(
|
||||
axum::http::Request::builder()
|
||||
.method("POST")
|
||||
.uri("/messages")
|
||||
.header("content-type", "application/json")
|
||||
.body(axum::body::Body::from(
|
||||
serde_json::to_string(&message).unwrap()
|
||||
))
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(response.status(), axum::http::StatusCode::OK);
|
||||
}
|
||||
}
|
171 gb-api/src/router.rs Normal file
@@ -0,0 +1,171 @@
use axum::{
|
||||
routing::{get, post},
|
||||
Router,
|
||||
extract::{Path, State, WebSocketUpgrade},
|
||||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use gb_core::{Result, Error, models::*};
|
||||
use gb_messaging::{MessageProcessor, MessageEnvelope};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{instrument, error};
|
||||
use uuid::Uuid;
// Needed for `socket.split()` / `receiver.next()` in the WebSocket handler below;
// assumes the `futures` workspace dependency is also added to gb-api's Cargo.toml.
use futures::StreamExt;
|
||||
|
||||
pub struct ApiState {
|
||||
message_processor: Arc<Mutex<MessageProcessor>>,
|
||||
}
|
||||
|
||||
pub fn create_router(message_processor: MessageProcessor) -> Router {
|
||||
let state = ApiState {
|
||||
message_processor: Arc::new(Mutex::new(message_processor)),
|
||||
};
|
||||
|
||||
Router::new()
|
||||
.route("/health", get(health_check))
|
||||
.route("/ws", get(websocket_handler))
|
||||
.route("/messages", post(send_message))
|
||||
.route("/messages/:id", get(get_message))
|
||||
.route("/rooms", post(create_room))
|
||||
.route("/rooms/:id", get(get_room))
|
||||
.route("/rooms/:id/join", post(join_room))
|
||||
.with_state(Arc::new(state))
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn health_check() -> &'static str {
|
||||
"OK"
|
||||
}
|
||||
|
||||
#[instrument(skip(state, ws))]
|
||||
async fn websocket_handler(
|
||||
State(state): State<Arc<ApiState>>,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> impl IntoResponse {
|
||||
ws.on_upgrade(|socket| async move {
|
||||
let (mut sender, mut receiver) = socket.split();
|
||||
|
||||
while let Some(Ok(msg)) = receiver.next().await {
|
||||
if let Ok(text) = msg.to_text() {
|
||||
if let Ok(envelope) = serde_json::from_str::<MessageEnvelope>(text) {
|
||||
let mut processor = state.message_processor.lock().await;
|
||||
if let Err(e) = processor.sender().send(envelope).await {
|
||||
error!("Failed to process WebSocket message: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(state, message))]
|
||||
async fn send_message(
|
||||
State(state): State<Arc<ApiState>>,
|
||||
Json(message): Json<Message>,
|
||||
) -> Result<Json<MessageId>> {
|
||||
let envelope = MessageEnvelope {
|
||||
id: Uuid::new_v4(),
|
||||
message,
|
||||
metadata: std::collections::HashMap::new(),
|
||||
};
|
||||
|
||||
let mut processor = state.message_processor.lock().await;
|
||||
processor.sender().send(envelope.clone()).await
|
||||
.map_err(|e| Error::Internal(format!("Failed to send message: {}", e)))?;
|
||||
|
||||
Ok(Json(MessageId(envelope.id)))
|
||||
}
|
||||
|
||||
#[instrument(skip(state))]
|
||||
async fn get_message(
|
||||
State(state): State<Arc<ApiState>>,
|
||||
Path(id): Path<Uuid>,
|
||||
) -> Result<Json<Message>> {
|
||||
// Implement message retrieval logic
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[instrument(skip(state, config))]
|
||||
async fn create_room(
|
||||
State(state): State<Arc<ApiState>>,
|
||||
Json(config): Json<RoomConfig>,
|
||||
) -> Result<Json<Room>> {
|
||||
// Implement room creation logic
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[instrument(skip(state))]
|
||||
async fn get_room(
|
||||
State(state): State<Arc<ApiState>>,
|
||||
Path(id): Path<Uuid>,
|
||||
) -> Result<Json<Room>> {
|
||||
// Implement room retrieval logic
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[instrument(skip(state))]
|
||||
async fn join_room(
|
||||
State(state): State<Arc<ApiState>>,
|
||||
Path(id): Path<Uuid>,
|
||||
Json(user_id): Json<Uuid>,
|
||||
) -> Result<Json<Connection>> {
|
||||
// Implement room joining logic
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use axum::http::StatusCode;
|
||||
use axum::body::Body;
|
||||
use tower::ServiceExt;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_health_check() {
|
||||
let app = create_router(MessageProcessor::new(100));
|
||||
|
||||
let response = app
|
||||
.oneshot(
|
||||
axum::http::Request::builder()
|
||||
.uri("/health")
|
||||
.body(Body::empty())
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(response.status(), StatusCode::OK);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_send_message() {
|
||||
let app = create_router(MessageProcessor::new(100));
|
||||
|
||||
let message = Message {
|
||||
id: Uuid::new_v4(),
|
||||
customer_id: Uuid::new_v4(),
|
||||
instance_id: Uuid::new_v4(),
|
||||
conversation_id: Uuid::new_v4(),
|
||||
sender_id: Uuid::new_v4(),
|
||||
kind: "test".to_string(),
|
||||
content: "test message".to_string(),
|
||||
metadata: serde_json::Value::Object(serde_json::Map::new()),
|
||||
created_at: chrono::Utc::now(),
|
||||
shard_key: 0,
|
||||
};
|
||||
|
||||
let response = app
|
||||
.oneshot(
|
||||
axum::http::Request::builder()
|
||||
.method("POST")
|
||||
.uri("/messages")
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(serde_json::to_string(&message).unwrap()))
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(response.status(), StatusCode::OK);
|
||||
}
|
||||
}
|
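The crate above only exposes `create_router`; this commit does not include a binary that serves it. A minimal sketch of what a gb-api main.rs could look like with axum 0.7 and tokio — the bind address, the binary wiring, and the `tracing_subscriber` call are assumptions, not part of the import:

// src/main.rs (hypothetical) - serves the router built by create_router().
use gb_api::create_router;
use gb_messaging::MessageProcessor;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumes tracing-subscriber is added as a dependency.
    tracing_subscriber::fmt::init();

    // Queue capacity of 100 matches the value used in the tests above.
    let processor = MessageProcessor::new(100);
    let app = create_router(processor);

    // Bind address is an assumption; axum 0.7 serves from a tokio TcpListener.
    let listener = tokio::net::TcpListener::bind("0.0.0.0:8080").await?;
    axum::serve(listener, app).await?;
    Ok(())
}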
52 gb-auth/Cargo.toml Normal file
@@ -0,0 +1,52 @@
[package]
|
||||
name = "gb-auth"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
|
||||
# Authentication & Security
|
||||
jsonwebtoken = "9.2"
|
||||
argon2 = "0.5"
|
||||
rand = { version = "0.8", features = ["std"] }
|
||||
oauth2 = "4.4"
|
||||
openid = "0.12"
|
||||
tokio-openssl = "0.6"
|
||||
ring = "0.17"
|
||||
|
||||
# Async Runtime
|
||||
tokio.workspace = true
|
||||
async-trait.workspace = true
|
||||
|
||||
# Web Framework
|
||||
axum = { version = "0.7.9" }
|
||||
tower = "0.4"
|
||||
tower-http = { version = "0.5", features = ["auth", "cors"] }
|
||||
headers = "0.3"
|
||||
|
||||
# Database
|
||||
sqlx = { version = "0.7", features = ["runtime-tokio-native-tls", "postgres", "uuid", "chrono", "json"] }
|
||||
redis = { version = "0.24", features = ["tokio-comp", "json"] }
|
||||
|
||||
# Serialization
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
|
||||
# Error Handling
|
||||
thiserror = "1.0"
|
||||
|
||||
# Logging & Metrics
|
||||
tracing.workspace = true
|
||||
|
||||
# Utils
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
uuid = { version = "1.6", features = ["serde", "v4"] }
|
||||
validator = { version = "0.16", features = ["derive"] }
|
||||
|
||||
[dev-dependencies]
|
||||
rstest = "0.18"
|
||||
tokio-test = "0.4"
|
||||
mockall = "0.12"
|
23 gb-auth/config/auth_config.yaml Normal file
@@ -0,0 +1,23 @@
jwt:
|
||||
secret: your_jwt_secret_key_here
|
||||
expiration: 3600 # 1 hour in seconds
|
||||
|
||||
password:
|
||||
min_length: 8
|
||||
require_uppercase: true
|
||||
require_lowercase: true
|
||||
require_numbers: true
|
||||
require_special: true
|
||||
|
||||
oauth:
|
||||
providers:
|
||||
google:
|
||||
client_id: your_google_client_id
|
||||
client_secret: your_google_client_secret
|
||||
github:
|
||||
client_id: your_github_client_id
|
||||
client_secret: your_github_client_secret
|
||||
|
||||
redis:
|
||||
url: redis://localhost:6379
|
||||
session_ttl: 86400 # 24 hours in seconds
|
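Nothing in this commit actually reads this file yet. A hypothetical loader for the `jwt` section using serde — `serde_yaml` is not among the declared dependencies, and the struct names are made up for illustration:

// Hypothetical config loader for auth_config.yaml; `serde_yaml` would need to be added.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct JwtConfig {
    secret: String,
    expiration: u64, // seconds
}

#[derive(Debug, Deserialize)]
struct AuthConfig {
    jwt: JwtConfig,
}

fn load_config(path: &str) -> Result<AuthConfig, Box<dyn std::error::Error>> {
    let raw = std::fs::read_to_string(path)?;
    // Sections not modeled here (password, oauth, redis) are ignored by serde.
    Ok(serde_yaml::from_str(&raw)?)
}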
24 gb-auth/migrations/20231201000000_create_auth_tables.sql Normal file
@@ -0,0 +1,24 @@
-- Create users table
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
email VARCHAR(255) NOT NULL UNIQUE,
|
||||
password_hash VARCHAR(255) NOT NULL,
|
||||
role VARCHAR(50) NOT NULL DEFAULT 'user',
|
||||
status VARCHAR(50) NOT NULL DEFAULT 'active',
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
-- Create sessions table
|
||||
CREATE TABLE IF NOT EXISTS sessions (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
refresh_token VARCHAR(255) NOT NULL UNIQUE,
|
||||
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
-- Create indexes
|
||||
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_refresh_token ON sessions(refresh_token);
|
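For reference, these migrations could be applied at startup with sqlx's embedded migrator. This is a sketch only; it assumes sqlx's `migrate` feature, which the manifests in this commit do not enable:

// Hypothetical: apply the gb-auth migrations at startup with sqlx.
use sqlx::PgPool;

async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::migrate::MigrateError> {
    // The path is resolved relative to the gb-auth crate root at compile time.
    sqlx::migrate!("./migrations").run(pool).await
}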
27 gb-auth/src/handlers/auth_handler.rs Normal file
@@ -0,0 +1,27 @@
use axum::{
|
||||
extract::State,
|
||||
Json,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::{
|
||||
models::{LoginRequest, LoginResponse},
|
||||
services::auth_service::AuthService,
|
||||
Result,
|
||||
};
|
||||
|
||||
pub async fn login(
|
||||
State(auth_service): State<Arc<AuthService>>,
|
||||
Json(request): Json<LoginRequest>,
|
||||
) -> Result<Json<LoginResponse>> {
|
||||
let response = auth_service.login(request).await?;
|
||||
Ok(Json(response))
|
||||
}
|
||||
|
||||
pub async fn logout() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn refresh_token() -> Result<Json<LoginResponse>> {
|
||||
todo!()
|
||||
}
|
27 gb-auth/src/lib.rs Normal file
@@ -0,0 +1,27 @@
pub mod handlers;
|
||||
pub mod middleware;
|
||||
pub mod models;
|
||||
pub mod services;
|
||||
pub mod utils;
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum AuthError {
|
||||
#[error("Authentication failed")]
|
||||
AuthenticationFailed,
|
||||
#[error("Invalid credentials")]
|
||||
InvalidCredentials,
|
||||
#[error("Token expired")]
|
||||
TokenExpired,
|
||||
#[error("Invalid token")]
|
||||
InvalidToken,
|
||||
#[error("Database error: {0}")]
|
||||
Database(#[from] sqlx::Error),
|
||||
#[error("Cache error: {0}")]
|
||||
Cache(#[from] redis::RedisError),
|
||||
#[error("Internal error: {0}")]
|
||||
Internal(String),
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, AuthError>;
|
31 gb-auth/src/middleware/auth_middleware.rs Normal file
@@ -0,0 +1,31 @@
use axum::{
|
||||
async_trait,
|
||||
extract::{FromRequestParts, TypedHeader},
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
http::request::Parts,
|
||||
RequestPartsExt,
|
||||
};
|
||||
use jsonwebtoken::{decode, DecodingKey, Validation};
|
||||
|
||||
use crate::{
|
||||
models::User,
|
||||
AuthError,
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl<S> FromRequestParts<S> for User
|
||||
where
|
||||
S: Send + Sync,
|
||||
{
|
||||
type Rejection = AuthError;
|
||||
|
||||
async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
|
||||
let TypedHeader(Authorization(bearer)) = parts
|
||||
.extract::<TypedHeader<Authorization<Bearer>>>()
|
||||
.await
|
||||
.map_err(|_| AuthError::InvalidToken)?;
|
||||
|
||||
// Implement token validation and user extraction
|
||||
todo!()
|
||||
}
|
||||
}
|
45 gb-auth/src/models/user.rs Normal file
@@ -0,0 +1,45 @@
use serde::{Deserialize, Serialize};
|
||||
use sqlx::FromRow;
|
||||
use uuid::Uuid;
|
||||
use validator::Validate;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, FromRow)]
|
||||
pub struct User {
|
||||
pub id: Uuid,
|
||||
pub email: String,
|
||||
pub password_hash: String,
|
||||
pub role: UserRole,
|
||||
pub status: UserStatus,
|
||||
pub created_at: chrono::DateTime<chrono::Utc>,
|
||||
pub updated_at: chrono::DateTime<chrono::Utc>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub enum UserRole {
|
||||
Admin,
|
||||
User,
|
||||
Service,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub enum UserStatus {
|
||||
Active,
|
||||
Inactive,
|
||||
Suspended,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Validate)]
|
||||
pub struct LoginRequest {
|
||||
#[validate(email)]
|
||||
pub email: String,
|
||||
#[validate(length(min = 8))]
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct LoginResponse {
|
||||
pub access_token: String,
|
||||
pub refresh_token: String,
|
||||
pub token_type: String,
|
||||
pub expires_in: i64,
|
||||
}
|
73 gb-auth/src/services/auth_service.rs Normal file
@@ -0,0 +1,73 @@
use crate::{
|
||||
models::{LoginRequest, LoginResponse, User},
|
||||
Result, AuthError,
|
||||
};
|
||||
use argon2::{
|
||||
password_hash::{rand_core::OsRng, SaltString},
|
||||
Argon2, PasswordHash, PasswordHasher, PasswordVerifier,
|
||||
};
|
||||
use jsonwebtoken::{encode, EncodingKey, Header};
|
||||
use sqlx::PgPool;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct AuthService {
|
||||
db: Arc<PgPool>,
|
||||
jwt_secret: String,
|
||||
jwt_expiration: i64,
|
||||
}
|
||||
|
||||
impl AuthService {
|
||||
pub fn new(db: Arc<PgPool>, jwt_secret: String, jwt_expiration: i64) -> Self {
|
||||
Self {
|
||||
db,
|
||||
jwt_secret,
|
||||
jwt_expiration,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn login(&self, request: LoginRequest) -> Result<LoginResponse> {
|
||||
let user = sqlx::query_as!(
|
||||
User,
|
||||
"SELECT * FROM users WHERE email = $1",
|
||||
request.email
|
||||
)
|
||||
.fetch_optional(&*self.db)
|
||||
.await?
|
||||
.ok_or(AuthError::InvalidCredentials)?;
|
||||
|
||||
self.verify_password(&request.password, &user.password_hash)?;
|
||||
|
||||
let token = self.generate_token(&user)?;
|
||||
|
||||
Ok(LoginResponse {
|
||||
access_token: token,
|
||||
refresh_token: uuid::Uuid::new_v4().to_string(),
|
||||
token_type: "Bearer".to_string(),
|
||||
expires_in: self.jwt_expiration,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn hash_password(&self, password: &str) -> Result<String> {
|
||||
let salt = SaltString::generate(&mut OsRng);
|
||||
let argon2 = Argon2::default();
|
||||
|
||||
argon2
|
||||
.hash_password(password.as_bytes(), &salt)
|
||||
.map(|hash| hash.to_string())
|
||||
.map_err(|e| AuthError::Internal(e.to_string()))
|
||||
}
|
||||
|
||||
fn verify_password(&self, password: &str, hash: &str) -> Result<()> {
|
||||
let parsed_hash = PasswordHash::new(hash)
|
||||
.map_err(|e| AuthError::Internal(e.to_string()))?;
|
||||
|
||||
Argon2::default()
|
||||
.verify_password(password.as_bytes(), &parsed_hash)
|
||||
.map_err(|_| AuthError::InvalidCredentials)
|
||||
}
|
||||
|
||||
fn generate_token(&self, user: &User) -> Result<String> {
|
||||
// Token generation implementation
|
||||
Ok("token".to_string())
|
||||
}
|
||||
}
|
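A hedged usage sketch of `AuthService` as defined above — the connection string, secret, and the `anyhow` error glue are placeholders, and `generate_token` is still a stub, so the returned access token is not meaningful yet:

// Hypothetical wiring of AuthService; values are placeholders.
use gb_auth::services::auth_service::AuthService;
use gb_auth::models::LoginRequest;
use sqlx::PgPool;
use std::sync::Arc;

async fn demo() -> anyhow::Result<()> {
    let pool = PgPool::connect("postgres://localhost/gb_auth").await?;
    let service = AuthService::new(Arc::new(pool), "change-me".to_string(), 3600);

    // hash_password() would be used when inserting a users row.
    let _hash = service.hash_password("password123")?;

    let response = service.login(LoginRequest {
        email: "user@example.com".to_string(),
        password: "password123".to_string(),
    }).await?;
    println!("access token: {}", response.access_token);
    Ok(())
}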
51 gb-auth/tests/auth_service_tests.rs Normal file
@@ -0,0 +1,51 @@
#[cfg(test)]
|
||||
mod tests {
|
||||
use gb_auth::services::auth_service::AuthService;
use gb_auth::models::{LoginRequest, User};
|
||||
use sqlx::PgPool;
|
||||
use std::sync::Arc;
|
||||
use rstest::*;
|
||||
|
||||
async fn setup_test_db() -> PgPool {
|
||||
let database_url = std::env::var("DATABASE_URL")
|
||||
.unwrap_or_else(|_| "postgres://postgres:postgres@localhost/gb_auth_test".to_string());
|
||||
|
||||
PgPool::connect(&database_url)
|
||||
.await
|
||||
.expect("Failed to connect to database")
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
async fn auth_service() -> AuthService {
|
||||
let pool = setup_test_db().await;
|
||||
AuthService::new(
|
||||
Arc::new(pool),
|
||||
"test_secret".to_string(),
|
||||
3600,
|
||||
)
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_login_success(auth_service: AuthService) {
|
||||
let request = LoginRequest {
|
||||
email: "test@example.com".to_string(),
|
||||
password: "password123".to_string(),
|
||||
};
|
||||
|
||||
let result = auth_service.login(request).await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_login_invalid_credentials(auth_service: AuthService) {
|
||||
let request = LoginRequest {
|
||||
email: "wrong@example.com".to_string(),
|
||||
password: "wrongpassword".to_string(),
|
||||
};
|
||||
|
||||
let result = auth_service.login(request).await;
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
26 gb-automation/Cargo.toml Normal file
@@ -0,0 +1,26 @@
[package]
|
||||
name = "gb-automation"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
chromiumoxide = { version = "0.5", features = ["tokio-runtime"] }
|
||||
async-trait.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
thiserror.workspace = true
|
||||
tracing.workspace = true
|
||||
uuid.workspace = true
|
||||
regex = "1.10"
|
||||
fantoccini = "0.19"
|
||||
headless_chrome = "1.0"
|
||||
async-recursion = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
||||
mock_instant = "0.2"
|
36 gb-automation/src/lib.rs Normal file
@@ -0,0 +1,36 @@
pub mod web;
|
||||
pub mod process;
|
||||
|
||||
pub use web::{WebAutomation, Element};
|
||||
pub use process::ProcessAutomation;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use gb_core::Result;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_automation_integration() -> Result<()> {
|
||||
// Initialize automation components
|
||||
let web = WebAutomation::new().await?;
|
||||
let dir = tempdir()?;
|
||||
let process = ProcessAutomation::new(dir.path());
|
||||
|
||||
// Test web automation
|
||||
let page = web.new_page().await?;
|
||||
web.navigate(&page, "https://example.com").await?;
|
||||
let screenshot = web.screenshot(&page, "test.png").await?;
|
||||
|
||||
// Test process automation
|
||||
let output = process.execute("echo", &["Test output"]).await?;
|
||||
assert!(output.contains("Test output"));
|
||||
|
||||
// Test process spawning and cleanup
|
||||
let id = process.spawn("sleep", &["1"]).await?;
|
||||
process.kill(id).await?;
|
||||
process.cleanup().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
136 gb-automation/src/process.rs Normal file
@@ -0,0 +1,136 @@
use gb_core::{Result, Error};
|
||||
use std::{
|
||||
process::{Command, Stdio},
|
||||
path::PathBuf,
|
||||
};
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{instrument, error};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub struct ProcessAutomation {
|
||||
working_dir: PathBuf,
|
||||
processes: Mutex<Vec<Process>>,
|
||||
}
|
||||
|
||||
pub struct Process {
|
||||
id: Uuid,
|
||||
handle: std::process::Child,
|
||||
}
|
||||
|
||||
impl ProcessAutomation {
|
||||
pub fn new<P: Into<PathBuf>>(working_dir: P) -> Self {
|
||||
Self {
|
||||
working_dir: working_dir.into(),
|
||||
processes: Mutex::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self, command))]
|
||||
pub async fn execute(&self, command: &str, args: &[&str]) -> Result<String> {
|
||||
let output = Command::new(command)
|
||||
.args(args)
|
||||
.current_dir(&self.working_dir)
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.output()
|
||||
.map_err(|e| Error::Internal(format!("Failed to execute command: {}", e)))?;
|
||||
|
||||
if !output.status.success() {
    let error = String::from_utf8_lossy(&output.stderr);
    return Err(Error::Internal(format!("Command failed: {}", error)));
}
|
||||
|
||||
let stdout = String::from_utf8_lossy(&output.stdout).to_string();
|
||||
Ok(stdout)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, command))]
|
||||
pub async fn spawn(&self, command: &str, args: &[&str]) -> Result<Uuid> {
|
||||
let child = Command::new(command)
|
||||
.args(args)
|
||||
.current_dir(&self.working_dir)
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.spawn()
|
||||
.map_err(|e| Error::Internal(format!("Failed to spawn process: {}", e)))?;
|
||||
|
||||
let process = Process {
|
||||
id: Uuid::new_v4(),
|
||||
handle: child,
|
||||
};
|
||||
|
||||
let id = process.id;
let mut processes = self.processes.lock().await;
processes.push(process);

Ok(id)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn kill(&self, id: Uuid) -> Result<()> {
|
||||
let mut processes = self.processes.lock().await;
|
||||
|
||||
if let Some(index) = processes.iter().position(|p| p.id == id) {
|
||||
let process = processes.remove(index);
|
||||
process.handle.kill()
|
||||
.map_err(|e| Error::Internal(format!("Failed to kill process: {}", e)))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn cleanup(&self) -> Result<()> {
|
||||
let mut processes = self.processes.lock().await;
|
||||
|
||||
for process in processes.iter_mut() {
|
||||
if let Err(e) = process.handle.kill() {
|
||||
error!("Failed to kill process {}: {}", process.id, e);
|
||||
}
|
||||
}
|
||||
|
||||
processes.clear();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use std::fs;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[fixture]
|
||||
fn automation() -> ProcessAutomation {
|
||||
let dir = tempdir().unwrap();
|
||||
ProcessAutomation::new(dir.path())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_execute(automation: ProcessAutomation) -> Result<()> {
|
||||
let output = automation.execute("echo", &["Hello, World!"]).await?;
|
||||
assert!(output.contains("Hello, World!"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_spawn_and_kill(automation: ProcessAutomation) -> Result<()> {
|
||||
let id = automation.spawn("sleep", &["1"]).await?;
|
||||
automation.kill(id).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_cleanup(automation: ProcessAutomation) -> Result<()> {
|
||||
automation.spawn("sleep", &["1"]).await?;
|
||||
automation.spawn("sleep", &["2"]).await?;
|
||||
automation.cleanup().await?;
|
||||
|
||||
let processes = automation.processes.lock().await;
|
||||
assert!(processes.is_empty());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
184 gb-automation/src/web.rs Normal file
@@ -0,0 +1,184 @@
use gb_core::{Result, Error};
|
||||
use async_recursion::async_recursion;
|
||||
use chromiumoxide::{
|
||||
Browser, BrowserConfig,
|
||||
cdp::browser_protocol::page::ScreenshotFormat,
|
||||
Page,
|
||||
};
|
||||
use std::{sync::Arc, time::Duration};
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{instrument, error};
// Needed for `handler.next()` below; assumes the `futures` workspace dependency
// is also added to gb-automation's Cargo.toml.
use futures::StreamExt;
|
||||
|
||||
pub struct WebAutomation {
|
||||
browser: Arc<Browser>,
|
||||
pages: Arc<Mutex<Vec<Page>>>,
|
||||
}
|
||||
|
||||
impl WebAutomation {
|
||||
#[instrument]
|
||||
pub async fn new() -> Result<Self> {
|
||||
let config = BrowserConfig::builder()
|
||||
.with_head()
|
||||
.window_size(1920, 1080)
|
||||
.build()
.map_err(|e| Error::Internal(format!("Failed to build browser config: {}", e)))?;
|
||||
|
||||
let (browser, mut handler) = Browser::launch(config)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to launch browser: {}", e)))?;
|
||||
|
||||
tokio::spawn(async move {
|
||||
while let Some(h) = handler.next().await {
|
||||
if let Err(e) = h {
|
||||
error!("Browser handler error: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Self {
|
||||
browser: Arc::new(browser),
|
||||
pages: Arc::new(Mutex::new(Vec::new())),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn new_page(&self) -> Result<Page> {
|
||||
let page = self.browser.new_page()
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to create page: {}", e)))?;
|
||||
|
||||
let mut pages = self.pages.lock().await;
|
||||
pages.push(page.clone());
|
||||
|
||||
Ok(page)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn navigate(&self, page: &Page, url: &str) -> Result<()> {
|
||||
page.goto(url)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to navigate: {}", e)))?;
|
||||
|
||||
page.wait_for_navigation()
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to wait for navigation: {}", e)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn get_element(&self, page: &Page, selector: &str) -> Result<Element> {
|
||||
let element = page.find_element(selector)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to find element: {}", e)))?;
|
||||
|
||||
Ok(Element { inner: element })
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn click(&self, element: &Element) -> Result<()> {
|
||||
element.inner.click()
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to click: {}", e)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn type_text(&self, element: &Element, text: &str) -> Result<()> {
|
||||
element.inner.type_str(text)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to type text: {}", e)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn screenshot(&self, page: &Page, path: &str) -> Result<Vec<u8>> {
|
||||
let screenshot = page.screenshot(ScreenshotFormat::PNG, None, true)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to take screenshot: {}", e)))?;
|
||||
|
||||
Ok(screenshot)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn wait_for_selector(&self, page: &Page, selector: &str) -> Result<()> {
|
||||
page.wait_for_element(selector)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to wait for selector: {}", e)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
#[async_recursion]
|
||||
pub async fn wait_for_network_idle(&self, page: &Page) -> Result<()> {
|
||||
let mut retry_count = 0;
|
||||
let max_retries = 10;
|
||||
|
||||
while retry_count < max_retries {
|
||||
if page.wait_for_network_idle(Duration::from_secs(5))
|
||||
.await
|
||||
.is_ok()
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
retry_count += 1;
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
|
||||
Err(Error::Internal("Network did not become idle".to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Element {
|
||||
inner: chromiumoxide::Element,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
async fn automation() -> WebAutomation {
|
||||
WebAutomation::new().await.unwrap()
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_navigation(automation: WebAutomation) -> Result<()> {
|
||||
let page = automation.new_page().await?;
|
||||
automation.navigate(&page, "https://example.com").await?;
|
||||
|
||||
let title = page.title()
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("Failed to get title: {}", e)))?;
|
||||
|
||||
assert!(title.contains("Example"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_element_interaction(automation: WebAutomation) -> Result<()> {
|
||||
let page = automation.new_page().await?;
|
||||
automation.navigate(&page, "https://example.com").await?;
|
||||
|
||||
let element = automation.get_element(&page, "h1").await?;
|
||||
automation.click(&element).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_screenshot(automation: WebAutomation) -> Result<()> {
|
||||
let page = automation.new_page().await?;
|
||||
automation.navigate(&page, "https://example.com").await?;
|
||||
|
||||
let screenshot = automation.screenshot(&page, "test.png").await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
22 gb-core/Cargo.toml Normal file
@@ -0,0 +1,22 @@
[package]
|
||||
name = "gb-core"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
serde.workspace = true
|
||||
uuid.workspace = true
|
||||
tokio.workspace = true
|
||||
thiserror.workspace = true
|
||||
chrono.workspace = true
|
||||
sqlx.workspace = true
|
||||
redis.workspace = true
|
||||
tracing.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
mockall.workspace = true
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
54 gb-core/src/errors.rs Normal file
@@ -0,0 +1,54 @@
use thiserror::Error;
|
||||
use redis::RedisError;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum Error {
|
||||
#[error("Database error: {0}")]
|
||||
Database(#[from] sqlx::Error),
|
||||
|
||||
#[error("Redis error: {0}")]
|
||||
Redis(#[from] redis::RedisError),
|
||||
|
||||
#[error("Kafka error: {0}")]
|
||||
Kafka(String),
|
||||
|
||||
#[error("WebRTC error: {0}")]
|
||||
WebRTC(String),
|
||||
|
||||
#[error("Invalid input: {0}")]
|
||||
InvalidInput(String),
|
||||
|
||||
#[error("Not found: {0}")]
|
||||
NotFound(String),
|
||||
|
||||
#[error("Unauthorized: {0}")]
|
||||
Unauthorized(String),
|
||||
|
||||
#[error("Rate limited: {0}")]
|
||||
RateLimited(String),
|
||||
|
||||
#[error("Resource quota exceeded: {0}")]
|
||||
QuotaExceeded(String),
|
||||
|
||||
#[error("Internal error: {0}")]
|
||||
Internal(String),
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_error_display() {
|
||||
let err = Error::NotFound("User".to_string());
|
||||
assert_eq!(err.to_string(), "Not found: User");
|
||||
|
||||
let err = Error::Unauthorized("Invalid token".to_string());
|
||||
assert_eq!(err.to_string(), "Unauthorized: Invalid token");
|
||||
|
||||
let err = Error::QuotaExceeded("Max instances reached".to_string());
|
||||
assert_eq!(err.to_string(), "Resource quota exceeded: Max instances reached");
|
||||
}
|
||||
}
|
30 gb-core/src/lib.rs Normal file
@@ -0,0 +1,30 @@
pub mod models;
|
||||
pub mod traits;
|
||||
pub mod errors;
|
||||
|
||||
pub use errors::{Error, Result};
|
||||
pub use models::*;
|
||||
pub use traits::*;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
fn customer() -> Customer {
|
||||
Customer::new(
|
||||
"Test Corp".to_string(),
|
||||
"enterprise".to_string(),
|
||||
10,
|
||||
)
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_customer_fixture(customer: Customer) {
|
||||
assert_eq!(customer.name, "Test Corp");
|
||||
assert_eq!(customer.subscription_tier, "enterprise");
|
||||
assert_eq!(customer.max_instances, 10);
|
||||
assert_eq!(customer.status, "active");
|
||||
}
|
||||
}
|
113 gb-core/src/models.rs Normal file
@@ -0,0 +1,113 @@
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::types::JsonValue;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Customer {
|
||||
pub id: Uuid,
|
||||
pub name: String,
|
||||
pub subscription_tier: String,
|
||||
pub status: String,
|
||||
pub max_instances: i32,
|
||||
pub metadata: JsonValue,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Instance {
|
||||
pub id: Uuid,
|
||||
pub customer_id: Uuid,
|
||||
pub name: String,
|
||||
pub status: String,
|
||||
pub shard_id: i32,
|
||||
pub region: String,
|
||||
pub config: JsonValue,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Room {
|
||||
pub id: Uuid,
|
||||
pub customer_id: Uuid,
|
||||
pub instance_id: Uuid,
|
||||
pub name: String,
|
||||
pub kind: String,
|
||||
pub status: String,
|
||||
pub config: JsonValue,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Message {
|
||||
pub id: Uuid,
|
||||
pub customer_id: Uuid,
|
||||
pub instance_id: Uuid,
|
||||
pub conversation_id: Uuid,
|
||||
pub sender_id: Uuid,
|
||||
pub kind: String,
|
||||
pub content: String,
|
||||
pub metadata: JsonValue,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub shard_key: i32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Track {
|
||||
pub id: Uuid,
|
||||
pub room_id: Uuid,
|
||||
pub user_id: Uuid,
|
||||
pub kind: String,
|
||||
pub status: String,
|
||||
pub metadata: JsonValue,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct User {
|
||||
pub id: Uuid,
|
||||
pub customer_id: Uuid,
|
||||
pub instance_id: Uuid,
|
||||
pub name: String,
|
||||
pub email: String,
|
||||
pub status: String,
|
||||
pub metadata: JsonValue,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
impl Customer {
|
||||
pub fn new(
|
||||
name: String,
|
||||
subscription_tier: String,
|
||||
max_instances: i32,
|
||||
) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
name,
|
||||
subscription_tier,
|
||||
status: "active".to_string(),
|
||||
max_instances,
|
||||
metadata: JsonValue::Object(Default::default()), // empty JSON object; the field is a JsonValue, not a HashMap
|
||||
created_at: Utc::now()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[rstest]
|
||||
fn test_customer_creation() {
|
||||
let customer = Customer::new(
|
||||
"Test Corp".to_string(),
|
||||
"enterprise".to_string(),
|
||||
10,
|
||||
);
|
||||
|
||||
assert_eq!(customer.name, "Test Corp");
|
||||
assert_eq!(customer.subscription_tier, "enterprise");
|
||||
assert_eq!(customer.max_instances, 10);
|
||||
assert_eq!(customer.status, "active");
|
||||
}
|
||||
}
|
99 gb-core/src/traits.rs Normal file
@@ -0,0 +1,99 @@
use async_trait::async_trait;
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uuid::Uuid;
|
||||
use crate::{models::*, Result};
|
||||
|
||||
#[async_trait]
|
||||
pub trait CustomerRepository: Send + Sync {
|
||||
async fn create(&self, customer: &Customer) -> Result<Customer>;
|
||||
async fn get(&self, id: Uuid) -> Result<Customer>;
|
||||
async fn update(&self, customer: &Customer) -> Result<Customer>;
|
||||
async fn delete(&self, id: Uuid) -> Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait InstanceRepository: Send + Sync {
|
||||
async fn create(&self, instance: &Instance) -> Result<Instance>;
|
||||
async fn get(&self, id: Uuid) -> Result<Instance>;
|
||||
async fn get_by_customer(&self, customer_id: Uuid) -> Result<Vec<Instance>>;
|
||||
async fn update(&self, instance: &Instance) -> Result<Instance>;
|
||||
async fn delete(&self, id: Uuid) -> Result<()>;
|
||||
async fn get_by_shard(&self, shard_id: i32) -> Result<Vec<Instance>>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait RoomRepository: Send + Sync {
|
||||
async fn create(&self, room: &Room) -> Result<Room>;
|
||||
async fn get(&self, id: Uuid) -> Result<Room>;
|
||||
async fn get_by_instance(&self, instance_id: Uuid) -> Result<Vec<Room>>;
|
||||
async fn update(&self, room: &Room) -> Result<Room>;
|
||||
async fn delete(&self, id: Uuid) -> Result<()>;
|
||||
async fn get_active_rooms(&self, instance_id: Uuid) -> Result<Vec<Room>>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait MessageRepository: Send + Sync {
|
||||
async fn create(&self, message: &Message) -> Result<Message>;
|
||||
async fn get(&self, id: Uuid) -> Result<Message>;
|
||||
async fn get_by_conversation(&self, conversation_id: Uuid) -> Result<Vec<Message>>;
|
||||
async fn update_status(&self, id: Uuid, status: String) -> Result<()>;
|
||||
async fn delete(&self, id: Uuid) -> Result<()>;
|
||||
async fn get_by_shard(&self, shard_key: i32) -> Result<Vec<Message>>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait TrackRepository: Send + Sync {
|
||||
async fn create(&self, track: &Track) -> Result<Track>;
|
||||
async fn get(&self, id: Uuid) -> Result<Track>;
|
||||
async fn get_by_room(&self, room_id: Uuid) -> Result<Vec<Track>>;
|
||||
async fn update(&self, track: &Track) -> Result<Track>;
|
||||
async fn delete(&self, id: Uuid) -> Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait UserRepository: Send + Sync {
|
||||
async fn create(&self, user: &User) -> Result<User>;
|
||||
async fn get(&self, id: Uuid) -> Result<User>;
|
||||
async fn get_by_email(&self, email: &str) -> Result<User>;
|
||||
async fn get_by_instance(&self, instance_id: Uuid) -> Result<Vec<User>>;
|
||||
async fn update(&self, user: &User) -> Result<User>;
|
||||
async fn delete(&self, id: Uuid) -> Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait RoomService: Send + Sync {
|
||||
async fn create_room(&self, config: RoomConfig) -> Result<Room>;
|
||||
async fn join_room(&self, room_id: Uuid, user_id: Uuid) -> Result<Connection>;
|
||||
async fn leave_room(&self, room_id: Uuid, user_id: Uuid) -> Result<()>;
|
||||
async fn publish_track(&self, track: TrackInfo) -> Result<Track>;
|
||||
async fn subscribe_track(&self, track_id: Uuid) -> Result<Subscription>;
|
||||
async fn get_participants(&self, room_id: Uuid) -> Result<Vec<Participant>>;
|
||||
async fn get_room_stats(&self, room_id: Uuid) -> Result<RoomStats>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait MessageService: Send + Sync {
|
||||
async fn send_message(&self, message: Message) -> Result<MessageId>;
|
||||
async fn get_messages(&self, filter: MessageFilter) -> Result<Vec<Message>>;
|
||||
async fn update_status(&self, message_id: Uuid, status: Status) -> Result<()>;
|
||||
async fn delete_messages(&self, filter: MessageFilter) -> Result<()>;
|
||||
async fn search_messages(&self, query: SearchQuery) -> Result<Vec<Message>>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait StorageService: Send + Sync {
|
||||
async fn save_file(&self, file: FileUpload) -> Result<FileInfo>;
|
||||
async fn get_file(&self, file_id: Uuid) -> Result<FileContent>;
|
||||
async fn delete_file(&self, file_id: Uuid) -> Result<()>;
|
||||
async fn list_files(&self, prefix: &str) -> Result<Vec<FileInfo>>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait MetricsService: Send + Sync {
|
||||
async fn record_metric(&self, metric: Metric) -> Result<()>;
|
||||
async fn get_metrics(&self, query: MetricsQuery) -> Result<Vec<MetricValue>>;
|
||||
async fn create_dashboard(&self, config: DashboardConfig) -> Result<Dashboard>;
|
||||
async fn get_dashboard(&self, id: Uuid) -> Result<Dashboard>;
|
||||
}
|
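The traits above only define the repository contracts; no implementation ships in this commit. As an illustration of the intended shape, a minimal in-memory `CustomerRepository` (purely hypothetical, e.g. for unit tests) might look like this:

// Illustrative only: an in-memory CustomerRepository following the trait above.
use async_trait::async_trait;
use gb_core::{models::Customer, traits::CustomerRepository, Error, Result};
use std::{collections::HashMap, sync::Mutex};
use uuid::Uuid;

#[derive(Default)]
pub struct InMemoryCustomerRepository {
    customers: Mutex<HashMap<Uuid, Customer>>,
}

#[async_trait]
impl CustomerRepository for InMemoryCustomerRepository {
    async fn create(&self, customer: &Customer) -> Result<Customer> {
        let mut map = self.customers.lock().unwrap();
        map.insert(customer.id, customer.clone());
        Ok(customer.clone())
    }

    async fn get(&self, id: Uuid) -> Result<Customer> {
        self.customers
            .lock()
            .unwrap()
            .get(&id)
            .cloned()
            .ok_or_else(|| Error::NotFound(format!("customer {}", id)))
    }

    async fn update(&self, customer: &Customer) -> Result<Customer> {
        // Same upsert semantics as create() for this sketch.
        self.create(customer).await
    }

    async fn delete(&self, id: Uuid) -> Result<()> {
        self.customers.lock().unwrap().remove(&id);
        Ok(())
    }
}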
25 gb-document/Cargo.toml Normal file
@@ -0,0 +1,25 @@
[package]
|
||||
name = "gb-document"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
lopdf = "0.31"
|
||||
docx-rs = "0.4"
|
||||
calamine = "0.21"
|
||||
async-trait.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
thiserror.workspace = true
|
||||
tracing.workspace = true
|
||||
encoding_rs = "0.8"
|
||||
zip = "0.6"
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
||||
tempfile = "3.8"
|
35 gb-document/src/excel.rs Normal file
@@ -0,0 +1,35 @@
use gb_core::{Result, Error};
|
||||
use calamine::{Reader, Xlsx, RangeDeserializerBuilder};
|
||||
use std::io::Cursor;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct ExcelProcessor;
|
||||
|
||||
impl ExcelProcessor {
|
||||
#[instrument(skip(data))]
|
||||
pub fn extract_data(data: &[u8]) -> Result<Vec<Vec<String>>> {
|
||||
let cursor = Cursor::new(data);
|
||||
let mut workbook = Xlsx::new(cursor)
|
||||
.map_err(|e| Error::Internal(format!("Failed to read Excel file: {}", e)))?;
|
||||
|
||||
let sheet_name = workbook.sheet_names()[0].clone();
|
||||
let range = workbook.worksheet_range(&sheet_name)
|
||||
.ok_or_else(|| Error::Internal("Failed to get worksheet".to_string()))?
|
||||
.map_err(|e| Error::Internal(format!("Failed to read range: {}", e)))?;
|
||||
|
||||
let mut result = Vec::new();
|
||||
for row in range.rows() {
|
||||
let row_data: Vec<String> = row.iter()
|
||||
.map(|cell| cell.to_string())
|
||||
.collect();
|
||||
result.push(row_data);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
#[instrument(skip(headers, data))]
|
||||
pub fn create_excel(headers: &[&str], data: &[Vec<String>]) -> Result<Vec<u8>> {
|
||||
todo!("Implement Excel creation using a suitable library");
|
||||
}
|
||||
|
127 gb-document/src/pdf.rs Normal file
@@ -0,0 +1,127 @@
use gb_core::{Result, Error};
|
||||
use lopdf::{Document, Object, StringFormat};
|
||||
use std::io::Cursor;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct PdfProcessor;
|
||||
|
||||
impl PdfProcessor {
|
||||
#[instrument(skip(data))]
|
||||
pub fn extract_text(data: &[u8]) -> Result<String> {
|
||||
let doc = Document::load_from(Cursor::new(data))
|
||||
.map_err(|e| Error::Internal(format!("Failed to load PDF: {}", e)))?;
|
||||
|
||||
let mut text = String::new();
|
||||
for page_num in 1..=doc.get_pages().len() {
|
||||
if let Ok(page_text) = Self::extract_page_text(&doc, page_num) {
|
||||
text.push_str(&page_text);
|
||||
text.push('\n');
|
||||
}
|
||||
}
|
||||
|
||||
Ok(text)
|
||||
}
|
||||
|
||||
#[instrument(skip(doc))]
|
||||
fn extract_page_text(doc: &Document, page_num: u32) -> Result<String> {
|
||||
let page = doc.get_page(page_num)
|
||||
.map_err(|e| Error::Internal(format!("Failed to get page {}: {}", page_num, e)))?;
|
||||
|
||||
let contents = doc.get_page_content(page)
|
||||
.map_err(|e| Error::Internal(format!("Failed to get page content: {}", e)))?;
|
||||
|
||||
let mut text = String::new();
|
||||
for content in contents.iter() {
|
||||
if let Ok(Object::String(s, StringFormat::Literal)) = content {
|
||||
if let Ok(decoded) = String::from_utf8(s.clone()) {
|
||||
text.push_str(&decoded);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(text)
|
||||
}
|
||||
|
||||
#[instrument(skip(data))]
|
||||
pub fn merge_pdfs(pdfs: Vec<&[u8]>) -> Result<Vec<u8>> {
|
||||
let mut merged = Document::new();
|
||||
let mut current_page = 1;
|
||||
|
||||
for pdf_data in pdfs {
|
||||
let doc = Document::load_from(Cursor::new(pdf_data))
|
||||
.map_err(|e| Error::Internal(format!("Failed to load PDF: {}", e)))?;
|
||||
|
||||
for (_, page) in doc.get_pages() {
|
||||
merged.add_page(page.clone());
|
||||
current_page += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let mut output = Vec::new();
|
||||
merged.save_to(&mut Cursor::new(&mut output))
|
||||
.map_err(|e| Error::Internal(format!("Failed to save merged PDF: {}", e)))?;
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
#[instrument(skip(data))]
|
||||
pub fn split_pdf(data: &[u8], pages: &[u32]) -> Result<Vec<Vec<u8>>> {
|
||||
let doc = Document::load_from(Cursor::new(data))
|
||||
.map_err(|e| Error::Internal(format!("Failed to load PDF: {}", e)))?;
|
||||
|
||||
let mut result = Vec::new();
|
||||
for &page_num in pages {
|
||||
let mut new_doc = Document::new();
|
||||
if let Ok(page) = doc.get_page(page_num) {
|
||||
new_doc.add_page(page.clone());
|
||||
let mut output = Vec::new();
|
||||
new_doc.save_to(&mut Cursor::new(&mut output))
|
||||
.map_err(|e| Error::Internal(format!("Failed to save split PDF: {}", e)))?;
|
||||
result.push(output);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
fn create_test_pdf() -> Vec<u8> {
|
||||
let mut doc = Document::new();
|
||||
doc.add_page(lopdf::dictionary! {
|
||||
"Type" => "Page",
|
||||
"Contents" => Object::String(b"BT /F1 12 Tf 72 712 Td (Test Page) Tj ET".to_vec(), StringFormat::Literal),
|
||||
});
|
||||
let mut output = Vec::new();
|
||||
doc.save_to(&mut Cursor::new(&mut output)).unwrap();
|
||||
output
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_extract_text() -> Result<()> {
|
||||
let pdf_data = create_test_pdf();
|
||||
let text = PdfProcessor::extract_text(&pdf_data)?;
|
||||
assert!(text.contains("Test Page"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_merge_pdfs() -> Result<()> {
|
||||
let pdf1 = create_test_pdf();
|
||||
let pdf2 = create_test_pdf();
|
||||
let merged = PdfProcessor::merge_pdfs(vec![&pdf1, &pdf2])?;
assert!(!merged.is_empty());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_split_pdf() -> Result<()> {
|
||||
let pdf_data = create_test_pdf();
|
||||
let split = PdfProcessor::split_pdf(&pdf_data, &[1])?;
|
||||
assert_eq!(split.len(), 1);
|
||||
Ok(())
|
||||
}
|
||||
}
|
105 gb-document/src/word.rs Normal file
@@ -0,0 +1,105 @@
use gb_core::{Result, Error};
|
||||
use docx_rs::{Docx, Paragraph, Run, RunText};
|
||||
use std::io::{Cursor, Read};
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct WordProcessor;
|
||||
|
||||
impl WordProcessor {
|
||||
#[instrument(skip(data))]
|
||||
pub fn extract_text(data: &[u8]) -> Result<String> {
|
||||
let doc = Docx::from_reader(Cursor::new(data))
|
||||
.map_err(|e| Error::Internal(format!("Failed to read DOCX: {}", e)))?;
|
||||
|
||||
let mut text = String::new();
|
||||
for para in doc.document.paragraphs() {
|
||||
for run in para.runs() {
|
||||
if let Some(text_content) = run.text() {
|
||||
text.push_str(text_content);
|
||||
}
|
||||
text.push(' ');
|
||||
}
|
||||
text.push('\n');
|
||||
}
|
||||
|
||||
Ok(text)
|
||||
}
|
||||
|
||||
#[instrument(skip(content))]
|
||||
pub fn create_document(content: &str) -> Result<Vec<u8>> {
|
||||
let mut docx = Docx::new();
|
||||
|
||||
for line in content.lines() {
|
||||
let paragraph = Paragraph::new()
|
||||
.add_run(
|
||||
Run::new().add_text(RunText::new(line))
|
||||
);
|
||||
docx = docx.add_paragraph(paragraph);
|
||||
}
|
||||
|
||||
let mut output = Vec::new();
|
||||
docx.build()
|
||||
.pack(&mut Cursor::new(&mut output))
|
||||
.map_err(|e| Error::Internal(format!("Failed to create DOCX: {}", e)))?;
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
#[instrument(skip(template_data, variables))]
|
||||
pub fn fill_template(template_data: &[u8], variables: &serde_json::Value) -> Result<Vec<u8>> {
|
||||
let doc = Docx::from_reader(Cursor::new(template_data))
|
||||
.map_err(|e| Error::Internal(format!("Failed to read template: {}", e)))?;
|
||||
|
||||
let mut new_doc = doc.clone();
|
||||
|
||||
for para in new_doc.document.paragraphs_mut() {
|
||||
for run in para.runs_mut() {
|
||||
if let Some(text) = run.text_mut() {
|
||||
let mut new_text = text.clone();
|
||||
for (key, value) in variables.as_object().unwrap() {
|
||||
let placeholder = format!("{{{}}}", key);
|
||||
new_text = new_text.replace(&placeholder, value.as_str().unwrap_or(""));
|
||||
}
|
||||
*text = new_text;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut output = Vec::new();
|
||||
new_doc.build()
|
||||
.pack(&mut Cursor::new(&mut output))
|
||||
.map_err(|e| Error::Internal(format!("Failed to save filled template: {}", e)))?;
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use serde_json::json;
|
||||
|
||||
#[rstest]
|
||||
fn test_create_document() -> Result<()> {
|
||||
let content = "Test document\nSecond line";
|
||||
let doc_data = WordProcessor::create_document(content)?;
|
||||
|
||||
let extracted_text = WordProcessor::extract_text(&doc_data)?;
|
||||
assert!(extracted_text.contains("Test document"));
|
||||
assert!(extracted_text.contains("Second line"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_fill_template() -> Result<()> {
|
||||
let template = WordProcessor::create_document("Hello, {name}!")?;
let variables = json!({
    "name": "World"
|
||||
});
|
||||
|
||||
let filled = WordProcessor::fill_template(&template, &variables)?;
|
||||
let text = WordProcessor::extract_text(&filled)?;
|
||||
assert!(text.contains("Hello, World!"));
|
||||
Ok(())
|
||||
}
|
||||
}
|
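A minimal usage sketch for the template flow above, assuming gb-document re-exports WordProcessor at its crate root and that placeholders use the single-brace {name} form shown in the tests; the helper name is illustrative only.

use gb_core::Result;
use gb_document::WordProcessor; // assumed re-export path
use serde_json::json;

fn render_invoice() -> Result<Vec<u8>> {
    // Build a template in memory, substitute variables, and return the filled DOCX bytes.
    let template = WordProcessor::create_document("Invoice for {customer}, total {total}.")?;
    let variables = json!({ "customer": "ACME", "total": "42.00" });
    let filled = WordProcessor::fill_template(&template, &variables)?;

    // Sanity-check that substitution happened before handing the bytes back.
    let text = WordProcessor::extract_text(&filled)?;
    assert!(text.contains("Invoice for ACME"));
    Ok(filled)
}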
25
gb-image/Cargo.toml
Normal file
|
@ -0,0 +1,25 @@
|
|||
[package]
|
||||
name = "gb-image"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
image = { version = "0.24", features = ["webp", "jpeg", "png", "gif"] }
|
||||
imageproc = "0.23"
|
||||
rusttype = "0.9"
|
||||
tesseract = "0.13"
|
||||
opencv = { version = "0.84", features = ["clang-runtime"] }
|
||||
async-trait.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
thiserror.workspace = true
|
||||
tracing.workspace = true
tempfile = "3.8"
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
||||
tempfile = "3.8"
|
118
gb-image/src/converter.rs
Normal file
|
@ -0,0 +1,118 @@
|
|||
use gb_core::{Result, Error};
|
||||
use image::{
|
||||
DynamicImage, ImageOutputFormat,
|
||||
codecs::{webp, jpeg, png, gif},
|
||||
};
|
||||
use std::io::Cursor;
|
||||
use tracing::instrument;
|
||||
|
||||
pub struct ImageConverter;
|
||||
|
||||
impl ImageConverter {
|
||||
#[instrument]
|
||||
pub fn to_webp(image: &DynamicImage, quality: u8) -> Result<Vec<u8>> {
|
||||
let mut buffer = Cursor::new(Vec::new());
|
||||
let encoder = webp::WebPEncoder::new_with_quality(&mut buffer, quality as f32);
|
||||
|
||||
encoder.encode(
|
||||
image.as_bytes(),
|
||||
image.width(),
|
||||
image.height(),
|
||||
image.color(),
|
||||
).map_err(|e| Error::Internal(format!("WebP conversion failed: {}", e)))?;
|
||||
|
||||
Ok(buffer.into_inner())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub fn to_jpeg(image: &DynamicImage, quality: u8) -> Result<Vec<u8>> {
|
||||
let mut buffer = Cursor::new(Vec::new());
|
||||
image.write_to(&mut buffer, ImageOutputFormat::Jpeg(quality))
|
||||
.map_err(|e| Error::Internal(format!("JPEG conversion failed: {}", e)))?;
|
||||
|
||||
Ok(buffer.into_inner())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub fn to_png(image: &DynamicImage) -> Result<Vec<u8>> {
|
||||
let mut buffer = Cursor::new(Vec::new());
|
||||
image.write_to(&mut buffer, ImageOutputFormat::Png)
|
||||
.map_err(|e| Error::Internal(format!("PNG conversion failed: {}", e)))?;
|
||||
|
||||
Ok(buffer.into_inner())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub fn to_gif(image: &DynamicImage) -> Result<Vec<u8>> {
|
||||
let mut buffer = Cursor::new(Vec::new());
|
||||
image.write_to(&mut buffer, ImageOutputFormat::Gif)
|
||||
.map_err(|e| Error::Internal(format!("GIF conversion failed: {}", e)))?;
|
||||
|
||||
Ok(buffer.into_inner())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub fn get_format(data: &[u8]) -> Result<ImageFormat> {
|
||||
let format = image::guess_format(data)
|
||||
.map_err(|e| Error::Internal(format!("Failed to determine format: {}", e)))?;
|
||||
|
||||
match format {
|
||||
image::ImageFormat::WebP => Ok(ImageFormat::WebP),
|
||||
image::ImageFormat::Jpeg => Ok(ImageFormat::Jpeg),
|
||||
image::ImageFormat::Png => Ok(ImageFormat::Png),
|
||||
image::ImageFormat::Gif => Ok(ImageFormat::Gif),
|
||||
_ => Err(Error::Internal("Unsupported format".to_string())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum ImageFormat {
|
||||
WebP,
|
||||
Jpeg,
|
||||
Png,
|
||||
Gif,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
fn test_image() -> DynamicImage {
|
||||
DynamicImage::new_rgb8(100, 100)
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_webp_conversion(test_image: DynamicImage) -> Result<()> {
|
||||
let webp_data = ImageConverter::to_webp(&test_image, 80)?;
|
||||
assert!(!webp_data.is_empty());
|
||||
assert_eq!(ImageConverter::get_format(&webp_data)?, ImageFormat::WebP);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_jpeg_conversion(test_image: DynamicImage) -> Result<()> {
|
||||
let jpeg_data = ImageConverter::to_jpeg(&test_image, 80)?;
|
||||
assert!(!jpeg_data.is_empty());
|
||||
assert_eq!(ImageConverter::get_format(&jpeg_data)?, ImageFormat::Jpeg);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_png_conversion(test_image: DynamicImage) -> Result<()> {
|
||||
let png_data = ImageConverter::to_png(&test_image)?;
|
||||
assert!(!png_data.is_empty());
|
||||
assert_eq!(ImageConverter::get_format(&png_data)?, ImageFormat::Png);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_gif_conversion(test_image: DynamicImage) -> Result<()> {
|
||||
let gif_data = ImageConverter::to_gif(&test_image)?;
|
||||
assert!(!gif_data.is_empty());
|
||||
assert_eq!(ImageConverter::get_format(&gif_data)?, ImageFormat::Gif);
|
||||
Ok(())
|
||||
}
|
||||
}
|
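A short conversion sketch building on the converter above; it assumes the caller also depends on the image crate for decoding, and the helper name is illustrative.

use gb_core::{Error, Result};
use gb_image::{ImageConverter, ImageFormat};

fn recompress_to_webp(input: &[u8]) -> Result<Vec<u8>> {
    // Decode whatever format the caller handed us, then re-encode as WebP at quality 80.
    let img = image::load_from_memory(input)
        .map_err(|e| Error::Internal(format!("decode failed: {}", e)))?;
    let webp = ImageConverter::to_webp(&img, 80)?;
    debug_assert_eq!(ImageConverter::get_format(&webp)?, ImageFormat::WebP);
    Ok(webp)
}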
55
gb-image/src/lib.rs
Normal file
|
@ -0,0 +1,55 @@
|
|||
pub mod processor;
|
||||
pub mod converter;
|
||||
|
||||
pub use processor::ImageProcessor;
|
||||
pub use converter::{ImageConverter, ImageFormat};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use gb_core::Result;
|
||||
use image::{DynamicImage, Rgba};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_image_processing_integration() -> Result<()> {
|
||||
// Initialize components
|
||||
let processor = ImageProcessor::new()?;
|
||||
|
||||
// Create test image
|
||||
let mut image = DynamicImage::new_rgb8(200, 200);
|
||||
|
||||
// Test image processing operations
|
||||
let resized = processor.resize(&image, 100, 100);
|
||||
assert_eq!(resized.width(), 100);
|
||||
assert_eq!(resized.height(), 100);
|
||||
|
||||
let cropped = processor.crop(&image, 50, 50, 100, 100)?;
|
||||
assert_eq!(cropped.width(), 100);
|
||||
assert_eq!(cropped.height(), 100);
|
||||
|
||||
let blurred = processor.apply_blur(&image, 1.0);
|
||||
let brightened = processor.adjust_brightness(&image, 10);
|
||||
let contrasted = processor.adjust_contrast(&image, 1.2);
|
||||
|
||||
// Test text addition
|
||||
processor.add_text(
|
||||
&mut image,
|
||||
"Integration Test",
|
||||
10,
|
||||
10,
|
||||
24.0,
|
||||
Rgba([0, 0, 0, 255]),
|
||||
)?;
|
||||
|
||||
// Test format conversion
|
||||
let webp_data = ImageConverter::to_webp(&image, 80)?;
|
||||
let jpeg_data = ImageConverter::to_jpeg(&image, 80)?;
|
||||
let png_data = ImageConverter::to_png(&image)?;
|
||||
let gif_data = ImageConverter::to_gif(&image)?;
|
||||
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
245
gb-image/src/processor.rs
Normal file
|
@ -0,0 +1,245 @@
|
|||
use gb_core::{Result, Error};
|
||||
use image::{
|
||||
DynamicImage, ImageBuffer, Rgba, GenericImageView,
|
||||
imageops::{blur, brighten, contrast},
|
||||
};
|
||||
use imageproc::{
|
||||
drawing::{draw_text_mut, draw_filled_rect_mut},
|
||||
rect::Rect,
|
||||
};
|
||||
use rusttype::{Font, Scale};
|
||||
use std::path::Path;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct ImageProcessor {
|
||||
default_font: Font<'static>,
|
||||
}
|
||||
|
||||
impl ImageProcessor {
|
||||
pub fn new() -> Result<Self> {
|
||||
let font_data = include_bytes!("../assets/DejaVuSans.ttf");
|
||||
let font = Font::try_from_bytes(font_data)
|
||||
.ok_or_else(|| Error::Internal("Failed to load font".to_string()))?;
|
||||
|
||||
Ok(Self {
|
||||
default_font: font,
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image_data))]
|
||||
pub fn load_image(&self, image_data: &[u8]) -> Result<DynamicImage> {
|
||||
image::load_from_memory(image_data)
|
||||
.map_err(|e| Error::Internal(format!("Failed to load image: {}", e)))
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn save_image(&self, image: &DynamicImage, path: &Path) -> Result<()> {
|
||||
image.save(path)
|
||||
.map_err(|e| Error::Internal(format!("Failed to save image: {}", e)))
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn resize(&self, image: &DynamicImage, width: u32, height: u32) -> DynamicImage {
|
||||
image.resize(width, height, image::imageops::FilterType::Lanczos3)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn crop(&self, image: &DynamicImage, x: u32, y: u32, width: u32, height: u32) -> Result<DynamicImage> {
|
||||
// crop_imm returns an owned DynamicImage directly, so no error mapping is needed here.
Ok(image.crop_imm(x, y, width, height))
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn apply_blur(&self, image: &DynamicImage, sigma: f32) -> DynamicImage {
|
||||
// imageops::blur returns a new buffer instead of mutating its argument,
// so delegate to DynamicImage::blur to actually apply the filter.
image.blur(sigma)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn adjust_brightness(&self, image: &DynamicImage, value: i32) -> DynamicImage {
|
||||
// imageops::brighten returns a new buffer; use the DynamicImage method instead.
image.brighten(value)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn adjust_contrast(&self, image: &DynamicImage, value: f32) -> DynamicImage {
|
||||
// imageops::contrast returns a new buffer; use the DynamicImage method instead.
image.adjust_contrast(value)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image, text))]
|
||||
pub fn add_text(
|
||||
&self,
|
||||
image: &mut DynamicImage,
|
||||
text: &str,
|
||||
x: i32,
|
||||
y: i32,
|
||||
scale: f32,
|
||||
color: Rgba<u8>,
|
||||
) -> Result<()> {
|
||||
let scale = Scale::uniform(scale);
|
||||
|
||||
let mut img = image.to_rgba8();
|
||||
draw_text_mut(
|
||||
&mut img,
|
||||
color,
|
||||
x,
|
||||
y,
|
||||
scale,
|
||||
&self.default_font,
|
||||
text,
|
||||
);
|
||||
|
||||
*image = DynamicImage::ImageRgba8(img);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn add_watermark(
|
||||
&self,
|
||||
image: &mut DynamicImage,
|
||||
watermark: &DynamicImage,
|
||||
x: u32,
|
||||
y: u32,
|
||||
) -> Result<()> {
|
||||
image::imageops::overlay(image, watermark, x, y);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn extract_text(&self, image: &DynamicImage) -> Result<String> {
|
||||
use tesseract::Tesseract;
|
||||
|
||||
let temp_file = tempfile::NamedTempFile::new()
.map_err(|e| Error::Internal(format!("Failed to create temp file: {}", e)))?;

// The temp path has no extension, so pass the format explicitly instead of
// letting image::save guess it from the file name.
image.save_with_format(temp_file.path(), image::ImageFormat::Png)
.map_err(|e| Error::Internal(format!("Failed to save temp image: {}", e)))?;
|
||||
|
||||
let text = Tesseract::new(None, Some("eng"))
|
||||
.map_err(|e| Error::Internal(format!("Failed to initialize Tesseract: {}", e)))?
|
||||
.set_image_from_path(temp_file.path())
|
||||
.map_err(|e| Error::Internal(format!("Failed to set image: {}", e)))?
|
||||
.recognize()
|
||||
.map_err(|e| Error::Internal(format!("Failed to recognize text: {}", e)))?
|
||||
.get_text()
|
||||
.map_err(|e| Error::Internal(format!("Failed to get text: {}", e)))?;
|
||||
|
||||
Ok(text)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, image))]
|
||||
pub fn detect_faces(&self, image: &DynamicImage) -> Result<Vec<Rect>> {
|
||||
use opencv::{
|
||||
core,
|
||||
objdetect::CascadeClassifier,
|
||||
prelude::*,
|
||||
types::VectorOfRect,
|
||||
};
|
||||
|
||||
let mut classifier = CascadeClassifier::new(&format!(
|
||||
"{}/haarcascade_frontalface_default.xml",
|
||||
std::env::var("OPENCV_DATA_PATH")
|
||||
.unwrap_or_else(|_| "/usr/share/opencv4".to_string())
|
||||
)).map_err(|e| Error::Internal(format!("Failed to load classifier: {}", e)))?;
|
||||
|
||||
let mut img = core::Mat::new_rows_cols_with_default(
|
||||
image.height() as i32,
|
||||
image.width() as i32,
|
||||
core::CV_8UC3,
|
||||
core::Scalar::all(0.0),
|
||||
).map_err(|e| Error::Internal(format!("Failed to create Mat: {}", e)))?;
|
||||
|
||||
// Convert DynamicImage to OpenCV Mat
|
||||
let rgb = image.to_rgb8();
|
||||
unsafe {
|
||||
img.set_data(rgb.as_raw().as_ptr() as *mut u8, core::CV_8UC3)?;
|
||||
}
|
||||
|
||||
let mut faces = VectorOfRect::new();
|
||||
classifier.detect_multi_scale(
|
||||
&img,
|
||||
&mut faces,
|
||||
1.1,
|
||||
3,
|
||||
0,
|
||||
core::Size::new(30, 30),
|
||||
core::Size::new(0, 0),
|
||||
).map_err(|e| Error::Internal(format!("Face detection failed: {}", e)))?;
|
||||
|
||||
Ok(faces.iter().map(|r| Rect::at(r.x, r.y).of_size(r.width as u32, r.height as u32))
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[fixture]
|
||||
fn processor() -> ImageProcessor {
|
||||
ImageProcessor::new().unwrap()
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn test_image() -> DynamicImage {
|
||||
DynamicImage::new_rgb8(100, 100)
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_resize(processor: ImageProcessor, test_image: DynamicImage) {
|
||||
let resized = processor.resize(&test_image, 50, 50);
|
||||
assert_eq!(resized.width(), 50);
|
||||
assert_eq!(resized.height(), 50);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_crop(processor: ImageProcessor, test_image: DynamicImage) -> Result<()> {
|
||||
let cropped = processor.crop(&test_image, 25, 25, 50, 50)?;
|
||||
assert_eq!(cropped.width(), 50);
|
||||
assert_eq!(cropped.height(), 50);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_add_text(processor: ImageProcessor, mut test_image: DynamicImage) -> Result<()> {
|
||||
processor.add_text(
|
||||
&mut test_image,
|
||||
"Test",
|
||||
10,
|
||||
10,
|
||||
12.0,
|
||||
Rgba([255, 255, 255, 255]),
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_extract_text(processor: ImageProcessor, mut test_image: DynamicImage) -> Result<()> {
|
||||
processor.add_text(
|
||||
&mut test_image,
|
||||
"Test OCR",
|
||||
10,
|
||||
10,
|
||||
24.0,
|
||||
Rgba([0, 0, 0, 255]),
|
||||
)?;
|
||||
|
||||
let text = processor.extract_text(&test_image)?;
|
||||
assert!(text.contains("Test OCR"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_detect_faces(processor: ImageProcessor, test_image: DynamicImage) -> Result<()> {
|
||||
let faces = processor.detect_faces(&test_image)?;
|
||||
assert!(faces.is_empty()); // Test image has no faces
|
||||
Ok(())
|
||||
}
|
||||
}
|
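A hedged end-to-end sketch of the processor above: load, resize, caption, and re-encode. It assumes the bundled DejaVuSans.ttf asset is present (ImageProcessor::new embeds it at compile time); the function name and sizes are illustrative.

use gb_core::Result;
use gb_image::{ImageConverter, ImageProcessor};
use image::Rgba;

fn captioned_thumbnail(input: &[u8]) -> Result<Vec<u8>> {
    let processor = ImageProcessor::new()?;            // loads the bundled font
    let img = processor.load_image(input)?;
    let mut thumb = processor.resize(&img, 320, 240);  // Lanczos3, preserves aspect ratio
    processor.add_text(&mut thumb, "preview", 8, 8, 16.0, Rgba([255, 255, 255, 255]))?;
    ImageConverter::to_jpeg(&thumb, 85)                 // re-encode for delivery
}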
23
gb-media/Cargo.toml
Normal file
|
@ -0,0 +1,23 @@
|
|||
[package]
|
||||
name = "gb-media"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
tokio.workspace = true
|
||||
webrtc.workspace = true
|
||||
gstreamer.workspace = true
|
||||
opus.workspace = true
|
||||
tracing.workspace = true
|
||||
async-trait.workspace = true
|
||||
serde.workspace = true
|
||||
uuid.workspace = true
|
||||
anyhow.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
mockall.workspace = true
|
||||
tokio-test = "0.4"
|
99
gb-media/src/audio.rs
Normal file
|
@ -0,0 +1,99 @@
|
|||
use gb_core::{Result, Error};
|
||||
use opus::{Decoder, Encoder};
|
||||
use std::io::Cursor;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct AudioProcessor {
|
||||
sample_rate: i32,
|
||||
channels: i32,
|
||||
}
|
||||
|
||||
impl AudioProcessor {
|
||||
pub fn new(sample_rate: i32, channels: i32) -> Self {
|
||||
Self {
|
||||
sample_rate,
|
||||
channels,
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self, input))]
|
||||
pub fn encode(&self, input: &[i16]) -> Result<Vec<u8>> {
|
||||
let mut encoder = Encoder::new(
|
||||
self.sample_rate,
|
||||
if self.channels == 1 {
|
||||
opus::Channels::Mono
|
||||
} else {
|
||||
opus::Channels::Stereo
|
||||
},
|
||||
opus::Application::Voip,
|
||||
).map_err(|e| Error::Internal(format!("Failed to create Opus encoder: {}", e)))?;
|
||||
|
||||
let mut output = vec![0u8; 1024];
|
||||
let encoded_len = encoder.encode(input, &mut output)
|
||||
.map_err(|e| Error::Internal(format!("Failed to encode audio: {}", e)))?;
|
||||
|
||||
output.truncate(encoded_len);
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, input))]
|
||||
pub fn decode(&self, input: &[u8]) -> Result<Vec<i16>> {
|
||||
let mut decoder = Decoder::new(
|
||||
self.sample_rate,
|
||||
if self.channels == 1 {
|
||||
opus::Channels::Mono
|
||||
} else {
|
||||
opus::Channels::Stereo
|
||||
},
|
||||
).map_err(|e| Error::Internal(format!("Failed to create Opus decoder: {}", e)))?;
|
||||
|
||||
let mut output = vec![0i16; 1024];
|
||||
let decoded_len = decoder.decode(input, &mut output, false)
|
||||
.map_err(|e| Error::Internal(format!("Failed to decode audio: {}", e)))?;
|
||||
|
||||
output.truncate(decoded_len);
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
fn audio_processor() -> AudioProcessor {
|
||||
AudioProcessor::new(48000, 2)
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn test_audio() -> Vec<i16> {
|
||||
// Generate 1 second of 440Hz sine wave
|
||||
let sample_rate = 48000;
|
||||
let frequency = 440.0;
|
||||
let duration = 1.0;
|
||||
|
||||
(0..sample_rate)
|
||||
.flat_map(|i| {
|
||||
let t = i as f32 / sample_rate as f32;
|
||||
let value = (2.0 * std::f32::consts::PI * frequency * t).sin();
|
||||
let sample = (value * i16::MAX as f32) as i16;
|
||||
vec![sample, sample] // Stereo
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_encode_decode(audio_processor: AudioProcessor, test_audio: Vec<i16>) {
|
||||
let encoded = audio_processor.encode(&test_audio).unwrap();
|
||||
let decoded = audio_processor.decode(&encoded).unwrap();
|
||||
|
||||
// Verify basic properties
|
||||
assert!(!encoded.is_empty());
|
||||
assert!(!decoded.is_empty());
|
||||
|
||||
// Opus is lossy, so we can't compare exact values
|
||||
// But we can verify the length is the same
|
||||
assert_eq!(decoded.len(), test_audio.len());
|
||||
}
|
||||
}
|
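Opus works on fixed-size frames, so callers of AudioProcessor::encode should pass one frame at a time (2.5 to 60 ms of audio). A sketch of a single round trip, assuming 48 kHz stereo; note that decode above uses a 1024-sample scratch buffer, so stereo frames longer than 10 ms would need a larger buffer there.

use gb_core::Result;
use gb_media::AudioProcessor;

fn roundtrip_one_frame() -> Result<()> {
    let audio = AudioProcessor::new(48000, 2);

    // 10 ms at 48 kHz stereo: 480 samples per channel, interleaved -> 960 i16 values.
    let frame = vec![0i16; 480 * 2];

    let packet = audio.encode(&frame)?;
    let decoded = audio.decode(&packet)?;

    assert!(!packet.is_empty());
    assert!(!decoded.is_empty());
    Ok(())
}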
44
gb-media/src/lib.rs
Normal file
|
@ -0,0 +1,44 @@
|
|||
pub mod webrtc;
|
||||
pub mod processor;
|
||||
pub mod audio;
|
||||
|
||||
pub use webrtc::WebRTCService;
|
||||
pub use processor::{MediaProcessor, MediaMetadata};
|
||||
pub use audio::AudioProcessor;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::PathBuf;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_media_integration() {
|
||||
// Initialize services
|
||||
let webrtc = WebRTCService::new(vec!["stun:stun.l.google.com:19302".to_string()]);
|
||||
let processor = MediaProcessor::new().unwrap();
|
||||
let audio = AudioProcessor::new(48000, 2);
|
||||
|
||||
// Test room creation and joining
|
||||
let room_id = Uuid::new_v4();
|
||||
let user_id = Uuid::new_v4();
|
||||
|
||||
let connection = webrtc.join_room(room_id, user_id).await.unwrap();
|
||||
assert_eq!(connection.room_id, room_id);
|
||||
assert_eq!(connection.user_id, user_id);
|
||||
|
||||
// Test media processing
|
||||
let input_path = PathBuf::from("test_data/test.mp4");
|
||||
if input_path.exists() {
|
||||
let metadata = processor.extract_metadata(input_path.clone()).await.unwrap();
|
||||
assert!(metadata.width.is_some());
|
||||
assert!(metadata.height.is_some());
|
||||
}
|
||||
|
||||
// Test audio processing
|
||||
let test_audio: Vec<i16> = (0..1024).map(|i| i as i16).collect();
|
||||
let encoded = audio.encode(&test_audio).unwrap();
|
||||
let decoded = audio.decode(&encoded).unwrap();
|
||||
assert!(!decoded.is_empty());
|
||||
}
|
||||
}
|
201
gb-media/src/processor.rs
Normal file
|
@ -0,0 +1,201 @@
|
|||
use async_trait::async_trait;
|
||||
use gb_core::{Result, Error};
|
||||
use gstreamer as gst;
|
||||
use std::path::PathBuf;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct MediaProcessor {
|
||||
pipeline: gst::Pipeline,
|
||||
}
|
||||
|
||||
impl MediaProcessor {
|
||||
pub fn new() -> Result<Self> {
|
||||
gst::init().map_err(|e| Error::Internal(format!("Failed to initialize GStreamer: {}", e)))?;
|
||||
|
||||
let pipeline = gst::Pipeline::new();
|
||||
Ok(Self { pipeline })
|
||||
}
|
||||
|
||||
#[instrument(skip(self, input_path, output_path))]
|
||||
pub async fn transcode(
|
||||
&self,
|
||||
input_path: PathBuf,
|
||||
output_path: PathBuf,
|
||||
format: &str,
|
||||
) -> Result<()> {
|
||||
let src = gst::ElementFactory::make("filesrc")
|
||||
.property("location", input_path.to_str().unwrap())
|
||||
.build()
|
||||
.map_err(|e| Error::Internal(format!("Failed to create source element: {}", e)))?;
|
||||
|
||||
let sink = gst::ElementFactory::make("filesink")
|
||||
.property("location", output_path.to_str().unwrap())
|
||||
.build()
|
||||
.map_err(|e| Error::Internal(format!("Failed to create sink element: {}", e)))?;
|
||||
|
||||
let decoder = match format {
|
||||
"h264" => gst::ElementFactory::make("h264parse").build(),
|
||||
"opus" => gst::ElementFactory::make("opusparse").build(),
|
||||
_ => return Err(Error::InvalidInput(format!("Unsupported format: {}", format))),
|
||||
}.map_err(|e| Error::Internal(format!("Failed to create decoder: {}", e)))?;
|
||||
|
||||
self.pipeline.add_many(&[&src, &decoder, &sink])
|
||||
.map_err(|e| Error::Internal(format!("Failed to add elements: {}", e)))?;
|
||||
|
||||
gst::Element::link_many(&[&src, &decoder, &sink])
|
||||
.map_err(|e| Error::Internal(format!("Failed to link elements: {}", e)))?;
|
||||
|
||||
self.pipeline.set_state(gst::State::Playing)
|
||||
.map_err(|e| Error::Internal(format!("Failed to start pipeline: {}", e)))?;
|
||||
|
||||
let bus = self.pipeline.bus().unwrap();
|
||||
|
||||
for msg in bus.iter_timed(gst::ClockTime::NONE) {
|
||||
use gst::MessageView;
|
||||
|
||||
match msg.view() {
|
||||
MessageView::Error(err) => {
|
||||
error!("Error from {:?}: {} ({:?})",
|
||||
err.src().map(|s| s.path_string()),
|
||||
err.error(),
|
||||
err.debug()
|
||||
);
|
||||
return Err(Error::Internal(format!("Pipeline error: {}", err.error())));
|
||||
}
|
||||
MessageView::Eos(_) => break,
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
self.pipeline.set_state(gst::State::Null)
|
||||
.map_err(|e| Error::Internal(format!("Failed to stop pipeline: {}", e)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, input_path))]
|
||||
pub async fn extract_metadata(&self, input_path: PathBuf) -> Result<MediaMetadata> {
|
||||
let src = gst::ElementFactory::make("filesrc")
|
||||
.property("location", input_path.to_str().unwrap())
|
||||
.build()
|
||||
.map_err(|e| Error::Internal(format!("Failed to create source element: {}", e)))?;
|
||||
|
||||
let decodebin = gst::ElementFactory::make("decodebin").build()
|
||||
.map_err(|e| Error::Internal(format!("Failed to create decodebin: {}", e)))?;
|
||||
|
||||
self.pipeline.add_many(&[&src, &decodebin])
|
||||
.map_err(|e| Error::Internal(format!("Failed to add elements: {}", e)))?;
|
||||
|
||||
gst::Element::link_many(&[&src, &decodebin])
|
||||
.map_err(|e| Error::Internal(format!("Failed to link elements: {}", e)))?;
|
||||
|
||||
let mut metadata = MediaMetadata::default();
|
||||
|
||||
decodebin.connect_pad_added(move |_, pad| {
|
||||
let caps = pad.current_caps().unwrap();
|
||||
let structure = caps.structure(0).unwrap();
|
||||
|
||||
match structure.name() {
|
||||
"video/x-raw" => {
|
||||
if let Ok(width) = structure.get::<i32>("width") {
|
||||
metadata.width = Some(width);
|
||||
}
|
||||
if let Ok(height) = structure.get::<i32>("height") {
|
||||
metadata.height = Some(height);
|
||||
}
|
||||
if let Ok(framerate) = structure.get::<gst::Fraction>("framerate") {
|
||||
metadata.framerate = Some(framerate.numer() as f64 / framerate.denom() as f64);
|
||||
}
|
||||
},
|
||||
"audio/x-raw" => {
|
||||
if let Ok(channels) = structure.get::<i32>("channels") {
|
||||
metadata.channels = Some(channels);
|
||||
}
|
||||
if let Ok(rate) = structure.get::<i32>("rate") {
|
||||
metadata.sample_rate = Some(rate);
|
||||
}
|
||||
},
|
||||
_ => (),
|
||||
}
|
||||
});
|
||||
|
||||
self.pipeline.set_state(gst::State::Playing)
|
||||
.map_err(|e| Error::Internal(format!("Failed to start pipeline: {}", e)))?;
|
||||
|
||||
let bus = self.pipeline.bus().unwrap();
|
||||
|
||||
for msg in bus.iter_timed(gst::ClockTime::NONE) {
|
||||
use gst::MessageView;
|
||||
|
||||
match msg.view() {
|
||||
MessageView::Error(err) => {
|
||||
error!("Error from {:?}: {} ({:?})",
|
||||
err.src().map(|s| s.path_string()),
|
||||
err.error(),
|
||||
err.debug()
|
||||
);
|
||||
return Err(Error::Internal(format!("Pipeline error: {}", err.error())));
|
||||
}
|
||||
MessageView::Eos(_) => break,
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
self.pipeline.set_state(gst::State::Null)
|
||||
.map_err(|e| Error::Internal(format!("Failed to stop pipeline: {}", e)))?;
|
||||
|
||||
Ok(metadata)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct MediaMetadata {
|
||||
pub width: Option<i32>,
|
||||
pub height: Option<i32>,
|
||||
pub framerate: Option<f64>,
|
||||
pub channels: Option<i32>,
|
||||
pub sample_rate: Option<i32>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::PathBuf;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
fn media_processor() -> MediaProcessor {
|
||||
MediaProcessor::new().unwrap()
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn test_video_path() -> PathBuf {
|
||||
PathBuf::from("test_data/test.mp4")
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_transcode(media_processor: MediaProcessor, test_video_path: PathBuf) {
|
||||
let output_path = PathBuf::from("test_data/output.mp4");
|
||||
|
||||
let result = media_processor.transcode(
|
||||
test_video_path,
|
||||
output_path.clone(),
|
||||
"h264",
|
||||
).await;
|
||||
|
||||
assert!(result.is_ok());
|
||||
assert!(output_path.exists());
|
||||
std::fs::remove_file(output_path).unwrap();
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_extract_metadata(media_processor: MediaProcessor, test_video_path: PathBuf) {
|
||||
let metadata = media_processor.extract_metadata(test_video_path).await.unwrap();
|
||||
|
||||
assert!(metadata.width.is_some());
|
||||
assert!(metadata.height.is_some());
|
||||
assert!(metadata.framerate.is_some());
|
||||
}
|
||||
}
|
166
gb-media/src/webrtc.rs
Normal file
|
@ -0,0 +1,166 @@
|
|||
use async_trait::async_trait;
|
||||
use gb_core::{
|
||||
models::*,
|
||||
traits::*,
|
||||
Result, Error,
|
||||
};
|
||||
use std::sync::Arc;
use uuid::Uuid;
|
||||
use webrtc::{
|
||||
api::APIBuilder,
|
||||
ice_transport::ice_server::RTCIceServer,
|
||||
peer_connection::configuration::RTCConfiguration,
|
||||
peer_connection::peer_connection_state::RTCPeerConnectionState,
|
||||
peer_connection::RTCPeerConnection,
|
||||
track::track_remote::TrackRemote,
|
||||
};
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct WebRTCService {
|
||||
config: RTCConfiguration,
|
||||
}
|
||||
|
||||
impl WebRTCService {
|
||||
pub fn new(ice_servers: Vec<String>) -> Self {
|
||||
let mut config = RTCConfiguration::default();
|
||||
config.ice_servers = ice_servers
|
||||
.into_iter()
|
||||
.map(|url| RTCIceServer {
|
||||
urls: vec![url],
|
||||
..Default::default()
|
||||
})
|
||||
.collect();
|
||||
|
||||
Self { config }
|
||||
}
|
||||
|
||||
async fn create_peer_connection(&self) -> Result<RTCPeerConnection> {
|
||||
let api = APIBuilder::new().build();
|
||||
|
||||
let peer_connection = api.new_peer_connection(self.config.clone())
|
||||
.await
|
||||
.map_err(|e| Error::WebRTC(format!("Failed to create peer connection: {}", e)))?;
|
||||
|
||||
Ok(peer_connection)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RoomService for WebRTCService {
|
||||
#[instrument(skip(self))]
|
||||
async fn create_room(&self, config: RoomConfig) -> Result<Room> {
|
||||
// Create room implementation
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn join_room(&self, room_id: Uuid, user_id: Uuid) -> Result<Connection> {
|
||||
let peer_connection = self.create_peer_connection().await?;
|
||||
|
||||
// Setup connection handlers
|
||||
peer_connection
|
||||
.on_peer_connection_state_change(Box::new(move |s: RTCPeerConnectionState| {
|
||||
Box::pin(async move {
|
||||
match s {
|
||||
RTCPeerConnectionState::Connected => {
|
||||
tracing::info!("Peer connection connected");
|
||||
}
|
||||
RTCPeerConnectionState::Disconnected
|
||||
| RTCPeerConnectionState::Failed
|
||||
| RTCPeerConnectionState::Closed => {
|
||||
tracing::warn!("Peer connection state changed to {}", s);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
})
|
||||
}));
|
||||
|
||||
peer_connection
|
||||
.on_track(Box::new(move |track: Option<Arc<TrackRemote>>, _receiver| {
|
||||
Box::pin(async move {
|
||||
if let Some(track) = track {
|
||||
tracing::info!(
|
||||
"Received track: {} {}",
|
||||
track.kind(),
|
||||
track.id()
|
||||
);
|
||||
}
|
||||
})
|
||||
}));
|
||||
|
||||
// Create connection object
|
||||
let connection = Connection {
|
||||
id: Uuid::new_v4(),
|
||||
room_id,
|
||||
user_id,
|
||||
ice_servers: self.config.ice_servers.clone(),
|
||||
metadata: serde_json::Value::Object(serde_json::Map::new()),
|
||||
};
|
||||
|
||||
Ok(connection)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn leave_room(&self, room_id: Uuid, user_id: Uuid) -> Result<()> {
|
||||
// Leave room implementation
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn publish_track(&self, track: TrackInfo) -> Result<Track> {
|
||||
// Publish track implementation
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn subscribe_track(&self, track_id: Uuid) -> Result<Subscription> {
|
||||
// Subscribe to track implementation
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn get_participants(&self, room_id: Uuid) -> Result<Vec<Participant>> {
|
||||
// Get participants implementation
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn get_room_stats(&self, room_id: Uuid) -> Result<RoomStats> {
|
||||
// Get room stats implementation
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
fn webrtc_service() -> WebRTCService {
|
||||
WebRTCService::new(vec!["stun:stun.l.google.com:19302".to_string()])
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
|
||||
async fn test_create_peer_connection(webrtc_service: WebRTCService) {
|
||||
let peer_connection = webrtc_service.create_peer_connection().await.unwrap();
|
||||
assert_eq!(
|
||||
peer_connection.connection_state().await,
|
||||
RTCPeerConnectionState::New
|
||||
);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_join_room(webrtc_service: WebRTCService) {
|
||||
let room_id = Uuid::new_v4();
|
||||
let user_id = Uuid::new_v4();
|
||||
|
||||
let connection = webrtc_service.join_room(room_id, user_id).await.unwrap();
|
||||
|
||||
assert_eq!(connection.room_id, room_id);
|
||||
assert_eq!(connection.user_id, user_id);
|
||||
assert!(!connection.ice_servers.is_empty());
|
||||
}
|
||||
}
|
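A small sketch of the only fully implemented path above (join_room; the other RoomService methods are still todo!()). It assumes RoomService is exported from gb_core::traits, as the import block suggests, and uses the same public STUN server as the tests.

use gb_core::{traits::RoomService, Result};
use gb_media::WebRTCService;
use uuid::Uuid;

async fn join_demo_room() -> Result<()> {
    let service = WebRTCService::new(vec!["stun:stun.l.google.com:19302".to_string()]);
    let connection = service.join_room(Uuid::new_v4(), Uuid::new_v4()).await?;

    // The returned Connection carries the ICE servers the client should use.
    println!("connection {} ready ({} ICE servers)", connection.id, connection.ice_servers.len());
    Ok(())
}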
23
gb-messaging/Cargo.toml
Normal file
|
@ -0,0 +1,23 @@
|
|||
[package]
|
||||
name = "gb-messaging"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
tokio.workspace = true
|
||||
rdkafka.workspace = true
|
||||
redis.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
uuid.workspace = true
|
||||
async-trait.workspace = true
|
||||
tracing.workspace = true
|
||||
futures.workspace = true
|
||||
lapin = "2.3"
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
144
gb-messaging/src/kafka.rs
Normal file
|
@ -0,0 +1,144 @@
|
|||
use async_trait::async_trait;
|
||||
use gb_core::{Result, Error, models::Message};
|
||||
use rdkafka::{
|
||||
producer::{FutureProducer, FutureRecord},
|
||||
consumer::{StreamConsumer, Consumer},
|
||||
ClientConfig, Message as KafkaMessage,
|
||||
};
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use std::time::Duration;
|
||||
use tracing::{instrument, error, info};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub struct KafkaBroker {
|
||||
producer: FutureProducer,
|
||||
consumer: StreamConsumer,
|
||||
}
|
||||
|
||||
impl KafkaBroker {
|
||||
pub fn new(brokers: &str, group_id: &str) -> Result<Self> {
|
||||
let producer: FutureProducer = ClientConfig::new()
|
||||
.set("bootstrap.servers", brokers)
|
||||
.set("message.timeout.ms", "5000")
|
||||
.create()
|
||||
.map_err(|e| Error::Kafka(format!("Failed to create producer: {}", e)))?;
|
||||
|
||||
let consumer: StreamConsumer = ClientConfig::new()
|
||||
.set("bootstrap.servers", brokers)
|
||||
.set("group.id", group_id)
|
||||
.set("enable.auto.commit", "true")
|
||||
.set("auto.offset.reset", "earliest")
|
||||
.create()
|
||||
.map_err(|e| Error::Kafka(format!("Failed to create consumer: {}", e)))?;
|
||||
|
||||
Ok(Self {
|
||||
producer,
|
||||
consumer,
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, value))]
|
||||
pub async fn publish<T: Serialize>(&self, topic: &str, key: &str, value: &T) -> Result<()> {
|
||||
let payload = serde_json::to_string(value)
|
||||
.map_err(|e| Error::Internal(format!("Serialization error: {}", e)))?;
|
||||
|
||||
self.producer
|
||||
.send(
|
||||
FutureRecord::to(topic)
|
||||
.key(key)
|
||||
.payload(&payload),
|
||||
Duration::from_secs(5),
|
||||
)
|
||||
.await
|
||||
.map_err(|(e, _)| Error::Kafka(format!("Failed to send message: {}", e)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, handler))]
|
||||
pub async fn subscribe<T, F, Fut>(&self, topics: &[&str], handler: F) -> Result<()>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
F: Fn(T) -> Fut,
|
||||
Fut: std::future::Future<Output = Result<()>>,
|
||||
{
|
||||
self.consumer
|
||||
.subscribe(topics)
|
||||
.map_err(|e| Error::Kafka(format!("Failed to subscribe: {}", e)))?;
|
||||
|
||||
loop {
|
||||
match self.consumer.recv().await {
|
||||
Ok(msg) => {
|
||||
if let Some(payload) = msg.payload() {
|
||||
match serde_json::from_slice::<T>(payload) {
|
||||
Ok(value) => {
|
||||
if let Err(e) = handler(value).await {
|
||||
error!("Handler error: {}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => error!("Deserialization error: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => error!("Consumer error: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
struct TestMessage {
|
||||
id: Uuid,
|
||||
content: String,
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn kafka_broker() -> KafkaBroker {
|
||||
KafkaBroker::new(
|
||||
"localhost:9092",
|
||||
"test-group",
|
||||
).unwrap()
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn test_message() -> TestMessage {
|
||||
TestMessage {
|
||||
id: Uuid::new_v4(),
|
||||
content: "test message".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_publish_subscribe(
|
||||
kafka_broker: KafkaBroker,
|
||||
test_message: TestMessage,
|
||||
) {
|
||||
let topic = "test-topic";
|
||||
let key = test_message.id.to_string();
|
||||
|
||||
// Publish message
|
||||
kafka_broker.publish(topic, &key, &test_message)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Subscribe and verify
|
||||
let handler = |msg: TestMessage| async move {
|
||||
assert_eq!(msg, test_message);
|
||||
Ok(())
|
||||
};
|
||||
|
||||
// Run subscription for a short time
|
||||
tokio::spawn(async move {
|
||||
kafka_broker.subscribe(&[topic], handler).await.unwrap();
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
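A sketch of wiring a typed producer and consumer on top of KafkaBroker, assuming a broker on localhost:9092; the topic, group id, and ChatEvent type are illustrative.

use gb_core::Result;
use gb_messaging::KafkaBroker;
use serde::{Deserialize, Serialize};
use uuid::Uuid;

#[derive(Debug, Serialize, Deserialize)]
struct ChatEvent {
    id: Uuid,
    body: String,
}

async fn run() -> Result<()> {
    let broker = KafkaBroker::new("localhost:9092", "chat-workers")?;

    // The key controls partition affinity; here it is the event id.
    let event = ChatEvent { id: Uuid::new_v4(), body: "hello".into() };
    broker.publish("chat-events", &event.id.to_string(), &event).await?;

    // subscribe() loops on the consumer forever, so it normally runs on its own task.
    broker
        .subscribe(&["chat-events"], |evt: ChatEvent| async move {
            println!("got {}", evt.body);
            Ok(())
        })
        .await
}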
102
gb-messaging/src/lib.rs
Normal file
|
@ -0,0 +1,102 @@
|
|||
pub mod kafka;
|
||||
pub mod redis_pubsub;
|
||||
pub mod rabbitmq;
|
||||
pub mod websocket;
|
||||
pub mod processor;
|
||||
|
||||
pub use kafka::KafkaBroker;
|
||||
pub use redis_pubsub::RedisPubSub;
|
||||
pub use rabbitmq::RabbitMQ;
|
||||
pub use websocket::WebSocketClient;
|
||||
pub use processor::{MessageProcessor, MessageEnvelope};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use gb_core::models::Message;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Duration;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
struct TestMessage {
|
||||
id: Uuid,
|
||||
content: String,
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_messaging_integration() {
|
||||
// Initialize message brokers
|
||||
let kafka = KafkaBroker::new(
|
||||
"localhost:9092",
|
||||
"test-group",
|
||||
).unwrap();
|
||||
|
||||
let redis = RedisPubSub::new("redis://localhost")
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let rabbitmq = RabbitMQ::new("amqp://localhost:5672")
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let websocket = WebSocketClient::connect("ws://localhost:8080")
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Create test message
|
||||
let test_message = TestMessage {
|
||||
id: Uuid::new_v4(),
|
||||
content: "integration test".to_string(),
|
||||
};
|
||||
|
||||
// Test Kafka
|
||||
kafka.publish("test-topic", &test_message.id.to_string(), &test_message)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Test Redis PubSub
|
||||
redis.publish("test-channel", &test_message)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Test RabbitMQ
|
||||
rabbitmq.publish("", "test.key", &test_message)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Test WebSocket
|
||||
websocket.send(&test_message)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Test Message Processor
|
||||
let mut processor = MessageProcessor::new(100);
|
||||
|
||||
processor.register_handler("test", |envelope| {
|
||||
println!("Processed message: {}", envelope.message.content);
|
||||
Ok(())
|
||||
});
|
||||
|
||||
let message = Message {
|
||||
id: Uuid::new_v4(),
|
||||
customer_id: Uuid::new_v4(),
|
||||
instance_id: Uuid::new_v4(),
|
||||
conversation_id: Uuid::new_v4(),
|
||||
sender_id: Uuid::new_v4(),
|
||||
kind: "test".to_string(),
|
||||
content: "test content".to_string(),
|
||||
metadata: serde_json::Value::Object(serde_json::Map::new()),
|
||||
created_at: chrono::Utc::now(),
|
||||
shard_key: 0,
|
||||
};
|
||||
|
||||
let envelope = MessageEnvelope {
|
||||
id: Uuid::new_v4(),
|
||||
message,
|
||||
metadata: std::collections::HashMap::new(),
|
||||
};
|
||||
|
||||
processor.sender().send(envelope).await.unwrap();
|
||||
}
|
||||
}
|
121
gb-messaging/src/processor.rs
Normal file
|
@ -0,0 +1,121 @@
|
|||
use gb_core::{Result, Error, models::Message};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{instrument, error};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MessageEnvelope {
|
||||
pub id: Uuid,
|
||||
pub message: Message,
|
||||
pub metadata: HashMap<String, String>,
|
||||
}
|
||||
|
||||
pub struct MessageProcessor {
|
||||
tx: mpsc::Sender<MessageEnvelope>,
|
||||
rx: mpsc::Receiver<MessageEnvelope>,
|
||||
handlers: HashMap<String, Box<dyn Fn(MessageEnvelope) -> Result<()> + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl MessageProcessor {
|
||||
pub fn new(buffer_size: usize) -> Self {
|
||||
let (tx, rx) = mpsc::channel(buffer_size);
|
||||
|
||||
Self {
|
||||
tx,
|
||||
rx,
|
||||
handlers: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn sender(&self) -> mpsc::Sender<MessageEnvelope> {
|
||||
self.tx.clone()
|
||||
}
|
||||
|
||||
#[instrument(skip(self, handler))]
|
||||
pub fn register_handler<F>(&mut self, kind: &str, handler: F)
|
||||
where
|
||||
F: Fn(MessageEnvelope) -> Result<()> + Send + Sync + 'static,
|
||||
{
|
||||
self.handlers.insert(kind.to_string(), Box::new(handler));
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn process_messages(&mut self) -> Result<()> {
|
||||
while let Some(envelope) = self.rx.recv().await {
|
||||
if let Some(handler) = self.handlers.get(&envelope.message.kind) {
|
||||
if let Err(e) = handler(envelope.clone()) {
|
||||
error!("Handler error for message {}: {}", envelope.id, e);
|
||||
}
|
||||
} else {
|
||||
error!("No handler registered for message kind: {}", envelope.message.kind);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use std::sync::Arc;
use std::time::Duration;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
#[fixture]
|
||||
fn test_message() -> Message {
|
||||
Message {
|
||||
id: Uuid::new_v4(),
|
||||
customer_id: Uuid::new_v4(),
|
||||
instance_id: Uuid::new_v4(),
|
||||
conversation_id: Uuid::new_v4(),
|
||||
sender_id: Uuid::new_v4(),
|
||||
kind: "test".to_string(),
|
||||
content: "test content".to_string(),
|
||||
metadata: serde_json::Value::Object(serde_json::Map::new()),
|
||||
created_at: chrono::Utc::now(),
|
||||
shard_key: 0,
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_message_processor(test_message: Message) {
|
||||
let mut processor = MessageProcessor::new(100);
|
||||
let processed = Arc::new(Mutex::new(false));
|
||||
let processed_clone = processed.clone();
|
||||
|
||||
// Register handler
|
||||
processor.register_handler("test", move |envelope| {
|
||||
assert_eq!(envelope.message.content, "test content");
|
||||
let mut processed = processed_clone.blocking_lock();
|
||||
*processed = true;
|
||||
Ok(())
|
||||
});
|
||||
|
||||
// Start processing in background
|
||||
let mut processor_clone = processor.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
processor_clone.process_messages().await.unwrap();
|
||||
});
|
||||
|
||||
// Send test message
|
||||
let envelope = MessageEnvelope {
|
||||
id: Uuid::new_v4(),
|
||||
message: test_message,
|
||||
metadata: HashMap::new(),
|
||||
};
|
||||
|
||||
processor.sender().send(envelope).await.unwrap();
|
||||
|
||||
// Wait for processing
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// Verify message was processed
|
||||
assert!(*processed.lock().await);
|
||||
|
||||
handle.abort();
|
||||
}
|
||||
}
|
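A sketch of driving the processor above outside the test harness: register a handler, spawn the processing loop, and push one envelope through it. The routing kind "test" and the timing mirror the test; in real use the loop would run for the life of the service.

use gb_core::{models::Message, Error, Result};
use gb_messaging::{MessageEnvelope, MessageProcessor};
use std::collections::HashMap;
use std::time::Duration;
use uuid::Uuid;

async fn dispatch_one(message: Message) -> Result<()> {
    let mut processor = MessageProcessor::new(64);
    processor.register_handler("test", |envelope| {
        // Handlers are synchronous; hand heavier work off to another task or queue.
        println!("handled {}", envelope.message.id);
        Ok(())
    });

    let sender = processor.sender();

    // The loop only ends once every sender is dropped, so it gets its own task.
    let worker = tokio::spawn(async move { processor.process_messages().await });

    sender
        .send(MessageEnvelope {
            id: Uuid::new_v4(),
            message,
            metadata: HashMap::new(),
        })
        .await
        .map_err(|e| Error::Internal(format!("queue closed: {}", e)))?;

    tokio::time::sleep(Duration::from_millis(100)).await; // let the handler run
    worker.abort();
    Ok(())
}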
180
gb-messaging/src/rabbitmq.rs
Normal file
|
@ -0,0 +1,180 @@
|
|||
use async_trait::async_trait;
|
||||
use gb_core::{Result, Error};
|
||||
use lapin::{
|
||||
options::*,
|
||||
types::FieldTable,
|
||||
Connection, ConnectionProperties,
|
||||
Channel, Consumer, BasicProperties,
message::Delivery,
};
use futures::StreamExt;
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct RabbitMQ {
|
||||
connection: Connection,
|
||||
channel: Arc<Mutex<Channel>>,
|
||||
}
|
||||
|
||||
impl RabbitMQ {
|
||||
pub async fn new(url: &str) -> Result<Self> {
|
||||
let connection = Connection::connect(
|
||||
url,
|
||||
ConnectionProperties::default(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("RabbitMQ connection error: {}", e)))?;
|
||||
|
||||
let channel = connection.create_channel()
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("RabbitMQ channel error: {}", e)))?;
|
||||
|
||||
Ok(Self {
|
||||
connection,
|
||||
channel: Arc::new(Mutex::new(channel)),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, message))]
|
||||
pub async fn publish<T: Serialize>(
|
||||
&self,
|
||||
exchange: &str,
|
||||
routing_key: &str,
|
||||
message: &T,
|
||||
) -> Result<()> {
|
||||
let payload = serde_json::to_string(message)
|
||||
.map_err(|e| Error::Internal(format!("Serialization error: {}", e)))?;
|
||||
|
||||
let channel = self.channel.lock().await;
|
||||
|
||||
channel.basic_publish(
|
||||
exchange,
|
||||
routing_key,
|
||||
BasicPublishOptions::default(),
|
||||
payload.as_bytes(),
|
||||
BasicProperties::default(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("RabbitMQ publish error: {}", e)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, handler))]
|
||||
pub async fn subscribe<T, F, Fut>(
|
||||
&self,
|
||||
queue: &str,
|
||||
handler: F,
|
||||
) -> Result<()>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
F: Fn(T) -> Fut,
|
||||
Fut: std::future::Future<Output = Result<()>>,
|
||||
{
|
||||
let channel = self.channel.lock().await;
|
||||
|
||||
channel.queue_declare(
|
||||
queue,
|
||||
QueueDeclareOptions::default(),
|
||||
FieldTable::default(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("RabbitMQ queue declare error: {}", e)))?;
|
||||
|
||||
let mut consumer = channel.basic_consume(
|
||||
queue,
|
||||
"consumer",
|
||||
BasicConsumeOptions::default(),
|
||||
FieldTable::default(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("RabbitMQ consume error: {}", e)))?;
|
||||
|
||||
while let Some(delivery) = consumer.next().await {
|
||||
match delivery {
|
||||
Ok(delivery) => {
|
||||
if let Ok(payload) = String::from_utf8(delivery.data.clone()) {
|
||||
match serde_json::from_str::<T>(&payload) {
|
||||
Ok(value) => {
|
||||
if let Err(e) = handler(value).await {
|
||||
error!("Handler error: {}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => error!("Deserialization error: {}", e),
|
||||
}
|
||||
}
|
||||
delivery.ack(BasicAckOptions::default())
|
||||
.await
|
||||
.map_err(|e| error!("Ack error: {}", e)).ok();
|
||||
}
|
||||
Err(e) => error!("Consumer error: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Duration;
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
struct TestMessage {
|
||||
id: Uuid,
|
||||
content: String,
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
async fn rabbitmq() -> RabbitMQ {
|
||||
RabbitMQ::new("amqp://localhost:5672")
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn test_message() -> TestMessage {
|
||||
TestMessage {
|
||||
id: Uuid::new_v4(),
|
||||
content: "test message".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_publish_subscribe(
|
||||
rabbitmq: RabbitMQ,
|
||||
test_message: TestMessage,
|
||||
) {
|
||||
let queue = "test-queue";
|
||||
let routing_key = "test.key";
|
||||
|
||||
// Subscribe first
|
||||
let rabbitmq_clone = rabbitmq.clone();
|
||||
let test_message_clone = test_message.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
let handler = |msg: TestMessage| async move {
|
||||
assert_eq!(msg, test_message_clone);
|
||||
Ok(())
|
||||
};
|
||||
|
||||
rabbitmq_clone.subscribe(queue, handler).await.unwrap();
|
||||
});
|
||||
|
||||
// Give subscription time to establish
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Publish message
|
||||
rabbitmq.publish("", routing_key, &test_message)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Wait for handler to process
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
handle.abort();
|
||||
}
|
||||
}
|
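A publisher-side sketch for the broker above, assuming RabbitMQ on amqp://localhost:5672. Publishing to the empty default exchange makes the routing key double as the queue name.

use gb_core::Result;
use gb_messaging::RabbitMQ;
use serde_json::json;

async fn enqueue_job(routing_key: &str) -> Result<()> {
    let broker = RabbitMQ::new("amqp://localhost:5672").await?;
    broker.publish("", routing_key, &json!({ "kind": "job", "attempt": 1 })).await
}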
144
gb-messaging/src/redis_pubsub.rs
Normal file
|
@ -0,0 +1,144 @@
|
|||
use async_trait::async_trait;
|
||||
use gb_core::{Result, Error};
|
||||
use redis::{
|
||||
aio::MultiplexedConnection,
|
||||
AsyncCommands, Client,
|
||||
};
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use std::sync::Arc;
|
||||
use futures::StreamExt;
use tokio::sync::Mutex;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct RedisPubSub {
|
||||
client: Client,
|
||||
conn: Arc<Mutex<MultiplexedConnection>>,
|
||||
}
|
||||
|
||||
impl RedisPubSub {
|
||||
pub async fn new(url: &str) -> Result<Self> {
|
||||
let client = Client::open(url)
|
||||
.map_err(|e| Error::Redis(e))?;
|
||||
|
||||
let conn = client.get_multiplexed_async_connection()
|
||||
.await
|
||||
.map_err(|e| Error::Redis(e))?;
|
||||
|
||||
Ok(Self {
|
||||
client,
|
||||
conn: Arc::new(Mutex::new(conn)),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, message))]
|
||||
pub async fn publish<T: Serialize>(&self, channel: &str, message: &T) -> Result<()> {
|
||||
let payload = serde_json::to_string(message)
|
||||
.map_err(|e| Error::Internal(format!("Serialization error: {}", e)))?;
|
||||
|
||||
let mut conn = self.conn.lock().await;
|
||||
// annotate the reply type so redis can infer the generic return value
let _: i64 = conn.publish(channel, payload)
|
||||
.await
|
||||
.map_err(|e| Error::Redis(e))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, handler))]
|
||||
pub async fn subscribe<T, F, Fut>(&self, channels: &[&str], handler: F) -> Result<()>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
F: Fn(T) -> Fut,
|
||||
Fut: std::future::Future<Output = Result<()>>,
|
||||
{
|
||||
let mut pubsub = self.client.get_async_connection()
|
||||
.await
|
||||
.map_err(|e| Error::Redis(e))?
|
||||
.into_pubsub();
|
||||
|
||||
for channel in channels {
|
||||
pubsub.subscribe(*channel)
|
||||
.await
|
||||
.map_err(|e| Error::Redis(e))?;
|
||||
}
|
||||
|
||||
let mut stream = pubsub.on_message();
|
||||
|
||||
while let Some(msg) = stream.next().await {
|
||||
let payload: String = msg.get_payload()
|
||||
.map_err(|e| Error::Redis(e))?;
|
||||
|
||||
match serde_json::from_str::<T>(&payload) {
|
||||
Ok(value) => {
|
||||
if let Err(e) = handler(value).await {
|
||||
error!("Handler error: {}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => error!("Deserialization error: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
struct TestMessage {
|
||||
id: Uuid,
|
||||
content: String,
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
async fn redis_pubsub() -> RedisPubSub {
|
||||
RedisPubSub::new("redis://localhost")
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn test_message() -> TestMessage {
|
||||
TestMessage {
|
||||
id: Uuid::new_v4(),
|
||||
content: "test message".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_publish_subscribe(
|
||||
redis_pubsub: RedisPubSub,
|
||||
test_message: TestMessage,
|
||||
) {
|
||||
let channel = "test-channel";
|
||||
|
||||
// Subscribe first
|
||||
let pubsub_clone = redis_pubsub.clone();
|
||||
let test_message_clone = test_message.clone();
|
||||
|
||||
let handle = tokio::spawn(async move {
|
||||
let handler = |msg: TestMessage| async move {
|
||||
assert_eq!(msg, test_message_clone);
|
||||
Ok(())
|
||||
};
|
||||
|
||||
pubsub_clone.subscribe(&[channel], handler).await.unwrap();
|
||||
});
|
||||
|
||||
// Give subscription time to establish
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Publish message
|
||||
redis_pubsub.publish(channel, &test_message)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Wait for handler to process
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
handle.abort();
|
||||
}
|
||||
}
|
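The matching sketch for the Redis layer, assuming a local Redis at redis://localhost; the channel name is illustrative.

use gb_core::Result;
use gb_messaging::RedisPubSub;
use serde_json::json;

async fn notify(channel: &str) -> Result<()> {
    // One multiplexed connection is opened up front and reused for every publish.
    let pubsub = RedisPubSub::new("redis://localhost").await?;
    pubsub.publish(channel, &json!({ "kind": "ping" })).await
}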
155
gb-messaging/src/websocket.rs
Normal file
|
@ -0,0 +1,155 @@
|
|||
use gb_core::{Result, Error};
|
||||
use futures::{
|
||||
stream::{SplitSink, SplitStream},
|
||||
SinkExt, StreamExt,
|
||||
};
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use std::sync::Arc;
|
||||
use tokio::{
|
||||
net::TcpStream,
|
||||
sync::Mutex,
|
||||
};
|
||||
use tokio_tungstenite::{
|
||||
connect_async,
|
||||
tungstenite::Message,
|
||||
MaybeTlsStream, WebSocketStream,
|
||||
};
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct WebSocketClient {
|
||||
write: Arc<Mutex<SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>>>,
|
||||
read: Arc<Mutex<SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>>>,
|
||||
}
|
||||
|
||||
impl WebSocketClient {
|
||||
pub async fn connect(url: &str) -> Result<Self> {
|
||||
let (ws_stream, _) = connect_async(url)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("WebSocket connection error: {}", e)))?;
|
||||
|
||||
let (write, read) = ws_stream.split();
|
||||
|
||||
Ok(Self {
|
||||
write: Arc::new(Mutex::new(write)),
|
||||
read: Arc::new(Mutex::new(read)),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, message))]
|
||||
pub async fn send<T: Serialize>(&self, message: &T) -> Result<()> {
|
||||
let payload = serde_json::to_string(message)
|
||||
.map_err(|e| Error::Internal(format!("Serialization error: {}", e)))?;
|
||||
|
||||
let mut write = self.write.lock().await;
|
||||
write.send(Message::Text(payload))
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("WebSocket send error: {}", e)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, handler))]
|
||||
pub async fn receive<T, F, Fut>(&self, handler: F) -> Result<()>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
F: Fn(T) -> Fut,
|
||||
Fut: std::future::Future<Output = Result<()>>,
|
||||
{
|
||||
let mut read = self.read.lock().await;
|
||||
|
||||
while let Some(message) = read.next().await {
|
||||
match message {
|
||||
Ok(Message::Text(payload)) => {
|
||||
match serde_json::from_str::<T>(&payload) {
|
||||
Ok(value) => {
|
||||
if let Err(e) = handler(value).await {
|
||||
error!("Handler error: {}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => error!("Deserialization error: {}", e),
|
||||
}
|
||||
}
|
||||
Ok(Message::Close(_)) => break,
|
||||
Err(e) => error!("WebSocket receive error: {}", e),
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Duration;
use tokio::net::TcpListener;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
struct TestMessage {
|
||||
id: Uuid,
|
||||
content: String,
|
||||
}
|
||||
|
||||
async fn create_test_server() -> String {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let addr = listener.local_addr().unwrap();
|
||||
|
||||
tokio::spawn(async move {
|
||||
while let Ok((stream, _)) = listener.accept().await {
|
||||
let ws_stream = tokio_tungstenite::accept_async(stream)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let (mut write, mut read) = ws_stream.split();
|
||||
|
||||
while let Some(Ok(msg)) = read.next().await {
|
||||
if let Message::Text(_) = msg {
|
||||
write.send(msg).await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
format!("ws://{}", addr)
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn test_message() -> TestMessage {
|
||||
TestMessage {
|
||||
id: Uuid::new_v4(),
|
||||
content: "test message".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_websocket_client(test_message: TestMessage) {
|
||||
let server_url = create_test_server().await;
|
||||
let client = WebSocketClient::connect(&server_url).await.unwrap();
|
||||
let test_message_clone = test_message.clone();
|
||||
|
||||
// Start receiving messages
|
||||
let client_clone = client.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
let handler = |msg: TestMessage| async move {
|
||||
assert_eq!(msg, test_message_clone);
|
||||
Ok(())
|
||||
};
|
||||
|
||||
client_clone.receive(handler).await.unwrap();
|
||||
});
|
||||
|
||||
// Give receiver time to start
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Send test message
|
||||
client.send(&test_message).await.unwrap();
|
||||
|
||||
// Wait for message to be processed
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
handle.abort();
|
||||
}
|
||||
}
|
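A client-side sketch for the WebSocket wrapper above; the URL and payload are illustrative, and messages go out as JSON text frames.

use gb_core::Result;
use gb_messaging::WebSocketClient;
use serde_json::json;

async fn send_ping(url: &str) -> Result<()> {
    let client = WebSocketClient::connect(url).await?;
    client.send(&json!({ "kind": "ping" })).await
}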
18
gb-migrations/Cargo.toml
Normal file
|
@ -0,0 +1,18 @@
|
|||
[package]
|
||||
name = "gb-migrations"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
tokio.workspace = true
|
||||
sqlx.workspace = true
|
||||
tracing.workspace = true
|
||||
uuid.workspace = true
|
||||
chrono.workspace = true
|
||||
serde_json.workspace = true
|
||||
gb-core = { path = "../gb-core" }
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
124
gb-migrations/src/lib.rs
Normal file
|
@ -0,0 +1,124 @@
|
|||
use sqlx::PgPool;
|
||||
use tracing::info;
|
||||
|
||||
pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::Error> {
|
||||
info!("Running database migrations");
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
CREATE TABLE IF NOT EXISTS customers (
|
||||
id UUID PRIMARY KEY,
|
||||
name VARCHAR(255) NOT NULL,
|
||||
subscription_tier VARCHAR(50) NOT NULL,
|
||||
status VARCHAR(50) NOT NULL,
|
||||
max_instances INTEGER NOT NULL,
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS instances (
|
||||
id UUID PRIMARY KEY,
|
||||
customer_id UUID NOT NULL REFERENCES customers(id),
|
||||
name VARCHAR(255) NOT NULL,
|
||||
status VARCHAR(50) NOT NULL,
|
||||
shard_id INTEGER NOT NULL,
|
||||
region VARCHAR(50) NOT NULL,
|
||||
config JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS rooms (
|
||||
id UUID PRIMARY KEY,
|
||||
customer_id UUID NOT NULL REFERENCES customers(id),
|
||||
instance_id UUID NOT NULL REFERENCES instances(id),
|
||||
name VARCHAR(255) NOT NULL,
|
||||
kind VARCHAR(50) NOT NULL,
|
||||
status VARCHAR(50) NOT NULL,
|
||||
config JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS messages (
|
||||
id UUID PRIMARY KEY,
|
||||
customer_id UUID NOT NULL REFERENCES customers(id),
|
||||
instance_id UUID NOT NULL REFERENCES instances(id),
|
||||
conversation_id UUID NOT NULL,
|
||||
sender_id UUID NOT NULL,
|
||||
kind VARCHAR(50) NOT NULL,
|
||||
content TEXT NOT NULL,
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
shard_key INTEGER NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id UUID PRIMARY KEY,
|
||||
customer_id UUID NOT NULL REFERENCES customers(id),
|
||||
instance_id UUID NOT NULL REFERENCES instances(id),
|
||||
name VARCHAR(255) NOT NULL,
|
||||
email VARCHAR(255) NOT NULL UNIQUE,
|
||||
status VARCHAR(50) NOT NULL,
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tracks (
|
||||
id UUID PRIMARY KEY,
|
||||
room_id UUID NOT NULL REFERENCES rooms(id),
|
||||
user_id UUID NOT NULL REFERENCES users(id),
|
||||
kind VARCHAR(50) NOT NULL,
|
||||
status VARCHAR(50) NOT NULL,
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS subscriptions (
|
||||
id UUID PRIMARY KEY,
|
||||
track_id UUID NOT NULL REFERENCES tracks(id),
|
||||
user_id UUID NOT NULL REFERENCES users(id),
|
||||
status VARCHAR(50) NOT NULL,
|
||||
metadata JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
-- Create indexes for performance
|
||||
CREATE INDEX IF NOT EXISTS idx_instances_customer_id ON instances(customer_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_rooms_instance_id ON rooms(instance_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_conversation_id ON messages(conversation_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_shard_key ON messages(shard_key);
|
||||
CREATE INDEX IF NOT EXISTS idx_tracks_room_id ON tracks(room_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_subscriptions_track_id ON subscriptions(track_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
|
||||
"#,
|
||||
)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
|
||||
info!("Migrations completed successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use sqlx::postgres::{PgPoolOptions, PgPool};
|
||||
use rstest::*;
|
||||
|
||||
async fn create_test_pool() -> PgPool {
|
||||
let database_url = std::env::var("DATABASE_URL")
|
||||
.unwrap_or_else(|_| "postgres://postgres:postgres@localhost/gb_test".to_string());
|
||||
|
||||
PgPoolOptions::new()
|
||||
.max_connections(5)
|
||||
.connect(&database_url)
|
||||
.await
|
||||
.expect("Failed to create test pool")
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_migrations() {
|
||||
let pool = create_test_pool().await;
|
||||
assert!(run_migrations(&pool).await.is_ok());
|
||||
}
|
||||
}
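// Usage sketch (illustrative, not part of this commit): how a service start-up
// path might invoke the migrations. DATABASE_URL mirrors the test above; the
// function name and pool size are assumptions.
pub async fn run_startup_migrations() -> std::result::Result<(), Box<dyn std::error::Error>> {
    let database_url = std::env::var("DATABASE_URL")?;
    let pool = sqlx::postgres::PgPoolOptions::new()
        .max_connections(5)
        .connect(&database_url)
        .await?;
    // Safe to run on every boot: all statements use CREATE ... IF NOT EXISTS.
    run_migrations(&pool).await?;
    Ok(())
}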
|
20
gb-monitoring/Cargo.toml
Normal file
|
@ -0,0 +1,20 @@
|
|||
[package]
|
||||
name = "gb-monitoring"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
tokio.workspace = true
|
||||
tracing.workspace = true
|
||||
tracing-subscriber = { workspace = true, features = ["json"] } # "json" is needed for the .json() formatter in src/logging.rs
|
||||
prometheus.workspace = true
|
||||
opentelemetry.workspace = true
# Required by src/telemetry.rs; the version is assumed to pair with the workspace opentelemetry 0.20.
opentelemetry-otlp = "0.13"
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
39
gb-monitoring/src/lib.rs
Normal file
|
@ -0,0 +1,39 @@
|
|||
pub mod metrics;
|
||||
pub mod logging;
|
||||
pub mod telemetry;
|
||||
|
||||
pub use metrics::Metrics;
|
||||
pub use logging::init_logging;
|
||||
pub use telemetry::Telemetry;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tracing::info;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_monitoring_integration() {
|
||||
// Initialize logging
|
||||
init_logging().unwrap();
|
||||
|
||||
// Initialize metrics
|
||||
let metrics = Metrics::new().unwrap();
|
||||
|
||||
// Initialize telemetry
|
||||
let telemetry = Telemetry::new("test-service").unwrap();
|
||||
|
||||
// Test logging with metrics
|
||||
info!(
|
||||
active_connections = metrics.active_connections.get() as i64,
|
||||
"System initialized"
|
||||
);
|
||||
|
||||
// Simulate some activity
|
||||
metrics.increment_connections();
|
||||
metrics.increment_messages();
|
||||
metrics.observe_request_duration(0.1);
|
||||
|
||||
// Verify metrics
|
||||
assert_eq!(metrics.active_connections.get(), 1.0);
|
||||
}
|
||||
}
|
43
gb-monitoring/src/logging.rs
Normal file
|
@ -0,0 +1,43 @@
|
|||
use tracing::subscriber::set_global_default;
use tracing_subscriber::{
    fmt::{format::FmtSpan, time::SystemTime},
    layer::SubscriberExt,
    EnvFilter, Registry,
};
|
||||
|
||||
pub fn init_logging() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let env_filter = EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| EnvFilter::new("info"));
|
||||
|
||||
let formatting_layer = tracing_subscriber::fmt::layer()
|
||||
        // Built-in system-clock timer; unlike ChronoUtc it needs no extra cargo feature.
        .with_timer(SystemTime)
|
||||
.with_thread_ids(true)
|
||||
.with_thread_names(true)
|
||||
.with_target(true)
|
||||
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
|
||||
.with_file(true)
|
||||
.with_line_number(true)
|
||||
.json();
|
||||
|
||||
let subscriber = Registry::default()
|
||||
.with(env_filter)
|
||||
.with(formatting_layer);
|
||||
|
||||
set_global_default(subscriber)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tracing::info;
|
||||
|
||||
#[test]
|
||||
fn test_logging_initialization() {
|
||||
assert!(init_logging().is_ok());
|
||||
|
||||
// Test logging
|
||||
info!("Test log message");
|
||||
}
|
||||
}
|
146
gb-monitoring/src/metrics.rs
Normal file
|
@ -0,0 +1,146 @@
|
|||
use gb_core::{Result, Error};
use prometheus::{Gauge, Histogram, HistogramOpts, IntCounter, Registry, opts};
use std::sync::Arc;
use tracing::instrument;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Metrics {
    registry: Arc<Registry>,
    // Public so callers (and the crate-level tests) can read the gauges directly.
    pub active_connections: Gauge,
    pub message_count: IntCounter,
    pub request_duration: Histogram,
    pub active_rooms: Gauge,
    pub media_bandwidth: Gauge,
}
|
||||
|
||||
impl Metrics {
|
||||
    pub fn new() -> Result<Self> {
        let registry = Registry::new();

        // Build each collector explicitly, then register it with the local registry so
        // the metrics are exported through `self.registry` rather than the process-wide
        // default registry.
        let active_connections = Gauge::with_opts(
            opts!("gb_active_connections", "Number of active connections")
        ).map_err(|e| Error::Internal(format!("Failed to create metric: {}", e)))?;

        let message_count = IntCounter::with_opts(
            opts!("gb_message_count", "Total number of messages processed")
        ).map_err(|e| Error::Internal(format!("Failed to create metric: {}", e)))?;

        let request_duration = Histogram::with_opts(
            HistogramOpts::new("gb_request_duration", "Request duration in seconds")
        ).map_err(|e| Error::Internal(format!("Failed to create metric: {}", e)))?;

        let active_rooms = Gauge::with_opts(
            opts!("gb_active_rooms", "Number of active rooms")
        ).map_err(|e| Error::Internal(format!("Failed to create metric: {}", e)))?;

        let media_bandwidth = Gauge::with_opts(
            opts!("gb_media_bandwidth", "Current media bandwidth usage in bytes/sec")
        ).map_err(|e| Error::Internal(format!("Failed to create metric: {}", e)))?;

        registry.register(Box::new(active_connections.clone()))
            .map_err(|e| Error::Internal(format!("Failed to register metric: {}", e)))?;
        registry.register(Box::new(message_count.clone()))
            .map_err(|e| Error::Internal(format!("Failed to register metric: {}", e)))?;
        registry.register(Box::new(request_duration.clone()))
            .map_err(|e| Error::Internal(format!("Failed to register metric: {}", e)))?;
        registry.register(Box::new(active_rooms.clone()))
            .map_err(|e| Error::Internal(format!("Failed to register metric: {}", e)))?;
        registry.register(Box::new(media_bandwidth.clone()))
            .map_err(|e| Error::Internal(format!("Failed to register metric: {}", e)))?;

        Ok(Self {
            registry: Arc::new(registry),
            active_connections,
            message_count,
            request_duration,
            active_rooms,
            media_bandwidth,
        })
    }
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn increment_connections(&self) {
|
||||
self.active_connections.inc();
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn decrement_connections(&self) {
|
||||
self.active_connections.dec();
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn increment_messages(&self) {
|
||||
self.message_count.inc();
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn observe_request_duration(&self, duration: f64) {
|
||||
self.request_duration.observe(duration);
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn set_active_rooms(&self, count: i64) {
|
||||
self.active_rooms.set(count as f64);
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn set_media_bandwidth(&self, bytes_per_sec: f64) {
|
||||
self.media_bandwidth.set(bytes_per_sec);
|
||||
}
|
||||
|
||||
pub fn registry(&self) -> Arc<Registry> {
|
||||
self.registry.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_metrics_creation() {
|
||||
let metrics = Metrics::new().unwrap();
|
||||
|
||||
// Test increment connections
|
||||
metrics.increment_connections();
|
||||
assert_eq!(
|
||||
metrics.active_connections.get(),
|
||||
1.0
|
||||
);
|
||||
|
||||
// Test decrement connections
|
||||
metrics.decrement_connections();
|
||||
assert_eq!(
|
||||
metrics.active_connections.get(),
|
||||
0.0
|
||||
);
|
||||
|
||||
// Test message count
|
||||
metrics.increment_messages();
|
||||
assert_eq!(
|
||||
metrics.message_count.get(),
|
||||
1
|
||||
);
|
||||
|
||||
        // Test request duration: one observation should be recorded in the histogram.
        metrics.observe_request_duration(0.5);
        assert_eq!(metrics.request_duration.get_sample_count(), 1);
        assert!(metrics.request_duration.get_sample_sum() > 0.0);
|
||||
|
||||
// Test active rooms
|
||||
metrics.set_active_rooms(10);
|
||||
assert_eq!(
|
||||
metrics.active_rooms.get(),
|
||||
10.0
|
||||
);
|
||||
|
||||
// Test media bandwidth
|
||||
metrics.set_media_bandwidth(1024.0);
|
||||
assert_eq!(
|
||||
metrics.media_bandwidth.get(),
|
||||
1024.0
|
||||
);
|
||||
}
|
||||
}
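// Usage sketch (illustrative): exposing the registry held by `Metrics` in the
// Prometheus text exposition format, e.g. as the body of a /metrics HTTP handler.
// The helper name is an assumption; TextEncoder is part of the prometheus crate.
pub fn render_metrics(metrics: &Metrics) -> String {
    use prometheus::{Encoder, TextEncoder};

    let encoder = TextEncoder::new();
    let families = metrics.registry().gather();
    let mut buffer = Vec::new();
    // Encoding gathered families into an in-memory buffer cannot realistically fail.
    encoder.encode(&families, &mut buffer).expect("failed to encode metrics");
    String::from_utf8(buffer).expect("Prometheus text format is valid UTF-8")
}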
|
56
gb-monitoring/src/telemetry.rs
Normal file
|
@ -0,0 +1,56 @@
|
|||
use opentelemetry::{
|
||||
runtime::Tokio,
|
||||
sdk::{trace, Resource},
|
||||
KeyValue,
|
||||
};
|
||||
use std::time::Duration;
|
||||
use tracing::error;
|
||||
|
||||
pub struct Telemetry {
|
||||
tracer: opentelemetry::sdk::trace::Tracer,
|
||||
}
|
||||
|
||||
impl Telemetry {
|
||||
pub fn new(service_name: &str) -> Result<Self, Box<dyn std::error::Error>> {
|
||||
let tracer = opentelemetry_otlp::new_pipeline()
|
||||
.tracing()
|
||||
.with_exporter(
|
||||
opentelemetry_otlp::new_exporter()
|
||||
.tonic()
|
||||
.with_endpoint("http://localhost:4317")
|
||||
.with_timeout(Duration::from_secs(3))
|
||||
)
|
||||
.with_trace_config(
|
||||
trace::config()
|
||||
.with_resource(Resource::new(vec![KeyValue::new(
|
||||
"service.name",
|
||||
service_name.to_string(),
|
||||
)]))
|
||||
.with_sampler(trace::Sampler::AlwaysOn)
|
||||
)
|
||||
.install_batch(Tokio)?;
|
||||
|
||||
Ok(Self { tracer })
|
||||
}
|
||||
|
||||
pub fn tracer(&self) -> &opentelemetry::sdk::trace::Tracer {
|
||||
&self.tracer
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Telemetry {
|
||||
fn drop(&mut self) {
|
||||
opentelemetry::global::shutdown_tracer_provider();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_telemetry_creation() {
|
||||
let telemetry = Telemetry::new("test-service");
|
||||
assert!(telemetry.is_ok());
|
||||
}
|
||||
}
|
23
gb-nlp/Cargo.toml
Normal file
|
@ -0,0 +1,23 @@
|
|||
[package]
|
||||
name = "gb-nlp"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
rust-bert = "0.21"
|
||||
tokenizers = "0.15"
|
||||
whatlang = "0.16"
|
||||
async-trait.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
thiserror.workspace = true
|
||||
tracing.workspace = true
|
||||
tch = "0.13"
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
76
gb-nlp/src/lang.rs
Normal file
|
@ -0,0 +1,76 @@
|
|||
use gb_core::{Result, Error};
|
||||
use tracing::instrument;
|
||||
use whatlang::{Lang, Script, Detector, detect};
|
||||
|
||||
pub struct LanguageDetector {
|
||||
detector: Detector,
|
||||
}
|
||||
|
||||
impl LanguageDetector {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
detector: Detector::new(),
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self, text))]
|
||||
pub fn detect_language(&self, text: &str) -> Result<DetectedLanguage> {
|
||||
let info = detect(text)
|
||||
.ok_or_else(|| Error::Internal("Failed to detect language".to_string()))?;
|
||||
|
||||
Ok(DetectedLanguage {
|
||||
lang: info.lang(),
|
||||
script: info.script(),
|
||||
confidence: info.confidence(),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, text))]
|
||||
pub fn is_language(&self, text: &str, lang: Lang) -> bool {
|
||||
if let Some(info) = detect(text) {
|
||||
info.lang() == lang
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DetectedLanguage {
|
||||
pub lang: Lang,
|
||||
pub script: Script,
|
||||
pub confidence: f64,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
fn detector() -> LanguageDetector {
|
||||
LanguageDetector::new()
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_detect_english(detector: LanguageDetector) {
|
||||
let text = "Hello, this is a test sentence in English.";
|
||||
let result = detector.detect_language(text).unwrap();
|
||||
assert_eq!(result.lang, Lang::Eng);
|
||||
assert!(result.confidence > 0.9);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_detect_spanish(detector: LanguageDetector) {
|
||||
let text = "Hola, esta es una prueba en español.";
|
||||
let result = detector.detect_language(text).unwrap();
|
||||
assert_eq!(result.lang, Lang::Spa);
|
||||
assert!(result.confidence > 0.9);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_is_language(detector: LanguageDetector) {
|
||||
let text = "Hello world";
|
||||
assert!(detector.is_language(text, Lang::Eng));
|
||||
}
|
||||
}
|
42
gb-nlp/src/lib.rs
Normal file
|
@ -0,0 +1,42 @@
|
|||
pub mod lang;
|
||||
pub mod text;
|
||||
|
||||
pub use lang::{LanguageDetector, DetectedLanguage};
|
||||
pub use text::{TextProcessor, Sentiment, Entity, Answer};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use gb_core::Result;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_nlp_integration() -> Result<()> {
|
||||
// Initialize NLP components
|
||||
let lang_detector = LanguageDetector::new();
|
||||
let text_processor = TextProcessor::new().await?;
|
||||
|
||||
// Test language detection
|
||||
let text = "This is a test sentence in English.";
|
||||
let lang = lang_detector.detect_language(text)?;
|
||||
assert_eq!(lang.lang, whatlang::Lang::Eng);
|
||||
|
||||
// Test sentiment analysis
|
||||
let sentiment = text_processor.analyze_sentiment(text).await?;
|
||||
assert!(sentiment.score > 0.0);
|
||||
|
||||
// Test entity extraction
|
||||
let text = "OpenAI released GPT-4 in March 2023.";
|
||||
let entities = text_processor.extract_entities(text).await?;
|
||||
|
||||
// Test summarization
|
||||
let text = "Artificial intelligence has made significant advances in recent years. Machine learning models can now perform tasks that were once thought to be exclusive to humans. This has led to both excitement and concern about the future of AI.";
|
||||
let summary = text_processor.summarize(text).await?;
|
||||
|
||||
// Test question answering
|
||||
let context = "Rust is a systems programming language focused on safety and performance.";
|
||||
let question = "What is Rust?";
|
||||
let answer = text_processor.answer_question(context, question).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
168
gb-nlp/src/text.rs
Normal file
|
@ -0,0 +1,168 @@
|
|||
use gb_core::{Result, Error};
|
||||
use rust_bert::pipelines::{
|
||||
sentiment::SentimentModel,
|
||||
ner::NERModel,
|
||||
summarization::SummarizationModel,
|
||||
question_answering::{QaModel, QuestionAnsweringModel},
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct TextProcessor {
|
||||
sentiment_model: Arc<Mutex<SentimentModel>>,
|
||||
ner_model: Arc<Mutex<NERModel>>,
|
||||
summarization_model: Arc<Mutex<SummarizationModel>>,
|
||||
qa_model: Arc<Mutex<QuestionAnsweringModel>>,
|
||||
}
|
||||
|
||||
impl TextProcessor {
|
||||
#[instrument]
|
||||
pub async fn new() -> Result<Self> {
|
||||
let sentiment_model = SentimentModel::new(Default::default())
|
||||
.map_err(|e| Error::Internal(format!("Failed to load sentiment model: {}", e)))?;
|
||||
|
||||
let ner_model = NERModel::new(Default::default())
|
||||
.map_err(|e| Error::Internal(format!("Failed to load NER model: {}", e)))?;
|
||||
|
||||
let summarization_model = SummarizationModel::new(Default::default())
|
||||
.map_err(|e| Error::Internal(format!("Failed to load summarization model: {}", e)))?;
|
||||
|
||||
let qa_model = QuestionAnsweringModel::new(Default::default())
|
||||
.map_err(|e| Error::Internal(format!("Failed to load QA model: {}", e)))?;
|
||||
|
||||
Ok(Self {
|
||||
sentiment_model: Arc::new(Mutex::new(sentiment_model)),
|
||||
ner_model: Arc::new(Mutex::new(ner_model)),
|
||||
summarization_model: Arc::new(Mutex::new(summarization_model)),
|
||||
qa_model: Arc::new(Mutex::new(qa_model)),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, text))]
|
||||
pub async fn analyze_sentiment(&self, text: &str) -> Result<Sentiment> {
|
||||
let model = self.sentiment_model.lock().await;
|
||||
let output = model.predict(&[text])
|
||||
.map_err(|e| Error::Internal(format!("Sentiment analysis failed: {}", e)))?;
|
||||
|
||||
Ok(Sentiment {
|
||||
score: output[0].score,
|
||||
label: output[0].label.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, text))]
|
||||
pub async fn extract_entities(&self, text: &str) -> Result<Vec<Entity>> {
|
||||
let model = self.ner_model.lock().await;
|
||||
let output = model.predict(&[text])
|
||||
.map_err(|e| Error::Internal(format!("Entity extraction failed: {}", e)))?;
|
||||
|
||||
Ok(output[0].iter().map(|entity| Entity {
|
||||
text: entity.word.clone(),
|
||||
label: entity.entity.clone(),
|
||||
score: entity.score,
|
||||
}).collect())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, text))]
|
||||
pub async fn summarize(&self, text: &str) -> Result<String> {
|
||||
let model = self.summarization_model.lock().await;
|
||||
let output = model.summarize(&[text])
|
||||
.map_err(|e| Error::Internal(format!("Summarization failed: {}", e)))?;
|
||||
|
||||
Ok(output[0].clone())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, context, question))]
|
||||
pub async fn answer_question(&self, context: &str, question: &str) -> Result<Answer> {
|
||||
let model = self.qa_model.lock().await;
|
||||
let output = model.predict(&[QaModel {
|
||||
context,
|
||||
question,
|
||||
}])
|
||||
.map_err(|e| Error::Internal(format!("Question answering failed: {}", e)))?;
|
||||
|
||||
Ok(Answer {
|
||||
text: output[0].answer.clone(),
|
||||
score: output[0].score,
|
||||
start: output[0].start,
|
||||
end: output[0].end,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Sentiment {
|
||||
pub score: f64,
|
||||
pub label: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Entity {
|
||||
pub text: String,
|
||||
pub label: String,
|
||||
pub score: f64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Answer {
|
||||
pub text: String,
|
||||
pub score: f64,
|
||||
pub start: usize,
|
||||
pub end: usize,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
async fn processor() -> TextProcessor {
|
||||
TextProcessor::new().await.unwrap()
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_sentiment_analysis(processor: TextProcessor) -> Result<()> {
|
||||
let text = "I love this product! It's amazing!";
|
||||
let sentiment = processor.analyze_sentiment(text).await?;
|
||||
assert!(sentiment.score > 0.5);
|
||||
assert_eq!(sentiment.label, "positive");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_entity_extraction(processor: TextProcessor) -> Result<()> {
|
||||
let text = "John Smith works at Microsoft in Seattle.";
|
||||
let entities = processor.extract_entities(text).await?;
|
||||
|
||||
assert!(entities.iter().any(|e| e.text == "John Smith" && e.label == "PERSON"));
|
||||
assert!(entities.iter().any(|e| e.text == "Microsoft" && e.label == "ORG"));
|
||||
assert!(entities.iter().any(|e| e.text == "Seattle" && e.label == "LOC"));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_summarization(processor: TextProcessor) -> Result<()> {
|
||||
let text = "The quick brown fox jumps over the lazy dog. This is a classic pangram that contains every letter of the English alphabet. It has been used for typing practice and font displays for many years.";
|
||||
let summary = processor.summarize(text).await?;
|
||||
assert!(summary.len() < text.len());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_question_answering(processor: TextProcessor) -> Result<()> {
|
||||
let context = "The capital of France is Paris. It is known as the City of Light.";
|
||||
let question = "What is the capital of France?";
|
||||
|
||||
let answer = processor.answer_question(context, question).await?;
|
||||
assert_eq!(answer.text, "Paris");
|
||||
assert!(answer.score > 0.8);
|
||||
Ok(())
|
||||
}
|
||||
}
|
24
gb-storage/Cargo.toml
Normal file
|
@ -0,0 +1,24 @@
|
|||
[package]
|
||||
name = "gb-storage"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
tokio.workspace = true
|
||||
sqlx.workspace = true
|
||||
redis.workspace = true
|
||||
tikv-client.workspace = true
|
||||
tracing.workspace = true
|
||||
async-trait.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
uuid.workspace = true
|
||||
chrono.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
mockall.workspace = true
|
||||
tokio-test = "0.4"
|
66
gb-storage/src/lib.rs
Normal file
|
@ -0,0 +1,66 @@
|
|||
pub mod postgres;
|
||||
pub mod redis;
|
||||
pub mod tikv;
|
||||
|
||||
pub use postgres::{PostgresCustomerRepository, PostgresInstanceRepository};
|
||||
pub use redis::RedisCache;
|
||||
pub use tikv::TiKVStorage;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use gb_core::models::Customer;
|
||||
use sqlx::postgres::PgPoolOptions;
|
||||
use std::time::Duration;
|
||||
|
||||
async fn setup_test_db() -> sqlx::PgPool {
|
||||
let database_url = std::env::var("DATABASE_URL")
|
||||
.unwrap_or_else(|_| "postgres://postgres:postgres@localhost/gb_test".to_string());
|
||||
|
||||
let pool = PgPoolOptions::new()
|
||||
.max_connections(5)
|
||||
.connect(&database_url)
|
||||
.await
|
||||
.expect("Failed to connect to database");
|
||||
|
||||
// Run migrations
|
||||
gb_migrations::run_migrations(&pool)
|
||||
.await
|
||||
.expect("Failed to run migrations");
|
||||
|
||||
pool
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_storage_integration() {
|
||||
// Setup PostgreSQL
|
||||
let pool = setup_test_db().await;
|
||||
let customer_repo = PostgresCustomerRepository::new(pool.clone());
|
||||
|
||||
// Setup Redis
|
||||
let redis_url = std::env::var("REDIS_URL")
|
||||
.unwrap_or_else(|_| "redis://127.0.0.1/".to_string());
|
||||
let cache = RedisCache::new(&redis_url, Duration::from_secs(60)).unwrap();
|
||||
|
||||
// Create a customer
|
||||
let customer = Customer::new(
|
||||
"Integration Test Corp".to_string(),
|
||||
"enterprise".to_string(),
|
||||
10,
|
||||
);
|
||||
|
||||
// Save to PostgreSQL
|
||||
let created = customer_repo.create(&customer).await.unwrap();
|
||||
|
||||
// Cache in Redis
|
||||
cache.set(&format!("customer:{}", created.id), &created).await.unwrap();
|
||||
|
||||
// Verify Redis cache
|
||||
let cached: Option<Customer> = cache.get(&format!("customer:{}", created.id)).await.unwrap();
|
||||
assert_eq!(cached.unwrap().id, created.id);
|
||||
|
||||
// Cleanup
|
||||
customer_repo.delete(created.id).await.unwrap();
|
||||
cache.delete(&format!("customer:{}", created.id)).await.unwrap();
|
||||
}
|
||||
}
|
302
gb-storage/src/postgres.rs
Normal file
|
@ -0,0 +1,302 @@
|
|||
use async_trait::async_trait;
|
||||
use gb_core::{
|
||||
models::*,
|
||||
traits::*,
|
||||
Result, Error,
|
||||
};
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct PostgresCustomerRepository {
|
||||
pool: PgPool,
|
||||
}
|
||||
|
||||
impl PostgresCustomerRepository {
|
||||
pub fn new(pool: PgPool) -> Self {
|
||||
Self { pool }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl CustomerRepository for PostgresCustomerRepository {
|
||||
#[instrument(skip(self))]
|
||||
async fn create(&self, customer: &Customer) -> Result<Customer> {
|
||||
let record = sqlx::query_as!(
|
||||
Customer,
|
||||
r#"
|
||||
INSERT INTO customers (id, name, subscription_tier, status, max_instances, metadata, created_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
||||
RETURNING *
|
||||
"#,
|
||||
customer.id,
|
||||
customer.name,
|
||||
customer.subscription_tier,
|
||||
customer.status,
|
||||
customer.max_instances,
|
||||
customer.metadata as _,
|
||||
customer.created_at,
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Failed to create customer: {}", e);
|
||||
Error::Database(e)
|
||||
})?;
|
||||
|
||||
Ok(record)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn get(&self, id: Uuid) -> Result<Customer> {
|
||||
let record = sqlx::query_as!(
|
||||
Customer,
|
||||
r#"
|
||||
SELECT * FROM customers WHERE id = $1
|
||||
"#,
|
||||
id
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
sqlx::Error::RowNotFound => Error::NotFound(format!("Customer {} not found", id)),
|
||||
e => Error::Database(e),
|
||||
})?;
|
||||
|
||||
Ok(record)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn update(&self, customer: &Customer) -> Result<Customer> {
|
||||
let record = sqlx::query_as!(
|
||||
Customer,
|
||||
r#"
|
||||
UPDATE customers
|
||||
SET name = $1, subscription_tier = $2, status = $3, max_instances = $4, metadata = $5
|
||||
WHERE id = $6
|
||||
RETURNING *
|
||||
"#,
|
||||
customer.name,
|
||||
customer.subscription_tier,
|
||||
customer.status,
|
||||
customer.max_instances,
|
||||
customer.metadata as _,
|
||||
customer.id
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
sqlx::Error::RowNotFound => Error::NotFound(format!("Customer {} not found", customer.id)),
|
||||
e => Error::Database(e),
|
||||
})?;
|
||||
|
||||
Ok(record)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn delete(&self, id: Uuid) -> Result<()> {
|
||||
sqlx::query!(
|
||||
r#"
|
||||
DELETE FROM customers WHERE id = $1
|
||||
"#,
|
||||
id
|
||||
)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
sqlx::Error::RowNotFound => Error::NotFound(format!("Customer {} not found", id)),
|
||||
e => Error::Database(e),
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PostgresInstanceRepository {
|
||||
pool: PgPool,
|
||||
}
|
||||
|
||||
impl PostgresInstanceRepository {
|
||||
pub fn new(pool: PgPool) -> Self {
|
||||
Self { pool }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl InstanceRepository for PostgresInstanceRepository {
|
||||
#[instrument(skip(self))]
|
||||
async fn create(&self, instance: &Instance) -> Result<Instance> {
|
||||
let record = sqlx::query_as!(
|
||||
Instance,
|
||||
r#"
|
||||
INSERT INTO instances (id, customer_id, name, status, shard_id, region, config, created_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
RETURNING *
|
||||
"#,
|
||||
instance.id,
|
||||
instance.customer_id,
|
||||
instance.name,
|
||||
instance.status,
|
||||
instance.shard_id,
|
||||
instance.region,
|
||||
instance.config as _,
|
||||
instance.created_at,
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Failed to create instance: {}", e);
|
||||
Error::Database(e)
|
||||
})?;
|
||||
|
||||
Ok(record)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn get(&self, id: Uuid) -> Result<Instance> {
|
||||
let record = sqlx::query_as!(
|
||||
Instance,
|
||||
r#"
|
||||
SELECT * FROM instances WHERE id = $1
|
||||
"#,
|
||||
id
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
sqlx::Error::RowNotFound => Error::NotFound(format!("Instance {} not found", id)),
|
||||
e => Error::Database(e),
|
||||
})?;
|
||||
|
||||
Ok(record)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn get_by_customer(&self, customer_id: Uuid) -> Result<Vec<Instance>> {
|
||||
let records = sqlx::query_as!(
|
||||
Instance,
|
||||
r#"
|
||||
SELECT * FROM instances WHERE customer_id = $1
|
||||
"#,
|
||||
customer_id
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(Error::Database)?;
|
||||
|
||||
Ok(records)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn get_by_shard(&self, shard_id: i32) -> Result<Vec<Instance>> {
|
||||
let records = sqlx::query_as!(
|
||||
Instance,
|
||||
r#"
|
||||
SELECT * FROM instances WHERE shard_id = $1
|
||||
"#,
|
||||
shard_id
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(Error::Database)?;
|
||||
|
||||
Ok(records)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn update(&self, instance: &Instance) -> Result<Instance> {
|
||||
let record = sqlx::query_as!(
|
||||
Instance,
|
||||
r#"
|
||||
UPDATE instances
|
||||
SET name = $1, status = $2, shard_id = $3, region = $4, config = $5
|
||||
WHERE id = $6
|
||||
RETURNING *
|
||||
"#,
|
||||
instance.name,
|
||||
instance.status,
|
||||
instance.shard_id,
|
||||
instance.region,
|
||||
instance.config as _,
|
||||
instance.id
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
sqlx::Error::RowNotFound => Error::NotFound(format!("Instance {} not found", instance.id)),
|
||||
e => Error::Database(e),
|
||||
})?;
|
||||
|
||||
Ok(record)
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
async fn delete(&self, id: Uuid) -> Result<()> {
|
||||
sqlx::query!(
|
||||
r#"
|
||||
DELETE FROM instances WHERE id = $1
|
||||
"#,
|
||||
id
|
||||
)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
sqlx::Error::RowNotFound => Error::NotFound(format!("Instance {} not found", id)),
|
||||
e => Error::Database(e),
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use sqlx::postgres::PgPoolOptions;
|
||||
|
||||
async fn create_test_pool() -> PgPool {
|
||||
let database_url = std::env::var("DATABASE_URL")
|
||||
.unwrap_or_else(|_| "postgres://postgres:postgres@localhost/gb_test".to_string());
|
||||
|
||||
PgPoolOptions::new()
|
||||
.max_connections(5)
|
||||
.connect(&database_url)
|
||||
.await
|
||||
.expect("Failed to create test pool")
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn customer() -> Customer {
|
||||
Customer::new(
|
||||
"Test Corp".to_string(),
|
||||
"enterprise".to_string(),
|
||||
10,
|
||||
)
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_customer_crud(customer: Customer) {
|
||||
let pool = create_test_pool().await;
|
||||
let repo = PostgresCustomerRepository::new(pool);
|
||||
|
||||
// Create
|
||||
let created = repo.create(&customer).await.unwrap();
|
||||
assert_eq!(created.name, customer.name);
|
||||
|
||||
// Get
|
||||
let retrieved = repo.get(created.id).await.unwrap();
|
||||
assert_eq!(retrieved.id, created.id);
|
||||
|
||||
// Update
|
||||
let mut updated = retrieved.clone();
|
||||
updated.name = "Updated Corp".to_string();
|
||||
let updated = repo.update(&updated).await.unwrap();
|
||||
assert_eq!(updated.name, "Updated Corp");
|
||||
|
||||
// Delete
|
||||
repo.delete(updated.id).await.unwrap();
|
||||
assert!(repo.get(updated.id).await.is_err());
|
||||
}
|
||||
}
|
154
gb-storage/src/redis.rs
Normal file
|
@ -0,0 +1,154 @@
|
|||
use async_trait::async_trait;
|
||||
use gb_core::{Result, Error};
|
||||
use redis::{AsyncCommands, Client};
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use std::time::Duration;
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct RedisCache {
|
||||
client: Client,
|
||||
default_ttl: Duration,
|
||||
}
|
||||
|
||||
impl RedisCache {
|
||||
pub fn new(url: &str, default_ttl: Duration) -> Result<Self> {
|
||||
let client = Client::open(url).map_err(|e| Error::Redis(e))?;
|
||||
Ok(Self {
|
||||
client,
|
||||
default_ttl,
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, value))]
|
||||
pub async fn set<T: Serialize>(&self, key: &str, value: &T) -> Result<()> {
|
||||
let mut conn = self.client.get_async_connection()
|
||||
.await
|
||||
.map_err(Error::Redis)?;
|
||||
|
||||
let serialized = serde_json::to_string(value)
|
||||
.map_err(|e| Error::Internal(format!("Serialization error: {}", e)))?;
|
||||
|
||||
        // Turbofish pins the (discarded) reply type so the redis crate can infer it.
        conn.set_ex::<_, _, ()>(key, serialized, self.default_ttl.as_secs() as usize)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Redis set error: {}", e);
|
||||
Error::Redis(e)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn get<T: DeserializeOwned>(&self, key: &str) -> Result<Option<T>> {
|
||||
let mut conn = self.client.get_async_connection()
|
||||
.await
|
||||
.map_err(Error::Redis)?;
|
||||
|
||||
let value: Option<String> = conn.get(key)
|
||||
.await
|
||||
.map_err(Error::Redis)?;
|
||||
|
||||
match value {
|
||||
Some(v) => {
|
||||
let deserialized = serde_json::from_str(&v)
|
||||
.map_err(|e| Error::Internal(format!("Deserialization error: {}", e)))?;
|
||||
Ok(Some(deserialized))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn delete(&self, key: &str) -> Result<()> {
|
||||
let mut conn = self.client.get_async_connection()
|
||||
.await
|
||||
.map_err(Error::Redis)?;
|
||||
|
||||
        // Reply type annotated as () because the deleted-key count is not used.
        conn.del::<_, ()>(key)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Redis delete error: {}", e);
|
||||
Error::Redis(e)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn increment(&self, key: &str) -> Result<i64> {
|
||||
let mut conn = self.client.get_async_connection()
|
||||
.await
|
||||
.map_err(Error::Redis)?;
|
||||
|
||||
conn.incr(key, 1)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Redis increment error: {}", e);
|
||||
Error::Redis(e)
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn set_with_ttl<T: Serialize>(&self, key: &str, value: &T, ttl: Duration) -> Result<()> {
|
||||
let mut conn = self.client.get_async_connection()
|
||||
.await
|
||||
.map_err(Error::Redis)?;
|
||||
|
||||
let serialized = serde_json::to_string(value)
|
||||
.map_err(|e| Error::Internal(format!("Serialization error: {}", e)))?;
|
||||
|
||||
        conn.set_ex::<_, _, ()>(key, serialized, ttl.as_secs() as usize)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Redis set error: {}", e);
|
||||
Error::Redis(e)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
struct TestStruct {
|
||||
field: String,
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_redis_cache() {
|
||||
let redis_url = std::env::var("REDIS_URL")
|
||||
.unwrap_or_else(|_| "redis://127.0.0.1/".to_string());
|
||||
|
||||
let cache = RedisCache::new(&redis_url, Duration::from_secs(60)).unwrap();
|
||||
|
||||
// Test set and get
|
||||
let test_value = TestStruct {
|
||||
field: "test".to_string(),
|
||||
};
|
||||
|
||||
cache.set("test_key", &test_value).await.unwrap();
|
||||
let retrieved: Option<TestStruct> = cache.get("test_key").await.unwrap();
|
||||
assert_eq!(retrieved.unwrap(), test_value);
|
||||
|
||||
// Test delete
|
||||
cache.delete("test_key").await.unwrap();
|
||||
let deleted: Option<TestStruct> = cache.get("test_key").await.unwrap();
|
||||
assert!(deleted.is_none());
|
||||
|
||||
// Test increment
|
||||
cache.set("counter", &0).await.unwrap();
|
||||
let count = cache.increment("counter").await.unwrap();
|
||||
assert_eq!(count, 1);
|
||||
|
||||
// Test TTL
|
||||
cache.set_with_ttl("ttl_key", &test_value, Duration::from_secs(1)).await.unwrap();
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
let expired: Option<TestStruct> = cache.get("ttl_key").await.unwrap();
|
||||
assert!(expired.is_none());
|
||||
}
|
||||
}
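// Usage sketch (illustrative): the write-through pattern the gb-storage integration
// test uses, extracted into a helper. The "customer:<id>" key layout and the helper
// name are assumptions.
pub async fn cache_customer(
    cache: &RedisCache,
    customer: &gb_core::models::Customer,
) -> Result<()> {
    let key = format!("customer:{}", customer.id);
    cache.set(&key, customer).await?;

    // A `None` read means the entry expired (default TTL) or was evicted.
    let cached: Option<gb_core::models::Customer> = cache.get(&key).await?;
    debug_assert!(cached.is_some());
    Ok(())
}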
|
128
gb-storage/src/tikv.rs
Normal file
|
@ -0,0 +1,128 @@
|
|||
use async_trait::async_trait;
|
||||
use gb_core::{Result, Error};
|
||||
use tikv_client::{Config, RawClient, Value};
|
||||
use tracing::{instrument, error};
|
||||
|
||||
pub struct TiKVStorage {
|
||||
client: RawClient,
|
||||
}
|
||||
|
||||
impl TiKVStorage {
|
||||
pub async fn new(pd_endpoints: Vec<String>) -> Result<Self> {
|
||||
let config = Config::default();
|
||||
let client = RawClient::new(pd_endpoints, config)
|
||||
.await
|
||||
.map_err(|e| Error::Internal(format!("TiKV client error: {}", e)))?;
|
||||
|
||||
Ok(Self { client })
|
||||
}
|
||||
|
||||
#[instrument(skip(self, value))]
|
||||
pub async fn put(&self, key: &[u8], value: Value) -> Result<()> {
|
||||
self.client
|
||||
.put(key.to_vec(), value)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("TiKV put error: {}", e);
|
||||
Error::Internal(format!("TiKV error: {}", e))
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn get(&self, key: &[u8]) -> Result<Option<Value>> {
|
||||
self.client
|
||||
.get(key.to_vec())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("TiKV get error: {}", e);
|
||||
Error::Internal(format!("TiKV error: {}", e))
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn delete(&self, key: &[u8]) -> Result<()> {
|
||||
self.client
|
||||
.delete(key.to_vec())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("TiKV delete error: {}", e);
|
||||
Error::Internal(format!("TiKV error: {}", e))
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn batch_get(&self, keys: Vec<Vec<u8>>) -> Result<Vec<KVPair>> {
|
||||
self.client
|
||||
.batch_get(keys)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("TiKV batch get error: {}", e);
|
||||
Error::Internal(format!("TiKV error: {}", e))
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn scan(&self, start: &[u8], end: &[u8], limit: u32) -> Result<Vec<KVPair>> {
|
||||
self.client
|
||||
.scan(start.to_vec()..end.to_vec(), limit)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("TiKV scan error: {}", e);
|
||||
Error::Internal(format!("TiKV error: {}", e))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct KVPair {
|
||||
pub key: Vec<u8>,
|
||||
pub value: Value,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tikv_client::Value;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_tikv_storage() {
|
||||
let pd_endpoints = vec!["127.0.0.1:2379".to_string()];
|
||||
let storage = TiKVStorage::new(pd_endpoints).await.unwrap();
|
||||
|
||||
// Test put and get
|
||||
let key = b"test_key";
|
||||
let value = Value::from(b"test_value".to_vec());
|
||||
storage.put(key, value.clone()).await.unwrap();
|
||||
|
||||
let retrieved = storage.get(key).await.unwrap();
|
||||
assert_eq!(retrieved.unwrap(), value);
|
||||
|
||||
// Test delete
|
||||
storage.delete(key).await.unwrap();
|
||||
let deleted = storage.get(key).await.unwrap();
|
||||
assert!(deleted.is_none());
|
||||
|
||||
// Test batch operations
|
||||
let pairs = vec![
|
||||
(b"key1".to_vec(), Value::from(b"value1".to_vec())),
|
||||
(b"key2".to_vec(), Value::from(b"value2".to_vec())),
|
||||
];
|
||||
|
||||
for (key, value) in pairs.clone() {
|
||||
storage.put(&key, value).await.unwrap();
|
||||
}
|
||||
|
||||
let keys: Vec<Vec<u8>> = pairs.iter().map(|(k, _)| k.clone()).collect();
|
||||
let retrieved = storage.batch_get(keys).await.unwrap();
|
||||
assert_eq!(retrieved.len(), pairs.len());
|
||||
|
||||
// Test scan
|
||||
let scanned = storage.scan(b"key", b"key3", 10).await.unwrap();
|
||||
assert_eq!(scanned.len(), 2);
|
||||
|
||||
// Cleanup
|
||||
for (key, _) in pairs {
|
||||
storage.delete(&key).await.unwrap();
|
||||
}
|
||||
}
|
||||
}
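// Usage sketch (illustrative): a raw-KV round trip against a local PD endpoint,
// using the same placeholder address and value handling as the test above.
pub async fn tikv_roundtrip() -> Result<()> {
    let storage = TiKVStorage::new(vec!["127.0.0.1:2379".to_string()]).await?;

    // Keys and values are plain byte strings in the raw KV API.
    let value = Value::from(b"hello".to_vec());
    storage.put(b"greeting", value.clone()).await?;

    let retrieved = storage.get(b"greeting").await?;
    assert_eq!(retrieved, Some(value));

    storage.delete(b"greeting").await?;
    Ok(())
}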
|
56
gb-testing/Cargo.toml
Normal file
|
@ -0,0 +1,56 @@
|
|||
[package]
|
||||
name = "gb-testing"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
gb-auth = { path = "../gb-auth" }
|
||||
gb-api = { path = "../gb-api" }
|
||||
|
||||
# Testing frameworks
|
||||
goose = "0.17" # Load testing
|
||||
criterion = { version = "0.5", features = ["async_futures"] }
|
||||
testcontainers = "0.14"
|
||||
k8s-openapi = { version = "0.18", features = ["v1_26"] }
|
||||
kube = { version = "0.82", features = ["runtime", "derive"] }
|
||||
|
||||
# Async Runtime
|
||||
tokio.workspace = true
|
||||
async-trait.workspace = true
|
||||
|
||||
# HTTP Client
|
||||
reqwest = { version = "0.11", features = ["json", "stream"] }
|
||||
hyper = { version = "1.0", features = ["full"] }
|
||||
|
||||
# WebSocket Testing
|
||||
tokio-tungstenite = "0.20"
|
||||
tungstenite = "0.20"
|
||||
|
||||
# Database
|
||||
sqlx.workspace = true
|
||||
redis.workspace = true
|
||||
|
||||
# Metrics & Monitoring
|
||||
prometheus = { version = "0.13", features = ["process"] }
|
||||
tracing.workspace = true
|
||||
opentelemetry.workspace = true
|
||||
|
||||
# Serialization
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
# Utils
|
||||
futures = "0.3"
|
||||
rand = "0.8"
|
||||
fake = { version = "2.9", features = ["derive"] }
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
uuid = { version = "1.6", features = ["v4"] }
|
||||
|
||||
[dev-dependencies]
|
||||
rstest = "0.18"
|
||||
wiremock = "0.5"
|
||||
assert_cmd = "2.0"
|
||||
predicates = "3.0"
|
57
gb-testing/config/test_config.yaml
Normal file
|
@ -0,0 +1,57 @@
|
|||
load_test:
|
||||
users: 100
|
||||
duration: 300 # seconds
|
||||
ramp_up: 60 # seconds
|
||||
scenarios:
|
||||
- auth
|
||||
- api
|
||||
- webrtc
|
||||
|
||||
performance_test:
|
||||
iterations: 1000
|
||||
warmup_iterations: 100
|
||||
sample_size: 100
|
||||
threads: 8
|
||||
scenarios:
|
||||
- api_latency
|
||||
- database_queries
|
||||
- media_processing
|
||||
|
||||
stress_test:
|
||||
duration: 1800 # 30 minutes
|
||||
concurrent_users: 1000
|
||||
scenarios:
|
||||
- continuous_requests
|
||||
- websocket_connections
|
||||
- file_uploads
|
||||
|
||||
chaos_test:
|
||||
duration: 3600 # 1 hour
|
||||
interval: 300 # 5 minutes between actions
|
||||
actions:
|
||||
- kill_random_pod
|
||||
- network_partition
|
||||
- resource_exhaustion
|
||||
- disk_pressure
|
||||
|
||||
metrics:
|
||||
prometheus:
|
||||
enabled: true
|
||||
port: 9090
|
||||
grafana:
|
||||
enabled: true
|
||||
port: 3000
|
||||
jaeger:
|
||||
enabled: true
|
||||
port: 16686
|
||||
|
||||
reports:
|
||||
formats:
|
||||
- json
|
||||
- html
|
||||
- pdf
|
||||
output_dir: "./test-reports"
|
||||
retain_days: 30
|
26
gb-testing/run_tests.sh
Normal file
|
@ -0,0 +1,26 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "Running gb-testing test suite..."
|
||||
|
||||
# Run integration tests
|
||||
echo "Running integration tests..."
|
||||
cargo test --test '*' --features integration
|
||||
|
||||
# Run load tests
|
||||
echo "Running load tests..."
|
||||
cargo test --test '*' --features load
|
||||
|
||||
# Run performance benchmarks
|
||||
echo "Running performance benchmarks..."
|
||||
cargo bench
|
||||
|
||||
# Run stress tests
|
||||
echo "Running stress tests..."
|
||||
cargo test --test '*' --features stress
|
||||
|
||||
# Run chaos tests
|
||||
echo "Running chaos tests..."
|
||||
cargo test --test '*' --features chaos
|
||||
|
||||
echo "All tests completed!"
|
41
gb-testing/src/chaos/mod.rs
Normal file
|
@ -0,0 +1,41 @@
|
|||
use kube::{
|
||||
api::{Api, DeleteParams},
|
||||
Client,
|
||||
};
|
||||
use k8s_openapi::api::core::v1::Pod;
|
||||
use rand::seq::SliceRandom;
|
||||
|
||||
pub struct ChaosTest {
|
||||
client: Client,
|
||||
namespace: String,
|
||||
}
|
||||
|
||||
impl ChaosTest {
|
||||
pub async fn new(namespace: String) -> anyhow::Result<Self> {
|
||||
let client = Client::try_default().await?;
|
||||
Ok(Self { client, namespace })
|
||||
}
|
||||
|
||||
pub async fn kill_random_pod(&self) -> anyhow::Result<()> {
|
||||
let pods: Api<Pod> = Api::namespaced(self.client.clone(), &self.namespace);
|
||||
let pod_list = pods.list(&Default::default()).await?;
|
||||
|
||||
if let Some(pod) = pod_list.items.choose(&mut rand::thread_rng()) {
|
||||
if let Some(name) = &pod.metadata.name {
|
||||
pods.delete(name, &DeleteParams::default()).await?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn network_partition(&self) -> anyhow::Result<()> {
|
||||
// Network partition test implementation
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn resource_exhaustion(&self) -> anyhow::Result<()> {
|
||||
// Resource exhaustion test implementation
|
||||
Ok(())
|
||||
}
|
||||
}
|
42
gb-testing/src/integration/mod.rs
Normal file
|
@ -0,0 +1,42 @@
|
|||
use async_trait::async_trait;
|
||||
use sqlx::PgPool;
|
||||
use testcontainers::clients::Cli;
|
||||
|
||||
pub struct IntegrationTest {
|
||||
pub docker: Cli,
|
||||
pub db_pool: PgPool,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait IntegrationTestCase {
|
||||
async fn setup(&mut self) -> anyhow::Result<()>;
|
||||
async fn execute(&self) -> anyhow::Result<()>;
|
||||
async fn teardown(&mut self) -> anyhow::Result<()>;
|
||||
}
|
||||
|
||||
// Restored from the commented-out sketch that preceded the impl, since the impl
// below cannot compile without it. The containers borrow the Docker CLI handle,
// so the handle is leaked to satisfy the 'static lifetime; acceptable in test code.
pub struct TestEnvironment {
    pub postgres: testcontainers::Container<'static, testcontainers::images::postgres::Postgres>,
    pub redis: testcontainers::Container<'static, testcontainers::images::redis::Redis>,
    pub kafka: testcontainers::Container<'static, testcontainers::images::kafka::Kafka>,
}

impl TestEnvironment {
    pub async fn new() -> anyhow::Result<Self> {
        let docker: &'static Cli = Box::leak(Box::new(Cli::default()));

        // Start PostgreSQL
        let postgres = docker.run(testcontainers::images::postgres::Postgres::default());

        // Start Redis
        let redis = docker.run(testcontainers::images::redis::Redis::default());

        // Start Kafka
        let kafka = docker.run(testcontainers::images::kafka::Kafka::default());

        Ok(Self {
            postgres,
            redis,
            kafka,
        })
    }
}
|
13
gb-testing/src/lib.rs
Normal file
|
@ -0,0 +1,13 @@
|
|||
pub mod integration;
|
||||
pub mod load;
|
||||
pub mod performance;
|
||||
pub mod stress;
|
||||
pub mod chaos;
|
||||
pub mod scenarios;
|
||||
pub mod utils;
|
||||
pub mod metrics;
|
||||
pub mod reports;
|
||||
|
||||
pub use scenarios::TestScenario;
|
||||
pub use metrics::TestMetrics;
|
||||
pub use reports::TestReport;
|
141
gb-testing/src/load/mod.rs
Normal file
|
@ -0,0 +1,141 @@
|
|||
use goose::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct LoadTestConfig {
|
||||
pub users: usize,
|
||||
pub duration: std::time::Duration,
|
||||
pub ramp_up: std::time::Duration,
|
||||
pub scenarios: Vec<String>,
|
||||
}
|
||||
|
||||
pub struct LoadTest {
|
||||
pub config: LoadTestConfig,
|
||||
pub metrics: crate::metrics::TestMetrics,
|
||||
}
|
||||
|
||||
impl LoadTest {
|
||||
pub fn new(config: LoadTestConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
metrics: crate::metrics::TestMetrics::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run(&self) -> anyhow::Result<crate::reports::TestReport> {
|
||||
let mut goose = GooseAttack::initialize()?;
|
||||
|
||||
goose
|
||||
.set_default_host("http://localhost:8080")?
|
||||
.set_users(self.config.users)?
|
||||
.set_startup_time(self.config.ramp_up)?
|
||||
.set_run_time(self.config.duration)?;
|
||||
|
||||
for scenario in &self.config.scenarios {
|
||||
match scenario.as_str() {
|
||||
"auth" => goose.register_scenario(auth_scenario()),
|
||||
"api" => goose.register_scenario(api_scenario()),
|
||||
"webrtc" => goose.register_scenario(webrtc_scenario()),
|
||||
_ => continue,
|
||||
}?;
|
||||
}
|
||||
|
||||
let metrics = goose.execute().await?;
|
||||
Ok(crate::reports::TestReport::from(metrics))
|
||||
}
|
||||
}
|
||||
|
||||
fn auth_scenario() -> Scenario {
|
||||
scenario!("Authentication")
|
||||
.register_transaction(transaction!(login))
|
||||
.register_transaction(transaction!(logout))
|
||||
}
|
||||
|
||||
async fn login(user: &mut GooseUser) -> TransactionResult {
|
||||
let payload = serde_json::json!({
|
||||
"email": "test@example.com",
|
||||
"password": "password123"
|
||||
});
|
||||
|
||||
let _response = user
|
||||
.post_json("/auth/login", &payload)
|
||||
.await?
|
||||
.response?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn logout(user: &mut GooseUser) -> TransactionResult {
|
||||
let _response = user
|
||||
.post("/auth/logout")
|
||||
.await?
|
||||
.response?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn api_scenario() -> Scenario {
|
||||
scenario!("API")
|
||||
.register_transaction(transaction!(create_instance))
|
||||
.register_transaction(transaction!(list_instances))
|
||||
}
|
||||
|
||||
async fn create_instance(user: &mut GooseUser) -> TransactionResult {
|
||||
let payload = serde_json::json!({
|
||||
"name": "test-instance",
|
||||
"config": {
|
||||
"memory": "512Mi",
|
||||
"cpu": "0.5"
|
||||
}
|
||||
});
|
||||
|
||||
let _response = user
|
||||
.post_json("/api/instances", &payload)
|
||||
.await?
|
||||
.response?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_instances(user: &mut GooseUser) -> TransactionResult {
|
||||
let _response = user
|
||||
.get("/api/instances")
|
||||
.await?
|
||||
.response?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn webrtc_scenario() -> Scenario {
|
||||
scenario!("WebRTC")
|
||||
.register_transaction(transaction!(join_room))
|
||||
.register_transaction(transaction!(send_message))
|
||||
}
|
||||
|
||||
async fn join_room(user: &mut GooseUser) -> TransactionResult {
|
||||
let payload = serde_json::json!({
|
||||
"room_id": "test-room",
|
||||
"user_id": "test-user"
|
||||
});
|
||||
|
||||
let _response = user
|
||||
.post_json("/webrtc/rooms/join", &payload)
|
||||
.await?
|
||||
.response?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn send_message(user: &mut GooseUser) -> TransactionResult {
|
||||
let payload = serde_json::json!({
|
||||
"room_id": "test-room",
|
||||
"message": "test message"
|
||||
});
|
||||
|
||||
let _response = user
|
||||
.post_json("/webrtc/messages", &payload)
|
||||
.await?
|
||||
.response?;
|
||||
|
||||
Ok(())
|
||||
}
|
52
gb-testing/src/metrics/mod.rs
Normal file
|
@ -0,0 +1,52 @@
|
|||
use prometheus::{Registry, Counter, Gauge, Histogram, HistogramOpts};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct TestMetrics {
|
||||
registry: Registry,
|
||||
request_count: Counter,
|
||||
error_count: Counter,
|
||||
response_time: Histogram,
|
||||
active_users: Gauge,
|
||||
}
|
||||
|
||||
impl TestMetrics {
|
||||
pub fn new() -> Self {
|
||||
let registry = Registry::new();
|
||||
|
||||
let request_count = Counter::new("test_requests_total", "Total number of requests").unwrap();
|
||||
let error_count = Counter::new("test_errors_total", "Total number of errors").unwrap();
|
||||
let response_time = Histogram::with_opts(
|
||||
HistogramOpts::new("test_response_time", "Response time in seconds")
|
||||
).unwrap();
|
||||
let active_users = Gauge::new("test_active_users", "Number of active users").unwrap();
|
||||
|
||||
registry.register(Box::new(request_count.clone())).unwrap();
|
||||
registry.register(Box::new(error_count.clone())).unwrap();
|
||||
registry.register(Box::new(response_time.clone())).unwrap();
|
||||
registry.register(Box::new(active_users.clone())).unwrap();
|
||||
|
||||
Self {
|
||||
registry,
|
||||
request_count,
|
||||
error_count,
|
||||
response_time,
|
||||
active_users,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn increment_requests(&self) {
|
||||
self.request_count.inc();
|
||||
}
|
||||
|
||||
pub fn increment_errors(&self) {
|
||||
self.error_count.inc();
|
||||
}
|
||||
|
||||
pub fn observe_response_time(&self, duration: f64) {
|
||||
self.response_time.observe(duration);
|
||||
}
|
||||
|
||||
pub fn set_active_users(&self, count: i64) {
|
||||
self.active_users.set(count as f64);
|
||||
}
|
||||
}
|
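A small sketch of how these collectors could be exported, assuming it runs inside this module (the `registry` field is private as written) and using the standard `prometheus` text encoder:

```rust
use prometheus::{Encoder, TextEncoder};

#[test]
fn export_metrics_sketch() {
    let metrics = TestMetrics::new();
    metrics.increment_requests();
    metrics.observe_response_time(0.042);
    metrics.set_active_users(10);

    // Render every registered collector in the Prometheus text exposition format.
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&metrics.registry.gather(), &mut buffer)
        .expect("encoding metrics should not fail");
    assert!(!buffer.is_empty());
}
```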
29
gb-testing/src/performance/mod.rs
Normal file
|
@ -0,0 +1,29 @@
|
|||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
pub fn benchmark_api(c: &mut Criterion) {
|
||||
let rt = Runtime::new().unwrap();
|
||||
|
||||
c.bench_function("api_latency", |b| {
|
||||
b.iter(|| {
|
||||
rt.block_on(async {
|
||||
// API latency test implementation
|
||||
})
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
pub fn benchmark_database(c: &mut Criterion) {
|
||||
let rt = Runtime::new().unwrap();
|
||||
|
||||
c.bench_function("db_query_performance", |b| {
|
||||
b.iter(|| {
|
||||
rt.block_on(async {
|
||||
// Database query performance test implementation
|
||||
})
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, benchmark_api, benchmark_database);
|
||||
criterion_main!(benches);
|
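The benchmark bodies above are placeholders; one way they might be filled in is sketched below. The endpoint URL and the use of `reqwest` are assumptions, not part of the commit.

```rust
use criterion::Criterion;
use tokio::runtime::Runtime;

pub fn benchmark_health_endpoint(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();
    let client = reqwest::Client::new();

    c.bench_function("health_endpoint_latency", |b| {
        b.iter(|| {
            rt.block_on(async {
                // Hypothetical local endpoint; the result is discarded so the
                // benchmark measures request latency only.
                let _ = client.get("http://localhost:8080/health").send().await;
            })
        })
    });
}
```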
63
gb-testing/src/reports/mod.rs
Normal file
|
@ -0,0 +1,63 @@
|
|||
use serde::Serialize;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct TestReport {
|
||||
pub timestamp: SystemTime,
|
||||
pub duration: Duration,
|
||||
pub total_requests: u64,
|
||||
pub successful_requests: u64,
|
||||
pub failed_requests: u64,
|
||||
pub avg_response_time: f64,
|
||||
pub percentiles: Percentiles,
|
||||
pub errors: Vec<TestError>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct Percentiles {
|
||||
pub p50: f64,
|
||||
pub p90: f64,
|
||||
pub p95: f64,
|
||||
pub p99: f64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct TestError {
|
||||
pub error_type: String,
|
||||
pub message: String,
|
||||
pub count: u64,
|
||||
}
|
||||
|
||||
impl TestReport {
|
||||
pub fn new(
|
||||
duration: Duration,
|
||||
total_requests: u64,
|
||||
successful_requests: u64,
|
||||
failed_requests: u64,
|
||||
avg_response_time: f64,
|
||||
percentiles: Percentiles,
|
||||
errors: Vec<TestError>,
|
||||
) -> Self {
|
||||
Self {
|
||||
timestamp: SystemTime::now(),
|
||||
duration,
|
||||
total_requests,
|
||||
successful_requests,
|
||||
failed_requests,
|
||||
avg_response_time,
|
||||
percentiles,
|
||||
errors,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save_json(&self, path: &str) -> anyhow::Result<()> {
|
||||
let json = serde_json::to_string_pretty(self)?;
|
||||
std::fs::write(path, json)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn save_html(&self, path: &str) -> anyhow::Result<()> {
|
||||
// HTML report generation implementation
|
||||
Ok(())
|
||||
}
|
||||
}
|
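As an illustration, a report could be assembled and persisted as below; all of the numbers are made up.

```rust
use std::time::Duration;

fn write_example_report() -> anyhow::Result<()> {
    let report = TestReport::new(
        Duration::from_secs(300),
        10_000,                       // total requests
        9_950,                        // successful
        50,                           // failed
        0.125,                        // average response time (seconds)
        Percentiles { p50: 0.08, p90: 0.21, p95: 0.34, p99: 0.61 },
        vec![TestError {
            error_type: "timeout".to_string(),
            message: "request exceeded 5s".to_string(),
            count: 50,
        }],
    );
    report.save_json("test-reports/example.json")?;
    Ok(())
}
```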
31
gb-testing/src/scenarios/mod.rs
Normal file
|
@ -0,0 +1,31 @@
|
|||
use async_trait::async_trait;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Duration;
|
||||
|
||||
#[async_trait]
|
||||
pub trait TestScenario {
|
||||
async fn setup(&mut self) -> anyhow::Result<()>;
|
||||
async fn execute(&self) -> anyhow::Result<()>;
|
||||
async fn teardown(&mut self) -> anyhow::Result<()>;
|
||||
fn name(&self) -> &'static str;
|
||||
fn description(&self) -> &'static str;
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ScenarioResult {
|
||||
pub name: String,
|
||||
pub success: bool,
|
||||
pub duration: Duration,
|
||||
pub error: Option<String>,
|
||||
pub metrics: ScenarioMetrics,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ScenarioMetrics {
|
||||
pub requests: u64,
|
||||
pub failures: u64,
|
||||
pub avg_response_time: f64,
|
||||
pub max_response_time: f64,
|
||||
pub min_response_time: f64,
|
||||
pub throughput: f64,
|
||||
}
|
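A sketch of one concrete implementation of the trait; the health endpoint and the use of `reqwest` are assumptions for illustration only.

```rust
use async_trait::async_trait;

struct HealthCheckScenario;

#[async_trait]
impl TestScenario for HealthCheckScenario {
    async fn setup(&mut self) -> anyhow::Result<()> {
        // e.g. spin up fixtures or acquire test credentials
        Ok(())
    }

    async fn execute(&self) -> anyhow::Result<()> {
        // Hypothetical endpoint; replace with the real target under test.
        let status = reqwest::get("http://localhost:8080/health").await?.status();
        anyhow::ensure!(status.is_success(), "health check failed: {status}");
        Ok(())
    }

    async fn teardown(&mut self) -> anyhow::Result<()> {
        Ok(())
    }

    fn name(&self) -> &'static str { "health-check" }
    fn description(&self) -> &'static str { "Verifies the API health endpoint" }
}
```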
38
gb-testing/src/stress/mod.rs
Normal file
|
@ -0,0 +1,38 @@
|
|||
use std::time::Duration;
|
||||
use tokio::time;
|
||||
|
||||
pub struct StressTest {
|
||||
duration: Duration,
|
||||
concurrent_users: usize,
|
||||
}
|
||||
|
||||
impl StressTest {
|
||||
pub fn new(duration: Duration, concurrent_users: usize) -> Self {
|
||||
Self {
|
||||
duration,
|
||||
concurrent_users,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run(&self) -> anyhow::Result<()> {
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
while start.elapsed() < self.duration {
|
||||
let mut handles = Vec::new();
|
||||
|
||||
for _ in 0..self.concurrent_users {
|
||||
handles.push(tokio::spawn(async {
|
||||
// Stress test implementation
|
||||
}));
|
||||
}
|
||||
|
||||
for handle in handles {
|
||||
handle.await?;
|
||||
}
|
||||
|
||||
time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
49
gb-testing/src/utils/mod.rs
Normal file
|
@ -0,0 +1,49 @@
|
|||
use rand::{distributions::Alphanumeric, Rng};
|
||||
use std::time::Duration;
|
||||
|
||||
pub fn generate_random_string(length: usize) -> String {
|
||||
rand::thread_rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(length)
|
||||
.map(char::from)
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn generate_test_data(size: usize) -> Vec<u8> {
|
||||
(0..size).map(|_| rand::random::<u8>()).collect()
|
||||
}
|
||||
|
||||
pub async fn exponential_backoff<F, Fut, T>(
|
||||
mut operation: F,
|
||||
max_retries: u32,
|
||||
initial_delay: Duration,
|
||||
) -> anyhow::Result<T>
|
||||
where
|
||||
F: FnMut() -> Fut,
|
||||
Fut: std::future::Future<Output = anyhow::Result<T>>,
|
||||
{
|
||||
let mut retries = 0;
|
||||
let mut delay = initial_delay;
|
||||
|
||||
loop {
|
||||
match operation().await {
|
||||
Ok(value) => return Ok(value),
|
||||
Err(error) => {
|
||||
if retries >= max_retries {
|
||||
return Err(error);
|
||||
}
|
||||
tokio::time::sleep(delay).await;
|
||||
delay *= 2;
|
||||
retries += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn format_duration(duration: Duration) -> String {
|
||||
let total_seconds = duration.as_secs();
|
||||
let hours = total_seconds / 3600;
|
||||
let minutes = (total_seconds % 3600) / 60;
|
||||
let seconds = total_seconds % 60;
|
||||
format!("{:02}:{:02}:{:02}", hours, minutes, seconds)
|
||||
}
|
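For example, `exponential_backoff` could wrap a flaky HTTP call as below; the URL and retry budget are illustrative.

```rust
use std::time::Duration;

async fn fetch_with_retries() -> anyhow::Result<String> {
    // Retry up to 5 times, starting at 100 ms and doubling after each failure.
    exponential_backoff(
        || async {
            let response = reqwest::get("http://localhost:8080/health").await?;
            Ok(response.text().await?)
        },
        5,
        Duration::from_millis(100),
    )
    .await
}
```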
12
gb-testing/tests/chaos/kubernetes_test.rs
Normal file
|
@ -0,0 +1,12 @@
|
|||
use gb_testing::chaos::ChaosTest;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_kubernetes_chaos() -> anyhow::Result<()> {
|
||||
let chaos_test = ChaosTest::new("general-bots".to_string()).await?;
|
||||
|
||||
chaos_test.kill_random_pod().await?;
|
||||
chaos_test.network_partition().await?;
|
||||
chaos_test.resource_exhaustion().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
41
gb-testing/tests/integration/api_test.rs
Normal file
|
@ -0,0 +1,41 @@
|
|||
use gb_testing::integration::{IntegrationTest, IntegrationTestCase};
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
|
||||
struct ApiTest {
|
||||
test: IntegrationTest,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl IntegrationTestCase for ApiTest {
|
||||
async fn setup(&mut self) -> Result<()> {
|
||||
// Setup test environment
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute(&self) -> Result<()> {
|
||||
// Test API endpoints
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn teardown(&mut self) -> Result<()> {
|
||||
// Cleanup test environment
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_api_integration() -> Result<()> {
|
||||
let mut test = ApiTest {
|
||||
test: IntegrationTest {
|
||||
docker: testcontainers::clients::Cli::default(),
|
||||
db_pool: sqlx::PgPool::connect("postgres://postgres:postgres@localhost:5432/test").await?,
|
||||
},
|
||||
};
|
||||
|
||||
test.setup().await?;
|
||||
test.execute().await?;
|
||||
test.teardown().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
20
gb-testing/tests/load/auth_test.rs
Normal file
|
@ -0,0 +1,20 @@
|
|||
use gb_testing::load::{LoadTest, LoadTestConfig};
|
||||
use std::time::Duration;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_auth_load() -> anyhow::Result<()> {
|
||||
let config = LoadTestConfig {
|
||||
users: 100,
|
||||
duration: Duration::from_secs(300),
|
||||
ramp_up: Duration::from_secs(60),
|
||||
scenarios: vec!["auth".to_string()],
|
||||
};
|
||||
|
||||
let load_test = LoadTest::new(config);
|
||||
let report = load_test.run().await?;
|
||||
|
||||
report.save_json("test-reports/auth-load-test.json")?;
|
||||
report.save_html("test-reports/auth-load-test.html")?;
|
||||
|
||||
Ok(())
|
||||
}
|
17
gb-testing/tests/performance/api_bench.rs
Normal file
|
@ -0,0 +1,17 @@
|
|||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use gb_testing::performance;
|
||||
|
||||
pub fn api_benchmark(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("API");
|
||||
|
||||
group.bench_function("create_instance", |b| {
|
||||
b.iter(|| {
|
||||
// Benchmark implementation
|
||||
})
|
||||
});
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(benches, api_benchmark);
|
||||
criterion_main!(benches);
|
14
gb-testing/tests/stress/system_test.rs
Normal file
|
@ -0,0 +1,14 @@
|
|||
use gb_testing::stress::StressTest;
|
||||
use std::time::Duration;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_system_stress() -> anyhow::Result<()> {
|
||||
let stress_test = StressTest::new(
|
||||
Duration::from_secs(1800),
|
||||
1000,
|
||||
);
|
||||
|
||||
stress_test.run().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
24
gb-utils/Cargo.toml
Normal file
|
@ -0,0 +1,24 @@
|
|||
[package]
|
||||
name = "gb-utils"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
gb-core = { path = "../gb-core" }
|
||||
gb-document = { path = "../gb-document" }
|
||||
gb-image = { path = "../gb-image" }
|
||||
async-trait.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
thiserror.workspace = true
|
||||
tracing.workspace = true
|
||||
mime = "0.3"
|
||||
mime_guess = "2.0"
|
||||
uuid = { version = "1.6", features = ["v4"] }
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
tokio-test = "0.4"
|
83
gb-utils/src/detector.rs
Normal file
|
@ -0,0 +1,83 @@
|
|||
use gb_core::{Result, Error};
|
||||
use mime_guess::{from_path, mime};
|
||||
use std::path::Path;
|
||||
use tracing::instrument;
|
||||
|
||||
pub struct FileTypeDetector;
|
||||
|
||||
impl FileTypeDetector {
|
||||
#[instrument]
|
||||
pub fn detect_mime_type(path: &Path) -> Result<mime::Mime> {
|
||||
Ok(from_path(path).first_or_octet_stream())
|
||||
}
|
||||
|
||||
#[instrument(skip(data))]
|
||||
pub fn detect_from_bytes(data: &[u8]) -> Result<FileType> {
|
||||
if data.starts_with(b"%PDF") {
|
||||
Ok(FileType::Pdf)
|
||||
} else if data.starts_with(&[0x50, 0x4B, 0x03, 0x04]) {
|
||||
// ZIP header, could be DOCX/XLSX
|
||||
if Self::is_office_document(data) {
|
||||
Ok(FileType::Word)
|
||||
} else {
|
||||
Ok(FileType::Excel)
|
||||
}
|
||||
} else if data.starts_with(&[0x89, 0x50, 0x4E, 0x47]) {
|
||||
Ok(FileType::Png)
|
||||
} else if data.starts_with(&[0xFF, 0xD8, 0xFF]) {
|
||||
Ok(FileType::Jpeg)
|
||||
} else if data.starts_with(b"RIFF") && data[8..12] == *b"WEBP" {
|
||||
Ok(FileType::WebP)
|
||||
} else {
|
||||
Err(Error::Internal("Unknown file type".to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
fn is_office_document(data: &[u8]) -> bool {
|
||||
// Check for Office Open XML signatures
|
||||
// This is a simplified check
|
||||
std::str::from_utf8(data)
|
||||
.map(|s| s.contains("word/"))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum FileType {
|
||||
Pdf,
|
||||
Word,
|
||||
Excel,
|
||||
Png,
|
||||
Jpeg,
|
||||
WebP,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[rstest]
|
||||
fn test_detect_mime_type() -> Result<()> {
|
||||
let path = PathBuf::from("test.pdf");
|
||||
let mime = FileTypeDetector::detect_mime_type(&path)?;
|
||||
assert_eq!(mime.type_(), "application");
|
||||
assert_eq!(mime.subtype(), "pdf");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_detect_from_bytes() -> Result<()> {
|
||||
let pdf_data = b"%PDF-1.4\n";
|
||||
assert_eq!(FileTypeDetector::detect_from_bytes(pdf_data)?, FileType::Pdf);
|
||||
|
||||
let png_data = [0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A];
|
||||
assert_eq!(FileTypeDetector::detect_from_bytes(&png_data)?, FileType::Png);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
40
gb-utils/src/lib.rs
Normal file
|
@ -0,0 +1,40 @@
|
|||
pub mod detector;
|
||||
pub mod processor;
|
||||
|
||||
pub use detector::{FileTypeDetector, FileType};
|
||||
pub use processor::{FileProcessor, ProcessedFile, ProcessedContent};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use gb_core::Result;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_utils_integration() -> Result<()> {
|
||||
// Initialize processor
|
||||
let processor = FileProcessor::new()?;
|
||||
|
||||
// Test PDF processing
|
||||
let pdf_data = b"%PDF-1.4\nTest PDF";
|
||||
let pdf_path = PathBuf::from("test.pdf");
|
||||
let processed_pdf = processor.process_file(pdf_data, &pdf_path).await?;
|
||||
assert_eq!(processed_pdf.content_type(), "text");
|
||||
|
||||
// Test image processing
|
||||
let image_data = [0x89, 0x50, 0x4E, 0x47]; // PNG header
|
||||
let image_path = PathBuf::from("test.png");
|
||||
let processed_image = processor.process_file(&image_data, &image_path).await?;
|
||||
assert_eq!(processed_image.content_type(), "image");
|
||||
|
||||
// Test file type detection
|
||||
let detected_type = FileTypeDetector::detect_from_bytes(pdf_data)?;
|
||||
assert_eq!(detected_type, FileType::Pdf);
|
||||
|
||||
let mime_type = FileTypeDetector::detect_mime_type(&pdf_path)?;
|
||||
assert_eq!(mime_type.type_(), "application");
|
||||
assert_eq!(mime_type.subtype(), "pdf");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
216
gb-utils/src/processor.rs
Normal file
|
@ -0,0 +1,216 @@
|
|||
use gb_core::{Result, Error};
|
||||
use gb_document::{PdfProcessor, WordProcessor, ExcelProcessor};
|
||||
use gb_image::{ImageProcessor, ImageConverter};
|
||||
use super::detector::{FileTypeDetector, FileType};
|
||||
use std::path::Path;
|
||||
use tracing::{instrument, error};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub struct FileProcessor {
|
||||
image_processor: ImageProcessor,
|
||||
}
|
||||
|
||||
impl FileProcessor {
|
||||
pub fn new() -> Result<Self> {
|
||||
Ok(Self {
|
||||
image_processor: ImageProcessor::new()?,
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self, data))]
|
||||
pub async fn process_file(&self, data: &[u8], path: &Path) -> Result<ProcessedFile> {
|
||||
let file_type = FileTypeDetector::detect_from_bytes(data)?;
|
||||
let mime_type = FileTypeDetector::detect_mime_type(path)?;
|
||||
|
||||
match file_type {
|
||||
FileType::Pdf => {
|
||||
let text = PdfProcessor::extract_text(data)?;
|
||||
Ok(ProcessedFile {
|
||||
id: Uuid::new_v4(),
|
||||
original_name: path.file_name().unwrap().to_string_lossy().to_string(),
|
||||
mime_type,
|
||||
content: ProcessedContent::Text(text),
|
||||
})
|
||||
}
|
||||
FileType::Word => {
|
||||
let text = WordProcessor::extract_text(data)?;
|
||||
Ok(ProcessedFile {
|
||||
id: Uuid::new_v4(),
|
||||
original_name: path.file_name().unwrap().to_string_lossy().to_string(),
|
||||
mime_type,
|
||||
content: ProcessedContent::Text(text),
|
||||
})
|
||||
}
|
||||
FileType::Excel => {
|
||||
let json = ExcelProcessor::extract_as_json(data)?;
|
||||
Ok(ProcessedFile {
|
||||
id: Uuid::new_v4(),
|
||||
original_name: path.file_name().unwrap().to_string_lossy().to_string(),
|
||||
mime_type,
|
||||
content: ProcessedContent::Json(json),
|
||||
})
|
||||
}
|
||||
FileType::Png | FileType::Jpeg | FileType::WebP => {
|
||||
let image = self.image_processor.load_image(data)?;
|
||||
let text = self.image_processor.extract_text(&image)?;
|
||||
Ok(ProcessedFile {
|
||||
id: Uuid::new_v4(),
|
||||
original_name: path.file_name().unwrap().to_string_lossy().to_string(),
|
||||
mime_type,
|
||||
content: ProcessedContent::Image {
|
||||
text,
|
||||
width: image.width(),
|
||||
height: image.height(),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self, file))]
|
||||
pub async fn convert_file(
|
||||
&self,
|
||||
file: &ProcessedFile,
|
||||
target_type: FileType,
|
||||
) -> Result<Vec<u8>> {
|
||||
match (&file.content, target_type) {
|
||||
(ProcessedContent::Image { .. }, FileType::Png) => {
|
||||
let image = self.image_processor.load_image(file.raw_data())?;
|
||||
ImageConverter::to_png(&image)
|
||||
}
|
||||
(ProcessedContent::Image { .. }, FileType::Jpeg) => {
|
||||
let image = self.image_processor.load_image(file.raw_data())?;
|
||||
ImageConverter::to_jpeg(&image, 80)
|
||||
}
|
||||
(ProcessedContent::Image { .. }, FileType::WebP) => {
|
||||
let image = self.image_processor.load_image(file.raw_data())?;
|
||||
ImageConverter::to_webp(&image, 80)
|
||||
}
|
||||
(ProcessedContent::Text(text), FileType::Pdf) => {
|
||||
let doc = PdfProcessor::create_document(text)?;
|
||||
Ok(doc)
|
||||
}
|
||||
(ProcessedContent::Text(text), FileType::Word) => {
|
||||
let doc = WordProcessor::create_document(text)?;
|
||||
Ok(doc)
|
||||
}
|
||||
(ProcessedContent::Json(json), FileType::Excel) => {
|
||||
let data: Vec<Vec<String>> = serde_json::from_value(json.clone())?;
|
||||
let headers: Vec<&str> = data[0].iter().map(|s| s.as_str()).collect();
|
||||
ExcelProcessor::create_excel(&headers, &data[1..])
|
||||
}
|
||||
_ => Err(Error::Internal(format!(
|
||||
"Unsupported conversion: {:?} to {:?}",
|
||||
file.content_type(),
|
||||
target_type
|
||||
))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProcessedFile {
|
||||
pub id: Uuid,
|
||||
pub original_name: String,
|
||||
pub mime_type: mime::Mime,
|
||||
pub content: ProcessedContent,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ProcessedContent {
|
||||
Text(String),
|
||||
Json(serde_json::Value),
|
||||
Image {
|
||||
text: String,
|
||||
width: u32,
|
||||
height: u32,
|
||||
},
|
||||
}
|
||||
|
||||
impl ProcessedFile {
|
||||
pub fn content_type(&self) -> &'static str {
|
||||
match self.content {
|
||||
ProcessedContent::Text(_) => "text",
|
||||
ProcessedContent::Json(_) => "json",
|
||||
ProcessedContent::Image { .. } => "image",
|
||||
}
|
||||
}
|
||||
|
||||
pub fn raw_data(&self) -> &[u8] {
|
||||
// This is a placeholder - in a real implementation,
|
||||
// we would store the raw data alongside the processed content
|
||||
&[]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
|
||||
#[fixture]
|
||||
fn processor() -> FileProcessor {
|
||||
FileProcessor::new().unwrap()
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_process_pdf(processor: FileProcessor) -> Result<()> {
|
||||
let pdf_data = b"%PDF-1.4\nTest content";
|
||||
let path = Path::new("test.pdf");
|
||||
|
||||
let processed = processor.process_file(pdf_data, path).await?;
|
||||
assert_eq!(processed.content_type(), "text");
|
||||
|
||||
if let ProcessedContent::Text(text) = &processed.content {
|
||||
assert!(text.contains("Test content"));
|
||||
} else {
|
||||
panic!("Expected text content");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_process_image(processor: FileProcessor) -> Result<()> {
|
||||
let image_data = [0x89, 0x50, 0x4E, 0x47]; // PNG header
|
||||
let path = Path::new("test.png");
|
||||
|
||||
let processed = processor.process_file(&image_data, path).await?;
|
||||
assert_eq!(processed.content_type(), "image");
|
||||
|
||||
if let ProcessedContent::Image { width, height, .. } = processed.content {
|
||||
assert!(width > 0);
|
||||
assert!(height > 0);
|
||||
} else {
|
||||
panic!("Expected image content");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_convert_file(processor: FileProcessor) -> Result<()> {
|
||||
let text = "Test conversion";
|
||||
let processed = ProcessedFile {
|
||||
id: Uuid::new_v4(),
|
||||
original_name: "test.txt".to_string(),
|
||||
mime_type: mime::TEXT_PLAIN,
|
||||
content: ProcessedContent::Text(text.to_string()),
|
||||
};
|
||||
|
||||
let pdf_data = processor.convert_file(&processed, FileType::Pdf).await?;
|
||||
assert!(!pdf_data.is_empty());
|
||||
|
||||
let word_data = processor.convert_file(&processed, FileType::Word).await?;
|
||||
assert!(!word_data.is_empty());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
113
install.sh
Executable file
|
@ -0,0 +1,113 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
|
||||
# Install https transport if not already installed
|
||||
|
||||
sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common gnupg
|
||||
|
||||
# Update package lists
|
||||
echo "Updating package lists..."
|
||||
|
||||
|
||||
echo "Repository fixes completed!"
|
||||
|
||||
# Install system dependencies
|
||||
echo "Installing system dependencies..."
|
||||
sudo apt-get install -y \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
curl \
|
||||
git \
|
||||
clang \
|
||||
libclang-dev \
|
||||
postgresql \
|
||||
postgresql-contrib \
|
||||
redis-server \
|
||||
libopencv-dev \
|
||||
libtesseract-dev \
|
||||
cmake \
|
||||
protobuf-compiler \
|
||||
libprotobuf-dev
|
||||
|
||||
# Install Rust if not already installed
|
||||
if ! command -v cargo &> /dev/null; then
|
||||
echo "Installing Rust..."
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
source $HOME/.cargo/env
|
||||
fi
|
||||
|
||||
# Install kubectl if not present
|
||||
if ! command -v kubectl &> /dev/null; then
|
||||
echo "Installing kubectl..."
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
|
||||
chmod +x kubectl
|
||||
sudo mv kubectl /usr/local/bin/
|
||||
fi
|
||||
|
||||
# Setup project structure
|
||||
echo "Setting up project structure..."
|
||||
mkdir -p general-bots
|
||||
cd general-bots
|
||||
|
||||
# Optional: Azure CLI installation
|
||||
echo "Would you like to install Azure CLI? (y/n)"
|
||||
read -r response
|
||||
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
|
||||
then
|
||||
echo "Installing Azure CLI..."
|
||||
curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/microsoft.gpg > /dev/null
|
||||
echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/azure-cli.list
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y azure-cli
|
||||
fi
|
||||
|
||||
# Optional: HandBrake installation
|
||||
echo "Would you like to install HandBrake? (y/n)"
|
||||
read -r response
|
||||
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
|
||||
then
|
||||
echo "Installing HandBrake..."
|
||||
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8771ADB0816950D8
|
||||
sudo add-apt-repository -y ppa:stebbins/handbrake-releases
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y handbrake-cli handbrake-gtk
|
||||
fi
|
||||
|
||||
# Build the project
|
||||
echo "Building the project..."
|
||||
cargo build
|
||||
|
||||
# Run tests
|
||||
echo "Running tests..."
|
||||
./run_tests.sh
|
||||
|
||||
# Setup database
|
||||
echo "Setting up PostgreSQL database..."
|
||||
sudo systemctl start postgresql
|
||||
sudo systemctl enable postgresql
|
||||
|
||||
# Create database and user (with error handling)
|
||||
sudo -u postgres psql -c "CREATE DATABASE generalbots;" 2>/dev/null || echo "Database might already exist"
|
||||
sudo -u postgres psql -c "CREATE USER gbuser WITH PASSWORD 'gbpassword';" 2>/dev/null || echo "User might already exist"
|
||||
sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE generalbots TO gbuser;" 2>/dev/null || echo "Privileges might already be granted"
|
||||
|
||||
# Start Redis
|
||||
echo "Starting Redis service..."
|
||||
sudo systemctl start redis-server
|
||||
sudo systemctl enable redis-server
|
||||
|
||||
echo "Installation completed!"
|
||||
echo "Next steps:"
|
||||
echo "1. Configure your Kubernetes cluster"
|
||||
echo "2. Update k8s/base/*.yaml files with your configuration"
|
||||
echo "3. Run ./deploy.sh to deploy to Kubernetes"
|
||||
echo "4. Check deployment status with: kubectl -n general-bots get pods"
|
||||
|
||||
# Print service status
|
||||
echo -e "\nService Status:"
|
||||
echo "PostgreSQL status:"
|
||||
sudo systemctl status postgresql --no-pager
|
||||
echo -e "\nRedis status:"
|
||||
sudo systemctl status redis-server --no-pager
|
84
k8s/base/api.yaml
Normal file
|
@ -0,0 +1,84 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: api
|
||||
namespace: general-bots
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: api
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: api
|
||||
spec:
|
||||
containers:
|
||||
- name: api
|
||||
image: generalbotsproject/api:latest
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: DATABASE_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: postgres-creds
|
||||
key: url
|
||||
- name: REDIS_URL
|
||||
value: redis://redis:6379
|
||||
- name: KAFKA_BROKERS
|
||||
value: kafka:9092
|
||||
- name: RABBITMQ_URL
|
||||
value: amqp://rabbitmq:5672
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "1000m"
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: api
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: api
|
||||
ports:
|
||||
- port: 8080
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: api
|
||||
namespace: general-bots
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/cors-allow-methods: "GET,POST,PUT,DELETE,OPTIONS"
|
||||
nginx.ingress.kubernetes.io/cors-allow-origin: "*"
|
||||
nginx.ingress.kubernetes.io/enable-cors: "true"
|
||||
spec:
|
||||
rules:
|
||||
- host: api.general-bots.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: api
|
||||
port:
|
||||
number: 8080
|
75
k8s/base/document.yaml
Normal file
|
@ -0,0 +1,75 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: document-processor
|
||||
namespace: general-bots
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: document-processor
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: document-processor
|
||||
spec:
|
||||
containers:
|
||||
- name: document-processor
|
||||
image: generalbotsproject/document-processor:latest
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: info
|
||||
volumeMounts:
|
||||
- name: temp
|
||||
mountPath: /tmp
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "1000m"
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
volumes:
|
||||
- name: temp
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: document-processor
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: document-processor
|
||||
ports:
|
||||
- port: 8080
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: document-processor-config
|
||||
namespace: general-bots
|
||||
data:
|
||||
processing.conf: |
|
||||
max_file_size = 50MB
|
||||
supported_formats = [
|
||||
"pdf",
|
||||
"docx",
|
||||
"xlsx"
|
||||
]
|
||||
temp_dir = "/tmp"
|
||||
processing_timeout = 300s
|
75
k8s/base/image.yaml
Normal file
|
@ -0,0 +1,75 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: image-processor
|
||||
namespace: general-bots
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: image-processor
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: image-processor
|
||||
spec:
|
||||
containers:
|
||||
- name: image-processor
|
||||
image: generalbotsproject/image-processor:latest
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: info
|
||||
- name: OPENCV_DATA_PATH
|
||||
value: /usr/share/opencv4
|
||||
volumeMounts:
|
||||
- name: temp
|
||||
mountPath: /tmp
|
||||
- name: opencv-data
|
||||
mountPath: /usr/share/opencv4
|
||||
resources:
|
||||
requests:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "1Gi"
|
||||
cpu: "1000m"
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
volumes:
|
||||
- name: temp
|
||||
emptyDir: {}
|
||||
- name: opencv-data
|
||||
configMap:
|
||||
name: opencv-data
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: image-processor
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: image-processor
|
||||
ports:
|
||||
- port: 8080
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: opencv-data
|
||||
namespace: general-bots
|
||||
data:
|
||||
haarcascade_frontalface_default.xml: |
|
||||
<include actual cascade classifier XML data here>
|
33
k8s/base/ingress.yaml
Normal file
|
@ -0,0 +1,33 @@
|
|||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: monitoring
|
||||
namespace: general-bots
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
rules:
|
||||
- host: metrics.general-bots.io
|
||||
http:
|
||||
paths:
|
||||
- path: /prometheus
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: prometheus
|
||||
port:
|
||||
number: 9090
|
||||
- path: /grafana
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: grafana
|
||||
port:
|
||||
number: 3000
|
||||
- path: /jaeger
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: jaeger
|
||||
port:
|
||||
number: 16686
|
53
k8s/base/kafka.yaml
Normal file
|
@ -0,0 +1,53 @@
|
|||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: kafka
|
||||
namespace: general-bots
|
||||
spec:
|
||||
serviceName: kafka
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: kafka
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: kafka
|
||||
spec:
|
||||
containers:
|
||||
- name: kafka
|
||||
image: confluentinc/cp-kafka:7.4.0
|
||||
ports:
|
||||
- containerPort: 9092
|
||||
env:
|
||||
- name: KAFKA_ZOOKEEPER_CONNECT
|
||||
value: zookeeper:2181
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: KAFKA_ADVERTISED_LISTENERS
|
||||
value: PLAINTEXT://kafka-$(POD_NAME).kafka:9092
|
||||
volumeMounts:
|
||||
- name: kafka-data
|
||||
mountPath: /var/lib/kafka/data
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: kafka-data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kafka
|
||||
namespace: general-bots
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
app: kafka
|
||||
ports:
|
||||
- port: 9092
|
12
k8s/base/kustomization.yaml
Normal file
|
@ -0,0 +1,12 @@
|
|||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- namespace.yaml
|
||||
- postgres.yaml
|
||||
- redis.yaml
|
||||
- kafka.yaml
|
||||
- webrtc.yaml
|
||||
- api.yaml
|
||||
- web.yaml
|
||||
- monitoring.yaml
|
175
k8s/base/messaging.yaml
Normal file
|
@ -0,0 +1,175 @@
|
|||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: kafka
|
||||
namespace: general-bots
|
||||
spec:
|
||||
serviceName: kafka
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: kafka
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: kafka
|
||||
spec:
|
||||
containers:
|
||||
- name: kafka
|
||||
image: confluentinc/cp-kafka:7.4.0
|
||||
ports:
|
||||
- containerPort: 9092
|
||||
env:
|
||||
- name: KAFKA_ZOOKEEPER_CONNECT
|
||||
value: zookeeper:2181
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: KAFKA_ADVERTISED_LISTENERS
|
||||
value: PLAINTEXT://kafka-$(POD_NAME).kafka:9092
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /var/lib/kafka
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kafka
|
||||
namespace: general-bots
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
app: kafka
|
||||
ports:
|
||||
- port: 9092
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: rabbitmq
|
||||
namespace: general-bots
|
||||
spec:
|
||||
serviceName: rabbitmq
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rabbitmq
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rabbitmq
|
||||
spec:
|
||||
containers:
|
||||
- name: rabbitmq
|
||||
image: rabbitmq:3.12-management
|
||||
ports:
|
||||
- containerPort: 5672
|
||||
- containerPort: 15672
|
||||
env:
|
||||
- name: RABBITMQ_ERLANG_COOKIE
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: rabbitmq-secret
|
||||
key: erlang-cookie
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /var/lib/rabbitmq
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: rabbitmq
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: rabbitmq
|
||||
ports:
|
||||
- name: amqp
|
||||
port: 5672
|
||||
- name: management
|
||||
port: 15672
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: websocket
|
||||
namespace: general-bots
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: websocket
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: websocket
|
||||
spec:
|
||||
containers:
|
||||
- name: websocket
|
||||
image: generalbotsproject/websocket:latest
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: REDIS_URL
|
||||
value: redis://redis:6379
|
||||
- name: KAFKA_BROKERS
|
||||
value: kafka:9092
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: websocket
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: websocket
|
||||
ports:
|
||||
- port: 8080
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: websocket
|
||||
namespace: general-bots
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
|
||||
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
|
||||
spec:
|
||||
rules:
|
||||
- host: ws.general-bots.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: websocket
|
||||
port:
|
||||
number: 8080
|
158
k8s/base/monitoring.yaml
Normal file
|
@ -0,0 +1,158 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: prometheus
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: prometheus
|
||||
spec:
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: prom/prometheus:v2.45.0
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /etc/prometheus
|
||||
- name: storage
|
||||
mountPath: /prometheus
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: prometheus-config
|
||||
- name: storage
|
||||
persistentVolumeClaim:
|
||||
claimName: prometheus-storage
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: prometheus
|
||||
ports:
|
||||
- port: 9090
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: grafana
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: grafana
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: grafana
|
||||
spec:
|
||||
containers:
|
||||
- name: grafana
|
||||
image: grafana/grafana:9.5.5
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
volumeMounts:
|
||||
- name: storage
|
||||
mountPath: /var/lib/grafana
|
||||
volumes:
|
||||
- name: storage
|
||||
persistentVolumeClaim:
|
||||
claimName: grafana-storage
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: grafana
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: grafana
|
||||
ports:
|
||||
- port: 3000
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: jaeger
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: jaeger
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: jaeger
|
||||
spec:
|
||||
containers:
|
||||
- name: jaeger
|
||||
image: jaegertracing/all-in-one:1.47
|
||||
ports:
|
||||
- containerPort: 16686
|
||||
- containerPort: 4317
|
||||
- containerPort: 4318
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: jaeger
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: jaeger
|
||||
ports:
|
||||
- name: ui
|
||||
port: 16686
|
||||
- name: otlp-grpc
|
||||
port: 4317
|
||||
- name: otlp-http
|
||||
port: 4318
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: prometheus-config
|
||||
namespace: general-bots
|
||||
data:
|
||||
prometheus.yml: |
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
evaluation_interval: 15s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'general-bots'
|
||||
static_configs:
|
||||
- targets: ['api:8080']
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: prometheus-storage
|
||||
namespace: general-bots
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: grafana-storage
|
||||
namespace: general-bots
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
4
k8s/base/namespace.yaml
Normal file
|
@ -0,0 +1,4 @@
|
|||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: general-bots
|
74
k8s/base/nlp.yaml
Normal file
|
@ -0,0 +1,74 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nlp
|
||||
namespace: general-bots
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nlp
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nlp
|
||||
spec:
|
||||
containers:
|
||||
- name: nlp
|
||||
image: generalbotsproject/nlp:latest
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: RUST_LOG
|
||||
value: info
|
||||
- name: MODEL_CACHE_DIR
|
||||
value: /models
|
||||
volumeMounts:
|
||||
- name: models
|
||||
mountPath: /models
|
||||
resources:
|
||||
requests:
|
||||
memory: "4Gi"
|
||||
cpu: "2000m"
|
||||
limits:
|
||||
memory: "8Gi"
|
||||
cpu: "4000m"
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 20
|
||||
volumes:
|
||||
- name: models
|
||||
persistentVolumeClaim:
|
||||
claimName: nlp-models
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nlp
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: nlp
|
||||
ports:
|
||||
- port: 8080
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nlp-models
|
||||
namespace: general-bots
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Gi
|
57
k8s/base/postgres.yaml
Normal file
|
@ -0,0 +1,57 @@
|
|||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: postgres
|
||||
namespace: general-bots
|
||||
spec:
|
||||
serviceName: postgres
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: postgres
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
containers:
|
||||
- name: postgres
|
||||
image: postgres:15
|
||||
ports:
|
||||
- containerPort: 5432
|
||||
env:
|
||||
- name: POSTGRES_DB
|
||||
value: generalbots
|
||||
- name: POSTGRES_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: postgres-creds
|
||||
key: username
|
||||
- name: POSTGRES_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: postgres-creds
|
||||
key: password
|
||||
volumeMounts:
|
||||
- name: postgres-data
|
||||
mountPath: /var/lib/postgresql/data
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: postgres-data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: postgres
|
||||
namespace: general-bots
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
app: postgres
|
||||
ports:
|
||||
- port: 5432
|
53
k8s/base/redis.yaml
Normal file
|
@ -0,0 +1,53 @@
|
|||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: redis
|
||||
namespace: general-bots
|
||||
spec:
|
||||
serviceName: redis
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: redis
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
containers:
|
||||
- name: redis
|
||||
image: redis:7
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
command:
|
||||
- redis-server
|
||||
- /etc/redis/redis.conf
|
||||
volumeMounts:
|
||||
- name: redis-config
|
||||
mountPath: /etc/redis
|
||||
- name: redis-data
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: redis-config
|
||||
configMap:
|
||||
name: redis-config
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: redis-data
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: redis
|
||||
namespace: general-bots
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
app: redis
|
||||
ports:
|
||||
- port: 6379
|
44
k8s/base/webrtc.yaml
Normal file
|
@ -0,0 +1,44 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: webrtc
|
||||
namespace: general-bots
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webrtc
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webrtc
|
||||
spec:
|
||||
containers:
|
||||
- name: webrtc
|
||||
image: generalbotsproject/webrtc:latest
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
env:
|
||||
- name: REDIS_URL
|
||||
value: redis:6379
|
||||
- name: KAFKA_BROKERS
|
||||
value: kafka:9092
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "500m"
|
||||
limits:
|
||||
memory: "2Gi"
|
||||
cpu: "1000m"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: webrtc
|
||||
namespace: general-bots
|
||||
spec:
|
||||
selector:
|
||||
app: webrtc
|
||||
ports:
|
||||
- port: 8080
|
||||
type: ClusterIP
|
42
run_tests.sh
Executable file
|
@ -0,0 +1,42 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "Running tests for all components..."
|
||||
|
||||
# Core tests
|
||||
echo "Testing gb-core..."
|
||||
cd gb-core && cargo test
|
||||
|
||||
# API tests
|
||||
echo "Testing gb-api..."
|
||||
cd ../gb-api && cargo test
|
||||
|
||||
# VM tests
|
||||
echo "Testing gb-vm..."
|
||||
cd ../gb-vm && cargo test
|
||||
|
||||
# Document processing tests
|
||||
echo "Testing gb-document..."
|
||||
cd ../gb-document && cargo test
|
||||
|
||||
# Image processing tests
|
||||
echo "Testing gb-image..."
|
||||
cd ../gb-image && cargo test
|
||||
|
||||
# NLP tests
|
||||
echo "Testing gb-nlp..."
|
||||
cd ../gb-nlp && cargo test
|
||||
|
||||
# Utils tests
|
||||
echo "Testing gb-utils..."
|
||||
cd ../gb-utils && cargo test
|
||||
|
||||
# Messaging tests
|
||||
echo "Testing gb-messaging..."
|
||||
cd ../gb-messaging && cargo test
|
||||
|
||||
# Monitoring tests
|
||||
echo "Testing gb-monitoring..."
|
||||
cd ../gb-monitoring && cargo test
|
||||
|
||||
echo "All tests completed successfully!"
|