fix(all): Organizing APIs and docs.
parent eadbed756f
commit 2ce7d2e340
6 changed files with 168 additions and 98 deletions
README.md (80 lines changed)

@@ -36,12 +36,11 @@ GB6 is a billion-scale real-time communication platform integrating advanced bot
 - PostgreSQL with sharding
 - Redis caching
 - TiKV distributed storage
-- Customer data management

 ## 🏗 Architecture

 ### Multi-Tenant Core
-- Organization hierarchy
+- Organizations
 - Instance management
 - Resource quotas
 - Usage analytics
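For orientation, the multi-tenant items above might map onto types along these lines. This is a sketch only; none of these names are taken from gb-core:

```rust
// Minimal sketch of the multi-tenant hierarchy; all names are assumptions.
use uuid::Uuid;

pub struct Organization {
    pub id: Uuid,
    pub name: String,
    pub instances: Vec<Instance>,
    pub quota: ResourceQuota,
}

pub struct Instance {
    pub id: Uuid,
    pub organization_id: Uuid,
    pub status: InstanceStatus,
}

pub enum InstanceStatus {
    Active,
    Suspended,
}

pub struct ResourceQuota {
    pub max_instances: u32,
    pub max_messages_per_day: u64,
}

impl Organization {
    /// Enforce the per-organization instance quota before provisioning.
    pub fn can_create_instance(&self) -> bool {
        (self.instances.len() as u32) < self.quota.max_instances
    }
}
```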
@@ -52,29 +51,6 @@ GB6 is a billion-scale real-time communication platform integrating advanced bot
 - Media processing
 - Video conferencing

-### Storage Architecture
-```sql
--- Customer Sharding Example
-CREATE TABLE customers (
-    id UUID PRIMARY KEY,
-    name TEXT,
-    subscription_tier TEXT,
-    status TEXT,
-    max_instances INTEGER
-);
-```
-
-### Message Processing
-```rust
-// Kafka Producer Example
-pub async fn publish<T: Serialize>(
-    &self,
-    topic: &str,
-    key: &str,
-    message: &T,
-) -> Result<()>
-```
-
 ## 🛠 Installation

 ### Prerequisites
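The removed `publish` signature reads like a thin wrapper over a Kafka producer. For readers following along, a minimal sketch of one possible backing implementation, assuming the `rdkafka`, `serde_json`, and `anyhow` crates (the struct name and timeout are illustrative, not the project's actual code):

```rust
// Sketch only: one plausible implementation behind the removed `publish`
// signature. KafkaPublisher and the 5-second timeout are assumptions.
use rdkafka::producer::{FutureProducer, FutureRecord};
use serde::Serialize;
use std::time::Duration;

pub struct KafkaPublisher {
    producer: FutureProducer,
}

impl KafkaPublisher {
    pub async fn publish<T: Serialize>(
        &self,
        topic: &str,
        key: &str,
        message: &T,
    ) -> anyhow::Result<()> {
        // Serialize the payload to JSON before handing it to the producer.
        let payload = serde_json::to_vec(message)?;
        self.producer
            .send(
                FutureRecord::to(topic).key(key).payload(&payload),
                Duration::from_secs(5), // max time to wait for queue space
            )
            .await
            .map_err(|(err, _msg)| anyhow::anyhow!("kafka publish failed: {err}"))?;
        Ok(())
    }
}
```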
@@ -206,47 +182,49 @@ Licensed under terms specified in workspace configuration.
 - Extended monitoring

 ### Long Term
-- AI/ML integration
 - Advanced analytics
 - Global expansion
 - Enterprise features



-# Infrastructure Compliance Checklist - ISO 27001, HIPAA, LGPD

 | ✓ | Requirement | Component | Standard | Implementation Steps |
 |---|-------------|-----------|-----------|---------------------|
-| ⬜ | TLS 1.3 Configuration | Nginx | All | Configure modern SSL parameters and ciphers in `/etc/nginx/conf.d/ssl.conf` |
+| ✅ | TLS 1.3 Configuration | Nginx | All | Configure modern SSL parameters and ciphers in `/etc/nginx/conf.d/ssl.conf` |
-| ⬜ | Access Logging | Nginx | All | Enable detailed access logs with privacy fields in `/etc/nginx/nginx.conf` |
+| ✅ | Access Logging | Nginx | All | Enable detailed access logs with privacy fields in `/etc/nginx/nginx.conf` |
 | ⬜ | Rate Limiting | Nginx | ISO 27001 | Implement rate limiting rules in location blocks |
 | ⬜ | WAF Rules | Nginx | HIPAA | Install and configure ModSecurity with OWASP rules |
-| ⬜ | Reverse Proxy Security | Nginx | All | Configure security headers (X-Frame-Options, HSTS, CSP) |
+| ✅ | Reverse Proxy Security | Nginx | All | Configure security headers (X-Frame-Options, HSTS, CSP) |
-| ⬜ | MFA Implementation | Zitadel | All | Enable and enforce MFA for all administrative accounts |
+| ✅ | MFA Implementation | Zitadel | All | Enable and enforce MFA for all administrative accounts |
-| ⬜ | RBAC Configuration | Zitadel | All | Set up role-based access control with least privilege |
+| ✅ | RBAC Configuration | Zitadel | All | Set up role-based access control with least privilege |
-| ⬜ | Password Policy | Zitadel | All | Configure strong password requirements (length, complexity, history) |
+| ✅ | Password Policy | Zitadel | All | Configure strong password requirements (length, complexity, history) |
-| ⬜ | OAuth2/OIDC Setup | Zitadel | ISO 27001 | Configure secure OAuth flows and token policies |
+| ✅ | OAuth2/OIDC Setup | Zitadel | ISO 27001 | Configure secure OAuth flows and token policies |
-| ⬜ | Audit Logging | Zitadel | All | Enable comprehensive audit logging for user activities |
+| ✅ | Audit Logging | Zitadel | All | Enable comprehensive audit logging for user activities |
-| ⬜ | Encryption at Rest | Garage (S3) | All | Configure encrypted storage with key management |
+| ✅ | Encryption at Rest | MinIO | All | Configure encrypted storage with key management |
-| ⬜ | Bucket Policies | Garage (S3) | All | Implement strict bucket access policies |
+| ✅ | Bucket Policies | MinIO | All | Implement strict bucket access policies |
-| ⬜ | Object Versioning | Garage (S3) | HIPAA | Enable versioning for data recovery capability |
+| ✅ | Object Versioning | MinIO | HIPAA | Enable versioning for data recovery capability |
-| ⬜ | Access Logging | Garage (S3) | All | Enable detailed access logging for object operations |
+| ✅ | Access Logging | MinIO | All | Enable detailed access logging for object operations |
-| ⬜ | Lifecycle Rules | Garage (S3) | LGPD | Configure data retention and deletion policies |
+| ⬜ | Lifecycle Rules | MinIO | LGPD | Configure data retention and deletion policies |
-| ⬜ | DKIM/SPF/DMARC | Stalwart | All | Configure email authentication mechanisms |
+| ✅ | DKIM/SPF/DMARC | Stalwart | All | Configure email authentication mechanisms |
-| ⬜ | Mail Encryption | Stalwart | All | Enable TLS for mail transport |
+| ✅ | Mail Encryption | Stalwart | All | Enable TLS for mail transport |
-| ⬜ | Content Filtering | Stalwart | All | Implement content scanning and filtering rules |
+| ✅ | Content Filtering | Stalwart | All | Implement content scanning and filtering rules |
 | ⬜ | Mail Archiving | Stalwart | HIPAA | Configure compliant email archiving |
-| ⬜ | Sieve Filtering | Stalwart | All | Implement security-focused mail filtering rules |
+| ✅ | Sieve Filtering | Stalwart | All | Implement security-focused mail filtering rules |
 | ⬜ | System Hardening | Ubuntu | All | Apply CIS Ubuntu Linux benchmarks |
-| ⬜ | System Updates | Ubuntu | All | Configure unattended-upgrades for security patches |
+| ✅ | System Updates | Ubuntu | All | Configure unattended-upgrades for security patches |
 | ⬜ | Audit Daemon | Ubuntu | All | Configure auditd for system event logging |
-| ⬜ | Firewall Rules | Ubuntu | All | Configure UFW with restrictive rules |
+| ✅ | Firewall Rules | Ubuntu | All | Configure UFW with restrictive rules |
 | ⬜ | Disk Encryption | Ubuntu | All | Implement LUKS encryption for system disks |
 | ⬜ | SELinux/AppArmor | Ubuntu | All | Enable and configure mandatory access control |
-| ⬜ | Monitoring Setup | All | All | Install and configure Prometheus + Grafana |
+| ✅ | Monitoring Setup | All | All | Install and configure Prometheus + Grafana |
-| ⬜ | Log Aggregation | All | All | Implement centralized logging (e.g., ELK Stack) |
+| ✅ | Log Aggregation | All | All | Implement centralized logging (e.g., ELK Stack) |
 | ⬜ | Backup System | All | All | Configure automated backup system with encryption |
-| ⬜ | Network Isolation | All | All | Implement proper network segmentation |
+| ✅ | Network Isolation | All | All | Implement proper network segmentation |
+| ✅ | Data Classification | All | HIPAA/LGPD | Document data types and handling procedures |
+| ✅ | Session Management | Zitadel | All | Configure secure session timeouts and invalidation |
+| ✅ | Certificate Management | All | All | Implement automated certificate renewal with Let's Encrypt |
+| ✅ | Vulnerability Scanning | All | ISO 27001 | Regular automated scanning with tools like OpenVAS |
+| ✅ | Incident Response Plan | All | All | Document and test incident response procedures |
+| ✅ | Disaster Recovery | All | HIPAA | Implement and test disaster recovery procedures |


 ## Documentation Requirements

@@ -9,6 +9,7 @@ license = { workspace = true }
 gb-core = { path = "../gb-core" }
 gb-messaging = { path = "../gb-messaging" }
 gb-monitoring = { path = "../gb-monitoring" }
+gb-file = { path = "../gb-file" }
 tokio = { version = "1.0", features = ["full", "macros", "rt-multi-thread"] } # Add these features
 axum = { version = "0.7.9", features = ["ws", "multipart", "macros"] }
 serde = { workspace = true }
@@ -25,7 +25,10 @@ pub fn create_router(message_processor: MessageProcessor) -> Router {
         message_processor: Mutex::new(message_processor),
     });
     Router::new()
-        // File & Document Management
+
+
+        // File & Document Management
         .route("/files/upload", post(upload_file))
         .route("/files/download", post(download))
         .route("/files/copy", post(copy_file))
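As a sketch of how these routes connect to the file facade added later in this commit, one of the handlers might look as follows. The `AppState` layout, request shape, and module path are assumptions, not the project's actual handler code:

```rust
// Sketch: wiring the /files/copy route above to the FileManager facade.
// AppState's `file_manager` field and CopyRequest are assumed, not real code.
use axum::{extract::State, http::StatusCode, Json};
use serde::Deserialize;
use std::sync::Arc;

pub struct AppState {
    pub file_manager: gb_file::facade::FileManager, // assumed state layout
}

#[derive(Deserialize)]
pub struct CopyRequest {
    src: String,
    dest: String,
}

/// POST /files/copy: copies `src` to `dest` inside the bucket.
pub async fn copy_file(
    State(state): State<Arc<AppState>>,
    Json(req): Json<CopyRequest>,
) -> Result<StatusCode, StatusCode> {
    state
        .file_manager
        .copy_file(&req.src, &req.dest)
        .await
        .map(|_| StatusCode::OK)
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
}
```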
@@ -49,7 +52,7 @@ pub fn create_router(message_processor: MessageProcessor) -> Router {
.route("/files/sync/start", post(start_sync))
|
.route("/files/sync/start", post(start_sync))
|
||||||
.route("/files/sync/stop", post(stop_sync))
|
.route("/files/sync/stop", post(stop_sync))
|
||||||
|
|
||||||
// Document Processing
|
full ode bucket is abstrctd path variable, src, dest, full file manager acessible via actixweb ALL methods no excluses, inline funcition params, s3 api inside, all methodos, full code. // Document Processing
|
||||||
.route("/docs/merge", post(merge_documents))
|
.route("/docs/merge", post(merge_documents))
|
||||||
.route("/docs/convert", post(convert_document))
|
.route("/docs/convert", post(convert_document))
|
||||||
.route("/docs/fill", post(fill_document))
|
.route("/docs/fill", post(fill_document))
|
||||||
@@ -74,26 +77,6 @@ pub fn create_router(message_processor: MessageProcessor) -> Router {
.route("/groups/invites/send", post(send_group_invite))
|
.route("/groups/invites/send", post(send_group_invite))
|
||||||
.route("/groups/invites/list", get(list_group_invites))
|
.route("/groups/invites/list", get(list_group_invites))
|
||||||
|
|
||||||
// Teams & Projects
|
|
||||||
.route("/teams/create", post(create_team))
|
|
||||||
.route("/teams/update", put(update_team))
|
|
||||||
.route("/teams/delete", delete(delete_team))
|
|
||||||
.route("/teams/list", get(get_teams))
|
|
||||||
.route("/teams/search", post(search_teams))
|
|
||||||
.route("/teams/members", get(get_team_members))
|
|
||||||
.route("/teams/members/add", post(add_team_member))
|
|
||||||
.route("/teams/members/remove", post(remove_team_member))
|
|
||||||
.route("/teams/roles", post(set_team_roles))
|
|
||||||
.route("/teams/permissions", post(set_team_permissions))
|
|
||||||
.route("/teams/settings", post(update_team_settings))
|
|
||||||
.route("/teams/analytics", get(get_team_analytics))
|
|
||||||
.route("/teams/projects/create", post(create_project))
|
|
||||||
.route("/teams/projects/list", get(get_projects))
|
|
||||||
.route("/teams/projects/update", put(update_project))
|
|
||||||
.route("/teams/projects/delete", delete(delete_project))
|
|
||||||
.route("/teams/reports/generate", post(generate_team_report))
|
|
||||||
.route("/teams/activity", get(get_team_activity))
|
|
||||||
|
|
||||||
// Conversations & Real-time Communication
|
// Conversations & Real-time Communication
|
||||||
.route("/conversations/create", post(create_conversation))
|
.route("/conversations/create", post(create_conversation))
|
||||||
.route("/conversations/join", post(join_conversation))
|
.route("/conversations/join", post(join_conversation))
|
||||||
@@ -183,20 +166,6 @@ pub fn create_router(message_processor: MessageProcessor) -> Router {
.route("/storage/archive", post(archive_data))
|
.route("/storage/archive", post(archive_data))
|
||||||
.route("/storage/metrics", get(get_storage_metrics))
|
.route("/storage/metrics", get(get_storage_metrics))
|
||||||
|
|
||||||
// Automation & Workflows
|
|
||||||
.route("/automation/workflow/create", post(create_workflow))
|
|
||||||
.route("/automation/workflow/update", put(update_workflow))
|
|
||||||
.route("/automation/workflow/delete", delete(delete_workflow))
|
|
||||||
.route("/automation/workflow/execute", post(execute_workflow))
|
|
||||||
.route("/automation/workflow/status", get(get_workflow_status))
|
|
||||||
.route("/automation/triggers/create", post(create_trigger))
|
|
||||||
.route("/automation/triggers/list", get(list_triggers))
|
|
||||||
.route("/automation/schedule/create", post(create_schedule))
|
|
||||||
.route("/automation/schedule/update", put(update_schedule))
|
|
||||||
.route("/automation/actions/create", post(create_action))
|
|
||||||
.route("/automation/actions/execute", post(execute_action))
|
|
||||||
.route("/automation/rules/create", post(create_rule))
|
|
||||||
.route("/automation/rules/evaluate", post(evaluate_rules))
|
|
||||||
|
|
||||||
// Analytics & Reporting
|
// Analytics & Reporting
|
||||||
.route("/analytics/dashboard", get(get_dashboard_data))
|
.route("/analytics/dashboard", get(get_dashboard_data))
|
||||||
|
@@ -221,16 +190,6 @@ pub fn create_router(message_processor: MessageProcessor) -> Router {
.route("/admin/quotas/manage", post(manage_quotas))
|
.route("/admin/quotas/manage", post(manage_quotas))
|
||||||
.route("/admin/licenses/manage", post(manage_licenses))
|
.route("/admin/licenses/manage", post(manage_licenses))
|
||||||
|
|
||||||
// Integration & External Services
|
|
||||||
.route("/integrations/list", get(list_integrations))
|
|
||||||
.route("/integrations/install", post(install_integration))
|
|
||||||
.route("/integrations/configure", post(configure_integration))
|
|
||||||
.route("/integrations/uninstall", post(uninstall_integration))
|
|
||||||
.route("/integrations/status", get(get_integration_status))
|
|
||||||
.route("/integrations/sync", post(sync_integration_data))
|
|
||||||
.route("/integrations/webhook/create", post(create_webhook))
|
|
||||||
.route("/integrations/webhook/manage", post(manage_webhooks))
|
|
||||||
|
|
||||||
// AI & Machine Learning
|
// AI & Machine Learning
|
||||||
.route("/ai/analyze/text", post(analyze_text))
|
.route("/ai/analyze/text", post(analyze_text))
|
||||||
.route("/ai/analyze/image", post(analyze_image))
|
.route("/ai/analyze/image", post(analyze_image))
|
||||||
|
|
gb-file/Cargo.toml (new file, 20 lines)

@@ -0,0 +1,20 @@
+[package]
+name = "gb-file"
+version = { workspace = true }
+edition = { workspace = true }
+authors = { workspace = true }
+license = { workspace = true }
+
+[dependencies]
+gb-core = { path = "../gb-core" }
+async-trait = { workspace = true }
+tokio = { workspace = true }
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+thiserror = { workspace = true }
+tracing = { workspace = true }
+minio-rs = "0.1"
+
+[dev-dependencies]
+rstest = { workspace = true }
+tokio-test = "0.4"
+tempfile = "3.8"
gb-file/src/facade.rs (new file, 113 lines)

@@ -0,0 +1,113 @@
+use minio_rs::minio::client::Client;
+use minio_rs::minio::s3::args::{BucketExistsArgs, MakeBucketArgs, RemoveObjectArgs, GetObjectArgs, PutObjectArgs, ListObjectsArgs};
+use minio_rs::minio::s3::error::Error as MinioError;
+use std::io::Cursor;
+
+/// Represents a file manager for handling MinIO file operations.
+pub struct FileManager {
+    client: Client,
+    bucket_name: String,
+}
+
+impl FileManager {
+    /// Creates a new `FileManager` instance.
+    pub async fn new(endpoint: &str, access_key: &str, secret_key: &str, bucket_name: &str, use_ssl: bool) -> Result<Self, MinioError> {
+        let client = Client::new(endpoint, access_key, secret_key, use_ssl).await?;
+        Ok(Self {
+            client,
+            bucket_name: bucket_name.to_string(),
+        })
+    }
+
+    /// Checks if the bucket exists, and creates it if it doesn't.
+    pub async fn ensure_bucket_exists(&self) -> Result<(), MinioError> {
+        let exists = self.client
+            .bucket_exists(&BucketExistsArgs::new(&self.bucket_name))
+            .await?;
+        if !exists {
+            self.client
+                .make_bucket(&MakeBucketArgs::new(&self.bucket_name))
+                .await?;
+        }
+        Ok(())
+    }
+
+    /// Uploads a file to the specified path.
+    pub async fn upload_file(&self, path: &str, file_data: Vec<u8>) -> Result<(), MinioError> {
+        // Take the length before the buffer is moved into the cursor.
+        let len = file_data.len() as u64;
+        let args = PutObjectArgs::new(&self.bucket_name, path, Cursor::new(file_data), len);
+        self.client.put_object(&args).await?;
+        Ok(())
+    }
+
+    /// Downloads a file from the specified path.
+    pub async fn download_file(&self, path: &str) -> Result<Vec<u8>, MinioError> {
+        let args = GetObjectArgs::new(&self.bucket_name, path);
+        let object = self.client.get_object(&args).await?;
+        let data = object.bytes().await?;
+        Ok(data.to_vec())
+    }
+
+    /// Copies a file from the source path to the destination path.
+    pub async fn copy_file(&self, source_path: &str, destination_path: &str) -> Result<(), MinioError> {
+        let source_args = GetObjectArgs::new(&self.bucket_name, source_path);
+        let object = self.client.get_object(&source_args).await?;
+        let data = object.bytes().await?;
+
+        let destination_args = PutObjectArgs::new(&self.bucket_name, destination_path, Cursor::new(data.clone()), data.len() as u64);
+        self.client.put_object(&destination_args).await?;
+        Ok(())
+    }
+
+    /// Moves a file from the source path to the destination path.
+    pub async fn move_file(&self, source_path: &str, destination_path: &str) -> Result<(), MinioError> {
+        self.copy_file(source_path, destination_path).await?;
+        self.delete_file(source_path).await?;
+        Ok(())
+    }
+
+    /// Deletes a file at the specified path.
+    pub async fn delete_file(&self, path: &str) -> Result<(), MinioError> {
+        let args = RemoveObjectArgs::new(&self.bucket_name, path);
+        self.client.remove_object(&args).await?;
+        Ok(())
+    }
+
+    /// Lists all files under the specified prefix.
+    pub async fn list_files(&self, prefix: &str) -> Result<Vec<String>, MinioError> {
+        let args = ListObjectsArgs::new(&self.bucket_name).with_prefix(prefix);
+        let objects = self.client.list_objects(&args).await?;
+        let file_names = objects.into_iter().map(|obj| obj.name().to_string()).collect();
+        Ok(file_names)
+    }
+
+    /// Retrieves the contents of a file at the specified path as UTF-8 text.
+    pub async fn get_file_contents(&self, path: &str) -> Result<String, MinioError> {
+        let data = self.download_file(path).await?;
+        let contents = String::from_utf8(data).map_err(|_| MinioError::InvalidResponse)?;
+        Ok(contents)
+    }
+
+    /// Creates a folder at the specified path (a zero-byte key ending in '/').
+    pub async fn create_folder(&self, path: &str) -> Result<(), MinioError> {
+        let folder_path = if path.ends_with('/') {
+            path.to_string()
+        } else {
+            format!("{}/", path)
+        };
+        self.upload_file(&folder_path, vec![]).await
+    }
+
+    /// Shares a folder at the specified path (placeholder implementation).
+    pub async fn share_folder(&self, path: &str) -> Result<String, MinioError> {
+        Ok(format!("Folder shared: {}", path))
+    }
+
+    /// Searches for files matching the query under the specified prefix.
+    pub async fn search_files(&self, prefix: &str, query: &str) -> Result<Vec<String>, MinioError> {
+        let files = self.list_files(prefix).await?;
+        let results = files.into_iter().filter(|f| f.contains(query)).collect();
+        Ok(results)
+    }
+}
@@ -10,7 +10,6 @@ gb-core = { path = "../gb-core" }
 tokio = { workspace = true }
 sqlx = { workspace = true }
 redis = { workspace = true }
-tikv-client = { workspace = true }
 tracing = { workspace = true }
 async-trait = { workspace = true }
 serde = { workspace = true }