//! Drive Module - S3-based File Storage
//!
//! Provides file management operations using S3 as backend storage.
//! Supports bot storage and provides REST API endpoints for desktop frontend.
//!
//! API Endpoints:
//! - GET /files/list - List files and folders
//! - POST /files/read - Read file content
//! - POST /files/write - Write file content
//! - POST /files/delete - Delete file/folder
//! - POST /files/create-folder - Create new folder
//! - GET /files/versions - List file versions
//! - POST /files/restore - Restore file to specific version
2025-11-22 12:26:16 -03:00
|
|
|
|
2025-11-22 22:54:45 -03:00
|
|
|
#[cfg(feature = "console")]
|
2025-11-27 23:10:43 -03:00
|
|
|
use crate::console::file_tree::FileTree;
|
2025-11-27 08:34:24 -03:00
|
|
|
use crate::shared::state::AppState;
|
2025-11-22 12:26:16 -03:00
|
|
|
use axum::{
|
|
|
|
|
extract::{Query, State},
|
|
|
|
|
http::StatusCode,
|
|
|
|
|
response::Json,
|
|
|
|
|
routing::{get, post},
|
|
|
|
|
Router,
|
|
|
|
|
};
|
2025-11-27 13:53:16 -03:00
|
|
|
|
2025-11-21 09:28:35 -03:00
|
|
|
use serde::{Deserialize, Serialize};
|
2025-11-27 08:34:24 -03:00
|
|
|
// use serde_json::json; // Unused import
|
2025-11-21 09:28:35 -03:00
|
|
|
use std::sync::Arc;
|
|
|
|
|
|
2025-11-22 13:24:53 -03:00
|
|
|
pub mod document_processing;
|
2025-11-22 22:54:45 -03:00
|
|
|
pub mod drive_monitor;
|
2025-11-22 12:26:16 -03:00
|
|
|
pub mod vectordb;
|
|
|
|
|
|
2025-11-27 15:19:17 -03:00
|
|
|
// Note: Most functions are defined locally in this module
|
|
|
|
|
// The file module functions are not imported as they're either private or redefined here
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
// ===== Request/Response Structures =====
|
|
|
|
|
|
|
|
|
|
/// A single file or folder entry returned by the listing/search endpoints.
#[derive(Debug, Serialize, Deserialize)]
pub struct FileItem {
    /// Display name (normally the last path segment).
    pub name: String,
    /// Full object key/path within its bucket.
    pub path: String,
    /// True for folders (S3 common prefixes), false for objects.
    pub is_dir: bool,
    /// Object size in bytes; None for folders.
    pub size: Option<i64>,
    /// Last-modified timestamp rendered as a string; None for folders.
    pub modified: Option<String>,
    /// Emoji icon derived from the file extension (see `get_file_icon`).
    pub icon: String,
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// Query parameters for GET /files/list.
#[derive(Debug, Deserialize)]
pub struct ListQuery {
    /// Optional key prefix to list under; empty/None lists the bucket root.
    pub path: Option<String>,
    /// Optional bucket; when absent the handler falls back to a root listing.
    pub bucket: Option<String>,
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// Body for POST /files/read (and /files/getContents).
#[derive(Debug, Deserialize)]
pub struct ReadRequest {
    /// Bucket that holds the object.
    pub bucket: String,
    /// Full object key to read.
    pub path: String,
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// Response for POST /files/read: the object body decoded as UTF-8 text.
#[derive(Debug, Serialize)]
pub struct ReadResponse {
    /// File contents; the handler rejects non-UTF-8 objects.
    pub content: String,
}
|
|
|
|
|
|
|
|
|
|
/// Body for POST /files/write, /files/save and /files/upload.
#[derive(Debug, Deserialize)]
pub struct WriteRequest {
    /// Destination bucket.
    pub bucket: String,
    /// Full object key to write.
    pub path: String,
    /// Text content stored as the object body (UTF-8 bytes).
    pub content: String,
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// Body for POST /files/delete.
#[derive(Debug, Deserialize)]
pub struct DeleteRequest {
    /// Bucket that holds the object or folder.
    pub bucket: String,
    /// Object key; a trailing '/' marks a folder (prefix) delete.
    pub path: String,
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// Body for POST /files/create-folder and /files/createFolder.
#[derive(Debug, Deserialize)]
pub struct CreateFolderRequest {
    /// Bucket in which to create the folder marker.
    pub bucket: String,
    /// Parent path; "" or "/" places the folder at the bucket root.
    pub path: String,
    /// New folder name (appended to `path` with a trailing '/').
    pub name: String,
}
|
|
|
|
|
|
2025-11-22 13:24:53 -03:00
|
|
|
/// Body for POST /files/copy; source and destination may be in different buckets.
#[derive(Debug, Deserialize)]
pub struct CopyRequest {
    /// Bucket containing the source object.
    pub source_bucket: String,
    /// Key of the source object.
    pub source_path: String,
    /// Bucket to copy into.
    pub dest_bucket: String,
    /// Key of the destination object.
    pub dest_path: String,
}
|
|
|
|
|
|
|
|
|
|
/// Body for POST /files/move (implemented as copy-then-delete).
#[derive(Debug, Deserialize)]
pub struct MoveRequest {
    /// Bucket containing the source object.
    pub source_bucket: String,
    /// Key of the source object (deleted after a successful copy).
    pub source_path: String,
    /// Bucket to move into.
    pub dest_bucket: String,
    /// Key of the destination object.
    pub dest_path: String,
}
|
|
|
|
|
|
|
|
|
|
/// Body for POST /files/download (delegates to the read handler).
#[derive(Debug, Deserialize)]
pub struct DownloadRequest {
    /// Bucket that holds the object.
    pub bucket: String,
    /// Full object key to download.
    pub path: String,
}
|
|
|
|
|
|
|
|
|
|
/// Query parameters for GET /files/search.
#[derive(Debug, Deserialize)]
pub struct SearchQuery {
    /// Restrict the search to one bucket; None searches every bucket.
    pub bucket: Option<String>,
    /// Case-insensitive substring matched against file names.
    pub query: String,
    /// Optional suffix filter (e.g. ".pdf") applied to matching keys.
    pub file_type: Option<String>,
}
|
|
|
|
|
|
|
|
|
|
/// Body for POST /files/shareFolder.
///
/// NOTE(review): all fields are underscore-prefixed, i.e. deserialized but
/// apparently not consumed yet — confirm against the share_folder handler.
#[derive(Debug, Deserialize)]
pub struct ShareRequest {
    // Bucket containing the folder to share.
    pub _bucket: String,
    // Folder path to share.
    pub _path: String,
    // Users the folder is shared with.
    pub _users: Vec<String>,
    // Permission level (semantics not visible here).
    pub _permissions: String,
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// Generic success envelope used by mutation endpoints (write/delete/copy/...).
#[derive(Debug, Serialize)]
pub struct SuccessResponse {
    /// Always true on the success path; errors are returned as HTTP errors.
    pub success: bool,
    /// Optional human-readable status message.
    pub message: Option<String>,
}
|
|
|
|
|
|
2025-11-22 13:24:53 -03:00
|
|
|
/// Response for GET /files/quota: storage usage summary.
#[derive(Debug, Serialize)]
pub struct QuotaResponse {
    /// Total quota in bytes.
    pub total_bytes: i64,
    /// Bytes currently used.
    pub used_bytes: i64,
    /// Bytes still available (total - used).
    pub available_bytes: i64,
    /// Used fraction expressed as a percentage.
    pub percentage_used: f64,
}
|
|
|
|
|
|
|
|
|
|
/// Response for sharing endpoints.
#[derive(Debug, Serialize)]
pub struct ShareResponse {
    /// Identifier of the created share.
    pub share_id: String,
    /// URL for accessing the shared content.
    pub url: String,
    /// Optional expiry timestamp; None means no expiry was set.
    pub expires_at: Option<String>,
}
|
|
|
|
|
|
2025-12-05 06:50:45 -03:00
|
|
|
/// Sync status for desktop file synchronization
///
/// Desktop-only: These endpoints coordinate with the rclone process
/// running on the user's machine via the Tauri desktop app (botapp).
/// Web-only users see stub responses as sync requires local filesystem access.
#[derive(Debug, Serialize)]
pub struct SyncStatus {
    /// Current sync state (string-encoded; values defined by the sync handlers).
    pub status: String,
    /// Timestamp of the last completed sync, if any.
    pub last_sync: Option<String>,
    /// Number of files synced.
    pub files_synced: i64,
    /// Number of bytes synced.
    pub bytes_synced: i64,
    /// True when running inside the desktop app (real sync available).
    pub is_desktop: bool,
    /// Optional human-readable status message.
    pub message: Option<String>,
}
|
|
|
|
|
|
2025-11-30 16:40:11 -03:00
|
|
|
// ===== File Versioning Structures =====
|
|
|
|
|
|
|
|
|
|
/// Query parameters for GET /files/versions.
#[derive(Debug, Deserialize)]
pub struct VersionsQuery {
    /// Bucket holding the object; optional (handler default not visible here).
    pub bucket: Option<String>,
    /// Object key whose versions are listed.
    pub path: String,
}
|
|
|
|
|
|
|
|
|
|
/// A single S3 object version entry.
#[derive(Debug, Serialize)]
pub struct FileVersion {
    /// S3 version identifier.
    pub version_id: String,
    /// Modification timestamp of this version.
    pub modified: String,
    /// Size of this version in bytes.
    pub size: i64,
    /// True if this is the current (latest) version.
    pub is_latest: bool,
    /// ETag of the version, when available.
    pub etag: Option<String>,
}
|
|
|
|
|
|
|
|
|
|
/// Response for GET /files/versions.
#[derive(Debug, Serialize)]
pub struct VersionsResponse {
    /// Object key the versions belong to.
    pub path: String,
    /// All known versions of the object.
    pub versions: Vec<FileVersion>,
}
|
|
|
|
|
|
|
|
|
|
/// Body for POST /files/restore.
#[derive(Debug, Deserialize)]
pub struct RestoreRequest {
    /// Bucket holding the object; optional (handler default not visible here).
    pub bucket: Option<String>,
    /// Object key to restore.
    pub path: String,
    /// Version identifier to restore to.
    pub version_id: String,
}
|
|
|
|
|
|
|
|
|
|
/// Response for POST /files/restore.
#[derive(Debug, Serialize)]
pub struct RestoreResponse {
    /// Whether the restore completed.
    pub success: bool,
    /// Human-readable result message.
    pub message: String,
    /// The version id that was restored.
    pub restored_version: String,
    /// New version id created by the restore copy, if versioning reports one.
    pub new_version_id: Option<String>,
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
// ===== API Configuration =====
|
2025-11-21 09:28:35 -03:00
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// Configure drive API routes
///
/// Returns a `Router` carrying all /files/* and /docs/* endpoints.
/// Note: /files/save and /files/getContents are aliases mapped to the same
/// handlers as /files/write and /files/read; /files/createFolder and
/// /files/create-folder likewise share one handler.
#[allow(unused)]
pub fn configure() -> Router<Arc<AppState>> {
    Router::new()
        // Basic file operations
        .route("/files/list", get(list_files))
        .route("/files/read", post(read_file))
        .route("/files/write", post(write_file))
        .route("/files/save", post(write_file))
        .route("/files/getContents", post(read_file))
        .route("/files/delete", post(delete_file))
        .route("/files/upload", post(upload_file_to_drive))
        .route("/files/download", post(download_file))
        // File management
        .route("/files/copy", post(copy_file))
        .route("/files/move", post(move_file))
        .route("/files/createFolder", post(create_folder))
        .route("/files/create-folder", post(create_folder))
        .route("/files/dirFolder", post(list_folder_contents))
        // Search and discovery
        .route("/files/search", get(search_files))
        .route("/files/recent", get(recent_files))
        .route("/files/favorite", get(list_favorites))
        // Sharing and permissions
        .route("/files/shareFolder", post(share_folder))
        .route("/files/shared", get(list_shared))
        .route("/files/permissions", get(get_permissions))
        // Storage management
        .route("/files/quota", get(get_quota))
        // Sync operations
        .route("/files/sync/status", get(sync_status))
        .route("/files/sync/start", post(start_sync))
        .route("/files/sync/stop", post(stop_sync))
        // File versioning
        .route("/files/versions", get(list_versions))
        .route("/files/restore", post(restore_version))
        // Document processing
        .route("/docs/merge", post(document_processing::merge_documents))
        .route("/docs/convert", post(document_processing::convert_document))
        .route("/docs/fill", post(document_processing::fill_document))
        .route("/docs/export", post(document_processing::export_document))
        .route("/docs/import", post(document_processing::import_document))
}
|
|
|
|
|
|
|
|
|
|
// ===== API Handlers =====
|
|
|
|
|
|
|
|
|
|
/// GET /files/list - List files and folders in S3 bucket
|
|
|
|
|
pub async fn list_files(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Query(params): Query<ListQuery>,
|
|
|
|
|
) -> Result<Json<Vec<FileItem>>, (StatusCode, Json<serde_json::Value>)> {
|
2025-11-22 22:54:45 -03:00
|
|
|
// Use FileTree for hierarchical navigation when console feature is enabled
|
|
|
|
|
#[cfg(feature = "console")]
|
|
|
|
|
let result = {
|
|
|
|
|
let mut tree = FileTree::new(state.clone());
|
|
|
|
|
if let Some(bucket) = ¶ms.bucket {
|
|
|
|
|
if let Some(path) = ¶ms.path {
|
Add .env.example with comprehensive configuration template
The commit adds a complete example environment configuration file
documenting all available settings for BotServer, including logging,
database, server, drive, LLM, Redis, email, and feature flags.
Also removes hardcoded environment variable usage throughout the
codebase, replacing them with configuration via config.csv or
appropriate defaults. This includes:
- WhatsApp, Teams, Instagram adapter configurations
- Weather API key handling
- Email and directory service configurations
- Console feature conditionally compiles monitoring code
- Improved logging configuration with library suppression
2025-11-28 13:19:03 -03:00
|
|
|
tree.enter_folder(bucket.clone(), path.clone()).await.ok();
|
2025-11-22 22:54:45 -03:00
|
|
|
} else {
|
Add .env.example with comprehensive configuration template
The commit adds a complete example environment configuration file
documenting all available settings for BotServer, including logging,
database, server, drive, LLM, Redis, email, and feature flags.
Also removes hardcoded environment variable usage throughout the
codebase, replacing them with configuration via config.csv or
appropriate defaults. This includes:
- WhatsApp, Teams, Instagram adapter configurations
- Weather API key handling
- Email and directory service configurations
- Console feature conditionally compiles monitoring code
- Improved logging configuration with library suppression
2025-11-28 13:19:03 -03:00
|
|
|
tree.enter_bucket(bucket.clone()).await.ok();
|
2025-11-22 22:54:45 -03:00
|
|
|
}
|
2025-11-21 09:28:35 -03:00
|
|
|
} else {
|
Add .env.example with comprehensive configuration template
The commit adds a complete example environment configuration file
documenting all available settings for BotServer, including logging,
database, server, drive, LLM, Redis, email, and feature flags.
Also removes hardcoded environment variable usage throughout the
codebase, replacing them with configuration via config.csv or
appropriate defaults. This includes:
- WhatsApp, Teams, Instagram adapter configurations
- Weather API key handling
- Email and directory service configurations
- Console feature conditionally compiles monitoring code
- Improved logging configuration with library suppression
2025-11-28 13:19:03 -03:00
|
|
|
tree.load_root().await.ok();
|
2025-11-21 09:28:35 -03:00
|
|
|
}
|
Add .env.example with comprehensive configuration template
The commit adds a complete example environment configuration file
documenting all available settings for BotServer, including logging,
database, server, drive, LLM, Redis, email, and feature flags.
Also removes hardcoded environment variable usage throughout the
codebase, replacing them with configuration via config.csv or
appropriate defaults. This includes:
- WhatsApp, Teams, Instagram adapter configurations
- Weather API key handling
- Email and directory service configurations
- Console feature conditionally compiles monitoring code
- Improved logging configuration with library suppression
2025-11-28 13:19:03 -03:00
|
|
|
|
|
|
|
|
// Convert FileTree items to FileItem format
|
|
|
|
|
Ok::<Vec<FileItem>, (StatusCode, Json<serde_json::Value>)>(vec![])
|
2025-11-21 09:28:35 -03:00
|
|
|
};
|
|
|
|
|
|
2025-11-22 22:54:45 -03:00
|
|
|
#[cfg(not(feature = "console"))]
|
|
|
|
|
let result: Result<Vec<FileItem>, (StatusCode, Json<serde_json::Value>)> = {
|
|
|
|
|
// Fallback implementation without FileTree
|
2025-11-27 08:34:24 -03:00
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({"error": "S3 client not configured"})),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
2025-11-21 09:28:35 -03:00
|
|
|
|
2025-11-22 22:54:45 -03:00
|
|
|
if let Some(bucket) = ¶ms.bucket {
|
|
|
|
|
let mut items = Vec::new();
|
|
|
|
|
let prefix = params.path.as_deref().unwrap_or("");
|
|
|
|
|
|
2025-11-27 08:34:24 -03:00
|
|
|
let paginator = s3_client
|
2025-11-22 22:54:45 -03:00
|
|
|
.list_objects_v2()
|
|
|
|
|
.bucket(bucket)
|
|
|
|
|
.prefix(prefix)
|
|
|
|
|
.delimiter("/")
|
|
|
|
|
.into_paginator()
|
|
|
|
|
.send();
|
|
|
|
|
|
|
|
|
|
let mut stream = paginator;
|
|
|
|
|
while let Some(result) = stream.try_next().await.map_err(|e| {
|
2025-11-27 08:34:24 -03:00
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({"error": e.to_string()})),
|
|
|
|
|
)
|
2025-11-22 22:54:45 -03:00
|
|
|
})? {
|
|
|
|
|
// Add directories
|
|
|
|
|
if let Some(prefixes) = result.common_prefixes {
|
|
|
|
|
for prefix in prefixes {
|
|
|
|
|
if let Some(dir) = prefix.prefix {
|
2025-11-27 08:34:24 -03:00
|
|
|
let name = dir
|
|
|
|
|
.trim_end_matches('/')
|
|
|
|
|
.split('/')
|
|
|
|
|
.last()
|
|
|
|
|
.unwrap_or(&dir)
|
|
|
|
|
.to_string();
|
2025-11-22 22:54:45 -03:00
|
|
|
items.push(FileItem {
|
|
|
|
|
name,
|
|
|
|
|
path: dir.clone(),
|
|
|
|
|
is_dir: true,
|
|
|
|
|
size: None,
|
|
|
|
|
modified: None,
|
|
|
|
|
icon: get_file_icon(&dir),
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-11-21 09:28:35 -03:00
|
|
|
}
|
2025-11-22 22:54:45 -03:00
|
|
|
|
|
|
|
|
// Add files
|
|
|
|
|
if let Some(contents) = result.contents {
|
|
|
|
|
for object in contents {
|
|
|
|
|
if let Some(key) = object.key {
|
|
|
|
|
if !key.ends_with('/') {
|
|
|
|
|
let name = key.split('/').last().unwrap_or(&key).to_string();
|
|
|
|
|
items.push(FileItem {
|
|
|
|
|
name,
|
|
|
|
|
path: key.clone(),
|
|
|
|
|
is_dir: false,
|
|
|
|
|
size: object.size.map(|s| s as i64),
|
|
|
|
|
modified: object.last_modified.map(|t| t.to_string()),
|
|
|
|
|
icon: get_file_icon(&key),
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2025-11-21 09:28:35 -03:00
|
|
|
}
|
|
|
|
|
}
|
2025-11-22 22:54:45 -03:00
|
|
|
Ok(items)
|
|
|
|
|
} else {
|
|
|
|
|
Ok(vec![])
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
match result {
|
|
|
|
|
Ok(items) => Ok(Json(items)),
|
2025-11-27 08:34:24 -03:00
|
|
|
Err(e) => Err(e),
|
2025-11-22 22:54:45 -03:00
|
|
|
}
|
|
|
|
|
}
|
2025-11-21 09:28:35 -03:00
|
|
|
|
2025-11-22 22:54:45 -03:00
|
|
|
#[cfg(feature = "console")]
/// Convert a FileTree to a list of FileItems for display in the console UI
///
/// Mapping: buckets become root folders ("/{name}", robot icon for ".gbai"
/// bots, package icon otherwise), tree folders/files become entries under
/// "/{bucket}/{path}". Size/modified are not available from the tree and are
/// left as None.
#[allow(dead_code)]
pub fn convert_tree_to_items(tree: &FileTree) -> Vec<FileItem> {
    let mut items = Vec::new();

    for (display_name, node) in tree.get_items() {
        match node {
            crate::console::file_tree::TreeNode::Bucket { name } => {
                // Skip the synthetic empty-name bucket entry, if any.
                if !name.is_empty() {
                    items.push(FileItem {
                        name: display_name.clone(),
                        path: format!("/{}", name),
                        is_dir: true,
                        size: None,
                        modified: None,
                        // ".gbai" buckets are bot packages.
                        icon: if name.ends_with(".gbai") {
                            "🤖".to_string()
                        } else {
                            "📦".to_string()
                        },
                    });
                }
            }
            crate::console::file_tree::TreeNode::Folder { bucket, path } => {
                // Display only the last path segment as the folder name.
                let folder_name = path.split('/').last().unwrap_or(&display_name);
                items.push(FileItem {
                    name: folder_name.to_string(),
                    path: format!("/{}/{}", bucket, path),
                    is_dir: true,
                    size: None,
                    modified: None,
                    icon: "📁".to_string(),
                });
            }
            crate::console::file_tree::TreeNode::File { bucket, path } => {
                let file_name = path.split('/').last().unwrap_or(&display_name);
                items.push(FileItem {
                    name: file_name.to_string(),
                    path: format!("/{}/{}", bucket, path),
                    is_dir: false,
                    size: None,
                    modified: None,
                    icon: "📄".to_string(),
                });
            }
        }
    }

    items
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// POST /files/read - Read file content from S3
|
|
|
|
|
pub async fn read_file(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Json(req): Json<ReadRequest>,
|
|
|
|
|
) -> Result<Json<ReadResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 service not available" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let result = s3_client
|
|
|
|
|
.get_object()
|
|
|
|
|
.bucket(&req.bucket)
|
|
|
|
|
.key(&req.path)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to read file: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let bytes = result
|
|
|
|
|
.body
|
|
|
|
|
.collect()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to read file body: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?
|
|
|
|
|
.into_bytes();
|
|
|
|
|
|
|
|
|
|
let content = String::from_utf8(bytes.to_vec()).map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("File is not valid UTF-8: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
Ok(Json(ReadResponse { content }))
|
2025-11-21 09:28:35 -03:00
|
|
|
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// POST /files/write - Write file content to S3
|
|
|
|
|
pub async fn write_file(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Json(req): Json<WriteRequest>,
|
|
|
|
|
) -> Result<Json<SuccessResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 service not available" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
s3_client
|
|
|
|
|
.put_object()
|
|
|
|
|
.bucket(&req.bucket)
|
|
|
|
|
.key(&req.path)
|
|
|
|
|
.body(req.content.into_bytes().into())
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to write file: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
Ok(Json(SuccessResponse {
|
|
|
|
|
success: true,
|
|
|
|
|
message: Some("File written successfully".to_string()),
|
|
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/delete - Delete file or folder from S3
///
/// A path with a trailing '/' is treated as a folder: every object under the
/// prefix is listed and deleted one by one. Any other path deletes a single
/// object. 503 when S3 is unconfigured, 500 on S3 errors.
///
/// NOTE(review): the folder branch uses a single unpaginated ListObjectsV2
/// call, which returns at most one page (typically 1000 keys) — folders with
/// more objects would be only partially deleted. Confirm and paginate if that
/// case is reachable.
pub async fn delete_file(
    State(state): State<Arc<AppState>>,
    Json(req): Json<DeleteRequest>,
) -> Result<Json<SuccessResponse>, (StatusCode, Json<serde_json::Value>)> {
    let s3_client = state.drive.as_ref().ok_or_else(|| {
        (
            StatusCode::SERVICE_UNAVAILABLE,
            Json(serde_json::json!({ "error": "S3 service not available" })),
        )
    })?;

    // If path ends with /, it's a folder - delete all objects with this prefix
    if req.path.ends_with('/') {
        let result = s3_client
            .list_objects_v2()
            .bucket(&req.bucket)
            .prefix(&req.path)
            .send()
            .await
            .map_err(|e| {
                (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({ "error": format!("Failed to list objects for deletion: {}", e) })),
                )
            })?;

        // Delete each listed object individually; abort on the first failure.
        for obj in result.contents() {
            if let Some(key) = obj.key() {
                s3_client
                    .delete_object()
                    .bucket(&req.bucket)
                    .key(key)
                    .send()
                    .await
                    .map_err(|e| {
                        (
                            StatusCode::INTERNAL_SERVER_ERROR,
                            Json(serde_json::json!({ "error": format!("Failed to delete object: {}", e) })),
                        )
                    })?;
            }
        }
    } else {
        // Single object delete.
        s3_client
            .delete_object()
            .bucket(&req.bucket)
            .key(&req.path)
            .send()
            .await
            .map_err(|e| {
                (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(serde_json::json!({ "error": format!("Failed to delete file: {}", e) })),
                )
            })?;
    }

    Ok(Json(SuccessResponse {
        success: true,
        message: Some("Deleted successfully".to_string()),
    }))
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
/// POST /files/create-folder - Create new folder in S3
|
|
|
|
|
pub async fn create_folder(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Json(req): Json<CreateFolderRequest>,
|
|
|
|
|
) -> Result<Json<SuccessResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 service not available" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
2025-11-21 09:28:35 -03:00
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
// S3 doesn't have real folders, create an empty object with trailing /
|
|
|
|
|
let folder_path = if req.path.is_empty() || req.path == "/" {
|
|
|
|
|
format!("{}/", req.name)
|
2025-11-21 09:28:35 -03:00
|
|
|
} else {
|
2025-11-22 12:26:16 -03:00
|
|
|
format!("{}/{}/", req.path.trim_end_matches('/'), req.name)
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
s3_client
|
|
|
|
|
.put_object()
|
|
|
|
|
.bucket(&req.bucket)
|
|
|
|
|
.key(&folder_path)
|
|
|
|
|
.body(Vec::new().into())
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to create folder: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
Ok(Json(SuccessResponse {
|
|
|
|
|
success: true,
|
|
|
|
|
message: Some("Folder created successfully".to_string()),
|
|
|
|
|
}))
|
2025-11-21 09:28:35 -03:00
|
|
|
}
|
|
|
|
|
|
2025-11-22 12:26:16 -03:00
|
|
|
// ===== Helper Functions =====
|
|
|
|
|
|
|
|
|
|
/// Get appropriate icon for file based on extension
///
/// First matching suffix wins (table order mirrors the original checks);
/// anything unmatched falls back to the generic document icon.
fn get_file_icon(path: &str) -> String {
    // (suffix, icon) pairs, checked in order.
    const ICONS: &[(&str, &str)] = &[
        (".bas", "⚙️"),
        (".ast", "🔧"),
        (".csv", "📊"),
        (".gbkb", "📚"),
        (".json", "🔖"),
        (".txt", "📃"),
        (".md", "📃"),
        (".pdf", "📕"),
        (".zip", "📦"),
        (".tar", "📦"),
        (".gz", "📦"),
        (".jpg", "🖼️"),
        (".png", "🖼️"),
        (".gif", "🖼️"),
    ];

    ICONS
        .iter()
        .find(|(suffix, _)| path.ends_with(suffix))
        .map(|(_, icon)| (*icon).to_string())
        .unwrap_or_else(|| "📄".to_string())
}
|
2025-11-22 13:24:53 -03:00
|
|
|
|
|
|
|
|
// ===== Extended File Operations =====
|
|
|
|
|
|
|
|
|
|
/// POST /files/copy - Copy file or folder within S3
|
|
|
|
|
pub async fn copy_file(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Json(req): Json<CopyRequest>,
|
|
|
|
|
) -> Result<Json<SuccessResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 service not available" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let copy_source = format!("{}/{}", req.source_bucket, req.source_path);
|
|
|
|
|
|
|
|
|
|
s3_client
|
|
|
|
|
.copy_object()
|
|
|
|
|
.copy_source(©_source)
|
|
|
|
|
.bucket(&req.dest_bucket)
|
|
|
|
|
.key(&req.dest_path)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to copy file: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
Ok(Json(SuccessResponse {
|
|
|
|
|
success: true,
|
|
|
|
|
message: Some("File copied successfully".to_string()),
|
|
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/move - Move file or folder within S3
|
|
|
|
|
pub async fn move_file(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Json(req): Json<MoveRequest>,
|
|
|
|
|
) -> Result<Json<SuccessResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 service not available" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let copy_source = format!("{}/{}", req.source_bucket, req.source_path);
|
|
|
|
|
|
|
|
|
|
s3_client
|
|
|
|
|
.copy_object()
|
|
|
|
|
.copy_source(©_source)
|
|
|
|
|
.bucket(&req.dest_bucket)
|
|
|
|
|
.key(&req.dest_path)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to move file: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
s3_client
|
|
|
|
|
.delete_object()
|
|
|
|
|
.bucket(&req.source_bucket)
|
|
|
|
|
.key(&req.source_path)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(
|
|
|
|
|
serde_json::json!({ "error": format!("Failed to delete source file: {}", e) }),
|
|
|
|
|
),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
Ok(Json(SuccessResponse {
|
|
|
|
|
success: true,
|
|
|
|
|
message: Some("File moved successfully".to_string()),
|
|
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/upload - Upload file to S3
///
/// Thin alias: uploads share the same request shape as writes, so this
/// delegates directly to `write_file`.
pub async fn upload_file_to_drive(
    State(state): State<Arc<AppState>>,
    Json(req): Json<WriteRequest>,
) -> Result<Json<SuccessResponse>, (StatusCode, Json<serde_json::Value>)> {
    write_file(State(state), Json(req)).await
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/download - Download file from S3
|
|
|
|
|
pub async fn download_file(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Json(req): Json<DownloadRequest>,
|
|
|
|
|
) -> Result<Json<ReadResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
read_file(
|
|
|
|
|
State(state),
|
|
|
|
|
Json(ReadRequest {
|
|
|
|
|
bucket: req.bucket,
|
|
|
|
|
path: req.path,
|
|
|
|
|
}),
|
|
|
|
|
)
|
|
|
|
|
.await
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/dirFolder - List folder contents
|
|
|
|
|
pub async fn list_folder_contents(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Json(req): Json<ReadRequest>,
|
|
|
|
|
) -> Result<Json<Vec<FileItem>>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
list_files(
|
|
|
|
|
State(state),
|
|
|
|
|
Query(ListQuery {
|
|
|
|
|
path: Some(req.path),
|
|
|
|
|
bucket: Some(req.bucket),
|
|
|
|
|
}),
|
|
|
|
|
)
|
|
|
|
|
.await
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// GET /files/search - Search for files
|
|
|
|
|
pub async fn search_files(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Query(params): Query<SearchQuery>,
|
|
|
|
|
) -> Result<Json<Vec<FileItem>>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 service not available" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let mut all_items = Vec::new();
|
2025-11-27 23:10:43 -03:00
|
|
|
let buckets = if let Some(bucket) = params.bucket.as_ref() {
|
2025-11-22 13:24:53 -03:00
|
|
|
vec![bucket.clone()]
|
|
|
|
|
} else {
|
|
|
|
|
let result = s3_client.list_buckets().send().await.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to list buckets: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
result
|
|
|
|
|
.buckets()
|
|
|
|
|
.iter()
|
|
|
|
|
.filter_map(|b| b.name().map(String::from))
|
|
|
|
|
.collect()
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
for bucket in buckets {
|
|
|
|
|
let result = s3_client
|
|
|
|
|
.list_objects_v2()
|
|
|
|
|
.bucket(&bucket)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to list objects: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
for obj in result.contents() {
|
|
|
|
|
if let Some(key) = obj.key() {
|
|
|
|
|
let name = key.split('/').last().unwrap_or(key).to_lowercase();
|
|
|
|
|
let query_lower = params.query.to_lowercase();
|
|
|
|
|
|
|
|
|
|
if name.contains(&query_lower) {
|
|
|
|
|
if let Some(file_type) = ¶ms.file_type {
|
|
|
|
|
if key.ends_with(file_type) {
|
|
|
|
|
all_items.push(FileItem {
|
|
|
|
|
name: name.to_string(),
|
|
|
|
|
path: key.to_string(),
|
|
|
|
|
is_dir: false,
|
|
|
|
|
size: obj.size(),
|
|
|
|
|
modified: obj.last_modified().map(|t| t.to_string()),
|
|
|
|
|
icon: get_file_icon(key),
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
all_items.push(FileItem {
|
|
|
|
|
name: name.to_string(),
|
|
|
|
|
path: key.to_string(),
|
|
|
|
|
is_dir: false,
|
|
|
|
|
size: obj.size(),
|
|
|
|
|
modified: obj.last_modified().map(|t| t.to_string()),
|
|
|
|
|
icon: get_file_icon(key),
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(Json(all_items))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// GET /files/recent - Get recently modified files
|
|
|
|
|
pub async fn recent_files(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Query(params): Query<ListQuery>,
|
|
|
|
|
) -> Result<Json<Vec<FileItem>>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 service not available" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let mut all_items = Vec::new();
|
|
|
|
|
let buckets = if let Some(bucket) = ¶ms.bucket {
|
|
|
|
|
vec![bucket.clone()]
|
|
|
|
|
} else {
|
|
|
|
|
let result = s3_client.list_buckets().send().await.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to list buckets: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
result
|
|
|
|
|
.buckets()
|
|
|
|
|
.iter()
|
|
|
|
|
.filter_map(|b| b.name().map(String::from))
|
|
|
|
|
.collect()
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
for bucket in buckets {
|
|
|
|
|
let result = s3_client
|
|
|
|
|
.list_objects_v2()
|
|
|
|
|
.bucket(&bucket)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to list objects: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
for obj in result.contents() {
|
|
|
|
|
if let Some(key) = obj.key() {
|
|
|
|
|
all_items.push(FileItem {
|
|
|
|
|
name: key.split('/').last().unwrap_or(key).to_string(),
|
|
|
|
|
path: key.to_string(),
|
|
|
|
|
is_dir: false,
|
|
|
|
|
size: obj.size(),
|
|
|
|
|
modified: obj.last_modified().map(|t| t.to_string()),
|
|
|
|
|
icon: get_file_icon(key),
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
all_items.sort_by(|a, b| b.modified.cmp(&a.modified));
|
|
|
|
|
all_items.truncate(50);
|
|
|
|
|
|
|
|
|
|
Ok(Json(all_items))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// GET /files/favorite - List favorite files
|
|
|
|
|
pub async fn list_favorites(
|
2025-11-27 08:34:24 -03:00
|
|
|
State(_state): State<Arc<AppState>>,
|
2025-11-22 13:24:53 -03:00
|
|
|
) -> Result<Json<Vec<FileItem>>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
Ok(Json(Vec::new()))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/shareFolder - Share folder with users
|
|
|
|
|
pub async fn share_folder(
|
2025-11-27 08:34:24 -03:00
|
|
|
State(_state): State<Arc<AppState>>,
|
|
|
|
|
Json(_req): Json<ShareRequest>,
|
2025-11-22 13:24:53 -03:00
|
|
|
) -> Result<Json<ShareResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let share_id = uuid::Uuid::new_v4().to_string();
|
|
|
|
|
let url = format!("https://share.example.com/{}", share_id);
|
|
|
|
|
|
|
|
|
|
Ok(Json(ShareResponse {
|
|
|
|
|
share_id,
|
|
|
|
|
url,
|
|
|
|
|
expires_at: Some(
|
|
|
|
|
chrono::Utc::now()
|
2025-11-27 23:10:43 -03:00
|
|
|
.checked_add_signed(chrono::Duration::hours(24))
|
2025-11-22 13:24:53 -03:00
|
|
|
.unwrap()
|
|
|
|
|
.to_rfc3339(),
|
|
|
|
|
),
|
|
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// GET /files/shared - List shared files and folders
|
|
|
|
|
pub async fn list_shared(
|
2025-11-27 08:34:24 -03:00
|
|
|
State(_state): State<Arc<AppState>>,
|
2025-11-22 13:24:53 -03:00
|
|
|
) -> Result<Json<Vec<FileItem>>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
Ok(Json(Vec::new()))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// GET /files/permissions - Get file/folder permissions
|
|
|
|
|
pub async fn get_permissions(
|
2025-11-27 08:34:24 -03:00
|
|
|
State(_state): State<Arc<AppState>>,
|
2025-11-22 13:24:53 -03:00
|
|
|
Query(params): Query<ReadRequest>,
|
|
|
|
|
) -> Result<Json<serde_json::Value>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
Ok(Json(serde_json::json!({
|
|
|
|
|
"bucket": params.bucket,
|
|
|
|
|
"path": params.path,
|
|
|
|
|
"permissions": {
|
|
|
|
|
"read": true,
|
|
|
|
|
"write": true,
|
|
|
|
|
"delete": true,
|
|
|
|
|
"share": true
|
|
|
|
|
},
|
|
|
|
|
"shared_with": []
|
|
|
|
|
})))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// GET /files/quota - Get storage quota information
|
|
|
|
|
pub async fn get_quota(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
) -> Result<Json<QuotaResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let s3_client = state.drive.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 service not available" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let mut total_size = 0i64;
|
|
|
|
|
|
|
|
|
|
let result = s3_client.list_buckets().send().await.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to list buckets: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let buckets: Vec<String> = result
|
|
|
|
|
.buckets()
|
|
|
|
|
.iter()
|
|
|
|
|
.filter_map(|b| b.name().map(String::from))
|
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
|
|
for bucket in buckets {
|
|
|
|
|
let list_result = s3_client
|
|
|
|
|
.list_objects_v2()
|
|
|
|
|
.bucket(&bucket)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to list objects: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
for obj in list_result.contents() {
|
|
|
|
|
total_size += obj.size().unwrap_or(0);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let total_bytes = 100_000_000_000i64;
|
|
|
|
|
let used_bytes = total_size;
|
|
|
|
|
let available_bytes = total_bytes - used_bytes;
|
|
|
|
|
let percentage_used = (used_bytes as f64 / total_bytes as f64) * 100.0;
|
|
|
|
|
|
|
|
|
|
Ok(Json(QuotaResponse {
|
|
|
|
|
total_bytes,
|
|
|
|
|
used_bytes,
|
|
|
|
|
available_bytes,
|
2025-11-27 23:10:43 -03:00
|
|
|
percentage_used: percentage_used as f64,
|
2025-11-22 13:24:53 -03:00
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// GET /files/sync/status - Get sync status
|
2025-12-05 06:50:45 -03:00
|
|
|
///
|
|
|
|
|
/// Desktop-only feature: File synchronization uses rclone running locally
|
|
|
|
|
/// on the user's machine. The Tauri desktop app (botapp) manages the rclone
|
|
|
|
|
/// process and reports status back through this endpoint.
|
|
|
|
|
///
|
|
|
|
|
/// For web-only users, this returns a stub response indicating sync
|
|
|
|
|
/// is not available (requires desktop app with local filesystem access).
|
|
|
|
|
///
|
|
|
|
|
/// Desktop app implementation: botapp/src/desktop/sync.rs
|
2025-11-22 13:24:53 -03:00
|
|
|
pub async fn sync_status(
|
2025-11-27 08:34:24 -03:00
|
|
|
State(_state): State<Arc<AppState>>,
|
2025-11-22 13:24:53 -03:00
|
|
|
) -> Result<Json<SyncStatus>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
Ok(Json(SyncStatus {
|
2025-12-05 06:50:45 -03:00
|
|
|
status: "unavailable".to_string(),
|
|
|
|
|
last_sync: None,
|
2025-11-22 13:24:53 -03:00
|
|
|
files_synced: 0,
|
|
|
|
|
bytes_synced: 0,
|
2025-12-05 06:50:45 -03:00
|
|
|
is_desktop: false,
|
|
|
|
|
message: Some(
|
|
|
|
|
"File sync requires the General Bots desktop app with rclone installed".to_string(),
|
|
|
|
|
),
|
2025-11-22 13:24:53 -03:00
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/sync/start - Start file synchronization
|
2025-12-05 06:50:45 -03:00
|
|
|
///
|
|
|
|
|
/// Desktop-only feature: Triggers rclone sync on the user's local machine.
|
|
|
|
|
/// The actual sync is performed by the Tauri desktop app which spawns
|
|
|
|
|
/// and manages the rclone subprocess.
|
|
|
|
|
///
|
|
|
|
|
/// Web users receive a response indicating this feature requires the desktop app.
|
|
|
|
|
///
|
|
|
|
|
/// Desktop app implementation: botapp/src/desktop/sync.rs
|
2025-11-22 13:24:53 -03:00
|
|
|
pub async fn start_sync(
|
2025-11-27 08:34:24 -03:00
|
|
|
State(_state): State<Arc<AppState>>,
|
2025-11-22 13:24:53 -03:00
|
|
|
) -> Result<Json<SuccessResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
Ok(Json(SuccessResponse {
|
2025-12-05 06:50:45 -03:00
|
|
|
success: false,
|
|
|
|
|
message: Some("File sync requires the General Bots desktop app. Install rclone and use the desktop app to sync files.".to_string()),
|
2025-11-22 13:24:53 -03:00
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/sync/stop - Stop file synchronization
|
2025-12-05 06:50:45 -03:00
|
|
|
///
|
|
|
|
|
/// Desktop-only feature: Stops the rclone process on the user's local machine.
|
|
|
|
|
/// The Tauri desktop app handles graceful termination of the sync process.
|
|
|
|
|
///
|
|
|
|
|
/// Web users receive a response indicating this feature requires the desktop app.
|
|
|
|
|
///
|
|
|
|
|
/// Desktop app implementation: botapp/src/desktop/sync.rs
|
2025-11-22 13:24:53 -03:00
|
|
|
pub async fn stop_sync(
|
2025-11-27 08:34:24 -03:00
|
|
|
State(_state): State<Arc<AppState>>,
|
2025-11-22 13:24:53 -03:00
|
|
|
) -> Result<Json<SuccessResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
Ok(Json(SuccessResponse {
|
2025-12-05 06:50:45 -03:00
|
|
|
success: false,
|
|
|
|
|
message: Some("File sync requires the General Bots desktop app".to_string()),
|
2025-11-22 13:24:53 -03:00
|
|
|
}))
|
|
|
|
|
}
|
2025-11-30 16:40:11 -03:00
|
|
|
|
|
|
|
|
// ===== File Versioning API =====
|
|
|
|
|
|
|
|
|
|
/// GET /files/versions - List all versions of a file
|
|
|
|
|
///
|
|
|
|
|
/// SeaweedFS/S3 supports object versioning. This endpoint lists all versions
|
|
|
|
|
/// of a specific file, allowing users to restore previous versions.
|
|
|
|
|
///
|
|
|
|
|
/// Query parameters:
|
|
|
|
|
/// - path: The file path to get versions for
|
|
|
|
|
/// - bucket: Optional bucket name (defaults to bot's bucket)
|
|
|
|
|
pub async fn list_versions(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Query(params): Query<VersionsQuery>,
|
|
|
|
|
) -> Result<Json<VersionsResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let bucket = params.bucket.unwrap_or_else(|| "default".to_string());
|
|
|
|
|
let path = params.path;
|
|
|
|
|
|
|
|
|
|
// Get S3 client from state
|
|
|
|
|
let s3_client = state.s3_client.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 storage not configured" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
// List object versions using S3 API
|
|
|
|
|
let versions_result = s3_client
|
|
|
|
|
.list_object_versions()
|
|
|
|
|
.bucket(&bucket)
|
|
|
|
|
.prefix(&path)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to list versions: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let mut versions: Vec<FileVersion> = Vec::new();
|
|
|
|
|
|
|
|
|
|
// Process version list
|
|
|
|
|
for version in versions_result.versions() {
|
|
|
|
|
if version.key().unwrap_or_default() == path {
|
|
|
|
|
versions.push(FileVersion {
|
|
|
|
|
version_id: version.version_id().unwrap_or("null").to_string(),
|
|
|
|
|
modified: version
|
|
|
|
|
.last_modified()
|
|
|
|
|
.map(|t| t.to_string())
|
|
|
|
|
.unwrap_or_else(|| chrono::Utc::now().to_rfc3339()),
|
|
|
|
|
size: version.size().unwrap_or(0),
|
|
|
|
|
is_latest: version.is_latest().unwrap_or(false),
|
|
|
|
|
etag: version.e_tag().map(|s| s.to_string()),
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Sort by modified date, newest first
|
|
|
|
|
versions.sort_by(|a, b| b.modified.cmp(&a.modified));
|
|
|
|
|
|
|
|
|
|
Ok(Json(VersionsResponse {
|
|
|
|
|
path: path.clone(),
|
|
|
|
|
versions,
|
|
|
|
|
}))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// POST /files/restore - Restore a file to a specific version
|
|
|
|
|
///
|
|
|
|
|
/// Restores a file to a previous version by copying the old version
|
|
|
|
|
/// to become the new current version. The previous versions are preserved.
|
|
|
|
|
///
|
|
|
|
|
/// Request body:
|
|
|
|
|
/// - path: The file path to restore
|
|
|
|
|
/// - version_id: The version ID to restore to
|
|
|
|
|
/// - bucket: Optional bucket name (defaults to bot's bucket)
|
|
|
|
|
pub async fn restore_version(
|
|
|
|
|
State(state): State<Arc<AppState>>,
|
|
|
|
|
Json(payload): Json<RestoreRequest>,
|
|
|
|
|
) -> Result<Json<RestoreResponse>, (StatusCode, Json<serde_json::Value>)> {
|
|
|
|
|
let bucket = payload.bucket.unwrap_or_else(|| "default".to_string());
|
|
|
|
|
let path = payload.path;
|
|
|
|
|
let version_id = payload.version_id;
|
|
|
|
|
|
|
|
|
|
// Get S3 client from state
|
|
|
|
|
let s3_client = state.s3_client.as_ref().ok_or_else(|| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::SERVICE_UNAVAILABLE,
|
|
|
|
|
Json(serde_json::json!({ "error": "S3 storage not configured" })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
// Copy the specific version to itself, making it the latest version
|
|
|
|
|
// S3 copy with version-id copies that version as a new object
|
|
|
|
|
let copy_source = format!("{}/{}?versionId={}", bucket, path, version_id);
|
|
|
|
|
|
|
|
|
|
let copy_result = s3_client
|
|
|
|
|
.copy_object()
|
|
|
|
|
.bucket(&bucket)
|
|
|
|
|
.key(&path)
|
|
|
|
|
.copy_source(©_source)
|
|
|
|
|
.send()
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| {
|
|
|
|
|
(
|
|
|
|
|
StatusCode::INTERNAL_SERVER_ERROR,
|
|
|
|
|
Json(serde_json::json!({ "error": format!("Failed to restore version: {}", e) })),
|
|
|
|
|
)
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let new_version_id = copy_result.version_id().map(|s| s.to_string());
|
|
|
|
|
|
|
|
|
|
Ok(Json(RestoreResponse {
|
|
|
|
|
success: true,
|
|
|
|
|
message: format!("Successfully restored {} to version {}", path, version_id),
|
|
|
|
|
restored_version: version_id,
|
|
|
|
|
new_version_id,
|
|
|
|
|
}))
|
|
|
|
|
}
|