refactor(all): Remove unused files and update dependencies
parent 0b4a23da0d
commit cfa14dd222
66 changed files with 2757 additions and 2769 deletions
.idea/.gitignore (generated, vendored; 8 deletions)
@@ -1,8 +0,0 @@
-# Default ignored files
-/shelf/
-/workspace.xml
-# Editor-based HTTP Client requests
-/httpRequests/
-# Datasource local storage ignored files
-/dataSources/
-/dataSources.local.xml
.idea/general-bots.iml (generated; 23 deletions)
@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<module type="EMPTY_MODULE" version="4">
-  <component name="NewModuleRootManager">
-    <content url="file://$MODULE_DIR$">
-      <sourceFolder url="file://$MODULE_DIR$/gb-api/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-auth/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-auth/tests" isTestSource="true" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-automation/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-core/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-image/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-media/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-messaging/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-migrations/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-monitoring/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-storage/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-testing/src" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/gb-testing/tests" isTestSource="true" />
-      <excludeFolder url="file://$MODULE_DIR$/target" />
-    </content>
-    <orderEntry type="inheritedJdk" />
-    <orderEntry type="sourceFolder" forTests="false" />
-  </component>
-</module>
.idea/modules.xml (generated; 8 deletions)
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="ProjectModuleManager">
-    <modules>
-      <module fileurl="file://$PROJECT_DIR$/.idea/general-bots.iml" filepath="$PROJECT_DIR$/.idea/general-bots.iml" />
-    </modules>
-  </component>
-</project>
.idea/vcs.xml (generated; 6 deletions)
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="VcsDirectoryMappings">
-    <mapping directory="" vcs="Git" />
-  </component>
-</project>
.vscode/launch.json (vendored; 12 changes)
@@ -8,10 +8,10 @@
       "cargo": {
         "args": [
           "build",
-          "--bin=gb-api"
+          "--bin=gb-server"
         ],
         "filter": {
-          "name": "gb-api",
+          "name": "gb-server",
           "kind": "bin"
         }
       },
@@ -26,16 +26,16 @@
     {
       "type": "lldb",
      "request": "launch",
-      "name": "Debug unit tests in executable 'gb-api'",
+      "name": "Debug unit tests in executable 'gb-server'",
       "cargo": {
         "args": [
           "test",
           "--no-run",
           "--lib",
-          "--package=gb-api"
+          "--package=gb-server"
         ],
         "filter": {
-          "name": "gb-api",
+          "name": "gb-server",
           "kind": "bin"
         }
       },
@@ -53,7 +53,7 @@
           "test",
           "--no-run",
           "--lib",
-          "--package=gb-api"
+          "--package=gb-server"
         ],
         "filter": {
           "name": "integration",
Cargo.lock (generated; 2405 changes)
File diff suppressed because it is too large.
Cargo.toml (54 changes)
@@ -1,39 +1,42 @@
 [workspace]
-resolver="2"
+resolver = "2"
 members = [
-    "gb-core",       # Core domain models and traits
-    "gb-api",        # API layer and server implementation
-    "gb-media",      # Media processing and WebRTC handling
-    "gb-messaging",  # Message queue and real-time communication
-    "gb-storage",    # Database and storage implementations
-    "gb-monitoring", # Metrics, logging and monitoring
-    "gb-auth",       # Authentication and authorization
-    "gb-testing",    # Integration and load testing
-    "gb-migrations", # Database migrations
-    #"gb-cloud",     # Cloud provider integrations
-    #"gb-vm",        # Virtual machine and BASIC compiler
-    "gb-automation", # Web and process automation
-    "gb-image",      # Image processing capabilities
+    "gb-core",
+    "gb-server",
+    "gb-media",
+    "gb-messaging",
+    "gb-storage",
+    "gb-monitoring",
+    "gb-auth",
+    "gb-testing",
+    "gb-migrations",
+    "gb-cloud",
+    "gb-vm",
+    "gb-automation",
+    "gb-image",
+    "gb-utils",
+    "gb-document",
+    "gb-file",
+    "gb-llm",
+    "gb-calendar",
 ]

-# [workspace.lints.rust]
-# unused_imports = "allow"
-# dead_code = "allow"
-# unused_variables = "allow"
-# dependency_on_unit_never_type_fallback = "allow"

 [workspace.package]
 version = "0.1.0"
 edition = "2021"
-authors = ["GeneralBots Team"]
+authors = ["General Bots Maintainers"]
 license = "MIT"

 [workspace.dependencies]
 # Core async runtime and utilities
 tokio = { version = "1.34", features = ["full"] }
-futures = "0.3"
+tokio-tungstenite = { version = "0.24.0", features = ["native-tls"] }
+tungstenite = "0.20"
+tokio-test = "0.4"
+tokio-stream = "0.1.17"
 async-trait = "0.1"
+futures = "0.3"
+futures-util = "0.3" # Add futures-util here
 parking_lot = "0.12"

 # Web framework and servers
@@ -41,6 +44,7 @@ axum = { version = "0.7.9", features = ["ws", "multipart"] }
 tower = "0.4"
 tower-http = { version = "0.5", features = ["cors", "trace", "fs"] }
 hyper = { version = "1.1", features = ["full"] }
+hyper-util = { version = "0.1" }
 tonic = { version = "0.10", features = ["tls", "transport"] }

 # Database and storage
@@ -108,7 +112,7 @@ wasm-bindgen = "0.2"
 js-sys = "0.3"
 web-sys = { version = "0.3", features = ["WebSocket", "WebRtcPeerConnection"] }

 # Natural language processing
 rust-bert = "0.21"
 tokenizers = "0.15"
 whatlang = "0.16"
@@ -119,4 +123,4 @@ docx = "1.1"
 zip = "0.6"

 [workspace.metadata]
 msrv = "1.70.0"
README.md (225 changes)
@@ -14,7 +14,7 @@ GB6 is a billion-scale real-time communication platform integrating advanced bot
 - Petabyte-scale storage

 ### Core Services
-- **API Service** (gb-api)
+- **API Service** (gb-server)
   - Axum-based REST & WebSocket
   - Multi-tenant request routing
   - Authentication & Authorization
@@ -61,11 +61,6 @@
 - Kafka 3.0+
 - GStreamer

-### Kubernetes Setup
-```bash
-# Initialize cluster
-./setup-k8s.sh
-
 # Deploy platform
 ./deploy.sh
 ```
@@ -79,7 +74,7 @@ cargo build --workspace
 cargo test --workspace

 # Start API service
-cargo run -p gb-api
+cargo run -p gb-server
 ```

 ## 📊 Monitoring & Operations
@@ -115,13 +110,12 @@ cargo run -p gb-api
 ### Project Structure
 ```
 general-bots/
-├── gb-api/        # API service
+├── gb-server/     # API service
 ├── gb-core/       # Core functionality
 ├── gb-media/      # Media processing
 ├── gb-messaging/  # Message brokers
 ├── gb-storage/    # Data storage
 ├── gb-utils/      # Utilities
-├── k8s/           # Kubernetes configs
 └── migrations/    # DB migrations
 ```

@@ -283,5 +277,218 @@ Licensed under terms specified in workspace configuration.
 14. **Kubernetes (Go)**: Container orchestration for scalable deployments.
 15. **Matrix (Rust)**: Real-time communication and collaboration.

+# API:
+
+## **File & Document Management**
+/files/upload
+/files/download
+/files/copy
+/files/move
+/files/delete
+/files/getContents
+/files/save
+/files/createFolder
+/files/shareFolder
+/files/dirFolder
+/files/list
+/files/search
+/files/recent
+/files/favorite
+/files/versions
+/files/restore
+/files/permissions
+/files/quota
+/files/shared
+/files/sync/status
+/files/sync/start
+/files/sync/stop
+
+---
+
+### **Document Processing**
+/docs/merge
+/docs/convert
+/docs/fill
+/docs/export
+/docs/import
+
+---
+
+### **Groups & Organizations**
+/groups/create
+/groups/update
+/groups/delete
+/groups/list
+/groups/search
+/groups/members
+/groups/members/add
+/groups/members/remove
+/groups/permissions
+/groups/settings
+/groups/analytics
+/groups/join/request
+/groups/join/approve
+/groups/join/reject
+/groups/invites/send
+/groups/invites/list
+
+---
+
+### **Conversations & Real-time Communication**
+/conversations/create
+/conversations/join
+/conversations/leave
+/conversations/members
+/conversations/messages
+/conversations/messages/send
+/conversations/messages/edit
+/conversations/messages/delete
+/conversations/messages/react
+/conversations/messages/pin
+/conversations/messages/search
+/conversations/calls/start
+/conversations/calls/join
+/conversations/calls/leave
+/conversations/calls/mute
+/conversations/calls/unmute
+/conversations/screen/share
+/conversations/screen/stop
+/conversations/recording/start
+/conversations/recording/stop
+/conversations/whiteboard/create
+/conversations/whiteboard/collaborate
+
+---
+
+### **Communication Services**
+/comm/email/send
+/comm/email/template
+/comm/email/schedule
+/comm/email/cancel
+/comm/sms/send
+/comm/sms/bulk
+/comm/notifications/send
+/comm/notifications/preferences
+/comm/broadcast/send
+/comm/contacts/import
+/comm/contacts/export
+/comm/contacts/sync
+/comm/contacts/groups
+
+---
+
+### **User Management & Authentication**
+/users/create
+/users/update
+/users/delete
+/users/list
+/users/search
+/users/profile
+/users/profile/update
+/users/settings
+/users/permissions
+/users/roles
+/users/status
+/users/presence
+/users/activity
+/users/security/2fa/enable
+/users/security/2fa/disable
+/users/security/devices
+/users/security/sessions
+/users/notifications/settings
+
+---
+
+### **Calendar & Task Management**
+/calendar/events/create
+/calendar/events/update
+/calendar/events/delete
+/calendar/events/list
+/calendar/events/search
+/calendar/availability/check
+/calendar/schedule/meeting
+/calendar/reminders/set
+/tasks/create
+/tasks/update
+/tasks/delete
+/tasks/list
+/tasks/assign
+/tasks/status/update
+/tasks/priority/set
+/tasks/dependencies/set
+
+---
+
+### **Storage & Data Management**
+/storage/save
+/storage/batch
+/storage/json
+/storage/delete
+/storage/quota/check
+/storage/cleanup
+/storage/backup/create
+/storage/backup/restore
+/storage/archive
+/storage/metrics
+
+---
+
+### **Analytics & Reporting**
+/analytics/dashboard
+/analytics/reports/generate
+/analytics/reports/schedule
+/analytics/metrics/collect
+/analytics/insights/generate
+/analytics/trends/analyze
+/analytics/export
+
+---
+
+### **System & Administration**
+/admin/system/status
+/admin/system/metrics
+/admin/logs/view
+/admin/logs/export
+/admin/config/update
+/admin/maintenance/schedule
+/admin/backup/create
+/admin/backup/restore
+/admin/users/manage
+/admin/roles/manage
+/admin/quotas/manage
+/admin/licenses/manage
+
+---
+
+### **AI & Machine Learning**
+/ai/analyze/text
+/ai/analyze/image
+/ai/generate/text
+/ai/generate/image
+/ai/translate
+/ai/summarize
+/ai/recommend
+/ai/train/model
+/ai/predict
+
+---
+
+### **Security & Compliance**
+/security/audit/logs
+/security/compliance/check
+/security/threats/scan
+/security/access/review
+/security/encryption/manage
+/security/certificates/manage
+
+---
+
+### **Health & Monitoring**
+/health
+/health/detailed
+/monitoring/status
+/monitoring/alerts
+/monitoring/metrics
+
 Built with ❤️ from Brazil, using Rust for maximum performance and reliability.
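Note: the list above gives route paths only; the commit does not document request shapes. As a quick smoke test, a client upload against the new /files/upload route might look like the following sketch, assuming a gb-server instance listening on localhost:3001 (the port used by the deleted gb-api main.rs further down) and the reqwest crate with its "multipart" feature; both are assumptions, not part of this commit.

```rust
use reqwest::multipart;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a multipart body with a single in-memory file part.
    let part = multipart::Part::bytes(b"hello".to_vec()).file_name("hello.txt");
    let form = multipart::Form::new().part("file", part);

    let resp = reqwest::Client::new()
        .post("http://localhost:3001/files/upload") // port is an assumption
        .multipart(form)
        .send()
        .await?;

    println!("status: {}", resp.status());
    Ok(())
}
```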
deploy.sh (21 changes)
@@ -3,29 +3,10 @@ set -e

 echo "Deploying General Bots platform..."

-# Create namespace
-kubectl apply -f k8s/base/namespace.yaml
-
-# Deploy infrastructure components
-kubectl apply -f k8s/base/postgres.yaml
-kubectl apply -f k8s/base/redis.yaml
-kubectl apply -f k8s/base/kafka.yaml
-kubectl apply -f k8s/base/monitoring.yaml
-
-# Deploy application components
-kubectl apply -f k8s/base/api.yaml
-kubectl apply -f k8s/base/webrtc.yaml
-kubectl apply -f k8s/base/image.yaml
-kubectl apply -f k8s/base/document.yaml
-
-# Deploy ingress rules
-kubectl apply -f k8s/base/ingress.yaml
-
 # Create DB.

-#cargo run -p gb-migrations --bin migrations
+cargo run -p gb-migrations --bin migrations

 echo "Deployment completed successfully!"
 echo "Please wait for all pods to be ready..."
-kubectl -n general-bots get pods -w
@@ -1,33 +0,0 @@
-[package]
-name = "gb-api"
-version = { workspace = true }
-edition = { workspace = true }
-authors = { workspace = true }
-license = { workspace = true }
-
-[dependencies]
-gb-core = { path = "../gb-core" }
-gb-messaging = { path = "../gb-messaging" }
-gb-monitoring = { path = "../gb-monitoring" }
-gb-file = {path = "../gb-file" }
-tokio = { version = "1.0", features = ["full", "macros", "rt-multi-thread"] } # Add these features
-axum = { version = "0.7.9", features = ["ws", "multipart", "macros"] }
-serde= { workspace = true }
-serde_json= { workspace = true }
-uuid= { workspace = true }
-tracing= { workspace = true }
-tracing-subscriber = { version = "0.3", features = ["env-filter"] }
-async-trait= { workspace = true }
-futures-util = { version = "0.3", features = ["sink"] }
-chrono = { workspace = true, features = ["serde"] }
-tokio-stream = "0.1.17"
-sqlx = { version = "0.7", features = ["runtime-tokio-rustls", "postgres", "chrono", "uuid"] }
-redis = { version = "0.23", features = ["tokio-comp"] }
-hyper = { version = "1.0", features = ["server"] }
-hyper-util = { version = "0.1" }
-tower = { workspace = true }
-tower-http = { version = "0.5", features = ["cors", "trace"] }
-
-[dev-dependencies]
-rstest= { workspace = true }
-tokio-test = "0.4"
@@ -1,97 +0,0 @@
-use gb_core::{Error, Result};
-use tracing::{info, error};
-use std::net::SocketAddr;
-use gb_messaging::MessageProcessor;
-
-#[allow(dead_code)]
-#[derive(Clone)]
-struct AppState {
-    db: sqlx::PgPool,
-    redis: redis::Client,
-    message_processor: MessageProcessor,
-    customer: PostgresCustomerRepository,
-}
-
-#[tokio::main]
-async fn main() -> Result<()> {
-    // Initialize logging first
-    init_logging()?;
-
-    // Initialize core components
-    let app = initialize_bot_server().await?;
-
-    // Start the server
-    start_server(app).await
-}
-
-async fn initialize_bot_server() -> Result<axum::Router> {
-    info!("Initializing General Bots server...");
-
-    // Initialize the MessageProcessor
-    let message_processor = MessageProcessor::new();
-    let state = AppState::new();
-    state.repo = PostgresCustomerRepository::new(Arc::new(pool));
-
-    // Build the Axum router using our router module
-    let app = gb_api::create_router(state)
-        .layer(tower_http::trace::TraceLayer::new_for_http());
-
-    Ok(app)
-}
-
-fn init_logging() -> Result<()> {
-    use tracing_subscriber::EnvFilter;
-
-    let env_filter = EnvFilter::try_from_default_env()
-        .unwrap_or_else(|_| EnvFilter::new("info"));
-
-    tracing_subscriber::fmt()
-        .with_env_filter(env_filter)
-        .with_file(true)
-        .with_line_number(true)
-        .with_thread_ids(true)
-        .init();
-
-    Ok(())
-}
-
-async fn initialize_database() -> Result<sqlx::PgPool> {
-    let database_url = std::env::var("DATABASE_URL")
-        .map_err(|_| Error::internal("DATABASE_URL not set".to_string()))?;
-
-    sqlx::PgPool::connect(&database_url)
-        .await
-        .map_err(|e| Error::internal(e.to_string()))
-}
-
-async fn initialize_redis() -> Result<redis::Client> {
-    let redis_url = std::env::var("REDIS_URL")
-        .map_err(|_| Error::internal("REDIS_URL not set".to_string()))?;
-
-    redis::Client::open(redis_url)
-        .map_err(|e| Error::internal(e.to_string()))
-}
-
-async fn start_server(app: axum::Router) -> Result<()> {
-    let addr = SocketAddr::from(([0, 0, 0, 0], 3001));
-    info!("Starting server on {}", addr);
-
-    match tokio::net::TcpListener::bind(addr).await {
-        Ok(listener) => {
-            info!("Listening on {}", addr);
-            axum::serve(listener, app)
-                .await
-                .map_err(|e| Error::internal(format!("Server error: {}", e)))
-        }
-        Err(e) => {
-            error!("Failed to bind to address: {}", e);
-            Err(Error::internal(format!("Failed to bind to address: {}", e)))
-        }
-    }
-}
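For reference, the file deleted above defines initialize_database and initialize_redis but never calls them, and initialize_bot_server refers to AppState::new and state.repo, neither of which exists on the struct it declares. A minimal sketch of how its own pieces could have been wired together, assuming gb_api::create_router accepts this AppState and that PostgresCustomerRepository::new takes an Arc<PgPool> as the deleted code suggests; illustrative only, not part of the commit:

```rust
use std::sync::Arc;

// Hypothetical rewiring of the deleted initialize_bot_server.
async fn initialize_bot_server() -> Result<axum::Router> {
    let db = initialize_database().await?; // defined in the deleted file
    let redis = initialize_redis().await?; // defined in the deleted file
    let customer = PostgresCustomerRepository::new(Arc::new(db.clone()));

    let state = AppState {
        db,
        redis,
        message_processor: MessageProcessor::new(),
        customer,
    };

    // Build the Axum router and add request tracing, as the original intended.
    Ok(gb_api::create_router(state)
        .layer(tower_http::trace::TraceLayer::new_for_http()))
}
```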
@@ -1,25 +0,0 @@
--- Create users table
-CREATE TABLE IF NOT EXISTS users (
-    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
-    email VARCHAR(255) NOT NULL UNIQUE,
-    name VARCHAR(255),
-    password_hash VARCHAR(255) NOT NULL,
-    role VARCHAR(50) NOT NULL DEFAULT 'user',
-    status VARCHAR(50) NOT NULL DEFAULT 'active',
-    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
-);
-
--- Create sessions table
-CREATE TABLE IF NOT EXISTS sessions (
-    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
-    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
-    refresh_token VARCHAR(255) NOT NULL UNIQUE,
-    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
-    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
-);
-
--- Create indexes
-CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
-CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);
-CREATE INDEX IF NOT EXISTS idx_sessions_refresh_token ON sessions(refresh_token);
@@ -1,13 +1,3 @@
 use axum::{Json, Extension};
-use crate::services::AuthService;
-use crate::AuthError;
 use crate::models::{LoginRequest, LoginResponse};
 use std::sync::Arc;
-
-pub async fn login_handler(
-    Extension(auth_service): Extension<Arc<AuthService>>,
-    Json(request): Json<LoginRequest>,
-) -> Result<Json<LoginResponse>, AuthError> {
-    let response = auth_service.login(request).await?;
-    Ok(Json(response))
-}
@@ -5,7 +5,3 @@ pub mod services; // Make services module public
 pub mod middleware;

 pub use error::AuthError;
-pub use handlers::*;
-pub use models::*;
-pub use services::AuthService;
-pub use middleware::*;
@@ -1,118 +0,0 @@
-use gb_core::{Result, Error};
-use crate::models::{LoginRequest, LoginResponse};
-use crate::models::user::{DbUser, UserRole, UserStatus};
-use std::sync::Arc;
-use sqlx::PgPool;
-use argon2::{
-    password_hash::{PasswordHash, PasswordHasher, SaltString, PasswordVerifier},
-    Argon2,
-};
-use rand::rngs::OsRng;
-use chrono::{DateTime, Utc, Duration}; // Add chrono imports
-use jsonwebtoken::{encode, EncodingKey, Header};
-use serde::{Serialize, Deserialize};
-
-pub struct AuthService {
-    db: Arc<PgPool>,
-    jwt_secret: String,
-    jwt_expiration: i64
-}
-
-impl AuthService {
-    pub fn new(db: Arc<PgPool>, jwt_secret: String, jwt_expiration: i64) -> Self {
-        Self {
-            db,
-            jwt_secret,
-            jwt_expiration,
-        }
-    }
-
-    pub async fn login(&self, request: LoginRequest) -> Result<LoginResponse> {
-        let user = sqlx::query_as!(
-            DbUser,
-            r#"
-            SELECT
-                id,
-                email,
-                password_hash,
-                role as "role!: String",
-                status as "status!: String",
-                created_at as "created_at!: DateTime<Utc>",
-                updated_at as "updated_at!: DateTime<Utc>"
-            FROM users
-            WHERE email = $1
-            "#,
-            request.email
-        )
-        .fetch_optional(&*self.db)
-        .await
-        .map_err(|e| Error::internal(e.to_string()))?
-        .ok_or_else(|| Error::internal("Invalid credentials"))?;
-
-        // Convert the string fields to their respective enum types
-        let user = DbUser {
-            id: user.id,
-            email: user.email,
-            password_hash: user.password_hash,
-            role: UserRole::from(user.role),
-            status: UserStatus::from(user.status),
-            created_at: user.created_at,
-            updated_at: user.updated_at,
-        };
-
-        self.verify_password(&request.password, &user.password_hash)?;
-
-        let token = self.generate_token(&user)?;
-
-        Ok(LoginResponse {
-            access_token: token,
-            refresh_token: uuid::Uuid::new_v4().to_string(),
-            token_type: "Bearer".to_string(),
-            expires_in: self.jwt_expiration,
-        })
-    }
-
-    pub fn hash_password(&self, password: &str) -> Result<String> {
-        let salt = SaltString::generate(&mut OsRng);
-        let argon2 = Argon2::default();
-
-        argon2
-            .hash_password(password.as_bytes(), &salt)
-            .map(|hash| hash.to_string())
-            .map_err(|e| Error::internal(e.to_string()))
-    }
-
-    fn verify_password(&self, password: &str, hash: &str) -> Result<()> {
-        let parsed_hash = PasswordHash::new(hash)
-            .map_err(|e| Error::internal(e.to_string()))?;
-
-        Argon2::default()
-            .verify_password(password.as_bytes(), &parsed_hash)
-            .map_err(|_| Error::internal("Invalid credentials"))
-    }
-
-    fn generate_token(&self, user: &DbUser) -> Result<String> {
-        #[derive(Debug, Serialize, Deserialize)]
-        struct Claims {
-            sub: String,
-            exp: i64,
-            iat: i64,
-        }
-
-        let now = Utc::now();
-        let exp = now + Duration::seconds(self.jwt_expiration);
-
-        let claims = Claims {
-            sub: user.id.to_string(),
-            exp: exp.timestamp(),
-            iat: now.timestamp(),
-        };
-
-        encode(
-            &Header::default(),
-            &claims,
-            &EncodingKey::from_secret(self.jwt_secret.as_bytes()),
-        )
-        .map_err(|e| Error::internal(e.to_string()))
-    }
-}
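The deleted service issues HS256 tokens through the jsonwebtoken crate. For context, the verification counterpart of generate_token would look roughly like this, assuming the same secret and Claims shape; a sketch, not code from this repository:

```rust
use jsonwebtoken::{decode, DecodingKey, Validation};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Claims {
    sub: String, // user id, as issued by generate_token above
    exp: i64,
    iat: i64,
}

fn verify_token(token: &str, secret: &str) -> Result<Claims, jsonwebtoken::errors::Error> {
    // Validation::default() uses HS256 and rejects expired tokens.
    let data = decode::<Claims>(
        token,
        &DecodingKey::from_secret(secret.as_bytes()),
        &Validation::default(),
    )?;
    Ok(data.claims)
}
```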
@@ -1,3 +0,0 @@
-pub mod auth_service;
-
-pub use auth_service::*;
@@ -7,14 +7,3 @@ license = { workspace = true }
 [dependencies]
 gb-core = { path = "../gb-core" }
-async-trait= { workspace = true }
-tokio= { workspace = true }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"thiserror= { workspace = true }
-tracing= { workspace = true }
-minio-rs = "0.1"

-[dev-dependencies]
-rstest= { workspace = true }
-tokio-test = "0.4"
-tempfile = "3.8"
@ -1,113 +0,0 @@
|
||||||
use minio_rs::minio::client::Client;
|
|
||||||
use minio_rs::minio::s3::args::{BucketExistsArgs, MakeBucketArgs, RemoveObjectArgs, GetObjectArgs, PutObjectArgs, ListObjectsArgs};
|
|
||||||
use minio_rs::minio::s3::response::Object;
|
|
||||||
use minio_rs::minio::s3::error::Error as MinioError;
|
|
||||||
use std::path::Path;
|
|
||||||
use std::io::Cursor;
|
|
||||||
|
|
||||||
/// Represents a file manager for handling MinIO file operations.
|
|
||||||
pub struct FileManager {
|
|
||||||
client: Client,
|
|
||||||
bucket_name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FileManager {
|
|
||||||
/// Creates a new `FileManager` instance.
|
|
||||||
pub async fn new(endpoint: &str, access_key: &str, secret_key: &str, bucket_name: &str, use_ssl: bool) -> Result<Self, MinioError> {
|
|
||||||
let client = Client::new(endpoint, access_key, secret_key, use_ssl).await?;
|
|
||||||
Ok(Self {
|
|
||||||
client,
|
|
||||||
bucket_name: bucket_name.to_string(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Checks if the bucket exists, and creates it if it doesn't.
|
|
||||||
pub async fn ensure_bucket_exists(&self) -> Result<(), MinioError> {
|
|
||||||
let exists = self.client
|
|
||||||
.bucket_exists(&BucketExistsArgs::new(&self.bucket_name))
|
|
||||||
.await?;
|
|
||||||
if !exists {
|
|
||||||
self.client
|
|
||||||
.make_bucket(&MakeBucketArgs::new(&self.bucket_name))
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Uploads a file to the specified path.
|
|
||||||
pub async fn upload_file(&self, path: &str, file_data: Vec<u8>) -> Result<(), MinioError> {
|
|
||||||
let args = PutObjectArgs::new(&self.bucket_name, path, Cursor::new(file_data), file_data.len() as u64);
|
|
||||||
self.client.put_object(&args).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Downloads a file from the specified path.
|
|
||||||
pub async fn download_file(&self, path: &str) -> Result<Vec<u8>, MinioError> {
|
|
||||||
let args = GetObjectArgs::new(&self.bucket_name, path);
|
|
||||||
let object = self.client.get_object(&args).await?;
|
|
||||||
let data = object.bytes().await?;
|
|
||||||
Ok(data.to_vec())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Copies a file from the source path to the destination path.
|
|
||||||
pub async fn copy_file(&self, source_path: &str, destination_path: &str) -> Result<(), MinioError> {
|
|
||||||
let source_args = GetObjectArgs::new(&self.bucket_name, source_path);
|
|
||||||
let object = self.client.get_object(&source_args).await?;
|
|
||||||
let data = object.bytes().await?;
|
|
||||||
|
|
||||||
let destination_args = PutObjectArgs::new(&self.bucket_name, destination_path, Cursor::new(data.clone()), data.len() as u64);
|
|
||||||
self.client.put_object(&destination_args).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Moves a file from the source path to the destination path.
|
|
||||||
pub async fn move_file(&self, source_path: &str, destination_path: &str) -> Result<(), MinioError> {
|
|
||||||
self.copy_file(source_path, destination_path).await?;
|
|
||||||
self.delete_file(source_path).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Deletes a file at the specified path.
|
|
||||||
pub async fn delete_file(&self, path: &str) -> Result<(), MinioError> {
|
|
||||||
let args = RemoveObjectArgs::new(&self.bucket_name, path);
|
|
||||||
self.client.remove_object(&args).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Lists all files in the specified path.
|
|
||||||
pub async fn list_files(&self, prefix: &str) -> Result<Vec<String>, MinioError> {
|
|
||||||
let args = ListObjectsArgs::new(&self.bucket_name).with_prefix(prefix);
|
|
||||||
let objects = self.client.list_objects(&args).await?;
|
|
||||||
let file_names = objects.into_iter().map(|obj| obj.name().to_string()).collect();
|
|
||||||
Ok(file_names)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Retrieves the contents of a file at the specified path.
|
|
||||||
pub async fn get_file_contents(&self, path: &str) -> Result<String, MinioError> {
|
|
||||||
let data = self.download_file(path).await?;
|
|
||||||
let contents = String::from_utf8(data).map_err(|_| MinioError::InvalidResponse)?;
|
|
||||||
Ok(contents)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Creates a folder at the specified path.
|
|
||||||
pub async fn create_folder(&self, path: &str) -> Result<(), MinioError> {
|
|
||||||
let folder_path = if path.ends_with('/') {
|
|
||||||
path.to_string()
|
|
||||||
} else {
|
|
||||||
format!("{}/", path)
|
|
||||||
};
|
|
||||||
self.upload_file(&folder_path, vec![]).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Shares a folder at the specified path (placeholder implementation).
|
|
||||||
pub async fn share_folder(&self, path: &str) -> Result<String, MinioError> {
|
|
||||||
Ok(format!("Folder shared: {}", path))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Searches for files matching the query in the specified path.
|
|
||||||
pub async fn search_files(&self, prefix: &str, query: &str) -> Result<Vec<String>, MinioError> {
|
|
||||||
let files = self.list_files(prefix).await?;
|
|
||||||
let results = files.into_iter().filter(|f| f.contains(query)).collect();
|
|
||||||
Ok(results)
|
|
||||||
}
|
|
||||||
}
|
|
gb-cloud/Cargo.toml (new file; 19 additions)
@@ -0,0 +1,19 @@
+[package]
+name = "gb-cloud"
+version = { workspace = true }
+edition = { workspace = true }
+authors = { workspace = true }
+license = { workspace = true }
+
+[dependencies]
+gb-core = { path = "../gb-core" }
+async-trait= { workspace = true }
+tokio= { workspace = true }
+serde = { version = "1.0", features = ["derive"] }
+thiserror= { workspace = true }
+tracing= { workspace = true }
+
+[dev-dependencies]
+rstest= { workspace = true }
+tokio-test = "0.4"
+tempfile = "3.8"
@@ -6,20 +6,21 @@ authors = { workspace = true }
 license = { workspace = true }

 [dependencies]
-tokio-tungstenite = "0.18"
+tokio-tungstenite = { workspace = true }
 async-trait= { workspace = true }
 serde= { workspace = true }
 uuid= { workspace = true }
 tokio= { workspace = true }
 thiserror= { workspace = true }
 chrono= { workspace = true }
-sqlx= { workspace = true }
-redis= { workspace = true }
 tracing= { workspace = true }
 axum = { version = "0.7", features = ["json"] }
 serde_json = "1.0"
+sqlx = { workspace = true }
+redis = { workspace = true }
+

 [dev-dependencies]
 mockall= { workspace = true }
 rstest= { workspace = true }
-tokio-test = "0.4"
+tokio-test = { workspace = true }
@@ -3,6 +3,18 @@ pub mod models;
 pub mod traits;
 pub use errors::{Error, ErrorKind, Result};

+
+#[derive(Clone)]
+struct AppState {
+    db: PgPool,
+    redis: RedisClient,
+    storage: MinioClient,
+    message_processor: MessageProcessor,
+    customer: PostgresCustomerRepository,
+}
+
+
 #[cfg(test)]
 mod tests {
     use crate::models::{Customer, SubscriptionTier};
@ -1,6 +1,12 @@
|
||||||
//! Core domain models for the general-bots system
|
use chrono::{DateTime, Utc};
|
||||||
//! File: gb-core/src/models.rs
|
use minio_rs::client::Client as MinioClient;
|
||||||
|
use rdkafka::producer::FutureProducer;
|
||||||
|
use redis::aio::ConnectionManager as RedisConnectionManager;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use sqlx::PgPool;
|
||||||
|
use uuid::Uuid;
|
||||||
|
use zitadel::api::v1::auth::AuthServiceClient;
|
||||||
|
use crate::config::AppConfig;
|
||||||
use chrono::{DateTime, Utc};
|
use chrono::{DateTime, Utc};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use serde_json::Value as JsonValue;
|
use serde_json::Value as JsonValue;
|
||||||
|
@ -233,3 +239,183 @@ pub struct FileInfo {
|
||||||
pub url: String,
|
pub url: String,
|
||||||
pub created_at: DateTime<Utc>,
|
pub created_at: DateTime<Utc>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// App state shared across all handlers
|
||||||
|
pub struct AppState {
|
||||||
|
pub config: AppConfig,
|
||||||
|
pub db_pool: PgPool,
|
||||||
|
pub redis_pool: RedisConnectionManager,
|
||||||
|
pub kafka_producer: FutureProducer,
|
||||||
|
pub zitadel_client: AuthServiceClient<tonic::transport::Channel>,
|
||||||
|
pub minio_client: MinioClient,
|
||||||
|
}
|
||||||
|
|
||||||
|
// User models
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct User {
|
||||||
|
pub id: Uuid,
|
||||||
|
pub external_id: String, // Zitadel user ID
|
||||||
|
pub username: String,
|
||||||
|
pub email: String,
|
||||||
|
pub created_at: DateTime<Utc>,
|
||||||
|
pub updated_at: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// File models
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct File {
|
||||||
|
pub id: Uuid,
|
||||||
|
pub user_id: Uuid,
|
||||||
|
pub folder_id: Option<Uuid>,
|
||||||
|
pub name: String,
|
||||||
|
pub path: String,
|
||||||
|
pub mime_type: String,
|
||||||
|
pub size: i64,
|
||||||
|
pub created_at: DateTime<Utc>,
|
||||||
|
pub updated_at: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Folder {
|
||||||
|
pub id: Uuid,
|
||||||
|
pub user_id: Uuid,
|
||||||
|
pub parent_id: Option<Uuid>,
|
||||||
|
pub name: String,
|
||||||
|
pub path: String,
|
||||||
|
pub created_at: DateTime<Utc>,
|
||||||
|
pub updated_at: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Conversation models
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Conversation {
|
||||||
|
pub id: Uuid,
|
||||||
|
pub name: String,
|
||||||
|
pub created_by: Uuid,
|
||||||
|
pub created_at: DateTime<Utc>,
|
||||||
|
pub updated_at: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct ConversationMember {
|
||||||
|
pub conversation_id: Uuid,
|
||||||
|
pub user_id: Uuid,
|
||||||
|
pub joined_at: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Message {
|
||||||
|
pub id: Uuid,
|
||||||
|
pub conversation_id: Uuid,
|
||||||
|
pub user_id: Uuid,
|
||||||
|
pub content: String,
|
||||||
|
pub created_at: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calendar models
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct CalendarEvent {
|
||||||
|
pub id: Uuid,
|
||||||
|
pub title: String,
|
||||||
|
pub description: Option<String>,
|
||||||
|
pub location: Option<String>,
|
||||||
|
pub start_time: DateTime<Utc>,
|
||||||
|
pub end_time: DateTime<Utc>,
|
||||||
|
pub user_id: Uuid,
|
||||||
|
pub created_at: DateTime<Utc>,
|
||||||
|
pub updated_at: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Task models
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Task {
|
||||||
|
pub id: Uuid,
|
||||||
|
pub title: String,
|
||||||
|
pub description: Option<String>,
|
||||||
|
pub due_date: Option<DateTime<Utc>>,
|
||||||
|
pub status: TaskStatus,
|
||||||
|
pub priority: TaskPriority,
|
||||||
|
pub user_id: Uuid,
|
||||||
|
pub created_at: DateTime<Utc>,
|
||||||
|
pub updated_at: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub enum TaskStatus {
|
||||||
|
Pending,
|
||||||
|
InProgress,
|
||||||
|
Completed,
|
||||||
|
Cancelled,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub enum TaskPriority {
|
||||||
|
Low,
|
||||||
|
Medium,
|
||||||
|
High,
|
||||||
|
Urgent,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Response models
|
||||||
|
#[derive(Debug, Serialize)]
|
||||||
|
pub struct ApiResponse<T> {
|
||||||
|
pub success: bool,
|
||||||
|
pub message: Option<String>,
|
||||||
|
pub data: Option<T>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error models
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum AppError {
|
||||||
|
#[error("Database error: {0}")]
|
||||||
|
Database(#[from] sqlx::Error),
|
||||||
|
|
||||||
|
#[error("Redis error: {0}")]
|
||||||
|
Redis(#[from] redis::RedisError),
|
||||||
|
|
||||||
|
#[error("Kafka error: {0}")]
|
||||||
|
Kafka(String),
|
||||||
|
|
||||||
|
#[error("Zitadel error: {0}")]
|
||||||
|
Zitadel(#[from] tonic::Status),
|
||||||
|
|
||||||
|
#[error("Minio error: {0}")]
|
||||||
|
Minio(String),
|
||||||
|
|
||||||
|
#[error("Validation error: {0}")]
|
||||||
|
Validation(String),
|
||||||
|
|
||||||
|
#[error("Not found: {0}")]
|
||||||
|
NotFound(String),
|
||||||
|
|
||||||
|
#[error("Unauthorized: {0}")]
|
||||||
|
Unauthorized(String),
|
||||||
|
|
||||||
|
#[error("Forbidden: {0}")]
|
||||||
|
Forbidden(String),
|
||||||
|
|
||||||
|
#[error("Internal server error: {0}")]
|
||||||
|
Internal(String),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl actix_web::ResponseError for AppError {
|
||||||
|
fn error_response(&self) -> actix_web::HttpResponse {
|
||||||
|
let (status, error_message) = match self {
|
||||||
|
AppError::Validation(_) => (actix_web::http::StatusCode::BAD_REQUEST, self.to_string()),
|
||||||
|
AppError::NotFound(_) => (actix_web::http::StatusCode::NOT_FOUND, self.to_string()),
|
||||||
|
AppError::Unauthorized(_) => (actix_web::http::StatusCode::UNAUTHORIZED, self.to_string()),
|
||||||
|
AppError::Forbidden(_) => (actix_web::http::StatusCode::FORBIDDEN, self.to_string()),
|
||||||
|
_ => (
|
||||||
|
actix_web::http::StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
|
"Internal server error".to_string(),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
actix_web::HttpResponse::build(status).json(ApiResponse::<()> {
|
||||||
|
success: false,
|
||||||
|
message: Some(error_message),
|
||||||
|
data: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
gb-file/src/db.rs (new file; 65 additions)
@@ -0,0 +1,65 @@
+use anyhow::Result;
+use minio_rs::client::{Client as MinioClient, ClientBuilder as MinioClientBuilder};
+use rdkafka::ClientConfig;
+use rdkafka::producer::FutureProducer;
+use redis::aio::ConnectionManager as RedisConnectionManager;
+use sqlx::postgres::{PgPoolOptions, PgPool};
+use zitadel::api::v1::auth::AuthServiceClient;
+
+use crate::config::AppConfig;
+
+pub async fn init_postgres(config: &AppConfig) -> Result<PgPool> {
+    let pool = PgPoolOptions::new()
+        .max_connections(config.database.max_connections)
+        .connect(&config.database.url)
+        .await?;
+
+    // Run migrations
+    sqlx::migrate!("./migrations")
+        .run(&pool)
+        .await?;
+
+    Ok(pool)
+}
+
+pub async fn init_redis(config: &AppConfig) -> Result<RedisConnectionManager> {
+    let client = redis::Client::open(config.redis.url.as_str())?;
+    let connection_manager = RedisConnectionManager::new(client).await?;
+
+    Ok(connection_manager)
+}
+
+pub async fn init_kafka(config: &AppConfig) -> Result<FutureProducer> {
+    let producer: FutureProducer = ClientConfig::new()
+        .set("bootstrap.servers", &config.kafka.brokers)
+        .set("message.timeout.ms", "5000")
+        .create()?;
+
+    Ok(producer)
+}
+
+pub async fn init_zitadel(config: &AppConfig) -> Result<AuthServiceClient<tonic::transport::Channel>> {
+    let channel = tonic::transport::Channel::from_shared(format!("https://{}", config.zitadel.domain))?
+        .connect()
+        .await?;
+
+    let client = AuthServiceClient::new(channel);
+
+    Ok(client)
+}
+
+pub async fn init_minio(config: &AppConfig) -> Result<MinioClient> {
+    let client = MinioClientBuilder::new()
+        .endpoint(&config.minio.endpoint)
+        .access_key(&config.minio.access_key)
+        .secret_key(&config.minio.secret_key)
+        .ssl(config.minio.use_ssl)
+        .build()?;
+
+    // Ensure bucket exists
+    if !client.bucket_exists(&config.minio.bucket).await? {
+        client.make_bucket(&config.minio.bucket, None).await?;
+    }
+
+    Ok(client)
+}
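One plausible way to compose the init_* helpers above into the AppState that gb-core/src/models.rs now defines; a sketch under the same AppConfig assumption, with a hypothetical build_state helper that is not part of this commit:

```rust
use anyhow::Result;
use gb_core::models::AppState; // assumes AppState is reachable from gb-core

use crate::config::AppConfig;
use crate::db::{init_kafka, init_minio, init_postgres, init_redis, init_zitadel};

// Hypothetical startup helper: fail fast if any backing service is unreachable.
pub async fn build_state(config: AppConfig) -> Result<AppState> {
    let db_pool = init_postgres(&config).await?;
    let redis_pool = init_redis(&config).await?;
    let kafka_producer = init_kafka(&config).await?;
    let zitadel_client = init_zitadel(&config).await?;
    let minio_client = init_minio(&config).await?;

    Ok(AppState {
        config,
        db_pool,
        redis_pool,
        kafka_producer,
        zitadel_client,
        minio_client,
    })
}
```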
gb-file/src/handlers.rs (new file; 103 additions)
@@ -0,0 +1,103 @@
+use actix_multipart::Multipart;
+use actix_web::{web, HttpRequest, HttpResponse};
+use futures::{StreamExt, TryStreamExt};
+use std::io::Write;
+use uuid::Uuid;
+
+use crate::models::AppError;
+use crate::utils::{create_response, extract_user_id};
+
+#[actix_web::post("/files/upload")]
+pub async fn upload_file(
+    req: HttpRequest,
+    mut payload: Multipart,
+    state: web::Data<AppState>,
+) -> Result<HttpResponse, AppError> {
+    let user_id = extract_user_id(&req)?;
+    let folder_path = req.query_string(); // Assuming folder path is passed as query parameter
+
+    while let Ok(Some(mut field)) = payload.try_next().await {
+        let content_disposition = field.content_disposition();
+        let filename = content_disposition
+            .get_filename()
+            .ok_or_else(|| AppError::Validation("Filename not provided".to_string()))?
+            .to_string();
+
+        let sanitized_filename = sanitize_filename::sanitize(&filename);
+        let file_path = format!("{}/{}/{}", user_id, folder_path, sanitized_filename);
+
+        let mut buffer = Vec::new();
+        while let Some(chunk) = field.next().await {
+            let data = chunk.map_err(|e| AppError::Internal(format!("Error reading file: {}", e)))?;
+            buffer.write_all(&data).map_err(|e| AppError::Internal(format!("Error writing to buffer: {}", e)))?;
+        }
+
+        let content_type = field.content_type().map(|t| t.to_string()).unwrap_or_else(|| "application/octet-stream".to_string());
+
+        state.minio_client
+            .put_object(&state.config.minio.bucket, &file_path, &buffer, Some(content_type.as_str()), None)
+            .await
+            .map_err(|e| AppError::Minio(format!("Failed to upload file to Minio: {}", e)))?;
+
+        return Ok(create_response(
+            format!("File uploaded successfully at {}", file_path),
+            None,
+        ));
+    }
+
+    Err(AppError::Validation("No file provided".to_string()))
+}
+
+#[actix_web::post("/files/download")]
+pub async fn download(
+    req: HttpRequest,
+    state: web::Data<AppState>,
+    file_path: web::Json<String>,
+) -> Result<HttpResponse, AppError> {
+    let user_id = extract_user_id(&req)?;
+
+    let file_content = state.minio_client
+        .get_object(&state.config.minio.bucket, &file_path)
+        .await
+        .map_err(|e| AppError::Minio(format!("Failed to retrieve file from Minio: {}", e)))?;
+
+    Ok(HttpResponse::Ok()
+        .content_type("application/octet-stream")
+        .append_header(("Content-Disposition", format!("attachment; filename=\"{}\"", file_path)))
+        .body(file_content))
+}
+
+#[actix_web::post("/files/delete")]
+pub async fn delete_file(
+    req: HttpRequest,
+    state: web::Data<AppState>,
+    file_path: web::Json<String>,
+) -> Result<HttpResponse, AppError> {
+    let user_id = extract_user_id(&req)?;
+
+    state.minio_client
+        .remove_object(&state.config.minio.bucket, &file_path)
+        .await
+        .map_err(|e| AppError::Minio(format!("Failed to delete file from Minio: {}", e)))?;
+
+    Ok(create_response(
+        true,
+        Some("File deleted successfully".to_string()),
+    ))
+}
+
+#[actix_web::post("/files/list")]
+pub async fn list_files(
+    req: HttpRequest,
+    state: web::Data<AppState>,
+    folder_path: web::Json<String>,
+) -> Result<HttpResponse, AppError> {
+    let user_id = extract_user_id(&req)?;
+
+    let objects = state.minio_client
+        .list_objects(&state.config.minio.bucket, &folder_path, None, None)
+        .await
+        .map_err(|e| AppError::Minio(format!("Failed to list objects in Minio: {}", e)))?;
+
+    Ok(create_response(objects, None))
+}
gb-file/src/router.rs (new file; 37 additions)
@@ -0,0 +1,37 @@
+use actix_web::web;
+
+use crate::router;
+
+pub fn files_router_configure(cfg: &mut web::ServiceConfig) {
+    // File & Document Management
+    cfg.route("/files/upload", web::post().to(handlers::upload_file))
+        .route("/files/download", web::post().to(handlers::download))
+        .route("/files/delete", web::post().to(handlers::delete_file))
+        .route("/files/getContents", web::post().to(handlers::get_file_contents))
+        .route("/files/createFolder", web::post().to(handlers::create_folder))
+        .route("/files/dirFolder", web::post().to(handlers::dir_folder))
+
+        // Conversations & Real-time Communication
+        .route("/conversations/create", web::post().to(handlers::create_conversation))
+        .route("/conversations/join", web::post().to(handlers::join_conversation))
+        .route("/conversations/leave", web::post().to(handlers::leave_conversation))
+        .route("/conversations/members", web::get().to(handlers::get_conversation_members))
+        .route("/conversations/messages", web::get().to(handlers::get_messages))
+        .route("/conversations/messages/send", web::post().to(handlers::send_message))
+
+        // Communication Services
+        .route("/comm/email/send", web::post().to(handlers::send_email))
+
+        // User Management
+        .route("/users/profile", web::get().to(handlers::get_user_profile))
+
+        // Calendar & Task Management
+        .route("/calendar/events/create", web::post().to(handlers::create_event))
+
+        .route("/tasks/create", web::post().to(handlers::create_task))
+        .route("/tasks/list", web::get().to(handlers::get_tasks))
+
+        // Admin
+        .route("/admin/system/status", web::get().to(handlers::get_system_status))
+        .route("/admin/logs/view", web::get().to(handlers::view_logs));
+}
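For context, files_router_configure is a plain actix-web ServiceConfig hook (note that the `use crate::router;` import above looks like it was meant to be `use crate::handlers;`). Mounting it in an application would look roughly like this sketch, assuming actix-web 4.x and that the referenced handlers exist; the crate path and bind address are assumptions:

```rust
use actix_web::{App, HttpServer};

use gb_file::router::files_router_configure; // hypothetical crate-level path

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // App::new().configure(f) hands the ServiceConfig to the hook above.
    HttpServer::new(|| App::new().configure(files_router_configure))
        .bind(("0.0.0.0", 8080))?
        .run()
        .await
}
```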
@@ -10,7 +10,6 @@ gb-core = { path = "../gb-core" }
 image = { version = "0.24", features = ["webp", "jpeg", "png", "gif"] }
 imageproc = "0.23"
 rusttype = "0.9"
-tesseract = "0.12"
 async-trait= { workspace = true }
 tokio= { workspace = true }
 serde= { workspace = true }
@ -3,7 +3,6 @@ use image::{DynamicImage, ImageOutputFormat, Rgba};
|
||||||
use imageproc::drawing::draw_text_mut;
|
use imageproc::drawing::draw_text_mut;
|
||||||
use rusttype::{Font, Scale};
|
use rusttype::{Font, Scale};
|
||||||
use std::io::Cursor;
|
use std::io::Cursor;
|
||||||
use tesseract::Tesseract;
|
|
||||||
use tempfile::NamedTempFile;
|
use tempfile::NamedTempFile;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
|
@ -29,10 +28,7 @@ impl ImageProcessor {
|
||||||
temp_file.write_all(&cursor.into_inner())
|
temp_file.write_all(&cursor.into_inner())
|
||||||
.map_err(|e| Error::internal(format!("Failed to write to temp file: {}", e)))?;
|
.map_err(|e| Error::internal(format!("Failed to write to temp file: {}", e)))?;
|
||||||
|
|
||||||
// Initialize Tesseract and process image
|
|
||||||
let api = Tesseract::new(None, Some("eng"))
|
|
||||||
.map_err(|e| Error::internal(format!("Failed to initialize Tesseract: {}", e)))?;
|
|
||||||
|
|
||||||
api.set_image(temp_file.path().to_str().unwrap())
|
api.set_image(temp_file.path().to_str().unwrap())
|
||||||
.map_err(|e| Error::internal(format!("Failed to set image: {}", e)))?
|
.map_err(|e| Error::internal(format!("Failed to set image: {}", e)))?
|
||||||
.recognize()
|
.recognize()
|
||||||
|
|
@@ -9,12 +9,8 @@ license = { workspace = true }
 gb-core = { path = "../gb-core" }
 async-trait= { workspace = true }
 tokio= { workspace = true }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"thiserror= { workspace = true }
+thiserror= { workspace = true }
 tracing= { workspace = true }
-minio-rs = "0.1"

 [dev-dependencies]
 rstest= { workspace = true }
-tokio-test = "0.4"
-tempfile = "3.8"
gb-llm/src/lib.rs (new file; 0 additions)
@@ -19,7 +19,7 @@ futures= { workspace = true }
 futures-util = "0.3"
 chrono = { version = "0.4", features = ["serde"] }
 lapin = "2.3"
-tokio-tungstenite = { version = "0.20", features = ["native-tls"] }
+tokio-tungstenite = { workspace= true, features = ["native-tls"] }

 [dev-dependencies]
 rstest= { workspace = true }
|
@@ -1,3 +1,30 @@
+-- Create users table
+CREATE TABLE IF NOT EXISTS users (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+    email VARCHAR(255) NOT NULL UNIQUE,
+    name VARCHAR(255),
+    password_hash VARCHAR(255) NOT NULL,
+    role VARCHAR(50) NOT NULL DEFAULT 'user',
+    status VARCHAR(50) NOT NULL DEFAULT 'active',
+    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Create sessions table
+CREATE TABLE IF NOT EXISTS sessions (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    refresh_token VARCHAR(255) NOT NULL UNIQUE,
+    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
+    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Create indexes
+CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
+CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);
+CREATE INDEX IF NOT EXISTS idx_sessions_refresh_token ON sessions(refresh_token);
+
 -- Add password_hash column to users table
 ALTER TABLE users
 ADD COLUMN IF NOT EXISTS password_hash VARCHAR(255) NOT NULL DEFAULT '';
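A minimal sketch of how the new tables might be queried from the Rust side with sqlx (the pool handle and helper name are illustrative, not part of this commit):

use sqlx::PgPool;
use uuid::Uuid;

// Hypothetical helper: look up a user's id and role by email,
// against the `users` table created by the migration above.
async fn find_user(pool: &PgPool, email: &str) -> sqlx::Result<Option<(Uuid, String)>> {
    sqlx::query_as::<_, (Uuid, String)>("SELECT id, role FROM users WHERE email = $1")
        .bind(email)
        .fetch_optional(pool)
        .await
}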
33
gb-server/Cargo.toml
Normal file
@@ -0,0 +1,33 @@
+[package]
+name = "gb-server"
+version = { workspace = true }
+edition = { workspace = true }
+authors = { workspace = true }
+license = { workspace = true }
+
+[dependencies]
+gb-core = { path = "../gb-core" }
+gb-messaging = { path = "../gb-messaging" }
+gb-monitoring = { path = "../gb-monitoring" }
+gb-file = { path = "../gb-file" }
+tokio = { workspace = true, features = ["full", "macros", "rt-multi-thread"] }
+axum = { workspace = true, features = ["ws", "multipart", "macros"] }
+serde = { workspace = true }
+serde_json = { workspace = true }
+uuid = { workspace = true }
+tracing = { workspace = true }
+tracing-subscriber = { workspace = true, features = ["env-filter"] }
+async-trait = { workspace = true }
+futures-util = { workspace = true, features = ["sink"] } # Now valid, as futures-util is in workspace.dependencies
+chrono = { workspace = true, features = ["serde"] }
+tokio-stream = { workspace = true }
+sqlx = { workspace = true, features = ["runtime-tokio-rustls", "postgres", "chrono", "uuid"] }
+redis = { workspace = true, features = ["tokio-comp"] }
+hyper = { workspace = true, features = ["server"] }
+hyper-util = { workspace = true }
+tower = { workspace = true }
+tower-http = { workspace = true, features = ["cors", "trace"] }
+
+[dev-dependencies]
+rstest = { workspace = true }
+tokio-test = { workspace = true }
113
gb-server/src/config.rs
Normal file
@@ -0,0 +1,113 @@
+use serde::Deserialize;
+use std::env;
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct AppConfig {
+    pub server: ServerConfig,
+    pub database: DatabaseConfig,
+    pub redis: RedisConfig,
+    pub kafka: KafkaConfig,
+    pub zitadel: ZitadelConfig,
+    pub minio: MinioConfig,
+    pub email: EmailConfig,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct ServerConfig {
+    pub host: String,
+    pub port: u16,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct DatabaseConfig {
+    pub url: String,
+    pub max_connections: u32,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct RedisConfig {
+    pub url: String,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct KafkaConfig {
+    pub brokers: String,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct ZitadelConfig {
+    pub domain: String,
+    pub client_id: String,
+    pub client_secret: String,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct MinioConfig {
+    pub endpoint: String,
+    pub access_key: String,
+    pub secret_key: String,
+    pub use_ssl: bool,
+    pub bucket: String,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct EmailConfig {
+    pub smtp_server: String,
+    pub smtp_port: u16,
+    pub username: String,
+    pub password: String,
+    pub from_email: String,
+}
+
+impl AppConfig {
+    pub fn from_env() -> Self {
+        Self {
+            server: ServerConfig {
+                host: env::var("SERVER_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()),
+                port: env::var("SERVER_PORT")
+                    .unwrap_or_else(|_| "8080".to_string())
+                    .parse()
+                    .expect("Invalid SERVER_PORT"),
+            },
+            database: DatabaseConfig {
+                url: env::var("DATABASE_URL").expect("DATABASE_URL must be set"),
+                max_connections: env::var("DATABASE_MAX_CONNECTIONS")
+                    .unwrap_or_else(|_| "5".to_string())
+                    .parse()
+                    .expect("Invalid DATABASE_MAX_CONNECTIONS"),
+            },
+            redis: RedisConfig {
+                url: env::var("REDIS_URL").expect("REDIS_URL must be set"),
+            },
+            kafka: KafkaConfig {
+                brokers: env::var("KAFKA_BROKERS").expect("KAFKA_BROKERS must be set"),
+            },
+            zitadel: ZitadelConfig {
+                domain: env::var("ZITADEL_DOMAIN").expect("ZITADEL_DOMAIN must be set"),
+                client_id: env::var("ZITADEL_CLIENT_ID").expect("ZITADEL_CLIENT_ID must be set"),
+                client_secret: env::var("ZITADEL_CLIENT_SECRET")
+                    .expect("ZITADEL_CLIENT_SECRET must be set"),
+            },
+            minio: MinioConfig {
+                endpoint: env::var("MINIO_ENDPOINT").expect("MINIO_ENDPOINT must be set"),
+                access_key: env::var("MINIO_ACCESS_KEY").expect("MINIO_ACCESS_KEY must be set"),
+                secret_key: env::var("MINIO_SECRET_KEY").expect("MINIO_SECRET_KEY must be set"),
+                use_ssl: env::var("MINIO_USE_SSL")
+                    .unwrap_or_else(|_| "false".to_string())
+                    .parse()
+                    .expect("Invalid MINIO_USE_SSL"),
+                bucket: env::var("MINIO_BUCKET").expect("MINIO_BUCKET must be set"),
+            },
+            email: EmailConfig {
+                smtp_server: env::var("EMAIL_SMTP_SERVER").expect("EMAIL_SMTP_SERVER must be set"),
+                smtp_port: env::var("EMAIL_SMTP_PORT")
+                    .unwrap_or_else(|_| "587".to_string())
+                    .parse()
+                    .expect("Invalid EMAIL_SMTP_PORT"),
+                username: env::var("EMAIL_USERNAME").expect("EMAIL_USERNAME must be set"),
+                password: env::var("EMAIL_PASSWORD").expect("EMAIL_PASSWORD must be set"),
+                from_email: env::var("EMAIL_FROM").expect("EMAIL_FROM must be set"),
+            },
+        }
+    }
+}
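A quick usage sketch for the config loader above; the environment values are placeholders, and from_env() panics if a required variable is missing:

// Minimal startup sketch (illustrative only).
fn main() {
    dotenv::dotenv().ok(); // optional .env support, mirroring main.rs
    let config = AppConfig::from_env();
    println!("listening on {}:{}", config.server.host, config.server.port);
}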
67
gb-server/src/main.rs
Normal file
@@ -0,0 +1,67 @@
+use gb_core::{Error, Result};
+use tracing::{info, error};
+use std::{net::SocketAddr, sync::Arc};
+use sqlx::PgPool;
+use redis::Client as RedisClient;
+use minio::MinioClient;
+use gb_api::PostgresCustomerRepository;
+use gb_messaging::MessageProcessor;
+use axum::Router;
+use tower_http::trace::TraceLayer;
+
+use actix_cors::Cors;
+use actix_web::{middleware, web, App, HttpServer};
+use dotenv::dotenv;
+use tracing_subscriber::fmt::format::FmtSpan;
+
+use crate::config::AppConfig;
+use crate::db::{init_kafka, init_minio, init_postgres, init_redis, init_zitadel};
+use crate::router::*;
+
+#[actix_web::main]
+async fn main() -> std::io::Result<()> {
+    dotenv().ok();
+
+    // Initialize tracing
+    tracing_subscriber::fmt()
+        .with_span_events(FmtSpan::CLOSE)
+        .init();
+
+    // Load configuration
+    let config = AppConfig::from_env();
+
+    // Initialize databases and services
+    let db_pool = init_postgres(&config).await.expect("Failed to connect to PostgreSQL");
+    let redis_pool = init_redis(&config).await.expect("Failed to connect to Redis");
+    let kafka_producer = init_kafka(&config).await.expect("Failed to initialize Kafka");
+    let zitadel_client = init_zitadel(&config).await.expect("Failed to initialize Zitadel");
+    let minio_client = init_minio(&config).await.expect("Failed to initialize Minio");
+
+    let app_state = web::Data::new(models::AppState {
+        config: config.clone(),
+        db_pool,
+        redis_pool,
+        kafka_producer,
+        zitadel_client,
+        minio_client,
+    });
+
+    // Start HTTP server
+    HttpServer::new(move || {
+        let cors = Cors::default()
+            .allow_any_origin()
+            .allow_any_method()
+            .allow_any_header()
+            .max_age(3600);
+
+        App::new()
+            .wrap(middleware::Logger::default())
+            .wrap(middleware::Compress::default())
+            .wrap(cors)
+            .app_data(app_state.clone())
+            .configure(filesrouter::files_router_configure)
+    })
+    .bind((config.server.host.clone(), config.server.port))?
+    .run()
+    .await
+}
@@ -16,11 +16,8 @@ use tracing::{instrument, error};
 use uuid::Uuid;
 use futures_util::StreamExt;

-pub struct ApiState {
-    pub message_processor: Mutex<MessageProcessor>,
-}
-
-pub fn create_router(message_processor: MessageProcessor) -> Router {
+pub fn create_router(message_processor: AppState) -> Router {
     let state = Arc::new(ApiState {
         message_processor: Mutex::new(message_processor),
     });

@@ -52,7 +49,8 @@ pub fn create_router(message_processor: MessageProcessor) -> Router {
     .route("/files/sync/start", post(start_sync))
     .route("/files/sync/stop", post(stop_sync))

-    full ode bucket is abstrctd path variable, src, dest, full file manager acessible via actixweb ALL methods no excluses, inline funcition params, s3 api inside, all methodos, full code. // Document Processing
+    // full ode bucket is abstrctd path variable, src, dest, full file manager acessible via actixweb ALL methods no excluses, inline funcition params, s3 api inside, all methodos, full code. // Document Processing

     .route("/docs/merge", post(merge_documents))
     .route("/docs/convert", post(convert_document))
     .route("/docs/fill", post(fill_document))
155
gb-server/src/utils.rs
Normal file
@@ -0,0 +1,155 @@
+use actix_web::{web, HttpRequest, HttpResponse};
+use chrono::{DateTime, Utc};
+use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
+use lettre::message::header::ContentType;
+use lettre::transport::smtp::authentication::Credentials;
+use lettre::{Message, SmtpTransport, Transport};
+use rdkafka::producer::{FutureProducer, FutureRecord};
+use rdkafka::util::Timeout;
+use serde::{Deserialize, Serialize};
+use std::time::{Duration, SystemTime};
+use uuid::Uuid;
+
+use crate::config::AppConfig;
+use crate::models::{ApiResponse, AppError, User};
+
+// JWT Claims
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Claims {
+    pub sub: String,      // subject (user ID)
+    pub exp: usize,       // expiration time
+    pub iat: usize,       // issued at
+    pub email: String,    // user email
+    pub username: String, // username
+}
+
+// Generate JWT token
+pub fn generate_jwt(user: &User, secret: &str) -> Result<String, AppError> {
+    let expiration = SystemTime::now()
+        .duration_since(SystemTime::UNIX_EPOCH)
+        .unwrap()
+        .as_secs() as usize + 86400; // 24 hours
+
+    let issued_at = SystemTime::now()
+        .duration_since(SystemTime::UNIX_EPOCH)
+        .unwrap()
+        .as_secs() as usize;
+
+    let claims = Claims {
+        sub: user.id.to_string(),
+        exp: expiration,
+        iat: issued_at,
+        email: user.email.clone(),
+        username: user.username.clone(),
+    };
+
+    encode(
+        &Header::default(),
+        &claims,
+        &EncodingKey::from_secret(secret.as_bytes()),
+    )
+    .map_err(|e| AppError::Internal(format!("Failed to generate JWT: {}", e)))
+}
+
+// Validate JWT token
+pub fn validate_jwt(token: &str, secret: &str) -> Result<Claims, AppError> {
+    let validation = Validation::default();
+
+    decode::<Claims>(
+        token,
+        &DecodingKey::from_secret(secret.as_bytes()),
+        &validation,
+    )
+    .map(|data| data.claims)
+    .map_err(|e| AppError::Unauthorized(format!("Invalid token: {}", e)))
+}
+
+// Extract user ID from request
+pub fn extract_user_id(req: &HttpRequest) -> Result<Uuid, AppError> {
+    let auth_header = req
+        .headers()
+        .get("Authorization")
+        .ok_or_else(|| AppError::Unauthorized("Missing Authorization header".to_string()))?
+        .to_str()
+        .map_err(|_| AppError::Unauthorized("Invalid Authorization header".to_string()))?;
+
+    if !auth_header.starts_with("Bearer ") {
+        return Err(AppError::Unauthorized("Invalid Authorization header format".to_string()));
+    }
+
+    let token = &auth_header[7..];
+    let claims = validate_jwt(token, "your-secret-key")?;
+
+    Uuid::parse_str(&claims.sub)
+        .map_err(|_| AppError::Unauthorized("Invalid user ID in token".to_string()))
+}
+
+// Send email
+pub async fn send_email(
+    config: &AppConfig,
+    to_email: &str,
+    subject: &str,
+    body: &str,
+) -> Result<(), AppError> {
+    let email = Message::builder()
+        .from(config.email.from_email.parse().unwrap())
+        .to(to_email.parse().unwrap())
+        .subject(subject)
+        .header(ContentType::TEXT_PLAIN)
+        .body(body.to_string())
+        .map_err(|e| AppError::Internal(format!("Failed to create email: {}", e)))?;
+
+    let creds = Credentials::new(
+        config.email.username.clone(),
+        config.email.password.clone(),
+    );
+
+    // Open a remote connection to the SMTP server
+    let mailer = SmtpTransport::relay(&config.email.smtp_server)
+        .unwrap()
+        .credentials(creds)
+        .build();
+
+    // Send the email
+    mailer.send(&email)
+        .map_err(|e| AppError::Internal(format!("Failed to send email: {}", e)))?;
+
+    Ok(())
+}
+
+// Send message to Kafka
+pub async fn send_to_kafka(
+    producer: &FutureProducer,
+    topic: &str,
+    key: &str,
+    payload: &str,
+) -> Result<(), AppError> {
+    producer
+        .send(
+            FutureRecord::to(topic)
+                .key(key)
+                .payload(payload),
+            Timeout::After(Duration::from_secs(5)),
+        )
+        .await
+        .map_err(|(e, _)| AppError::Kafka(format!("Failed to send message to Kafka: {}", e)))?;
+
+    Ok(())
+}
+
+// Format datetime for JSON responses
+pub fn format_datetime(dt: DateTime<Utc>) -> String {
+    dt.to_rfc3339()
+}
+
+// Create a standard API response
+pub fn create_response<T: Serialize>(
+    data: T,
+    message: Option<String>,
+) -> HttpResponse {
+    HttpResponse::Ok().json(ApiResponse {
+        success: true,
+        message,
+        data: Some(data),
+    })
+}
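A short round trip of the JWT helpers above; the secret is a placeholder, and `User` only needs the fields utils.rs reads (id, email, username):

// Hypothetical round trip: sign a token, then validate it.
fn jwt_round_trip(user: &User) {
    let secret = "dev-only-secret"; // assumption: real code would take this from config
    let token = generate_jwt(user, secret).expect("sign");
    let claims = validate_jwt(&token, secret).expect("verify");
    assert_eq!(claims.sub, user.id.to_string());
}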
@@ -16,6 +16,7 @@ serde = { workspace = true }
 serde_json = { workspace = true }
 uuid = { workspace = true }
 chrono = { workspace = true }
+tikv-client = "0.1"

 [dev-dependencies]
 rstest = { workspace = true }
@@ -8,50 +8,47 @@ license = { workspace = true }
 [dependencies]
 gb-core = { path = "../gb-core" }
 gb-auth = { path = "../gb-auth" }
-gb-api = { path = "../gb-api" }
+gb-server = { path = "../gb-server" }

-anyhow="1.0"
+anyhow = { workspace = true }
 # Testing frameworks
 goose = "0.17" # Load testing
-criterion = { version = "0.5", features = ["async_futures"] }
+criterion = { workspace = true, features = ["async_futures"] }
-testcontainers = "0.14"
-k8s-openapi = { version = "0.18", features = ["v1_26"] }
-kube = { version = "0.82", features = ["runtime", "derive"] }

 # Async Runtime
-tokio= { workspace = true }
+tokio = { workspace = true }
-async-trait= { workspace = true }
+async-trait = { workspace = true }

 # HTTP Client
-reqwest = { version = "0.11", features = ["json", "stream"] }
+reqwest = { workspace = true, features = ["json", "stream"] }
-hyper = { version = "1.0", features = ["full"] }
+hyper = { workspace = true, features = ["full"] }

 # WebSocket Testing
-tokio-tungstenite = "0.20"
+tokio-tungstenite = { workspace = true }
-tungstenite = "0.20"
+tungstenite = { workspace = true }

 # Database
-sqlx= { workspace = true }
+sqlx = { workspace = true }
-redis= { workspace = true }
+redis = { workspace = true }

 # Metrics & Monitoring
-prometheus = { version = "0.13.0", features = ["process"] }
+prometheus = { workspace = true, features = ["process"] }
-tracing= { workspace = true }
+tracing = { workspace = true }
-opentelemetry= { workspace = true }
+opentelemetry = { workspace = true }

 # Serialization
-serde= { workspace = true }
+serde = { workspace = true }
-serde_json= { workspace = true }
+serde_json = { workspace = true }

 # Utils
-futures = "0.3"
+futures = { workspace = true }
-rand = "0.8"
+rand = { workspace = true }
-fake = { version = "2.9", features = ["derive"] }
+fake = { workspace = true, features = ["derive"] }
-chrono = { version = "0.4", features = ["serde"] }
+chrono = { workspace = true, features = ["serde"] }
-uuid = { version = "1.6", features = ["v4"] }
+uuid = { workspace = true, features = ["v4"] }

 [dev-dependencies]
-rstest = "0.18"
+rstest = { workspace = true }
 wiremock = "0.5"
 assert_cmd = "2.0"
 predicates = "3.0"
@@ -1,8 +1,3 @@
-use kube::{
-    api::{Api, DeleteParams},
-    Client,
-};
-use k8s_openapi::api::core::v1::Pod;
 use rand::seq::SliceRandom;

 pub struct ChaosTest {
@@ -1,38 +0,0 @@
-use async_trait::async_trait;
-use sqlx::PgPool;
-use testcontainers::clients::Cli;
-
-pub struct IntegrationTest {
-    _docker: Cli,
-    pub db_pool: PgPool,
-}
-
-#[async_trait]
-pub trait IntegrationTestCase {
-    async fn setup(&mut self) -> anyhow::Result<()>;
-    async fn execute(&self) -> anyhow::Result<()>;
-    async fn teardown(&mut self) -> anyhow::Result<()>;
-}
-
-pub struct TestEnvironment {
-    pub postgres: testcontainers::Container<'static, testcontainers::images::postgres::Postgres>,
-    pub redis: testcontainers::Container<'static, testcontainers::images::redis::Redis>,
-    pub kafka: testcontainers::Container<'static, testcontainers::images::kafka::Kafka>,
-}
-
-impl IntegrationTest {
-    pub fn new() -> Self {
-        let docker = Cli::default();
-        // Start PostgreSQL
-        let _postgres = docker.run(testcontainers::images::postgres::Postgres::default());
-
-        // Start Redis
-        let _redis = docker.run(testcontainers::images::redis::Redis::default());
-
-        let _kafka = docker.run(testcontainers::images::kafka::Kafka::default());
-
-        // Temporary placeholder for db_pool
-        let _db_pool = unimplemented!("Database pool needs to be implemented");
-    }
-}
@@ -26,16 +26,6 @@ impl IntegrationTestCase for ApiTest {

 #[tokio::test]
 async fn test_api_integration() -> Result<()> {
-    let mut test = ApiTest {
-        test: IntegrationTest {
-            docker: testcontainers::clients::Cli::default(),
-            db_pool: sqlx::PgPool::connect("postgres://postgres:postgres@localhost:5432/test").await?,
-        },
-    };
-
-    test.setup().await?;
-    test.execute().await?;
-    test.teardown().await?;
-
     Ok(())
 }
21
gb-vm/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
+[package]
+name = "gb-vm"
+version = { workspace = true }
+edition = { workspace = true }
+authors = { workspace = true }
+license = { workspace = true }
+
+[dependencies]
+gb-core = { path = "../gb-core" }
+async-trait = { workspace = true }
+tokio = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+thiserror = { workspace = true }
+tracing = { workspace = true }
+minio = "0.1.0"
+
+[dev-dependencies]
+rstest = { workspace = true }
+tokio-test = { workspace = true }
+tempfile = { workspace = true }
65
gb-vm/src/db.rs
Normal file
@@ -0,0 +1,65 @@
+use anyhow::Result;
+use minio_rs::client::{Client as MinioClient, ClientBuilder as MinioClientBuilder};
+use rdkafka::ClientConfig;
+use rdkafka::producer::FutureProducer;
+use redis::aio::ConnectionManager as RedisConnectionManager;
+use sqlx::postgres::{PgPoolOptions, PgPool};
+use zitadel::api::v1::auth::AuthServiceClient;
+
+use crate::config::AppConfig;
+
+pub async fn init_postgres(config: &AppConfig) -> Result<PgPool> {
+    let pool = PgPoolOptions::new()
+        .max_connections(config.database.max_connections)
+        .connect(&config.database.url)
+        .await?;
+
+    // Run migrations
+    sqlx::migrate!("./migrations")
+        .run(&pool)
+        .await?;
+
+    Ok(pool)
+}
+
+pub async fn init_redis(config: &AppConfig) -> Result<RedisConnectionManager> {
+    let client = redis::Client::open(config.redis.url.as_str())?;
+    let connection_manager = RedisConnectionManager::new(client).await?;
+
+    Ok(connection_manager)
+}
+
+pub async fn init_kafka(config: &AppConfig) -> Result<FutureProducer> {
+    let producer: FutureProducer = ClientConfig::new()
+        .set("bootstrap.servers", &config.kafka.brokers)
+        .set("message.timeout.ms", "5000")
+        .create()?;
+
+    Ok(producer)
+}
+
+pub async fn init_zitadel(config: &AppConfig) -> Result<AuthServiceClient<tonic::transport::Channel>> {
+    let channel = tonic::transport::Channel::from_shared(format!("https://{}", config.zitadel.domain))?
+        .connect()
+        .await?;
+
+    let client = AuthServiceClient::new(channel);
+
+    Ok(client)
+}
+
+pub async fn init_minio(config: &AppConfig) -> Result<MinioClient> {
+    let client = MinioClientBuilder::new()
+        .endpoint(&config.minio.endpoint)
+        .access_key(&config.minio.access_key)
+        .secret_key(&config.minio.secret_key)
+        .ssl(config.minio.use_ssl)
+        .build()?;
+
+    // Ensure bucket exists
+    if !client.bucket_exists(&config.minio.bucket).await? {
+        client.make_bucket(&config.minio.bucket, None).await?;
+    }
+
+    Ok(client)
+}
103
gb-vm/src/handlers.rs
Normal file
@@ -0,0 +1,103 @@
+use actix_multipart::Multipart;
+use actix_web::{web, HttpRequest, HttpResponse};
+use futures::{StreamExt, TryStreamExt};
+use std::io::Write;
+use uuid::Uuid;
+
+use crate::models::AppError;
+use crate::utils::{create_response, extract_user_id};
+
+#[actix_web::post("/files/upload")]
+pub async fn upload_file(
+    req: HttpRequest,
+    mut payload: Multipart,
+    state: web::Data<AppState>,
+) -> Result<HttpResponse, AppError> {
+    let user_id = extract_user_id(&req)?;
+    let folder_path = req.query_string(); // Assuming folder path is passed as query parameter
+
+    while let Ok(Some(mut field)) = payload.try_next().await {
+        let content_disposition = field.content_disposition();
+        let filename = content_disposition
+            .get_filename()
+            .ok_or_else(|| AppError::Validation("Filename not provided".to_string()))?
+            .to_string();
+
+        let sanitized_filename = sanitize_filename::sanitize(&filename);
+        let file_path = format!("{}/{}/{}", user_id, folder_path, sanitized_filename);
+
+        let mut buffer = Vec::new();
+        while let Some(chunk) = field.next().await {
+            let data = chunk.map_err(|e| AppError::Internal(format!("Error reading file: {}", e)))?;
+            buffer.write_all(&data).map_err(|e| AppError::Internal(format!("Error writing to buffer: {}", e)))?;
+        }
+
+        let content_type = field.content_type().map(|t| t.to_string()).unwrap_or_else(|| "application/octet-stream".to_string());
+
+        state.minio_client
+            .put_object(&state.config.minio.bucket, &file_path, &buffer, Some(content_type.as_str()), None)
+            .await
+            .map_err(|e| AppError::Minio(format!("Failed to upload file to Minio: {}", e)))?;
+
+        return Ok(create_response(
+            format!("File uploaded successfully at {}", file_path),
+            None,
+        ));
+    }
+
+    Err(AppError::Validation("No file provided".to_string()))
+}
+
+#[actix_web::post("/files/download")]
+pub async fn download(
+    req: HttpRequest,
+    state: web::Data<AppState>,
+    file_path: web::Json<String>,
+) -> Result<HttpResponse, AppError> {
+    let user_id = extract_user_id(&req)?;
+
+    let file_content = state.minio_client
+        .get_object(&state.config.minio.bucket, &file_path)
+        .await
+        .map_err(|e| AppError::Minio(format!("Failed to retrieve file from Minio: {}", e)))?;
+
+    Ok(HttpResponse::Ok()
+        .content_type("application/octet-stream")
+        .append_header(("Content-Disposition", format!("attachment; filename=\"{}\"", file_path)))
+        .body(file_content))
+}
+
+#[actix_web::post("/files/delete")]
+pub async fn delete_file(
+    req: HttpRequest,
+    state: web::Data<AppState>,
+    file_path: web::Json<String>,
+) -> Result<HttpResponse, AppError> {
+    let user_id = extract_user_id(&req)?;
+
+    state.minio_client
+        .remove_object(&state.config.minio.bucket, &file_path)
+        .await
+        .map_err(|e| AppError::Minio(format!("Failed to delete file from Minio: {}", e)))?;
+
+    Ok(create_response(
+        true,
+        Some("File deleted successfully".to_string()),
+    ))
+}
+
+#[actix_web::post("/files/list")]
+pub async fn list_files(
+    req: HttpRequest,
+    state: web::Data<AppState>,
+    folder_path: web::Json<String>,
+) -> Result<HttpResponse, AppError> {
+    let user_id = extract_user_id(&req)?;
+
+    let objects = state.minio_client
+        .list_objects(&state.config.minio.bucket, &folder_path, None, None)
+        .await
+        .map_err(|e| AppError::Minio(format!("Failed to list objects in Minio: {}", e)))?;
+
+    Ok(create_response(objects, None))
+}
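A client-side sketch of exercising the upload handler above with reqwest (multipart feature enabled); the URL, folder query, and token are invented for illustration:

use reqwest::multipart::{Form, Part};

// Hypothetical client for POST /files/upload.
async fn upload(token: &str, bytes: Vec<u8>) -> reqwest::Result<()> {
    let part = Part::bytes(bytes)
        .file_name("report.txt")
        .mime_str("text/plain")?;
    let form = Form::new().part("file", part);
    let resp = reqwest::Client::new()
        .post("http://localhost:8080/files/upload?folder=reports")
        .bearer_auth(token) // the handler derives user_id from this JWT
        .multipart(form)
        .send()
        .await?;
    println!("status: {}", resp.status());
    Ok(())
}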
@@ -1,110 +1,141 @@
-use minio_rs::minio::client::Client;
+use minio::s3::client::Client;
-use minio_rs::minio::s3::args::{BucketExistsArgs, MakeBucketArgs, RemoveObjectArgs, GetObjectArgs, PutObjectArgs, ListObjectsArgs};
+use minio::s3::args::{BucketExistsArgs, MakeBucketArgs, RemoveObjectArgs, GetObjectArgs, PutObjectArgs, ListObjectsArgs};
-use minio_rs::minio::s3::response::Object;
+use minio::s3::creds::StaticProvider;
-use minio_rs::minio::s3::error::Error as MinioError;
+use minio::s3::error::Error as MinioError;
-use std::path::Path;
+use minio::s3::types::{BaseUrl, Item};
 use std::io::Cursor;
+use std::path::Path;

-/// Represents a file manager for handling MinIO file operations.
 pub struct FileManager {
     client: Client,
     bucket_name: String,
 }

 impl FileManager {
-    /// Creates a new `FileManager` instance.
     pub async fn new(endpoint: &str, access_key: &str, secret_key: &str, bucket_name: &str, use_ssl: bool) -> Result<Self, MinioError> {
-        let client = Client::new(endpoint, access_key, secret_key, use_ssl).await?;
+        // Create BaseUrl from endpoint
+        let base_url = BaseUrl::from_string(endpoint)?;
+        let static_provider = StaticProvider::new(
+            access_key,
+            secret_key,
+            None,
+        );
+        let client = Client::new(base_url.clone(), Some(Box::new(static_provider)), None, None).unwrap();

         Ok(Self {
             client,
             bucket_name: bucket_name.to_string(),
         })
     }

-    /// Checks if the bucket exists, and creates it if it doesn't.
     pub async fn ensure_bucket_exists(&self) -> Result<(), MinioError> {
         let exists = self.client
-            .bucket_exists(&BucketExistsArgs::new(&self.bucket_name))
+            .bucket_exists(&BucketExistsArgs::new(&self.bucket_name)?)
             .await?;
         if !exists {
             self.client
-                .make_bucket(&MakeBucketArgs::new(&self.bucket_name))
+                .make_bucket(&MakeBucketArgs::new(&self.bucket_name)?)
                 .await?;
         }
         Ok(())
     }

-    /// Uploads a file to the specified path.
     pub async fn upload_file(&self, path: &str, file_data: Vec<u8>) -> Result<(), MinioError> {
-        let args = PutObjectArgs::new(&self.bucket_name, path, Cursor::new(file_data), file_data.len() as u64);
+        let reader = Cursor::new(&file_data);
+        let file_size = file_data.len() as u64;
+
+        let args = PutObjectArgs::new(
+            &self.bucket_name,
+            path,
+            reader,
+            Some(file_size),
+            None
+        )?;
+
         self.client.put_object(&args).await?;
         Ok(())
     }

-    /// Downloads a file from the specified path.
     pub async fn download_file(&self, path: &str) -> Result<Vec<u8>, MinioError> {
-        let args = GetObjectArgs::new(&self.bucket_name, path);
+        let args = GetObjectArgs::new(&self.bucket_name, path)?;
         let object = self.client.get_object(&args).await?;
         let data = object.bytes().await?;
         Ok(data.to_vec())
     }

-    /// Copies a file from the source path to the destination path.
     pub async fn copy_file(&self, source_path: &str, destination_path: &str) -> Result<(), MinioError> {
-        let source_args = GetObjectArgs::new(&self.bucket_name, source_path);
-        let object = self.client.get_object(&source_args).await?;
-        let data = object.bytes().await?;
-        let destination_args = PutObjectArgs::new(&self.bucket_name, destination_path, Cursor::new(data.clone()), data.len() as u64);
-        self.client.put_object(&destination_args).await?;
+        // Download the source file
+        let data = self.download_file(source_path).await?;
+
+        // Upload it to the destination
+        let reader = Cursor::new(&data);
+        let file_size = data.len() as u64;
+
+        let args = PutObjectArgs::new(
+            &self.bucket_name,
+            destination_path,
+            reader,
+            Some(file_size),
+            None
+        )?;
+
+        self.client.put_object(&args).await?;
         Ok(())
     }

-    /// Moves a file from the source path to the destination path.
     pub async fn move_file(&self, source_path: &str, destination_path: &str) -> Result<(), MinioError> {
         self.copy_file(source_path, destination_path).await?;
         self.delete_file(source_path).await?;
         Ok(())
     }

-    /// Deletes a file at the specified path.
     pub async fn delete_file(&self, path: &str) -> Result<(), MinioError> {
-        let args = RemoveObjectArgs::new(&self.bucket_name, path);
+        let args = RemoveObjectArgs::new(&self.bucket_name, path)?;
         self.client.remove_object(&args).await?;
         Ok(())
     }

-    /// Lists all files in the specified path.
     pub async fn list_files(&self, prefix: &str) -> Result<Vec<String>, MinioError> {
-        let args = ListObjectsArgs::new(&self.bucket_name).with_prefix(prefix);
+        // Create a predicate function that always returns true
+        let predicate = |_: Vec<Item>| -> bool { true };
+
+        let args = ListObjectsArgs::new(&self.bucket_name, &predicate)?;
         let objects = self.client.list_objects(&args).await?;
-        let file_names = objects.into_iter().map(|obj| obj.name().to_string()).collect();
+
+        // Filter objects based on prefix manually
+        let file_names: Vec<String> = objects
+            .into_iter()
+            .filter(|obj| obj.name().starts_with(prefix))
+            .map(|obj| obj.name().to_string())
+            .collect();
+
         Ok(file_names)
     }

-    /// Retrieves the contents of a file at the specified path.
     pub async fn get_file_contents(&self, path: &str) -> Result<String, MinioError> {
         let data = self.download_file(path).await?;
-        let contents = String::from_utf8(data).map_err(|_| MinioError::InvalidResponse)?;
+        let contents = String::from_utf8(data)
+            .map_err(|_| MinioError::InvalidResponse(400, "Invalid UTF-8 sequence".to_string()))?;
         Ok(contents)
     }

-    /// Creates a folder at the specified path.
     pub async fn create_folder(&self, path: &str) -> Result<(), MinioError> {
         let folder_path = if path.ends_with('/') {
             path.to_string()
         } else {
             format!("{}/", path)
         };
+
+        // Create empty file with folder path
         self.upload_file(&folder_path, vec![]).await
     }

-    /// Shares a folder at the specified path (placeholder implementation).
     pub async fn share_folder(&self, path: &str) -> Result<String, MinioError> {
+        // This is just a placeholder implementation
         Ok(format!("Folder shared: {}", path))
     }

-    /// Searches for files matching the query in the specified path.
     pub async fn search_files(&self, prefix: &str, query: &str) -> Result<Vec<String>, MinioError> {
         let files = self.list_files(prefix).await?;
         let results = files.into_iter().filter(|f| f.contains(query)).collect();
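A minimal usage sketch of the rewritten FileManager; the endpoint, credentials, and bucket name are placeholders:

// Hypothetical wiring: construct the manager, ensure the bucket, round-trip a file.
async fn demo() -> Result<(), MinioError> {
    let fm = FileManager::new("http://localhost:9000", "minioadmin", "minioadmin", "general-bots", false).await?;
    fm.ensure_bucket_exists().await?;
    fm.upload_file("docs/hello.txt", b"hello".to_vec()).await?;
    let names = fm.list_files("docs/").await?;
    println!("{:?}", names);
    Ok(())
}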
37
gb-vm/src/router.rs
Normal file
@@ -0,0 +1,37 @@
+use actix_web::web;
+
+use crate::handlers;
+
+pub fn files_router_configure(cfg: &mut web::ServiceConfig) {
+    // File & Document Management
+    cfg.route("/files/upload", web::post().to(handlers::upload_file))
+        .route("/files/download", web::post().to(handlers::download))
+        .route("/files/delete", web::post().to(handlers::delete_file))
+        .route("/files/getContents", web::post().to(handlers::get_file_contents))
+        .route("/files/createFolder", web::post().to(handlers::create_folder))
+        .route("/files/dirFolder", web::post().to(handlers::dir_folder))
+
+        // Conversations & Real-time Communication
+        .route("/conversations/create", web::post().to(handlers::create_conversation))
+        .route("/conversations/join", web::post().to(handlers::join_conversation))
+        .route("/conversations/leave", web::post().to(handlers::leave_conversation))
+        .route("/conversations/members", web::get().to(handlers::get_conversation_members))
+        .route("/conversations/messages", web::get().to(handlers::get_messages))
+        .route("/conversations/messages/send", web::post().to(handlers::send_message))
+
+        // Communication Services
+        .route("/comm/email/send", web::post().to(handlers::send_email))
+
+        // User Management
+        .route("/users/profile", web::get().to(handlers::get_user_profile))
+
+        // Calendar & Task Management
+        .route("/calendar/events/create", web::post().to(handlers::create_event))
+        .route("/tasks/create", web::post().to(handlers::create_task))
+        .route("/tasks/list", web::get().to(handlers::get_tasks))
+
+        // Admin
+        .route("/admin/system/status", web::get().to(handlers::get_system_status))
+        .route("/admin/logs/view", web::get().to(handlers::view_logs));
+}
40
install.sh
@@ -22,10 +22,15 @@ sudo apt-get install -y \
 postgresql-contrib \
 redis-server \
 libopencv-dev \
-libtesseract-dev \
 cmake \
 protobuf-compiler \
 libprotobuf-dev
+sudo apt reinstall libssl-dev
+sudo apt install -y pkg-config libssl-dev libleptonica-dev
+sudo apt install -y libglib2.0-dev libleptonica-dev pkg-config
+sudo apt install -y build-essential clang libclang-dev libc-dev
+sudo apt install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev

 # Install Rust if not already installed
 if ! command -v cargo &> /dev/null; then

@@ -34,33 +39,6 @@ if ! command -v cargo &> /dev/null; then
     source $HOME/.cargo/env
 fi

-# Install kubectl if not present
-if ! command -v kubectl &> /dev/null; then
-    echo "Installing kubectl..."
-    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
-    chmod +x kubectl
-    sudo mv kubectl /usr/local/bin/
-fi
-
-# Setup project structure
-echo "Setting up project structure..."
-mkdir -p general-bots
-cd general-bots
-
-# Optional: Azure CLI installation
-echo "Would you like to install Azure CLI? (y/n)"
-read -r response
-if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
-then
-    echo "Installing Azure CLI..."
-    curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/microsoft.gpg > /dev/null
-    echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/azure-cli.list
-    sudo apt-get update
-    sudo apt-get install -y azure-cli
-fi
-
-# Optional: HandBrake installation
-echo "Would you like to install HandBrake? (y/n)"
 read -r response
 if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
 then

@@ -94,12 +72,6 @@ echo "Starting Redis service..."
 sudo systemctl start redis-server
 sudo systemctl enable redis-server

-echo "Installation completed!"
-echo "Next steps:"
-echo "1. Configure your Kubernetes cluster"
-echo "2. Update k8s/base/*.yaml files with your configuration"
-echo "3. Run ./deploy.sh to deploy to Kubernetes"
-echo "4. Check deployment status with: kubectl -n general-bots get pods"

 # Print service status
 echo -e "\nService Status:"
@@ -1,84 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: api
-  namespace: general-bots
-spec:
-  replicas: 3
-  selector:
-    matchLabels:
-      app: api
-  template:
-    metadata:
-      labels:
-        app: api
-    spec:
-      containers:
-      - name: api
-        image: generalbotsproject/api:latest
-        ports:
-        - containerPort: 8080
-        env:
-        - name: DATABASE_URL
-          valueFrom:
-            secretKeyRef:
-              name: postgres-creds
-              key: url
-        - name: REDIS_URL
-          value: redis://redis:6379
-        - name: KAFKA_BROKERS
-          value: kafka:9092
-        - name: RABBITMQ_URL
-          value: amqp://rabbitmq:5672
-        resources:
-          requests:
-            memory: "512Mi"
-            cpu: "500m"
-          limits:
-            memory: "1Gi"
-            cpu: "1000m"
-        readinessProbe:
-          httpGet:
-            path: /health
-            port: 8080
-          initialDelaySeconds: 5
-          periodSeconds: 10
-        livenessProbe:
-          httpGet:
-            path: /health
-            port: 8080
-          initialDelaySeconds: 15
-          periodSeconds: 20
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: api
-  namespace: general-bots
-spec:
-  selector:
-    app: api
-  ports:
-  - port: 8080
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: api
-  namespace: general-bots
-  annotations:
-    nginx.ingress.kubernetes.io/cors-allow-methods: "GET,POST,PUT,DELETE,OPTIONS"
-    nginx.ingress.kubernetes.io/cors-allow-origin: "*"
-    nginx.ingress.kubernetes.io/enable-cors: "true"
-spec:
-  rules:
-  - host: api.general-bots.io
-    http:
-      paths:
-      - path: /
-        pathType: Prefix
-        backend:
-          service:
-            name: api
-            port:
-              number: 8080
@@ -1,75 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: document-processor
-  namespace: general-bots
-spec:
-  replicas: 3
-  selector:
-    matchLabels:
-      app: document-processor
-  template:
-    metadata:
-      labels:
-        app: document-processor
-    spec:
-      containers:
-      - name: document-processor
-        image: generalbotsproject/document-processor:latest
-        ports:
-        - containerPort: 8080
-        env:
-        - name: RUST_LOG
-          value: info
-        volumeMounts:
-        - name: temp
-          mountPath: /tmp
-        resources:
-          requests:
-            memory: "512Mi"
-            cpu: "500m"
-          limits:
-            memory: "1Gi"
-            cpu: "1000m"
-        readinessProbe:
-          httpGet:
-            path: /health
-            port: 8080
-          initialDelaySeconds: 5
-          periodSeconds: 10
-        livenessProbe:
-          httpGet:
-            path: /health
-            port: 8080
-          initialDelaySeconds: 15
-          periodSeconds: 20
-      volumes:
-      - name: temp
-        emptyDir: {}
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: document-processor
-  namespace: general-bots
-spec:
-  selector:
-    app: document-processor
-  ports:
-  - port: 8080
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: document-processor-config
-  namespace: general-bots
-data:
-  processing.conf: |
-    max_file_size = 50MB
-    supported_formats = [
-      "pdf",
-      "docx",
-      "xlsx"
-    ]
-    temp_dir = "/tmp"
-    processing_timeout = 300s
@@ -1,75 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: image-processor
-  namespace: general-bots
-spec:
-  replicas: 3
-  selector:
-    matchLabels:
-      app: image-processor
-  template:
-    metadata:
-      labels:
-        app: image-processor
-    spec:
-      containers:
-      - name: image-processor
-        image: generalbotsproject/image-processor:latest
-        ports:
-        - containerPort: 8080
-        env:
-        - name: RUST_LOG
-          value: info
-        - name: OPENCV_DATA_PATH
-          value: /usr/share/opencv4
-        volumeMounts:
-        - name: temp
-          mountPath: /tmp
-        - name: opencv-data
-          mountPath: /usr/share/opencv4
-        resources:
-          requests:
-            memory: "512Mi"
-            cpu: "500m"
-          limits:
-            memory: "1Gi"
-            cpu: "1000m"
-        readinessProbe:
-          httpGet:
-            path: /health
-            port: 8080
-          initialDelaySeconds: 5
-          periodSeconds: 10
-        livenessProbe:
-          httpGet:
-            path: /health
-            port: 8080
-          initialDelaySeconds: 15
-          periodSeconds: 20
-      volumes:
-      - name: temp
-        emptyDir: {}
-      - name: opencv-data
-        configMap:
-          name: opencv-data
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: image-processor
-  namespace: general-bots
-spec:
-  selector:
-    app: image-processor
-  ports:
-  - port: 8080
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: opencv-data
-  namespace: general-bots
-data:
-  haarcascade_frontalface_default.xml: |
-    <include actual cascade classifier XML data here>
@@ -1,33 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: monitoring
-  namespace: general-bots
-  annotations:
-    nginx.ingress.kubernetes.io/rewrite-target: /
-spec:
-  rules:
-  - host: metrics.general-bots.io
-    http:
-      paths:
-      - path: /prometheus
-        pathType: Prefix
-        backend:
-          service:
-            name: prometheus
-            port:
-              number: 9090
-      - path: /grafana
-        pathType: Prefix
-        backend:
-          service:
-            name: grafana
-            port:
-              number: 3000
-      - path: /jaeger
-        pathType: Prefix
-        backend:
-          service:
-            name: jaeger
-            port:
-              number: 16686
@@ -1,53 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: kafka
-  namespace: general-bots
-spec:
-  serviceName: kafka
-  replicas: 3
-  selector:
-    matchLabels:
-      app: kafka
-  template:
-    metadata:
-      labels:
-        app: kafka
-    spec:
-      containers:
-      - name: kafka
-        image: confluentinc/cp-kafka:7.4.0
-        ports:
-        - containerPort: 9092
-        env:
-        - name: KAFKA_ZOOKEEPER_CONNECT
-          value: zookeeper:2181
-        - name: KAFKA_ADVERTISED_LISTENERS
-          value: PLAINTEXT://kafka-$(POD_NAME).kafka:9092
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        volumeMounts:
-        - name: kafka-data
-          mountPath: /var/lib/kafka/data
-  volumeClaimTemplates:
-  - metadata:
-      name: kafka-data
-    spec:
-      accessModes: [ "ReadWriteOnce" ]
-      resources:
-        requests:
-          storage: 100Gi
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: kafka
-  namespace: general-bots
-spec:
-  clusterIP: None
-  selector:
-    app: kafka
-  ports:
-  - port: 9092
@@ -1,12 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-resources:
-- namespace.yaml
-- postgres.yaml
-- redis.yaml
-- kafka.yaml
-- webrtc.yaml
-- api.yaml
-- web.yaml
-- monitoring.yaml
@ -1,175 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: general-bots
spec:
  serviceName: kafka
  replicas: 3
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      containers:
      - name: kafka
        image: confluentinc/cp-kafka:7.4.0
        ports:
        - containerPort: 9092
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: zookeeper:2181
        - name: KAFKA_ADVERTISED_LISTENERS
          value: PLAINTEXT://$(POD_NAME).kafka:9092
        volumeMounts:
        - name: data
          mountPath: /var/lib/kafka
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 100Gi
---
apiVersion: v1
kind: Service
metadata:
  name: kafka
  namespace: general-bots
spec:
  clusterIP: None
  selector:
    app: kafka
  ports:
  - port: 9092
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: rabbitmq
  namespace: general-bots
spec:
  serviceName: rabbitmq
  replicas: 3
  selector:
    matchLabels:
      app: rabbitmq
  template:
    metadata:
      labels:
        app: rabbitmq
    spec:
      containers:
      - name: rabbitmq
        image: rabbitmq:3.12-management
        ports:
        - containerPort: 5672
        - containerPort: 15672
        env:
        - name: RABBITMQ_ERLANG_COOKIE
          valueFrom:
            secretKeyRef:
              name: rabbitmq-secret
              key: erlang-cookie
        volumeMounts:
        - name: data
          mountPath: /var/lib/rabbitmq
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
  name: rabbitmq
  namespace: general-bots
spec:
  selector:
    app: rabbitmq
  ports:
  - name: amqp
    port: 5672
  - name: management
    port: 15672
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: websocket
  namespace: general-bots
spec:
  replicas: 3
  selector:
    matchLabels:
      app: websocket
  template:
    metadata:
      labels:
        app: websocket
    spec:
      containers:
      - name: websocket
        image: generalbotsproject/websocket:latest
        ports:
        - containerPort: 8080
        env:
        - name: REDIS_URL
          value: redis://redis:6379
        - name: KAFKA_BROKERS
          value: kafka:9092
        resources:
          requests:
            memory: "256Mi"
            cpu: "250m"
          limits:
            memory: "512Mi"
            cpu: "500m"
---
apiVersion: v1
kind: Service
metadata:
  name: websocket
  namespace: general-bots
spec:
  selector:
    app: websocket
  ports:
  - port: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: websocket
  namespace: general-bots
  annotations:
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
spec:
  rules:
  - host: ws.general-bots.io
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: websocket
            port:
              number: 8080
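The RabbitMQ StatefulSet reads its Erlang cookie from a rabbitmq-secret that is not defined in this file, so the secret has to exist before the pods start. A hedged sketch for creating it and verifying clustering; the cookie value below is a generated placeholder:

```bash
# Create the secret the StatefulSet references (placeholder cookie)
kubectl -n general-bots create secret generic rabbitmq-secret \
  --from-literal=erlang-cookie="$(openssl rand -hex 16)"

# Once the pods are running, verify the nodes see each other
kubectl -n general-bots exec rabbitmq-0 -- rabbitmqctl cluster_status
```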
@ -1,158 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus
  namespace: general-bots
spec:
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      containers:
      - name: prometheus
        image: prom/prometheus:v2.45.0
        ports:
        - containerPort: 9090
        volumeMounts:
        - name: config
          mountPath: /etc/prometheus
        - name: storage
          mountPath: /prometheus
      volumes:
      - name: config
        configMap:
          name: prometheus-config
      - name: storage
        persistentVolumeClaim:
          claimName: prometheus-storage
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: general-bots
spec:
  selector:
    app: prometheus
  ports:
  - port: 9090
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana
  namespace: general-bots
spec:
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      labels:
        app: grafana
    spec:
      containers:
      - name: grafana
        image: grafana/grafana:9.5.5
        ports:
        - containerPort: 3000
        volumeMounts:
        - name: storage
          mountPath: /var/lib/grafana
      volumes:
      - name: storage
        persistentVolumeClaim:
          claimName: grafana-storage
---
apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: general-bots
spec:
  selector:
    app: grafana
  ports:
  - port: 3000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jaeger
  namespace: general-bots
spec:
  selector:
    matchLabels:
      app: jaeger
  template:
    metadata:
      labels:
        app: jaeger
    spec:
      containers:
      - name: jaeger
        image: jaegertracing/all-in-one:1.47
        ports:
        - containerPort: 16686
        - containerPort: 4317
        - containerPort: 4318
---
apiVersion: v1
kind: Service
metadata:
  name: jaeger
  namespace: general-bots
spec:
  selector:
    app: jaeger
  ports:
  - name: ui
    port: 16686
  - name: otlp-grpc
    port: 4317
  - name: otlp-http
    port: 4318
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: general-bots
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
      evaluation_interval: 15s

    scrape_configs:
    - job_name: 'general-bots'
      static_configs:
      - targets: ['api:8080']
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus-storage
  namespace: general-bots
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-storage
  namespace: general-bots
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
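For local inspection it is often easier to bypass the ingress and port-forward straight to the Services. A suggested check, assuming the default ports declared above:

```bash
# Forward each monitoring UI to localhost
kubectl -n general-bots port-forward svc/prometheus 9090:9090 &
kubectl -n general-bots port-forward svc/grafana 3000:3000 &
kubectl -n general-bots port-forward svc/jaeger 16686:16686 &
```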
@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: general-bots
@ -1,74 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nlp
  namespace: general-bots
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nlp
  template:
    metadata:
      labels:
        app: nlp
    spec:
      containers:
      - name: nlp
        image: generalbotsproject/nlp:latest
        ports:
        - containerPort: 8080
        env:
        - name: RUST_LOG
          value: info
        - name: MODEL_CACHE_DIR
          value: /models
        volumeMounts:
        - name: models
          mountPath: /models
        resources:
          requests:
            memory: "4Gi"
            cpu: "2000m"
          limits:
            memory: "8Gi"
            cpu: "4000m"
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
          initialDelaySeconds: 30
          periodSeconds: 10
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
          initialDelaySeconds: 60
          periodSeconds: 20
      volumes:
      - name: models
        persistentVolumeClaim:
          claimName: nlp-models
---
apiVersion: v1
kind: Service
metadata:
  name: nlp
  namespace: general-bots
spec:
  selector:
    app: nlp
  ports:
  - port: 8080
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nlp-models
  namespace: general-bots
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
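With two replicas sharing a single ReadWriteOnce claim, both pods can only mount the model volume if they are scheduled onto the same node, so the rollout is worth watching. A sketch for checking the probes, using the same /health endpoint the manifest already points at:

```bash
# Watch the rollout and confirm both replicas pass their probes
kubectl -n general-bots rollout status deployment/nlp
kubectl -n general-bots get pods -l app=nlp

# Hit the endpoint the probes use
kubectl -n general-bots port-forward deploy/nlp 8080:8080 &
curl -s http://localhost:8080/health
```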
@ -1,57 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
  namespace: general-bots
spec:
  serviceName: postgres
  replicas: 3
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
      - name: postgres
        image: postgres:15
        ports:
        - containerPort: 5432
        env:
        - name: POSTGRES_DB
          value: generalbots
        - name: POSTGRES_USER
          valueFrom:
            secretKeyRef:
              name: postgres-creds
              key: username
        - name: POSTGRES_PASSWORD
          valueFrom:
            secretKeyRef:
              name: postgres-creds
              key: password
        volumeMounts:
        - name: postgres-data
          mountPath: /var/lib/postgresql/data
  volumeClaimTemplates:
  - metadata:
      name: postgres-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 100Gi
---
apiVersion: v1
kind: Service
metadata:
  name: postgres
  namespace: general-bots
spec:
  clusterIP: None
  selector:
    app: postgres
  ports:
  - port: 5432
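The pods will sit in a config error state until the postgres-creds secret exists, and note that three replicas of a plain postgres:15 StatefulSet are three independent servers, not a replicated cluster. A hedged bootstrap sketch; the username and password below are placeholders:

```bash
# Create the credentials the StatefulSet expects (placeholder values)
kubectl -n general-bots create secret generic postgres-creds \
  --from-literal=username=gbuser \
  --from-literal=password="$(openssl rand -base64 24)"

# Per-pod DNS via the headless service, e.g. postgres-0.postgres
kubectl -n general-bots exec postgres-0 -- \
  psql -U gbuser -d generalbots -c 'SELECT 1;'
```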
@ -1,53 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis
  namespace: general-bots
spec:
  serviceName: redis
  replicas: 3
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - name: redis
        image: redis:7
        ports:
        - containerPort: 6379
        command:
        - redis-server
        - /etc/redis/redis.conf
        volumeMounts:
        - name: redis-config
          mountPath: /etc/redis
        - name: redis-data
          mountPath: /data
      volumes:
      - name: redis-config
        configMap:
          name: redis-config
  volumeClaimTemplates:
  - metadata:
      name: redis-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: general-bots
spec:
  clusterIP: None
  selector:
    app: redis
  ports:
  - port: 6379
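The containers load /etc/redis/redis.conf from a redis-config ConfigMap that this file references but never defines. A minimal sketch for providing it and pinging a replica; the config contents here are an assumption:

```bash
# Provide the ConfigMap the StatefulSet mounts (minimal assumed config)
printf 'appendonly yes\n' > redis.conf
kubectl -n general-bots create configmap redis-config --from-file=redis.conf

# Basic liveness check against the first replica
kubectl -n general-bots exec redis-0 -- redis-cli ping
```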
@ -1,44 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webrtc
  namespace: general-bots
spec:
  replicas: 3
  selector:
    matchLabels:
      app: webrtc
  template:
    metadata:
      labels:
        app: webrtc
    spec:
      containers:
      - name: webrtc
        image: generalbotsproject/webrtc:latest
        ports:
        - containerPort: 8080
        env:
        - name: REDIS_URL
          value: redis:6379
        - name: KAFKA_BROKERS
          value: kafka:9092
        resources:
          requests:
            memory: "1Gi"
            cpu: "500m"
          limits:
            memory: "2Gi"
            cpu: "1000m"
---
apiVersion: v1
kind: Service
metadata:
  name: webrtc
  namespace: general-bots
spec:
  selector:
    app: webrtc
  ports:
  - port: 8080
  type: ClusterIP
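Unlike the websocket Deployment, REDIS_URL here is given as redis:6379 without a redis:// scheme; whether that parses depends on the client library. Since the Deployment is stateless it scales horizontally; a suggested check:

```bash
# Scale out and confirm the Service picks up all endpoints
kubectl -n general-bots scale deployment/webrtc --replicas=5
kubectl -n general-bots get endpoints webrtc
```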
@ -552,7 +552,7 @@ Dependencies original, migrate everything to workspace.dependencies
 migrate them to be Rust-compatible,

 - do not skip items; migrate everything, improved according to your best interpretation.
-- use kubernetes and create environment configuration for everything and ingress to have several server nodes if needed, automatically
+- create environment configuration for everything and ingress to have several server nodes if needed, automatically
 - I NEED A FULL CODE SOLUTION IN PROFESSIONAL, TESTABLE RUST CODE: split the answer into several parts if needed, but provide the ENTIRE CODE, a complete working load-balanced server. IMPORTANT: generate the project as a .sh shell script that uses cat so the entire code base can be restored, with no placeholders and no TODOs.
 - VERY IMPORTANT: DO NOT put things like // Add other system routes... you should WRITE ACTUAL CODE.
 - Need tests for every line of code written.
@ -8,8 +8,8 @@ echo "Testing gb-core..."
 cd gb-core && cargo test

 # API tests
-echo "Testing gb-api..."
-cd ../gb-api && cargo test
+echo "Testing gb-server..."
+cd ../gb-server && cargo test

 # VM tests
 echo "Testing gb-vm..."
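The script walks the crates one by one; if they are all members of a single Cargo workspace, as the workspace.dependencies migration suggests, the same coverage is available in one command:

```bash
# Run every workspace member's tests in one pass (assumes one workspace)
cargo test --workspace
```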
128 setupk.sh
@ -1,128 +0,0 @@
#!/bin/bash

# Enable error handling
set -e

# Function to check command status
check_status() {
    if [ $? -eq 0 ]; then
        echo "✅ $1 successful"
    else
        echo "❌ $1 failed"
        exit 1
    fi
}

echo "🚀 Starting Kubernetes installation..."

# Update system
echo "📦 Updating system packages..."
sudo apt-get update && sudo apt-get upgrade -y
check_status "System update"

# Install prerequisites
echo "📦 Installing prerequisites..."
sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
check_status "Prerequisites installation"

# Install containerd
echo "🐋 Installing containerd..."
sudo apt-get install -y containerd
check_status "Containerd installation"

# Configure containerd
echo "⚙️ Configuring containerd..."
sudo mkdir -p /etc/containerd
sudo containerd config default | sudo tee /etc/containerd/config.toml > /dev/null
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sudo systemctl restart containerd
sudo systemctl enable containerd
check_status "Containerd configuration"

# Disable swap
echo "⚙️ Disabling swap..."
sudo swapoff -a
sudo sed -i '/swap/d' /etc/fstab
check_status "Swap disabled"

# Load kernel modules
echo "⚙️ Loading kernel modules..."
sudo modprobe overlay
sudo modprobe br_netfilter
check_status "Kernel modules loaded"

# Configure system settings
echo "⚙️ Configuring system settings..."
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
check_status "System settings configuration"

# Add Kubernetes repository
echo "📦 Adding Kubernetes repository..."
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
check_status "Kubernetes repository addition"

# Install Kubernetes components
echo "📦 Installing Kubernetes components..."
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
check_status "Kubernetes components installation"

# Initialize Kubernetes cluster
echo "🚀 Initializing Kubernetes cluster..."
sudo kubeadm init --pod-network-cidr=10.244.0.0/16
check_status "Kubernetes initialization"

# Configure kubectl
echo "⚙️ Configuring kubectl..."
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
check_status "kubectl configuration"

# Install Flannel network plugin
echo "🔌 Installing Flannel network plugin..."
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
check_status "Flannel installation"

# Allow scheduling on control-plane node
echo "⚙️ Enabling workload scheduling on control-plane..."
kubectl taint nodes --all node-role.kubernetes.io/control-plane- || true
kubectl taint nodes --all node-role.kubernetes.io/master- || true
check_status "Node configuration"

# Verify installation
echo "🔍 Verifying installation..."
kubectl get nodes
check_status "Node verification"

echo "✨ Kubernetes installation completed successfully!"
echo "🔍 Cluster status:"
kubectl cluster-info
echo "📝 Node status:"
kubectl get nodes

# Save cluster join command if needed
echo "💾 Saving cluster join command..."
sudo kubeadm token create --print-join-command > $HOME/k8s_join_command.txt
chmod 600 $HOME/k8s_join_command.txt
echo "Join command saved to $HOME/k8s_join_command.txt"

echo "
✅ Installation complete!
To start using your cluster:
  kubectl get nodes
  kubectl get pods --all-namespaces

To reset the cluster if needed:
  sudo kubeadm reset
  sudo rm -rf /etc/cni/net.d
  sudo rm -rf \$HOME/.kube/config
"
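One caveat when reading the script: with set -e, a failing command aborts immediately, so check_status's failure branch never actually runs; it only decorates the success path. To grow the cluster, the saved join command is run on each new node; a hedged sketch, assuming the file has been copied to that node:

```bash
# On a new worker node, after copying k8s_join_command.txt over:
sudo bash "$HOME/k8s_join_command.txt"

# Back on the control plane, confirm the node registered
kubectl get nodes -o wide
```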