feat: init new minio integration
parent 9b9c3af567
commit 2c7eb7c090
@@ -0,0 +1,48 @@
# Application Configuration
NODE_ENV=development
PORT=3000
LOG_LEVEL=info
API_BASE_URL=http://localhost:3000

# CORS Configuration
CORS_ORIGIN=*

# Database Configuration
DB_HOST=postgres
DB_PORT=5432
DB_NAME=banatie
DB_USER=banatie_user
DB_PASSWORD=banatie_secure_password

# MinIO Storage Configuration (SNMD)
MINIO_ROOT_USER=banatie_admin
MINIO_ROOT_PASSWORD=banatie_storage_secure_key_2024
STORAGE_TYPE=minio
MINIO_ENDPOINT=minio:9000
MINIO_ACCESS_KEY=banatie_service
MINIO_SECRET_KEY=banatie_service_key_2024
MINIO_USE_SSL=false
MINIO_BUCKET_NAME=banatie
MINIO_PUBLIC_URL=http://localhost:9000

# AI Service Configuration
GEMINI_API_KEY=AIzaSyBaOt9JMPGKA3811FL-ssf1n5Hh9Jauly8

# File Upload Configuration
MAX_FILE_SIZE=5242880
MAX_FILES=3

# Multi-tenancy Configuration (Production-Ready Names)
DEFAULT_ORG_ID=default
DEFAULT_PROJECT_ID=main
DEFAULT_USER_ID=system

# Presigned URL Configuration
PRESIGNED_URL_EXPIRY=86400 # 24 hours

# Directory Configuration
RESULTS_DIR=/app/results
UPLOADS_DIR=/app/uploads/temp

# Logging Configuration
LOG_LEVEL=info
@@ -0,0 +1,136 @@
version: '3.8'

services:
  app:
    build:
      context: .
      target: development
    container_name: banatie-app
    ports:
      - "3000:3000"
    volumes:
      - ./src:/app/src
      - ./logs:/app/logs
    networks:
      - banatie-network
    depends_on:
      postgres:
        condition: service_healthy
      minio:
        condition: service_healthy
    environment:
      - NODE_ENV=development
    env_file:
      - .env.docker
    restart: unless-stopped

  postgres:
    image: postgres:15-alpine
    container_name: banatie-postgres
    ports:
      - "5434:5432"
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
      - ./scripts/init-db.sql:/docker-entrypoint-initdb.d/01-init.sql
    networks:
      - banatie-network
    environment:
      POSTGRES_DB: banatie
      POSTGRES_USER: banatie_user
      POSTGRES_PASSWORD: banatie_secure_password
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U banatie_user -d banatie"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    restart: unless-stopped

  # SNMD MinIO Setup - Production Ready
  minio:
    image: quay.io/minio/minio:latest
    container_name: banatie-storage
    ports:
      - "9000:9000" # S3 API
      - "9001:9001" # Console
    volumes:
      # SNMD: 4 drives for full S3 compatibility and erasure coding
      - ./data/storage/drive1:/data1
      - ./data/storage/drive2:/data2
      - ./data/storage/drive3:/data3
      - ./data/storage/drive4:/data4
    networks:
      - banatie-network
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
      MINIO_BROWSER_REDIRECT_URL: http://localhost:9001
      MINIO_SERVER_URL: http://localhost:9000
    # CRITICAL: SNMD command for full S3 compatibility
    command: server /data{1...4} --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    restart: unless-stopped

  storage-init:
    image: minio/mc:latest
    container_name: banatie-storage-init
    networks:
      - banatie-network
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: |
      /bin/sh -c "
      echo 'Setting up MinIO alias...';
      mc alias set storage http://minio:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD};

      echo 'Creating main bucket...';
      mc mb --ignore-existing storage/banatie;

      echo 'Creating service user...';
      mc admin user add storage banatie_service banatie_service_key_2024;

      echo 'Attaching readwrite policy to service user...';
      mc admin policy attach storage readwrite --user=banatie_service;

      echo 'Setting up lifecycle policy...';
      cat > /tmp/lifecycle.json << EOF
      {
        \"Rules\": [
          {
            \"ID\": \"temp-cleanup\",
            \"Status\": \"Enabled\",
            \"Filter\": {
              \"Prefix\": \"temp/\"
            },
            \"Expiration\": {
              \"Days\": 7
            }
          }
        ]
      }
      EOF
      mc ilm import storage/banatie < /tmp/lifecycle.json;

      echo 'Storage initialization completed!';
      echo 'Bucket: banatie';
      echo 'Using presigned URLs for secure access';
      echo 'SNMD mode: Full S3 compatibility enabled';
      exit 0;
      "
    restart: "no"

networks:
  banatie-network:
    driver: bridge

volumes:
  postgres-data:
    driver: local
  storage-data:
    driver: local
@@ -0,0 +1,265 @@
# MinIO Setup Technical Specification

## Project Status
Starting MinIO integration from scratch. The previous implementation had compatibility issues with bucket policies in SNSD (Single Node Single Drive) mode. The new implementation uses an SNMD (Single Node Multi Drive) configuration with presigned URLs for reliable file access.
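
To make the access model concrete, here is a minimal sketch of how a time-limited download link is produced with the MinIO JavaScript client. It is illustrative only: the object key, expiry, and credentials are example values, and the real logic lives in `MinioStorageService.getPresignedDownloadUrl`.

```typescript
import { Client } from 'minio';

// Example client wired to the local docker-compose service (values from .env.docker).
const client = new Client({
  endPoint: 'localhost',
  port: 9000,
  useSSL: false,
  accessKey: 'banatie_service',
  secretKey: 'banatie_service_key_2024',
});

async function exampleLink(): Promise<string> {
  // The bucket stays private; the URL itself carries a signature valid for 24 hours.
  return client.presignedGetObject(
    'banatie',
    'default/main/generated/2024-06/example.png', // illustrative object key
    24 * 60 * 60
  );
}
```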

## Architecture Overview

### Storage Strategy
- **Mode**: SNMD (4 virtual drives) for full S3 compatibility
- **Access Method**: Presigned URLs only (no bucket policies)
- **Bucket Structure**: Single bucket `banatie` with path-based organization
- **File Organization**: `orgId/projectId/category/year-month/filename` (see the example after this list)
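
As a worked example of this layout, the object key for a generated image in the default tenant might look like the following (all values are illustrative; the unique suffix comes from `generateUniqueFilename` in `MinioStorageService`):

```typescript
// Hypothetical values for the default org/project defined in .env.docker.
const orgId = 'default';
const projectId = 'main';
const category = 'generated';
const yearMonth = '2024-06';
const filename = 'cat-portrait-1718000000000-a1b2c3.png';

const objectKey = `${orgId}/${projectId}/${category}/${yearMonth}/${filename}`;
// => "default/main/generated/2024-06/cat-portrait-1718000000000-a1b2c3.png"
```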

### Technology Stack
- MinIO latest (`quay.io/minio/minio:latest`)
- Docker Compose for orchestration
- PostgreSQL for application data
- Express.js API with TypeScript

## Configuration Files Status

### Completed Files
- `docker-compose.yml` - SNMD configuration with 4 virtual drives
- `.env.docker` - Environment variables for development
- `src/services/MinioStorageService.ts` - Updated service implementation
- `src/services/StorageFactory.ts` - Service factory configuration
- `src/routes/images.ts` - Presigned URL endpoints

### Integration Requirements

#### 1. Update Application Router
Add the images router to the main application in `src/app.ts`:

```typescript
import { imagesRouter } from './routes/images';

// Add to routes section
app.use('/api', imagesRouter);
```

#### 2. Environment Variables Update
Update the existing `.env` file with the MinIO configuration:

```bash
# Add to existing .env file
MINIO_ROOT_USER=banatie_admin
MINIO_ROOT_PASSWORD=banatie_storage_secure_key_2024
STORAGE_TYPE=minio
MINIO_ENDPOINT=minio:9000
MINIO_ACCESS_KEY=banatie_service
MINIO_SECRET_KEY=banatie_service_key_2024
MINIO_USE_SSL=false
MINIO_BUCKET_NAME=banatie
MINIO_PUBLIC_URL=http://localhost:9000
API_BASE_URL=http://localhost:3000
DEFAULT_ORG_ID=default
DEFAULT_PROJECT_ID=main
DEFAULT_USER_ID=system
PRESIGNED_URL_EXPIRY=86400
```

#### 3. Database Script Update
Update `scripts/init-db.sql` to use the new database name `banatie` instead of the previous naming.

#### 4. Service Dependencies Update
Update existing image generation services to use the new storage configuration:

```typescript
// In ImageGenService.ts or similar
const storageService = StorageFactory.getInstance();
const uploadResult = await storageService.uploadFile(
  orgId,
  projectId,
  'generated',
  filename,
  buffer,
  'image/png'
);
// Use uploadResult.url (returns API URL for presigned access)
```

## Setup Instructions

### 1. Directory Structure
Create the required directories:
```bash
mkdir -p data/storage/{drive1,drive2,drive3,drive4}
mkdir -p data/postgres
```

### 2. Service Startup
```bash
# Start all services
docker-compose up -d

# Verify services are healthy
docker-compose ps

# Check MinIO logs
docker logs banatie-storage

# Check initialization logs
docker logs banatie-storage-init
```

### 3. Verification Steps

#### MinIO Console Access
- URL: http://localhost:9001
- Username: banatie_admin
- Password: banatie_storage_secure_key_2024

#### Test Presigned URL Generation
```bash
# Test image upload endpoint
curl -X POST http://localhost:3000/api/upload \
  -F "files=@test.png" \
  -F "category=generated"

# Test presigned URL access
curl -I "http://localhost:3000/api/images/default/main/generated/test-image.png"
# Should return 302 redirect to presigned URL
```

#### Verify SNMD Mode
```bash
# Check MinIO is in erasure coding mode
docker exec banatie-storage mc admin info local
# Should show 4 drives and erasure coding information
```

## Integration Testing

### Required Tests
1. **Storage Service Initialization**
   - Verify StorageFactory creates MinioStorageService
   - Confirm bucket creation and accessibility

2. **File Upload/Download Cycle** (see the service-level sketch after this list)
   - Upload file via StorageService
   - Generate presigned URL
   - Verify file accessibility via presigned URL

3. **API Endpoint Testing**
   - Test `/api/images/:orgId/:projectId/:category/:filename`
   - Verify 302 redirect to presigned URL
   - Test fallback direct streaming

4. **Error Handling**
   - Test invalid file paths
   - Test expired presigned URLs
   - Test MinIO connection failures
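
A service-level sketch of the upload/download cycle (test 2), assuming MinIO is running via `docker-compose` and the variables from `.env.docker` are loaded. The file name, buffer contents, import path, and the use of Node 18+ global `fetch` are assumptions for illustration; the `StorageFactory` and `MinioStorageService` APIs are the ones added in this commit.

```typescript
import { StorageFactory } from '../services/StorageFactory';

async function testUploadDownloadCycle(): Promise<void> {
  const storage = StorageFactory.getInstance();

  // 1. Upload a small in-memory payload as a stand-in for a real image.
  const result = await storage.uploadFile(
    'default',
    'main',
    'generated',
    'integration-test.png',
    Buffer.from('not-a-real-png'),
    'image/png'
  );
  console.log('Uploaded as', result.filename);

  // 2. Ask for a short-lived presigned URL for the stored object.
  const url = await storage.getPresignedDownloadUrl(
    'default',
    'main',
    'generated',
    result.filename,
    60 // seconds
  );

  // 3. The object should be reachable directly through the presigned URL.
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`Expected 200 from presigned URL, got ${response.status}`);
  }
  console.log('Presigned URL download OK');
}

testUploadDownloadCycle().catch((err) => {
  console.error(err);
  process.exit(1);
});
```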

### Test Script Template
```bash
#!/bin/bash
# test-minio-integration.sh

echo "Testing MinIO Integration"

# Test 1: Upload file
UPLOAD_RESPONSE=$(curl -s -X POST http://localhost:3000/api/upload \
  -F "files=@test.png" \
  -F "category=generated")

echo "Upload Response: $UPLOAD_RESPONSE"

# Extract URL from response
FILE_URL=$(echo "$UPLOAD_RESPONSE" | jq -r '.files[0].url')

# Test 2: Access via presigned URL
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" "$FILE_URL")

if [ "$HTTP_CODE" = "302" ]; then
  echo "SUCCESS: Presigned URL redirect working"
else
  echo "FAILURE: Expected 302, got $HTTP_CODE"
fi
```

## Production Considerations

### Environment Differences
Development and production use the same configuration keys with different values:

```bash
# Production .env differences
API_BASE_URL=https://api.yourdomain.com
MINIO_PUBLIC_URL=https://storage.yourdomain.com:9000
MINIO_USE_SSL=true
```

### Security Notes
- Root credentials are passed to the MinIO container via environment variables rather than hardcoded in `docker-compose.yml`
- Presigned URLs expire after 24 hours by default (see the request sketch after this list)
- Service user has minimal required permissions
- MinIO admin access is separate from application access
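
As a usage example of the expiry default, the companion route `/api/images/url/:orgId/:projectId/:category/:filename` added in `src/routes/images.ts` accepts an `expiry` query parameter in seconds. A client-side sketch, with hypothetical path values, run inside an async context:

```typescript
// Request a short-lived (10 minute) presigned URL instead of relying on the 24h default.
const apiBase = process.env['API_BASE_URL'] ?? 'http://localhost:3000';

const res = await fetch(
  `${apiBase}/api/images/url/default/main/generated/example.png?expiry=600`
);
const body = await res.json();
// body => { success: true, url: '<presigned URL>', expiresIn: 600 }
```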

### Scaling Path
- Current SNMD setup supports development and small production loads
- Can migrate to distributed MinIO cluster when needed
- Presigned URL architecture remains unchanged during scaling

## Troubleshooting

### Common Issues
1. **403 Errors**: Check presigned URL generation and MinIO service user permissions
2. **404 Errors**: Verify file paths and bucket configuration
3. **Connection Errors**: Confirm MinIO service health and network connectivity

### Debug Commands
```bash
# Check MinIO health
docker exec banatie-storage mc admin info local

# List bucket contents
docker exec banatie-storage mc ls storage/banatie --recursive

# Check service logs
docker logs banatie-storage
docker logs banatie-app
```

### Recovery Procedures
```bash
# Reset MinIO data (development only)
docker-compose down
rm -rf data/storage/*
docker-compose up -d

# Recreate bucket structure
docker exec banatie-storage mc mb storage/banatie
```

## Implementation Priority

### Phase 1 (Immediate)
1. Update app.ts with images router
2. Update environment configuration
3. Test basic upload/download functionality

### Phase 2 (Next)
1. Update existing image generation services
2. Implement comprehensive error handling
3. Add integration tests

### Phase 3 (Future)
1. Add monitoring and logging
2. Implement file cleanup policies
3. Add CDN integration capability

## Acceptance Criteria

Integration is complete when:
- [ ] All services start successfully via docker-compose
- [ ] MinIO operates in SNMD mode with 4 drives
- [ ] Image upload returns API URL format
- [ ] API URLs redirect to working presigned URLs
- [ ] Generated images accessible via presigned URLs
- [ ] Error handling provides meaningful responses
- [ ] Integration tests pass consistently

## Notes

This implementation prioritizes simplicity and reliability over advanced features. The presigned URL approach ensures consistent behavior across different MinIO configurations and provides a foundation for future enhancements without requiring architectural changes.
@@ -0,0 +1,121 @@
import { Router, Request, Response } from 'express';
import { StorageFactory } from '../services/StorageFactory';
import { asyncHandler } from '../middleware/errorHandler';

export const imagesRouter = Router();

/**
 * GET /api/images/:orgId/:projectId/:category/:filename
 * Serves images via presigned URLs (redirect approach)
 */
imagesRouter.get(
  '/images/:orgId/:projectId/:category/:filename',
  asyncHandler(async (req: Request, res: Response) => {
    const { orgId, projectId, category, filename } = req.params;

    // Validate category
    if (!['uploads', 'generated', 'references'].includes(category)) {
      return res.status(400).json({
        success: false,
        message: 'Invalid category'
      });
    }

    const storageService = StorageFactory.getInstance();

    try {
      // Method 1: Redirect to presigned URL (24 hour expiry)
      const presignedUrl = await storageService.getPresignedDownloadUrl(
        orgId,
        projectId,
        category as 'uploads' | 'generated' | 'references',
        filename,
        24 * 60 * 60 // 24 hours
      );

      // Redirect to the presigned URL
      return res.redirect(302, presignedUrl);

    } catch (error) {
      console.error('Failed to generate presigned URL:', error);

      try {
        // Method 2: Fallback - Stream the file directly through our API
        const fileBuffer = await storageService.downloadFile(
          orgId,
          projectId,
          category as 'uploads' | 'generated' | 'references',
          filename
        );

        // Determine content type from filename
        const ext = filename.toLowerCase().split('.').pop();
        const contentType = {
          'png': 'image/png',
          'jpg': 'image/jpeg',
          'jpeg': 'image/jpeg',
          'gif': 'image/gif',
          'webp': 'image/webp',
          'svg': 'image/svg+xml'
        }[ext || ''] || 'application/octet-stream';

        res.setHeader('Content-Type', contentType);
        res.setHeader('Cache-Control', 'public, max-age=86400'); // 24 hours
        res.setHeader('Content-Length', fileBuffer.length);

        return res.send(fileBuffer);

      } catch (streamError) {
        console.error('Failed to stream file:', streamError);
        return res.status(404).json({
          success: false,
          message: 'File not found'
        });
      }
    }
  })
);

/**
 * GET /api/images/url/:orgId/:projectId/:category/:filename
 * Returns a presigned URL instead of redirecting
 */
imagesRouter.get(
  '/images/url/:orgId/:projectId/:category/:filename',
  asyncHandler(async (req: Request, res: Response) => {
    const { orgId, projectId, category, filename } = req.params;
    const { expiry = '3600' } = req.query; // Default 1 hour

    if (!['uploads', 'generated', 'references'].includes(category)) {
      return res.status(400).json({
        success: false,
        message: 'Invalid category'
      });
    }

    const storageService = StorageFactory.getInstance();

    try {
      const presignedUrl = await storageService.getPresignedDownloadUrl(
        orgId,
        projectId,
        category as 'uploads' | 'generated' | 'references',
        filename,
        parseInt(expiry as string, 10)
      );

      return res.json({
        success: true,
        url: presignedUrl,
        expiresIn: parseInt(expiry as string, 10)
      });

    } catch (error) {
      console.error('Failed to generate presigned URL:', error);
      return res.status(404).json({
        success: false,
        message: 'File not found or access denied'
      });
    }
  })
);
@@ -0,0 +1,261 @@
import { Client as MinioClient } from 'minio';
import { StorageService, FileMetadata, UploadResult } from './StorageService';

export class MinioStorageService implements StorageService {
  private client: MinioClient;
  private bucketName: string;
  private publicUrl: string;

  constructor(
    endpoint: string,
    accessKey: string,
    secretKey: string,
    useSSL: boolean = false,
    bucketName: string = 'banatie',
    publicUrl?: string
  ) {
    // Parse endpoint to separate hostname and port
    const cleanEndpoint = endpoint.replace(/^https?:\/\//, '');
    const [hostname, portStr] = cleanEndpoint.split(':');
    const port = portStr ? parseInt(portStr, 10) : (useSSL ? 443 : 9000);

    if (!hostname) {
      throw new Error(`Invalid MinIO endpoint: ${endpoint}`);
    }

    this.client = new MinioClient({
      endPoint: hostname,
      port: port,
      useSSL,
      accessKey,
      secretKey
    });
    this.bucketName = bucketName;
    this.publicUrl = publicUrl || `${useSSL ? 'https' : 'http'}://${endpoint}`;
  }

  private getFilePath(
    orgId: string,
    projectId: string,
    category: 'uploads' | 'generated' | 'references',
    filename: string
  ): string {
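    // Note: the year-month segment is derived from the current date, so download,
    // delete, and presign lookups only resolve objects written in the current
    // calendar month; callers that need older objects must keep the full key.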
    const now = new Date();
    const year = now.getFullYear();
    const month = String(now.getMonth() + 1).padStart(2, '0');

    return `${orgId}/${projectId}/${category}/${year}-${month}/${filename}`;
  }

  private generateUniqueFilename(originalFilename: string): string {
    const timestamp = Date.now();
    const random = Math.random().toString(36).substring(2, 8);
    const ext = originalFilename.includes('.')
      ? originalFilename.substring(originalFilename.lastIndexOf('.'))
      : '';
    const name = originalFilename.includes('.')
      ? originalFilename.substring(0, originalFilename.lastIndexOf('.'))
      : originalFilename;

    return `${name}-${timestamp}-${random}${ext}`;
  }

  async createBucket(): Promise<void> {
    const exists = await this.client.bucketExists(this.bucketName);
    if (!exists) {
      await this.client.makeBucket(this.bucketName, 'us-east-1');
      console.log(`Created bucket: ${this.bucketName}`);
    }

    // Note: With SNMD and presigned URLs, we don't need bucket policies
    console.log(`Bucket ${this.bucketName} ready for presigned URL access`);
  }

  async bucketExists(): Promise<boolean> {
    return await this.client.bucketExists(this.bucketName);
  }

  async uploadFile(
    orgId: string,
    projectId: string,
    category: 'uploads' | 'generated' | 'references',
    filename: string,
    buffer: Buffer,
    contentType: string
  ): Promise<UploadResult> {
    // Ensure bucket exists
    await this.createBucket();

    // Generate unique filename to avoid conflicts
    const uniqueFilename = this.generateUniqueFilename(filename);
    const filePath = this.getFilePath(orgId, projectId, category, uniqueFilename);

    const metadata = {
      'Content-Type': contentType,
      'X-Amz-Meta-Original-Name': filename,
      'X-Amz-Meta-Category': category,
      'X-Amz-Meta-Project': projectId,
      'X-Amz-Meta-Organization': orgId,
      'X-Amz-Meta-Upload-Time': new Date().toISOString()
    };

    console.log(`Uploading file to: ${this.bucketName}/${filePath}`);

    const result = await this.client.putObject(
      this.bucketName,
      filePath,
      buffer,
      buffer.length,
      metadata
    );

    const key = `${this.bucketName}/${filePath}`;
    const url = this.getPublicUrl(orgId, projectId, category, uniqueFilename);

    console.log(`Generated API URL: ${url}`);

    return {
      key,
      filename: uniqueFilename,
      url,
      etag: result.etag,
      size: buffer.length
    };
  }

  async downloadFile(
    orgId: string,
    projectId: string,
    category: 'uploads' | 'generated' | 'references',
    filename: string
  ): Promise<Buffer> {
    const filePath = this.getFilePath(orgId, projectId, category, filename);

    const stream = await this.client.getObject(this.bucketName, filePath);

    return new Promise((resolve, reject) => {
      const chunks: Buffer[] = [];
      stream.on('data', (chunk) => chunks.push(chunk));
      stream.on('end', () => resolve(Buffer.concat(chunks)));
      stream.on('error', reject);
    });
  }

  async deleteFile(
    orgId: string,
    projectId: string,
    category: 'uploads' | 'generated' | 'references',
    filename: string
  ): Promise<void> {
    const filePath = this.getFilePath(orgId, projectId, category, filename);
    await this.client.removeObject(this.bucketName, filePath);
  }

  getPublicUrl(
    orgId: string,
    projectId: string,
    category: 'uploads' | 'generated' | 'references',
    filename: string
  ): string {
    // Production-ready: Return API URL for presigned URL access
    const apiBaseUrl = process.env['API_BASE_URL'] || 'http://localhost:3000';
    return `${apiBaseUrl}/api/images/${orgId}/${projectId}/${category}/${filename}`;
  }

  async getPresignedUploadUrl(
    orgId: string,
    projectId: string,
    category: 'uploads' | 'generated' | 'references',
    filename: string,
    expirySeconds: number = 3600
  ): Promise<string> {
    const filePath = this.getFilePath(orgId, projectId, category, filename);
    return await this.client.presignedPutObject(this.bucketName, filePath, expirySeconds);
  }

  async getPresignedDownloadUrl(
    orgId: string,
    projectId: string,
    category: 'uploads' | 'generated' | 'references',
    filename: string,
    expirySeconds: number = 86400 // 24 hours default
  ): Promise<string> {
    const filePath = this.getFilePath(orgId, projectId, category, filename);
    return await this.client.presignedGetObject(this.bucketName, filePath, expirySeconds);
  }

  async listProjectFiles(
    orgId: string,
    projectId: string,
    category?: 'uploads' | 'generated' | 'references'
  ): Promise<FileMetadata[]> {
    const prefix = category ? `${orgId}/${projectId}/${category}/` : `${orgId}/${projectId}/`;

    const files: FileMetadata[] = [];

    return new Promise((resolve, reject) => {
      const stream = this.client.listObjects(this.bucketName, prefix, true);
      const pending: Promise<void>[] = [];

      stream.on('data', (obj) => {
        if (!obj.name) return;
        const objectName = obj.name;

        // Collect the async stat lookups and await them on 'end';
        // resolving immediately would return before metadata has arrived.
        pending.push(
          (async () => {
            try {
              const metadata = await this.client.statObject(this.bucketName, objectName);

              const pathParts = objectName.split('/');
              const filename = pathParts[pathParts.length - 1];
              const categoryFromPath = pathParts[2] as 'uploads' | 'generated' | 'references';

              if (!filename || !categoryFromPath) {
                return;
              }

              files.push({
                key: `${this.bucketName}/${objectName}`,
                filename,
                contentType: metadata.metaData?.['content-type'] || 'application/octet-stream',
                size: obj.size || 0,
                url: this.getPublicUrl(orgId, projectId, categoryFromPath, filename),
                createdAt: obj.lastModified || new Date()
              });
            } catch (error) {
              console.error(`Error processing file ${objectName}:`, error);
            }
          })()
        );
      });

      stream.on('end', () => {
        Promise.all(pending)
          .then(() => resolve(files))
          .catch(reject);
      });
      stream.on('error', reject);
    });
  }

  parseKey(key: string): {
    orgId: string;
    projectId: string;
    category: 'uploads' | 'generated' | 'references';
    filename: string;
  } | null {
    try {
      // Key format: banatie/orgId/projectId/category/year-month/filename
      const match = key.match(/^banatie\/([^/]+)\/([^/]+)\/(uploads|generated|references)\/[^/]+\/(.+)$/);

      if (!match) {
        return null;
      }

      const [, orgId, projectId, category, filename] = match;

      if (!orgId || !projectId || !category || !filename) {
        return null;
      }

      return {
        orgId,
        projectId,
        category: category as 'uploads' | 'generated' | 'references',
        filename
      };
    } catch {
      return null;
    }
  }
}
@@ -0,0 +1,57 @@
import { StorageService } from './StorageService';
import { MinioStorageService } from './MinioStorageService';

export class StorageFactory {
  private static instance: StorageService | null = null;

  static getInstance(): StorageService {
    if (!this.instance) {
      this.instance = this.createStorageService();
    }
    return this.instance;
  }

  private static createStorageService(): StorageService {
    const storageType = process.env['STORAGE_TYPE'] || 'minio';

    switch (storageType.toLowerCase()) {
      case 'minio': {
        const endpoint = process.env['MINIO_ENDPOINT'];
        const accessKey = process.env['MINIO_ACCESS_KEY'];
        const secretKey = process.env['MINIO_SECRET_KEY'];
        const useSSL = process.env['MINIO_USE_SSL'] === 'true';
        const bucketName = process.env['MINIO_BUCKET_NAME'] || 'banatie';
        const publicUrl = process.env['MINIO_PUBLIC_URL'];

        if (!endpoint || !accessKey || !secretKey) {
          throw new Error(
            'MinIO configuration missing. Required: MINIO_ENDPOINT, MINIO_ACCESS_KEY, MINIO_SECRET_KEY'
          );
        }

        console.log(`Initializing MinIO Storage Service:`);
        console.log(`  Endpoint: ${endpoint}`);
        console.log(`  Bucket: ${bucketName}`);
        console.log(`  SSL: ${useSSL}`);
        console.log(`  Public URL: ${publicUrl}`);

        return new MinioStorageService(
          endpoint,
          accessKey,
          secretKey,
          useSSL,
          bucketName,
          publicUrl
        );
      }

      default:
        throw new Error(`Unsupported storage type: ${storageType}`);
    }
  }

  // Reset instance for testing
  static resetInstance(): void {
    this.instance = null;
  }
}