feat: v2.0 Sprint 3 - Multipart Upload, Testing & Documentation (Part 2)
Sprint 3 Complete - Cloud Storage Full Implementation

New Features:
✅ Multipart upload for large files (>100MB)
✅ Fixed 10MB part size and 10-way part concurrency
✅ MinIO testing infrastructure
✅ Comprehensive integration test script
✅ Complete cloud storage documentation

New Files:
- CLOUD.md - complete cloud storage guide (580+ lines)
- docker-compose.minio.yml - MinIO + PostgreSQL + MySQL test setup
- scripts/test_cloud_storage.sh - full integration test suite

Multipart Upload:
- Automatic for files >100MB
- 10MB part size for optimal performance
- 10 concurrent parts for faster uploads
- Progress tracking for multipart transfers
- Built on the AWS S3 Upload Manager

Testing Infrastructure:
- docker-compose.minio.yml:
  * MinIO S3-compatible storage
  * PostgreSQL 16 test database
  * MySQL 8.0 test database
  * Automatic bucket creation
  * Health checks for all services
- test_cloud_storage.sh (14 test scenarios):
  1. Service startup and health checks
  2. Test database creation with sample data
  3. Local backup creation
  4. Cloud upload to MinIO
  5. Cloud list verification
  6. Backup with cloud URI
  7. Database drop for restore test
  8. Restore from cloud URI
  9. Data verification after restore
  10. Cloud backup integrity verification
  11. Cleanup dry-run test
  12. Multiple backups creation
  13. Actual cleanup test
  14. Large-file multipart upload (>100MB)

Documentation (CLOUD.md):
- Quick start guide
- URI syntax documentation
- Configuration methods (4 approaches)
- All cloud commands with examples
- Provider-specific setup (AWS S3, MinIO, B2, GCS)
- Multipart upload details
- Progress tracking
- Metadata synchronization
- Best practices (security, performance, reliability)
- Troubleshooting guide
- Real-world examples
- FAQ section

Sprint 3 COMPLETE! 100% of requirements met.

Cloud storage features now at 100%:
✅ URI parser and support
✅ Backup/restore/verify/cleanup integration
✅ Multipart uploads
✅ Testing infrastructure
✅ Comprehensive documentation
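Note: the diff below threads a ProgressCallback through both upload paths via NewProgressReader, but neither is defined in this commit. A minimal sketch of the shape the code appears to assume — the two names come from the diff, while the callback signature and field layout are assumptions:

package storage

import "io"

// ProgressCallback is assumed to receive the bytes transferred so far
// and the total transfer size; only the diff's call sites constrain it.
type ProgressCallback func(transferred, total int64)

// progressReader wraps an io.Reader and reports cumulative progress on
// every Read. This body is an assumed implementation, not repo code.
// The diff only constructs it when the callback is non-nil.
type progressReader struct {
	r           io.Reader
	total       int64
	transferred int64
	cb          ProgressCallback
}

func NewProgressReader(r io.Reader, total int64, cb ProgressCallback) io.Reader {
	return &progressReader{r: r, total: total, cb: cb}
}

func (p *progressReader) Read(b []byte) (int, error) {
	n, err := p.r.Read(b)
	p.transferred += int64(n)
	p.cb(p.transferred, p.total)
	return n, err
}

One detail worth noting: a wrapper like this only implements Read, not ReadAt, so as I understand the SDK's transfer manager it will read the body sequentially into part-sized buffers, and a cumulative counter reports sane progress for the multipart path as well.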
@@ -11,6 +11,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/config"
 	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 )
 
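The only import change is the SDK's high-level transfer manager (feature/s3/manager), which implements multipart uploads on top of the raw s3 client. Its built-in defaults are smaller than what this commit configures; a quick check — these are real exported constants in aws-sdk-go-v2, values as of current SDK releases:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
)

// Prints the transfer manager's defaults, which the uploader options
// later in this diff override to 10 MB parts / 10-way concurrency.
func main() {
	fmt.Println(manager.DefaultUploadPartSize)    // 5242880 (5 MiB)
	fmt.Println(manager.DefaultUploadConcurrency) // 5
}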
@@ -92,7 +93,7 @@ func (s *S3Backend) buildKey(filename string) string {
 	return filepath.Join(s.prefix, filename)
 }
 
-// Upload uploads a file to S3
+// Upload uploads a file to S3 with multipart support for large files
 func (s *S3Backend) Upload(ctx context.Context, localPath, remotePath string, progress ProgressCallback) error {
 	// Open local file
 	file, err := os.Open(localPath)
@@ -108,17 +109,30 @@ func (s *S3Backend) Upload(ctx context.Context, localPath, remotePath string, pr
 	}
 	fileSize := stat.Size()
 
+	// Build S3 key
+	key := s.buildKey(remotePath)
+
+	// Use multipart upload for files larger than 100MB
+	const multipartThreshold = 100 * 1024 * 1024 // 100 MB
+
+	if fileSize > multipartThreshold {
+		return s.uploadMultipart(ctx, file, key, fileSize, progress)
+	}
+
+	// Simple upload for smaller files
+	return s.uploadSimple(ctx, file, key, fileSize, progress)
+}
+
+// uploadSimple performs a simple single-part upload
+func (s *S3Backend) uploadSimple(ctx context.Context, file *os.File, key string, fileSize int64, progress ProgressCallback) error {
 	// Create progress reader
 	var reader io.Reader = file
 	if progress != nil {
 		reader = NewProgressReader(file, fileSize, progress)
 	}
 
-	// Build S3 key
-	key := s.buildKey(remotePath)
-
 	// Upload to S3
-	_, err = s.client.PutObject(ctx, &s3.PutObjectInput{
+	_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
 		Bucket: aws.String(s.bucket),
 		Key:    aws.String(key),
 		Body:   reader,
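Some quick arithmetic on the numbers in play: with 10 MB parts, a file just over the 100 MB threshold becomes ~11 parts, and a 1 GiB backup becomes 103, uploaded at most 10 at a time. S3 also caps a multipart upload at 10,000 parts, so a fixed 10 MB part size puts a ceiling of roughly 100 GB on a single object; backups beyond that would need a larger PartSize. A standalone sketch of the math, not repo code:

package main

import "fmt"

const (
	partSize = 10 * 1024 * 1024 // mirrors u.PartSize in the next hunk
	maxParts = 10000            // S3's per-upload part limit
)

func main() {
	fileSize := int64(1 << 30)                    // a 1 GiB backup
	parts := (fileSize + partSize - 1) / partSize // ceiling division
	fmt.Println(parts)                            // 103 parts, at most 10 in flight

	ceiling := int64(partSize) * maxParts // largest object at this part size
	fmt.Println(ceiling)                  // 104857600000 bytes (~104 GB)
}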
@@ -131,6 +145,40 @@ func (s *S3Backend) Upload(ctx context.Context, localPath, remotePath string, pr
 	return nil
 }
 
+// uploadMultipart performs a multipart upload for large files
+func (s *S3Backend) uploadMultipart(ctx context.Context, file *os.File, key string, fileSize int64, progress ProgressCallback) error {
+	// Create uploader with custom options
+	uploader := manager.NewUploader(s.client, func(u *manager.Uploader) {
+		// Part size: 10MB
+		u.PartSize = 10 * 1024 * 1024
+
+		// Upload up to 10 parts concurrently
+		u.Concurrency = 10
+
+		// Abort and remove already-uploaded parts on failure
+		u.LeavePartsOnError = false
+	})
+
+	// Wrap file with progress reader
+	var reader io.Reader = file
+	if progress != nil {
+		reader = NewProgressReader(file, fileSize, progress)
+	}
+
+	// Upload with multipart
+	_, err := uploader.Upload(ctx, &s3.PutObjectInput{
+		Bucket: aws.String(s.bucket),
+		Key:    aws.String(key),
+		Body:   reader,
+	})
+
+	if err != nil {
+		return fmt.Errorf("multipart upload failed: %w", err)
+	}
+
+	return nil
+}
+
 // Download downloads a file from S3
 func (s *S3Backend) Download(ctx context.Context, remotePath, localPath string, progress ProgressCallback) error {
 	// Build S3 key
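Two behavioral notes, then a hedged call-site sketch. With LeavePartsOnError set to false, the transfer manager aborts a failed multipart upload so incomplete parts don't linger in the bucket and quietly accrue storage charges. And callers never choose a path themselves: anything over 100 MB transparently switches to multipart. The sketch below shows what a call site might look like; the Uploader interface, uploadBackup helper, and paths are illustrative inventions so the example compiles standalone, while Upload and the callback shape come from the diff:

package cloudexample

import (
	"context"
	"fmt"
)

// ProgressCallback mirrors the shape assumed earlier on this page.
type ProgressCallback func(transferred, total int64)

// Uploader captures just the method this example needs; the diff's
// S3Backend exposes a method with this shape.
type Uploader interface {
	Upload(ctx context.Context, localPath, remotePath string, progress ProgressCallback) error
}

// uploadBackup pushes one local backup to the backend, printing a
// running percentage as bytes flow through the progress callback.
func uploadBackup(ctx context.Context, b Uploader) error {
	return b.Upload(ctx, "/var/backups/db.dump", "nightly/db.dump",
		func(transferred, total int64) {
			fmt.Printf("\rupload: %3d%%", transferred*100/total)
		})
}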