Hey there, fellow developers! 👋 Today, we're diving deep into implementing file uploads with GoFrame, one of Go's most developer-friendly web frameworks. Whether you're building a social media platform, a document management system, or just need to handle file uploads in your web app, this guide has got you covered.
What We'll Cover
- Basic file uploads with GoFrame
- Security best practices and validations
- Handling large files with chunked uploads
- Cloud storage integration
- Performance optimization tips
- Real-world examples and use cases
Why GoFrame for File Uploads?
Before we dive in, you might be wondering: "Why should I use GoFrame for file uploads?" Well, here's what makes it stand out:
- Clean API Design: GoFrame makes file handling intuitive and straightforward
- Built-in Security: Comes with robust file validation mechanisms
- Performance: Excellent handling of concurrent uploads
- Flexibility: Easy integration with cloud storage services
Let's see how GoFrame compares to other popular Go frameworks:
| Feature | GoFrame | Gin | Echo |
|---|---|---|---|
| Single File Upload | ✅ | ✅ | ✅ |
| Multiple File Upload | ✅ | ✅ | ✅ |
| Built-in File Validation | ✅ | ❌ | ❌ |
| Chunked Upload Support | ✅ | ❌ | ❌ |
Getting Started: Basic File Upload
Let's start with a simple file upload example. First, install GoFrame:
go get github.com/gogf/gf/v2
Now, let's create a basic file upload server:
package main
import (
"github.com/gogf/gf/v2/frame/g"
"github.com/gogf/gf/v2/net/ghttp"
)
// main starts a GoFrame HTTP server exposing a single file-upload
// endpoint at POST /api/upload on port 8080.
func main() {
	server := g.Server()
	server.SetPort(8080)
	server.Group("/api", func(group *ghttp.RouterGroup) {
		group.POST("/upload", handleUpload)
	})
	server.Run()
}
// handleUpload receives a multipart file under the form field "file"
// and stores it in ./uploads/, answering with a small JSON envelope:
// code 0 plus filename/size on success, code 1 plus a message on
// failure.
func handleUpload(r *ghttp.Request) {
	upload := r.GetUploadFile("file")
	if upload == nil {
		r.Response.WriteJson(g.Map{"code": 1, "message": "No file found"})
		return
	}
	// Second argument enables random renaming, so two uploads with the
	// same client filename cannot clobber each other.
	savedName, err := upload.Save("./uploads/", true)
	if err != nil {
		r.Response.WriteJson(g.Map{"code": 1, "message": "Upload failed: " + err.Error()})
		return
	}
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Success!",
		"data": g.Map{
			"filename": savedName,
			"size":     upload.Size,
		},
	})
}
To test it out, create this simple HTML form:
<form action="/api/upload" method="post" enctype="multipart/form-data">
  <input type="file" name="file">
  <button type="submit">Upload</button>
</form>
Comprehensive Error Handling
Before we dive into validation, let's set up proper error handling that we'll use throughout our upload system:
// UploadError is the structured error type used throughout the upload
// pipeline. Code is an application-level error code (see the ErrCode*
// constants), Message is a human-readable summary, and Err optionally
// carries the underlying cause.
type UploadError struct {
	Code    int
	Message string
	Err     error
}

// Error implements the error interface, appending the wrapped cause
// (when present) to the message.
func (e *UploadError) Error() string {
	if e.Err != nil {
		return fmt.Sprintf("%s: %v", e.Message, e.Err)
	}
	return e.Message
}

// Unwrap exposes the underlying cause so callers can inspect wrapped
// UploadError values with errors.Is and errors.As.
func (e *UploadError) Unwrap() error {
	return e.Err
}
// Application-level error codes embedded in JSON responses. The block
// starts at 1000 to keep these visually distinct from HTTP status
// codes.
const (
	ErrCodeNoFile           = iota + 1000 // no file present in the request
	ErrCodeInvalidType                    // MIME type not in the allow-list
	ErrCodeSizeExceeded                   // file larger than the configured limit
	ErrCodeStorageFull                    // storage backend out of space
	ErrCodePermissionDenied               // caller not allowed to upload
)
// ErrorHandler is a GoFrame middleware that converts any error
// recorded on the request (via r.SetError) into a JSON response, so
// handlers only need to set the error and return.
func ErrorHandler(r *ghttp.Request) {
	// Run the downstream handler chain first; the request error is
	// inspected only after it has finished.
	r.Middleware.Next()
	if err := r.GetError(); err != nil {
		// Type assert to check if it's our custom error.
		// NOTE(review): a plain type assertion will not match an
		// *UploadError wrapped inside another error; errors.As would.
		// Confirm handlers always set *UploadError directly.
		if uploadErr, ok := err.(*UploadError); ok {
			r.Response.WriteJson(g.Map{
				"code":    uploadErr.Code,
				"message": uploadErr.Message,
				"error":   uploadErr.Error(),
			})
		} else {
			// Handle unexpected errors with a generic 500 envelope.
			r.Response.WriteJson(g.Map{
				"code":    500,
				"message": "Internal server error",
				"error":   err.Error(),
			})
		}
	}
}
// Usage example: with ErrorHandler registered, a handler records an
// *UploadError on the request and returns; the middleware renders it
// as JSON. (The body here is intentionally truncated to the first
// check.)
func handleUpload(r *ghttp.Request) {
	file := r.GetUploadFile("file")
	if file == nil {
		r.SetError(&UploadError{
			Code:    ErrCodeNoFile,
			Message: "No file provided",
		})
		return
	}
	// More error handling examples...
}
Making It Secure: File Validation
Security is crucial when handling file uploads. Here's a robust validation middleware you can use:
// FileValidator is a middleware that rejects uploads before they reach
// the handler: the file must be present, at most 10MB, and of an
// allow-listed MIME type sniffed from the file's content (not the
// client-supplied header), so a renamed executable is still caught.
func FileValidator(r *ghttp.Request) {
	file := r.GetUploadFile("file")
	if file == nil {
		respondWithError(r, "Please select a file")
		return
	}
	// Size validation (10MB limit)
	if file.Size > 10*1024*1024 {
		respondWithError(r, "File too large (max 10MB)")
		return
	}
	// Type validation
	allowedTypes := map[string]bool{
		"image/jpeg":      true,
		"image/png":       true,
		"image/gif":       true,
		"application/pdf": true,
	}
	// Read up to the first 512 bytes; that is all DetectContentType
	// examines.
	buffer := make([]byte, 512)
	f, err := file.Open()
	if err != nil {
		respondWithError(r, "Cannot read file")
		return
	}
	defer f.Close()
	// A short read is legitimate for files smaller than 512 bytes —
	// Read may return n > 0 together with io.EOF. Only treat it as a
	// failure when nothing at all was read (the original rejected any
	// non-nil error, so small files never validated).
	n, err := f.Read(buffer)
	if err != nil && n == 0 {
		respondWithError(r, "Cannot read file")
		return
	}
	// Sniff the MIME type from the bytes actually read; passing the
	// zero-padded full buffer can change the detection result.
	fileType := http.DetectContentType(buffer[:n])
	if !allowedTypes[fileType] {
		respondWithError(r, "File type not allowed")
		return
	}
	r.Middleware.Next()
}
// respondWithError writes a code-1 JSON error envelope and aborts the
// request: WriteJsonExit stops execution of the remaining handler
// chain after writing the response.
func respondWithError(r *ghttp.Request, message string) {
	r.Response.WriteJsonExit(g.Map{
		"code":    1,
		"message": message,
	})
}
Handling Large Files: Chunked Upload
When dealing with large files, chunked upload is your friend. Here's how to implement it:
// ChunkInfo identifies one piece of a chunked upload. FileId groups
// the chunks of a single logical file, ChunkNumber is the zero-based
// index of this piece, and TotalChunks is the expected total count.
type ChunkInfo struct {
	FileId      string `json:"fileId"`
	ChunkNumber int    `json:"chunkNumber"`
	TotalChunks int    `json:"totalChunks"`
}
// handleChunkUpload stores one chunk of a chunked upload under
// ./uploads/chunks/<fileId>/chunk_<n> and, once every chunk has
// arrived, triggers the merge into the final file.
func handleChunkUpload(r *ghttp.Request) {
	var info ChunkInfo
	if err := r.Parse(&info); err != nil {
		respondWithError(r, "Invalid chunk info")
		return
	}
	// The client-supplied FileId becomes part of a filesystem path:
	// reject anything that could escape the chunks directory
	// ("../../etc", absolute paths, embedded separators).
	if !isSafeFileId(info.FileId) {
		respondWithError(r, "Invalid chunk info")
		return
	}
	// Reject out-of-range chunk indices before they reach the path.
	if info.TotalChunks <= 0 || info.ChunkNumber < 0 || info.ChunkNumber >= info.TotalChunks {
		respondWithError(r, "Invalid chunk info")
		return
	}
	chunk := r.GetUploadFile("chunk")
	if chunk == nil {
		respondWithError(r, "No chunk found")
		return
	}
	// Create chunks directory
	chunkDir := fmt.Sprintf("./uploads/chunks/%s", info.FileId)
	if err := os.MkdirAll(chunkDir, 0755); err != nil {
		respondWithError(r, "Failed to create chunk directory")
		return
	}
	// Save chunk
	// NOTE(review): gf's UploadFile.Save treats its first argument as
	// a directory — confirm chunks land at the intended path rather
	// than inside a directory named chunk_<n>.
	chunkPath := fmt.Sprintf("%s/chunk_%d", chunkDir, info.ChunkNumber)
	if _, err := chunk.Save(chunkPath, false); err != nil {
		respondWithError(r, "Failed to save chunk")
		return
	}
	// Check if upload is complete
	if isUploadComplete(chunkDir, info.TotalChunks) {
		mergeChunks(info.FileId)
	}
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Chunk uploaded successfully",
	})
}

// isSafeFileId reports whether id is non-empty and contains only
// ASCII letters, digits, '-' and '_', i.e. cannot influence path
// resolution.
func isSafeFileId(id string) bool {
	if id == "" {
		return false
	}
	for i := 0; i < len(id); i++ {
		c := id[i]
		switch {
		case c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z', c >= '0' && c <= '9', c == '-', c == '_':
		default:
			return false
		}
	}
	return true
}
Cloud Storage Integration
Most real-world applications store files in the cloud. Here's a clean way to integrate with cloud storage (using AWS S3 as an example):
// S3Uploader wraps an AWS S3 client together with the destination
// bucket name.
type S3Uploader struct {
	bucket string
	client *s3.Client
}
// NewS3Uploader builds an uploader for the given bucket using the AWS
// default configuration chain (environment, shared config, roles).
//
// NOTE(review): log.Fatal in a constructor terminates the whole
// process on a configuration error; returning (*S3Uploader, error)
// would let callers decide how to fail.
func NewS3Uploader(bucket string) *S3Uploader {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	return &S3Uploader{
		bucket: bucket,
		client: s3.NewFromConfig(cfg),
	}
}
// Upload streams the uploaded file to S3 under a date-partitioned key
// (uploads/YYYY/MM/DD/<filename>) and returns that key.
//
// NOTE(review): file.Filename is client-controlled and embedded in
// the object key unsanitized — a name containing '/' changes the key
// layout. Consider sanitizing before use.
func (u *S3Uploader) Upload(file *ghttp.UploadFile) (string, error) {
	f, err := file.Open()
	if err != nil {
		return "", err
	}
	defer f.Close()
	// Go's reference-time layout: 2006/01/02 means year/month/day.
	key := fmt.Sprintf("uploads/%s/%s",
		time.Now().Format("2006/01/02"),
		file.Filename)
	_, err = u.client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String(u.bucket),
		Key:    aws.String(key),
		Body:   f,
	})
	if err != nil {
		return "", err
	}
	return key, nil
}
Performance Tips 🚀
Here are some battle-tested tips for optimizing your file upload system:
Use Buffered Reading
// processLargeFile streams the upload through a fixed 32KB buffer so
// arbitrarily large files are handled in constant memory.
func processLargeFile(file *ghttp.UploadFile) error {
	buffer := make([]byte, 32*1024) // 32KB buffer
	reader, err := file.Open()
	if err != nil {
		return err
	}
	defer reader.Close()
	for {
		n, err := reader.Read(buffer)
		// Per the io.Reader contract, a call may return data AND an
		// error (including io.EOF) together — consume the bytes first
		// so the final partial chunk is not silently dropped (the
		// original broke on io.EOF before processing it).
		if n > 0 {
			// Process buffer[:n]...
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
	}
	return nil
}
Implement Rate Limiting
// UploadLimiter bounds the number of uploads processed concurrently,
// using a buffered channel as a counting semaphore.
type UploadLimiter struct {
	tokens chan struct{}
}

// NewUploadLimiter returns a limiter admitting at most capacity
// concurrent uploads.
func NewUploadLimiter(capacity int) *UploadLimiter {
	limiter := &UploadLimiter{tokens: make(chan struct{}, capacity)}
	return limiter
}

// Acquire claims a slot, blocking while the limiter is at capacity.
func (l *UploadLimiter) Acquire() {
	l.tokens <- struct{}{}
}

// Release frees a slot previously claimed with Acquire.
func (l *UploadLimiter) Release() {
	<-l.tokens
}
Common Pitfalls to Avoid ⚠️
1. Not Closing File Handles
   - Always use `defer file.Close()`
   - Be careful with nested function calls
2. Memory Leaks
   - Clean up temporary files
   - Use streaming for large files
   - Implement timeout mechanisms
3. Security Issues
   - Always validate file types
   - Implement file size limits
   - Use secure file names
Real-World Examples
Let's look at some common scenarios you might encounter in production environments.
1. Image Upload Service with Multiple Formats
// ImageService uploads images and generates thumbnails in several
// sizes and formats, storing everything through an S3Uploader.
type ImageService struct {
	uploader    *S3Uploader
	thumbSizes  []int                     // thumbnail widths, in pixels
	maxFileSize int64                     // per-upload size cap, in bytes
	formats     map[string]imaging.Format // output extension -> encoder format
}
// NewImageService returns a service producing 100/300/600px-wide
// thumbnails as JPEG, PNG and WebP, with a 10MB upload cap.
// NOTE(review): the uploader field is left nil here — confirm callers
// inject it before Upload is used.
func NewImageService() *ImageService {
	return &ImageService{
		thumbSizes:  []int{100, 300, 600},
		maxFileSize: 10 * 1024 * 1024, // 10MB
		formats: map[string]imaging.Format{
			".jpg":  imaging.JPEG,
			".jpeg": imaging.JPEG,
			".png":  imaging.PNG,
			".webp": imaging.WEBP,
		},
	}
}
// Upload validates the "image" form file against the configured size
// cap, generates and stores thumbnails, and responds with the
// resulting URLs. Failures are recorded via r.SetError for the
// ErrorHandler middleware to render.
func (s *ImageService) Upload(r *ghttp.Request) {
	file := r.GetUploadFile("image")
	if file == nil {
		r.SetError(&UploadError{
			Code:    ErrCodeNoFile,
			Message: "No image file provided",
		})
		return
	}
	// Size validation against the configured byte cap.
	if file.Size > s.maxFileSize {
		r.SetError(&UploadError{
			Code:    ErrCodeSizeExceeded,
			Message: fmt.Sprintf("File size exceeds %dMB limit", s.maxFileSize/1024/1024),
		})
		return
	}
	// Process image; processImage returns *UploadError values, which
	// SetError passes through to the middleware unchanged.
	results, err := s.processImage(file)
	if err != nil {
		r.SetError(err)
		return
	}
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Success",
		"data":    results,
	})
}
// processImage decodes the uploaded image and produces one thumbnail
// per configured (size, format) pair, uploading each to cloud
// storage. It returns a map of "<w>x<h><ext>" keys to storage URLs.
func (s *ImageService) processImage(file *ghttp.UploadFile) (map[string]string, error) {
	// Open and decode the image (decoding doubles as validation).
	img, err := s.openAndValidateImage(file)
	if err != nil {
		return nil, err
	}
	results := make(map[string]string)
	for _, size := range s.thumbSizes {
		// Resize once per size: the result depends only on the size,
		// so the original's per-format resize inside the inner loop
		// redid identical Lanczos work for every extension.
		thumb := imaging.Resize(img, size, 0, imaging.Lanczos)
		for ext, format := range s.formats {
			// Create a temporary file holding the encoded thumbnail.
			// NOTE(review): nothing visible here removes the temp file
			// after upload — confirm saveTempImage/Upload clean up, or
			// temp storage will grow without bound.
			tempFile, err := s.saveTempImage(thumb, format)
			if err != nil {
				return nil, &UploadError{
					Code:    500,
					Message: "Failed to create thumbnail",
					Err:     err,
				}
			}
			// Upload to cloud storage.
			url, err := s.uploader.Upload(tempFile)
			if err != nil {
				return nil, &UploadError{
					Code:    500,
					Message: "Failed to upload thumbnail",
					Err:     err,
				}
			}
			results[fmt.Sprintf("%dx%d%s", size, size, ext)] = url
		}
	}
	return results, nil
}
// openAndValidateImage opens and decodes the uploaded image,
// returning the decoded pixels or an *UploadError. Decoding doubles
// as validation: non-image payloads fail here regardless of filename.
//
// NOTE(review): image.Decode only recognizes formats whose decoders
// are registered — confirm the program blank-imports image/jpeg,
// image/png, etc., or every upload fails as "Invalid image format".
func (s *ImageService) openAndValidateImage(file *ghttp.UploadFile) (image.Image, error) {
	f, err := file.Open()
	if err != nil {
		return nil, &UploadError{
			Code:    500,
			Message: "Failed to open image",
			Err:     err,
		}
	}
	defer f.Close()
	// Decode image; format is the registered decoder name ("jpeg",
	// "png", ...).
	img, format, err := image.Decode(f)
	if err != nil {
		return nil, &UploadError{
			Code:    ErrCodeInvalidType,
			Message: "Invalid image format",
			Err:     err,
		}
	}
	// Additional format-specific validations (placeholders).
	switch format {
	case "jpeg":
		// Check for JPEG-specific issues
	case "png":
		// Check for PNG-specific issues
	}
	return img, nil
}
2. Document Upload Service with Virus Scanning
// DocumentService validates, virus-scans, and stores uploaded
// documents.
type DocumentService struct {
	uploader     *S3Uploader
	virusScanner VirusScanner
	allowedTypes map[string]bool // MIME allow-list checked in validateDocument
}
// NewDocumentService returns a service accepting PDF and Word
// documents (.doc and .docx MIME types).
// NOTE(review): uploader and virusScanner are left nil — confirm they
// are injected before Upload is called.
func NewDocumentService() *DocumentService {
	return &DocumentService{
		allowedTypes: map[string]bool{
			"application/pdf":    true,
			"application/msword": true,
			"application/vnd.openxmlformats-officedocument.wordprocessingml.document": true,
		},
	}
}
// Upload handles a document upload end to end: presence check, MIME
// validation, virus scan, preview generation, then storage of the
// original. Each stage reports failures through r.SetError for the
// ErrorHandler middleware to render.
func (s *DocumentService) Upload(r *ghttp.Request) {
	file := r.GetUploadFile("document")
	if file == nil {
		r.SetError(&UploadError{
			Code:    ErrCodeNoFile,
			Message: "No document provided",
		})
		return
	}
	// Validate document type by content sniffing.
	if err := s.validateDocument(file); err != nil {
		r.SetError(err)
		return
	}
	// Scan for viruses before the file is persisted anywhere.
	if err := s.scanDocument(file); err != nil {
		r.SetError(err)
		return
	}
	// Generate preview (errors from the helper are passed through).
	previewURL, err := s.generatePreview(file)
	if err != nil {
		r.SetError(err)
		return
	}
	// Upload original file to cloud storage.
	originalURL, err := s.uploader.Upload(file)
	if err != nil {
		r.SetError(&UploadError{
			Code:    500,
			Message: "Failed to upload document",
			Err:     err,
		})
		return
	}
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Success",
		"data": g.Map{
			"url":     originalURL,
			"preview": previewURL,
		},
	})
}
// validateDocument sniffs the document's MIME type from its first 512
// bytes and rejects anything outside the service allow-list; content
// sniffing cannot be fooled by a renamed extension.
//
// NOTE(review): http.DetectContentType knows a limited type set —
// .docx files typically sniff as application/zip rather than the
// vnd.openxmlformats type in allowedTypes, so they may be rejected
// here. Verify against real files.
func (s *DocumentService) validateDocument(file *ghttp.UploadFile) error {
	// Read first 512 bytes — all DetectContentType examines.
	buffer := make([]byte, 512)
	f, err := file.Open()
	if err != nil {
		return &UploadError{
			Code:    500,
			Message: "Failed to open document",
			Err:     err,
		}
	}
	defer f.Close()
	// io.EOF with a short read is fine for documents < 512 bytes.
	n, err := f.Read(buffer)
	if err != nil && err != io.EOF {
		return &UploadError{
			Code:    500,
			Message: "Failed to read document",
			Err:     err,
		}
	}
	// Check MIME type using only the bytes actually read.
	mimeType := http.DetectContentType(buffer[:n])
	if !s.allowedTypes[mimeType] {
		return &UploadError{
			Code:    ErrCodeInvalidType,
			Message: "Unsupported document type",
		}
	}
	return nil
}
// scanDocument streams the uploaded file through the configured virus
// scanner, distinguishing scanner failures (infrastructure errors)
// from a positive detection (rejected content).
func (s *DocumentService) scanDocument(file *ghttp.UploadFile) error {
	f, err := file.Open()
	if err != nil {
		return &UploadError{
			Code:    500,
			Message: "Failed to open document for scanning",
			Err:     err,
		}
	}
	defer f.Close()
	// Delegate the actual scanning to the injected scanner.
	scanResult, err := s.virusScanner.Scan(f)
	if err != nil {
		return &UploadError{
			Code:    500,
			Message: "Virus scan failed",
			Err:     err,
		}
	}
	// NOTE(review): ErrCodeInvalidType is reused for an infected file;
	// a dedicated error code would let clients tell the cases apart.
	if !scanResult.Clean {
		return &UploadError{
			Code:    ErrCodeInvalidType,
			Message: "Document failed security scan",
		}
	}
	return nil
}
3. Image Upload Service with Thumbnails
Here's a compact example of an image upload service that uploads the original plus generated thumbnails (resumable uploads are covered in the next section):
// ImageService (simplified variant for this example) pairs the S3
// uploader with the set of thumbnail widths to generate.
type ImageService struct {
	uploader   *S3Uploader
	thumbSizes []int // thumbnail widths, in pixels
}
// Upload is the compact thumbnail pipeline: validate the image,
// generate thumbnails, then upload the original plus every thumbnail
// and return all URLs keyed by variant ("original", "thumb_<size>").
func (s *ImageService) Upload(r *ghttp.Request) {
	file := r.GetUploadFile("image")
	if file == nil {
		respondWithError(r, "No image found")
		return
	}
	// Validate image content via the service helper.
	if !s.validateImage(file) {
		respondWithError(r, "Invalid image")
		return
	}
	// Generate thumbnails, keyed by pixel size.
	thumbnails, err := s.generateThumbnails(file)
	if err != nil {
		respondWithError(r, "Failed to generate thumbnails")
		return
	}
	// Upload original and thumbnails, collecting resulting URLs.
	urls := make(map[string]string)
	urls["original"], err = s.uploader.Upload(file)
	if err != nil {
		respondWithError(r, "Upload failed")
		return
	}
	for size, thumb := range thumbnails {
		urls[fmt.Sprintf("thumb_%d", size)], err = s.uploader.Upload(thumb)
		if err != nil {
			respondWithError(r, "Thumbnail upload failed")
			return
		}
	}
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Success",
		"data":    urls,
	})
}
4. Fault-Tolerant Upload Manager
Here's a robust implementation that handles network issues and allows resuming interrupted uploads:
// UploadManager coordinates resumable chunked uploads: persistent
// progress lives in db, chunk bytes flow through uploader, and
// chunkSize controls how files are split.
type UploadManager struct {
	db        *gorm.DB
	uploader  *S3Uploader
	stateDir  string // NOTE(review): not referenced in this file — confirm it is still needed
	retries   int    // per-chunk upload attempts before giving up
	chunkSize int64  // bytes per chunk
}
// UploadState is the persisted progress record for one resumable
// upload, keyed by a client-visible UUID.
// NOTE(review): gorm cannot store a []int column like ChunksUploaded
// without a serializer tag or a join table — confirm the schema.
type UploadState struct {
	ID             string `gorm:"primaryKey"`
	Filename       string
	Size           int64
	ChunksTotal    int
	ChunksUploaded []int  // chunk indices received so far
	Status         string // "pending" or "complete"
	CreatedAt      time.Time
	UpdatedAt      time.Time
}
// StartUpload initializes a resumable upload: it records an
// UploadState row with the file metadata and chunk plan, then returns
// the upload ID plus chunking parameters to the client.
func (m *UploadManager) StartUpload(r *ghttp.Request) {
	file := r.GetUploadFile("file")
	if file == nil {
		r.SetError(&UploadError{
			Code:    ErrCodeNoFile,
			Message: "No file provided",
		})
		return
	}
	// Compute the chunk plan up front so the state row can be written
	// in a single insert; the original Create-then-Save pair issued
	// two writes and left a window where the row existed with
	// ChunksTotal == 0.
	chunksTotal := int(math.Ceil(float64(file.Size) / float64(m.chunkSize)))
	state := &UploadState{
		ID:          uuid.New().String(),
		Filename:    file.Filename,
		Size:        file.Size,
		ChunksTotal: chunksTotal,
		Status:      "pending",
	}
	if err := m.db.Create(state).Error; err != nil {
		r.SetError(&UploadError{
			Code:    500,
			Message: "Failed to initialize upload",
			Err:     err,
		})
		return
	}
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Upload initialized",
		"data": g.Map{
			"uploadId":    state.ID,
			"chunksTotal": chunksTotal,
			"chunkSize":   m.chunkSize,
		},
	})
}
// UploadChunk stores one chunk of an in-progress upload: the chunk is
// pushed to cloud storage with bounded retries and the persisted
// state is updated; when the last distinct chunk arrives the upload
// is marked complete.
func (m *UploadManager) UploadChunk(r *ghttp.Request) {
	uploadID := r.Get("uploadId").String()
	chunkNum := r.Get("chunkNumber").Int()
	// Load the persisted upload state.
	var state UploadState
	if err := m.db.First(&state, "id = ?", uploadID).Error; err != nil {
		r.SetError(&UploadError{
			Code:    500,
			Message: "Upload not found",
			Err:     err,
		})
		return
	}
	// Validate the chunk number on both ends: the original accepted
	// negative indices, which would corrupt the storage key and the
	// completion count below.
	if chunkNum < 0 || chunkNum >= state.ChunksTotal {
		r.SetError(&UploadError{
			Code:    ErrCodeInvalidType,
			Message: "Invalid chunk number",
		})
		return
	}
	// Get the chunk file from the form.
	chunk := r.GetUploadFile("chunk")
	if chunk == nil {
		r.SetError(&UploadError{
			Code:    ErrCodeNoFile,
			Message: "No chunk provided",
		})
		return
	}
	// Upload the chunk with retries and linear backoff.
	var uploadErr error
	for i := 0; i < m.retries; i++ {
		if err := m.uploadChunkToStorage(chunk, uploadID, chunkNum); err != nil {
			uploadErr = err
			time.Sleep(time.Second * time.Duration(i+1))
			continue
		}
		uploadErr = nil
		break
	}
	if uploadErr != nil {
		r.SetError(&UploadError{
			Code:    500,
			Message: "Failed to upload chunk after retries",
			Err:     uploadErr,
		})
		return
	}
	// Record the chunk at most once: len(ChunksUploaded) doubles as
	// the completion counter, and the original appended
	// unconditionally, so a retried/duplicated chunk could mark the
	// upload complete while chunks were still missing.
	seen := false
	for _, n := range state.ChunksUploaded {
		if n == chunkNum {
			seen = true
			break
		}
	}
	if !seen {
		state.ChunksUploaded = append(state.ChunksUploaded, chunkNum)
	}
	if len(state.ChunksUploaded) == state.ChunksTotal {
		state.Status = "complete"
	}
	if err := m.db.Save(&state).Error; err != nil {
		r.SetError(&UploadError{
			Code:    500,
			Message: "Failed to update state",
			Err:     err,
		})
		return
	}
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Chunk uploaded",
		"data": g.Map{
			"uploadId":       state.ID,
			"chunksUploaded": len(state.ChunksUploaded),
			"isComplete":     state.Status == "complete",
		},
	})
}
// uploadChunkToStorage streams a single chunk to cloud storage under
// the key uploads/<uploadID>/chunks/<chunkNum>.
func (m *UploadManager) uploadChunkToStorage(chunk *ghttp.UploadFile, uploadID string, chunkNum int) error {
	reader, err := chunk.Open()
	if err != nil {
		return fmt.Errorf("failed to open chunk: %w", err)
	}
	defer reader.Close()
	key := fmt.Sprintf("uploads/%s/chunks/%d", uploadID, chunkNum)
	// Hand the stream to the storage backend.
	if err := m.uploader.UploadPart(key, reader); err != nil {
		return fmt.Errorf("failed to upload chunk: %w", err)
	}
	return nil
}
// GetUploadStatus returns the current status of an upload (metadata,
// chunk counts, and percentage progress) as JSON.
func (m *UploadManager) GetUploadStatus(r *ghttp.Request) {
	uploadID := r.Get("uploadId").String()
	var state UploadState
	if err := m.db.First(&state, "id = ?", uploadID).Error; err != nil {
		r.SetError(&UploadError{
			Code:    500,
			Message: "Upload not found",
			Err:     err,
		})
		return
	}
	// Guard the percentage computation: ChunksTotal can be zero (an
	// empty file), and the original's unguarded float division then
	// produced NaN, which is not valid JSON.
	progress := 100.0
	if state.ChunksTotal > 0 {
		progress = float64(len(state.ChunksUploaded)) / float64(state.ChunksTotal) * 100
	}
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Success",
		"data": g.Map{
			"uploadId":       state.ID,
			"filename":       state.Filename,
			"size":           state.Size,
			"status":         state.Status,
			"chunksTotal":    state.ChunksTotal,
			"chunksUploaded": len(state.ChunksUploaded),
			"progress":       progress,
		},
	})
}
// CompleteUpload finalizes a chunked upload: it verifies every chunk
// has been recorded, merges them into the final object, and kicks off
// asynchronous cleanup of the now-redundant chunk objects.
func (m *UploadManager) CompleteUpload(r *ghttp.Request) {
	uploadID := r.Get("uploadId").String()
	var state UploadState
	if err := m.db.First(&state, "id = ?", uploadID).Error; err != nil {
		r.SetError(&UploadError{
			Code:    500,
			Message: "Upload not found",
			Err:     err,
		})
		return
	}
	// Refuse to merge until every chunk has been recorded.
	if len(state.ChunksUploaded) != state.ChunksTotal {
		r.SetError(&UploadError{
			Code:    400,
			Message: "Upload incomplete",
		})
		return
	}
	// Merge chunks into the final file.
	finalURL, err := m.mergeChunks(state)
	if err != nil {
		r.SetError(&UploadError{
			Code:    500,
			Message: "Failed to merge chunks",
			Err:     err,
		})
		return
	}
	// Fire-and-forget cleanup: failures are logged inside
	// cleanupChunks and do not affect the client response.
	go m.cleanupChunks(uploadID)
	r.Response.WriteJson(g.Map{
		"code":    0,
		"message": "Upload completed",
		"data": g.Map{
			"url": finalURL,
		},
	})
}
// mergeChunks assembles the stored chunk objects into the final file
// via a multipart-upload completion and returns the file's URL.
//
// NOTE(review): CompletedPart.ETag is filled with the chunk key here,
// but S3-style multipart completion requires the ETag returned when
// each part was uploaded — verify against the uploader implementation.
func (m *UploadManager) mergeChunks(state UploadState) (string, error) {
	destKey := fmt.Sprintf("uploads/%s/%s", state.ID, state.Filename)
	// Prepare multipart completion, verifying each chunk exists.
	var completedParts []CompletedPart
	for i := 0; i < state.ChunksTotal; i++ {
		chunkKey := fmt.Sprintf("uploads/%s/chunks/%d", state.ID, i)
		// Distinguish a lookup failure from a genuinely missing chunk:
		// the original wrapped err with %w in both cases, printing
		// %!w(<nil>) when the chunk was simply absent.
		exists, err := m.uploader.ObjectExists(chunkKey)
		if err != nil {
			return "", fmt.Errorf("checking chunk %d: %w", i, err)
		}
		if !exists {
			return "", fmt.Errorf("chunk %d missing", i)
		}
		completedParts = append(completedParts, CompletedPart{
			PartNumber: i + 1, // part numbers are 1-based
			ETag:       chunkKey,
		})
	}
	// Complete the multipart upload on the storage backend.
	if err := m.uploader.CompleteMultipartUpload(destKey, completedParts); err != nil {
		return "", fmt.Errorf("failed to complete multipart upload: %w", err)
	}
	return m.uploader.GetURL(destKey), nil
}
// cleanupChunks best-effort deletes every stored chunk object for the
// given upload. Errors are only logged because the merged file is
// already in place: leftovers cost storage, not correctness.
func (m *UploadManager) cleanupChunks(uploadID string) {
	prefix := fmt.Sprintf("uploads/%s/chunks/", uploadID)
	if err := m.uploader.DeleteByPrefix(prefix); err != nil {
		log.Printf("Failed to cleanup chunks for upload %s: %v", uploadID, err)
	}
}
5. Rate Limited Upload Handler
Here's an example of a rate-limited upload handler that prevents abuse:
// RateLimitedUploader enforces a global upload rate plus per-user
// daily quotas. quotas is guarded by mu; the limiter is safe for
// concurrent use on its own.
type RateLimitedUploader struct {
	limiter *rate.Limiter
	quotas  map[string]*UserQuota
	mu      sync.RWMutex
}
// UserQuota tracks one user's consumption inside the current 24-hour
// window, which starts at LastReset.
type UserQuota struct {
	DailyUploads int64 // uploads counted since LastReset
	DailyBytes   int64 // bytes counted since LastReset
	LastReset    time.Time
}
// NewRateLimitedUploader returns an uploader with empty quota state
// and a global limiter.
func NewRateLimitedUploader() *RateLimitedUploader {
	return &RateLimitedUploader{
		// NOTE(review): rate.Every(time.Second) refills one token per
		// second with a burst of 10 — not 10 uploads/second as the
		// original comment claimed. For a sustained 10/s, use
		// rate.Limit(10).
		limiter: rate.NewLimiter(rate.Every(time.Second), 10),
		quotas:  make(map[string]*UserQuota),
	}
}
// Handle throttles uploads with the global rate limiter and per-user
// daily quotas before processing the file.
//
// NOTE(review): the user is identified by a bare X-User-ID header —
// trivially spoofable unless set by an authenticating proxy. Also the
// quota pre-check charges r.ContentLength (whole multipart body)
// while the post-update charges file.Size; the two differ by the
// multipart framing overhead.
func (u *RateLimitedUploader) Handle(r *ghttp.Request) {
	userID := r.GetHeader("X-User-ID")
	// Global rate limit check.
	if !u.limiter.Allow() {
		r.SetError(&UploadError{
			Code:    429,
			Message: "Too many requests",
		})
		return
	}
	// Per-user quota check, sized by the request body length.
	if err := u.checkQuota(userID, r.ContentLength); err != nil {
		r.SetError(err)
		return
	}
	// Process upload.
	file := r.GetUploadFile("file")
	if file == nil {
		r.SetError(&UploadError{
			Code:    ErrCodeNoFile,
			Message: "No file provided",
		})
		return
	}
	// Charge the quota only after the upload was accepted.
	u.updateQuota(userID, file.Size)
	// Continue with upload processing...
}
// checkQuota verifies that accepting size more bytes keeps userID
// within the daily upload-count and byte quotas. The 24-hour window
// is rolled lazily on first access after it expires.
func (u *RateLimitedUploader) checkQuota(userID string, size int64) error {
	u.mu.Lock()
	defer u.mu.Unlock()
	q, ok := u.quotas[userID]
	if !ok {
		q = &UserQuota{LastReset: time.Now()}
		u.quotas[userID] = q
	}
	// Roll the daily window lazily instead of running a reset timer.
	if time.Since(q.LastReset) > 24*time.Hour {
		q.DailyUploads = 0
		q.DailyBytes = 0
		q.LastReset = time.Now()
	}
	switch {
	case q.DailyUploads >= 100:
		return &UploadError{
			Code:    429,
			Message: "Daily upload limit exceeded",
		}
	case q.DailyBytes+size > 1024*1024*1024: // 1GB daily limit
		return &UploadError{
			Code:    429,
			Message: "Daily bandwidth limit exceeded",
		}
	}
	return nil
}
// updateQuota charges a completed upload of size bytes against the
// user's daily counters. Unknown users are ignored: checkQuota is
// expected to have created the entry first.
func (u *RateLimitedUploader) updateQuota(userID string, size int64) {
	u.mu.Lock()
	defer u.mu.Unlock()
	q, ok := u.quotas[userID]
	if !ok {
		return
	}
	q.DailyUploads++
	q.DailyBytes += size
}
Testing Your Upload System
Here's how to properly test your upload implementation:
// TestUploadSystem spins up a real GoFrame server on a fixed port and
// exercises the upload endpoint end to end over HTTP.
//
// NOTE(review): the fixed port (8999) and the one-second sleep make
// this test flaky under parallel runs or slow CI; a readiness probe
// and an ephemeral port would be more robust.
func TestUploadSystem(t *testing.T) {
	// Setup test server.
	s := g.Server()
	s.Group("/api", func(group *ghttp.RouterGroup) {
		group.POST("/upload", handleUpload)
		// NOTE(review): middleware bound after the route registration —
		// confirm GoFrame applies group.Use to routes added earlier.
		group.Use(ErrorHandler)
	})
	s.SetPort(8999)
	go s.Run()
	time.Sleep(time.Second) // Wait for server to start
	t.Run("Basic Upload", func(t *testing.T) {
		// Create test file on disk.
		content := []byte("test content")
		tmpfile, err := os.CreateTemp("", "test")
		if err != nil {
			t.Fatal(err)
		}
		defer os.Remove(tmpfile.Name())
		if _, err := tmpfile.Write(content); err != nil {
			t.Fatal(err)
		}
		// Build the multipart body by hand so the request matches what
		// a browser form would send.
		body := &bytes.Buffer{}
		writer := multipart.NewWriter(body)
		part, err := writer.CreateFormFile("file", "test.txt")
		if err != nil {
			t.Fatal(err)
		}
		if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
			t.Fatal(err)
		}
		writer.Close()
		// Send the request to the live server.
		resp, err := http.Post("http://localhost:8999/api/upload",
			writer.FormDataContentType(), body)
		if err != nil {
			t.Fatal(err)
		}
		defer resp.Body.Close()
		// Check the JSON response envelope.
		var result struct {
			Code    int    `json:"code"`
			Message string `json:"message"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			t.Fatal(err)
		}
		if result.Code != 0 {
			t.Errorf("expected code 0, got %d", result.Code)
		}
	})
	t.Run("Error Cases", func(t *testing.T) {
		// Table-driven negative cases: each sends a crafted upload and
		// expects a specific application error code back.
		testCases := []struct {
			name        string
			fileSize    int64
			contentType string
			expectCode  int
		}{
			{
				name:       "File Too Large",
				fileSize:   11 * 1024 * 1024,
				expectCode: ErrCodeSizeExceeded,
			},
			{
				name:        "Invalid Content Type",
				fileSize:    1024,
				contentType: "application/executable",
				expectCode:  ErrCodeInvalidType,
			},
		}
		for _, tc := range testCases {
			t.Run(tc.name, func(t *testing.T) {
				// Random bytes so content sniffing cannot classify the
				// payload as an allowed type.
				content := make([]byte, tc.fileSize)
				rand.Read(content)
				// Create the multipart request body.
				body := &bytes.Buffer{}
				writer := multipart.NewWriter(body)
				part, err := writer.CreateFormFile("file", "test.txt")
				if err != nil {
					t.Fatal(err)
				}
				if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
					t.Fatal(err)
				}
				writer.Close()
				// Send the request.
				resp, err := http.Post("http://localhost:8999/api/upload",
					writer.FormDataContentType(), body)
				if err != nil {
					t.Fatal(err)
				}
				defer resp.Body.Close()
				// Check the returned application error code.
				var result struct {
					Code    int    `json:"code"`
					Message string `json:"message"`
				}
				if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
					t.Fatal(err)
				}
				if result.Code != tc.expectCode {
					t.Errorf("expected code %d, got %d", tc.expectCode, result.Code)
				}
			})
		}
	})
}
Monitoring and Metrics
Here's how to add monitoring to your upload system:
// UploadMetrics bundles the Prometheus instruments for the upload
// pipeline: success and error counters, duration and size
// distributions, and the in-flight gauge.
type UploadMetrics struct {
	totalUploads   prometheus.Counter
	uploadErrors   prometheus.Counter
	uploadDuration prometheus.Histogram
	uploadSize     prometheus.Histogram
	activeUploads  prometheus.Gauge
}
// NewUploadMetrics constructs the upload metric set with exponential
// buckets: durations starting at 0.1s and sizes starting at 1KiB,
// each doubling across 10 buckets.
//
// NOTE(review): nothing here registers the collectors (e.g. via
// prometheus.MustRegister) — confirm registration happens elsewhere
// or these metrics will never be exported.
func NewUploadMetrics() *UploadMetrics {
	return &UploadMetrics{
		totalUploads: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "uploads_total",
			Help: "Total number of file uploads",
		}),
		uploadErrors: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "upload_errors_total",
			Help: "Total number of upload errors",
		}),
		uploadDuration: prometheus.NewHistogram(prometheus.HistogramOpts{
			Name:    "upload_duration_seconds",
			Help:    "Upload duration in seconds",
			Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
		}),
		uploadSize: prometheus.NewHistogram(prometheus.HistogramOpts{
			Name:    "upload_size_bytes",
			Help:    "Upload size in bytes",
			Buckets: prometheus.ExponentialBuckets(1024, 2, 10),
		}),
		activeUploads: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "active_uploads",
			Help: "Number of active uploads",
		}),
	}
}
// Middleware records upload metrics around the handler chain: the
// in-flight gauge, success/error counters, and size/duration
// histograms.
func (m *UploadMetrics) Middleware(r *ghttp.Request) {
	start := time.Now()
	m.activeUploads.Inc()
	// Decrement via defer so the gauge cannot leak if a downstream
	// handler panics (the original decremented inline).
	defer m.activeUploads.Dec()
	r.Middleware.Next()
	// Record duration for every request, not only successes — error
	// latency is exactly what dashboards need during an incident.
	m.uploadDuration.Observe(time.Since(start).Seconds())
	if err := r.GetError(); err != nil {
		m.uploadErrors.Inc()
		return
	}
	m.totalUploads.Inc()
	if file := r.GetUploadFile("file"); file != nil {
		m.uploadSize.Observe(float64(file.Size))
	}
}
Wrapping Up
We've covered a lot of ground! From basic file uploads to advanced features like chunked uploads and cloud storage integration. Here's what you should remember:
- Always validate files before processing
- Use chunked uploads for large files
- Implement proper error handling
- Consider cloud storage for scalability
- Monitor and optimize performance
What's Next?
- Implement WebSocket progress tracking
- Add support for more cloud providers
- Build a complete file management system
- Add image processing capabilities
Resources
Feel free to drop any questions in the comments below! And don't forget to follow for more GoFrame tutorials. Happy coding! 🚀
Did you find this article helpful? Like, comment, and follow for more Go development content! 👍