feat: file explorer
All checks were successful
Build and Push App Image / build-and-push (push) Successful in 50s
All checks were successful
Build and Push App Image / build-and-push (push) Successful in 50s
This commit is contained in:
389
backend/internal/application/services/file_service.go
Normal file
389
backend/internal/application/services/file_service.go
Normal file
@@ -0,0 +1,389 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"go.mongodb.org/mongo-driver/v2/bson"
|
||||
|
||||
"github.com/noteapp/backend/internal/domain/repositories"
|
||||
"github.com/noteapp/backend/internal/infrastructure/security"
|
||||
)
|
||||
|
||||
// S3Object represents a file or folder entry with key relative to the space root.
// Folder entries are synthesized from S3 common prefixes and carry only Key and
// IsFolder; Size and LastModified remain zero-valued for them.
type S3Object struct {
	Key          string `json:"key"`           // path relative to the space root; folder keys end with "/"
	Size         int64  `json:"size"`          // object size in bytes; 0 for folders
	LastModified string `json:"last_modified"` // RFC 3339 timestamp; empty for folders
	IsFolder     bool   `json:"is_folder"`     // true when the entry is a virtual folder
}
|
||||
|
||||
// FileService handles S3 file operations scoped to individual spaces.
// S3 connection settings come from feature flags (see buildS3Config), and
// every operation validates space membership before touching the bucket.
type FileService struct {
	featureFlagRepo repositories.FeatureFlagRepository // source of S3 settings and the file-explorer toggle
	membershipRepo  repositories.MembershipRepository  // used to verify the caller belongs to the space
	encryptor       *security.Encryptor                // decrypts the stored S3 secret key; may be nil
}
|
||||
|
||||
// NewFileService creates a new FileService.
|
||||
func NewFileService(
|
||||
featureFlagRepo repositories.FeatureFlagRepository,
|
||||
membershipRepo repositories.MembershipRepository,
|
||||
encryptor *security.Encryptor,
|
||||
) *FileService {
|
||||
return &FileService{
|
||||
featureFlagRepo: featureFlagRepo,
|
||||
membershipRepo: membershipRepo,
|
||||
encryptor: encryptor,
|
||||
}
|
||||
}
|
||||
|
||||
// s3Config bundles a ready-to-use S3 client with the target bucket name.
type s3Config struct {
	client *s3.Client
	bucket string
}
|
||||
|
||||
// buildS3Config loads feature flags, decrypts credentials, and returns an S3 client + bucket name.
|
||||
func (s *FileService) buildS3Config(ctx context.Context) (*s3Config, error) {
|
||||
flags, err := s.featureFlagRepo.GetFeatureFlags(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !flags.FileExplorerEnabled {
|
||||
return nil, errors.New("file explorer is disabled")
|
||||
}
|
||||
if flags.S3Endpoint == "" || flags.S3Bucket == "" {
|
||||
return nil, errors.New("S3 is not configured")
|
||||
}
|
||||
|
||||
secretKey := ""
|
||||
if flags.S3SecretKey != "" && s.encryptor != nil {
|
||||
secretKey, err = s.encryptor.Decrypt(flags.S3SecretKey)
|
||||
if err != nil {
|
||||
return nil, errors.New("failed to decrypt S3 credentials")
|
||||
}
|
||||
}
|
||||
|
||||
region := flags.S3Region
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
|
||||
cfg := aws.Config{
|
||||
Region: region,
|
||||
Credentials: credentials.NewStaticCredentialsProvider(flags.S3AccessKey, secretKey, ""),
|
||||
}
|
||||
|
||||
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
|
||||
o.BaseEndpoint = aws.String(flags.S3Endpoint)
|
||||
o.UsePathStyle = true
|
||||
})
|
||||
|
||||
return &s3Config{client: client, bucket: flags.S3Bucket}, nil
|
||||
}
|
||||
|
||||
// validateAccess ensures file explorer is enabled and the user is a member of the space.
|
||||
// Returns a ready S3 config on success.
|
||||
func (s *FileService) validateAccess(ctx context.Context, userIDHex, spaceIDHex string) (*s3Config, error) {
|
||||
cfg, err := s.buildS3Config(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
userID, err := bson.ObjectIDFromHex(userIDHex)
|
||||
if err != nil {
|
||||
return nil, errors.New("access denied")
|
||||
}
|
||||
spaceID, err := bson.ObjectIDFromHex(spaceIDHex)
|
||||
if err != nil {
|
||||
return nil, errors.New("access denied")
|
||||
}
|
||||
|
||||
if _, err := s.membershipRepo.GetUserMembership(ctx, userID, spaceID); err != nil {
|
||||
return nil, errors.New("access denied")
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// spaceBase returns the S3 key prefix for a space: "spaces/<spaceIDHex>/".
func spaceBase(spaceIDHex string) string {
	return "spaces/" + spaceIDHex + "/"
}

// escapesRoot reports whether a path.Clean-ed relative path climbs out of
// its root. After Clean, the only surviving ".." segments sit at the front,
// so a path escapes iff it is ".." or begins with "../".
func escapesRoot(cleaned string) bool {
	return cleaned == ".." || strings.HasPrefix(cleaned, "../")
}

// resolveRelKey sanitises a relative key and returns the full S3 key,
// rejecting anything that would escape the space prefix.
func resolveRelKey(spaceIDHex, relKey string) (string, error) {
	relKey = strings.TrimLeft(strings.TrimSpace(relKey), "/")
	cleaned := path.Clean(relKey)
	if cleaned == "." || cleaned == "" {
		return "", errors.New("key is empty")
	}
	// Segment-aware escape check (instead of strings.Contains(cleaned, ".."))
	// so legitimate names that merely contain ".." — e.g. "file..txt" — are
	// still accepted, while traversal is still rejected.
	if escapesRoot(cleaned) {
		return "", errors.New("invalid key")
	}
	// base + cleaned always starts with base by construction, so no extra
	// prefix check is required here.
	return spaceBase(spaceIDHex) + cleaned, nil
}

// resolveRelPrefix sanitises a relative folder prefix and returns the full
// S3 prefix. An empty relPrefix maps to the space root folder.
func resolveRelPrefix(spaceIDHex, relPrefix string) (string, error) {
	base := spaceBase(spaceIDHex)
	relPrefix = strings.TrimLeft(strings.TrimSpace(relPrefix), "/")
	if relPrefix == "" {
		return base, nil
	}
	cleaned := path.Clean(relPrefix)
	if cleaned == "." {
		return base, nil
	}
	if escapesRoot(cleaned) {
		return "", errors.New("invalid prefix")
	}
	return base + cleaned + "/", nil
}
|
||||
|
||||
// ListObjects returns objects and virtual folders directly under relPrefix within the space.
|
||||
// Returned keys are relative to the space root (no "spaces/<spaceId>/" prefix).
|
||||
func (s *FileService) ListObjects(ctx context.Context, userIDHex, spaceIDHex, relPrefix string) ([]*S3Object, error) {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fullPrefix, err := resolveRelPrefix(spaceIDHex, relPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
base := spaceBase(spaceIDHex)
|
||||
result, err := cfg.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Prefix: aws.String(fullPrefix),
|
||||
Delimiter: aws.String("/"),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var objects []*S3Object
|
||||
|
||||
for _, cp := range result.CommonPrefixes {
|
||||
if cp.Prefix != nil {
|
||||
objects = append(objects, &S3Object{
|
||||
Key: strings.TrimPrefix(*cp.Prefix, base),
|
||||
IsFolder: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for _, obj := range result.Contents {
|
||||
if obj.Key == nil || *obj.Key == fullPrefix {
|
||||
continue
|
||||
}
|
||||
// Hide virtual .keep placeholder files used for folder creation
|
||||
if path.Base(*obj.Key) == ".keep" {
|
||||
continue
|
||||
}
|
||||
size := int64(0)
|
||||
if obj.Size != nil {
|
||||
size = *obj.Size
|
||||
}
|
||||
lastMod := ""
|
||||
if obj.LastModified != nil {
|
||||
lastMod = obj.LastModified.Format(time.RFC3339)
|
||||
}
|
||||
objects = append(objects, &S3Object{
|
||||
Key: strings.TrimPrefix(*obj.Key, base),
|
||||
Size: size,
|
||||
LastModified: lastMod,
|
||||
})
|
||||
}
|
||||
|
||||
return objects, nil
|
||||
}
|
||||
|
||||
// GetObjectContent streams an S3 object, enforcing space boundary.
|
||||
// relKey is relative to the space root.
|
||||
func (s *FileService) GetObjectContent(ctx context.Context, userIDHex, spaceIDHex, relKey string) (io.ReadCloser, string, error) {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
fullKey, err := resolveRelKey(spaceIDHex, relKey)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
result, err := cfg.client.GetObject(ctx, &s3.GetObjectInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Key: aws.String(fullKey),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
contentType := "application/octet-stream"
|
||||
if result.ContentType != nil {
|
||||
contentType = *result.ContentType
|
||||
}
|
||||
|
||||
return result.Body, contentType, nil
|
||||
}
|
||||
|
||||
// UploadObject stores a file at relKey within the space.
|
||||
func (s *FileService) UploadObject(ctx context.Context, userIDHex, spaceIDHex, relKey, contentType string, body io.Reader, size int64) error {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullKey, err := resolveRelKey(spaceIDHex, relKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if contentType == "" {
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
|
||||
input := &s3.PutObjectInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Key: aws.String(fullKey),
|
||||
Body: body,
|
||||
ContentType: aws.String(contentType),
|
||||
}
|
||||
if size > 0 {
|
||||
input.ContentLength = aws.Int64(size)
|
||||
}
|
||||
|
||||
_, err = cfg.client.PutObject(ctx, input)
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateFolder creates a virtual folder by uploading a zero-byte .keep placeholder.
|
||||
func (s *FileService) CreateFolder(ctx context.Context, userIDHex, spaceIDHex, relPath string) error {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
base := spaceBase(spaceIDHex)
|
||||
relPath = strings.Trim(relPath, "/")
|
||||
cleaned := path.Clean(relPath)
|
||||
if cleaned == "." || cleaned == "" || strings.Contains(cleaned, "..") {
|
||||
return errors.New("invalid folder path")
|
||||
}
|
||||
fullKey := base + cleaned + "/.keep"
|
||||
if !strings.HasPrefix(fullKey, base) {
|
||||
return errors.New("invalid folder path: outside space boundary")
|
||||
}
|
||||
|
||||
zero := int64(0)
|
||||
_, err = cfg.client.PutObject(ctx, &s3.PutObjectInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Key: aws.String(fullKey),
|
||||
Body: bytes.NewReader(nil),
|
||||
ContentType: aws.String("application/octet-stream"),
|
||||
ContentLength: aws.Int64(zero),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteObject removes a single object within the space.
|
||||
func (s *FileService) DeleteObject(ctx context.Context, userIDHex, spaceIDHex, relKey string) error {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullKey, err := resolveRelKey(spaceIDHex, relKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = cfg.client.DeleteObject(ctx, &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Key: aws.String(fullKey),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteFolder recursively deletes all objects under relPrefix within the space.
|
||||
func (s *FileService) DeleteFolder(ctx context.Context, userIDHex, spaceIDHex, relPrefix string) error {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullPrefix, err := resolveRelPrefix(spaceIDHex, relPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Safety net: refuse to delete the entire space root
|
||||
if fullPrefix == spaceBase(spaceIDHex) {
|
||||
return errors.New("cannot delete the space root folder")
|
||||
}
|
||||
|
||||
paginator := s3.NewListObjectsV2Paginator(cfg.client, &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Prefix: aws.String(fullPrefix),
|
||||
})
|
||||
|
||||
var toDelete []types.ObjectIdentifier
|
||||
for paginator.HasMorePages() {
|
||||
page, err := paginator.NextPage(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, obj := range page.Contents {
|
||||
if obj.Key != nil {
|
||||
toDelete = append(toDelete, types.ObjectIdentifier{Key: obj.Key})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(toDelete) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete in batches of 1000 (S3 limit per DeleteObjects call)
|
||||
for i := 0; i < len(toDelete); i += 1000 {
|
||||
end := i + 1000
|
||||
if end > len(toDelete) {
|
||||
end = len(toDelete)
|
||||
}
|
||||
_, err := cfg.client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Delete: &types.Delete{
|
||||
Objects: toDelete[i:end],
|
||||
Quiet: aws.Bool(true),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
Reference in New Issue
Block a user