feat: file explorer
All checks were successful
Build and Push App Image / build-and-push (push) Successful in 50s
All checks were successful
Build and Push App Image / build-and-push (push) Successful in 50s
This commit is contained in:
@@ -59,16 +59,28 @@ type CreateAuthProviderRequest struct {
|
||||
|
||||
// FeatureFlagsDTO represents app-wide feature flags in API responses.
// The S3 secret key itself is never returned to clients; S3SecretKeySet
// only reports whether a secret is stored server-side.
type FeatureFlagsDTO struct {
	RegistrationEnabled  bool   `json:"registration_enabled"`
	ProviderLoginEnabled bool   `json:"provider_login_enabled"`
	PublicSharingEnabled bool   `json:"public_sharing_enabled"`
	FileExplorerEnabled  bool   `json:"file_explorer_enabled"`
	S3Endpoint           string `json:"s3_endpoint,omitempty"`
	S3Bucket             string `json:"s3_bucket,omitempty"`
	S3Region             string `json:"s3_region,omitempty"`
	S3AccessKey          string `json:"s3_access_key,omitempty"`
	S3SecretKeySet       bool   `json:"s3_secret_key_set"`
}
|
||||
|
||||
// UpdateFeatureFlagsRequest represents admin payload for feature flag updates.
// Unlike the DTO, S3SecretKey is accepted here as plaintext input; an empty
// value means "keep the existing encrypted secret".
type UpdateFeatureFlagsRequest struct {
	RegistrationEnabled  bool   `json:"registration_enabled"`
	ProviderLoginEnabled bool   `json:"provider_login_enabled"`
	PublicSharingEnabled bool   `json:"public_sharing_enabled"`
	FileExplorerEnabled  bool   `json:"file_explorer_enabled"`
	S3Endpoint           string `json:"s3_endpoint"`
	S3Bucket             string `json:"s3_bucket"`
	S3Region             string `json:"s3_region"`
	S3AccessKey          string `json:"s3_access_key"`
	S3SecretKey          string `json:"s3_secret_key"` // empty = keep existing encrypted value
}
|
||||
|
||||
// UserDTO represents a user in API responses
|
||||
@@ -206,6 +218,12 @@ func NewFeatureFlagsDTO(flags *entities.FeatureFlags) *FeatureFlagsDTO {
|
||||
RegistrationEnabled: flags.RegistrationEnabled,
|
||||
ProviderLoginEnabled: flags.ProviderLoginEnabled,
|
||||
PublicSharingEnabled: flags.PublicSharingEnabled,
|
||||
FileExplorerEnabled: flags.FileExplorerEnabled,
|
||||
S3Endpoint: flags.S3Endpoint,
|
||||
S3Bucket: flags.S3Bucket,
|
||||
S3Region: flags.S3Region,
|
||||
S3AccessKey: flags.S3AccessKey,
|
||||
S3SecretKeySet: flags.S3SecretKey != "",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/noteapp/backend/internal/application/dto"
|
||||
"github.com/noteapp/backend/internal/domain/entities"
|
||||
"github.com/noteapp/backend/internal/domain/repositories"
|
||||
"github.com/noteapp/backend/internal/infrastructure/security"
|
||||
)
|
||||
|
||||
// AdminService handles admin-level operations
|
||||
@@ -22,6 +23,7 @@ type AdminService struct {
|
||||
categoryRepo repositories.CategoryRepository
|
||||
featureFlagRepo repositories.FeatureFlagRepository
|
||||
permissionService *PermissionService
|
||||
encryptor *security.Encryptor
|
||||
}
|
||||
|
||||
// NewAdminService creates a new AdminService
|
||||
@@ -34,6 +36,7 @@ func NewAdminService(
|
||||
categoryRepo repositories.CategoryRepository,
|
||||
featureFlagRepo repositories.FeatureFlagRepository,
|
||||
permissionService *PermissionService,
|
||||
encryptor *security.Encryptor,
|
||||
) *AdminService {
|
||||
return &AdminService{
|
||||
userRepo: userRepo,
|
||||
@@ -44,6 +47,7 @@ func NewAdminService(
|
||||
categoryRepo: categoryRepo,
|
||||
featureFlagRepo: featureFlagRepo,
|
||||
permissionService: permissionService,
|
||||
encryptor: encryptor,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -299,10 +303,31 @@ func (s *AdminService) UpdateFeatureFlags(ctx context.Context, req *dto.UpdateFe
|
||||
return nil, errors.New("feature flags are unavailable")
|
||||
}
|
||||
|
||||
// Load existing flags so we can preserve the encrypted S3 secret when not updated
|
||||
existing, err := s.featureFlagRepo.GetFeatureFlags(ctx)
|
||||
if err != nil {
|
||||
existing = entities.NewDefaultFeatureFlags()
|
||||
}
|
||||
|
||||
flags := &entities.FeatureFlags{
|
||||
RegistrationEnabled: req.RegistrationEnabled,
|
||||
ProviderLoginEnabled: req.ProviderLoginEnabled,
|
||||
PublicSharingEnabled: req.PublicSharingEnabled,
|
||||
FileExplorerEnabled: req.FileExplorerEnabled,
|
||||
S3Endpoint: strings.TrimSpace(req.S3Endpoint),
|
||||
S3Bucket: strings.TrimSpace(req.S3Bucket),
|
||||
S3Region: strings.TrimSpace(req.S3Region),
|
||||
S3AccessKey: strings.TrimSpace(req.S3AccessKey),
|
||||
S3SecretKey: existing.S3SecretKey, // keep encrypted secret by default
|
||||
}
|
||||
|
||||
// Only re-encrypt if a new secret was supplied
|
||||
if s.encryptor != nil && strings.TrimSpace(req.S3SecretKey) != "" {
|
||||
encrypted, err := s.encryptor.Encrypt(strings.TrimSpace(req.S3SecretKey))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
flags.S3SecretKey = encrypted
|
||||
}
|
||||
|
||||
if err := s.featureFlagRepo.UpdateFeatureFlags(ctx, flags); err != nil {
|
||||
|
||||
389
backend/internal/application/services/file_service.go
Normal file
389
backend/internal/application/services/file_service.go
Normal file
@@ -0,0 +1,389 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"go.mongodb.org/mongo-driver/v2/bson"
|
||||
|
||||
"github.com/noteapp/backend/internal/domain/repositories"
|
||||
"github.com/noteapp/backend/internal/infrastructure/security"
|
||||
)
|
||||
|
||||
// S3Object represents a file or folder entry with key relative to the space root.
type S3Object struct {
	Key          string `json:"key"`           // path relative to the space root (no "spaces/<id>/" prefix)
	Size         int64  `json:"size"`          // object size in bytes; zero for folder entries
	LastModified string `json:"last_modified"` // RFC 3339 timestamp; empty for folder entries
	IsFolder     bool   `json:"is_folder"`     // true for virtual folders derived from S3 common prefixes
}
|
||||
|
||||
// FileService handles S3 file operations scoped to individual spaces.
// S3 connection settings come from feature flags, and every operation is
// gated by a membership check on the target space.
type FileService struct {
	featureFlagRepo repositories.FeatureFlagRepository // source of the S3 config and explorer toggle
	membershipRepo  repositories.MembershipRepository  // space-membership lookups used for access control
	encryptor       *security.Encryptor                // decrypts the stored S3 secret key
}
|
||||
|
||||
// NewFileService creates a new FileService.
|
||||
func NewFileService(
|
||||
featureFlagRepo repositories.FeatureFlagRepository,
|
||||
membershipRepo repositories.MembershipRepository,
|
||||
encryptor *security.Encryptor,
|
||||
) *FileService {
|
||||
return &FileService{
|
||||
featureFlagRepo: featureFlagRepo,
|
||||
membershipRepo: membershipRepo,
|
||||
encryptor: encryptor,
|
||||
}
|
||||
}
|
||||
|
||||
// s3Config bundles a ready-to-use S3 client with the bucket it targets.
type s3Config struct {
	client *s3.Client // configured from feature-flag endpoint/credentials
	bucket string     // bucket name taken from feature flags
}
|
||||
|
||||
// buildS3Config loads feature flags, decrypts credentials, and returns an S3 client + bucket name.
func (s *FileService) buildS3Config(ctx context.Context) (*s3Config, error) {
	flags, err := s.featureFlagRepo.GetFeatureFlags(ctx)
	if err != nil {
		return nil, err
	}
	// The explorer must be switched on and minimally configured before any S3 call.
	if !flags.FileExplorerEnabled {
		return nil, errors.New("file explorer is disabled")
	}
	if flags.S3Endpoint == "" || flags.S3Bucket == "" {
		return nil, errors.New("S3 is not configured")
	}

	// The stored secret is encrypted at rest; decrypt only when both a value
	// and an encryptor are present. NOTE(review): if a secret is stored but
	// the encryptor is nil, this silently proceeds with an empty secret —
	// confirm that is intended rather than an error.
	secretKey := ""
	if flags.S3SecretKey != "" && s.encryptor != nil {
		secretKey, err = s.encryptor.Decrypt(flags.S3SecretKey)
		if err != nil {
			// Opaque message: do not leak cipher details to callers.
			return nil, errors.New("failed to decrypt S3 credentials")
		}
	}

	// Default region keeps S3-compatible stores working when none is configured.
	region := flags.S3Region
	if region == "" {
		region = "us-east-1"
	}

	cfg := aws.Config{
		Region:      region,
		Credentials: credentials.NewStaticCredentialsProvider(flags.S3AccessKey, secretKey, ""),
	}

	// Custom endpoint + path-style addressing support non-AWS S3 implementations.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String(flags.S3Endpoint)
		o.UsePathStyle = true
	})

	return &s3Config{client: client, bucket: flags.S3Bucket}, nil
}
|
||||
|
||||
// validateAccess ensures file explorer is enabled and the user is a member of the space.
|
||||
// Returns a ready S3 config on success.
|
||||
func (s *FileService) validateAccess(ctx context.Context, userIDHex, spaceIDHex string) (*s3Config, error) {
|
||||
cfg, err := s.buildS3Config(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
userID, err := bson.ObjectIDFromHex(userIDHex)
|
||||
if err != nil {
|
||||
return nil, errors.New("access denied")
|
||||
}
|
||||
spaceID, err := bson.ObjectIDFromHex(spaceIDHex)
|
||||
if err != nil {
|
||||
return nil, errors.New("access denied")
|
||||
}
|
||||
|
||||
if _, err := s.membershipRepo.GetUserMembership(ctx, userID, spaceID); err != nil {
|
||||
return nil, errors.New("access denied")
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// spaceBase returns the S3 key prefix for a space: "spaces/<spaceIDHex>/".
func spaceBase(spaceIDHex string) string {
	const root = "spaces/"
	return root + spaceIDHex + "/"
}
|
||||
|
||||
// resolveRelKey sanitises a relative key and returns the full S3 key,
// rejecting anything that would escape the space prefix.
//
// After path.Clean, ".." can only survive as a leading segment, so traversal
// is detected by exactly that case. The previous blanket
// strings.Contains(cleaned, "..") also rejected legitimate names such as
// "report..final.txt", and its HasPrefix(base+cleaned, base) check was dead
// code (always true by construction).
func resolveRelKey(spaceIDHex, relKey string) (string, error) {
	relKey = strings.TrimLeft(strings.TrimSpace(relKey), "/")
	cleaned := path.Clean(relKey)
	if cleaned == "." || cleaned == "" {
		return "", errors.New("key is empty")
	}
	// path.Clean collapses interior "a/.." segments; any remaining ".." sits
	// at the front and would escape the space root.
	if cleaned == ".." || strings.HasPrefix(cleaned, "../") {
		return "", errors.New("invalid key")
	}
	// Keep in sync with spaceBase.
	return "spaces/" + spaceIDHex + "/" + cleaned, nil
}
|
||||
|
||||
// resolveRelPrefix sanitises a relative folder prefix and returns the full S3 prefix.
// An empty relPrefix maps to the space root folder.
//
// As in resolveRelKey, traversal is detected by a leading ".." segment after
// path.Clean rather than by a blanket substring match, so folder names that
// merely contain ".." remain usable; the former boundary HasPrefix check was
// dead code (base+cleaned always has prefix base).
func resolveRelPrefix(spaceIDHex, relPrefix string) (string, error) {
	// Keep in sync with spaceBase.
	base := "spaces/" + spaceIDHex + "/"
	relPrefix = strings.TrimLeft(strings.TrimSpace(relPrefix), "/")
	if relPrefix == "" {
		return base, nil
	}
	cleaned := path.Clean(relPrefix)
	if cleaned == "." {
		return base, nil
	}
	// path.Clean collapses interior "a/.." segments; only a leading ".." escapes.
	if cleaned == ".." || strings.HasPrefix(cleaned, "../") {
		return "", errors.New("invalid prefix")
	}
	return base + cleaned + "/", nil
}
|
||||
|
||||
// ListObjects returns objects and virtual folders directly under relPrefix within the space.
// Returned keys are relative to the space root (no "spaces/<spaceId>/" prefix).
func (s *FileService) ListObjects(ctx context.Context, userIDHex, spaceIDHex, relPrefix string) ([]*S3Object, error) {
	cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
	if err != nil {
		return nil, err
	}

	fullPrefix, err := resolveRelPrefix(spaceIDHex, relPrefix)
	if err != nil {
		return nil, err
	}

	// Delimiter "/" makes S3 group deeper keys into CommonPrefixes, producing
	// one level of "folders" per request. NOTE(review): only the first
	// ListObjectsV2 page is fetched — confirm whether pagination is needed
	// for folders holding more than one page of keys.
	base := spaceBase(spaceIDHex)
	result, err := cfg.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket:    aws.String(cfg.bucket),
		Prefix:    aws.String(fullPrefix),
		Delimiter: aws.String("/"),
	})
	if err != nil {
		return nil, err
	}

	var objects []*S3Object

	// Common prefixes become virtual folder entries, keyed relative to the space root.
	for _, cp := range result.CommonPrefixes {
		if cp.Prefix != nil {
			objects = append(objects, &S3Object{
				Key:      strings.TrimPrefix(*cp.Prefix, base),
				IsFolder: true,
			})
		}
	}

	for _, obj := range result.Contents {
		// Skip the entry for the prefix itself (e.g. the folder placeholder key).
		if obj.Key == nil || *obj.Key == fullPrefix {
			continue
		}
		// Hide virtual .keep placeholder files used for folder creation
		if path.Base(*obj.Key) == ".keep" {
			continue
		}
		// Size/LastModified are pointers in the SDK; default to zero values.
		size := int64(0)
		if obj.Size != nil {
			size = *obj.Size
		}
		lastMod := ""
		if obj.LastModified != nil {
			lastMod = obj.LastModified.Format(time.RFC3339)
		}
		objects = append(objects, &S3Object{
			Key:          strings.TrimPrefix(*obj.Key, base),
			Size:         size,
			LastModified: lastMod,
		})
	}

	return objects, nil
}
|
||||
|
||||
// GetObjectContent streams an S3 object, enforcing space boundary.
|
||||
// relKey is relative to the space root.
|
||||
func (s *FileService) GetObjectContent(ctx context.Context, userIDHex, spaceIDHex, relKey string) (io.ReadCloser, string, error) {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
fullKey, err := resolveRelKey(spaceIDHex, relKey)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
result, err := cfg.client.GetObject(ctx, &s3.GetObjectInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Key: aws.String(fullKey),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
contentType := "application/octet-stream"
|
||||
if result.ContentType != nil {
|
||||
contentType = *result.ContentType
|
||||
}
|
||||
|
||||
return result.Body, contentType, nil
|
||||
}
|
||||
|
||||
// UploadObject stores a file at relKey within the space.
|
||||
func (s *FileService) UploadObject(ctx context.Context, userIDHex, spaceIDHex, relKey, contentType string, body io.Reader, size int64) error {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullKey, err := resolveRelKey(spaceIDHex, relKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if contentType == "" {
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
|
||||
input := &s3.PutObjectInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Key: aws.String(fullKey),
|
||||
Body: body,
|
||||
ContentType: aws.String(contentType),
|
||||
}
|
||||
if size > 0 {
|
||||
input.ContentLength = aws.Int64(size)
|
||||
}
|
||||
|
||||
_, err = cfg.client.PutObject(ctx, input)
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateFolder creates a virtual folder by uploading a zero-byte .keep placeholder.
|
||||
func (s *FileService) CreateFolder(ctx context.Context, userIDHex, spaceIDHex, relPath string) error {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
base := spaceBase(spaceIDHex)
|
||||
relPath = strings.Trim(relPath, "/")
|
||||
cleaned := path.Clean(relPath)
|
||||
if cleaned == "." || cleaned == "" || strings.Contains(cleaned, "..") {
|
||||
return errors.New("invalid folder path")
|
||||
}
|
||||
fullKey := base + cleaned + "/.keep"
|
||||
if !strings.HasPrefix(fullKey, base) {
|
||||
return errors.New("invalid folder path: outside space boundary")
|
||||
}
|
||||
|
||||
zero := int64(0)
|
||||
_, err = cfg.client.PutObject(ctx, &s3.PutObjectInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Key: aws.String(fullKey),
|
||||
Body: bytes.NewReader(nil),
|
||||
ContentType: aws.String("application/octet-stream"),
|
||||
ContentLength: aws.Int64(zero),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteObject removes a single object within the space.
|
||||
func (s *FileService) DeleteObject(ctx context.Context, userIDHex, spaceIDHex, relKey string) error {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullKey, err := resolveRelKey(spaceIDHex, relKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = cfg.client.DeleteObject(ctx, &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Key: aws.String(fullKey),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteFolder recursively deletes all objects under relPrefix within the space.
|
||||
func (s *FileService) DeleteFolder(ctx context.Context, userIDHex, spaceIDHex, relPrefix string) error {
|
||||
cfg, err := s.validateAccess(ctx, userIDHex, spaceIDHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullPrefix, err := resolveRelPrefix(spaceIDHex, relPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Safety net: refuse to delete the entire space root
|
||||
if fullPrefix == spaceBase(spaceIDHex) {
|
||||
return errors.New("cannot delete the space root folder")
|
||||
}
|
||||
|
||||
paginator := s3.NewListObjectsV2Paginator(cfg.client, &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Prefix: aws.String(fullPrefix),
|
||||
})
|
||||
|
||||
var toDelete []types.ObjectIdentifier
|
||||
for paginator.HasMorePages() {
|
||||
page, err := paginator.NextPage(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, obj := range page.Contents {
|
||||
if obj.Key != nil {
|
||||
toDelete = append(toDelete, types.ObjectIdentifier{Key: obj.Key})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(toDelete) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete in batches of 1000 (S3 limit per DeleteObjects call)
|
||||
for i := 0; i < len(toDelete); i += 1000 {
|
||||
end := i + 1000
|
||||
if end > len(toDelete) {
|
||||
end = len(toDelete)
|
||||
}
|
||||
_, err := cfg.client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
|
||||
Bucket: aws.String(cfg.bucket),
|
||||
Delete: &types.Delete{
|
||||
Objects: toDelete[i:end],
|
||||
Quiet: aws.Bool(true),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -36,9 +36,15 @@ type LoginAttempt struct {
|
||||
|
||||
// FeatureFlags controls app-wide behavior toggles.
// The S3* fields configure the optional file-explorer storage backend.
type FeatureFlags struct {
	RegistrationEnabled  bool   `bson:"registration_enabled"`
	ProviderLoginEnabled bool   `bson:"provider_login_enabled"`
	PublicSharingEnabled bool   `bson:"public_sharing_enabled"`
	FileExplorerEnabled  bool   `bson:"file_explorer_enabled"`
	S3Endpoint           string `bson:"s3_endpoint,omitempty"`
	S3Bucket             string `bson:"s3_bucket,omitempty"`
	S3Region             string `bson:"s3_region,omitempty"`
	S3AccessKey          string `bson:"s3_access_key,omitempty"`
	S3SecretKey          string `bson:"s3_secret_key,omitempty"` // AES-256-GCM encrypted
}
|
||||
|
||||
// NewDefaultFeatureFlags returns safe defaults for a new deployment.
|
||||
@@ -47,5 +53,6 @@ func NewDefaultFeatureFlags() *FeatureFlags {
|
||||
RegistrationEnabled: true,
|
||||
ProviderLoginEnabled: true,
|
||||
PublicSharingEnabled: true,
|
||||
FileExplorerEnabled: false,
|
||||
}
|
||||
}
|
||||
|
||||
273
backend/internal/interfaces/handlers/file_handler.go
Normal file
273
backend/internal/interfaces/handlers/file_handler.go
Normal file
@@ -0,0 +1,273 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/noteapp/backend/internal/application/services"
|
||||
"github.com/noteapp/backend/internal/interfaces/middleware"
|
||||
)
|
||||
|
||||
// maxUploadSize is the byte limit used when parsing multipart uploads in UploadFile.
const maxUploadSize = 100 << 20 // 100 MB

// FileHandler exposes S3 file explorer endpoints scoped to spaces.
type FileHandler struct {
	fileService *services.FileService // performs the S3 operations and per-space access checks
}
|
||||
|
||||
// NewFileHandler creates a new FileHandler.
|
||||
func NewFileHandler(fileService *services.FileService) *FileHandler {
|
||||
return &FileHandler{fileService: fileService}
|
||||
}
|
||||
|
||||
// extractContext extracts and validates spaceId (URL) and userId (JWT context).
|
||||
func (h *FileHandler) extractContext(r *http.Request) (spaceID, userID string, err error) {
|
||||
spaceID = mux.Vars(r)["spaceId"]
|
||||
if spaceID == "" {
|
||||
return "", "", fmt.Errorf("missing spaceId")
|
||||
}
|
||||
userID, err = middleware.GetUserIDFromContext(r.Context())
|
||||
return
|
||||
}
|
||||
|
||||
// cleanKey sanitises a user-supplied relative key (strips leading slash, resolves .).
// Returns "" when nothing meaningful remains.
func cleanKey(raw string) string {
	trimmed := strings.TrimLeft(strings.TrimSpace(raw), "/")
	cleaned := path.Clean(trimmed)
	if cleaned == "." {
		return ""
	}
	return cleaned
}
|
||||
|
||||
// cleanPrefix sanitises a user-supplied relative prefix.
// Returns "" when nothing meaningful remains.
func cleanPrefix(raw string) string {
	trimmed := strings.TrimLeft(strings.TrimSpace(raw), "/")
	cleaned := path.Clean(trimmed)
	if cleaned == "." {
		return ""
	}
	return cleaned
}
|
||||
|
||||
// respondError maps service errors to appropriate HTTP status codes.
|
||||
func respondError(w http.ResponseWriter, err error) {
|
||||
msg := err.Error()
|
||||
switch {
|
||||
case strings.Contains(msg, "access denied"), strings.Contains(msg, "disabled"):
|
||||
http.Error(w, msg, http.StatusForbidden)
|
||||
default:
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
// ListFiles handles GET /api/v1/spaces/{spaceId}/files/list?prefix=
|
||||
func (h *FileHandler) ListFiles(w http.ResponseWriter, r *http.Request) {
|
||||
spaceID, userID, err := h.extractContext(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
relPrefix := cleanPrefix(r.URL.Query().Get("prefix"))
|
||||
objects, err := h.fileService.ListObjects(r.Context(), userID, spaceID, relPrefix)
|
||||
if err != nil {
|
||||
respondError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"objects": objects,
|
||||
"prefix": relPrefix,
|
||||
})
|
||||
}
|
||||
|
||||
// GetFile handles GET /api/v1/spaces/{spaceId}/files/object?key=
// Also accepts ?token= as a fallback auth mechanism so markdown images render in-browser.
func (h *FileHandler) GetFile(w http.ResponseWriter, r *http.Request) {
	spaceID, userID, err := h.extractContext(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	relKey := cleanKey(r.URL.Query().Get("key"))
	if relKey == "" {
		http.Error(w, "key is required", http.StatusBadRequest)
		return
	}

	body, contentType, err := h.fileService.GetObjectContent(r.Context(), userID, spaceID, relKey)
	if err != nil {
		// Access failures map to 403; every other error (including S3 lookup
		// failures) is reported as 404 without detail to avoid leaking
		// backend information.
		if strings.Contains(err.Error(), "access denied") {
			http.Error(w, "access denied", http.StatusForbidden)
			return
		}
		http.Error(w, "file not found", http.StatusNotFound)
		return
	}
	defer body.Close()

	// Private caching: responses require auth and are per-user, but the
	// browser may reuse them for up to an hour.
	w.Header().Set("Content-Type", contentType)
	w.Header().Set("Cache-Control", "private, max-age=3600")
	io.Copy(w, body) //nolint:errcheck
}
|
||||
|
||||
// UploadFile handles POST /api/v1/spaces/{spaceId}/files/upload (multipart/form-data)
|
||||
// Form fields:
|
||||
// - path: optional relative folder within the space (e.g. "docs/2024")
|
||||
// - files: one or more file uploads
|
||||
func (h *FileHandler) UploadFile(w http.ResponseWriter, r *http.Request) {
|
||||
spaceID, userID, err := h.extractContext(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := r.ParseMultipartForm(maxUploadSize); err != nil {
|
||||
http.Error(w, "request too large", http.StatusRequestEntityTooLarge)
|
||||
return
|
||||
}
|
||||
|
||||
relFolder := cleanPrefix(r.FormValue("path"))
|
||||
fileHeaders := r.MultipartForm.File["files"]
|
||||
if len(fileHeaders) == 0 {
|
||||
http.Error(w, "no files provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var uploaded []string
|
||||
for _, fh := range fileHeaders {
|
||||
filename := path.Base(fh.Filename)
|
||||
if filename == "." || filename == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
var relKey string
|
||||
if relFolder != "" {
|
||||
relKey = relFolder + "/" + filename
|
||||
} else {
|
||||
relKey = filename
|
||||
}
|
||||
|
||||
// Detect content-type from header then extension
|
||||
ct := fh.Header.Get("Content-Type")
|
||||
if ct == "" || ct == "application/octet-stream" {
|
||||
if ext := path.Ext(filename); ext != "" {
|
||||
if t := mime.TypeByExtension(ext); t != "" {
|
||||
ct = t
|
||||
}
|
||||
}
|
||||
}
|
||||
if ct == "" {
|
||||
ct = "application/octet-stream"
|
||||
}
|
||||
|
||||
f, err := fh.Open()
|
||||
if err != nil {
|
||||
http.Error(w, "failed to read uploaded file", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
uploadErr := h.fileService.UploadObject(r.Context(), userID, spaceID, relKey, ct, f, fh.Size)
|
||||
f.Close()
|
||||
if uploadErr != nil {
|
||||
respondError(w, uploadErr)
|
||||
return
|
||||
}
|
||||
uploaded = append(uploaded, relKey)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{"uploaded": uploaded})
|
||||
}
|
||||
|
||||
// CreateFolder handles POST /api/v1/spaces/{spaceId}/files/folder
|
||||
// JSON body: {"path": "new-folder-name"}
|
||||
func (h *FileHandler) CreateFolder(w http.ResponseWriter, r *http.Request) {
|
||||
spaceID, userID, err := h.extractContext(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var body struct {
|
||||
Path string `json:"path"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
|
||||
http.Error(w, "invalid request body", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
relPath := cleanPrefix(body.Path)
|
||||
if relPath == "" {
|
||||
http.Error(w, "path is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.fileService.CreateFolder(r.Context(), userID, spaceID, relPath); err != nil {
|
||||
respondError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
json.NewEncoder(w).Encode(map[string]string{"path": relPath})
|
||||
}
|
||||
|
||||
// DeleteFile handles DELETE /api/v1/spaces/{spaceId}/files/object?key=
|
||||
func (h *FileHandler) DeleteFile(w http.ResponseWriter, r *http.Request) {
|
||||
spaceID, userID, err := h.extractContext(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
relKey := cleanKey(r.URL.Query().Get("key"))
|
||||
if relKey == "" {
|
||||
http.Error(w, "key is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.fileService.DeleteObject(r.Context(), userID, spaceID, relKey); err != nil {
|
||||
if strings.Contains(err.Error(), "access denied") {
|
||||
http.Error(w, "access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// DeleteFolder handles DELETE /api/v1/spaces/{spaceId}/files/folder?prefix=
|
||||
func (h *FileHandler) DeleteFolder(w http.ResponseWriter, r *http.Request) {
|
||||
spaceID, userID, err := h.extractContext(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
relPrefix := cleanPrefix(r.URL.Query().Get("prefix"))
|
||||
if relPrefix == "" {
|
||||
http.Error(w, "prefix is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.fileService.DeleteFolder(r.Context(), userID, spaceID, relPrefix); err != nil {
|
||||
if strings.Contains(err.Error(), "access denied") {
|
||||
http.Error(w, "access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
@@ -41,8 +41,14 @@ func (m *AuthMiddleware) Middleware(next http.Handler) http.Handler {
|
||||
return
|
||||
}
|
||||
|
||||
// Extract token from Authorization header
|
||||
// Extract token from Authorization header.
|
||||
// For GET /files/object, also accept ?token= so markdown images render in-browser.
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
if authHeader == "" && r.Method == http.MethodGet && strings.HasSuffix(r.URL.Path, "/files/object") {
|
||||
if tok := r.URL.Query().Get("token"); tok != "" {
|
||||
authHeader = "Bearer " + tok
|
||||
}
|
||||
}
|
||||
if authHeader == "" {
|
||||
http.Error(w, "Missing authorization header", http.StatusUnauthorized)
|
||||
return
|
||||
|
||||
Reference in New Issue
Block a user