Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions server/configurations/local.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,16 @@ s3:
endpoint:
region:
bucket:
# If clients (mobile apps, browsers) reach MinIO at a different address than museum does
# internally, set public_endpoint to the externally reachable host:port. Museum will use
# this address when generating pre-signed URLs handed to clients, while continuing to use
# endpoint for its own S3 API calls (HeadObject etc.).
#
# This is the common case when self-hosting on a LAN: museum reaches MinIO via Docker
# service DNS (endpoint: minio:3200) or socat (endpoint: localhost:3200), but mobile
# clients need a LAN IP in their upload URLs.
#
# public_endpoint: 192.168.1.100:3200
wasabi-eu-central-2-v3:
# are_local_buckets: true
# use_path_style_urls: true
Expand All @@ -169,6 +179,7 @@ s3:
endpoint:
region:
bucket:
# public_endpoint: 192.168.1.100:3200
# If enabled, this causes us to opt the object out of the compliance
# lock when the object is deleted. See "Wasabi Compliance".
#
Expand All @@ -182,6 +193,7 @@ s3:
endpoint:
region:
bucket:
# public_endpoint: 192.168.1.100:3200
wasabi-eu-central-2-derived:
key:
secret:
Expand Down
18 changes: 10 additions & 8 deletions server/pkg/controller/file.go
Original file line number Diff line number Diff line change
Expand Up @@ -326,7 +326,7 @@ func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count
if err != nil {
return []ente.UploadURL{}, stacktrace.Propagate(err, "")
}
s3Client := c.S3Config.GetHotS3Client()
s3Client := c.S3Config.GetHotS3PresignClient()
dc := c.S3Config.GetHotDataCenter()
bucket := c.S3Config.GetHotBucket()
urls := make([]ente.UploadURL, 0)
Expand Down Expand Up @@ -362,7 +362,7 @@ func (c *FileController) GetUploadURLWithMetadata(ctx context.Context, userID in
if err := c.UsageCtrl.CanUploadFile(ctx, userID, &req.ContentLength, app); err != nil {
return ente.UploadURL{}, stacktrace.Propagate(err, "")
}
s3Client := c.S3Config.GetHotS3Client()
s3Client := c.S3Config.GetHotS3PresignClient()
dc := c.S3Config.GetHotDataCenter()
bucket := c.S3Config.GetHotBucket()
objectKey := strconv.FormatInt(userID, 10) + "/" + uuid.NewString()
Expand Down Expand Up @@ -867,7 +867,7 @@ func (c *FileController) cleanupDeletedFile(qItem repo.QueueItem) {
}

func (c *FileController) getHotDcSignedUrl(objectKey string, objType ente.ObjectType) (string, error) {
s3Client := c.S3Config.GetHotS3Client()
s3Client := c.S3Config.GetHotS3PresignClient()
input := &s3.GetObjectInput{
Bucket: c.S3Config.GetHotBucket(),
Key: &objectKey,
Expand All @@ -881,7 +881,7 @@ func (c *FileController) getHotDcSignedUrl(objectKey string, objType ente.Object
}

func (c *FileController) getPreSignedURLForDC(objectKey string, dc string, objType ente.ObjectType) (string, error) {
s3Client := c.S3Config.GetS3Client(dc)
s3Client := c.S3Config.GetS3PresignClient(dc)
input := &s3.GetObjectInput{
Bucket: c.S3Config.GetBucket(dc),
Key: &objectKey,
Expand Down Expand Up @@ -1069,6 +1069,7 @@ func (c *FileController) GetMultipartUploadURLs(ctx context.Context, userID int6
return ente.MultipartUploadURLs{}, stacktrace.Propagate(err, "")
}
s3Client := c.S3Config.GetHotS3Client()
s3PresignClient := c.S3Config.GetHotS3PresignClient()
dc := c.S3Config.GetHotDataCenter()
bucket := c.S3Config.GetHotBucket()
objectKey := strconv.FormatInt(userID, 10) + "/" + uuid.NewString()
Expand All @@ -1086,14 +1087,14 @@ func (c *FileController) GetMultipartUploadURLs(ctx context.Context, userID int6
multipartUploadURLs := ente.MultipartUploadURLs{ObjectKey: objectKey}
urls := make([]string, 0)
for i := 0; i < count; i++ {
url, err := c.getPartURL(*s3Client, objectKey, int64(i+1), r.UploadId, nil, nil)
url, err := c.getPartURL(*s3PresignClient, objectKey, int64(i+1), r.UploadId, nil, nil)
if err != nil {
return multipartUploadURLs, stacktrace.Propagate(err, "")
}
urls = append(urls, url)
}
multipartUploadURLs.PartURLs = urls
r2, _ := s3Client.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
r2, _ := s3PresignClient.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
Bucket: c.S3Config.GetHotBucket(),
Key: &objectKey,
UploadId: r.UploadId,
Expand Down Expand Up @@ -1141,6 +1142,7 @@ func (c *FileController) GetMultipartUploadURLWithMetadata(ctx context.Context,
return ente.MultipartUploadURLs{}, stacktrace.Propagate(err, "")
}
s3Client := c.S3Config.GetHotS3Client()
s3PresignClient := c.S3Config.GetHotS3PresignClient()
dc := c.S3Config.GetHotDataCenter()
bucket := c.S3Config.GetHotBucket()
objectKey := strconv.FormatInt(userID, 10) + "/" + uuid.NewString()
Expand All @@ -1161,14 +1163,14 @@ func (c *FileController) GetMultipartUploadURLWithMetadata(ctx context.Context,
length := partLengths[i]
lengthCopy := length
checksumCopy := normalizedChecksums[i]
url, err := c.getPartURL(*s3Client, objectKey, partNumber, r.UploadId, &lengthCopy, &checksumCopy)
url, err := c.getPartURL(*s3PresignClient, objectKey, partNumber, r.UploadId, &lengthCopy, &checksumCopy)
if err != nil {
return multipartUploadURLs, stacktrace.Propagate(err, "")
}
urls = append(urls, url)
}
multipartUploadURLs.PartURLs = urls
r2, _ := s3Client.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
r2, _ := s3PresignClient.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
Bucket: c.S3Config.GetHotBucket(),
Key: &objectKey,
UploadId: r.UploadId,
Expand Down
9 changes: 5 additions & 4 deletions server/pkg/controller/filedata/s3.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ import (
const PreSignedRequestValidityDuration = 7 * 24 * stime.Hour

func (c *Controller) getUploadURL(dc string, objectKey string) (*ente.UploadURL, error) {
s3Client := c.S3Config.GetS3Client(dc)
s3Client := c.S3Config.GetS3PresignClient(dc)
r, _ := s3Client.PutObjectRequest(&s3.PutObjectInput{
Bucket: c.S3Config.GetBucket(dc),
Key: &objectKey,
Expand All @@ -45,6 +45,7 @@ func (c *Controller) getUploadURL(dc string, objectKey string) (*ente.UploadURL,
}
func (c *Controller) getMultiPartUploadURL(dc string, objectKey string, count *int64) (*ente.MultipartUploadURLs, error) {
s3Client := c.S3Config.GetS3Client(dc)
s3PresignClient := c.S3Config.GetS3PresignClient(dc)
bucket := c.S3Config.GetBucket(dc)
r, err := s3Client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: bucket,
Expand All @@ -60,7 +61,7 @@ func (c *Controller) getMultiPartUploadURL(dc string, objectKey string, count *i
multipartUploadURLs := ente.MultipartUploadURLs{ObjectKey: objectKey}
urls := make([]string, 0)
for i := int64(1); i <= *count; i++ {
partReq, _ := s3Client.UploadPartRequest(&s3.UploadPartInput{
partReq, _ := s3PresignClient.UploadPartRequest(&s3.UploadPartInput{
Bucket: bucket,
Key: &objectKey,
UploadId: r.UploadId,
Expand All @@ -73,7 +74,7 @@ func (c *Controller) getMultiPartUploadURL(dc string, objectKey string, count *i
urls = append(urls, partUrl)
}
multipartUploadURLs.PartURLs = urls
r2, _ := s3Client.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
r2, _ := s3PresignClient.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
Bucket: bucket,
Key: &objectKey,
UploadId: r.UploadId,
Expand All @@ -87,7 +88,7 @@ func (c *Controller) getMultiPartUploadURL(dc string, objectKey string, count *i
}

func (c *Controller) signedUrlGet(dc string, objectKey string) (*ente.UploadURL, error) {
s3Client := c.S3Config.GetS3Client(dc)
s3Client := c.S3Config.GetS3PresignClient(dc)
input := &s3.GetObjectInput{
Bucket: c.S3Config.GetBucket(dc),
Key: &objectKey,
Expand Down
43 changes: 43 additions & 0 deletions server/pkg/utils/s3config/s3config.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,9 @@ type S3Config struct {
s3Configs map[string]*aws.Config
// A map from data centers to pre-created S3 clients
s3Clients map[string]s3.S3
// A map from data centers to S3 clients configured with public_endpoint for presigning URLs.
// If public_endpoint is not set for a DC, the regular s3Client is used for presigning.
s3PresignClients map[string]s3.S3
// Indicates if compliance is enabled for the Wasabi DC.
isWasabiComplianceEnabled bool
// Indicates if local minio buckets are being used. Enables various
Expand Down Expand Up @@ -123,6 +126,7 @@ func (config *S3Config) initialize() {
config.buckets = make(map[string]string)
config.s3Configs = make(map[string]*aws.Config)
config.s3Clients = make(map[string]s3.S3)
config.s3PresignClients = make(map[string]s3.S3)

usePathStyleURLs := viper.GetBool("s3.use_path_style_urls")
areLocalBuckets := viper.GetBool("s3.are_local_buckets")
Expand All @@ -149,6 +153,29 @@ func (config *S3Config) initialize() {
s3Client := *s3.New(s3Session)
config.s3Configs[dc] = &s3Config
config.s3Clients[dc] = s3Client
// If a public_endpoint is configured, create a separate client for presigning URLs.
// This allows internal operations (HeadObject etc.) to use a Docker-internal hostname
// while pre-signed URLs served to clients use the externally-accessible endpoint.
if publicEndpoint := viper.GetString("s3." + dc + ".public_endpoint"); publicEndpoint != "" {
presignConfig := aws.Config{
Credentials: credentials.NewStaticCredentials(viper.GetString("s3."+dc+".key"),
viper.GetString("s3."+dc+".secret"), ""),
Endpoint: aws.String(publicEndpoint),
Region: aws.String(viper.GetString("s3." + dc + ".region")),
}
if usePathStyleURLs || viper.GetBool("s3."+dc+".use_path_style_urls") || areLocalBuckets {
presignConfig.S3ForcePathStyle = aws.Bool(true)
}
if areLocalBuckets || viper.GetBool("s3."+dc+".disable_ssl") {
presignConfig.DisableSSL = aws.Bool(true)
}
presignSession, presignErr := session.NewSession(&presignConfig)
if presignErr != nil {
log.Fatal("Could not create presign session for " + dc)
}
config.s3PresignClients[dc] = *s3.New(presignSession)
log.Infof("Using public_endpoint %s for presigning URLs in %s", publicEndpoint, dc)
}
if dc == dcWasabiEuropeCentral_v3 {
config.isWasabiComplianceEnabled = viper.GetBool("s3." + dc + ".compliance")
}
Expand Down Expand Up @@ -198,6 +225,16 @@ func (config *S3Config) GetS3Client(dcOrBucketID string) s3.S3 {
return config.s3Clients[dcOrBucketID]
}

// GetS3PresignClient returns the S3 client that should be used when
// generating pre-signed URLs for the given DC (or bucket ID).
//
// When a public_endpoint has been configured for the DC a dedicated presign
// client (pointing at the externally reachable address) exists and is
// returned; otherwise the regular client doubles as the presign client.
func (config *S3Config) GetS3PresignClient(dcOrBucketID string) s3.S3 {
	client, hasDedicatedPresign := config.s3PresignClients[dcOrBucketID]
	if !hasDedicatedPresign {
		// No public_endpoint configured for this DC; fall back to the
		// client used for regular S3 API calls.
		return config.s3Clients[dcOrBucketID]
	}
	return client
}

// GetHotDataCenter returns the name of the data center currently acting as
// the hot storage tier.
func (config *S3Config) GetHotDataCenter() string {
	return config.hotDC
}
Expand All @@ -219,6 +256,12 @@ func (config *S3Config) GetHotS3Client() *s3.S3 {
return &s3Client
}

// GetHotS3PresignClient returns the S3 client to use for generating
// pre-signed URLs against the hot storage data center. If no public_endpoint
// is configured for the hot DC this is the same client as GetHotS3Client.
func (config *S3Config) GetHotS3PresignClient() *s3.S3 {
	client := config.GetS3PresignClient(config.hotDC)
	return &client
}

// GetDerivedStorageDataCenter returns the name of the data center used for
// derived data (thumbnails, embeddings, etc.).
func (config *S3Config) GetDerivedStorageDataCenter() string {
	return config.derivedStorageDC
}
Expand Down
10 changes: 9 additions & 1 deletion server/quickstart.sh
Original file line number Diff line number Diff line change
Expand Up @@ -177,14 +177,20 @@ s3:
# Set this to false if using subdomain-style URL. This is set to true for ensuring compatibility with MinIO when SSL is enabled.
use_path_style_urls: true
b2-eu-cen:
# Uncomment the below configuration to override the top-level configuration
# Uncomment the below configuration to override the top-level configuration
# are_local_buckets: true
# use_path_style_urls: true
key: $minio_user
secret: $minio_pass
endpoint: localhost:3200
region: eu-central-2
bucket: b2-eu-cen
# If you want to use the mobile app or access from any device other than
# this machine, uncomment and set this to your server's LAN IP or domain.
# Museum uses endpoint for its own internal S3 calls, and public_endpoint
# only for the pre-signed URLs it hands to clients. Without this, uploads
# from external clients will silently fail.
# public_endpoint: 192.168.1.100:3200
wasabi-eu-central-2-v3:
# are_local_buckets: true
# use_path_style_urls: true
Expand All @@ -194,6 +200,7 @@ s3:
region: eu-central-2
bucket: wasabi-eu-central-2-v3
compliance: false
# public_endpoint: 192.168.1.100:3200
scw-eu-fr-v3:
# are_local_buckets: true
# use_path_style_urls: true
Expand All @@ -202,6 +209,7 @@ s3:
endpoint: localhost:3200
region: eu-central-2
bucket: scw-eu-fr-v3
# public_endpoint: 192.168.1.100:3200

# Specify the base endpoints for various web apps
apps:
Expand Down