[bugfix] Set cache-control max-age dynamically for s3 (#1510)
* [bugfix] set cache-control max-age dynamically for s3

* woops

* double whoops

* time until, thank you linter, bless you, you're the best, no matter what kim says

* aa
tsmethurst authored Feb 16, 2023
1 parent 40b584c commit 6ee0dc8
Showing 4 changed files with 42 additions and 29 deletions.
36 changes: 15 additions & 21 deletions internal/api/fileserver.go
@@ -31,31 +31,25 @@ type Fileserver struct {
fileserver *fileserver.Module
}

// maxAge returns an appropriate max-age value for the
// storage method that's being used.
//
// The default max-age is very long to reflect that we
// never host different files at the same URL (since
// ULIDs are generated per piece of media), so we can
// easily prevent clients having to fetch files repeatedly.
//
// If we're using non-proxying s3, however, the max age is
// significantly shorter, to ensure that clients don't
// cache redirect responses to expired pre-signed URLs.
func maxAge() string {
if config.GetStorageBackend() == "s3" && !config.GetStorageS3Proxy() {
return "max-age=86400" // 24h
}

return "max-age=604800" // 7d
}

func (f *Fileserver) Route(r router.Router, m ...gin.HandlerFunc) {
fileserverGroup := r.AttachGroup("fileserver")

// attach middlewares appropriate for this group
// Attach middlewares appropriate for this group.
fileserverGroup.Use(m...)
fileserverGroup.Use(middleware.CacheControl("private", maxAge()))
// If we're using local storage or proxying s3, we can set a
// long max-age on all file requests to reflect that we
// never host different files at the same URL (since
// ULIDs are generated per piece of media), so we can
// easily prevent clients having to fetch files repeatedly.
//
// If we *are* using non-proxying s3, however, the max age
// must be set dynamically within the request handler,
// based on how long the signed URL has left to live before
// it expires. This ensures that clients won't cache expired
// links. This is done within fileserver/servefile.go.
if config.GetStorageBackend() == "local" || config.GetStorageS3Proxy() {
fileserverGroup.Use(middleware.CacheControl("private", "max-age=604800")) // 7d
}

f.fileserver.Route(fileserverGroup.Handle)
}
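The middleware.CacheControl helper used above lives in the project's internal/middleware package and is not shown in this diff. As a rough sketch only (not the actual implementation), a gin middleware that sets a fixed Cache-Control header could look something like this:

package middleware

import (
	"strings"

	"github.com/gin-gonic/gin"
)

// CacheControl returns a gin handler that sets a static Cache-Control
// header built from the given directives, e.g. "private,max-age=604800".
// Illustrative sketch only; the real helper in internal/middleware may differ.
func CacheControl(directives ...string) gin.HandlerFunc {
	value := strings.Join(directives, ",")
	return func(c *gin.Context) {
		c.Header("Cache-Control", value)
		c.Next()
	}
}

Because the header value is fixed at route-registration time, this approach only works when the max-age is the same for every request, which is why the non-proxying S3 case sets the header per request in servefile.go instead.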
5 changes: 5 additions & 0 deletions internal/api/fileserver/servefile.go
@@ -24,6 +24,7 @@ import (
"net/http"
"strconv"
"strings"
"time"

"codeberg.org/gruf/go-fastcopy"
"github.com/gin-gonic/gin"
@@ -89,6 +90,10 @@ func (m *Module) ServeFile(c *gin.Context) {

if content.URL != nil {
// This is a non-local, non-proxied S3 file we're redirecting to.
// Derive the max-age value from how long the link has left until
// it expires.
maxAge := int(time.Until(content.URL.Expiry).Seconds())
c.Header("Cache-Control", "private,max-age="+strconv.Itoa(maxAge))
c.Redirect(http.StatusFound, content.URL.String())
return
}
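To make the dynamic max-age concrete, here is a small self-contained sketch of the same time.Until calculation, assuming the 24h presign TTL mentioned in the storage code below; the values are hypothetical. A signed URL generated 10 hours ago still has roughly 14 hours (50400 seconds) of validity, and that remaining lifetime becomes the max-age on the redirect:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// Hypothetical values for illustration only.
	presignTTL := 24 * time.Hour
	createdAt := time.Now().Add(-10 * time.Hour) // URL was signed 10h ago
	expiry := createdAt.Add(presignTTL)

	// Same calculation as in ServeFile above.
	maxAge := int(time.Until(expiry).Seconds())
	fmt.Println("Cache-Control: private,max-age=" + strconv.Itoa(maxAge)) // ~50400
}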
5 changes: 3 additions & 2 deletions internal/api/model/content.go
@@ -20,8 +20,9 @@ package model

import (
"io"
"net/url"
"time"

"github.com/superseriousbusiness/gotosocial/internal/storage"
)

// Content wraps everything needed to serve a blob of content (some kind of media) through the API.
@@ -35,7 +36,7 @@ type Content struct {
// Actual content
Content io.ReadCloser
// Resource URL to forward to if the file can be fetched from the storage directly (e.g. a signed S3 URL)
URL *url.URL
URL *storage.PresignedURL
}

// GetContentRequestForm describes a piece of content desired by the caller of the fileserver API.
25 changes: 19 additions & 6 deletions internal/storage/storage.go
@@ -41,6 +41,13 @@ const (
urlCacheExpiryFrequency = time.Minute * 5
)

// PresignedURL represents a pre-signed S3 URL with
// an expiry time.
type PresignedURL struct {
*url.URL
Expiry time.Time // link expires at this time
}

// ErrAlreadyExists is a ptr to underlying storage.ErrAlreadyExists,
// to put the related errors in the same package as our storage wrapper.
var ErrAlreadyExists = storage.ErrAlreadyExists
@@ -54,11 +61,11 @@ type Driver struct {
// S3-only parameters
Proxy bool
Bucket string
PresignedCache *ttl.Cache[string, *url.URL]
PresignedCache *ttl.Cache[string, PresignedURL]
}

// URL will return a presigned GET object URL, but only if running on S3 storage with proxying disabled.
func (d *Driver) URL(ctx context.Context, key string) *url.URL {
func (d *Driver) URL(ctx context.Context, key string) *PresignedURL {
// Check whether S3 *without* proxying is enabled
s3, ok := d.Storage.(*storage.S3Storage)
if !ok || d.Proxy {
@@ -72,7 +79,7 @@ func (d *Driver) URL(ctx context.Context, key string) *url.URL {
d.PresignedCache.Unlock()

if ok {
return e.Value
return &e.Value
}

u, err := s3.Client().PresignedGetObject(ctx, d.Bucket, key, urlCacheTTL, url.Values{
@@ -82,8 +89,14 @@ func (d *Driver) URL(ctx context.Context, key string) *url.URL {
// If URL request fails, fallback is to fetch the file. So ignore the error here
return nil
}
d.PresignedCache.Set(key, u)
return u

psu := PresignedURL{
URL: u,
Expiry: time.Now().Add(urlCacheTTL), // link expires in 24h time
}

d.PresignedCache.Set(key, psu)
return &psu
}

func AutoConfig() (*Driver, error) {
@@ -151,7 +164,7 @@ func NewS3Storage() (*Driver, error) {
}

// ttl should be lower than the expiry used by S3 to avoid serving invalid URLs
presignedCache := ttl.New[string, *url.URL](0, 1000, urlCacheTTL-urlCacheExpiryFrequency)
presignedCache := ttl.New[string, PresignedURL](0, 1000, urlCacheTTL-urlCacheExpiryFrequency)
presignedCache.Start(urlCacheExpiryFrequency)

return &Driver{
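The relationship between the presign TTL and the cache sweep interval matters here: entries are evicted from the in-process cache slightly before S3 stops honouring the signature, so a cached PresignedURL should never outlive its validity. A minimal sketch of that margin, assuming the 24h presign TTL referred to in the comments above (the constants are restated here for illustration and mirror the ones in storage.go):

package main

import (
	"fmt"
	"time"
)

func main() {
	urlCacheTTL := 24 * time.Hour              // how long the signed URL is valid (per the code comment)
	urlCacheExpiryFrequency := 5 * time.Minute // how often the ttl cache sweeps stale entries

	// Cache entries live for the presign TTL minus one sweep interval,
	// so they are always dropped before the signature itself expires.
	entryTTL := urlCacheTTL - urlCacheExpiryFrequency
	fmt.Println("cache entry lifetime:", entryTTL) // 23h55m0s
}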
