feat: improve generation of bundled assets (#8143)

- Replace the current vfsgen with our own bindata generator.
- zstd is used instead of gzip. This reduces the size of the resulting binary by 2MiB: the bundled assets shrink from 13MiB to 11MiB.
- If [the browser accepts zstd encoding](https://caniuse.com/zstd), the compressed bytes can be served directly; otherwise the response falls back to gzip compression, unless that is disabled via `[server].ENABLE_GZIP` (see the sketch after this list).
- Compression and decompression are roughly 4 times faster.
- The generated filesystem is now of type `fs.FS` instead of `http.FileSystem`, which slightly simplifies the generated code and the handling of the assets (a short usage sketch follows the new generator code below).
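
A minimal sketch of that content negotiation, assuming an asset that was zstd-compressed at build time. The `compressAsset`/`serveAsset` helpers and the plain `strings.Contains` check are illustrative stand-ins, not Forgejo's actual API; the real code negotiates via `parseAcceptEncoding` and a `ZstdBytes()` accessor, as the diff below shows.

```go
package main

import (
	"bytes"
	"mime"
	"net/http"
	"path"
	"strings"
	"time"

	"github.com/klauspost/compress/zstd"
)

// compressAsset mirrors what the generator does at build time: compress once
// with zstd at the highest level and keep the result only if it is smaller.
func compressAsset(raw []byte) (compressed []byte, ok bool) {
	enc, _ := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	dst := enc.EncodeAll(raw, nil)
	return dst, len(dst) < len(raw)
}

// serveAsset is an illustrative handler: it sends the stored zstd bytes
// verbatim when the client advertises support, and the plain bytes otherwise
// (which a gzip middleware may still compress on the fly if enabled).
func serveAsset(w http.ResponseWriter, r *http.Request, name string, raw, zstdData []byte) {
	// Simplified check; a real implementation parses the Accept-Encoding header properly.
	if zstdData != nil && strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") {
		if ct := mime.TypeByExtension(path.Ext(name)); ct != "" {
			w.Header().Set("Content-Type", ct)
		} else {
			w.Header().Set("Content-Type", "application/octet-stream")
		}
		w.Header().Set("Content-Encoding", "zstd")
		http.ServeContent(w, r, name, time.Unix(0, 0), bytes.NewReader(zstdData))
		return
	}
	http.ServeContent(w, r, name, time.Unix(0, 0), bytes.NewReader(raw))
}

func main() {
	raw := []byte("body { margin: 0 }") // stand-in for a bundled asset
	zstdData, ok := compressAsset(raw)
	if !ok {
		zstdData = nil // store uncompressed when compression does not help
	}
	http.HandleFunc("/assets/demo.css", func(w http.ResponseWriter, r *http.Request) {
		serveAsset(w, r, "demo.css", raw, zstdData)
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```

Note that the fallback path hands out the raw bytes and leaves any gzip re-compression to the existing `[server].ENABLE_GZIP` handling.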

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/8143
Co-authored-by: Gusted <postmaster@gusted.xyz>
Co-committed-by: Gusted <postmaster@gusted.xyz>
Authored by Gusted on 2025-06-11 09:36:18 +02:00, committed by Earl Warren
parent 39b93f828b
commit 4288c214a4
7 changed files with 311 additions and 76 deletions

@@ -1,14 +0,0 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
//go:build vendor
package main
// Libraries that are included to vendor utilities used during build.
// These libraries will not be included in a normal compilation.
import (
// for embed
_ "github.com/shurcooL/vfsgen"
)

@@ -1,5 +1,6 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
-// SPDX-License-Identifier: MIT
+// Copyright 2025 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: GPL-3.0-or-later
//go:build ignore
@@ -7,15 +8,18 @@ package main
import (
"bytes"
-"crypto/sha1"
+"crypto/sha256"
"fmt"
+"io"
+"io/fs"
"log"
-"net/http"
"os"
+"path"
"path/filepath"
"strconv"
+"text/template"
-"github.com/shurcooL/vfsgen"
+"github.com/klauspost/compress/zstd"
)
func needsUpdate(dir, filename string) (bool, []byte) {
@@ -30,7 +34,7 @@ func needsUpdate(dir, filename string) (bool, []byte) {
oldHash = []byte{}
}
-hasher := sha1.New()
+hasher := sha256.New()
err = filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error {
if err != nil {
@@ -51,7 +55,7 @@ func needsUpdate(dir, filename string) (bool, []byte) {
newHash := hasher.Sum([]byte{})
-if bytes.Compare(oldHash, newHash) != 0 {
+if !bytes.Equal(oldHash, newHash) {
return true, newHash
}
@@ -77,16 +81,268 @@ func main() {
}
fmt.Printf("generating bindata for %s\n", packageName)
-var fsTemplates http.FileSystem = http.Dir(dir)
-err := vfsgen.Generate(fsTemplates, vfsgen.Options{
-PackageName: packageName,
-BuildTags: "bindata",
-VariableName: "Assets",
-Filename: filename,
-UseGlobalModTime: useGlobalModTime,
-})
+root, err := os.OpenRoot(dir)
if err != nil {
-log.Fatalf("%v\n", err)
+log.Fatal(err)
+}
+out, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644)
+if err != nil {
+log.Fatal(err)
+}
+defer out.Close()
+if err := generate(root.FS(), packageName, useGlobalModTime, out); err != nil {
+log.Fatal(err)
}
_ = os.WriteFile(filename+".hash", newHash, 0o666)
}
type file struct {
Path string
Name string
UncompressedSize int
CompressedData []byte
UncompressedData []byte
}
type direntry struct {
Name string
IsDir bool
}
func generate(fsRoot fs.FS, packageName string, globalTime bool, output io.Writer) error {
enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
if err != nil {
return err
}
files := []file{}
dirs := map[string][]direntry{}
if err := fs.WalkDir(fsRoot, ".", func(filePath string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
entries, err := fs.ReadDir(fsRoot, filePath)
if err != nil {
return err
}
dirEntries := make([]direntry, 0, len(entries))
for _, entry := range entries {
dirEntries = append(dirEntries, direntry{Name: entry.Name(), IsDir: entry.IsDir()})
}
dirs[filePath] = dirEntries
return nil
}
src, err := fs.ReadFile(fsRoot, filePath)
if err != nil {
return err
}
dst := enc.EncodeAll(src, nil)
if len(dst) < len(src) {
files = append(files, file{
Path: filePath,
Name: path.Base(filePath),
UncompressedSize: len(src),
CompressedData: dst,
})
} else {
files = append(files, file{
Path: filePath,
Name: path.Base(filePath),
UncompressedData: src,
})
}
return nil
}); err != nil {
return err
}
return generatedTmpl.Execute(output, map[string]any{
"Packagename": packageName,
"GlobalTime": globalTime,
"Files": files,
"Dirs": dirs,
})
}
var generatedTmpl = template.Must(template.New("").Parse(`// Code generated by efs-gen. DO NOT EDIT.
//go:build bindata
package {{.Packagename}}
import (
"bytes"
"time"
"io"
"io/fs"
"github.com/klauspost/compress/zstd"
)
type normalFile struct {
name string
content []byte
}
type compressedFile struct {
name string
uncompressedSize int64
data []byte
}
var files = map[string]any{
{{- range .Files}}
"{{.Path}}": {{if .CompressedData}}compressedFile{"{{.Name}}", {{.UncompressedSize}}, []byte({{printf "%+q" .CompressedData}})}{{else}}normalFile{"{{.Name}}", []byte({{printf "%+q" .UncompressedData}})}{{end}},
{{- end}}
}
var dirs = map[string][]fs.DirEntry{
{{- range $key, $entry := .Dirs}}
"{{$key}}": {
{{- range $entry}}
direntry{"{{.Name}}", {{.IsDir}}},
{{- end}}
},
{{- end}}
}
type assets struct{}
var Assets = assets{}
func (a assets) Open(name string) (fs.File, error) {
f, ok := files[name]
if !ok {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
}
switch f := f.(type) {
case normalFile:
return file{name: f.name, size: int64(len(f.content)), data: bytes.NewReader(f.content)}, nil
case compressedFile:
r, _ := zstd.NewReader(bytes.NewReader(f.data))
return &compressFile{name: f.name, size: f.uncompressedSize, data: r, content: f.data}, nil
default:
panic("unknown file type")
}
}
func (a assets) ReadDir(name string) ([]fs.DirEntry, error) {
d, ok := dirs[name]
if !ok {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
}
return d, nil
}
type file struct {
name string
size int64
data io.ReadSeeker
}
var _ io.ReadSeeker = (*file)(nil)
func (f file) Stat() (fs.FileInfo, error) {
return fileinfo{name: f.name, size: f.size}, nil
}
func (f file) Read(p []byte) (int, error) {
return f.data.Read(p)
}
func (f file) Seek(offset int64, whence int) (int64, error) {
return f.data.Seek(offset, whence)
}
func (f file) Close() error { return nil }
type compressFile struct {
name string
size int64
data *zstd.Decoder
content []byte
zstdPos int64
seekPos int64
}
var _ io.ReadSeeker = (*compressFile)(nil)
func (f *compressFile) Stat() (fs.FileInfo, error) {
return fileinfo{name: f.name, size: f.size}, nil
}
func (f *compressFile) Read(p []byte) (int, error) {
if f.zstdPos > f.seekPos {
if err := f.data.Reset(bytes.NewReader(f.content)); err != nil {
return 0, err
}
f.zstdPos = 0
}
if f.zstdPos < f.seekPos {
if _, err := io.CopyN(io.Discard, f.data, f.seekPos - f.zstdPos); err != nil {
return 0, err
}
f.zstdPos = f.seekPos
}
n, err := f.data.Read(p)
f.zstdPos += int64(n)
f.seekPos = f.zstdPos
return n, err
}
func (f *compressFile) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
f.seekPos = 0 + offset
case io.SeekCurrent:
f.seekPos += offset
case io.SeekEnd:
f.seekPos = f.size + offset
}
return f.seekPos, nil
}
func (f *compressFile) Close() error {
f.data.Close()
return nil
}
func (f *compressFile) ZstdBytes() []byte { return f.content }
type fileinfo struct {
name string
size int64
}
func (f fileinfo) Name() string { return f.name }
func (f fileinfo) Size() int64 { return f.size }
func (f fileinfo) Mode() fs.FileMode { return 0o444 }
func (f fileinfo) ModTime() time.Time { return {{if .GlobalTime}}GlobalModTime(f.name){{else}}time.Unix(0, 0){{end}} }
func (f fileinfo) IsDir() bool { return false }
func (f fileinfo) Sys() any { return nil }
type direntry struct {
name string
isDir bool
}
func (d direntry) Name() string { return d.name }
func (d direntry) IsDir() bool { return d.isDir }
func (d direntry) Type() fs.FileMode {
if d.isDir {
return 0o755 | fs.ModeDir
}
return 0o444
}
func (direntry) Info() (fs.FileInfo, error) { return nil, fs.ErrNotExist }
`))
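
Because the bundle now satisfies `fs.FS`, it can be consumed with the standard `io/fs` and `net/http` helpers. A minimal sketch, assuming `assets` is wired to a generated `Assets` value and the asset path exists; Forgejo itself goes through the `assetfs` layers and `modules/public` changed below rather than `http.FileServerFS`.

```go
package main

import (
	"fmt"
	"io/fs"
	"net/http"
)

// assets stands in for a generated bindata filesystem, e.g. the Assets
// variable emitted for a bundled package.
var assets fs.FS

func main() {
	// Read one bundled file through the standard io/fs API.
	data, err := fs.ReadFile(assets, "css/index.css") // path is illustrative
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data), "bytes")

	// Or expose the whole bundle over HTTP (http.FileServerFS needs Go 1.22+).
	http.Handle("/assets/", http.StripPrefix("/assets/", http.FileServerFS(assets)))
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```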

go.mod

@@ -89,7 +89,6 @@ require (
github.com/robfig/cron/v3 v3.0.1
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2
github.com/sergi/go-diff v1.4.0
-github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92
github.com/stretchr/testify v1.10.0
github.com/syndtr/goleveldb v1.0.0
github.com/ulikunitz/xz v0.5.12
@@ -222,7 +221,6 @@ require (
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
-github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skeema/knownhosts v1.3.0 // indirect
github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect
@@ -246,8 +244,6 @@ require (
replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1
-replace github.com/shurcooL/vfsgen => github.com/lunny/vfsgen v0.0.0-20220105142115-2c99e1ffdfa0
replace github.com/nektos/act => code.forgejo.org/forgejo/act v1.26.0
replace github.com/mholt/archiver/v3 => code.forgejo.org/forgejo/archiver/v3 v3.5.1

go.sum

@@ -384,8 +384,6 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ=
github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
-github.com/lunny/vfsgen v0.0.0-20220105142115-2c99e1ffdfa0 h1:F/3FfGmKdiKFa8kL3YrpZ7pe9H4l4AzA1pbaOUnRvPI=
-github.com/lunny/vfsgen v0.0.0-20220105142115-2c99e1ffdfa0/go.mod h1:JEfTc3+2DF9Z4PXhLLvXL42zexJyh8rIq3OzUj/0rAk=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
@@ -504,8 +502,6 @@ github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCw
github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs=
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
-github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
-github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=

@@ -5,10 +5,10 @@ package assetfs
import (
"context"
+"errors"
"fmt"
"io"
"io/fs"
-"net/http"
"os"
"path/filepath"
"slices"
@@ -25,7 +25,7 @@ import (
// Layer represents a layer in a layered asset file-system. It has a name and works like http.FileSystem
type Layer struct {
name string
-fs http.FileSystem
+fs fs.FS
localPath string
}
@@ -34,10 +34,18 @@
}
// Open opens the named file. The caller is responsible for closing the file.
-func (l *Layer) Open(name string) (http.File, error) {
+func (l *Layer) Open(name string) (fs.File, error) {
return l.fs.Open(name)
}
+func (l *Layer) ReadDir(name string) ([]fs.DirEntry, error) {
+dirEntries, err := fs.ReadDir(l.fs, name)
+if err != nil && errors.Is(err, fs.ErrNotExist) {
+err = nil
+}
+return dirEntries, err
+}
// Local returns a new Layer with the given name, it serves files from the given local path.
func Local(name, base string, sub ...string) *Layer {
// TODO: the old behavior (StaticRootPath might not be absolute), not ideal, just keep the same as before
@@ -48,11 +56,18 @@ func Local(name, base string, sub ...string) *Layer {
panic(fmt.Sprintf("Unable to get absolute path for %q: %v", base, err))
}
root := util.FilePathJoinAbs(base, sub...)
-return &Layer{name: name, fs: http.Dir(root), localPath: root}
+fsRoot, err := os.OpenRoot(root)
+if err != nil {
+if errors.Is(err, fs.ErrNotExist) {
+return nil
+}
+panic(fmt.Sprintf("Unable to open layer %q", err))
+}
+return &Layer{name: name, fs: fsRoot.FS(), localPath: root}
}
// Bindata returns a new Layer with the given name, it serves files from the given bindata asset.
-func Bindata(name string, fs http.FileSystem) *Layer {
+func Bindata(name string, fs fs.FS) *Layer {
return &Layer{name: name, fs: fs}
}
@@ -65,11 +80,11 @@ type LayeredFS struct {
// Layered returns a new LayeredFS with the given layers. The first layer is the top layer.
func Layered(layers ...*Layer) *LayeredFS {
-return &LayeredFS{layers: layers}
+return &LayeredFS{layers: slices.DeleteFunc(layers, func(layer *Layer) bool { return layer == nil })}
}
// Open opens the named file. The caller is responsible for closing the file.
-func (l *LayeredFS) Open(name string) (http.File, error) {
+func (l *LayeredFS) Open(name string) (fs.File, error) {
for _, layer := range l.layers {
f, err := layer.Open(name)
if err == nil || !os.IsNotExist(err) {
@@ -102,29 +117,18 @@ func (l *LayeredFS) ReadLayeredFile(elems ...string) ([]byte, string, error) {
return nil, "", fs.ErrNotExist
}
-func shouldInclude(info fs.FileInfo, fileMode ...bool) bool {
+func shouldInclude(info fs.DirEntry, fileMode ...bool) bool {
if util.CommonSkip(info.Name()) {
return false
}
if len(fileMode) == 0 {
return true
} else if len(fileMode) == 1 {
-return fileMode[0] == !info.Mode().IsDir()
+return fileMode[0] == !info.IsDir()
}
panic("too many arguments for fileMode in shouldInclude")
}
-func readDir(layer *Layer, name string) ([]fs.FileInfo, error) {
-f, err := layer.Open(name)
-if os.IsNotExist(err) {
-return nil, nil
-} else if err != nil {
-return nil, err
-}
-defer f.Close()
-return f.Readdir(-1)
-}
// ListFiles lists files/directories in the given directory. The fileMode controls the returned files.
// * omitted: all files and directories will be returned.
// * true: only files will be returned.
@@ -133,7 +137,7 @@ func readDir(layer *Layer, name string) ([]fs.FileInfo, error) {
func (l *LayeredFS) ListFiles(name string, fileMode ...bool) ([]string, error) {
fileSet := make(container.Set[string])
for _, layer := range l.layers {
-infos, err := readDir(layer, name)
+infos, err := layer.ReadDir(name)
if err != nil {
return nil, err
}
@@ -162,7 +166,7 @@ func listAllFiles(layers []*Layer, name string, fileMode ...bool) ([]string, err
var list func(dir string) error
list = func(dir string) error {
for _, layer := range layers {
-infos, err := readDir(layer, dir)
+infos, err := layer.ReadDir(dir)
if err != nil {
return err
}

@@ -6,6 +6,7 @@ package public
import (
"bytes"
"io"
+"io/fs"
"net/http"
"os"
"path"
@@ -59,7 +60,7 @@ func setWellKnownContentType(w http.ResponseWriter, file string) {
}
}
-func handleRequest(w http.ResponseWriter, req *http.Request, fs http.FileSystem, file string) {
+func handleRequest(w http.ResponseWriter, req *http.Request, fs fs.FS, file string) {
// actually, fs (http.FileSystem) is designed to be a safe interface, relative paths won't bypass its parent directory, it's also fine to do a clean here
f, err := fs.Open(util.PathJoinRelX(file))
if err != nil {
@@ -86,33 +87,31 @@ func handleRequest(w http.ResponseWriter, req *http.Request, fs http.FileSystem,
return
}
-serveContent(w, req, fi, fi.ModTime(), f)
+serveContent(w, req, fi.Name(), fi.ModTime(), f.(io.ReadSeeker))
}
-type GzipBytesProvider interface {
+type ZstdBytesProvider interface {
-GzipBytes() []byte
+ZstdBytes() []byte
}
// serveContent serve http content
-func serveContent(w http.ResponseWriter, req *http.Request, fi os.FileInfo, modtime time.Time, content io.ReadSeeker) {
+func serveContent(w http.ResponseWriter, req *http.Request, name string, modtime time.Time, content io.ReadSeeker) {
-setWellKnownContentType(w, fi.Name())
+setWellKnownContentType(w, name)
encodings := parseAcceptEncoding(req.Header.Get("Accept-Encoding"))
-if encodings.Contains("gzip") {
+if encodings.Contains("zstd") {
-// try to provide gzip content directly from bindata (provided by vfsgen۰CompressedFileInfo)
+// If the file was compressed, use the bytes directly.
-if compressed, ok := fi.(GzipBytesProvider); ok {
+if compressed, ok := content.(ZstdBytesProvider); ok {
-rdGzip := bytes.NewReader(compressed.GzipBytes())
+rdZstd := bytes.NewReader(compressed.ZstdBytes())
-// all gzipped static files (from bindata) are managed by Gitea, so we can make sure every file has the correct ext name
-// then we can get the correct Content-Type, we do not need to do http.DetectContentType on the decompressed data
if w.Header().Get("Content-Type") == "" {
w.Header().Set("Content-Type", "application/octet-stream")
}
-w.Header().Set("Content-Encoding", "gzip")
+w.Header().Set("Content-Encoding", "zstd")
-httpcache.ServeContentWithCacheControl(w, req, fi.Name(), modtime, rdGzip)
+httpcache.ServeContentWithCacheControl(w, req, name, modtime, rdZstd)
return
}
}
-httpcache.ServeContentWithCacheControl(w, req, fi.Name(), modtime, content)
+httpcache.ServeContentWithCacheControl(w, req, name, modtime, content)
return
}

@@ -12,8 +12,6 @@ import (
"forgejo.org/modules/timeutil"
)
-var _ GzipBytesProvider = (*vfsgen۰CompressedFileInfo)(nil)
// GlobalModTime provide a global mod time for embedded asset files
func GlobalModTime(filename string) time.Time {
return timeutil.GetExecutableModTime()