Pooled and buffered gzip implementation (#5722)
* Pooled and buffered gzip implementation (a rough sketch of the idea follows the commit metadata below)
* Add test for gzip
* Add integration test
* Ensure LFS check within transaction

  The previous code made it possible for a race condition to occur whereby an LFSMetaObject could be checked into the database twice. We should check whether the LFSMetaObject is already in the database and, if not, insert it, all within one transaction (a sketch follows the diff below).
* Try to avoid primary key problem in PostgreSQL

  The integration tests are affected by https://github.com/go-testfixtures/testfixtures/issues/39. If we set the primary key high enough, keep a count of it, and remove the inserted objects at the end of each test, we should not be affected by this issue.
This commit is contained in:
parent 075649572d
commit 7d434376f1
6 changed files with 598 additions and 10 deletions
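Only one of the six changed files is shown in this excerpt; the pooled, buffered gzip middleware itself lives in modules/gzip. As a rough, non-authoritative illustration of the technique the commit title refers to (the minSize threshold, writer pool, and compress helper below are assumptions for this sketch, not the module's real API):

// A minimal sketch of the "pooled and buffered" idea: reuse gzip writers via
// sync.Pool and skip compression for bodies smaller than a threshold.
// minSize and compress are illustrative, not Gitea's actual modules/gzip API.
package main

import (
    "bytes"
    "fmt"
    "io"
    "sync"

    gzipp "github.com/klauspost/compress/gzip"
)

const minSize = 1400 // assumed threshold: smaller bodies are sent uncompressed

var writerPool = sync.Pool{
    New: func() interface{} { return gzipp.NewWriter(nil) }, // reset before each use
}

// compress writes data to dst, gzipping it only when it is large enough,
// borrowing and returning a writer from the pool instead of allocating one.
func compress(dst io.Writer, data []byte) error {
    if len(data) < minSize {
        _, err := dst.Write(data)
        return err
    }
    gw := writerPool.Get().(*gzipp.Writer)
    defer writerPool.Put(gw)
    gw.Reset(dst)
    if _, err := gw.Write(data); err != nil {
        return err
    }
    return gw.Close()
}

func main() {
    payload := bytes.Repeat([]byte("some compressible LFS payload "), 200)
    var out bytes.Buffer
    if err := compress(&out, payload); err != nil {
        panic(err)
    }
    fmt.Printf("raw=%d bytes, compressed=%d bytes\n", len(payload), out.Len())
}

Pooling avoids re-allocating the compressor's internal buffers on every request, and the size check mirrors the gzip.MinSize threshold the tests below rely on.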
129 integrations/lfs_getobject_test.go Normal file
@@ -0,0 +1,129 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package integrations

import (
    "archive/zip"
    "bytes"
    "crypto/sha256"
    "encoding/hex"
    "io"
    "io/ioutil"
    "net/http"
    "testing"

    "code.gitea.io/gitea/models"
    "code.gitea.io/gitea/modules/gzip"
    "code.gitea.io/gitea/modules/lfs"
    "code.gitea.io/gitea/modules/setting"
    "github.com/stretchr/testify/assert"

    gzipp "github.com/klauspost/compress/gzip"
)

func GenerateLFSOid(content io.Reader) (string, error) {
    h := sha256.New()
    if _, err := io.Copy(h, content); err != nil {
        return "", err
    }
    sum := h.Sum(nil)
    return hex.EncodeToString(sum), nil
}

var lfsID = int64(20000)

func storeObjectInRepo(t *testing.T, repositoryID int64, content *[]byte) string {
    oid, err := GenerateLFSOid(bytes.NewReader(*content))
    assert.NoError(t, err)
    var lfsMetaObject *models.LFSMetaObject

    if setting.UsePostgreSQL {
        lfsMetaObject = &models.LFSMetaObject{ID: lfsID, Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID}
    } else {
        lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID}
    }

    lfsID = lfsID + 1
    lfsMetaObject, err = models.NewLFSMetaObject(lfsMetaObject)
    assert.NoError(t, err)
    contentStore := &lfs.ContentStore{BasePath: setting.LFS.ContentPath}
    if !contentStore.Exists(lfsMetaObject) {
        err := contentStore.Put(lfsMetaObject, bytes.NewReader(*content))
        assert.NoError(t, err)
    }
    return oid
}

func doLfs(t *testing.T, content *[]byte, expectGzip bool) {
    prepareTestEnv(t)
    repo, err := models.GetRepositoryByOwnerAndName("user2", "repo1")
    assert.NoError(t, err)
    oid := storeObjectInRepo(t, repo.ID, content)
    defer repo.RemoveLFSMetaObjectByOid(oid)

    session := loginUser(t, "user2")

    // Request OID
    req := NewRequest(t, "GET", "/user2/repo1.git/info/lfs/objects/"+oid+"/test")
    req.Header.Set("Accept-Encoding", "gzip")
    resp := session.MakeRequest(t, req, http.StatusOK)

    contentEncoding := resp.Header().Get("Content-Encoding")
    if !expectGzip || !setting.EnableGzip {
        assert.NotContains(t, contentEncoding, "gzip")

        result := resp.Body.Bytes()
        assert.Equal(t, *content, result)
    } else {
        assert.Contains(t, contentEncoding, "gzip")
        gzippReader, err := gzipp.NewReader(resp.Body)
        assert.NoError(t, err)
        result, err := ioutil.ReadAll(gzippReader)
        assert.NoError(t, err)
        assert.Equal(t, *content, result)
    }
}

func TestGetLFSSmall(t *testing.T) {
    content := []byte("A very small file\n")
    doLfs(t, &content, false)
}

func TestGetLFSLarge(t *testing.T) {
    content := make([]byte, gzip.MinSize*10)
    for i := range content {
        content[i] = byte(i % 256)
    }
    doLfs(t, &content, true)
}

func TestGetLFSGzip(t *testing.T) {
    b := make([]byte, gzip.MinSize*10)
    for i := range b {
        b[i] = byte(i % 256)
    }
    outputBuffer := bytes.NewBuffer([]byte{})
    gzippWriter := gzipp.NewWriter(outputBuffer)
    gzippWriter.Write(b)
    gzippWriter.Close()
    content := outputBuffer.Bytes()
    doLfs(t, &content, false)
}

func TestGetLFSZip(t *testing.T) {
    b := make([]byte, gzip.MinSize*10)
    for i := range b {
        b[i] = byte(i % 256)
    }
    outputBuffer := bytes.NewBuffer([]byte{})
    zipWriter := zip.NewWriter(outputBuffer)
    fileWriter, err := zipWriter.Create("default")
    assert.NoError(t, err)
    fileWriter.Write(b)
    zipWriter.Close()
    content := outputBuffer.Bytes()
    doLfs(t, &content, false)
}
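The "Ensure lfs check within transaction" fix itself lives in models.NewLFSMetaObject (called by storeObjectInRepo above), not in this test file. A minimal sketch of the check-then-insert pattern, written against database/sql with assumed table and column names (Gitea's models actually use xorm):

// Sketch only: the real logic is in models.NewLFSMetaObject; the table and
// column names below are assumptions for illustration.
package sketch

import (
    "database/sql"
)

// insertLFSMetaObjectOnce checks for the (oid, repository) pair and inserts it
// only if missing, inside a single transaction so two concurrent uploads do
// not both insert the same LFSMetaObject.
func insertLFSMetaObjectOnce(db *sql.DB, oid string, size, repoID int64) error {
    tx, err := db.Begin()
    if err != nil {
        return err
    }
    defer tx.Rollback() // safe to call even after a successful Commit

    var existing int64
    err = tx.QueryRow(
        "SELECT id FROM lfs_meta_object WHERE oid = ? AND repository_id = ?",
        oid, repoID,
    ).Scan(&existing)
    switch {
    case err == sql.ErrNoRows:
        if _, err := tx.Exec(
            "INSERT INTO lfs_meta_object (oid, size, repository_id) VALUES (?, ?, ?)",
            oid, size, repoID,
        ); err != nil {
            return err
        }
    case err != nil:
        return err
    }
    return tx.Commit()
}

The explicit ID assignment in storeObjectInRepo (the lfsID counter used under PostgreSQL, removed again via RemoveLFSMetaObjectByOid) is the workaround for the testfixtures sequence issue linked in the commit message.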