
Vendor Update (#16121)

* update github.com/PuerkitoBio/goquery

* update github.com/alecthomas/chroma

* update github.com/blevesearch/bleve/v2

* update github.com/caddyserver/certmagic

* update github.com/go-enry/go-enry/v2

* update github.com/go-git/go-billy/v5

* update github.com/go-git/go-git/v5

* update github.com/go-redis/redis/v8

* update github.com/go-testfixtures/testfixtures/v3

* update github.com/jaytaylor/html2text

* update github.com/json-iterator/go

* update github.com/klauspost/compress

* update github.com/markbates/goth

* update github.com/mattn/go-isatty

* update github.com/mholt/archiver/v3

* update github.com/microcosm-cc/bluemonday

* update github.com/minio/minio-go/v7

* update github.com/prometheus/client_golang

* update github.com/unrolled/render

* update github.com/xanzy/go-gitlab

* update github.com/yuin/goldmark

* update github.com/yuin/goldmark-highlighting

Co-authored-by: techknowlogick <techknowlogick@gitea.io>
Authored by 6543 on 2021-06-10 16:44:25 +02:00, committed by GitHub
parent f088dc4ea1
commit 86e2789960
819 changed files with 38072 additions and 34969 deletions

View file

@@ -41,11 +41,11 @@ func CompressBlockBound(n int) int {
return n + n/255 + 16
}
func UncompressBlock(src, dst []byte) (int, error) {
func UncompressBlock(src, dst, dict []byte) (int, error) {
if len(src) == 0 {
return 0, nil
}
if di := decodeBlock(dst, src); di >= 0 {
if di := decodeBlock(dst, src, dict); di >= 0 {
return di, nil
}
return 0, lz4errors.ErrInvalidSourceShortBuffer
@@ -187,6 +187,9 @@ func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
}
mLen = si - mLen
if di >= len(dst) {
return 0, lz4errors.ErrInvalidSourceShortBuffer
}
if mLen < 0xF {
dst[di] = byte(mLen)
} else {

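The hunk above threads the new dict argument through the internal block decoder and adds a guard to the block compressor so it fails cleanly when the destination buffer runs short. As a hedged sketch of how a caller keeps that guard from tripping, the destination can be sized with CompressBlockBound; this uses the exported Compressor, CompressBlockBound and UncompressBlock names that appear later in this diff, with purely illustrative data:

package main

import (
    "fmt"

    "github.com/pierrec/lz4/v4"
)

func main() {
    data := []byte("hello hello hello hello hello hello") // illustrative input
    // Worst-case output size is n + n/255 + 16 bytes, so a buffer of
    // CompressBlockBound(len(data)) bytes can never be too short.
    buf := make([]byte, lz4.CompressBlockBound(len(data)))

    var c lz4.Compressor
    n, err := c.CompressBlock(data, buf)
    if err != nil {
        panic(err)
    }
    fmt.Printf("compressed %d bytes into %d\n", len(data), n)

    // Decompression needs a destination at least as large as the original.
    out := make([]byte, len(data))
    m, err := lz4.UncompressBlock(buf[:n], out)
    if err != nil {
        panic(err)
    }
    fmt.Printf("decompressed back to %d bytes\n", m)
}
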
View file

@@ -1,4 +1,3 @@
// +build amd64 arm
// +build !appengine
// +build gc
// +build !noasm
@@ -6,4 +5,4 @@
package lz4block
//go:noescape
func decodeBlock(dst, src []byte) int
func decodeBlock(dst, src, dict []byte) int

View file

@@ -16,9 +16,11 @@
// R11 &dst
// R12 short output end
// R13 short input end
// func decodeBlock(dst, src []byte) int
// using 50 bytes of stack currently
TEXT ·decodeBlock(SB), NOSPLIT, $64-56
// R14 &dict
// R15 &dict + len(dict)
// func decodeBlock(dst, src, dict []byte) int
TEXT ·decodeBlock(SB), NOSPLIT, $48-80
MOVQ dst_base+0(FP), DI
MOVQ DI, R11
MOVQ dst_len+8(FP), R8
@@ -30,6 +32,10 @@ TEXT ·decodeBlock(SB), NOSPLIT, $64-56
JE err_corrupt
ADDQ SI, R9
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
ADDQ R14, R15
// shortcut ends
// short output end
MOVQ R8, R12
@@ -96,6 +102,8 @@ loop:
// match length, we already have the offset.
CMPQ CX, $0xF
JEQ match_len_loop_pre
CMPQ DX, R11
JLT match_len_loop_pre
CMPQ DX, $8
JLT match_len_loop_pre
CMPQ AX, R11
@@ -174,6 +182,9 @@ copy_literal:
MOVOU (SI), X0
MOVOU X0, (DI)
ADDQ CX, SI
ADDQ CX, DI
JMP finish_lit_copy
memmove_lit:
@@ -181,18 +192,20 @@ memmove_lit:
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
// spill
// Spill registers. Increment SI, DI now so we don't need to save CX.
ADDQ CX, DI
ADDQ CX, SI
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP) // need len to inc SI, DI after
MOVB DX, 48(SP)
MOVL DX, 40(SP)
CALL runtime·memmove(SB)
// restore registers
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVB 48(SP), DX
MOVL 40(SP), DX
// recalc initial values
MOVQ dst_base+0(FP), R8
@@ -206,9 +219,6 @@ memmove_lit:
SUBQ $16, R13
finish_lit_copy:
ADDQ CX, SI
ADDQ CX, DI
CMPQ SI, R9
JGE end
@@ -278,7 +288,7 @@ copy_match:
// check BX is within dst
// if BX < &dst
CMPQ BX, R11
JLT err_short_buf
JLT copy_match_from_dict
// if offset + match_len < di
LEAQ (BX)(CX*1), AX
@@ -325,21 +335,97 @@ copy_interior_match:
ADDQ CX, DI
JMP loop
copy_match_from_dict:
// CX = match_len
// BX = &dst + (di - offset)
// AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
MOVQ R11, AX
SUBQ BX, AX
// BX = &dict_end - dict_bytes_available
MOVQ R15, BX
SUBQ AX, BX
// check BX is within dict
// if BX < &dict
CMPQ BX, R14
JLT err_short_dict
// if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy
CMPQ CX, AX
JLT memmove_match
// The match stretches over the dictionary and our block
// 1) copy what comes from the dictionary
// AX = dict_bytes_available = copy_size
// BX = &dict_end - copy_size
// CX = match_len
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ BX, 8(SP)
MOVQ AX, 16(SP)
// store extra stuff we want to recover
// spill
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// restore registers
MOVQ 16(SP), AX // copy_size
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX // match_len
// recalc initial values
MOVQ dst_base+0(FP), R8
MOVQ R8, R11 // TODO: make these sensible numbers
ADDQ dst_len+8(FP), R8
MOVQ src_base+24(FP), R9
ADDQ src_len+32(FP), R9
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
ADDQ R14, R15
MOVQ R8, R12
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
// di+=copy_size
ADDQ AX, DI
// 2) copy the rest from the current block
// CX = match_len - copy_size = rest_size
SUBQ AX, CX
MOVQ R11, BX
// check if we have a copy overlap
// AX = &dst + rest_size
MOVQ CX, AX
ADDQ BX, AX
// if &dst + rest_size > di, copy byte by byte
CMPQ AX, DI
JGT copy_match_loop
memmove_match:
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ BX, 8(SP)
MOVQ CX, 16(SP)
// spill
// Spill registers. Increment DI now so we don't need to save CX.
ADDQ CX, DI
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP) // need len to inc SI, DI after
CALL runtime·memmove(SB)
// restore registers
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
// recalc initial values
MOVQ dst_base+0(FP), R8
@@ -351,19 +437,25 @@ memmove_match:
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
ADDQ R14, R15
ADDQ CX, DI
JMP loop
err_corrupt:
MOVQ $-1, ret+48(FP)
MOVQ $-1, ret+72(FP)
RET
err_short_buf:
MOVQ $-2, ret+48(FP)
MOVQ $-2, ret+72(FP)
RET
err_short_dict:
MOVQ $-3, ret+72(FP)
RET
end:
SUBQ R11, DI
MOVQ DI, ret+48(FP)
MOVQ DI, ret+72(FP)
RET

View file

@@ -0,0 +1,15 @@
// +build gc,!noasm
package lz4block
func decodeBlock(dst, src, dict []byte) int {
if len(dict) == 0 {
return decodeBlockNodict(dst, src)
}
return decodeBlockGo(dst, src, dict)
}
// Assembler version of decodeBlock, without linked block support.
//go:noescape
func decodeBlockNodict(dst, src []byte) int

View file

@@ -19,12 +19,12 @@
#define minMatch $4
// func decodeBlock(dst, src []byte) int
TEXT ·decodeBlock(SB), NOFRAME|NOSPLIT, $-4-28
MOVW dst_base +0(FP), dst
MOVW dst_len +4(FP), dstend
MOVW src_base+12(FP), src
MOVW src_len +16(FP), srcend
// func decodeBlockNodict(dst, src []byte) int
TEXT ·decodeBlockNodict(SB), NOFRAME+NOSPLIT, $-4-28
MOVW dst_base +0(FP), dst
MOVW dst_len +4(FP), dstend
MOVW src_base +12(FP), src
MOVW src_len +16(FP), srcend
CMP $0, srcend
BEQ shortSrc

View file

@@ -0,0 +1,7 @@
// +build !amd64,!arm appengine !gc noasm
package lz4block
func decodeBlock(dst, src, dict []byte) int {
return decodeBlockGo(dst, src, dict)
}

View file

@@ -1,13 +1,14 @@
// +build !amd64,!arm appengine !gc noasm
package lz4block
import "encoding/binary"
import (
"encoding/binary"
)
func decodeBlock(dst, src []byte) (ret int) {
func decodeBlockGo(dst, src, dict []byte) (ret int) {
// Restrict capacities so we don't read or write out of bounds.
dst = dst[:len(dst):len(dst)]
src = src[:len(src):len(src)]
dictLen := uint(len(dict))
const hasError = -2
defer func() {
@@ -38,7 +39,7 @@ func decodeBlock(dst, src []byte) (ret int) {
// if the match length (4..18) fits within the literals, then copy
// all 18 bytes, even if not all are part of the literals.
mLen += 4
if offset := u16(src[si:]); mLen <= offset {
if offset := u16(src[si:]); mLen <= offset && offset < di {
i := di - offset
end := i + 18
if end > uint(len(dst)) {
@@ -91,6 +92,38 @@ func decodeBlock(dst, src []byte) (ret int) {
mLen += minMatch
// Copy the match.
if di < offset {
// The match is beyond our block, meaning in the dictionary
if offset-di > mLen {
// The match is entirely contained in the dictionary. Just copy!
copy(dst[di:di+mLen], dict[dictLen+di-offset:dictLen+di-offset+mLen])
di = di + mLen
} else {
// The match stretches over the dictionary and our block
copySize := offset - di
restSize := mLen - copySize
copy(dst[di:di+copySize], dict[dictLen-copySize:])
di = di + copySize
if di < restSize {
// Overlap - we want to copy more than what we have available,
// so copy byte per byte.
copyFrom := 0
endOfMatch := di + restSize
for di < endOfMatch {
dst[di] = dst[copyFrom]
di = di + 1
copyFrom = copyFrom + 1
}
} else {
copy(dst[di:di+restSize], dst[0:restSize])
di = di + restSize
}
}
continue
}
expanded := dst[di-offset:]
if mLen > offset {
// Efficiently copy the match dst[di-offset:di] into the dst slice.
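
The pure-Go decoder above gains the linked-block path: when a match offset reaches back past the start of the current block, the leading bytes come from the tail of the preset dictionary and any remainder is copied from the block itself, byte by byte when it overlaps what was just written. Below is a standalone sketch of that copy logic only, not the library's code, with names simplified and bounds checking omitted:

package main

import "fmt"

// copyMatch copies an LZ4-style back-reference of length mLen at output
// position di. When offset > di the match starts inside the preset
// dictionary; positions before dst[0] map onto the tail of dict.
// Simplified sketch: no bounds or error checking.
func copyMatch(dst []byte, di int, dict []byte, offset, mLen int) int {
    if offset <= di {
        // Match lies entirely within dst; copy byte by byte so an
        // overlapping match (offset < mLen) repeats correctly.
        for i := 0; i < mLen; i++ {
            dst[di+i] = dst[di-offset+i]
        }
        return di + mLen
    }
    // Bytes of the match covered by the dictionary.
    fromDict := offset - di
    if fromDict > mLen {
        fromDict = mLen
    }
    start := len(dict) - (offset - di)
    di += copy(dst[di:di+fromDict], dict[start:start+fromDict])
    // The rest of the match starts at the beginning of dst; copy byte by
    // byte in case it overlaps the bytes just written.
    for i := 0; i < mLen-fromDict; i++ {
        dst[di] = dst[i]
        di++
    }
    return di
}

func main() {
    dict := []byte("abcdefgh")
    dst := make([]byte, 16)
    copy(dst, "XY")                     // two literal bytes already decoded
    di := copyMatch(dst, 2, dict, 6, 7) // match spans the dict tail and the block
    fmt.Println(string(dst[:di]))       // prints XYefghXYe
}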

View file

@@ -127,9 +127,11 @@ func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
blocks <- c
go func() {
defer block.Close(f)
data, err := block.Uncompress(f, size.Get(), false)
data, err := block.Uncompress(f, size.Get(), nil, false)
if err != nil {
b.closeR(err)
// Close the block channel to indicate an error.
close(c)
} else {
c <- data
}
@@ -150,13 +152,24 @@
// on the returned channel.
go func(leg bool) {
defer close(blocks)
skipBlocks := false
for c := range blocks {
buf := <-c
buf, ok := <-c
if !ok {
// A closed channel indicates an error.
// All remaining channels should be discarded.
skipBlocks = true
continue
}
if buf == nil {
// Signal to end the loop.
close(c)
return
}
if skipBlocks {
// A previous error has occurred, skipping remaining channels.
continue
}
// Perform checksum now as the blocks are received in order.
if f.Descriptor.Flags.ContentChecksum() {
_, _ = f.checksum.Write(buf)
@@ -303,12 +316,12 @@ func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, erro
return x, nil
}
func (b *FrameDataBlock) Uncompress(f *Frame, dst []byte, sum bool) ([]byte, error) {
func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) {
if b.Size.Uncompressed() {
n := copy(dst, b.data)
dst = dst[:n]
} else {
n, err := lz4block.UncompressBlock(b.data, dst)
n, err := lz4block.UncompressBlock(b.data, dst, dict)
if err != nil {
return nil, err
}

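In the first hunk above, a worker goroutine that fails to uncompress its block now closes the per-block channel instead of sending on it, and the ordered consumer treats the closed channel as an error marker and discards every block that follows. A generic sketch of this close-as-error pattern, with a hypothetical process function standing in for block decompression:

package main

import (
    "errors"
    "fmt"
)

// process is a stand-in for per-block decompression; it fails on block 2.
func process(i int) ([]byte, error) {
    if i == 2 {
        return nil, errors.New("corrupt block")
    }
    return []byte(fmt.Sprintf("block %d ", i)), nil
}

func main() {
    results := make(chan chan []byte, 4)

    // Producer: one goroutine per block, result channels queued in order.
    go func() {
        defer close(results)
        for i := 0; i < 4; i++ {
            c := make(chan []byte, 1)
            results <- c
            go func(i int, c chan []byte) {
                data, err := process(i)
                if err != nil {
                    // Closing instead of sending signals the error.
                    close(c)
                    return
                }
                c <- data
            }(i, c)
        }
    }()

    // Ordered consumer.
    skip := false
    for c := range results {
        buf, ok := <-c
        if !ok {
            // A closed channel indicates an error; drop what follows.
            skip = true
            continue
        }
        if skip {
            continue
        }
        fmt.Print(string(buf))
    }
    fmt.Println()
}

In the library hunk, a nil buffer is already used as the end-of-stream sentinel, so closing the channel is what distinguishes an error from a normal stop.
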
View file

@@ -77,16 +77,16 @@ func (f *Frame) isLegacy() bool {
return f.Magic == frameMagicLegacy
}
func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
func (f *Frame) ParseHeaders(src io.Reader) error {
if f.Magic > 0 {
// Header already read.
return nil, nil
return nil
}
newFrame:
var err error
if f.Magic, err = f.readUint32(src); err != nil {
return nil, err
return err
}
switch m := f.Magic; {
case m == frameMagic || m == frameMagicLegacy:
@@ -94,19 +94,23 @@ newFrame:
case m>>8 == frameSkipMagic>>8:
skip, err := f.readUint32(src)
if err != nil {
return nil, err
return err
}
if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
return nil, err
return err
}
goto newFrame
default:
return nil, lz4errors.ErrInvalidFrame
return lz4errors.ErrInvalidFrame
}
if err := f.Descriptor.initR(f, src); err != nil {
return nil, err
return err
}
f.checksum.Reset()
return nil
}
func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
return f.Blocks.initR(f, num, src)
}

View file

@@ -35,7 +35,17 @@ func CompressBlockBound(n int) int {
//
// An error is returned if the source data is invalid or the destination buffer is too small.
func UncompressBlock(src, dst []byte) (int, error) {
return lz4block.UncompressBlock(src, dst)
return lz4block.UncompressBlock(src, dst, nil)
}
// UncompressBlockWithDict uncompresses the source buffer into the destination one using a
// dictionary, and returns the uncompressed size.
//
// The destination buffer must be sized appropriately.
//
// An error is returned if the source data is invalid or the destination buffer is too small.
func UncompressBlockWithDict(src, dst, dict []byte) (int, error) {
return lz4block.UncompressBlock(src, dst, dict)
}
// A Compressor compresses data into the LZ4 block format.

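UncompressBlockWithDict is the new exported entry point for decompressing a raw block whose back-references may reach into a preset dictionary. A minimal sketch; the loadBlock helper is hypothetical and stands in for real block data (an empty block decodes to zero bytes, which keeps the sketch runnable, and a nil dictionary behaves like plain UncompressBlock):

package main

import (
    "fmt"

    "github.com/pierrec/lz4/v4"
)

func main() {
    // Hypothetical preset dictionary and a raw LZ4 block compressed against it.
    dict := []byte("the quick brown fox jumps over the lazy dog. ")
    block := loadBlock()

    // The destination must be large enough for the uncompressed data.
    dst := make([]byte, 64*1024)
    n, err := lz4.UncompressBlockWithDict(block, dst, dict)
    if err != nil {
        fmt.Println("decode failed:", err)
        return
    }
    fmt.Printf("decoded %d bytes\n", n)
}

// loadBlock stands in for obtaining raw block data from somewhere real.
func loadBlock() []byte { return nil }
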
View file

@@ -2,10 +2,11 @@ package lz4
import (
"fmt"
"github.com/pierrec/lz4/v4/internal/lz4block"
"github.com/pierrec/lz4/v4/internal/lz4errors"
"reflect"
"runtime"
"github.com/pierrec/lz4/v4/internal/lz4block"
"github.com/pierrec/lz4/v4/internal/lz4errors"
)
//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go

View file

@@ -40,6 +40,7 @@ type Reader struct {
idx int // size of pending data
handler func(int)
cum uint32
dict []byte
}
func (*Reader) private() {}
@@ -77,6 +78,15 @@ func (r *Reader) isNotConcurrent() bool {
}
func (r *Reader) init() error {
err := r.frame.ParseHeaders(r.src)
if err != nil {
return err
}
if !r.frame.Descriptor.Flags.BlockIndependence() {
// We can't decompress dependent blocks concurrently.
// Instead of throwing an error to the user, silently drop concurrency
r.num = 1
}
data, err := r.frame.InitR(r.src, r.num)
if err != nil {
return err
@@ -162,10 +172,20 @@ func (r *Reader) read(buf []byte) (int, error) {
direct = true
dst = buf
}
dst, err = block.Uncompress(r.frame, dst, true)
dst, err = block.Uncompress(r.frame, dst, r.dict, true)
if err != nil {
return 0, err
}
if !r.frame.Descriptor.Flags.BlockIndependence() {
if len(r.dict)+len(dst) > 128*1024 {
preserveSize := 64*1024 - len(dst)
if preserveSize < 0 {
preserveSize = 0
}
r.dict = r.dict[len(r.dict)-preserveSize:]
}
r.dict = append(r.dict, dst...)
}
r.cum += uint32(len(dst))
if direct {
return len(dst), nil

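Together with the new Frame.ParseHeaders above, the reader now inspects the frame descriptor before starting, drops to a single decoding goroutine when blocks are dependent, and keeps roughly the last 64 KB of decoded output as the dictionary for the next block. Callers are unaffected; a linked-block frame reads like any other. A sketch with a hypothetical input file:

package main

import (
    "io"
    "log"
    "os"

    "github.com/pierrec/lz4/v4"
)

func main() {
    // Hypothetical input: an LZ4 frame, possibly written with dependent blocks.
    f, err := os.Open("data.lz4")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // NewReader parses the frame header on first read; for a frame without
    // block independence it decodes sequentially and maintains the
    // dictionary window shown above internally.
    zr := lz4.NewReader(f)
    if _, err := io.Copy(os.Stdout, zr); err != nil {
        log.Fatal(err)
    }
}
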
View file

@@ -89,7 +89,7 @@ func (w *Writer) Write(buf []byte) (n int, err error) {
zn := len(w.data)
for len(buf) > 0 {
if w.idx == 0 && len(buf) >= zn {
if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn {
// Avoid a copy as there is enough data for a block.
if err = w.write(buf[:zn], false); err != nil {
return