Backport #23768 (no source code conflict, only some unrelated docs/test-ini conflicts)

Some storages, such as:

* https://developers.cloudflare.com/r2/api/s3/api/
* https://www.backblaze.com/b2/docs/s3_compatible_api.html

do not support the "x-amz-checksum-algorithm" header, but minio recently uses that header with CRC32C by default. So we have to tell minio to use the legacy MD5 checksum.
parent 428d26d4a8
commit b73d1ac1eb
7 changed files with 73 additions and 23 deletions
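The diff below only touches the LFS content store; the storage-layer change that actually tells minio to fall back to MD5 lives in the other changed files and is not shown here. As a rough sketch of what "tell minio to use legacy MD5 checksum" can look like at the client level (the endpoint/bucket/useMD5 wiring is illustrative, not this commit's code; SendContentMd5 is the relevant minio-go v7 option):

// Hypothetical sketch (not part of this commit's diff): uploading to an
// S3-compatible backend such as Cloudflare R2 or Backblaze B2 while sending
// the legacy Content-MD5 checksum instead of x-amz-checksum-algorithm.
package main

import (
	"context"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func upload(ctx context.Context, endpoint, bucket, object, file string, useMD5 bool) error {
	client, err := minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(os.Getenv("S3_KEY"), os.Getenv("S3_SECRET"), ""),
		Secure: true,
	})
	if err != nil {
		return err
	}

	f, err := os.Open(file)
	if err != nil {
		return err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return err
	}

	_, err = client.PutObject(ctx, bucket, object, f, st.Size(), minio.PutObjectOptions{
		ContentType: "application/octet-stream",
		// With SendContentMd5 set, minio-go computes and sends a Content-MD5
		// header, which these storages accept, instead of the newer CRC32C
		// checksum header they reject (per the commit message above).
		SendContentMd5: useMD5,
	})
	return err
}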
@@ -59,15 +59,22 @@ func (s *ContentStore) Put(pointer Pointer, r io.Reader) error {
 		return err
 	}
 
-	// This shouldn't happen but it is sensible to test
-	if written != pointer.Size {
-		if err := s.Delete(p); err != nil {
-			log.Error("Cleaning the LFS OID[%s] failed: %v", pointer.Oid, err)
-		}
-		return ErrSizeMismatch
+	// check again whether there is any error during the Save operation
+	// because some errors might be ignored by the Reader's caller
+	if wrappedRd.lastError != nil && !errors.Is(wrappedRd.lastError, io.EOF) {
+		err = wrappedRd.lastError
+	} else if written != pointer.Size {
+		err = ErrSizeMismatch
 	}
 
-	return nil
+	// if the upload failed, try to delete the file
+	if err != nil {
+		if errDel := s.Delete(p); errDel != nil {
+			log.Error("Cleaning the LFS OID[%s] failed: %v", pointer.Oid, errDel)
+		}
+	}
+
+	return err
 }
 
 // Exists returns true if the object exists in the content store.
@@ -108,6 +115,17 @@ type hashingReader struct {
 	expectedSize int64
 	hash         hash.Hash
 	expectedHash string
+	lastError    error
 }
 
+// recordError records the last error during the Save operation
+// Some callers of the Reader doesn't respect the returned "err"
+// For example, MinIO's Put will ignore errors if the written size could equal to expected size
+// So we must remember the error by ourselves,
+// and later check again whether ErrSizeMismatch or ErrHashMismatch occurs during the Save operation
+func (r *hashingReader) recordError(err error) error {
+	r.lastError = err
+	return err
+}
+
 func (r *hashingReader) Read(b []byte) (int, error) {
@@ -117,22 +135,22 @@ func (r *hashingReader) Read(b []byte) (int, error) {
 		r.currentSize += int64(n)
 		wn, werr := r.hash.Write(b[:n])
 		if wn != n || werr != nil {
-			return n, werr
+			return n, r.recordError(werr)
 		}
 	}
 
-	if err != nil && err == io.EOF {
+	if errors.Is(err, io.EOF) || r.currentSize >= r.expectedSize {
 		if r.currentSize != r.expectedSize {
-			return n, ErrSizeMismatch
+			return n, r.recordError(ErrSizeMismatch)
 		}
 
 		shaStr := hex.EncodeToString(r.hash.Sum(nil))
 		if shaStr != r.expectedHash {
-			return n, ErrHashMismatch
+			return n, r.recordError(ErrHashMismatch)
 		}
 	}
 
-	return n, err
+	return n, r.recordError(err)
 }
 
 func newHashingReader(expectedSize int64, expectedHash string, reader io.Reader) *hashingReader {
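Taken together, the hunks implement one pattern: a wrapping reader that remembers the last error it returned, and a caller that re-checks that error after handing the reader to a storage API which may drop an error delivered along with the final chunk. A simplified, self-contained sketch of that pattern (recordingReader and save are illustrative stand-ins, not Gitea's actual types):

// Simplified sketch of the pattern in this diff: the reader hashes what it
// reads, validates size and digest, and records the last error it returned,
// because the consumer of the reader may swallow that error.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"strings"
)

var (
	errSizeMismatch = errors.New("size mismatch")
	errHashMismatch = errors.New("hash mismatch")
)

type recordingReader struct {
	r            io.Reader
	expectedSize int64
	currentSize  int64
	expectedHash string
	hash         hash.Hash
	lastError    error
}

func (r *recordingReader) recordError(err error) error {
	r.lastError = err
	return err
}

func (r *recordingReader) Read(b []byte) (int, error) {
	n, err := r.r.Read(b)
	if n > 0 {
		r.currentSize += int64(n)
		if wn, werr := r.hash.Write(b[:n]); wn != n || werr != nil {
			return n, r.recordError(werr)
		}
	}
	if errors.Is(err, io.EOF) || r.currentSize >= r.expectedSize {
		if r.currentSize != r.expectedSize {
			return n, r.recordError(errSizeMismatch)
		}
		if hex.EncodeToString(r.hash.Sum(nil)) != r.expectedHash {
			return n, r.recordError(errHashMismatch)
		}
	}
	return n, r.recordError(err)
}

// save mimics an upload API that reads exactly size bytes and, like some
// clients, drops an error returned alongside the final chunk once it already
// has all the bytes it asked for.
func save(r io.Reader, size int64) (int64, error) {
	buf := make([]byte, 4)
	var total int64
	for total < size {
		n, err := r.Read(buf)
		total += int64(n)
		if total >= size {
			return total, nil // error returned with the last chunk is dropped
		}
		if err != nil {
			return total, err
		}
	}
	return total, nil
}

func main() {
	data := "hello world"
	rd := &recordingReader{
		r:            strings.NewReader(data),
		expectedSize: int64(len(data)),
		expectedHash: "not-the-real-digest", // deliberately wrong
		hash:         sha256.New(),
	}

	written, err := save(rd, rd.expectedSize)
	// re-check the reader's own error, as the diff does after s.Save
	if err == nil && rd.lastError != nil && !errors.Is(rd.lastError, io.EOF) {
		err = rd.lastError
	}
	fmt.Println(written, err) // 11 hash mismatch
}

Running this prints "11 hash mismatch": save itself reports success because it received all 11 bytes, and only the re-check of lastError surfaces the hash mismatch, which is exactly the situation the Put change above guards against.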