simplify HardLimitReader by using LimitReader for internal usage (#17218)

Klaus Post 2023-05-16 13:14:37 -07:00 committed by GitHub
parent 413549bcf5
commit aaf1abc993
6 changed files with 16 additions and 31 deletions
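
The change replaces hash.NewLimitReader at internal call sites with plain hash.NewReader plus an explicit io.LimitReader around the source. Below is a minimal, self-contained sketch of that calling pattern; the payload setup is illustrative only, and the internal/hash import path is an assumption based on the MinIO module layout at the time of this commit.

// Sketch of the calling convention this commit standardizes on.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/minio/minio/internal/hash" // assumed import path
)

func main() {
	payload := bytes.NewReader(make([]byte, 1<<20)) // stand-in for a part/object stream
	size := int64(1 << 20)

	// Before: internal callers asked the hash package for a soft limit:
	//   hr, err := hash.NewLimitReader(payload, size, "", "", size)
	// After: callers truncate with io.LimitReader themselves and hand the
	// result to hash.NewReader, which keeps its hard-limit semantics.
	hr, err := hash.NewReader(io.LimitReader(payload, size), size, "", "", size)
	if err != nil {
		fmt.Println("hash.NewReader:", err)
		return
	}
	n, err := io.Copy(io.Discard, hr)
	fmt.Println(n, err)
}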


@@ -413,7 +413,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
 		}
 		defer rd.Close()
-		hr, err = hash.NewLimitReader(rd, objInfo.Size, "", "", objInfo.Size)
+		hr, err = hash.NewReader(io.LimitReader(rd, objInfo.Size), objInfo.Size, "", "", objInfo.Size)
 		if err != nil {
 			return err
 		}


@@ -1485,7 +1485,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 	)
 	for _, partInfo := range objInfo.Parts {
-		hr, err = hash.NewLimitReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
+		hr, err = hash.NewReader(io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
 		if err != nil {
 			return err
 		}


@@ -2088,7 +2088,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
 	// rehydrate the parts back on disk as per the original xl.meta prior to transition
 	for _, partInfo := range oi.Parts {
-		hr, err := hash.NewLimitReader(gr, partInfo.Size, "", "", partInfo.Size)
+		hr, err := hash.NewReader(io.LimitReader(gr, partInfo.Size), partInfo.Size, "", "", partInfo.Size)
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}


@@ -22,6 +22,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"io"
 	"math/rand"
 	"net/http"
 	"sort"
@@ -604,9 +605,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 	defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
 	parts := make([]CompletePart, len(objInfo.Parts))
 	for i, part := range objInfo.Parts {
-		hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
+		hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize)
 		if err != nil {
-			return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
+			return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
 		}
 		pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
 			part.Number,
@@ -638,9 +639,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		return err
 	}
-	hr, err := hash.NewLimitReader(gr, objInfo.Size, "", "", actualSize)
+	hr, err := hash.NewReader(io.LimitReader(gr, objInfo.Size), objInfo.Size, "", "", actualSize)
 	if err != nil {
-		return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
+		return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
 	}
 	_, err = z.PutObject(ctx,
 		bucket,


@@ -22,6 +22,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"io"
 	"math"
 	"math/rand"
 	"net/http"
@@ -721,9 +722,9 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string,
 	parts := make([]CompletePart, len(oi.Parts))
 	for i, part := range oi.Parts {
-		hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
+		hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize)
 		if err != nil {
-			return fmt.Errorf("rebalanceObject: hash.NewLimitReader() %w", err)
+			return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err)
 		}
 		pi, err := z.PutObjectPart(ctx, bucket, oi.Name, res.UploadID,
 			part.Number,


@@ -67,6 +67,7 @@ type Reader struct {
 //
 // If size resp. actualSize is unknown at the time of calling
 // NewReader then it should be set to -1.
+// When size is >=0 it *must* match the amount of data provided by r.
 //
 // NewReader may try merge the given size, MD5 and SHA256 values
 // into src - if src is a Reader - to avoid computing the same
@@ -75,10 +76,10 @@ type Reader struct {
 // does not send more content than specified size.
 func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
 	// return hard limited reader
-	return newReader(src, size, md5Hex, sha256Hex, actualSize, true)
+	return newReader(src, size, md5Hex, sha256Hex, actualSize)
 }
 
-func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, hardLimitReader bool) (*Reader, error) {
+func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
 	MD5, err := hex.DecodeString(md5Hex)
 	if err != nil {
 		return nil, BadDigest{ // TODO(aead): Return an error that indicates that an invalid ETag has been specified
@@ -119,12 +120,7 @@ func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	r.checksum = MD5
 	r.contentSHA256 = SHA256
 	if r.size < 0 && size >= 0 {
-		switch hardLimitReader {
-		case true:
-			r.src = etag.Wrap(ioutil.HardLimitReader(r.src, size), r.src)
-		default:
-			r.src = etag.Wrap(io.LimitReader(r.src, size), r.src)
-		}
+		r.src = etag.Wrap(ioutil.HardLimitReader(r.src, size), r.src)
 		r.size = size
 	}
 	if r.actualSize <= 0 && actualSize >= 0 {
@@ -134,13 +130,7 @@ func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	}
 	if size >= 0 {
-		var r io.Reader
-		switch hardLimitReader {
-		case true:
-			r = ioutil.HardLimitReader(src, size)
-		default:
-			r = io.LimitReader(src, size)
-		}
+		r := ioutil.HardLimitReader(src, size)
 		if _, ok := src.(etag.Tagger); !ok {
 			src = etag.NewReader(r, MD5)
 		} else {
@@ -163,13 +153,6 @@ func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	}, nil
 }
 
-// NewLimitReader is similar to NewReader but it will read only up to actualsize specified. It will return
-// EOF if actualsize is reached.
-func NewLimitReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
-	// return io limited reader
-	return newReader(src, size, md5Hex, sha256Hex, actualSize, false)
-}
-
 // ErrInvalidChecksum is returned when an invalid checksum is provided in headers.
 var ErrInvalidChecksum = errors.New("invalid checksum")
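
A short sketch of why the doc-comment line added above ("When size is >=0 it *must* match the amount of data provided by r") matters after this simplification. It assumes the same internal/hash import path as the earlier sketch; the exact error value produced by the hard limit is not asserted, only that one occurs when the source carries more bytes than the declared size.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/minio/minio/internal/hash" // assumed import path
)

func drain(r io.Reader) error {
	_, err := io.Copy(io.Discard, r)
	return err
}

func main() {
	oversized := []byte("0123456789") // 10 bytes available
	declared := int64(4)              // only 4 bytes declared

	// hash.NewReader still hard-limits sized input: a source that provides
	// more than the declared size surfaces an error while reading.
	hr, _ := hash.NewReader(bytes.NewReader(oversized), declared, "", "", declared)
	fmt.Println("unwrapped source:", drain(hr)) // expect a non-nil error

	// The internal callers in this commit avoid that by truncating first,
	// so the hash.Reader sees exactly the declared number of bytes.
	hr, _ = hash.NewReader(io.LimitReader(bytes.NewReader(oversized), declared), declared, "", "", declared)
	fmt.Println("io.LimitReader-wrapped source:", drain(hr)) // expect <nil>
}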