diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go
index eaf410044..93f5d6a65 100644
--- a/cmd/batch-handlers.go
+++ b/cmd/batch-handlers.go
@@ -413,7 +413,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
 		}
 		defer rd.Close()
 
-		hr, err = hash.NewLimitReader(rd, objInfo.Size, "", "", objInfo.Size)
+		hr, err = hash.NewReader(io.LimitReader(rd, objInfo.Size), objInfo.Size, "", "", objInfo.Size)
 		if err != nil {
 			return err
 		}
diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go
index bbf0dcf85..c530573ab 100644
--- a/cmd/bucket-replication.go
+++ b/cmd/bucket-replication.go
@@ -1485,7 +1485,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 	)
 
 	for _, partInfo := range objInfo.Parts {
-		hr, err = hash.NewLimitReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
+		hr, err = hash.NewReader(io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
 		if err != nil {
 			return err
 		}
diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go
index 9b2ceae34..ad8b4ae0c 100644
--- a/cmd/erasure-object.go
+++ b/cmd/erasure-object.go
@@ -2088,7 +2088,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
 
 	// rehydrate the parts back on disk as per the original xl.meta prior to transition
 	for _, partInfo := range oi.Parts {
-		hr, err := hash.NewLimitReader(gr, partInfo.Size, "", "", partInfo.Size)
+		hr, err := hash.NewReader(io.LimitReader(gr, partInfo.Size), partInfo.Size, "", "", partInfo.Size)
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}
diff --git a/cmd/erasure-server-pool-decom.go b/cmd/erasure-server-pool-decom.go
index 3dfd4d5f7..c70d96805 100644
--- a/cmd/erasure-server-pool-decom.go
+++ b/cmd/erasure-server-pool-decom.go
@@ -22,6 +22,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"io"
 	"math/rand"
 	"net/http"
 	"sort"
@@ -604,9 +605,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
 		parts := make([]CompletePart, len(objInfo.Parts))
 		for i, part := range objInfo.Parts {
-			hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
+			hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize)
 			if err != nil {
-				return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
+				return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
 			}
 			pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
 				part.Number,
@@ -638,9 +639,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		return err
 	}
 
-	hr, err := hash.NewLimitReader(gr, objInfo.Size, "", "", actualSize)
+	hr, err := hash.NewReader(io.LimitReader(gr, objInfo.Size), objInfo.Size, "", "", actualSize)
 	if err != nil {
-		return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
+		return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
 	}
 	_, err = z.PutObject(ctx,
 		bucket,
diff --git a/cmd/erasure-server-pool-rebalance.go b/cmd/erasure-server-pool-rebalance.go
index 69fbe5a03..b41c5b47b 100644
--- a/cmd/erasure-server-pool-rebalance.go
+++ b/cmd/erasure-server-pool-rebalance.go
@@ -22,6 +22,7 @@ import (
 	"encoding/binary"
 	"errors",
 	"fmt"
+	"io"
 	"math"
 	"math/rand"
 	"net/http"
@@ -721,9 +722,9 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string,
 
 		parts := make([]CompletePart, len(oi.Parts))
 		for i, part := range oi.Parts {
-			hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
+			hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize)
 			if err != nil {
-				return fmt.Errorf("rebalanceObject: hash.NewLimitReader() %w", err)
+				return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err)
 			}
 			pi, err := z.PutObjectPart(ctx, bucket, oi.Name, res.UploadID,
 				part.Number,
diff --git a/internal/hash/reader.go b/internal/hash/reader.go
index 1355a9237..b4c987ee9 100644
--- a/internal/hash/reader.go
+++ b/internal/hash/reader.go
@@ -67,6 +67,7 @@ type Reader struct {
 //
 // If size resp. actualSize is unknown at the time of calling
 // NewReader then it should be set to -1.
+// When size is >=0 it *must* match the amount of data provided by r.
 //
 // NewReader may try merge the given size, MD5 and SHA256 values
 // into src - if src is a Reader - to avoid computing the same
@@ -75,10 +76,10 @@ type Reader struct {
 // does not send more content than specified size.
 func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
 	// return hard limited reader
-	return newReader(src, size, md5Hex, sha256Hex, actualSize, true)
+	return newReader(src, size, md5Hex, sha256Hex, actualSize)
 }
 
-func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, hardLimitReader bool) (*Reader, error) {
+func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
 	MD5, err := hex.DecodeString(md5Hex)
 	if err != nil {
 		return nil, BadDigest{ // TODO(aead): Return an error that indicates that an invalid ETag has been specified
@@ -119,12 +120,7 @@ func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 		r.checksum = MD5
 		r.contentSHA256 = SHA256
 		if r.size < 0 && size >= 0 {
-			switch hardLimitReader {
-			case true:
-				r.src = etag.Wrap(ioutil.HardLimitReader(r.src, size), r.src)
-			default:
-				r.src = etag.Wrap(io.LimitReader(r.src, size), r.src)
-			}
+			r.src = etag.Wrap(ioutil.HardLimitReader(r.src, size), r.src)
 			r.size = size
 		}
 		if r.actualSize <= 0 && actualSize >= 0 {
@@ -134,13 +130,7 @@ func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	}
 
 	if size >= 0 {
-		var r io.Reader
-		switch hardLimitReader {
-		case true:
-			r = ioutil.HardLimitReader(src, size)
-		default:
-			r = io.LimitReader(src, size)
-		}
+		r := ioutil.HardLimitReader(src, size)
 		if _, ok := src.(etag.Tagger); !ok {
 			src = etag.NewReader(r, MD5)
 		} else {
@@ -163,13 +153,6 @@ func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	}, nil
 }
 
-// NewLimitReader is similar to NewReader but it will read only up to actualsize specified. It will return
-// EOF if actualsize is reached.
-func NewLimitReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
-	// return io limited reader
-	return newReader(src, size, md5Hex, sha256Hex, actualSize, false)
-}
-
 // ErrInvalidChecksum is returned when an invalid checksum is provided in headers.
 var ErrInvalidChecksum = errors.New("invalid checksum")
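
Note on the semantics: with hash.NewLimitReader removed, hash.NewReader always applies a hard size limit, so callers that deliberately carve a fixed-size part out of a longer stream (as the batch-replication, decommission, rebalance, and restore paths above do with rd/gr) must truncate the source themselves via io.LimitReader before handing it over. A minimal, self-contained sketch of the difference between the two limiting behaviors; hardLimitReader below is a simplified, hypothetical stand-in for MinIO's internal ioutil.HardLimitReader, not the actual implementation:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// hardLimitReader mimics the hard-limit semantics: unlike io.LimitReader,
// which silently returns EOF after n bytes, it reports an error when the
// source yields more data than the declared size.
type hardLimitReader struct {
	r io.Reader
	n int64 // bytes still allowed
}

var errSizeMismatch = errors.New("input provided more data than its declared size")

func (h *hardLimitReader) Read(p []byte) (int, error) {
	n, err := h.r.Read(p)
	h.n -= int64(n)
	if h.n < 0 {
		return n, errSizeMismatch
	}
	return n, err
}

func main() {
	// io.LimitReader silently truncates the oversized input ...
	got, _ := io.ReadAll(io.LimitReader(strings.NewReader("hello world"), 5))
	fmt.Printf("io.LimitReader: %q\n", got) // "hello"

	// ... whereas the hard limit surfaces the size mismatch as an error.
	_, err := io.ReadAll(&hardLimitReader{strings.NewReader("hello world"), 5})
	fmt.Println("hard limit:", err)
}

Under the hard limit now baked into hash.NewReader, a source must deliver at most the declared size; oversized input surfaces as an error instead of being silently dropped, which is what the doc comment added above ("When size is >=0 it *must* match the amount of data provided by r.") pins down.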