simplify HardLimitReader by using LimitReader for internal usage (#17218)

Klaus Post
2023-05-16 13:14:37 -07:00
committed by GitHub
parent 413549bcf5
commit aaf1abc993
6 changed files with 16 additions and 31 deletions

@@ -22,6 +22,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"io"
 	"math/rand"
 	"net/http"
 	"sort"
@@ -604,9 +605,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 	defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
 	parts := make([]CompletePart, len(objInfo.Parts))
 	for i, part := range objInfo.Parts {
-		hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
+		hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize)
 		if err != nil {
-			return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
+			return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
 		}
 		pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
 			part.Number,
@@ -638,9 +639,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		return err
 	}
-	hr, err := hash.NewLimitReader(gr, objInfo.Size, "", "", actualSize)
+	hr, err := hash.NewReader(io.LimitReader(gr, objInfo.Size), objInfo.Size, "", "", actualSize)
 	if err != nil {
-		return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
+		return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
 	}
 	_, err = z.PutObject(ctx,
 		bucket,
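
For context on why the swap is safe here: a hard-limit reader treats a source that yields more than the expected number of bytes as an error, while the standard library's io.LimitReader simply stops returning data at the limit. On the internal decommission path the exact part size is already known, so the softer truncating behavior is sufficient. The sketch below contrasts the two; hardLimitReader is a hypothetical stand-in for illustration, not MinIO's actual implementation.

	// Contrast of hard-limit vs. truncating reads. hardLimitReader is
	// a hypothetical type written for this sketch; io.LimitReader is
	// what this commit uses for the internal path.
	package main

	import (
		"bytes"
		"errors"
		"fmt"
		"io"
	)

	// hardLimitReader fails once more than `remaining` bytes arrive.
	type hardLimitReader struct {
		r         io.Reader
		remaining int64
	}

	func (h *hardLimitReader) Read(p []byte) (int, error) {
		n, err := h.r.Read(p)
		h.remaining -= int64(n)
		if h.remaining < 0 {
			return n, errors.New("input exceeds hard limit")
		}
		return n, err
	}

	func main() {
		src := []byte("0123456789") // 10 bytes of input, limit of 4

		// io.LimitReader: silently stops reading at the limit.
		b, _ := io.ReadAll(io.LimitReader(bytes.NewReader(src), 4))
		fmt.Printf("LimitReader: %q\n", b) // "0123"

		// hardLimitReader: surfaces an error once the limit is exceeded.
		_, err := io.ReadAll(&hardLimitReader{bytes.NewReader(src), 4})
		fmt.Println("hardLimitReader:", err) // input exceeds hard limit
	}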