Use hash.NewLimitReader for internal multipart calls (#17191)
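Each hunk below feeds a single object stream (rd, r, or gr) through a per-part hash reader, and several of the loops wrap the same stream once per part. A reader that keeps consuming until EOF would drain the whole stream on the first part, so the per-part wrapper has to stop at its declared size. hash.NewLimitReader keeps hash.NewReader's five-argument shape, which is why every hunk is a one-word constructor swap. Its implementation is not part of this diff; what follows is only a minimal sketch of the limiting idea, built on the standard library's io.LimitReader rather than MinIO's internal/hash package.

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for the shared object stream (gr in the decommission and
	// rebalance hunks): fifteen bytes split into three 5-byte parts.
	object := strings.NewReader("aaaaabbbbbccccc")

	for i, size := range []int64{5, 5, 5} {
		// io.LimitReader returns EOF at the part boundary, so the next
		// iteration resumes exactly where this part ended. An unbounded
		// reader would consume the entire object here on the first part.
		part := io.LimitReader(object, size)

		data, err := io.ReadAll(part)
		if err != nil {
			fmt.Println("part", i+1, "error:", err)
			return
		}
		fmt.Printf("part %d: %q\n", i+1, data)
	}
}

Run as-is, this prints part 1: "aaaaa" through part 3: "ccccc"; with the limit removed, part 1 would swallow all fifteen bytes.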
@@ -413,7 +413,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
 	}
 	defer rd.Close()
 
-	hr, err = hash.NewReader(rd, objInfo.Size, "", "", objInfo.Size)
+	hr, err = hash.NewLimitReader(rd, objInfo.Size, "", "", objInfo.Size)
 	if err != nil {
 		return err
 	}
@@ -1485,7 +1485,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 	)
 
 	for _, partInfo := range objInfo.Parts {
-		hr, err = hash.NewReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
+		hr, err = hash.NewLimitReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
 		if err != nil {
 			return err
 		}
@@ -2088,7 +2088,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
 
 	// rehydrate the parts back on disk as per the original xl.meta prior to transition
 	for _, partInfo := range oi.Parts {
-		hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
+		hr, err := hash.NewLimitReader(gr, partInfo.Size, "", "", partInfo.Size)
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}
@@ -604,9 +604,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 	defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
 	parts := make([]CompletePart, len(objInfo.Parts))
 	for i, part := range objInfo.Parts {
-		hr, err := hash.NewReader(gr, part.Size, "", "", part.ActualSize)
+		hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
 		if err != nil {
-			return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
+			return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
 		}
 		pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
 			part.Number,
@@ -638,9 +638,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		return err
 	}
 
-	hr, err := hash.NewReader(gr, objInfo.Size, "", "", actualSize)
+	hr, err := hash.NewLimitReader(gr, objInfo.Size, "", "", actualSize)
 	if err != nil {
-		return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
+		return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
 	}
 	_, err = z.PutObject(ctx,
 		bucket,
@@ -721,9 +721,9 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string,
 
 	parts := make([]CompletePart, len(oi.Parts))
 	for i, part := range oi.Parts {
-		hr, err := hash.NewReader(gr, part.Size, "", "", part.ActualSize)
+		hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
 		if err != nil {
-			return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err)
+			return fmt.Errorf("rebalanceObject: hash.NewLimitReader() %w", err)
 		}
 		pi, err := z.PutObjectPart(ctx, bucket, oi.Name, res.UploadID,
 			part.Number,
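Since internal/hash itself is not part of this diff, here is a hedged sketch of what a constructor with this call shape could look like. The names newLimitReader and limitHashReader are hypothetical, the md5Hex/sha256Hex validation the real package performs is elided, and none of this is MinIO's actual code; it only shows how limiting composes with hashing behind the five-argument signature used in the hunks above.

package hashsketch

import (
	"crypto/sha256"
	"hash"
	"io"
)

// limitHashReader is a hypothetical sketch matching the call shape
// hash.NewLimitReader(src, size, md5Hex, sha256Hex, actualSize): it
// hashes what it reads but never reads past the declared size.
type limitHashReader struct {
	src        io.Reader // already bounded to size bytes
	sha        hash.Hash
	actualSize int64
}

func newLimitReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*limitHashReader, error) {
	// The real constructor validates md5Hex/sha256Hex; elided here.
	return &limitHashReader{
		src:        io.LimitReader(src, size),
		sha:        sha256.New(),
		actualSize: actualSize,
	}, nil
}

func (r *limitHashReader) Read(p []byte) (int, error) {
	n, err := r.src.Read(p)
	if n > 0 {
		r.sha.Write(p[:n]) // hash exactly the bytes handed to the caller
	}
	return n, err
}

Because the signature is unchanged, every call site in this commit keeps its surrounding error handling and only gains the boundary on the shared stream.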