Use hash.NewLimitReader for internal multipart calls (#17191)

Authored by Poorna on 2023-05-12 11:19:08 -07:00; committed by GitHub
parent 203755793c
commit e07c2ab868
9 changed files with 91 additions and 11 deletions
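
The change swaps hash.NewReader for hash.NewLimitReader at every internal multipart call site where a single stream is read part by part. Below is a minimal sketch of why capping each part's reader at the part boundary matters, using io.LimitReader from the standard library as a stand-in for the limiting behavior (an assumption for illustration only; MinIO's hash.Reader additionally verifies MD5/SHA-256 hex digests and declared sizes, which the sketch omits):

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	// One source stream backing a two-part upload, 9 bytes per part.
	src := bytes.NewReader([]byte("part-one!part-two!"))
	partSizes := []int64{9, 9}

	for i, size := range partSizes {
		// Cap each part's reader at the part boundary. Without the cap,
		// reading part 1 would drain the whole stream and leave nothing
		// for part 2.
		data, err := io.ReadAll(io.LimitReader(src, size))
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		fmt.Printf("part %d: %q\n", i+1, data)
	}
}

Each hunk below follows the same pattern: the per-part reader handed to PutObjectPart (or PutObject) is bounded by the part's size, so the shared stream advances exactly one part per iteration.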

@@ -413,7 +413,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
 	}
 	defer rd.Close()
-	hr, err = hash.NewReader(rd, objInfo.Size, "", "", objInfo.Size)
+	hr, err = hash.NewLimitReader(rd, objInfo.Size, "", "", objInfo.Size)
 	if err != nil {
 		return err
 	}

@@ -1485,7 +1485,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 	)
 	for _, partInfo := range objInfo.Parts {
-		hr, err = hash.NewReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
+		hr, err = hash.NewLimitReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
 		if err != nil {
 			return err
 		}

@@ -2088,7 +2088,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
 	// rehydrate the parts back on disk as per the original xl.meta prior to transition
 	for _, partInfo := range oi.Parts {
-		hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
+		hr, err := hash.NewLimitReader(gr, partInfo.Size, "", "", partInfo.Size)
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}

@@ -604,9 +604,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
 		parts := make([]CompletePart, len(objInfo.Parts))
 		for i, part := range objInfo.Parts {
-			hr, err := hash.NewReader(gr, part.Size, "", "", part.ActualSize)
+			hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
 			if err != nil {
-				return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
+				return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
 			}
 			pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
 				part.Number,
@@ -638,9 +638,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		return err
 	}
-	hr, err := hash.NewReader(gr, objInfo.Size, "", "", actualSize)
+	hr, err := hash.NewLimitReader(gr, objInfo.Size, "", "", actualSize)
 	if err != nil {
-		return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
+		return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
 	}
 	_, err = z.PutObject(ctx,
 		bucket,

@@ -721,9 +721,9 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string,
 	parts := make([]CompletePart, len(oi.Parts))
 	for i, part := range oi.Parts {
-		hr, err := hash.NewReader(gr, part.Size, "", "", part.ActualSize)
+		hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
 		if err != nil {
-			return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err)
+			return fmt.Errorf("rebalanceObject: hash.NewLimitReader() %w", err)
 		}
 		pi, err := z.PutObjectPart(ctx, bucket, oi.Name, res.UploadID,
 			part.Number,