From c839b64f6a0f29db326e999a575e11dbdfbcfb61 Mon Sep 17 00:00:00 2001
From: Klaus Post
Date: Fri, 26 May 2023 10:57:07 -0700
Subject: [PATCH] fix: compressed+encrypted block overhead (#17289)

---
 cmd/erasure-multipart.go   | 18 ++----------------
 cmd/erasure-server-pool.go |  2 +-
 cmd/erasure-sets.go        |  8 --------
 3 files changed, 3 insertions(+), 25 deletions(-)

diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go
index 0c4d87203..1aec3dfc0 100644
--- a/cmd/erasure-multipart.go
+++ b/cmd/erasure-multipart.go
@@ -499,21 +499,6 @@ func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object
 	return er.newMultipartUpload(ctx, bucket, object, opts)
 }
 
-// CopyObjectPart - reads incoming stream and internally erasure codes
-// them. This call is similar to put object part operation but the source
-// data is read from an existing object.
-//
-// Implements S3 compatible Upload Part Copy API.
-func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
-	partInfo, err := er.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)
-	if err != nil {
-		return pi, toObjectErr(err, dstBucket, dstObject)
-	}
-
-	// Success.
-	return partInfo, nil
-}
-
 // renamePart - renames multipart part to its relevant location under uploadID.
 func renamePart(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, error) {
 	g := errgroup.WithNErrs(len(disks))
@@ -667,7 +652,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
 	case size == -1:
 		if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
-			buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
+			// Account for padding and forced compression overhead and encryption.
+			buffer = make([]byte, data.ActualSize()+256+32+32, data.ActualSize()*2+512)
 		} else {
 			buffer = er.bp.Get()
 			defer er.bp.Put(buffer)
diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go
index 279cafeb8..604406a62 100644
--- a/cmd/erasure-server-pool.go
+++ b/cmd/erasure-server-pool.go
@@ -1459,7 +1459,7 @@ func (z *erasureServerPools) CopyObjectPart(ctx context.Context, srcBucket, srcO
 	}
 
 	return z.PutObjectPart(ctx, destBucket, destObject, uploadID, partID,
-		NewPutObjReader(srcInfo.Reader), dstOpts)
+		srcInfo.PutObjReader, dstOpts)
 }
 
 // PutObjectPart - writes part of an object to hashedSet based on the object name.
diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go
index 9c4172f31..58974d5de 100644
--- a/cmd/erasure-sets.go
+++ b/cmd/erasure-sets.go
@@ -899,14 +899,6 @@ func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object str
 	return set.NewMultipartUpload(ctx, bucket, object, opts)
 }
 
-// Copies a part of an object from source hashedSet to destination hashedSet.
-func (s *erasureSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions,
-) (partInfo PartInfo, err error) {
-	destSet := s.getHashedSet(destObject)
-	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)
-}
-
 // PutObjectPart - writes part of an object to hashedSet based on the object name.
 func (s *erasureSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
 	set := s.getHashedSet(object)
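
Note (not part of the patch): the substantive change above is the buffer sizing in PutObjectPart. When the actual part size is known and smaller than the erasure block size, the staging buffer needs headroom not only for padding but also for forced-compression framing and encryption overhead, which is what the extra +32+32 provides. The minimal Go sketch below only restates that arithmetic; the constant names and the partBufferSize helper are hypothetical and do not exist in the MinIO codebase.

package main

import "fmt"

const (
	paddingOverhead     = 256 // headroom that was already allocated before the fix
	compressionOverhead = 32  // assumed worst-case framing added by forced compression
	encryptionOverhead  = 32  // assumed worst-case overhead added by encryption
)

// partBufferSize mirrors make([]byte, actualSize+256+32+32, actualSize*2+512)
// from the patched code: it returns the length and capacity used for the
// staging buffer of a small part whose actual size is known.
func partBufferSize(actualSize int64) (length, capacity int64) {
	length = actualSize + paddingOverhead + compressionOverhead + encryptionOverhead
	capacity = actualSize*2 + 512
	return length, capacity
}

func main() {
	l, c := partBufferSize(1 << 10) // e.g. a 1 KiB part
	fmt.Println(l, c)               // prints: 1344 2560
}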