Mirror of https://github.com/minio/minio.git (synced 2025-02-03 09:55:59 -05:00)

Use hash.NewLimitReader for internal multipart calls (#17191)

Parent: 203755793c
Commit: e07c2ab868
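
The diff below swaps hash.NewReader for the new hash.NewLimitReader on MinIO-internal data paths (batch replication, multipart replication, transition restore, decommission, rebalance), where the declared size comes from MinIO's own metadata rather than from an untrusted client. As a minimal sketch of the distinction, assuming only what the commit shows: a hard-limit reader fails the read when the source delivers more bytes than declared, while io.LimitReader simply stops at the boundary. The hardLimitReader below is illustrative only; it is not MinIO's internal/ioutil.HardLimitReader, though the semantics are meant to match.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

var errTooLarge = errors.New("input exceeds declared size")

// hardLimitReader errors if the wrapped reader yields more than n bytes.
type hardLimitReader struct {
	r io.Reader
	n int64 // bytes still allowed
}

func (h *hardLimitReader) Read(p []byte) (int, error) {
	n, err := h.r.Read(p)
	h.n -= int64(n)
	if h.n < 0 {
		return n, errTooLarge // source sent more than the declared size
	}
	return n, err
}

func main() {
	src := "16 bytes of data" // 16 bytes, but we declare a size of 8

	// Hard limit: appropriate for untrusted client input (hash.NewReader).
	_, err := io.ReadAll(&hardLimitReader{strings.NewReader(src), 8})
	fmt.Println("hard limit:", err) // input exceeds declared size

	// Soft limit: appropriate for trusted internal streams
	// (hash.NewLimitReader); extra bytes are simply left unread.
	b, _ := io.ReadAll(io.LimitReader(strings.NewReader(src), 8))
	fmt.Printf("soft limit: %q\n", b) // "16 bytes"
}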
@@ -413,7 +413,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
 	}
 	defer rd.Close()

-	hr, err = hash.NewReader(rd, objInfo.Size, "", "", objInfo.Size)
+	hr, err = hash.NewLimitReader(rd, objInfo.Size, "", "", objInfo.Size)
 	if err != nil {
 		return err
 	}
@@ -1485,7 +1485,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 	)

 	for _, partInfo := range objInfo.Parts {
-		hr, err = hash.NewReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
+		hr, err = hash.NewLimitReader(r, partInfo.ActualSize, "", "", partInfo.ActualSize)
 		if err != nil {
 			return err
 		}
@@ -2088,7 +2088,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s

 	// rehydrate the parts back on disk as per the original xl.meta prior to transition
 	for _, partInfo := range oi.Parts {
-		hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
+		hr, err := hash.NewLimitReader(gr, partInfo.Size, "", "", partInfo.Size)
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}
@@ -604,9 +604,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 	defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
 	parts := make([]CompletePart, len(objInfo.Parts))
 	for i, part := range objInfo.Parts {
-		hr, err := hash.NewReader(gr, part.Size, "", "", part.ActualSize)
+		hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
 		if err != nil {
-			return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
+			return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
 		}
 		pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
 			part.Number,
@@ -638,9 +638,9 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		return err
 	}

-	hr, err := hash.NewReader(gr, objInfo.Size, "", "", actualSize)
+	hr, err := hash.NewLimitReader(gr, objInfo.Size, "", "", actualSize)
 	if err != nil {
-		return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
+		return fmt.Errorf("decommissionObject: hash.NewLimitReader() %w", err)
 	}
 	_, err = z.PutObject(ctx,
 		bucket,
@@ -721,9 +721,9 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string,

 	parts := make([]CompletePart, len(oi.Parts))
 	for i, part := range oi.Parts {
-		hr, err := hash.NewReader(gr, part.Size, "", "", part.ActualSize)
+		hr, err := hash.NewLimitReader(gr, part.Size, "", "", part.ActualSize)
 		if err != nil {
-			return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err)
+			return fmt.Errorf("rebalanceObject: hash.NewLimitReader() %w", err)
 		}
 		pi, err := z.PutObjectPart(ctx, bucket, oi.Name, res.UploadID,
 			part.Number,
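All five Go call sites above share one pattern: a single internally produced stream (gr or rd) is consumed piece by piece, one hash reader per part. A soft limit lets each part reader stop exactly at its boundary and hand the rest of the stream to the next part, whereas a hard limit would treat the remaining parts' bytes as an overrun. A minimal sketch of that shared-stream pattern, with illustrative names that are not MinIO internals:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// One stream feeding three sequential 6-byte part uploads.
	gr := strings.NewReader("part-1part-2part-3")

	for i := 1; i <= 3; i++ {
		// io.LimitReader stops at the part boundary and leaves the
		// remainder of gr for the next iteration.
		part, err := io.ReadAll(io.LimitReader(gr, 6))
		if err != nil {
			fmt.Println("part", i, "error:", err)
			return
		}
		fmt.Printf("uploading part %d: %q\n", i, part)
	}
}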
@@ -149,6 +149,11 @@ if [ $? -eq 0 ]; then
 fi

 ./mc mb minio1/newbucket
+# copy large upload to newbucket on minio1
+truncate -s 17M lrgfile
+expected_checksum=$(cat ./lrgfile | md5sum)
+
+./mc cp ./lrgfile minio1/newbucket

 # create a bucket bucket2 on minio1.
 ./mc mb minio1/bucket2
@@ -181,6 +186,19 @@ if [ $? -ne 0 ]; then
 	exit_1;
 fi

+sleep 10
+./mc stat minio3/newbucket/lrgfile
+if [ $? -ne 0 ]; then
+	echo "expected object to be present, exiting.."
+	exit_1;
+fi
+actual_checksum=$(./mc cat minio3/newbucket/lrgfile | md5sum)
+if [ "${expected_checksum}" != "${actual_checksum}" ]; then
+	echo "replication failed on multipart objects expected ${expected_checksum} got ${actual_checksum}"
+	exit
+fi
+rm ./lrgfile
+
 vID=$(./mc stat minio2/newbucket/README.md --json | jq .versionID)
 if [ $? -ne 0 ]; then
 	echo "expecting object to be present. exiting.."
@@ -170,6 +170,11 @@ if [ $? -eq 0 ]; then
 fi

 ./mc mb minio1/newbucket
+# copy large upload to newbucket on minio1
+truncate -s 17M lrgfile
+expected_checksum=$(cat ./lrgfile | md5sum)
+
+./mc cp ./lrgfile minio1/newbucket

 sleep 5
 ./mc stat minio2/newbucket
@@ -210,6 +215,20 @@ if [ $? -ne 0 ]; then
 	exit_1;
 fi

+sleep 10
+./mc stat minio3/newbucket/lrgfile
+if [ $? -ne 0 ]; then
+	echo "expected object to be present, exiting.."
+	exit_1;
+fi
+
+actual_checksum=$(./mc cat minio3/newbucket/lrgfile | md5sum)
+if [ "${expected_checksum}" != "${actual_checksum}" ]; then
+	echo "replication failed on multipart objects expected ${expected_checksum} got ${actual_checksum}"
+	exit
+fi
+rm ./lrgfile
+
 vID=$(./mc stat minio2/newbucket/README.md --json | jq .versionID)
 if [ $? -ne 0 ]; then
 	echo "expecting object to be present. exiting.."
@@ -168,6 +168,11 @@ fi

 ./mc mb minio1/newbucket

+# copy large upload to newbucket on minio1
+truncate -s 17M lrgfile
+expected_checksum=$(cat ./lrgfile | md5sum)
+
+./mc cp ./lrgfile minio1/newbucket
 sleep 5
 ./mc stat minio2/newbucket
 if [ $? -ne 0 ]; then
@@ -211,6 +216,19 @@ if [ $? -eq 0 ]; then
 	exit_1;
 fi

+sleep 10
+./mc stat minio3/newbucket/lrgfile
+if [ $? -ne 0 ]; then
+	echo "expected object to be present, exiting.."
+	exit_1;
+fi
+actual_checksum=$(./mc cat minio3/newbucket/lrgfile | md5sum)
+if [ "${expected_checksum}" != "${actual_checksum}" ]; then
+	echo "replication failed on multipart objects expected ${expected_checksum} got ${actual_checksum}"
+	exit
+fi
+rm ./lrgfile
+
 ./mc mb --with-lock minio3/newbucket-olock
 sleep 5

@@ -69,7 +69,14 @@ type Reader struct {
 // NewReader may try merge the given size, MD5 and SHA256 values
 // into src - if src is a Reader - to avoid computing the same
 // checksums multiple times.
+// NewReader enforces S3 compatibility strictly by ensuring caller
+// does not send more content than specified size.
 func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
+	// return hard limited reader
+	return newReader(src, size, md5Hex, sha256Hex, actualSize, true)
+}
+
+func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, hardLimitReader bool) (*Reader, error) {
 	MD5, err := hex.DecodeString(md5Hex)
 	if err != nil {
 		return nil, BadDigest{ // TODO(aead): Return an error that indicates that an invalid ETag has been specified
@@ -110,7 +117,12 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	r.checksum = MD5
 	r.contentSHA256 = SHA256
 	if r.size < 0 && size >= 0 {
+		switch hardLimitReader {
+		case true:
 			r.src = etag.Wrap(ioutil.HardLimitReader(r.src, size), r.src)
+		default:
+			r.src = etag.Wrap(io.LimitReader(r.src, size), r.src)
+		}
 		r.size = size
 	}
 	if r.actualSize <= 0 && actualSize >= 0 {
@@ -120,7 +132,13 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	}

 	if size >= 0 {
-		r := ioutil.HardLimitReader(src, size)
+		var r io.Reader
+		switch hardLimitReader {
+		case true:
+			r = ioutil.HardLimitReader(src, size)
+		default:
+			r = io.LimitReader(src, size)
+		}
 		if _, ok := src.(etag.Tagger); !ok {
 			src = etag.NewReader(r, MD5)
 		} else {
@@ -143,6 +161,13 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	}, nil
 }

+// NewLimitReader is similar to NewReader but it will read only up to actualsize specified. It will return
+// EOF if actualsize is reached.
+func NewLimitReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
+	// return io limited reader
+	return newReader(src, size, md5Hex, sha256Hex, actualSize, false)
+}
+
 // ErrInvalidChecksum is returned when an invalid checksum is provided in headers.
 var ErrInvalidChecksum = errors.New("invalid checksum")

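A hedged usage sketch of the two constructors, with signatures taken from the diff above. The expected error is described in a comment rather than matched exactly, since the concrete error value is not shown in this commit.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/minio/minio/internal/hash"
)

func main() {
	// Declared size (8) is smaller than the actual payload (16 bytes).
	payload := "16 bytes of data"

	// NewReader hard-limits: S3-facing paths must reject clients that
	// send more content than they declared.
	hr, _ := hash.NewReader(strings.NewReader(payload), 8, "", "", 8)
	_, err := io.Copy(io.Discard, hr)
	fmt.Println("NewReader:", err) // expected: a size-overrun error

	// NewLimitReader soft-limits: internal callers read exactly the
	// declared size and then see EOF, leaving the rest of the stream.
	lr, _ := hash.NewLimitReader(strings.NewReader(payload), 8, "", "", 8)
	n, err := io.Copy(io.Discard, lr)
	fmt.Println("NewLimitReader:", n, err) // expected: 8 <nil>
}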