Return errors in PutObject()/PutObjectPart() if input size is -1. (#5015)
The Amazon S3 API expects every incoming stream to have a content length set, so it was superfluous for us to support an object layer that also handles streams of unknown size. This PR removes that requirement and explicitly errors out if the input stream size is less than zero.
This commit is contained in:
parent
d1712a46a7
commit
0b546ddfd4
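The guard added across the object layer is small and uniform. Below is a minimal, self-contained sketch of the pattern for illustration only; the putObject function and the errInvalidArgument value here are stand-ins, not the project's actual entry points or error definitions.

package main

import (
    "errors"
    "fmt"
)

// Stand-in for the object layer's sentinel error for malformed inputs.
var errInvalidArgument = errors.New("Invalid arguments specified")

// putObject is a hypothetical entry point; only the size validation
// introduced by this PR is sketched here.
func putObject(size int64) error {
    // Validate input data size and it can never be less than zero.
    if size < 0 {
        return errInvalidArgument
    }
    return nil
}

func main() {
    fmt.Println(putObject(-1)) // unknown-length stream -> error
    fmt.Println(putObject(0))  // zero-byte object -> still allowed
}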
@@ -482,6 +482,11 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
         return pi, toObjectErr(err, bucket)
     }
 
+    // Validate input data size and it can never be less than zero.
+    if data.Size() < 0 {
+        return pi, toObjectErr(traceError(errInvalidArgument))
+    }
+
     // Hold the lock so that two parallel complete-multipart-uploads
     // do not leave a stale uploads.json behind.
     objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object))
@@ -537,6 +537,11 @@ func (fs fsObjects) PutObject(bucket string, object string, data *HashReader, me
         return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
     }
 
+    // Validate input data size and it can never be less than zero.
+    if data.Size() < 0 {
+        return ObjectInfo{}, traceError(errInvalidArgument)
+    }
+
     // No metadata is set, allocate a new one.
     if metadata == nil {
         metadata = make(map[string]string)
@@ -577,6 +577,11 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
         return pi, err
     }
 
+    // Validate input data size and it can never be less than zero.
+    if data.Size() < 0 {
+        return pi, toObjectErr(traceError(errInvalidArgument))
+    }
+
     var partsMetadata []xlMetaV1
     var errs []error
     uploadIDPath := pathJoin(bucket, object, uploadID)
@@ -586,6 +591,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
     if err := preUploadIDLock.GetRLock(globalOperationTimeout); err != nil {
         return pi, err
     }
 
     // Validates if upload ID exists.
     if !xl.isUploadIDExists(bucket, object, uploadID) {
         preUploadIDLock.RUnlock()
@@ -443,6 +443,11 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
         return ObjectInfo{}, err
     }
 
+    // Validate input data size and it can never be less than zero.
+    if data.Size() < 0 {
+        return ObjectInfo{}, toObjectErr(traceError(errInvalidArgument))
+    }
+
     // Check if an object is present as one of the parent dir.
     // -- FIXME. (needs a new kind of lock).
     // -- FIXME (this also causes performance issue when disks are down).
@@ -504,7 +509,10 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
     if err != nil {
         return ObjectInfo{}, toObjectErr(err, bucket, object)
     }
-    buffer := make([]byte, partsMetadata[0].Erasure.BlockSize, 2*partsMetadata[0].Erasure.BlockSize) // alloc additional space for parity blocks created while erasure coding
+
+    // Alloc additional space for parity blocks created while erasure coding
+    buffer := make([]byte, partsMetadata[0].Erasure.BlockSize, 2*partsMetadata[0].Erasure.BlockSize)
+
     // Read data and split into parts - similar to multipart mechanism
     for partIdx := 1; ; partIdx++ {
         // Compute part name
@@ -512,10 +520,10 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
         // Compute the path of current part
         tempErasureObj := pathJoin(uniqueID, partName)
 
-        // Calculate the size of the current part, if size is unknown, curPartSize wil be unknown too.
-        // allowEmptyPart will always be true if this is the first part and false otherwise.
+        // Calculate the size of the current part. AllowEmptyPart will always be true
+        // if this is the first part and false otherwise.
         var curPartSize int64
-        curPartSize, err = getPartSizeFromIdx(data.Size(), globalPutPartSize, partIdx)
+        curPartSize, err = calculatePartSizeFromIdx(data.Size(), globalPutPartSize, partIdx)
         if err != nil {
             return ObjectInfo{}, toObjectErr(err, bucket, object)
         }
@@ -529,7 +537,8 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
             }
         }
 
-        file, erasureErr := storage.CreateFile(io.LimitReader(reader, globalPutPartSize), minioMetaTmpBucket, tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
+        file, erasureErr := storage.CreateFile(io.LimitReader(reader, curPartSize), minioMetaTmpBucket,
+            tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
         if erasureErr != nil {
             return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
         }
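Limiting the reader to curPartSize instead of the fixed globalPutPartSize means each erasure-coded part consumes exactly the bytes computed for it. A standalone illustration of how io.LimitReader caps successive reads from one stream (the data and sizes below are made up for the example, not taken from the codebase):

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    // Pretend the incoming object body is 10 bytes and the current part
    // size was computed as 4 bytes.
    reader := strings.NewReader("0123456789")
    curPartSize := int64(4)

    // io.LimitReader stops after curPartSize bytes, so each consumer
    // (CreateFile in the diff above) only sees its own part of the stream.
    part1, _ := io.ReadAll(io.LimitReader(reader, curPartSize))
    part2, _ := io.ReadAll(io.LimitReader(reader, curPartSize))
    fmt.Printf("part 1: %q, part 2: %q\n", part1, part2) // "0123", "4567"
}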
@@ -561,7 +570,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
 
         // Check part size for the next index.
         var partSize int64
-        partSize, err = getPartSizeFromIdx(data.Size(), globalPutPartSize, partIdx+1)
+        partSize, err = calculatePartSizeFromIdx(data.Size(), globalPutPartSize, partIdx+1)
         if err != nil {
             return ObjectInfo{}, toObjectErr(err, bucket, object)
         }
@@ -570,7 +579,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
         }
     }
 
-    if size := data.Size(); size > 0 && sizeWritten < data.Size() {
+    if sizeWritten < data.Size() {
         return ObjectInfo{}, traceError(IncompleteBody{})
     }
 
@@ -382,35 +382,37 @@ func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
     return newDisks
 }
 
-// Errors specifically generated by getPartSizeFromIdx function.
+// Errors specifically generated by calculatePartSizeFromIdx function.
 var (
     errPartSizeZero  = errors.New("Part size cannot be zero")
     errPartSizeIndex = errors.New("Part index cannot be smaller than 1")
 )
 
-// getPartSizeFromIdx predicts the part size according to its index. It also
-// returns -1 when totalSize is -1.
-func getPartSizeFromIdx(totalSize int64, partSize int64, partIndex int) (int64, error) {
+// calculatePartSizeFromIdx calculates the part size according to input index.
+// returns error if totalSize is -1, partSize is 0, partIndex is 0.
+func calculatePartSizeFromIdx(totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
+    if totalSize < 0 {
+        return 0, traceError(errInvalidArgument)
+    }
     if partSize == 0 {
         return 0, traceError(errPartSizeZero)
     }
     if partIndex < 1 {
         return 0, traceError(errPartSizeIndex)
     }
-    switch totalSize {
-    case -1, 0:
-        return totalSize, nil
-    }
-    // Compute the total count of parts
-    partsCount := totalSize/partSize + 1
-    // Return the part's size
-    switch {
-    case int64(partIndex) < partsCount:
-        return partSize, nil
-    case int64(partIndex) == partsCount:
-        // Size of last part
-        return totalSize % partSize, nil
-    default:
-        return 0, nil
-    }
+    if totalSize > 0 {
+        // Compute the total count of parts
+        partsCount := totalSize/partSize + 1
+        // Return the part's size
+        switch {
+        case int64(partIndex) < partsCount:
+            currPartSize = partSize
+        case int64(partIndex) == partsCount:
+            // Size of last part
+            currPartSize = totalSize % partSize
+        default:
+            currPartSize = 0
+        }
+    }
+    return currPartSize, nil
 }
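As a quick sanity check of the arithmetic above, here is a standalone re-statement of the sizing rule (simplified, without the error handling of calculatePartSizeFromIdx and not the package's actual code) applied to a 5 MiB object split into 2 MiB parts:

package main

import "fmt"

// partSizeFromIdx restates the sizing rule from the diff above for
// illustration only; names and error handling are simplified.
func partSizeFromIdx(totalSize, partSize int64, partIndex int) int64 {
    if totalSize <= 0 {
        return 0
    }
    partsCount := totalSize/partSize + 1
    switch {
    case int64(partIndex) < partsCount:
        return partSize
    case int64(partIndex) == partsCount:
        return totalSize % partSize // last, possibly short, part
    default:
        return 0
    }
}

func main() {
    const MiB = int64(1 << 20)
    total, part := 5*MiB, 2*MiB
    for idx := 1; idx <= 4; idx++ {
        fmt.Printf("part %d: %d bytes\n", idx, partSizeFromIdx(total, part, idx))
    }
    // Output:
    // part 1: 2097152 bytes
    // part 2: 2097152 bytes
    // part 3: 1048576 bytes
    // part 4: 0 bytes
}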
@@ -340,8 +340,6 @@ func TestGetPartSizeFromIdx(t *testing.T) {
         partIndex    int
         expectedSize int64
     }{
-        // Total size is - 1
-        {-1, 10, 1, -1},
         // Total size is zero
         {0, 10, 1, 0},
         // part size 2MiB, total size 4MiB
@@ -356,7 +354,7 @@ func TestGetPartSizeFromIdx(t *testing.T) {
     }
 
     for i, testCase := range testCases {
-        s, err := getPartSizeFromIdx(testCase.totalSize, testCase.partSize, testCase.partIndex)
+        s, err := calculatePartSizeFromIdx(testCase.totalSize, testCase.partSize, testCase.partIndex)
         if err != nil {
             t.Errorf("Test %d: Expected to pass but failed. %s", i+1, err)
         }
@@ -371,13 +369,16 @@ func TestGetPartSizeFromIdx(t *testing.T) {
         partIndex int
         err       error
     }{
-        // partSize is 0, error.
+        // partSize is 0, returns error.
         {10, 0, 1, errPartSizeZero},
+        // partIndex is 0, returns error.
         {10, 1, 0, errPartSizeIndex},
+        // Total size is -1, returns error.
+        {-1, 10, 1, errInvalidArgument},
     }
 
     for i, testCaseFailure := range testCasesFailure {
-        _, err := getPartSizeFromIdx(testCaseFailure.totalSize, testCaseFailure.partSize, testCaseFailure.partIndex)
+        _, err := calculatePartSizeFromIdx(testCaseFailure.totalSize, testCaseFailure.partSize, testCaseFailure.partIndex)
         if err == nil {
             t.Errorf("Test %d: Expected to failed but passed. %s", i+1, err)
         }