Return errors in PutObject()/PutObjectPart() if input size is -1. (#5015)

The Amazon S3 API expects every incoming stream to have a content-length
set, so it was superfluous for us to also support an object layer that
accepts streams of unknown size. This PR removes that requirement and
explicitly errors out if the input stream size is less than zero.
Harshavardhana 2017-10-06 09:38:01 -07:00 committed by Dee Koder
parent d1712a46a7
commit 0b546ddfd4
6 changed files with 59 additions and 31 deletions
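
To make the new contract concrete, here is a minimal, runnable sketch of the guard this commit inlines at the top of each PutObject/PutObjectPart implementation. validateSize is a hypothetical helper invented for illustration; in the actual code the check is inlined and the object layer's errInvalidArgument sentinel is wrapped with traceError/toObjectErr.

package main

import (
	"errors"
	"fmt"
)

// errInvalidArgument stands in for the object layer's sentinel error.
var errInvalidArgument = errors.New("Invalid arguments specified")

// validateSize models the guard added in this commit: a negative size
// (i.e. an unknown content length) is rejected before the upload proceeds.
func validateSize(size int64) error {
	if size < 0 {
		return errInvalidArgument
	}
	return nil
}

func main() {
	fmt.Println(validateSize(1024)) // <nil>
	fmt.Println(validateSize(-1))   // Invalid arguments specified
}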


@@ -482,6 +482,11 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
 		return pi, toObjectErr(err, bucket)
 	}
+	// Validate input data size and it can never be less than zero.
+	if data.Size() < 0 {
+		return pi, toObjectErr(traceError(errInvalidArgument))
+	}
 	// Hold the lock so that two parallel complete-multipart-uploads
 	// do not leave a stale uploads.json behind.
 	objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object))


@@ -537,6 +537,11 @@ func (fs fsObjects) PutObject(bucket string, object string, data *HashReader, me
 		return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
 	}
+	// Validate input data size and it can never be less than zero.
+	if data.Size() < 0 {
+		return ObjectInfo{}, traceError(errInvalidArgument)
+	}
 	// No metadata is set, allocate a new one.
 	if metadata == nil {
 		metadata = make(map[string]string)


@@ -577,6 +577,11 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
 		return pi, err
 	}
+	// Validate input data size and it can never be less than zero.
+	if data.Size() < 0 {
+		return pi, toObjectErr(traceError(errInvalidArgument))
+	}
 	var partsMetadata []xlMetaV1
 	var errs []error
 	uploadIDPath := pathJoin(bucket, object, uploadID)
@@ -586,6 +591,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
 	if err := preUploadIDLock.GetRLock(globalOperationTimeout); err != nil {
 		return pi, err
 	}
+	// Validates if upload ID exists.
 	if !xl.isUploadIDExists(bucket, object, uploadID) {
 		preUploadIDLock.RUnlock()


@@ -443,6 +443,11 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
 		return ObjectInfo{}, err
 	}
+	// Validate input data size and it can never be less than zero.
+	if data.Size() < 0 {
+		return ObjectInfo{}, toObjectErr(traceError(errInvalidArgument))
+	}
 	// Check if an object is present as one of the parent dir.
 	// -- FIXME. (needs a new kind of lock).
 	// -- FIXME (this also causes performance issue when disks are down).
@@ -504,7 +509,10 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
 	if err != nil {
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
-	buffer := make([]byte, partsMetadata[0].Erasure.BlockSize, 2*partsMetadata[0].Erasure.BlockSize) // alloc additional space for parity blocks created while erasure coding
+	// Alloc additional space for parity blocks created while erasure coding.
+	buffer := make([]byte, partsMetadata[0].Erasure.BlockSize, 2*partsMetadata[0].Erasure.BlockSize)
 	// Read data and split into parts - similar to multipart mechanism
 	for partIdx := 1; ; partIdx++ {
 		// Compute part name
@@ -512,10 +520,10 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
 		// Compute the path of current part
 		tempErasureObj := pathJoin(uniqueID, partName)
-		// Calculate the size of the current part, if size is unknown, curPartSize wil be unknown too.
-		// allowEmptyPart will always be true if this is the first part and false otherwise.
+		// Calculate the size of the current part. AllowEmptyPart will always be true
+		// if this is the first part and false otherwise.
 		var curPartSize int64
-		curPartSize, err = getPartSizeFromIdx(data.Size(), globalPutPartSize, partIdx)
+		curPartSize, err = calculatePartSizeFromIdx(data.Size(), globalPutPartSize, partIdx)
 		if err != nil {
 			return ObjectInfo{}, toObjectErr(err, bucket, object)
 		}
@@ -529,7 +537,8 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
 			}
 		}
-		file, erasureErr := storage.CreateFile(io.LimitReader(reader, globalPutPartSize), minioMetaTmpBucket, tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
+		file, erasureErr := storage.CreateFile(io.LimitReader(reader, curPartSize), minioMetaTmpBucket,
+			tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
 		if erasureErr != nil {
 			return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
 		}
@@ -561,7 +570,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
 		// Check part size for the next index.
 		var partSize int64
-		partSize, err = getPartSizeFromIdx(data.Size(), globalPutPartSize, partIdx+1)
+		partSize, err = calculatePartSizeFromIdx(data.Size(), globalPutPartSize, partIdx+1)
 		if err != nil {
 			return ObjectInfo{}, toObjectErr(err, bucket, object)
 		}
@@ -570,7 +579,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
 		}
 	}
-	if size := data.Size(); size > 0 && sizeWritten < data.Size() {
+	if sizeWritten < data.Size() {
 		return ObjectInfo{}, traceError(IncompleteBody{})
 	}
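
The storage.CreateFile change in the hunk above is easy to miss: each part's reader was previously capped at globalPutPartSize for every iteration, and is now capped at the curPartSize computed for that index. Below is a rough, self-contained sketch of this chunking pattern with io.LimitReader, using toy sizes as stand-ins for globalPutPartSize and the HashReader:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	const partSize = 4                        // toy stand-in for globalPutPartSize
	reader := strings.NewReader("abcdefghij") // a 10-byte "object"

	// Read data and split into parts, capping each part with io.LimitReader
	// the way PutObject feeds storage.CreateFile.
	for partIdx := 1; ; partIdx++ {
		var part strings.Builder
		n, err := io.Copy(&part, io.LimitReader(reader, partSize))
		if err != nil {
			panic(err)
		}
		if n == 0 {
			break // reader drained, no more parts
		}
		fmt.Printf("part %d: %q (%d bytes)\n", partIdx, part.String(), n)
	}
}

Running this prints parts of 4, 4, and 2 bytes, mirroring how the final short part falls out of the per-index size.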


@@ -382,35 +382,37 @@ func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
 	return newDisks
 }
 
-// Errors specifically generated by getPartSizeFromIdx function.
+// Errors specifically generated by calculatePartSizeFromIdx function.
 var (
 	errPartSizeZero  = errors.New("Part size cannot be zero")
 	errPartSizeIndex = errors.New("Part index cannot be smaller than 1")
 )
 
-// getPartSizeFromIdx predicts the part size according to its index. It also
-// returns -1 when totalSize is -1.
-func getPartSizeFromIdx(totalSize int64, partSize int64, partIndex int) (int64, error) {
+// calculatePartSizeFromIdx calculates the part size according to the input index.
+// Returns an error if totalSize is -1, partSize is 0, or partIndex is 0.
+func calculatePartSizeFromIdx(totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
+	if totalSize < 0 {
+		return 0, traceError(errInvalidArgument)
+	}
 	if partSize == 0 {
 		return 0, traceError(errPartSizeZero)
 	}
 	if partIndex < 1 {
 		return 0, traceError(errPartSizeIndex)
 	}
-	switch totalSize {
-	case -1, 0:
-		return totalSize, nil
-	}
-	// Compute the total count of parts
-	partsCount := totalSize/partSize + 1
-	// Return the part's size
-	switch {
-	case int64(partIndex) < partsCount:
-		return partSize, nil
-	case int64(partIndex) == partsCount:
-		// Size of last part
-		return totalSize % partSize, nil
-	default:
-		return 0, nil
-	}
+	if totalSize > 0 {
+		// Compute the total count of parts
+		partsCount := totalSize/partSize + 1
+		// Return the part's size
+		switch {
+		case int64(partIndex) < partsCount:
+			currPartSize = partSize
+		case int64(partIndex) == partsCount:
+			// Size of last part
+			currPartSize = totalSize % partSize
+		default:
+			currPartSize = 0
+		}
+	}
+	return currPartSize, nil
 }
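
To see the arithmetic, consider a 10-byte object with a 4-byte part size: partsCount = 10/4 + 1 = 3, so parts 1 and 2 get 4 bytes and part 3 gets the 10 % 4 = 2 byte remainder. A simplified restatement of calculatePartSizeFromIdx with the validation and traceError wrapping stripped out, purely for illustration:

package main

import "fmt"

// partSizeFromIdx restates the part-size arithmetic of
// calculatePartSizeFromIdx without its input validation.
func partSizeFromIdx(totalSize, partSize int64, partIndex int) int64 {
	if totalSize == 0 {
		return 0
	}
	partsCount := totalSize/partSize + 1
	switch {
	case int64(partIndex) < partsCount:
		return partSize
	case int64(partIndex) == partsCount:
		return totalSize % partSize // the last part carries the remainder
	default:
		return 0 // indexes past the last part are empty
	}
}

func main() {
	// 10 bytes with a 4-byte part size: 4 + 4 + 2, then empty.
	for idx := 1; idx <= 4; idx++ {
		fmt.Printf("part %d -> %d bytes\n", idx, partSizeFromIdx(10, 4, idx))
	}
}

(Note that when totalSize is an exact multiple of partSize, the index equal to partsCount computes to a 0-byte part.)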


@@ -340,8 +340,6 @@ func TestGetPartSizeFromIdx(t *testing.T) {
 		partIndex    int
 		expectedSize int64
 	}{
-		// Total size is - 1
-		{-1, 10, 1, -1},
 		// Total size is zero
 		{0, 10, 1, 0},
 		// part size 2MiB, total size 4MiB
@@ -356,7 +354,7 @@ func TestGetPartSizeFromIdx(t *testing.T) {
 	}
 	for i, testCase := range testCases {
-		s, err := getPartSizeFromIdx(testCase.totalSize, testCase.partSize, testCase.partIndex)
+		s, err := calculatePartSizeFromIdx(testCase.totalSize, testCase.partSize, testCase.partIndex)
 		if err != nil {
 			t.Errorf("Test %d: Expected to pass but failed. %s", i+1, err)
 		}
@@ -371,13 +369,16 @@ func TestGetPartSizeFromIdx(t *testing.T) {
 		partIndex int
 		err       error
 	}{
-		// partSize is 0, error.
+		// partSize is 0, returns error.
 		{10, 0, 1, errPartSizeZero},
+		// partIndex is 0, returns error.
 		{10, 1, 0, errPartSizeIndex},
+		// Total size is -1, returns error.
+		{-1, 10, 1, errInvalidArgument},
 	}
 	for i, testCaseFailure := range testCasesFailure {
-		_, err := getPartSizeFromIdx(testCaseFailure.totalSize, testCaseFailure.partSize, testCaseFailure.partIndex)
+		_, err := calculatePartSizeFromIdx(testCaseFailure.totalSize, testCaseFailure.partSize, testCaseFailure.partIndex)
 		if err == nil {
 			t.Errorf("Test %d: Expected to failed but passed. %s", i+1, err)
 		}