Mirror of https://github.com/minio/minio.git (synced 2025-02-03 18:06:00 -05:00)
XL: CompleteMultipart should ignore the last part if it is 0 bytes. (#1931)

Fixes #1917

This commit is contained in:
  parent 1ea1dba528
  commit e1aad066c6
@@ -39,20 +39,34 @@ func erasureCreateFile(disks []StorageAPI, volume string, path string, partName
     // Read until io.EOF, erasure codes data and writes to all disks.
     for {
         var n int
+        var blocks [][]byte
         n, err = io.ReadFull(data, buf)
         if err == io.EOF {
+            // We have reached EOF on the first byte read, io.Reader
+            // must be 0bytes, we don't need to erasure code
+            // data. Will create a 0byte file instead.
+            if size == 0 {
+                blocks = make([][]byte, len(disks))
+                err = appendFile(disks, volume, path, blocks, eInfo.Distribution, hashWriters, writeQuorum)
+                if err != nil {
+                    return nil, 0, err
+                }
+            } // else we have reached EOF after few reads, no need to
+            // add an additional 0bytes at the end.
             break
         }
         if err != nil && err != io.ErrUnexpectedEOF {
             return nil, 0, err
         }
         size += int64(n)
-        var blocks [][]byte
         // Returns encoded blocks.
-        blocks, err = encodeData(buf[:n], eInfo.DataBlocks, eInfo.ParityBlocks)
-        if err != nil {
-            return nil, 0, err
+        var enErr error
+        blocks, enErr = encodeData(buf[:n], eInfo.DataBlocks, eInfo.ParityBlocks)
+        if enErr != nil {
+            return nil, 0, enErr
         }
+
+        // Write to all disks.
         err = appendFile(disks, volume, path, blocks, eInfo.Distribution, hashWriters, writeQuorum)
         if err != nil {
             return nil, 0, err
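The new branch leans on io.ReadFull's documented contract: it returns io.EOF only when nothing was read at all, and io.ErrUnexpectedEOF when it reads some bytes but fewer than len(buf). A later io.EOF therefore means the previous iteration already appended the final full buffer, which is why only the size == 0 case needs an explicit 0-byte file. A minimal standalone sketch (not part of this commit) showing the two cases the loop distinguishes:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    buf := make([]byte, 4)

    // Empty reader: n == 0 and err == io.EOF. This is the case the
    // new branch turns into a 0-byte erasure-coded file when size == 0.
    n, err := io.ReadFull(strings.NewReader(""), buf)
    fmt.Println(n, err == io.EOF) // 0 true

    // Short read: some bytes but fewer than len(buf) yields
    // io.ErrUnexpectedEOF, which the loop treats as a normal final chunk.
    n, err = io.ReadFull(strings.NewReader("ab"), buf)
    fmt.Println(n, err == io.ErrUnexpectedEOF) // 2 true
}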
@@ -171,11 +171,6 @@ func (m xlMetaV1) ObjectToPartOffset(offset int64) (partIndex int, partOffset in
     // Seek until object offset maps to a particular part offset.
     for i, part := range m.Parts {
         partIndex = i
-        // Last part can be of '0' bytes, treat it specially and
-        // return right here.
-        if part.Size == 0 {
-            return partIndex, partOffset, nil
-        }
         // Offset is smaller than size we have reached the proper part offset.
         if partOffset < part.Size {
             return partIndex, partOffset, nil
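For context, this loop translates an absolute object offset into a (part index, offset within that part) pair by subtracting each part's size in turn. Once a trailing 0-byte part is never written into `xl.json` (see the CompleteMultipartUpload hunk below), no zero-size entry can reach this loop, so the removed special case becomes dead code. A rough standalone sketch of the seek logic, using a hypothetical objectPart type in place of the real xlMetaV1 structures:

package main

import (
    "errors"
    "fmt"
)

// objectPart is a hypothetical stand-in for the part entries kept in xl.json.
type objectPart struct {
    Size int64
}

// toPartOffset subtracts each part's size from the requested offset until
// the offset falls inside a part, mirroring the seek loop shown above.
func toPartOffset(parts []objectPart, offset int64) (partIndex int, partOffset int64, err error) {
    partOffset = offset
    for i, part := range parts {
        partIndex = i
        // Offset is smaller than this part's size: found the right part.
        if partOffset < part.Size {
            return partIndex, partOffset, nil
        }
        partOffset -= part.Size
    }
    return 0, 0, errors.New("offset beyond end of object")
}

func main() {
    parts := []objectPart{{Size: 5}, {Size: 5}, {Size: 3}}
    fmt.Println(toPartOffset(parts, 7)) // 1 2 <nil>
}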
@@ -579,17 +579,28 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
     // Validate each part and then commit to disk.
     for i, part := range parts {
         partIdx := currentXLMeta.ObjectPartIndex(part.PartNumber)
+        // All parts should have same part number.
         if partIdx == -1 {
             return "", InvalidPart{}
         }
+
+        // All parts should have same ETag as previously generated.
         if currentXLMeta.Parts[partIdx].ETag != part.ETag {
             return "", BadDigest{}
         }

         // All parts except the last part has to be atleast 5MB.
         if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].Size) {
             return "", PartTooSmall{}
         }
+
+        // Last part could have been uploaded as 0bytes, do not need
+        // to save it in final `xl.json`.
+        if (i == len(parts)-1) && currentXLMeta.Parts[partIdx].Size == 0 {
+            xlMeta.Parts = xlMeta.Parts[:i] // Skip the part.
+            continue
+        }
+
         // Save for total object size.
         objectSize += currentXLMeta.Parts[partIdx].Size