Simplify data verification with HashReader. (#5071)

Verify() was being called by the caller after the data
had been successfully read, i.e. after io.EOF. This disconnection
opens a race under concurrent access to such an object.
Verification is not necessary outside of the Read() call;
we can simply do checksum verification right inside the
Read() call at io.EOF.

This approach simplifies the usage.
This commit is contained in:
Harshavardhana
2017-10-21 22:30:34 -07:00
committed by Nitish Tiwari
parent 65a817fe8c
commit 1d8a8c63db
51 changed files with 749 additions and 499 deletions

View File

@@ -17,7 +17,6 @@
package cmd
import (
"encoding/hex"
"fmt"
"io"
"os"
@@ -25,6 +24,7 @@ import (
"strings"
"time"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)
@@ -458,7 +458,12 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
}()
partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, NewHashReader(pipeReader, length, "", ""))
hashReader, err := hash.NewReader(pipeReader, length, "", "")
if err != nil {
return pi, toObjectErr(err, dstBucket, dstObject)
}
partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, hashReader)
if err != nil {
return pi, toObjectErr(err, dstBucket, dstObject)
}
@@ -473,7 +478,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
return pi, err
}
@@ -552,10 +557,6 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// delete.
defer fsRemoveFile(fsPartPath)
if err = data.Verify(); err != nil {
return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
}
partPath := pathJoin(bucket, object, uploadID, partSuffix)
// Lock the part so that another part upload with same part-number gets blocked
// while the part is getting appended in the background.
@@ -570,9 +571,10 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}
md5hex := data.MD5HexString()
// Save the object part info in `fs.json`.
md5Hex := hex.EncodeToString(data.MD5())
fsMeta.AddObjectPart(partID, partSuffix, md5Hex, data.Size())
fsMeta.AddObjectPart(partID, partSuffix, md5hex, data.Size())
if _, err = fsMeta.WriteTo(rwlk); err != nil {
partLock.Unlock()
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
@@ -598,7 +600,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
return PartInfo{
PartNumber: partID,
LastModified: fi.ModTime(),
ETag: md5Hex,
ETag: md5hex,
Size: fi.Size(),
}, nil
}