Probe revamped to provide a new WrappedError struct that wraps probes in the error interface

This convenience is necessary for use with Go standard-library functions like io.Copy and io.Pipe,
where we shouldn't be writing proxies and alternatives that return *probe.Error

This change also brings further changes across the code base for a clear separation of where an
error interface encapsulating *probe.Error should be passed and where *probe.Error should be used as is.
This commit is contained in:
Harshavardhana
2015-08-07 23:47:22 -07:00
parent 28d9565400
commit 45b59b8456
34 changed files with 392 additions and 363 deletions

View File

@@ -54,7 +54,7 @@ type bucket struct {
// newBucket - instantiate a new bucket
func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bucket, BucketMetadata, *probe.Error) {
if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
return bucket{}, BucketMetadata{}, probe.New(InvalidArgument{})
return bucket{}, BucketMetadata{}, probe.NewError(InvalidArgument{})
}
b := bucket{}
@@ -128,7 +128,7 @@ func (b bucket) getBucketMetadata() (*AllBuckets, *probe.Error) {
return metadata, nil
}
}
return nil, probe.New(err)
return nil, probe.NewError(err)
}
// GetObjectMetadata - get metadata for an object
@@ -223,7 +223,7 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
}
// check if object exists
if _, ok := bucketMetadata.Buckets[b.getBucketName()].BucketObjects[objectName]; !ok {
return nil, 0, probe.New(ObjectNotFound{Object: objectName})
return nil, 0, probe.NewError(ObjectNotFound{Object: objectName})
}
objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
if err != nil {
@@ -239,7 +239,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
b.lock.Lock()
defer b.lock.Unlock()
if objectName == "" || objectData == nil {
return ObjectMetadata{}, probe.New(InvalidArgument{})
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
writers, err := b.getObjectWriters(normalizeObjectName(objectName), "data")
if err != nil {
@@ -266,7 +266,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
totalLength, err := io.Copy(mw, objectData)
if err != nil {
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.New(err)
return ObjectMetadata{}, probe.NewError(err)
}
objMetadata.Size = totalLength
case false:
@@ -306,7 +306,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
//
// Signature mismatch occurred all temp files to be removed and all data purged.
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
@@ -337,24 +337,24 @@ func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
if err != nil {
return probe.New(err)
return probe.NewError(err)
}
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
if err != nil {
return probe.New(err)
return probe.NewError(err)
}
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
return probe.New(BadDigest{})
return probe.NewError(BadDigest{})
}
return nil
}
return probe.New(InvalidArgument{})
return probe.NewError(InvalidArgument{})
}
// writeObjectMetadata - write additional object metadata
func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadata) *probe.Error {
if objMetadata.Object == "" {
return probe.New(InvalidArgument{})
return probe.NewError(InvalidArgument{})
}
objMetadataWriters, err := b.getObjectWriters(objectName, objectMetadataConfig)
if err != nil {
@@ -365,7 +365,7 @@ func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadat
if err := jenc.Encode(&objMetadata); err != nil {
// Close writers and purge all temporary entries
CleanupWritersOnError(objMetadataWriters)
return probe.New(err)
return probe.NewError(err)
}
}
for _, objMetadataWriter := range objMetadataWriters {
@@ -377,7 +377,7 @@ func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadat
// readObjectMetadata - read object metadata
func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) {
if objectName == "" {
return ObjectMetadata{}, probe.New(InvalidArgument{})
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
objMetadata := ObjectMetadata{}
objMetadataReaders, err := b.getObjectReaders(objectName, objectMetadataConfig)
@@ -395,7 +395,7 @@ func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, *probe.Er
return objMetadata, nil
}
}
return ObjectMetadata{}, probe.New(err)
return ObjectMetadata{}, probe.NewError(err)
}
}
@@ -415,12 +415,12 @@ func normalizeObjectName(objectName string) string {
// getDataAndParity - calculate k, m (data and parity) values from number of disks
func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe.Error) {
if totalWriters <= 1 {
return 0, 0, probe.New(InvalidArgument{})
return 0, 0, probe.NewError(InvalidArgument{})
}
quotient := totalWriters / 2 // not using float or abs to let integer round off to lower value
// quotient cannot be bigger than (255 / 2) = 127
if quotient > 127 {
return 0, 0, probe.New(ParityOverflow{})
return 0, 0, probe.NewError(ParityOverflow{})
}
remainder := totalWriters % 2 // will be 1 for odd and 0 for even numbers
k = uint8(quotient + remainder)
@@ -450,7 +450,7 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
return 0, 0, err.Trace()
}
if _, err := writer.Write(inputData); err != nil {
return 0, 0, probe.New(err)
return 0, 0, probe.NewError(err)
}
for blockIndex, block := range encodedBlocks {
errCh := make(chan error, 1)
@@ -461,7 +461,7 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
}(writers[blockIndex], bytes.NewReader(block), errCh)
if err := <-errCh; err != nil {
// Returning error is fine here CleanupErrors() would cleanup writers
return 0, 0, probe.New(err)
return 0, 0, probe.NewError(err)
}
}
chunkCount = chunkCount + 1
@@ -473,7 +473,7 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
readers, err := b.getObjectReaders(objectName, "data")
if err != nil {
writer.CloseWithError(err.Trace())
writer.CloseWithError(probe.NewWrappedError(err))
return
}
for _, reader := range readers {
@@ -484,12 +484,12 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta
var err error
expectedMd5sum, err = hex.DecodeString(objMetadata.MD5Sum)
if err != nil {
writer.CloseWithError(probe.New(err))
writer.CloseWithError(probe.NewWrappedError(probe.NewError(err)))
return
}
expected512Sum, err = hex.DecodeString(objMetadata.SHA512Sum)
if err != nil {
writer.CloseWithError(probe.New(err))
writer.CloseWithError(probe.NewWrappedError(probe.NewError(err)))
return
}
}
@@ -499,23 +499,23 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta
switch len(readers) > 1 {
case true:
if objMetadata.ErasureTechnique == "" {
writer.CloseWithError(probe.New(MissingErasureTechnique{}))
writer.CloseWithError(probe.NewWrappedError(probe.NewError(MissingErasureTechnique{})))
return
}
encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks, objMetadata.ErasureTechnique)
if err != nil {
writer.CloseWithError(err.Trace())
writer.CloseWithError(probe.NewWrappedError(err))
return
}
totalLeft := objMetadata.Size
for i := 0; i < objMetadata.ChunkCount; i++ {
decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
if err != nil {
writer.CloseWithError(err.Trace())
writer.CloseWithError(probe.NewWrappedError(err))
return
}
if _, err := io.Copy(mwriter, bytes.NewReader(decodedData)); err != nil {
writer.CloseWithError(probe.New(err))
writer.CloseWithError(probe.NewWrappedError(probe.NewError(err)))
return
}
totalLeft = totalLeft - int64(objMetadata.BlockSize)
@@ -523,17 +523,17 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta
case false:
_, err := io.Copy(writer, readers[0])
if err != nil {
writer.CloseWithError(probe.New(err))
writer.CloseWithError(probe.NewWrappedError(probe.NewError(err)))
return
}
}
// check if decodedData md5sum matches
if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
writer.CloseWithError(probe.New(ChecksumMismatch{}))
writer.CloseWithError(probe.NewWrappedError(probe.NewError(ChecksumMismatch{})))
return
}
if !bytes.Equal(expected512Sum, sum512hasher.Sum(nil)) {
writer.CloseWithError(probe.New(ChecksumMismatch{}))
writer.CloseWithError(probe.NewWrappedError(probe.NewError(ChecksumMismatch{})))
return
}
writer.Close()
@@ -557,7 +557,7 @@ func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io
var bytesBuffer bytes.Buffer
_, err := io.CopyN(&bytesBuffer, reader, int64(curChunkSize))
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
encodedBytes[i] = bytesBuffer.Bytes()
}

View File

@@ -31,7 +31,7 @@ func getDonutConfigPath() (string, *probe.Error) {
}
u, err := user.Current()
if err != nil {
return "", probe.New(err)
return "", probe.NewError(err)
}
donutConfigPath := filepath.Join(u.HomeDir, ".minio", "donut.json")
return donutConfigPath, nil

View File

@@ -39,20 +39,20 @@ type Disk struct {
// New - instantiate new disk
func New(diskPath string) (Disk, *probe.Error) {
if diskPath == "" {
return Disk{}, probe.New(InvalidArgument{})
return Disk{}, probe.NewError(InvalidArgument{})
}
st, err := os.Stat(diskPath)
if err != nil {
return Disk{}, probe.New(err)
return Disk{}, probe.NewError(err)
}
if !st.IsDir() {
return Disk{}, probe.New(syscall.ENOTDIR)
return Disk{}, probe.NewError(syscall.ENOTDIR)
}
s := syscall.Statfs_t{}
err = syscall.Statfs(diskPath, &s)
if err != nil {
return Disk{}, probe.New(err)
return Disk{}, probe.NewError(err)
}
disk := Disk{
lock: &sync.Mutex{},
@@ -64,7 +64,7 @@ func New(diskPath string) (Disk, *probe.Error) {
disk.fsInfo["MountPoint"] = disk.path
return disk, nil
}
return Disk{}, probe.New(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)})
return Disk{}, probe.NewError(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)})
}
// IsUsable - is disk usable, alive
@@ -103,7 +103,7 @@ func (disk Disk) MakeDir(dirname string) *probe.Error {
disk.lock.Lock()
defer disk.lock.Unlock()
if err := os.MkdirAll(filepath.Join(disk.path, dirname), 0700); err != nil {
return probe.New(err)
return probe.NewError(err)
}
return nil
}
@@ -115,12 +115,12 @@ func (disk Disk) ListDir(dirname string) ([]os.FileInfo, *probe.Error) {
dir, err := os.Open(filepath.Join(disk.path, dirname))
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
var directories []os.FileInfo
for _, content := range contents {
@@ -139,12 +139,12 @@ func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, *probe.Error) {
dir, err := os.Open(filepath.Join(disk.path, dirname))
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
var files []os.FileInfo
for _, content := range contents {
@@ -162,12 +162,12 @@ func (disk Disk) CreateFile(filename string) (*atomic.File, *probe.Error) {
defer disk.lock.Unlock()
if filename == "" {
return nil, probe.New(InvalidArgument{})
return nil, probe.NewError(InvalidArgument{})
}
f, err := atomic.FileCreate(filepath.Join(disk.path, filename))
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
return f, nil
@@ -179,11 +179,11 @@ func (disk Disk) Open(filename string) (*os.File, *probe.Error) {
defer disk.lock.Unlock()
if filename == "" {
return nil, probe.New(InvalidArgument{})
return nil, probe.NewError(InvalidArgument{})
}
dataFile, err := os.Open(filepath.Join(disk.path, filename))
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
return dataFile, nil
}
@@ -194,11 +194,11 @@ func (disk Disk) OpenFile(filename string, flags int, perm os.FileMode) (*os.Fil
defer disk.lock.Unlock()
if filename == "" {
return nil, probe.New(InvalidArgument{})
return nil, probe.NewError(InvalidArgument{})
}
dataFile, err := os.OpenFile(filepath.Join(disk.path, filename), flags, perm)
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
return dataFile, nil
}

View File

@@ -38,8 +38,8 @@ func (s *MyDiskSuite) SetUpSuite(c *C) {
path, err := ioutil.TempDir(os.TempDir(), "disk-")
c.Assert(err, IsNil)
s.path = path
d, err := New(s.path)
c.Assert(err, IsNil)
d, perr := New(s.path)
c.Assert(perr, IsNil)
s.disk = d
}

View File

@@ -54,7 +54,7 @@ const (
// makeBucket - make a new bucket
func (donut API) makeBucket(bucket string, acl BucketACL) *probe.Error {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return probe.New(InvalidArgument{})
return probe.NewError(InvalidArgument{})
}
return donut.makeDonutBucket(bucket, acl.String())
}
@@ -65,7 +65,7 @@ func (donut API) getBucketMetadata(bucketName string) (BucketMetadata, *probe.Er
return BucketMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucketName]; !ok {
return BucketMetadata{}, probe.New(BucketNotFound{Bucket: bucketName})
return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucketName})
}
metadata, err := donut.getDonutBucketMetadata()
if err != nil {
@@ -86,7 +86,7 @@ func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]
oldBucketMetadata := metadata.Buckets[bucketName]
acl, ok := bucketMetadata["acl"]
if !ok {
return probe.New(InvalidArgument{})
return probe.NewError(InvalidArgument{})
}
oldBucketMetadata.ACL = BucketACL(acl)
metadata.Buckets[bucketName] = oldBucketMetadata
@@ -117,7 +117,7 @@ func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys i
return ListObjectsResults{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ListObjectsResults{}, probe.New(BucketNotFound{Bucket: bucket})
return ListObjectsResults{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
listObjects, err := donut.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
if err != nil {
@@ -129,23 +129,23 @@ func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys i
// putObject - put object
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.New(InvalidArgument{})
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectMetadata{}, probe.New(InvalidArgument{})
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := donut.getDonutBucketMetadata()
if err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
return ObjectMetadata{}, probe.New(ObjectExists{Object: object})
return ObjectMetadata{}, probe.NewError(ObjectExists{Object: object})
}
objMetadata, err := donut.buckets[bucket].WriteObject(object, reader, size, expectedMD5Sum, metadata, signature)
if err != nil {
@@ -161,26 +161,26 @@ func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Read
// putObject - put object
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (PartMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return PartMetadata{}, probe.New(InvalidArgument{})
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return PartMetadata{}, probe.New(InvalidArgument{})
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return PartMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return PartMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return PartMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := donut.getDonutBucketMetadata()
if err != nil {
return PartMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].Multiparts[object]; !ok {
return PartMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID})
return PartMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
return PartMetadata{}, probe.New(ObjectExists{Object: object})
return PartMetadata{}, probe.NewError(ObjectExists{Object: object})
}
objectPart := object + "/" + "multipart" + "/" + strconv.Itoa(partID)
objmetadata, err := donut.buckets[bucket].WriteObject(objectPart, reader, size, expectedMD5Sum, metadata, signature)
@@ -205,16 +205,16 @@ func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string,
// getObject - get object
func (donut API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return nil, 0, probe.New(InvalidArgument{})
return nil, 0, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return nil, 0, probe.New(InvalidArgument{})
return nil, 0, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return nil, 0, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return nil, 0, probe.New(BucketNotFound{Bucket: bucket})
return nil, 0, probe.NewError(BucketNotFound{Bucket: bucket})
}
return donut.buckets[bucket].ReadObject(object)
}
@@ -225,14 +225,14 @@ func (donut API) getObjectMetadata(bucket, object string) (ObjectMetadata, *prob
return ObjectMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := donut.getDonutBucketMetadata()
if err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; !ok {
return ObjectMetadata{}, probe.New(ObjectNotFound{Object: object})
return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: object})
}
objectMetadata, err := donut.buckets[bucket].GetObjectMetadata(object)
if err != nil {
@@ -247,7 +247,7 @@ func (donut API) newMultipartUpload(bucket, object, contentType string) (string,
return "", err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return "", probe.New(BucketNotFound{Bucket: bucket})
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := donut.getDonutBucketMetadata()
if err != nil {
@@ -283,16 +283,16 @@ func (donut API) newMultipartUpload(bucket, object, contentType string) (string,
// listObjectParts list all object parts
func (donut API) listObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectResourcesMetadata{}, probe.New(InvalidArgument{})
return ObjectResourcesMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectResourcesMetadata{}, probe.New(InvalidArgument{})
return ObjectResourcesMetadata{}, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return ObjectResourcesMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ObjectResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allBuckets, err := donut.getDonutBucketMetadata()
if err != nil {
@@ -300,10 +300,10 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource
}
bucketMetadata := allBuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID})
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
if bucketMetadata.Multiparts[object].UploadID != resources.UploadID {
return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID})
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
objectResourcesMetadata := resources
objectResourcesMetadata.Bucket = bucket
@@ -326,7 +326,7 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource
}
part, ok := bucketMetadata.Multiparts[object].Parts[strconv.Itoa(i)]
if !ok {
return ObjectResourcesMetadata{}, probe.New(InvalidPart{})
return ObjectResourcesMetadata{}, probe.NewError(InvalidPart{})
}
parts = append(parts, &part)
}
@@ -338,16 +338,16 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource
// completeMultipartUpload complete an incomplete multipart upload
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.New(InvalidArgument{})
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectMetadata{}, probe.New(InvalidArgument{})
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allBuckets, err := donut.getDonutBucketMetadata()
if err != nil {
@@ -355,17 +355,17 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i
}
bucketMetadata := allBuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID})
return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if bucketMetadata.Multiparts[object].UploadID != uploadID {
return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID})
return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
var partBytes []byte
{
var err error
partBytes, err = ioutil.ReadAll(data)
if err != nil {
return ObjectMetadata{}, probe.New(err)
return ObjectMetadata{}, probe.NewError(err)
}
}
if signature != nil {
@@ -374,19 +374,19 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
if err := xml.Unmarshal(partBytes, parts); err != nil {
return ObjectMetadata{}, probe.New(MalformedXML{})
return ObjectMetadata{}, probe.NewError(MalformedXML{})
}
if !sort.IsSorted(completedParts(parts.Part)) {
return ObjectMetadata{}, probe.New(InvalidPartOrder{})
return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
}
for _, part := range parts.Part {
if strings.Trim(part.ETag, "\"") != bucketMetadata.Multiparts[object].Parts[strconv.Itoa(part.PartNumber)].ETag {
return ObjectMetadata{}, probe.New(InvalidPart{})
return ObjectMetadata{}, probe.NewError(InvalidPart{})
}
}
var finalETagBytes []byte
@@ -395,7 +395,7 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i
for _, part := range bucketMetadata.Multiparts[object].Parts {
partETagBytes, err := hex.DecodeString(part.ETag)
if err != nil {
return ObjectMetadata{}, probe.New(err)
return ObjectMetadata{}, probe.NewError(err)
}
finalETagBytes = append(finalETagBytes, partETagBytes...)
finalSize += part.Size
@@ -416,7 +416,7 @@ func (donut API) listMultipartUploads(bucket string, resources BucketMultipartRe
return BucketMultipartResourcesMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return BucketMultipartResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := donut.getDonutBucketMetadata()
if err != nil {
@@ -474,7 +474,7 @@ func (donut API) abortMultipartUpload(bucket, object, uploadID string) *probe.Er
return err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return probe.New(BucketNotFound{Bucket: bucket})
return probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := donut.getDonutBucketMetadata()
if err != nil {
@@ -482,10 +482,10 @@ func (donut API) abortMultipartUpload(bucket, object, uploadID string) *probe.Er
}
bucketMetadata := allbuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return probe.New(InvalidUploadID{UploadID: uploadID})
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if bucketMetadata.Multiparts[object].UploadID != uploadID {
return probe.New(InvalidUploadID{UploadID: uploadID})
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
delete(bucketMetadata.Multiparts, object)
@@ -557,7 +557,7 @@ func (donut API) setDonutBucketMetadata(metadata *AllBuckets) *probe.Error {
jenc := json.NewEncoder(writer)
if err := jenc.Encode(metadata); err != nil {
CleanupWritersOnError(writers)
return probe.New(err)
return probe.NewError(err)
}
}
for _, writer := range writers {
@@ -584,7 +584,7 @@ func (donut API) getDonutBucketMetadata() (*AllBuckets, *probe.Error) {
return metadata, nil
}
}
return nil, probe.New(err)
return nil, probe.NewError(err)
}
}
@@ -594,7 +594,7 @@ func (donut API) makeDonutBucket(bucketName, acl string) *probe.Error {
return err.Trace()
}
if _, ok := donut.buckets[bucketName]; ok {
return probe.New(BucketExists{Bucket: bucketName})
return probe.NewError(BucketExists{Bucket: bucketName})
}
bucket, bucketMetadata, err := newBucket(bucketName, acl, donut.config.DonutName, donut.nodes)
if err != nil {
@@ -662,7 +662,7 @@ func (donut API) listDonutBuckets() *probe.Error {
for _, dir := range dirs {
splitDir := strings.Split(dir.Name(), "$")
if len(splitDir) < 3 {
return probe.New(CorruptedBackend{Backend: dir.Name()})
return probe.NewError(CorruptedBackend{Backend: dir.Name()})
}
bucketName := splitDir[0]
// we dont need this once we cache from makeDonutBucket()

View File

@@ -67,15 +67,15 @@ func (s *MyDonutSuite) SetUpSuite(c *C) {
conf.NodeDiskMap = createTestNodeDiskMap(root)
conf.MaxSize = 100000
SetDonutConfigPath(filepath.Join(root, "donut.json"))
err = SaveConfig(conf)
c.Assert(err, IsNil)
perr := SaveConfig(conf)
c.Assert(perr, IsNil)
dd, err = New()
c.Assert(err, IsNil)
dd, perr = New()
c.Assert(perr, IsNil)
// testing empty donut
buckets, err := dd.ListBuckets(nil)
c.Assert(err, IsNil)
buckets, perr := dd.ListBuckets(nil)
c.Assert(perr, IsNil)
c.Assert(len(buckets), Equals, 0)
}

View File

@@ -98,7 +98,7 @@ func New() (Interface, *probe.Error) {
if len(a.config.NodeDiskMap) > 0 {
for k, v := range a.config.NodeDiskMap {
if len(v) == 0 {
return nil, probe.New(InvalidDisksArgument{})
return nil, probe.NewError(InvalidDisksArgument{})
}
err := a.AttachNode(k, v)
if err != nil {
@@ -131,19 +131,19 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return 0, probe.New(BucketNameInvalid{Bucket: bucket})
return 0, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return 0, probe.New(ObjectNameInvalid{Object: object})
return 0, probe.NewError(ObjectNameInvalid{Object: object})
}
if start < 0 {
return 0, probe.New(InvalidRange{
return 0, probe.NewError(InvalidRange{
Start: start,
Length: length,
})
}
if !donut.storedBuckets.Exists(bucket) {
return 0, probe.New(BucketNotFound{Bucket: bucket})
return 0, probe.NewError(BucketNotFound{Bucket: bucket})
}
objectKey := bucket + "/" + object
data, ok := donut.objects.Get(objectKey)
@@ -156,7 +156,7 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len
}
if start > 0 {
if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil {
return 0, probe.New(err)
return 0, probe.NewError(err)
}
}
// new proxy writer to capture data read from disk
@@ -166,12 +166,12 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len
if length > 0 {
written, err = io.CopyN(pw, reader, length)
if err != nil {
return 0, probe.New(err)
return 0, probe.NewError(err)
}
} else {
written, err = io.CopyN(pw, reader, size)
if err != nil {
return 0, probe.New(err)
return 0, probe.NewError(err)
}
}
}
@@ -180,23 +180,23 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len
pw.writtenBytes = nil
go debug.FreeOSMemory()
if !ok {
return 0, probe.New(InternalError{})
return 0, probe.NewError(InternalError{})
}
return written, nil
}
return 0, probe.New(ObjectNotFound{Object: object})
return 0, probe.NewError(ObjectNotFound{Object: object})
}
{
var err error
if start == 0 && length == 0 {
written, err = io.CopyN(w, bytes.NewBuffer(data), int64(donut.objects.Len(objectKey)))
if err != nil {
return 0, probe.New(err)
return 0, probe.NewError(err)
}
} else {
written, err = io.CopyN(w, bytes.NewBuffer(data[start:]), length)
if err != nil {
return 0, probe.New(err)
return 0, probe.NewError(err)
}
}
return written, nil
@@ -214,12 +214,12 @@ func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketM
return BucketMetadata{}, err.Trace()
}
if !ok {
return BucketMetadata{}, probe.New(SignatureDoesNotMatch{})
return BucketMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
if !IsValidBucket(bucket) {
return BucketMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !donut.storedBuckets.Exists(bucket) {
if len(donut.config.NodeDiskMap) > 0 {
@@ -231,7 +231,7 @@ func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketM
storedBucket.bucketMetadata = bucketMetadata
donut.storedBuckets.Set(bucket, storedBucket)
}
return BucketMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
return donut.storedBuckets.Get(bucket).(storedBucket).bucketMetadata, nil
}
@@ -247,15 +247,15 @@ func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, si
return err.Trace()
}
if !ok {
return probe.New(SignatureDoesNotMatch{})
return probe.NewError(SignatureDoesNotMatch{})
}
}
if !IsValidBucket(bucket) {
return probe.New(BucketNameInvalid{Bucket: bucket})
return probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !donut.storedBuckets.Exists(bucket) {
return probe.New(BucketNotFound{Bucket: bucket})
return probe.NewError(BucketNotFound{Bucket: bucket})
}
if len(donut.config.NodeDiskMap) > 0 {
if err := donut.setBucketMetadata(bucket, metadata); err != nil {
@@ -273,18 +273,18 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
if err != nil {
return probe.New(err)
return probe.NewError(err)
}
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
if err != nil {
return probe.New(err)
return probe.NewError(err)
}
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
return probe.New(BadDigest{})
return probe.NewError(BadDigest{})
}
return nil
}
return probe.New(InvalidArgument{})
return probe.NewError(InvalidArgument{})
}
// CreateObject - create an object
@@ -305,7 +305,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
if len(donut.config.NodeDiskMap) == 0 {
if size > int64(donut.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
return ObjectMetadata{}, probe.New(EntityTooLarge{
return ObjectMetadata{}, probe.NewError(EntityTooLarge{
GenericObjectError: generic,
Size: strconv.FormatInt(size, 10),
MaxSize: strconv.FormatUint(donut.config.MaxSize, 10),
@@ -313,19 +313,19 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
}
}
if !IsValidBucket(bucket) {
return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key})
return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// get object key
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
return ObjectMetadata{}, probe.New(ObjectExists{Object: key})
return ObjectMetadata{}, probe.NewError(ObjectExists{Object: key})
}
if contentType == "" {
@@ -336,7 +336,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return ObjectMetadata{}, probe.New(InvalidDigest{Md5: expectedMD5Sum})
return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
@@ -375,7 +375,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
sha256hash.Write(byteBuffer[0:length])
ok := donut.objects.Append(objectKey, byteBuffer[0:length])
if !ok {
return ObjectMetadata{}, probe.New(InternalError{})
return ObjectMetadata{}, probe.NewError(InternalError{})
}
totalLength += int64(length)
go debug.FreeOSMemory()
@@ -383,17 +383,17 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
if totalLength != size {
// Delete perhaps the object is already saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.New(IncompleteBody{Bucket: bucket, Object: key})
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
if err != io.EOF {
return ObjectMetadata{}, probe.New(err)
return ObjectMetadata{}, probe.NewError(err)
}
md5SumBytes := hash.Sum(nil)
md5Sum := hex.EncodeToString(md5SumBytes)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
return ObjectMetadata{}, probe.New(BadDigest{})
return ObjectMetadata{}, probe.NewError(BadDigest{})
}
}
if signature != nil {
@@ -402,7 +402,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
@@ -433,7 +433,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
if location != nil {
locationConstraintBytes, err := ioutil.ReadAll(location)
if err != nil {
return probe.New(InternalError{})
return probe.NewError(InternalError{})
}
locationSum = hex.EncodeToString(sha256.Sum256(locationConstraintBytes)[:])
}
@@ -444,21 +444,21 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
return err.Trace()
}
if !ok {
return probe.New(SignatureDoesNotMatch{})
return probe.NewError(SignatureDoesNotMatch{})
}
}
if donut.storedBuckets.Stats().Items == totalBuckets {
return probe.New(TooManyBuckets{Bucket: bucketName})
return probe.NewError(TooManyBuckets{Bucket: bucketName})
}
if !IsValidBucket(bucketName) {
return probe.New(BucketNameInvalid{Bucket: bucketName})
return probe.NewError(BucketNameInvalid{Bucket: bucketName})
}
if !IsValidBucketACL(acl) {
return probe.New(InvalidACL{ACL: acl})
return probe.NewError(InvalidACL{ACL: acl})
}
if donut.storedBuckets.Exists(bucketName) {
return probe.New(BucketExists{Bucket: bucketName})
return probe.NewError(BucketExists{Bucket: bucketName})
}
if strings.TrimSpace(acl) == "" {
@@ -493,18 +493,18 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, s
return nil, BucketResourcesMetadata{}, err.Trace()
}
if !ok {
return nil, BucketResourcesMetadata{}, probe.New(SignatureDoesNotMatch{})
return nil, BucketResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
if !IsValidBucket(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(BucketNameInvalid{Bucket: bucket})
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidPrefix(resources.Prefix) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(ObjectNameInvalid{Object: resources.Prefix})
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(ObjectNameInvalid{Object: resources.Prefix})
}
if !donut.storedBuckets.Exists(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(BucketNotFound{Bucket: bucket})
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(BucketNotFound{Bucket: bucket})
}
var results []ObjectMetadata
var keys []string
@@ -593,10 +593,10 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Err
if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return nil, probe.New(err)
return nil, err.Trace()
}
if !ok {
return nil, probe.New(SignatureDoesNotMatch{})
return nil, probe.NewError(SignatureDoesNotMatch{})
}
}
@@ -630,19 +630,19 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
// check if bucket exists
if !IsValidBucket(bucket) {
return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key})
return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
@@ -659,7 +659,7 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
donut.storedBuckets.Set(bucket, storedBucket)
return objMetadata, nil
}
return ObjectMetadata{}, probe.New(ObjectNotFound{Object: key})
return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: key})
}
// evictedObject callback function called when an item is evicted from memory

View File

@@ -45,12 +45,12 @@ func (s *MyCacheSuite) SetUpSuite(c *C) {
s.root = root
SetDonutConfigPath(filepath.Join(root, "donut.json"))
dc, err = New()
c.Assert(err, IsNil)
dc, _ = New()
// testing empty cache
buckets, err := dc.ListBuckets(nil)
c.Assert(err, IsNil)
var buckets []BucketMetadata
buckets, perr := dc.ListBuckets(nil)
c.Assert(perr, IsNil)
c.Assert(len(buckets), Equals, 0)
}

View File

@@ -38,7 +38,7 @@ func getErasureTechnique(technique string) (encoding.Technique, *probe.Error) {
case technique == "Vandermonde":
return encoding.Cauchy, nil
default:
return encoding.None, probe.New(InvalidErasureTechnique{Technique: technique})
return encoding.None, probe.NewError(InvalidErasureTechnique{Technique: technique})
}
}
@@ -52,7 +52,7 @@ func newEncoder(k, m uint8, technique string) (encoder, *probe.Error) {
{
params, err := encoding.ValidateParams(k, m, t)
if err != nil {
return encoder{}, probe.New(err)
return encoder{}, probe.NewError(err)
}
e.encoder = encoding.NewErasure(params)
e.k = k
@@ -66,7 +66,7 @@ func newEncoder(k, m uint8, technique string) (encoder, *probe.Error) {
// GetEncodedBlockLen - wrapper around erasure function with the same name
func (e encoder) GetEncodedBlockLen(dataLength int) (int, *probe.Error) {
if dataLength <= 0 {
return 0, probe.New(InvalidArgument{})
return 0, probe.NewError(InvalidArgument{})
}
return encoding.GetEncodedBlockLen(dataLength, e.k), nil
}
@@ -74,11 +74,11 @@ func (e encoder) GetEncodedBlockLen(dataLength int) (int, *probe.Error) {
// Encode - erasure code input bytes
func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) {
if data == nil {
return nil, probe.New(InvalidArgument{})
return nil, probe.NewError(InvalidArgument{})
}
encodedData, err := e.encoder.Encode(data)
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
return encodedData, nil
}
@@ -86,7 +86,7 @@ func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) {
func (e encoder) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, *probe.Error) {
encodedData, inputData, err := e.encoder.EncodeStream(data, size)
if err != nil {
return nil, nil, probe.New(err)
return nil, nil, probe.NewError(err)
}
return encodedData, inputData, nil
}
@@ -95,7 +95,7 @@ func (e encoder) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, *pr
func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) {
decodedData, err := e.encoder.Decode(encodedData, dataLength)
if err != nil {
return nil, probe.New(err)
return nil, probe.NewError(err)
}
return decodedData, nil
}

View File

@@ -54,7 +54,7 @@ func (donut API) healBuckets() *probe.Error {
defer bucketMetadataWriter.Close()
jenc := json.NewEncoder(bucketMetadataWriter)
if err := jenc.Encode(bucketMetadata); err != nil {
return probe.New(err)
return probe.NewError(err)
}
for bucket := range bucketMetadata.Buckets {
bucketSlice := fmt.Sprintf("%s$0$%d", bucket, order) // TODO handle node slices

View File

@@ -41,7 +41,7 @@ func (donut API) Info() (nodeDiskMap map[string][]string, err *probe.Error) {
// AttachNode - attach node
func (donut API) AttachNode(hostname string, disks []string) *probe.Error {
if hostname == "" || len(disks) == 0 {
return probe.New(InvalidArgument{})
return probe.NewError(InvalidArgument{})
}
node, err := newNode(hostname)
if err != nil {
@@ -71,7 +71,7 @@ func (donut API) DetachNode(hostname string) *probe.Error {
// Rebalance - rebalance an existing donut with new disks and nodes
func (donut API) Rebalance() *probe.Error {
return probe.New(APINotImplemented{API: "management.Rebalance"})
return probe.NewError(APINotImplemented{API: "management.Rebalance"})
}
// Heal - heal your donuts

View File

@@ -45,10 +45,10 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return "", probe.New(BucketNameInvalid{Bucket: bucket})
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return "", probe.New(ObjectNameInvalid{Object: key})
return "", probe.NewError(ObjectNameInvalid{Object: key})
}
if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
@@ -56,19 +56,19 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
return "", err.Trace()
}
if !ok {
return "", probe.New(SignatureDoesNotMatch{})
return "", probe.NewError(SignatureDoesNotMatch{})
}
}
if len(donut.config.NodeDiskMap) > 0 {
return donut.newMultipartUpload(bucket, key, contentType)
}
if !donut.storedBuckets.Exists(bucket) {
return "", probe.New(BucketNotFound{Bucket: bucket})
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
return "", probe.New(ObjectExists{Object: key})
return "", probe.NewError(ObjectExists{Object: key})
}
id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String())
uploadIDSum := sha512.Sum512(id)
@@ -93,10 +93,10 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *S
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return probe.New(BucketNameInvalid{Bucket: bucket})
return probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return probe.New(ObjectNameInvalid{Object: key})
return probe.NewError(ObjectNameInvalid{Object: key})
}
if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
@@ -104,18 +104,18 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *S
return err.Trace()
}
if !ok {
return probe.New(SignatureDoesNotMatch{})
return probe.NewError(SignatureDoesNotMatch{})
}
}
if len(donut.config.NodeDiskMap) > 0 {
return donut.abortMultipartUpload(bucket, key, uploadID)
}
if !donut.storedBuckets.Exists(bucket) {
return probe.New(BucketNotFound{Bucket: bucket})
return probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if storedBucket.multiPartSession[key].UploadID != uploadID {
return probe.New(InvalidUploadID{UploadID: uploadID})
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
donut.cleanupMultipartSession(bucket, key, uploadID)
return nil
@@ -135,10 +135,10 @@ func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, cont
// createObject - internal wrapper function called by CreateObjectPart
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
if !IsValidBucket(bucket) {
return "", probe.New(BucketNameInvalid{Bucket: bucket})
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return "", probe.New(ObjectNameInvalid{Object: key})
return "", probe.NewError(ObjectNameInvalid{Object: key})
}
if len(donut.config.NodeDiskMap) > 0 {
metadata := make(map[string]string)
@@ -151,7 +151,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", probe.New(InvalidDigest{Md5: expectedMD5Sum})
return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
@@ -163,12 +163,12 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
}
if !donut.storedBuckets.Exists(bucket) {
return "", probe.New(BucketNotFound{Bucket: bucket})
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
strBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if strBucket.multiPartSession[key].UploadID != uploadID {
return "", probe.New(InvalidUploadID{UploadID: uploadID})
return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
}
// get object key
@@ -185,7 +185,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", probe.New(InvalidDigest{Md5: expectedMD5Sum})
return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
@@ -204,17 +204,17 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
sha256hash.Write(byteBuffer[0:length])
ok := donut.multiPartObjects[uploadID].Append(partID, byteBuffer[0:length])
if !ok {
return "", probe.New(InternalError{})
return "", probe.NewError(InternalError{})
}
totalLength += int64(length)
go debug.FreeOSMemory()
}
if totalLength != size {
donut.multiPartObjects[uploadID].Delete(partID)
return "", probe.New(IncompleteBody{Bucket: bucket, Object: key})
return "", probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
if err != io.EOF {
return "", probe.New(err)
return "", probe.NewError(err)
}
md5SumBytes := hash.Sum(nil)
@@ -233,7 +233,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
return "", err.Trace()
}
if !ok {
return "", probe.New(SignatureDoesNotMatch{})
return "", probe.NewError(SignatureDoesNotMatch{})
}
}
}
@@ -271,11 +271,11 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
if !IsValidBucket(bucket) {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key})
return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
if len(donut.config.NodeDiskMap) > 0 {
donut.lock.Unlock()
@@ -284,18 +284,18 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
if !donut.storedBuckets.Exists(bucket) {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if storedBucket.multiPartSession[key].UploadID != uploadID {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID})
return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
partBytes, err := ioutil.ReadAll(data)
if err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(err)
return ObjectMetadata{}, probe.NewError(err)
}
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256.Sum256(partBytes)[:]))
@@ -305,17 +305,17 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
}
if !ok {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
if err := xml.Unmarshal(partBytes, parts); err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(MalformedXML{})
return ObjectMetadata{}, probe.NewError(MalformedXML{})
}
if !sort.IsSorted(completedParts(parts.Part)) {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(InvalidPartOrder{})
return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
}
var size int64
@@ -325,7 +325,7 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
object, ok := donut.multiPartObjects[uploadID].Get(parts.Part[i].PartNumber)
if ok == false {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(InvalidPart{})
return ObjectMetadata{}, probe.NewError(InvalidPart{})
}
size += int64(len(object))
calcMD5Bytes := md5.Sum(object)
@@ -333,15 +333,15 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
if err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(InvalidDigest{Md5: recvMD5})
return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: recvMD5})
}
if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(BadDigest{})
return ObjectMetadata{}, probe.NewError(BadDigest{})
}
if _, err := io.Copy(&fullObject, bytes.NewBuffer(object)); err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, probe.New(err)
return ObjectMetadata{}, probe.NewError(err)
}
object = nil
go debug.FreeOSMemory()
@@ -386,12 +386,12 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
return BucketMultipartResourcesMetadata{}, err.Trace()
}
if !ok {
return BucketMultipartResourcesMetadata{}, probe.New(SignatureDoesNotMatch{})
return BucketMultipartResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
if !IsValidBucket(bucket) {
return BucketMultipartResourcesMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if len(donut.config.NodeDiskMap) > 0 {
@@ -399,7 +399,7 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
}
if !donut.storedBuckets.Exists(bucket) {
return BucketMultipartResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
@@ -468,15 +468,15 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
return ObjectResourcesMetadata{}, err.Trace()
}
if !ok {
return ObjectResourcesMetadata{}, probe.New(SignatureDoesNotMatch{})
return ObjectResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
if !IsValidBucket(bucket) {
return ObjectResourcesMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
return ObjectResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectResourcesMetadata{}, probe.New(ObjectNameInvalid{Object: key})
return ObjectResourcesMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
if len(donut.config.NodeDiskMap) > 0 {
@@ -484,14 +484,14 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
}
if !donut.storedBuckets.Exists(bucket) {
return ObjectResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if _, ok := storedBucket.multiPartSession[key]; ok == false {
return ObjectResourcesMetadata{}, probe.New(ObjectNotFound{Object: key})
return ObjectResourcesMetadata{}, probe.NewError(ObjectNotFound{Object: key})
}
if storedBucket.multiPartSession[key].UploadID != resources.UploadID {
return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID})
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
storedParts := storedBucket.partMetadata[key]
objectResourcesMetadata := resources
@@ -515,7 +515,7 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
}
part, ok := storedParts[i]
if !ok {
return ObjectResourcesMetadata{}, probe.New(InvalidPart{})
return ObjectResourcesMetadata{}, probe.NewError(InvalidPart{})
}
parts = append(parts, &part)
}

View File

@@ -30,7 +30,7 @@ type node struct {
// newNode - instantiates a new node
func newNode(hostname string) (node, *probe.Error) {
if hostname == "" {
return node{}, probe.New(InvalidArgument{})
return node{}, probe.NewError(InvalidArgument{})
}
disks := make(map[int]disk.Disk)
n := node{
@@ -53,7 +53,7 @@ func (n node) ListDisks() (map[int]disk.Disk, *probe.Error) {
// AttachDisk - attach a disk
func (n node) AttachDisk(disk disk.Disk, diskOrder int) *probe.Error {
if diskOrder < 0 {
return probe.New(InvalidArgument{})
return probe.NewError(InvalidArgument{})
}
n.disks[diskOrder] = disk
return nil
@@ -67,10 +67,10 @@ func (n node) DetachDisk(diskOrder int) *probe.Error {
// SaveConfig - save node configuration
func (n node) SaveConfig() *probe.Error {
return probe.New(NotImplemented{Function: "SaveConfig"})
return probe.NewError(NotImplemented{Function: "SaveConfig"})
}
// LoadConfig - load node configuration from saved configs
func (n node) LoadConfig() *probe.Error {
return probe.New(NotImplemented{Function: "LoadConfig"})
return probe.NewError(NotImplemented{Function: "LoadConfig"})
}

View File

@@ -78,7 +78,7 @@ func urlEncodeName(name string) (string, *probe.Error) {
default:
len := utf8.RuneLen(s)
if len < 0 {
return "", probe.New(InvalidArgument{})
return "", probe.NewError(InvalidArgument{})
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
@@ -220,12 +220,12 @@ func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error
var date string
if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = r.Request.Header.Get("Date"); date == "" {
return false, probe.New(MissingDateHeader{})
return false, probe.NewError(MissingDateHeader{})
}
}
t, err := time.Parse(iso8601Format, date)
if err != nil {
return false, probe.New(err)
return false, probe.NewError(err)
}
canonicalRequest := r.getCanonicalRequest()
stringToSign := r.getStringToSign(canonicalRequest, t)