Migrate from iodine to probe
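Every hunk in this commit applies the same mechanical rewrite across the donut storage layer: functions that returned a plain `error` now return a typed `*probe.Error`, fresh failures are wrapped with `probe.New(...)` instead of `iodine.New(..., nil)`, and errors bubbling up from callees that already return `*probe.Error` are re-stamped with `err.Trace()` rather than re-wrapped. A minimal sketch of the before/after shape, using a hypothetical `openConfig`/`loadConfig` pair (not part of this commit) and the `probe` API exactly as it appears in the diff:

```go
package donut

import (
	"os"

	"github.com/minio/minio/pkg/probe"
)

// openConfig is a hypothetical helper. Before this commit it would have
// returned (..., error) and wrapped failures as iodine.New(err, nil).
func openConfig(path string) (*os.File, *probe.Error) {
	f, err := os.Open(path)
	if err != nil {
		// Wrap a fresh failure once, at its origin.
		return nil, probe.New(err)
	}
	return f, nil
}

// Callers that receive a *probe.Error append their own frame with Trace()
// instead of wrapping again.
func loadConfig(path string) (*os.File, *probe.Error) {
	f, err := openConfig(path)
	if err != nil {
		return nil, err.Trace()
	}
	return f, nil
}
```

The `errParams` context maps that iodine carried alongside each error are dropped wholesale; probe records the call trace instead of ad-hoc key/value context.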
@@ -34,7 +34,7 @@ import (
 	"github.com/minio/minio/pkg/crypto/sha256"
 	"github.com/minio/minio/pkg/crypto/sha512"
 	"github.com/minio/minio/pkg/donut/disk"
-	"github.com/minio/minio/pkg/iodine"
+	"github.com/minio/minio/pkg/probe"
 )

 const (
@@ -52,15 +52,9 @@ type bucket struct {
 }

 // newBucket - instantiate a new bucket
-func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bucket, BucketMetadata, error) {
-	errParams := map[string]string{
-		"bucketName": bucketName,
-		"donutName":  donutName,
-		"aclType":    aclType,
-	}
-
+func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bucket, BucketMetadata, *probe.Error) {
 	if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
-		return bucket{}, BucketMetadata{}, iodine.New(InvalidArgument{}, errParams)
+		return bucket{}, BucketMetadata{}, probe.New(InvalidArgument{})
 	}

 	b := bucket{}
@@ -89,14 +83,14 @@ func (b bucket) getBucketName() string {
 }

 // getBucketMetadataReaders -
-func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, error) {
+func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
 	readers := make(map[int]io.ReadCloser)
 	var disks map[int]disk.Disk
-	var err error
+	var err *probe.Error
 	for _, node := range b.nodes {
 		disks, err = node.ListDisks()
 		if err != nil {
-			return nil, iodine.New(err, nil)
+			return nil, err.Trace()
 		}
 	}
 	var bucketMetaDataReader io.ReadCloser
@@ -108,40 +102,44 @@ func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, error) {
 		readers[order] = bucketMetaDataReader
 	}
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, err.Trace()
 	}
 	return readers, nil
 }

 // getBucketMetadata -
-func (b bucket) getBucketMetadata() (*AllBuckets, error) {
-	var err error
+func (b bucket) getBucketMetadata() (*AllBuckets, *probe.Error) {
 	metadata := new(AllBuckets)
-	readers, err := b.getBucketMetadataReaders()
-	if err != nil {
-		return nil, iodine.New(err, nil)
+	var readers map[int]io.ReadCloser
+	{
+		var err *probe.Error
+		readers, err = b.getBucketMetadataReaders()
+		if err != nil {
+			return nil, err.Trace()
+		}
 	}
 	for _, reader := range readers {
 		defer reader.Close()
 	}
+	var err error
 	for _, reader := range readers {
 		jenc := json.NewDecoder(reader)
 		if err = jenc.Decode(metadata); err == nil {
 			return metadata, nil
 		}
 	}
-	return nil, iodine.New(err, nil)
+	return nil, probe.New(err)
 }

 // GetObjectMetadata - get metadata for an object
-func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, error) {
+func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
 	return b.readObjectMetadata(objectName)
 }

 // ListObjects - list all objects
-func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, error) {
+func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
 	if maxkeys <= 0 {
@@ -151,7 +149,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List
 	var objects []string
 	bucketMetadata, err := b.getBucketMetadata()
 	if err != nil {
-		return ListObjectsResults{}, iodine.New(err, nil)
+		return ListObjectsResults{}, err.Trace()
 	}
 	for objectName := range bucketMetadata.Buckets[b.getBucketName()].Multiparts {
 		if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
@@ -206,7 +204,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List
 	for _, objectName := range results {
 		objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
 		if err != nil {
-			return ListObjectsResults{}, iodine.New(err, nil)
+			return ListObjectsResults{}, err.Trace()
 		}
 		listObjects.Objects[objectName] = objMetadata
 	}
@@ -214,22 +212,22 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List
 }

 // ReadObject - open an object to read
-func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err error) {
+func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err *probe.Error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
 	reader, writer := io.Pipe()
 	// get list of objects
 	bucketMetadata, err := b.getBucketMetadata()
 	if err != nil {
-		return nil, 0, iodine.New(err, nil)
+		return nil, 0, err.Trace()
 	}
 	// check if object exists
 	if _, ok := bucketMetadata.Buckets[b.getBucketName()].BucketObjects[objectName]; !ok {
-		return nil, 0, iodine.New(ObjectNotFound{Object: objectName}, nil)
+		return nil, 0, probe.New(ObjectNotFound{Object: objectName})
 	}
 	objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
 	if err != nil {
-		return nil, 0, iodine.New(err, nil)
+		return nil, 0, err.Trace()
 	}
 	// read and reply back to GetObject() request in a go-routine
 	go b.readObjectData(normalizeObjectName(objectName), writer, objMetadata)
@@ -237,15 +235,15 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
 }

 // WriteObject - write a new object into bucket
-func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *Signature) (ObjectMetadata, error) {
+func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
 	if objectName == "" || objectData == nil {
-		return ObjectMetadata{}, iodine.New(InvalidArgument{}, nil)
+		return ObjectMetadata{}, probe.New(InvalidArgument{})
 	}
 	writers, err := b.getObjectWriters(normalizeObjectName(objectName), "data")
 	if err != nil {
-		return ObjectMetadata{}, iodine.New(err, nil)
+		return ObjectMetadata{}, err.Trace()
 	}
 	sumMD5 := md5.New()
 	sum512 := sha512.New()
@@ -268,7 +266,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
 		totalLength, err := io.Copy(mw, objectData)
 		if err != nil {
 			CleanupWritersOnError(writers)
-			return ObjectMetadata{}, iodine.New(err, nil)
+			return ObjectMetadata{}, probe.New(err)
 		}
 		objMetadata.Size = totalLength
 	case false:
@@ -276,13 +274,13 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
 		k, m, err := b.getDataAndParity(len(writers))
 		if err != nil {
 			CleanupWritersOnError(writers)
-			return ObjectMetadata{}, iodine.New(err, nil)
+			return ObjectMetadata{}, err.Trace()
 		}
 		// write encoded data with k, m and writers
 		chunkCount, totalLength, err := b.writeObjectData(k, m, writers, objectData, size, mwriter)
 		if err != nil {
 			CleanupWritersOnError(writers)
-			return ObjectMetadata{}, iodine.New(err, nil)
+			return ObjectMetadata{}, err.Trace()
 		}
 		/// donutMetadata section
 		objMetadata.BlockSize = blockSize
@@ -301,14 +299,14 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
 		if err != nil {
 			// error occurred while doing signature calculation, we return and also cleanup any temporary writers.
 			CleanupWritersOnError(writers)
-			return ObjectMetadata{}, iodine.New(err, nil)
+			return ObjectMetadata{}, err.Trace()
 		}
 		if !ok {
 			// purge all writers, when control flow reaches here
 			//
 			// Signature mismatch occurred all temp files to be removed and all data purged.
 			CleanupWritersOnError(writers)
-			return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil)
+			return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
 		}
 	}
 	objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
@@ -317,7 +315,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
 	// Verify if the written object is equal to what is expected, only if it is requested as such
 	if strings.TrimSpace(expectedMD5Sum) != "" {
 		if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objMetadata.MD5Sum); err != nil {
-			return ObjectMetadata{}, iodine.New(err, nil)
+			return ObjectMetadata{}, err.Trace()
 		}
 	}
 	objMetadata.Metadata = metadata
@@ -325,7 +323,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
 	if err := b.writeObjectMetadata(normalizeObjectName(objectName), objMetadata); err != nil {
 		// purge all writers, when control flow reaches here
 		CleanupWritersOnError(writers)
-		return ObjectMetadata{}, iodine.New(err, nil)
+		return ObjectMetadata{}, err.Trace()
 	}
 	// close all writers, when control flow reaches here
 	for _, writer := range writers {
@@ -335,39 +333,39 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
 }

 // isMD5SumEqual - returns error if md5sum mismatches, other its `nil`
-func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
+func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
 	if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
 		expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
 		if err != nil {
-			return iodine.New(err, nil)
+			return probe.New(err)
 		}
 		actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
 		if err != nil {
-			return iodine.New(err, nil)
+			return probe.New(err)
 		}
 		if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
-			return iodine.New(BadDigest{}, nil)
+			return probe.New(BadDigest{})
 		}
 		return nil
 	}
-	return iodine.New(InvalidArgument{}, nil)
+	return probe.New(InvalidArgument{})
 }

 // writeObjectMetadata - write additional object metadata
-func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadata) error {
+func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadata) *probe.Error {
 	if objMetadata.Object == "" {
-		return iodine.New(InvalidArgument{}, nil)
+		return probe.New(InvalidArgument{})
 	}
 	objMetadataWriters, err := b.getObjectWriters(objectName, objectMetadataConfig)
 	if err != nil {
-		return iodine.New(err, nil)
+		return err.Trace()
 	}
 	for _, objMetadataWriter := range objMetadataWriters {
 		jenc := json.NewEncoder(objMetadataWriter)
 		if err := jenc.Encode(&objMetadata); err != nil {
 			// Close writers and purge all temporary entries
 			CleanupWritersOnError(objMetadataWriters)
-			return iodine.New(err, nil)
+			return probe.New(err)
 		}
 	}
 	for _, objMetadataWriter := range objMetadataWriters {
@@ -377,26 +375,28 @@ func (b bucket) writeObjectMetadata(objectName string, objMetadat
 }

 // readObjectMetadata - read object metadata
-func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, error) {
+func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) {
 	if objectName == "" {
-		return ObjectMetadata{}, iodine.New(InvalidArgument{}, nil)
+		return ObjectMetadata{}, probe.New(InvalidArgument{})
 	}
-	var err error
 	objMetadata := ObjectMetadata{}
 	objMetadataReaders, err := b.getObjectReaders(objectName, objectMetadataConfig)
 	if err != nil {
-		return ObjectMetadata{}, iodine.New(err, nil)
+		return ObjectMetadata{}, err.Trace()
 	}
 	for _, objMetadataReader := range objMetadataReaders {
 		defer objMetadataReader.Close()
 	}
-	for _, objMetadataReader := range objMetadataReaders {
-		jdec := json.NewDecoder(objMetadataReader)
-		if err = jdec.Decode(&objMetadata); err == nil {
-			return objMetadata, nil
+	{
+		var err error
+		for _, objMetadataReader := range objMetadataReaders {
+			jdec := json.NewDecoder(objMetadataReader)
+			if err = jdec.Decode(&objMetadata); err == nil {
+				return objMetadata, nil
+			}
+		}
+		return ObjectMetadata{}, probe.New(err)
 	}
-	return ObjectMetadata{}, iodine.New(err, nil)
 }

 // TODO - This a temporary normalization of objectNames, need to find a better way
@@ -413,14 +413,14 @@ func normalizeObjectName(objectName string) string {
 }

 // getDataAndParity - calculate k, m (data and parity) values from number of disks
-func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error) {
+func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe.Error) {
 	if totalWriters <= 1 {
-		return 0, 0, iodine.New(InvalidArgument{}, nil)
+		return 0, 0, probe.New(InvalidArgument{})
 	}
 	quotient := totalWriters / 2 // not using float or abs to let integer round off to lower value
 	// quotient cannot be bigger than (255 / 2) = 127
 	if quotient > 127 {
-		return 0, 0, iodine.New(ParityOverflow{}, nil)
+		return 0, 0, probe.New(ParityOverflow{})
 	}
 	remainder := totalWriters % 2 // will be 1 for odd and 0 for even numbers
 	k = uint8(quotient + remainder)
@@ -429,11 +429,11 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error)
 }

 // writeObjectData -
-func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, writer io.Writer) (int, int, error) {
+func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, writer io.Writer) (int, int, *probe.Error) {
 	encoder, err := newEncoder(k, m, "Cauchy")
 	chunkSize := int64(10 * 1024 * 1024)
 	if err != nil {
-		return 0, 0, iodine.New(err, nil)
+		return 0, 0, err.Trace()
 	}
 	chunkCount := 0
 	totalLength := 0
@@ -447,11 +447,10 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
 		totalLength = totalLength + int(readSize)
 		encodedBlocks, inputData, err := encoder.EncodeStream(objectData, readSize)
 		if err != nil {
-			return 0, 0, iodine.New(err, nil)
+			return 0, 0, err.Trace()
 		}
-		_, err = writer.Write(inputData)
-		if err != nil {
-			return 0, 0, iodine.New(err, nil)
+		if _, err := writer.Write(inputData); err != nil {
+			return 0, 0, probe.New(err)
 		}
 		for blockIndex, block := range encodedBlocks {
 			errCh := make(chan error, 1)
@@ -462,7 +461,7 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
 			}(writers[blockIndex], bytes.NewReader(block), errCh)
 			if err := <-errCh; err != nil {
 				// Returning error is fine here CleanupErrors() would cleanup writers
-				return 0, 0, iodine.New(err, nil)
+				return 0, 0, probe.New(err)
 			}
 		}
 		chunkCount = chunkCount + 1
@@ -474,21 +473,25 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
 func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
 	readers, err := b.getObjectReaders(objectName, "data")
 	if err != nil {
-		writer.CloseWithError(iodine.New(err, nil))
+		writer.CloseWithError(err.Trace())
 		return
 	}
 	for _, reader := range readers {
 		defer reader.Close()
 	}
-	expectedMd5sum, err := hex.DecodeString(objMetadata.MD5Sum)
-	if err != nil {
-		writer.CloseWithError(iodine.New(err, nil))
-		return
-	}
-	expected512Sum, err := hex.DecodeString(objMetadata.SHA512Sum)
-	if err != nil {
-		writer.CloseWithError(iodine.New(err, nil))
-		return
+	var expected512Sum, expectedMd5sum []byte
+	{
+		var err error
+		expectedMd5sum, err = hex.DecodeString(objMetadata.MD5Sum)
+		if err != nil {
+			writer.CloseWithError(probe.New(err))
+			return
+		}
+		expected512Sum, err = hex.DecodeString(objMetadata.SHA512Sum)
+		if err != nil {
+			writer.CloseWithError(probe.New(err))
+			return
+		}
 	}
 	hasher := md5.New()
 	sum512hasher := sha256.New()
@@ -496,24 +499,23 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta
 	switch len(readers) > 1 {
 	case true:
 		if objMetadata.ErasureTechnique == "" {
-			writer.CloseWithError(iodine.New(MissingErasureTechnique{}, nil))
+			writer.CloseWithError(probe.New(MissingErasureTechnique{}))
 			return
 		}
 		encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks, objMetadata.ErasureTechnique)
 		if err != nil {
-			writer.CloseWithError(iodine.New(err, nil))
+			writer.CloseWithError(err.Trace())
 			return
 		}
 		totalLeft := objMetadata.Size
 		for i := 0; i < objMetadata.ChunkCount; i++ {
 			decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
 			if err != nil {
-				writer.CloseWithError(iodine.New(err, nil))
+				writer.CloseWithError(err.Trace())
 				return
 			}
-			_, err = io.Copy(mwriter, bytes.NewReader(decodedData))
-			if err != nil {
-				writer.CloseWithError(iodine.New(err, nil))
+			if _, err := io.Copy(mwriter, bytes.NewReader(decodedData)); err != nil {
+				writer.CloseWithError(probe.New(err))
 				return
 			}
 			totalLeft = totalLeft - int64(objMetadata.BlockSize)
@@ -521,17 +523,17 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta
 	case false:
 		_, err := io.Copy(writer, readers[0])
 		if err != nil {
-			writer.CloseWithError(iodine.New(err, nil))
+			writer.CloseWithError(probe.New(err))
 			return
 		}
 	}
 	// check if decodedData md5sum matches
 	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
-		writer.CloseWithError(iodine.New(ChecksumMismatch{}, nil))
+		writer.CloseWithError(probe.New(ChecksumMismatch{}))
 		return
 	}
 	if !bytes.Equal(expected512Sum, sum512hasher.Sum(nil)) {
-		writer.CloseWithError(iodine.New(ChecksumMismatch{}, nil))
+		writer.CloseWithError(probe.New(ChecksumMismatch{}))
 		return
 	}
 	writer.Close()
@@ -539,7 +541,7 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta
 }

 // decodeEncodedData -
-func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, error) {
+func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, *probe.Error) {
 	var curBlockSize int64
 	if blockSize < totalLeft {
 		curBlockSize = blockSize
@@ -548,34 +550,34 @@ func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io
 	}
 	curChunkSize, err := encoder.GetEncodedBlockLen(int(curBlockSize))
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, err.Trace()
 	}
 	encodedBytes := make([][]byte, encoder.k+encoder.m)
 	for i, reader := range readers {
 		var bytesBuffer bytes.Buffer
 		_, err := io.CopyN(&bytesBuffer, reader, int64(curChunkSize))
 		if err != nil {
-			return nil, iodine.New(err, nil)
+			return nil, probe.New(err)
 		}
 		encodedBytes[i] = bytesBuffer.Bytes()
 	}
 	decodedData, err := encoder.Decode(encodedBytes, int(curBlockSize))
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, err.Trace()
 	}
 	return decodedData, nil
 }

 // getObjectReaders -
-func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, error) {
+func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, *probe.Error) {
 	readers := make(map[int]io.ReadCloser)
 	var disks map[int]disk.Disk
-	var err error
+	var err *probe.Error
 	nodeSlice := 0
 	for _, node := range b.nodes {
 		disks, err = node.ListDisks()
 		if err != nil {
-			return nil, iodine.New(err, nil)
+			return nil, err.Trace()
 		}
 		for order, disk := range disks {
 			var objectSlice io.ReadCloser
@@ -589,19 +591,19 @@ func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.Read
 		nodeSlice = nodeSlice + 1
 	}
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, err.Trace()
 	}
 	return readers, nil
 }

 // getObjectWriters -
-func (b bucket) getObjectWriters(objectName, objectMeta string) ([]io.WriteCloser, error) {
+func (b bucket) getObjectWriters(objectName, objectMeta string) ([]io.WriteCloser, *probe.Error) {
 	var writers []io.WriteCloser
 	nodeSlice := 0
 	for _, node := range b.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
-			return nil, iodine.New(err, nil)
+			return nil, err.Trace()
 		}
 		writers = make([]io.WriteCloser, len(disks))
 		for order, disk := range disks {
@@ -609,7 +611,7 @@ func (b bucket) getObjectWriters(objectName, objectMeta string) ([]io.WriteClose
 			objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta)
 			objectSlice, err := disk.CreateFile(objectPath)
 			if err != nil {
-				return nil, iodine.New(err, nil)
+				return nil, err.Trace()
 			}
 			writers[order] = objectSlice
 		}
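A recurring detail in the bucket changes above: since Go will not let one `err` variable hold both a `*probe.Error` and a built-in `error`, the rewritten `getBucketMetadata`, `readObjectMetadata`, and `readObjectData` confine the plain `error` (from `json.Decode` or `hex.DecodeString`) to an anonymous `{ ... }` block. A sketch of that idiom, reduced to a hypothetical `decodeFirst` helper under the same assumptions:

```go
package donut

import (
	"encoding/json"
	"io"

	"github.com/minio/minio/pkg/probe"
)

// decodeFirst returns the first replica that decodes cleanly. The untyped
// error from json.Decode lives only inside the block, so it cannot shadow
// or collide with *probe.Error values in the enclosing function.
func decodeFirst(readers map[int]io.ReadCloser) (*AllBuckets, *probe.Error) {
	metadata := new(AllBuckets)
	for _, reader := range readers {
		defer reader.Close() // close every replica on return
	}
	{
		var err error // plain error, scoped to this block
		for _, reader := range readers {
			if err = json.NewDecoder(reader).Decode(metadata); err == nil {
				return metadata, nil // first good replica wins
			}
		}
		// every replica failed; wrap the last decode error once
		return nil, probe.New(err)
	}
}
```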
@@ -20,18 +20,18 @@ import (
 	"os/user"
 	"path/filepath"

-	"github.com/minio/minio/pkg/iodine"
+	"github.com/minio/minio/pkg/probe"
 	"github.com/minio/minio/pkg/quick"
 )

 // getDonutConfigPath get donut config file path
-func getDonutConfigPath() (string, error) {
+func getDonutConfigPath() (string, *probe.Error) {
 	if customConfigPath != "" {
 		return customConfigPath, nil
 	}
 	u, err := user.Current()
 	if err != nil {
-		return "", iodine.New(err, nil)
+		return "", probe.New(err)
 	}
 	donutConfigPath := filepath.Join(u.HomeDir, ".minio", "donut.json")
 	return donutConfigPath, nil
@@ -46,35 +46,35 @@ func SetDonutConfigPath(configPath string) {
 }

 // SaveConfig save donut config
-func SaveConfig(a *Config) error {
+func SaveConfig(a *Config) *probe.Error {
 	donutConfigPath, err := getDonutConfigPath()
 	if err != nil {
-		return iodine.New(err, nil)
+		return err.Trace()
 	}
 	qc, err := quick.New(a)
 	if err != nil {
-		return iodine.New(err, nil)
+		return err.Trace()
 	}
 	if err := qc.Save(donutConfigPath); err != nil {
-		return iodine.New(err, nil)
+		return err.Trace()
 	}
 	return nil
 }

 // LoadConfig load donut config
-func LoadConfig() (*Config, error) {
+func LoadConfig() (*Config, *probe.Error) {
 	donutConfigPath, err := getDonutConfigPath()
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, err.Trace()
 	}
 	a := &Config{}
 	a.Version = "0.0.1"
 	qc, err := quick.New(a)
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, err.Trace()
 	}
 	if err := qc.Load(donutConfigPath); err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, err.Trace()
 	}
 	return qc.Data().(*Config), nil
 }
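The config helpers keep their call shape; only the error type changes. A hypothetical caller threading the new typed errors through `LoadConfig` and `SaveConfig` as declared above:

```go
package donut

import "github.com/minio/minio/pkg/probe"

// touchConfig is a hypothetical caller: load the donut config, re-save it,
// and let each hop add its own frame via Trace().
func touchConfig() *probe.Error {
	conf, err := LoadConfig()
	if err != nil {
		return err.Trace()
	}
	if err := SaveConfig(conf); err != nil {
		return err.Trace()
	}
	return nil
}
```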
@@ -25,7 +25,7 @@ import (
 	"sync"
 	"syscall"

-	"github.com/minio/minio/pkg/iodine"
+	"github.com/minio/minio/pkg/probe"
 	"github.com/minio/minio/pkg/utils/atomic"
 )

@@ -37,21 +37,22 @@ type Disk struct {
 }

 // New - instantiate new disk
-func New(diskPath string) (Disk, error) {
+func New(diskPath string) (Disk, *probe.Error) {
 	if diskPath == "" {
-		return Disk{}, iodine.New(InvalidArgument{}, nil)
+		return Disk{}, probe.New(InvalidArgument{})
 	}
 	st, err := os.Stat(diskPath)
 	if err != nil {
-		return Disk{}, iodine.New(err, nil)
+		return Disk{}, probe.New(err)
 	}

 	if !st.IsDir() {
-		return Disk{}, iodine.New(syscall.ENOTDIR, nil)
+		return Disk{}, probe.New(syscall.ENOTDIR)
 	}
 	s := syscall.Statfs_t{}
 	err = syscall.Statfs(diskPath, &s)
 	if err != nil {
-		return Disk{}, iodine.New(err, nil)
+		return Disk{}, probe.New(err)
 	}
 	disk := Disk{
 		lock: &sync.Mutex{},
@@ -63,8 +64,7 @@ func New(diskPath string) (Disk, error) {
 		disk.fsInfo["MountPoint"] = disk.path
 		return disk, nil
 	}
-	return Disk{}, iodine.New(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)},
-		map[string]string{"Type": strconv.FormatInt(int64(s.Type), 10)})
+	return Disk{}, probe.New(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)})
 }

 // IsUsable - is disk usable, alive
@@ -99,25 +99,28 @@ func (disk Disk) GetFSInfo() map[string]string {
 }

 // MakeDir - make a directory inside disk root path
-func (disk Disk) MakeDir(dirname string) error {
+func (disk Disk) MakeDir(dirname string) *probe.Error {
 	disk.lock.Lock()
 	defer disk.lock.Unlock()
-	return os.MkdirAll(filepath.Join(disk.path, dirname), 0700)
+	if err := os.MkdirAll(filepath.Join(disk.path, dirname), 0700); err != nil {
+		return probe.New(err)
+	}
+	return nil
 }

 // ListDir - list a directory inside disk root path, get only directories
-func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) {
+func (disk Disk) ListDir(dirname string) ([]os.FileInfo, *probe.Error) {
 	disk.lock.Lock()
 	defer disk.lock.Unlock()

 	dir, err := os.Open(filepath.Join(disk.path, dirname))
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, probe.New(err)
 	}
 	defer dir.Close()
 	contents, err := dir.Readdir(-1)
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, probe.New(err)
 	}
 	var directories []os.FileInfo
 	for _, content := range contents {
@@ -130,18 +133,18 @@ func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) {
 }

 // ListFiles - list a directory inside disk root path, get only files
-func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) {
+func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, *probe.Error) {
 	disk.lock.Lock()
 	defer disk.lock.Unlock()

 	dir, err := os.Open(filepath.Join(disk.path, dirname))
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, probe.New(err)
 	}
 	defer dir.Close()
 	contents, err := dir.Readdir(-1)
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, probe.New(err)
 	}
 	var files []os.FileInfo
 	for _, content := range contents {
@@ -154,48 +157,48 @@ func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) {
 }

 // CreateFile - create a file inside disk root path, replies with custome disk.File which provides atomic writes
-func (disk Disk) CreateFile(filename string) (*atomic.File, error) {
+func (disk Disk) CreateFile(filename string) (*atomic.File, *probe.Error) {
 	disk.lock.Lock()
 	defer disk.lock.Unlock()

 	if filename == "" {
-		return nil, iodine.New(InvalidArgument{}, nil)
+		return nil, probe.New(InvalidArgument{})
 	}

 	f, err := atomic.FileCreate(filepath.Join(disk.path, filename))
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, probe.New(err)
 	}

 	return f, nil
 }

 // Open - read a file inside disk root path
-func (disk Disk) Open(filename string) (*os.File, error) {
+func (disk Disk) Open(filename string) (*os.File, *probe.Error) {
 	disk.lock.Lock()
 	defer disk.lock.Unlock()

 	if filename == "" {
-		return nil, iodine.New(InvalidArgument{}, nil)
+		return nil, probe.New(InvalidArgument{})
 	}
 	dataFile, err := os.Open(filepath.Join(disk.path, filename))
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, probe.New(err)
 	}
 	return dataFile, nil
 }

 // OpenFile - Use with caution
-func (disk Disk) OpenFile(filename string, flags int, perm os.FileMode) (*os.File, error) {
+func (disk Disk) OpenFile(filename string, flags int, perm os.FileMode) (*os.File, *probe.Error) {
 	disk.lock.Lock()
 	defer disk.lock.Unlock()

 	if filename == "" {
-		return nil, iodine.New(InvalidArgument{}, nil)
+		return nil, probe.New(InvalidArgument{})
 	}
 	dataFile, err := os.OpenFile(filepath.Join(disk.path, filename), flags, perm)
 	if err != nil {
-		return nil, iodine.New(err, nil)
+		return nil, probe.New(err)
 	}
 	return dataFile, nil
 }
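The donut-v1.go hunks that follow also show how sentinel checks survive the migration: where the old code recovered the underlying error with `iodine.ToError(err)` before calling `os.IsNotExist`, a `*probe.Error` exposes `ToError()` for the same purpose (see `makeDonutBucket` below). A sketch under those assumptions, with a hypothetical `statBucket` helper:

```go
package donut

import (
	"os"

	"github.com/minio/minio/pkg/probe"
)

// statBucket is a hypothetical helper that wraps os.Stat failures the way
// the disk layer in this diff does.
func statBucket(path string) (os.FileInfo, *probe.Error) {
	fi, err := os.Stat(path)
	if err != nil {
		return nil, probe.New(err)
	}
	return fi, nil
}

// ensureBucket mirrors the makeDonutBucket pattern: unwrap with ToError()
// so os.IsNotExist still sees the original *os.PathError sentinel.
func ensureBucket(path string) *probe.Error {
	if _, err := statBucket(path); err != nil {
		if os.IsNotExist(err.ToError()) {
			if mkErr := os.MkdirAll(path, 0700); mkErr != nil {
				return probe.New(mkErr)
			}
			return nil
		}
		return err.Trace()
	}
	return nil
}
```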
@@ -35,7 +35,7 @@ import (
|
||||
"github.com/minio/minio/pkg/crypto/sha256"
|
||||
"github.com/minio/minio/pkg/crypto/sha512"
|
||||
"github.com/minio/minio/pkg/donut/disk"
|
||||
"github.com/minio/minio/pkg/iodine"
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
)
|
||||
|
||||
// config files used inside Donut
|
||||
@@ -52,41 +52,41 @@ const (
|
||||
/// v1 API functions
|
||||
|
||||
// makeBucket - make a new bucket
|
||||
func (donut API) makeBucket(bucket string, acl BucketACL) error {
|
||||
func (donut API) makeBucket(bucket string, acl BucketACL) *probe.Error {
|
||||
if bucket == "" || strings.TrimSpace(bucket) == "" {
|
||||
return iodine.New(InvalidArgument{}, nil)
|
||||
return probe.New(InvalidArgument{})
|
||||
}
|
||||
return donut.makeDonutBucket(bucket, acl.String())
|
||||
}
|
||||
|
||||
// getBucketMetadata - get bucket metadata
|
||||
func (donut API) getBucketMetadata(bucketName string) (BucketMetadata, error) {
|
||||
func (donut API) getBucketMetadata(bucketName string) (BucketMetadata, *probe.Error) {
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return BucketMetadata{}, iodine.New(err, nil)
|
||||
return BucketMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucketName]; !ok {
|
||||
return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucketName}, nil)
|
||||
return BucketMetadata{}, probe.New(BucketNotFound{Bucket: bucketName})
|
||||
}
|
||||
metadata, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return BucketMetadata{}, iodine.New(err, nil)
|
||||
return BucketMetadata{}, err.Trace()
|
||||
}
|
||||
return metadata.Buckets[bucketName], nil
|
||||
}
|
||||
|
||||
// setBucketMetadata - set bucket metadata
|
||||
func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) error {
|
||||
func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) *probe.Error {
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return iodine.New(err, nil)
|
||||
return err.Trace()
|
||||
}
|
||||
metadata, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return iodine.New(err, nil)
|
||||
return err.Trace()
|
||||
}
|
||||
oldBucketMetadata := metadata.Buckets[bucketName]
|
||||
acl, ok := bucketMetadata["acl"]
|
||||
if !ok {
|
||||
return iodine.New(InvalidArgument{}, nil)
|
||||
return probe.New(InvalidArgument{})
|
||||
}
|
||||
oldBucketMetadata.ACL = BucketACL(acl)
|
||||
metadata.Buckets[bucketName] = oldBucketMetadata
|
||||
@@ -94,9 +94,9 @@ func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]
|
||||
}
|
||||
|
||||
// listBuckets - return list of buckets
|
||||
func (donut API) listBuckets() (map[string]BucketMetadata, error) {
|
||||
func (donut API) listBuckets() (map[string]BucketMetadata, *probe.Error) {
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return nil, iodine.New(err, nil)
|
||||
return nil, err.Trace()
|
||||
}
|
||||
metadata, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
@@ -112,95 +112,80 @@ func (donut API) listBuckets() (map[string]BucketMetadata, error) {
|
||||
}
|
||||
|
||||
// listObjects - return list of objects
|
||||
func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"prefix": prefix,
|
||||
"marker": marker,
|
||||
"delimiter": delimiter,
|
||||
"maxkeys": strconv.Itoa(maxkeys),
|
||||
}
|
||||
func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) {
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return ListObjectsResults{}, iodine.New(err, errParams)
|
||||
return ListObjectsResults{}, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return ListObjectsResults{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
return ListObjectsResults{}, probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
listObjects, err := donut.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
|
||||
if err != nil {
|
||||
return ListObjectsResults{}, iodine.New(err, errParams)
|
||||
return ListObjectsResults{}, err.Trace()
|
||||
}
|
||||
return listObjects, nil
|
||||
}
|
||||
|
||||
// putObject - put object
|
||||
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (ObjectMetadata, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
}
|
||||
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
|
||||
if bucket == "" || strings.TrimSpace(bucket) == "" {
|
||||
return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||
return ObjectMetadata{}, probe.New(InvalidArgument{})
|
||||
}
|
||||
if object == "" || strings.TrimSpace(object) == "" {
|
||||
return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||
return ObjectMetadata{}, probe.New(InvalidArgument{})
|
||||
}
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
|
||||
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
bucketMeta, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
|
||||
return ObjectMetadata{}, iodine.New(ObjectExists{Object: object}, errParams)
|
||||
return ObjectMetadata{}, probe.New(ObjectExists{Object: object})
|
||||
}
|
||||
objMetadata, err := donut.buckets[bucket].WriteObject(object, reader, size, expectedMD5Sum, metadata, signature)
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
bucketMeta.Buckets[bucket].BucketObjects[object] = struct{}{}
|
||||
if err := donut.setDonutBucketMetadata(bucketMeta); err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
return objMetadata, nil
|
||||
}
|
||||
|
||||
// putObject - put object
|
||||
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (PartMetadata, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
}
|
||||
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (PartMetadata, *probe.Error) {
|
||||
if bucket == "" || strings.TrimSpace(bucket) == "" {
|
||||
return PartMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||
return PartMetadata{}, probe.New(InvalidArgument{})
|
||||
}
|
||||
if object == "" || strings.TrimSpace(object) == "" {
|
||||
return PartMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||
return PartMetadata{}, probe.New(InvalidArgument{})
|
||||
}
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return PartMetadata{}, iodine.New(err, errParams)
|
||||
return PartMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return PartMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
|
||||
return PartMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
bucketMeta, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return PartMetadata{}, iodine.New(err, errParams)
|
||||
return PartMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := bucketMeta.Buckets[bucket].Multiparts[object]; !ok {
|
||||
return PartMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
|
||||
return PartMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID})
|
||||
}
|
||||
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
|
||||
return PartMetadata{}, iodine.New(ObjectExists{Object: object}, errParams)
|
||||
return PartMetadata{}, probe.New(ObjectExists{Object: object})
|
||||
}
|
||||
objectPart := object + "/" + "multipart" + "/" + strconv.Itoa(partID)
|
||||
objmetadata, err := donut.buckets[bucket].WriteObject(objectPart, reader, size, expectedMD5Sum, metadata, signature)
|
||||
if err != nil {
|
||||
return PartMetadata{}, iodine.New(err, errParams)
|
||||
return PartMetadata{}, err.Trace()
|
||||
}
|
||||
partMetadata := PartMetadata{
|
||||
PartNumber: partID,
|
||||
@@ -212,74 +197,61 @@ func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string,
|
||||
multipartSession.Parts[strconv.Itoa(partID)] = partMetadata
|
||||
bucketMeta.Buckets[bucket].Multiparts[object] = multipartSession
|
||||
if err := donut.setDonutBucketMetadata(bucketMeta); err != nil {
|
||||
return PartMetadata{}, iodine.New(err, errParams)
|
||||
return PartMetadata{}, err.Trace()
|
||||
}
|
||||
return partMetadata, nil
|
||||
}
|
||||
|
||||
// getObject - get object
|
||||
func (donut API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
}
|
||||
func (donut API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err *probe.Error) {
|
||||
if bucket == "" || strings.TrimSpace(bucket) == "" {
|
||||
return nil, 0, iodine.New(InvalidArgument{}, errParams)
|
||||
return nil, 0, probe.New(InvalidArgument{})
|
||||
}
|
||||
if object == "" || strings.TrimSpace(object) == "" {
|
||||
return nil, 0, iodine.New(InvalidArgument{}, errParams)
|
||||
return nil, 0, probe.New(InvalidArgument{})
|
||||
}
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return nil, 0, iodine.New(err, nil)
|
||||
return nil, 0, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return nil, 0, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
return nil, 0, probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
return donut.buckets[bucket].ReadObject(object)
|
||||
}
|
||||
|
||||
// getObjectMetadata - get object metadata
|
||||
func (donut API) getObjectMetadata(bucket, object string) (ObjectMetadata, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
}
|
||||
func (donut API) getObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) {
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
bucketMeta, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; !ok {
|
||||
return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: object}, errParams)
|
||||
return ObjectMetadata{}, probe.New(ObjectNotFound{Object: object})
|
||||
}
|
||||
objectMetadata, err := donut.buckets[bucket].GetObjectMetadata(object)
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, nil)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
return objectMetadata, nil
|
||||
}
|
||||
|
||||
// newMultipartUpload - new multipart upload request
|
||||
func (donut API) newMultipartUpload(bucket, object, contentType string) (string, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
"contentType": contentType,
|
||||
}
|
||||
func (donut API) newMultipartUpload(bucket, object, contentType string) (string, *probe.Error) {
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return "", iodine.New(err, errParams)
|
||||
return "", err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return "", iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
return "", probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
allbuckets, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return "", iodine.New(err, errParams)
|
||||
return "", err.Trace()
|
||||
}
|
||||
bucketMetadata := allbuckets.Buckets[bucket]
|
||||
multiparts := make(map[string]MultiPartSession)
|
||||
@@ -302,40 +274,36 @@ func (donut API) newMultipartUpload(bucket, object, contentType string) (string,
|
||||
allbuckets.Buckets[bucket] = bucketMetadata
|
||||
|
||||
if err := donut.setDonutBucketMetadata(allbuckets); err != nil {
|
||||
return "", iodine.New(err, errParams)
|
||||
return "", err.Trace()
|
||||
}
|
||||
|
||||
return uploadID, nil
|
||||
}
|
||||
|
||||
// listObjectParts list all object parts
|
||||
func (donut API) listObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
}
|
||||
func (donut API) listObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
|
||||
if bucket == "" || strings.TrimSpace(bucket) == "" {
|
||||
return ObjectResourcesMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||
return ObjectResourcesMetadata{}, probe.New(InvalidArgument{})
|
||||
}
|
||||
if object == "" || strings.TrimSpace(object) == "" {
|
||||
return ObjectResourcesMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||
return ObjectResourcesMetadata{}, probe.New(InvalidArgument{})
|
||||
}
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return ObjectResourcesMetadata{}, iodine.New(err, nil)
|
||||
return ObjectResourcesMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
return ObjectResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
allBuckets, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return ObjectResourcesMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectResourcesMetadata{}, err.Trace()
|
||||
}
|
||||
bucketMetadata := allBuckets.Buckets[bucket]
|
||||
if _, ok := bucketMetadata.Multiparts[object]; !ok {
|
||||
return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, errParams)
|
||||
return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID})
|
||||
}
|
||||
if bucketMetadata.Multiparts[object].UploadID != resources.UploadID {
|
||||
return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, errParams)
|
||||
return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID})
|
||||
}
|
||||
objectResourcesMetadata := resources
|
||||
objectResourcesMetadata.Bucket = bucket
|
||||
@@ -358,7 +326,7 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource
|
||||
}
|
||||
part, ok := bucketMetadata.Multiparts[object].Parts[strconv.Itoa(i)]
|
||||
if !ok {
|
||||
return ObjectResourcesMetadata{}, iodine.New(InvalidPart{}, nil)
|
||||
return ObjectResourcesMetadata{}, probe.New(InvalidPart{})
|
||||
}
|
||||
parts = append(parts, &part)
|
||||
}
|
||||
@@ -368,58 +336,57 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource
|
||||
}
|
||||
|
||||
// completeMultipartUpload complete an incomplete multipart upload
|
||||
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
"uploadID": uploadID,
|
||||
}
|
||||
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
|
||||
if bucket == "" || strings.TrimSpace(bucket) == "" {
|
||||
return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||
return ObjectMetadata{}, probe.New(InvalidArgument{})
|
||||
}
|
||||
if object == "" || strings.TrimSpace(object) == "" {
|
||||
return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||
return ObjectMetadata{}, probe.New(InvalidArgument{})
|
||||
}
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
allBuckets, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
bucketMetadata := allBuckets.Buckets[bucket]
|
||||
if _, ok := bucketMetadata.Multiparts[object]; !ok {
|
||||
return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, errParams)
|
||||
return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID})
|
||||
}
|
||||
if bucketMetadata.Multiparts[object].UploadID != uploadID {
|
||||
return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, errParams)
|
||||
return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID})
|
||||
}
|
||||
partBytes, err := ioutil.ReadAll(data)
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
var partBytes []byte
|
||||
{
|
||||
var err error
|
||||
partBytes, err = ioutil.ReadAll(data)
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, probe.New(err)
|
||||
}
|
||||
}
|
||||
if signature != nil {
|
||||
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256.Sum256(partBytes)[:]))
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
if !ok {
|
||||
return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, errParams)
|
||||
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
parts := &CompleteMultipartUpload{}
|
||||
if err := xml.Unmarshal(partBytes, parts); err != nil {
|
||||
return ObjectMetadata{}, iodine.New(MalformedXML{}, errParams)
|
||||
return ObjectMetadata{}, probe.New(MalformedXML{})
|
||||
}
|
||||
if !sort.IsSorted(completedParts(parts.Part)) {
|
||||
return ObjectMetadata{}, iodine.New(InvalidPartOrder{}, errParams)
|
||||
return ObjectMetadata{}, probe.New(InvalidPartOrder{})
|
||||
}
|
||||
for _, part := range parts.Part {
|
||||
if strings.Trim(part.ETag, "\"") != bucketMetadata.Multiparts[object].Parts[strconv.Itoa(part.PartNumber)].ETag {
|
||||
return ObjectMetadata{}, iodine.New(InvalidPart{}, errParams)
|
||||
return ObjectMetadata{}, probe.New(InvalidPart{})
|
||||
}
|
||||
}
|
||||
var finalETagBytes []byte
|
||||
@@ -428,7 +395,7 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i
|
||||
for _, part := range bucketMetadata.Multiparts[object].Parts {
|
||||
partETagBytes, err := hex.DecodeString(part.ETag)
|
||||
if err != nil {
|
||||
return ObjectMetadata{}, iodine.New(err, errParams)
|
||||
return ObjectMetadata{}, probe.New(err)
|
||||
}
|
||||
finalETagBytes = append(finalETagBytes, partETagBytes...)
|
||||
finalSize += part.Size
|
||||
@@ -444,19 +411,16 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i
|
||||
}
|
||||
|
||||
// listMultipartUploads list all multipart uploads
|
||||
func (donut API) listMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
}
|
||||
func (donut API) listMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return BucketMultipartResourcesMetadata{}, iodine.New(err, errParams)
|
||||
return BucketMultipartResourcesMetadata{}, err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
return BucketMultipartResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
allbuckets, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return BucketMultipartResourcesMetadata{}, iodine.New(err, errParams)
|
||||
return BucketMultipartResourcesMetadata{}, err.Trace()
|
||||
}
|
||||
bucketMetadata := allbuckets.Buckets[bucket]
|
||||
var uploads []*UploadMetadata
|
||||
@@ -505,34 +469,29 @@ func (donut API) listMultipartUploads(bucket string, resources BucketMultipartRe
|
||||
}
|
||||
|
||||
// abortMultipartUpload - abort a incomplete multipart upload
|
||||
func (donut API) abortMultipartUpload(bucket, object, uploadID string) error {
|
||||
errParams := map[string]string{
|
||||
"bucket": bucket,
|
||||
"object": object,
|
||||
"uploadID": uploadID,
|
||||
}
|
||||
func (donut API) abortMultipartUpload(bucket, object, uploadID string) *probe.Error {
|
||||
if err := donut.listDonutBuckets(); err != nil {
|
||||
return iodine.New(err, errParams)
|
||||
return err.Trace()
|
||||
}
|
||||
if _, ok := donut.buckets[bucket]; !ok {
|
||||
return iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
return probe.New(BucketNotFound{Bucket: bucket})
|
||||
}
|
||||
allbuckets, err := donut.getDonutBucketMetadata()
|
||||
if err != nil {
|
||||
return iodine.New(err, errParams)
|
||||
return err.Trace()
|
||||
}
|
||||
bucketMetadata := allbuckets.Buckets[bucket]
|
||||
if _, ok := bucketMetadata.Multiparts[object]; !ok {
|
||||
return iodine.New(InvalidUploadID{UploadID: uploadID}, errParams)
|
||||
return probe.New(InvalidUploadID{UploadID: uploadID})
|
||||
}
|
||||
if bucketMetadata.Multiparts[object].UploadID != uploadID {
|
||||
return iodine.New(InvalidUploadID{UploadID: uploadID}, errParams)
|
||||
return probe.New(InvalidUploadID{UploadID: uploadID})
|
||||
}
|
||||
delete(bucketMetadata.Multiparts, object)
|
||||
|
||||
allbuckets.Buckets[bucket] = bucketMetadata
|
||||
if err := donut.setDonutBucketMetadata(allbuckets); err != nil {
|
||||
return iodine.New(err, errParams)
|
||||
return err.Trace()
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -541,18 +500,18 @@ func (donut API) abortMultipartUpload(bucket, object, uploadID string) error {
|
||||
//// internal functions
|
||||
|
||||
// getBucketMetadataWriters -
|
||||
func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, error) {
|
||||
func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, *probe.Error) {
|
||||
var writers []io.WriteCloser
|
||||
for _, node := range donut.nodes {
|
||||
disks, err := node.ListDisks()
|
||||
if err != nil {
|
||||
return nil, iodine.New(err, nil)
|
||||
return nil, err.Trace()
|
||||
}
|
||||
writers = make([]io.WriteCloser, len(disks))
|
||||
for order, disk := range disks {
|
||||
bucketMetaDataWriter, err := disk.CreateFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
|
||||
if err != nil {
|
||||
return nil, iodine.New(err, nil)
|
||||
return nil, err.Trace()
|
||||
}
|
||||
writers[order] = bucketMetaDataWriter
|
||||
}
|
||||
@@ -561,14 +520,14 @@ func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, error) {
|
||||
}
|
||||
|
||||
// getBucketMetadataReaders - readers are returned in map rather than slice
|
||||
func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, error) {
|
||||
func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
|
||||
readers := make(map[int]io.ReadCloser)
|
||||
disks := make(map[int]disk.Disk)
|
||||
var err error
|
||||
var err *probe.Error
|
||||
for _, node := range donut.nodes {
|
||||
nDisks, err := node.ListDisks()
|
||||
if err != nil {
|
||||
return nil, iodine.New(err, nil)
|
||||
return nil, err.Trace()
|
||||
}
|
||||
for k, v := range nDisks {
|
||||
disks[k] = v
|
||||
@@ -583,22 +542,22 @@ func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, error) {
|
||||
readers[order] = bucketMetaDataReader
|
||||
}
|
||||
if err != nil {
|
||||
return nil, iodine.New(err, nil)
|
||||
return nil, err.Trace()
|
||||
}
|
||||
return readers, nil
|
||||
}
|
||||
|
||||
// setDonutBucketMetadata -
func (donut API) setDonutBucketMetadata(metadata *AllBuckets) error {
func (donut API) setDonutBucketMetadata(metadata *AllBuckets) *probe.Error {
writers, err := donut.getBucketMetadataWriters()
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
for _, writer := range writers {
jenc := json.NewEncoder(writer)
if err := jenc.Encode(metadata); err != nil {
CleanupWritersOnError(writers)
return iodine.New(err, nil)
return probe.New(err)
}
}
for _, writer := range writers {
@@ -608,83 +567,85 @@ func (donut API) setDonutBucketMetadata(metadata *AllBuckets) error {
}

// getDonutBucketMetadata -
func (donut API) getDonutBucketMetadata() (*AllBuckets, error) {
func (donut API) getDonutBucketMetadata() (*AllBuckets, *probe.Error) {
metadata := &AllBuckets{}
var err error
readers, err := donut.getBucketMetadataReaders()
if err != nil {
return nil, iodine.New(err, nil)
return nil, err.Trace()
}
for _, reader := range readers {
defer reader.Close()
}
for _, reader := range readers {
jenc := json.NewDecoder(reader)
if err = jenc.Decode(metadata); err == nil {
return metadata, nil
{
var err error
for _, reader := range readers {
jenc := json.NewDecoder(reader)
if err = jenc.Decode(metadata); err == nil {
return metadata, nil
}
}
return nil, probe.New(err)
}
return nil, iodine.New(err, nil)
}

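getDonutBucketMetadata above introduces a device used in several converted functions (getBucketMetadata, GetObject, newEncoder, createObjectPart): stdlib calls that yield a plain error are fenced into a bare { } block with their own var err error, so the plain error can never collide with, or shadow, the *probe.Error flowing through the rest of the function. A condensed sketch of the same shape, assuming the decode-first-good-replica logic shown above; decodeFirstGood is a hypothetical name:

package donut

import (
	"encoding/json"
	"io"

	"github.com/minio/minio/pkg/probe"
)

// decodeFirstGood returns as soon as any replica decodes cleanly;
// the stdlib err lives only inside the braces.
func decodeFirstGood(readers map[int]io.ReadCloser, v interface{}) *probe.Error {
	{
		var err error // scoped: never escapes this block
		for _, reader := range readers {
			if err = json.NewDecoder(reader).Decode(v); err == nil {
				return nil
			}
		}
		// every replica failed; wrap the last stdlib error once,
		// mirroring the behavior of getDonutBucketMetadata above
		return probe.New(err)
	}
}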
// makeDonutBucket -
func (donut API) makeDonutBucket(bucketName, acl string) error {
func (donut API) makeDonutBucket(bucketName, acl string) *probe.Error {
if err := donut.listDonutBuckets(); err != nil {
return iodine.New(err, nil)
return err.Trace()
}
if _, ok := donut.buckets[bucketName]; ok {
return iodine.New(BucketExists{Bucket: bucketName}, nil)
return probe.New(BucketExists{Bucket: bucketName})
}
bucket, bucketMetadata, err := newBucket(bucketName, acl, donut.config.DonutName, donut.nodes)
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
nodeNumber := 0
donut.buckets[bucketName] = bucket
for _, node := range donut.nodes {
disks, err := node.ListDisks()
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
for order, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order)
err := disk.MakeDir(filepath.Join(donut.config.DonutName, bucketSlice))
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
}
nodeNumber = nodeNumber + 1
}
metadata, err := donut.getDonutBucketMetadata()
if err != nil {
if os.IsNotExist(iodine.ToError(err)) {
if os.IsNotExist(err.ToError()) {
metadata := new(AllBuckets)
metadata.Buckets = make(map[string]BucketMetadata)
metadata.Buckets[bucketName] = bucketMetadata
err = donut.setDonutBucketMetadata(metadata)
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
return nil
}
return iodine.New(err, nil)
return err.Trace()
}
metadata.Buckets[bucketName] = bucketMetadata
err = donut.setDonutBucketMetadata(metadata)
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
return nil
}

// listDonutBuckets -
func (donut API) listDonutBuckets() error {
func (donut API) listDonutBuckets() *probe.Error {
var disks map[int]disk.Disk
var err error
var err *probe.Error
for _, node := range donut.nodes {
disks, err = node.ListDisks()
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
}
var dirs []os.FileInfo
@@ -696,18 +657,18 @@ func (donut API) listDonutBuckets() error {
}
// if all disks are missing then return error
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
for _, dir := range dirs {
splitDir := strings.Split(dir.Name(), "$")
if len(splitDir) < 3 {
return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil)
return probe.New(CorruptedBackend{Backend: dir.Name()})
}
bucketName := splitDir[0]
// we dont need this once we cache from makeDonutBucket()
bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
donut.buckets[bucketName] = bucket
}

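makeDonutBucket and listDonutBuckets agree on an on-disk naming scheme: each bucket slice directory is named <bucket>$<nodeNumber>$<diskOrder>, and anything that does not split into at least three $-separated fields is rejected as CorruptedBackend. A small illustration of the round trip, using only the two operations shown above:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// written by makeDonutBucket: fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order)
	bucketSlice := fmt.Sprintf("%s$%d$%d", "photos", 0, 3) // "photos$0$3"

	// read back by listDonutBuckets: strings.Split(dir.Name(), "$")
	splitDir := strings.Split(bucketSlice, "$")
	if len(splitDir) < 3 {
		panic("corrupted backend") // the CorruptedBackend case
	}
	fmt.Println(splitDir[0]) // "photos", the recovered bucket name
}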
@@ -34,7 +34,7 @@ import (
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/donut/cache/data"
"github.com/minio/minio/pkg/donut/cache/metadata"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
)

@@ -71,9 +71,9 @@ type storedBucket struct {
}

// New instantiate a new donut
func New() (Interface, error) {
func New() (Interface, *probe.Error) {
var conf *Config
var err error
var err *probe.Error
conf, err = LoadConfig()
if err != nil {
conf = &Config{
@@ -83,7 +83,7 @@ func New() (Interface, error) {
DonutName: "",
}
if err := quick.CheckData(conf); err != nil {
return nil, iodine.New(err, nil)
return nil, err.Trace()
}
}
a := API{config: conf}
@@ -98,17 +98,17 @@ func New() (Interface, error) {
if len(a.config.NodeDiskMap) > 0 {
for k, v := range a.config.NodeDiskMap {
if len(v) == 0 {
return nil, iodine.New(InvalidDisksArgument{}, nil)
return nil, probe.New(InvalidDisksArgument{})
}
err := a.AttachNode(k, v)
if err != nil {
return nil, iodine.New(err, nil)
return nil, err.Trace()
}
}
/// Initialization, populate all buckets into memory
buckets, err := a.listBuckets()
if err != nil {
return nil, iodine.New(err, nil)
return nil, err.Trace()
}
for k, v := range buckets {
var newBucket = storedBucket{}
@@ -126,58 +126,53 @@ func New() (Interface, error) {
/// V2 API functions

// GetObject - GET object from cache buffer
func (donut API) GetObject(w io.Writer, bucket string, object string, start, length int64) (int64, error) {
func (donut API) GetObject(w io.Writer, bucket string, object string, start, length int64) (int64, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()

errParams := map[string]string{
"bucket": bucket,
"object": object,
"start": strconv.FormatInt(start, 10),
"length": strconv.FormatInt(length, 10),
}

if !IsValidBucket(bucket) {
return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams)
return 0, probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams)
return 0, probe.New(ObjectNameInvalid{Object: object})
}
if start < 0 {
return 0, iodine.New(InvalidRange{
return 0, probe.New(InvalidRange{
Start: start,
Length: length,
}, errParams)
})
}
if !donut.storedBuckets.Exists(bucket) {
return 0, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
return 0, probe.New(BucketNotFound{Bucket: bucket})
}
objectKey := bucket + "/" + object
data, ok := donut.objects.Get(objectKey)
var written int64
var err error
if !ok {
if len(donut.config.NodeDiskMap) > 0 {
reader, size, err := donut.getObject(bucket, object)
if err != nil {
return 0, iodine.New(err, nil)
return 0, err.Trace()
}
if start > 0 {
if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil {
return 0, iodine.New(err, errParams)
return 0, probe.New(err)
}
}
// new proxy writer to capture data read from disk
pw := NewProxyWriter(w)
if length > 0 {
written, err = io.CopyN(pw, reader, length)
if err != nil {
return 0, iodine.New(err, errParams)
}
} else {
written, err = io.CopyN(pw, reader, size)
if err != nil {
return 0, iodine.New(err, errParams)
{
var err error
if length > 0 {
written, err = io.CopyN(pw, reader, length)
if err != nil {
return 0, probe.New(err)
}
} else {
written, err = io.CopyN(pw, reader, size)
if err != nil {
return 0, probe.New(err)
}
}
}
/// cache object read from disk
@@ -185,83 +180,86 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len
pw.writtenBytes = nil
go debug.FreeOSMemory()
if !ok {
return 0, iodine.New(InternalError{}, errParams)
return 0, probe.New(InternalError{})
}
return written, nil
}
return 0, iodine.New(ObjectNotFound{Object: object}, errParams)
return 0, probe.New(ObjectNotFound{Object: object})
}
if start == 0 && length == 0 {
written, err = io.CopyN(w, bytes.NewBuffer(data), int64(donut.objects.Len(objectKey)))
if err != nil {
return 0, iodine.New(err, nil)
}
} else {
written, err = io.CopyN(w, bytes.NewBuffer(data[start:]), length)
if err != nil {
return 0, iodine.New(err, nil)
{
var err error
if start == 0 && length == 0 {
written, err = io.CopyN(w, bytes.NewBuffer(data), int64(donut.objects.Len(objectKey)))
if err != nil {
return 0, probe.New(err)
}
} else {
written, err = io.CopyN(w, bytes.NewBuffer(data[start:]), length)
if err != nil {
return 0, probe.New(err)
}
}
return written, nil
}
return written, nil
}

// GetBucketMetadata -
func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, error) {
func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()

if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return BucketMetadata{}, iodine.New(err, nil)
return BucketMetadata{}, err.Trace()
}
if !ok {
return BucketMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil)
return BucketMetadata{}, probe.New(SignatureDoesNotMatch{})
}
}

if !IsValidBucket(bucket) {
return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return BucketMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
}
if !donut.storedBuckets.Exists(bucket) {
if len(donut.config.NodeDiskMap) > 0 {
bucketMetadata, err := donut.getBucketMetadata(bucket)
if err != nil {
return BucketMetadata{}, iodine.New(err, nil)
return BucketMetadata{}, err.Trace()
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
storedBucket.bucketMetadata = bucketMetadata
donut.storedBuckets.Set(bucket, storedBucket)
}
return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
return BucketMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
}
return donut.storedBuckets.Get(bucket).(storedBucket).bucketMetadata, nil
}

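The hex constant passed to DoesSignatureMatch here, and in every other signed call in this change, is the SHA-256 of an empty payload, i.e. the expected content hash for requests that carry no body. This is easy to confirm:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256(nil) // hash of zero bytes
	fmt.Println(hex.EncodeToString(sum[:]) ==
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") // true
}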
// SetBucketMetadata -
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) error {
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()

if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
if !ok {
return iodine.New(SignatureDoesNotMatch{}, nil)
return probe.New(SignatureDoesNotMatch{})
}
}

if !IsValidBucket(bucket) {
return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return probe.New(BucketNameInvalid{Bucket: bucket})
}
if !donut.storedBuckets.Exists(bucket) {
return iodine.New(BucketNotFound{Bucket: bucket}, nil)
return probe.New(BucketNotFound{Bucket: bucket})
}
if len(donut.config.NodeDiskMap) > 0 {
if err := donut.setBucketMetadata(bucket, metadata); err != nil {
return iodine.New(err, nil)
return err.Trace()
}
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
@@ -271,26 +269,26 @@ func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, si
}

// isMD5SumEqual - returns error if md5sum mismatches, success its `nil`
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
if err != nil {
return iodine.New(err, nil)
return probe.New(err)
}
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
if err != nil {
return iodine.New(err, nil)
return probe.New(err)
}
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
return iodine.New(BadDigest{}, nil)
return probe.New(BadDigest{})
}
return nil
}
return iodine.New(InvalidArgument{}, nil)
return probe.New(InvalidArgument{})
}

// CreateObject - create an object
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *Signature) (ObjectMetadata, error) {
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()

@@ -299,35 +297,35 @@ func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, da
// free
debug.FreeOSMemory()

return objectMetadata, iodine.New(err, nil)
return objectMetadata, err.Trace()
}

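Note that the rewritten wrapper returns objectMetadata, err.Trace() unconditionally, even on the success path; CreateObjectPart below does the same. This only works if Trace() on a nil *probe.Error is a no-op returning nil, which is an assumption about the probe package rather than something visible in this diff:

package main

import "github.com/minio/minio/pkg/probe"

func main() {
	var err *probe.Error // nil on the success path
	// CreateObject above does `return objectMetadata, err.Trace()` even
	// when err is nil, so Trace() is assumed to be nil-safe; otherwise
	// every successful call would yield a phantom error (or panic here).
	if err.Trace() != nil {
		panic("Trace on a nil receiver was expected to return nil")
	}
}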
// createObject - PUT object to cache buffer
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, error) {
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
if len(donut.config.NodeDiskMap) == 0 {
if size > int64(donut.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
return ObjectMetadata{}, iodine.New(EntityTooLarge{
return ObjectMetadata{}, probe.New(EntityTooLarge{
GenericObjectError: generic,
Size: strconv.FormatInt(size, 10),
MaxSize: strconv.FormatUint(donut.config.MaxSize, 10),
}, nil)
})
}
}
if !IsValidBucket(bucket) {
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key})
}
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// get object key
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
return ObjectMetadata{}, iodine.New(ObjectExists{Object: key}, nil)
return ObjectMetadata{}, probe.New(ObjectExists{Object: key})
}

if contentType == "" {
@@ -338,7 +336,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
return ObjectMetadata{}, probe.New(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
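The conversion just above exists because S3's Content-MD5 header carries the digest base64-encoded, while the rest of this code (and isMD5SumEqual) compares hex strings. A standalone illustration of the same conversion:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	body := []byte("hello world")
	sum := md5.Sum(body)

	// what a client would send as Content-MD5:
	contentMD5 := base64.StdEncoding.EncodeToString(sum[:])

	// what createObject stores and isMD5SumEqual later compares:
	raw, err := base64.StdEncoding.DecodeString(contentMD5)
	if err != nil {
		panic(err) // createObject maps this case to InvalidDigest
	}
	fmt.Println(hex.EncodeToString(raw) == hex.EncodeToString(sum[:])) // true
}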
@@ -357,7 +355,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
signature,
)
if err != nil {
return ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, err.Trace()
}
storedBucket.objectMetadata[objectKey] = objMetadata
donut.storedBuckets.Set(bucket, storedBucket)
@@ -377,7 +375,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
sha256hash.Write(byteBuffer[0:length])
ok := donut.objects.Append(objectKey, byteBuffer[0:length])
if !ok {
return ObjectMetadata{}, iodine.New(InternalError{}, nil)
return ObjectMetadata{}, probe.New(InternalError{})
}
totalLength += int64(length)
go debug.FreeOSMemory()
@@ -385,26 +383,26 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
if totalLength != size {
// Delete perhaps the object is already saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, iodine.New(IncompleteBody{Bucket: bucket, Object: key}, nil)
return ObjectMetadata{}, probe.New(IncompleteBody{Bucket: bucket, Object: key})
}
if err != io.EOF {
return ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, probe.New(err)
}
md5SumBytes := hash.Sum(nil)
md5Sum := hex.EncodeToString(md5SumBytes)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
return ObjectMetadata{}, iodine.New(BadDigest{}, nil)
return ObjectMetadata{}, probe.New(BadDigest{})
}
}
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
if err != nil {
return ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil)
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
}
}

@@ -426,7 +424,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
}

// MakeBucket - create bucket in cache
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *Signature) error {
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()

@@ -435,7 +433,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
if location != nil {
locationConstraintBytes, err := ioutil.ReadAll(location)
if err != nil {
return iodine.New(InternalError{}, nil)
return probe.New(InternalError{})
}
locationSum = hex.EncodeToString(sha256.Sum256(locationConstraintBytes)[:])
}
@@ -443,24 +441,24 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
if signature != nil {
ok, err := signature.DoesSignatureMatch(locationSum)
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
if !ok {
return iodine.New(SignatureDoesNotMatch{}, nil)
return probe.New(SignatureDoesNotMatch{})
}
}

if donut.storedBuckets.Stats().Items == totalBuckets {
return iodine.New(TooManyBuckets{Bucket: bucketName}, nil)
return probe.New(TooManyBuckets{Bucket: bucketName})
}
if !IsValidBucket(bucketName) {
return iodine.New(BucketNameInvalid{Bucket: bucketName}, nil)
return probe.New(BucketNameInvalid{Bucket: bucketName})
}
if !IsValidBucketACL(acl) {
return iodine.New(InvalidACL{ACL: acl}, nil)
return probe.New(InvalidACL{ACL: acl})
}
if donut.storedBuckets.Exists(bucketName) {
return iodine.New(BucketExists{Bucket: bucketName}, nil)
return probe.New(BucketExists{Bucket: bucketName})
}

if strings.TrimSpace(acl) == "" {
@@ -469,7 +467,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
}
if len(donut.config.NodeDiskMap) > 0 {
if err := donut.makeBucket(bucketName, BucketACL(acl)); err != nil {
return iodine.New(err, nil)
return err.Trace()
}
}
var newBucket = storedBucket{}
@@ -485,28 +483,28 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
}

// ListObjects - list objects from cache
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *Signature) ([]ObjectMetadata, BucketResourcesMetadata, error) {
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()

if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return nil, BucketResourcesMetadata{}, iodine.New(err, nil)
return nil, BucketResourcesMetadata{}, err.Trace()
}
if !ok {
return nil, BucketResourcesMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil)
return nil, BucketResourcesMetadata{}, probe.New(SignatureDoesNotMatch{})
}
}

if !IsValidBucket(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidPrefix(resources.Prefix) {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil)
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(ObjectNameInvalid{Object: resources.Prefix})
}
if !donut.storedBuckets.Exists(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(BucketNotFound{Bucket: bucket})
}
var results []ObjectMetadata
var keys []string
@@ -519,7 +517,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, s
resources.Maxkeys,
)
if err != nil {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(err, nil)
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(err)
}
resources.CommonPrefixes = listObjects.CommonPrefixes
resources.IsTruncated = listObjects.IsTruncated
@@ -588,17 +586,17 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }

// ListBuckets - List buckets from cache
func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, error) {
func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()

if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return nil, iodine.New(err, nil)
return nil, probe.New(err)
}
if !ok {
return nil, iodine.New(SignatureDoesNotMatch{}, nil)
return nil, probe.New(SignatureDoesNotMatch{})
}
}

@@ -606,7 +604,7 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, error) {
if len(donut.config.NodeDiskMap) > 0 {
buckets, err := donut.listBuckets()
if err != nil {
return nil, iodine.New(err, nil)
return nil, probe.New(err)
}
for _, bucketMetadata := range buckets {
results = append(results, bucketMetadata)
@@ -622,29 +620,29 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, error) {
}

// GetObjectMetadata - get object metadata from cache
func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (ObjectMetadata, error) {
func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()

if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil)
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
}
}

// check if bucket exists
if !IsValidBucket(bucket) {
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key})
}
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
@@ -654,14 +652,14 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
if len(donut.config.NodeDiskMap) > 0 {
objMetadata, err := donut.getObjectMetadata(bucket, key)
if err != nil {
return ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, err.Trace()
}
// update
storedBucket.objectMetadata[objectKey] = objMetadata
donut.storedBuckets.Set(bucket, storedBucket)
return objMetadata, nil
}
return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
return ObjectMetadata{}, probe.New(ObjectNotFound{Object: key})
}

// evictedObject callback function called when an item is evicted from memory

@@ -18,10 +18,9 @@ package donut

import (
"io"
"strconv"

encoding "github.com/minio/minio/pkg/erasure"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/probe"
)

// encoder internal struct
@@ -32,74 +31,71 @@ type encoder struct {
}

// getErasureTechnique - convert technique string into Technique type
func getErasureTechnique(technique string) (encoding.Technique, error) {
func getErasureTechnique(technique string) (encoding.Technique, *probe.Error) {
switch true {
case technique == "Cauchy":
return encoding.Cauchy, nil
case technique == "Vandermonde":
return encoding.Cauchy, nil
default:
return encoding.None, iodine.New(InvalidErasureTechnique{Technique: technique}, nil)
return encoding.None, probe.New(InvalidErasureTechnique{Technique: technique})
}
}

// newEncoder - instantiate a new encoder
func newEncoder(k, m uint8, technique string) (encoder, error) {
errParams := map[string]string{
"k": strconv.FormatUint(uint64(k), 10),
"m": strconv.FormatUint(uint64(m), 10),
"technique": technique,
}
func newEncoder(k, m uint8, technique string) (encoder, *probe.Error) {
e := encoder{}
t, err := getErasureTechnique(technique)
if err != nil {
return encoder{}, iodine.New(err, errParams)
return encoder{}, err.Trace()
}
params, err := encoding.ValidateParams(k, m, t)
if err != nil {
return encoder{}, iodine.New(err, errParams)
{
params, err := encoding.ValidateParams(k, m, t)
if err != nil {
return encoder{}, probe.New(err)
}
e.encoder = encoding.NewErasure(params)
e.k = k
e.m = m
e.technique = t
return e, nil
}
e.encoder = encoding.NewErasure(params)
e.k = k
e.m = m
e.technique = t
return e, nil
}

// TODO - think again if this is needed
// GetEncodedBlockLen - wrapper around erasure function with the same name
func (e encoder) GetEncodedBlockLen(dataLength int) (int, error) {
func (e encoder) GetEncodedBlockLen(dataLength int) (int, *probe.Error) {
if dataLength <= 0 {
return 0, iodine.New(InvalidArgument{}, nil)
return 0, probe.New(InvalidArgument{})
}
return encoding.GetEncodedBlockLen(dataLength, e.k), nil
}

// Encode - erasure code input bytes
func (e encoder) Encode(data []byte) (encodedData [][]byte, err error) {
func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) {
if data == nil {
return nil, iodine.New(InvalidArgument{}, nil)
return nil, probe.New(InvalidArgument{})
}
encodedData, err = e.encoder.Encode(data)
encodedData, err := e.encoder.Encode(data)
if err != nil {
return nil, iodine.New(err, nil)
return nil, probe.New(err)
}
return encodedData, nil
}

func (e encoder) EncodeStream(data io.Reader, size int64) (encodedData [][]byte, inputData []byte, err error) {
encodedData, inputData, err = e.encoder.EncodeStream(data, size)
func (e encoder) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, *probe.Error) {
encodedData, inputData, err := e.encoder.EncodeStream(data, size)
if err != nil {
return nil, nil, iodine.New(err, nil)
return nil, nil, probe.New(err)
}
return encodedData, inputData, nil
}

// Decode - erasure decode input encoded bytes
func (e encoder) Decode(encodedData [][]byte, dataLength int) (data []byte, err error) {
func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) {
decodedData, err := e.encoder.Decode(encodedData, dataLength)
if err != nil {
return nil, iodine.New(err, nil)
return nil, probe.New(err)
}
return decodedData, nil
}

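Taken together, the encoder API above splits a byte slice into k data and m parity blocks and reconstructs the original from the blocks plus the original byte length; note that, as the switch in getErasureTechnique shows, both "Cauchy" and "Vandermonde" currently map to encoding.Cauchy. A usage sketch, assuming these unexported functions are reached from within package donut (e.g. a test file); exampleRoundTrip is a hypothetical name:

package donut

import "github.com/minio/minio/pkg/probe"

// exampleRoundTrip erasure-codes payload and immediately reconstructs it.
func exampleRoundTrip(payload []byte) ([]byte, *probe.Error) {
	e, err := newEncoder(10, 5, "Cauchy") // k=10 data blocks, m=5 parity blocks
	if err != nil {
		return nil, err.Trace()
	}
	blocks, err := e.Encode(payload)
	if err != nil {
		return nil, err.Trace()
	}
	// Decode needs the original byte length to trim block padding.
	return e.Decode(blocks, len(payload))
}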
@@ -22,23 +22,23 @@ import (
"path/filepath"

"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/probe"
)

// healBuckets heal bucket slices
func (donut API) healBuckets() error {
func (donut API) healBuckets() *probe.Error {
if err := donut.listDonutBuckets(); err != nil {
return iodine.New(err, nil)
return err.Trace()
}
bucketMetadata, err := donut.getDonutBucketMetadata()
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
disks := make(map[int]disk.Disk)
for _, node := range donut.nodes {
nDisks, err := node.ListDisks()
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
for k, v := range nDisks {
disks[k] = v
@@ -49,18 +49,18 @@ func (donut API) healBuckets() error {
disk.MakeDir(donut.config.DonutName)
bucketMetadataWriter, err := disk.CreateFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
defer bucketMetadataWriter.Close()
jenc := json.NewEncoder(bucketMetadataWriter)
if err := jenc.Encode(bucketMetadata); err != nil {
return iodine.New(err, nil)
return probe.New(err)
}
for bucket := range bucketMetadata.Buckets {
bucketSlice := fmt.Sprintf("%s$0$%d", bucket, order) // TODO handle node slices
err := disk.MakeDir(filepath.Join(donut.config.DonutName, bucketSlice))
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
}
}

@@ -16,7 +16,11 @@

package donut

import "io"
import (
"io"

"github.com/minio/minio/pkg/probe"
)

// Collection of Donut specification interfaces

@@ -29,39 +33,39 @@ type Interface interface {
// CloudStorage is a donut cloud storage interface
type CloudStorage interface {
// Storage service operations
GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, error)
SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) error
ListBuckets(signature *Signature) ([]BucketMetadata, error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *Signature) error
GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error
ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *Signature) *probe.Error

// Bucket operations
ListObjects(string, BucketResourcesMetadata, *Signature) ([]ObjectMetadata, BucketResourcesMetadata, error)
ListObjects(string, BucketResourcesMetadata, *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)

// Object operations
GetObject(w io.Writer, bucket, object string, start, length int64) (int64, error)
GetObjectMetadata(bucket, object string, signature *Signature) (ObjectMetadata, error)
GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error)
GetObjectMetadata(bucket, object string, signature *Signature) (ObjectMetadata, *probe.Error)
// bucket, object, expectedMD5Sum, size, reader, metadata, signature
CreateObject(string, string, string, int64, io.Reader, map[string]string, *Signature) (ObjectMetadata, error)
CreateObject(string, string, string, int64, io.Reader, map[string]string, *Signature) (ObjectMetadata, *probe.Error)

Multipart
}

// Multipart API
type Multipart interface {
NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, error)
AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *Signature) (string, error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata, *Signature) (BucketMultipartResourcesMetadata, error)
ListObjectParts(string, string, ObjectResourcesMetadata, *Signature) (ObjectResourcesMetadata, error)
NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *Signature) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata, *Signature) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata, *Signature) (ObjectResourcesMetadata, *probe.Error)
}

// Management is a donut management system interface
type Management interface {
Heal() error
Rebalance() error
Info() (map[string][]string, error)
Heal() *probe.Error
Rebalance() *probe.Error
Info() (map[string][]string, *probe.Error)

AttachNode(hostname string, disks []string) error
DetachNode(hostname string) error
AttachNode(hostname string, disks []string) *probe.Error
DetachNode(hostname string) *probe.Error
}

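With every interface method now returning *probe.Error, a caller inspects the wrapped cause via ToError() rather than iodine's parameter map. A minimal consumer sketch, assuming package donut lives at github.com/minio/minio/pkg/donut and a cache-only configuration (empty NodeDiskMap), as in the New() fallback earlier in this change:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/minio/minio/pkg/donut"
)

func main() {
	d, err := donut.New() // (donut.Interface, *probe.Error)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.ToError())
		os.Exit(1)
	}
	if err := d.MakeBucket("photos", "private", nil, nil); err != nil {
		fmt.Fprintln(os.Stderr, err.ToError())
		os.Exit(1)
	}
	payload := []byte("hello donut")
	if _, err := d.CreateObject("photos", "hello.txt", "",
		int64(len(payload)), bytes.NewReader(payload), nil, nil); err != nil {
		fmt.Fprintln(os.Stderr, err.ToError())
		os.Exit(1)
	}
	var buf bytes.Buffer
	// start == 0 && length == 0 streams the whole object, per GetObject above.
	if n, err := d.GetObject(&buf, "photos", "hello.txt", 0, 0); err == nil {
		fmt.Println(n, buf.String())
	}
}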
@@ -18,16 +18,16 @@ package donut

import (
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/probe"
)

// Info - return info about donut configuration
func (donut API) Info() (nodeDiskMap map[string][]string, err error) {
func (donut API) Info() (nodeDiskMap map[string][]string, err *probe.Error) {
nodeDiskMap = make(map[string][]string)
for nodeName, node := range donut.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, iodine.New(err, nil)
return nil, err.Trace()
}
diskList := make([]string, len(disks))
for diskOrder, disk := range disks {
@@ -39,13 +39,13 @@ func (donut API) Info() (nodeDiskMap map[string][]string, err error) {
}

// AttachNode - attach node
func (donut API) AttachNode(hostname string, disks []string) error {
func (donut API) AttachNode(hostname string, disks []string) *probe.Error {
if hostname == "" || len(disks) == 0 {
return iodine.New(InvalidArgument{}, nil)
return probe.New(InvalidArgument{})
}
node, err := newNode(hostname)
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
donut.nodes[hostname] = node
for i, d := range disks {
@@ -54,28 +54,28 @@ func (donut API) AttachNode(hostname string, disks []string) error {
continue
}
if err := newDisk.MakeDir(donut.config.DonutName); err != nil {
return iodine.New(err, nil)
return err.Trace()
}
if err := node.AttachDisk(newDisk, i); err != nil {
return iodine.New(err, nil)
return err.Trace()
}
}
return nil
}

// DetachNode - detach node
func (donut API) DetachNode(hostname string) error {
func (donut API) DetachNode(hostname string) *probe.Error {
delete(donut.nodes, hostname)
return nil
}

// Rebalance - rebalance an existing donut with new disks and nodes
func (donut API) Rebalance() error {
return iodine.New(APINotImplemented{API: "management.Rebalance"}, nil)
func (donut API) Rebalance() *probe.Error {
return probe.New(APINotImplemented{API: "management.Rebalance"})
}

// Heal - heal your donuts
func (donut API) Heal() error {
func (donut API) Heal() *probe.Error {
// TODO handle data heal
return donut.healBuckets()
}

@@ -34,41 +34,41 @@ import (

"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/donut/cache/data"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/probe"
)

/// V2 API functions

// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, error) {
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()

if !IsValidBucket(bucket) {
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return "", probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
return "", probe.New(ObjectNameInvalid{Object: key})
}
if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return "", iodine.New(err, nil)
return "", err.Trace()
}
if !ok {
return "", iodine.New(SignatureDoesNotMatch{}, nil)
return "", probe.New(SignatureDoesNotMatch{})
}
}
if len(donut.config.NodeDiskMap) > 0 {
return donut.newMultipartUpload(bucket, key, contentType)
}
if !donut.storedBuckets.Exists(bucket) {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
return "", probe.New(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
return "", iodine.New(ObjectExists{Object: key}, nil)
return "", probe.New(ObjectExists{Object: key})
}
id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String())
uploadIDSum := sha512.Sum512(id)
@@ -88,57 +88,57 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
}

// AbortMultipartUpload - abort an incomplete multipart session
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) error {
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()

if !IsValidBucket(bucket) {
return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return iodine.New(ObjectNameInvalid{Object: key}, nil)
return probe.New(ObjectNameInvalid{Object: key})
}
if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return iodine.New(err, nil)
return err.Trace()
}
if !ok {
return iodine.New(SignatureDoesNotMatch{}, nil)
return probe.New(SignatureDoesNotMatch{})
}
}
if len(donut.config.NodeDiskMap) > 0 {
return donut.abortMultipartUpload(bucket, key, uploadID)
}
if !donut.storedBuckets.Exists(bucket) {
return iodine.New(BucketNotFound{Bucket: bucket}, nil)
return probe.New(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if storedBucket.multiPartSession[key].UploadID != uploadID {
return iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
return probe.New(InvalidUploadID{UploadID: uploadID})
}
donut.cleanupMultipartSession(bucket, key, uploadID)
return nil
}

// CreateObjectPart - create a part in a multipart session
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, error) {
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
donut.lock.Lock()
etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data, signature)
donut.lock.Unlock()
// possible free
debug.FreeOSMemory()

return etag, iodine.New(err, nil)
return etag, err.Trace()
}

// createObject - internal wrapper function called by CreateObjectPart
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, error) {
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
if !IsValidBucket(bucket) {
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return "", probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
return "", probe.New(ObjectNameInvalid{Object: key})
}
if len(donut.config.NodeDiskMap) > 0 {
metadata := make(map[string]string)
@@ -151,24 +151,24 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
return "", probe.New(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
partMetadata, err := donut.putObjectPart(bucket, key, expectedMD5Sum, uploadID, partID, data, size, metadata, signature)
if err != nil {
return "", iodine.New(err, nil)
return "", err.Trace()
}
return partMetadata.ETag, nil
}

if !donut.storedBuckets.Exists(bucket) {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
return "", probe.New(BucketNotFound{Bucket: bucket})
}
strBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if strBucket.multiPartSession[key].UploadID != uploadID {
return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
return "", probe.New(InvalidUploadID{UploadID: uploadID})
}

// get object key
@@ -185,7 +185,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
return "", probe.New(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
@@ -194,27 +194,27 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
hash := md5.New()
sha256hash := sha256.New()

var err error
var totalLength int64
var err error
for err == nil {
var length int
byteBuffer := make([]byte, 1024*1024)
length, err = data.Read(byteBuffer)
length, err = data.Read(byteBuffer) // do not read error return error here, we will handle this error later
hash.Write(byteBuffer[0:length])
sha256hash.Write(byteBuffer[0:length])
ok := donut.multiPartObjects[uploadID].Append(partID, byteBuffer[0:length])
if !ok {
return "", iodine.New(InternalError{}, nil)
return "", probe.New(InternalError{})
}
totalLength += int64(length)
go debug.FreeOSMemory()
}
if totalLength != size {
donut.multiPartObjects[uploadID].Delete(partID)
return "", iodine.New(IncompleteBody{Bucket: bucket, Object: key}, nil)
return "", probe.New(IncompleteBody{Bucket: bucket, Object: key})
}
if err != io.EOF {
return "", iodine.New(err, nil)
return "", probe.New(err)
}

md5SumBytes := hash.Sum(nil)
@@ -222,17 +222,19 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
return "", iodine.New(BadDigest{}, nil)
return "", err.Trace()
}
}

if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
if err != nil {
return "", iodine.New(err, nil)
}
if !ok {
return "", iodine.New(SignatureDoesNotMatch{}, nil)
{
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
if err != nil {
return "", err.Trace()
}
if !ok {
return "", probe.New(SignatureDoesNotMatch{})
}
}
}

@@ -264,16 +266,16 @@ func (donut API) cleanupMultipartSession(bucket, key, uploadID string) {
}

// CompleteMultipartUpload - complete a multipart upload and persist the data
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, error) {
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()

if !IsValidBucket(bucket) {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key})
}
if len(donut.config.NodeDiskMap) > 0 {
donut.lock.Unlock()
@@ -282,38 +284,38 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R

if !donut.storedBuckets.Exists(bucket) {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if storedBucket.multiPartSession[key].UploadID != uploadID {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID})
}
partBytes, err := ioutil.ReadAll(data)
if err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, probe.New(err)
}
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256.Sum256(partBytes)[:]))
if err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, err.Trace()
}
if !ok {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil)
return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
if err := xml.Unmarshal(partBytes, parts); err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(MalformedXML{}, nil)
return ObjectMetadata{}, probe.New(MalformedXML{})
}
if !sort.IsSorted(completedParts(parts.Part)) {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(InvalidPartOrder{}, nil)
return ObjectMetadata{}, probe.New(InvalidPartOrder{})
}

var size int64
@@ -323,7 +325,7 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
object, ok := donut.multiPartObjects[uploadID].Get(parts.Part[i].PartNumber)
if ok == false {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(InvalidPart{}, nil)
return ObjectMetadata{}, probe.New(InvalidPart{})
}
size += int64(len(object))
calcMD5Bytes := md5.Sum(object)
@@ -331,16 +333,15 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
if err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: recvMD5}, nil)
return ObjectMetadata{}, probe.New(InvalidDigest{Md5: recvMD5})
}
if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(BadDigest{}, nil)
return ObjectMetadata{}, probe.New(BadDigest{})
}
_, err = io.Copy(&fullObject, bytes.NewBuffer(object))
if err != nil {
if _, err := io.Copy(&fullObject, bytes.NewBuffer(object)); err != nil {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, probe.New(err)
}
object = nil
go debug.FreeOSMemory()
@@ -350,18 +351,20 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
// this is needed for final verification inside CreateObject, do not convert this to hex
md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:])
donut.lock.Unlock()
objectMetadata, err := donut.CreateObject(bucket, key, md5sum, size, &fullObject, nil, nil)
if err != nil {
// No need to call internal cleanup functions here, caller will call AbortMultipartUpload()
// which would in-turn cleanup properly in accordance with S3 Spec
return ObjectMetadata{}, iodine.New(err, nil)
}
fullObject.Reset()
{
objectMetadata, err := donut.CreateObject(bucket, key, md5sum, size, &fullObject, nil, nil)
if err != nil {
// No need to call internal cleanup functions here, caller should call AbortMultipartUpload()
// which would in-turn cleanup properly in accordance with S3 Spec
return ObjectMetadata{}, err.Trace()
}
fullObject.Reset()

donut.lock.Lock()
donut.cleanupMultipartSession(bucket, key, uploadID)
donut.lock.Unlock()
return objectMetadata, nil
donut.lock.Lock()
donut.cleanupMultipartSession(bucket, key, uploadID)
donut.lock.Unlock()
return objectMetadata, nil
}
}

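End to end, the Multipart interface composes as: NewMultipartUpload returns an uploadID, each CreateObjectPart returns the part's ETag, and CompleteMultipartUpload consumes an XML body listing the parts, which must arrive in sorted order (InvalidPartOrder otherwise). A hedged sketch of that flow against the CloudStorage interface; the XML element names are assumptions matching the usual S3 shape, not something shown in this diff:

package main

import (
	"bytes"
	"fmt"

	"github.com/minio/minio/pkg/donut"
)

func upload(d donut.CloudStorage, bucket, key string, parts ...[]byte) error {
	uploadID, perr := d.NewMultipartUpload(bucket, key, "application/octet-stream", nil)
	if perr != nil {
		return perr.ToError()
	}
	xml := bytes.NewBufferString("<CompleteMultipartUpload>")
	for i, part := range parts {
		etag, perr := d.CreateObjectPart(bucket, key, uploadID, i+1, "", "",
			int64(len(part)), bytes.NewReader(part), nil)
		if perr != nil {
			// on failure the caller aborts, per the comment in the source
			d.AbortMultipartUpload(bucket, key, uploadID, nil)
			return perr.ToError()
		}
		// quoted ETag, consistent with the strings.Trim(recvMD5, "\"") above
		fmt.Fprintf(xml, "<Part><PartNumber>%d</PartNumber><ETag>%q</ETag></Part>", i+1, etag)
	}
	xml.WriteString("</CompleteMultipartUpload>")
	if _, perr := d.CompleteMultipartUpload(bucket, key, uploadID, xml, nil); perr != nil {
		return perr.ToError()
	}
	return nil
}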
// byKey is a sortable interface for UploadMetadata slice
|
||||
@@ -372,7 +375,7 @@ func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
|
||||
|
||||
// ListMultipartUploads - list incomplete multipart sessions for a given bucket
|
||||
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *Signature) (BucketMultipartResourcesMetadata, error) {
|
||||
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *Signature) (BucketMultipartResourcesMetadata, *probe.Error) {
|
||||
// TODO handle delimiter
|
||||
donut.lock.Lock()
|
||||
defer donut.lock.Unlock()
|
||||
@@ -380,15 +383,15 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
|
||||
if signature != nil {
|
||||
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return BucketMultipartResourcesMetadata{}, iodine.New(err, nil)
return BucketMultipartResourcesMetadata{}, err.Trace()
}
if !ok {
return BucketMultipartResourcesMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil)
return BucketMultipartResourcesMetadata{}, probe.New(SignatureDoesNotMatch{})
}
}

if !IsValidBucket(bucket) {
return BucketMultipartResourcesMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return BucketMultipartResourcesMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
}

if len(donut.config.NodeDiskMap) > 0 {
@@ -396,7 +399,7 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
}

if !donut.storedBuckets.Exists(bucket) {
return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
return BucketMultipartResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
}

storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
@@ -454,7 +457,7 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

// ListObjectParts - list parts from incomplete multipart session for a given object
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *Signature) (ObjectResourcesMetadata, error) {
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *Signature) (ObjectResourcesMetadata, *probe.Error) {
// Verify upload id
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -462,18 +465,18 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
if signature != nil {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
return ObjectResourcesMetadata{}, iodine.New(err, nil)
return ObjectResourcesMetadata{}, err.Trace()
}
if !ok {
return ObjectResourcesMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil)
return ObjectResourcesMetadata{}, probe.New(SignatureDoesNotMatch{})
}
}

if !IsValidBucket(bucket) {
return ObjectResourcesMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
return ObjectResourcesMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectResourcesMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
return ObjectResourcesMetadata{}, probe.New(ObjectNameInvalid{Object: key})
}

if len(donut.config.NodeDiskMap) > 0 {
@@ -481,14 +484,14 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
}

if !donut.storedBuckets.Exists(bucket) {
return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
return ObjectResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if _, ok := storedBucket.multiPartSession[key]; !ok {
return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
return ObjectResourcesMetadata{}, probe.New(ObjectNotFound{Object: key})
}
if storedBucket.multiPartSession[key].UploadID != resources.UploadID {
return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, nil)
return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID})
}
storedParts := storedBucket.partMetadata[key]
objectResourcesMetadata := resources
@@ -512,7 +515,7 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
}
part, ok := storedParts[i]
if !ok {
return ObjectResourcesMetadata{}, iodine.New(InvalidPart{}, nil)
return ObjectResourcesMetadata{}, probe.New(InvalidPart{})
}
parts = append(parts, &part)
}

@@ -18,7 +18,7 @@ package donut

import (
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/probe"
)

// node struct internal
@@ -28,9 +28,9 @@ type node struct {
}

// newNode - instantiates a new node
func newNode(hostname string) (node, error) {
func newNode(hostname string) (node, *probe.Error) {
if hostname == "" {
return node{}, iodine.New(InvalidArgument{}, nil)
return node{}, probe.New(InvalidArgument{})
}
disks := make(map[int]disk.Disk)
n := node{
@@ -46,31 +46,31 @@ func (n node) GetHostname() string {
}

// ListDisks - return the map of attached disks
func (n node) ListDisks() (map[int]disk.Disk, error) {
func (n node) ListDisks() (map[int]disk.Disk, *probe.Error) {
return n.disks, nil
}

// AttachDisk - attach a disk
func (n node) AttachDisk(disk disk.Disk, diskOrder int) error {
func (n node) AttachDisk(disk disk.Disk, diskOrder int) *probe.Error {
if diskOrder < 0 {
return iodine.New(InvalidArgument{}, nil)
return probe.New(InvalidArgument{})
}
n.disks[diskOrder] = disk
return nil
}

// DetachDisk - detach a disk
func (n node) DetachDisk(diskOrder int) error {
func (n node) DetachDisk(diskOrder int) *probe.Error {
delete(n.disks, diskOrder)
return nil
}

// SaveConfig - save node configuration
func (n node) SaveConfig() error {
return iodine.New(NotImplemented{Function: "SaveConfig"}, nil)
func (n node) SaveConfig() *probe.Error {
return probe.New(NotImplemented{Function: "SaveConfig"})
}

// LoadConfig - load node configuration from saved configs
func (n node) LoadConfig() error {
return iodine.New(NotImplemented{Function: "LoadConfig"}, nil)
func (n node) LoadConfig() *probe.Error {
return probe.New(NotImplemented{Function: "LoadConfig"})
}
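
Taken together, node is a thin registry mapping a disk order to an attached disk. A short wiring sketch; disk.New is assumed here as the constructor exposed by pkg/donut/disk, and since its exact signature is not shown in this diff, treat both it and the path as placeholders:

	n, perr := newNode("localhost")
	if perr != nil {
		// handle *probe.Error
	}
	d, err := disk.New("/mnt/disk0") // hypothetical constructor and path
	if err != nil {
		// handle error
	}
	if perr := n.AttachDisk(d, 0); perr != nil {
		// AttachDisk rejects a negative diskOrder with InvalidArgument
	}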

@@ -28,7 +28,7 @@ import (
"unicode/utf8"

"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/probe"
)

// Signature - local variables
@@ -59,7 +59,7 @@ func sumHMAC(key []byte, data []byte) []byte {
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func urlEncodeName(name string) (string, error) {
func urlEncodeName(name string) (string, *probe.Error) {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(name) {
@@ -78,7 +78,7 @@ func urlEncodeName(name string) (string, error) {
default:
len := utf8.RuneLen(s)
if len < 0 {
return "", iodine.New(InvalidArgument{}, nil)
return "", probe.New(InvalidArgument{})
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
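
The encoder above passes the reserved set [a-zA-Z0-9-_.~/] through untouched and percent-encodes every other rune byte-by-byte from its UTF-8 form, as SigV4 canonicalization expects. Two illustrative calls (outputs inferred from the logic shown plus SigV4's uppercase %XX convention, not test output from this repository):

	encoded, _ := urlEncodeName("photos/2015/object.png") // "photos/2015/object.png": all reserved characters
	encoded, _ = urlEncodeName("私")                       // "%E7%A7%81": one %XX triple per UTF-8 byte
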
@@ -212,7 +212,7 @@ func (r *Signature) getSignature(signingKey []byte, stringToSign string) string

// DoesSignatureMatch - Verify authorization header with calculated header in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// returns true if matches, false otherwise; if error is not nil, the result is always false
func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, error) {
func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
// set newly calculated payload
r.Request.Header.Set("X-Amz-Content-Sha256", hashedPayload)

@@ -220,12 +220,12 @@ func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, error) {
var date string
if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = r.Request.Header.Get("Date"); date == "" {
return false, iodine.New(MissingDateHeader{}, nil)
return false, probe.New(MissingDateHeader{})
}
}
t, err := time.Parse(iso8601Format, date)
if err != nil {
return false, iodine.New(err, nil)
return false, probe.New(err)
}
canonicalRequest := r.getCanonicalRequest()
stringToSign := r.getStringToSign(canonicalRequest, t)
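
From here verification follows the standard SigV4 pipeline: canonical request, then string-to-sign, then an HMAC-SHA256 comparison under a derived signing key. Sketched with this file's own helpers (getSignature appears in the hunk header above; the signing-key derivation is summarized in a comment, not quoted, and newSignature is a hypothetical local):

	// signingKey = chained HMAC-SHA256 over the secret, date, region, service and "aws4_request"
	newSignature := r.getSignature(signingKey, stringToSign)
	// the request is authentic only if newSignature equals the signature the client sent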