From d09fd8b0a1bc71865c27f3801ba13ac4e2b36a01 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 3 Aug 2015 16:17:21 -0700 Subject: [PATCH] Migrate from iodine to probe --- commands.go | 11 +- disks.go | 18 +- main.go | 12 +- pkg/auth/auth.go | 10 +- pkg/auth/config.go | 22 +-- pkg/controller/client.go | 48 +++-- pkg/controller/rpc.go | 12 +- pkg/donut/bucket.go | 192 ++++++++++--------- pkg/donut/config.go | 22 +-- pkg/donut/disk/disk.go | 53 ++--- pkg/donut/donut-v1.go | 297 +++++++++++++---------------- pkg/donut/donut-v2.go | 210 ++++++++++---------- pkg/donut/encoder.go | 56 +++--- pkg/donut/heal.go | 16 +- pkg/donut/interfaces.go | 44 +++-- pkg/donut/management.go | 24 +-- pkg/donut/multipart.go | 159 +++++++-------- pkg/donut/node.go | 22 +-- pkg/donut/signature-v4.go | 12 +- pkg/iodine/iodine.go | 232 ---------------------- pkg/iodine/iodine_test.go | 116 ----------- pkg/probe/probe.go | 3 + pkg/quick/quick.go | 74 ++++--- pkg/server/api/bucket-handlers.go | 110 ++++++----- pkg/server/api/logging-handlers.go | 6 +- pkg/server/api/object-handlers.go | 180 +++++++++-------- pkg/server/api/range.go | 22 +-- pkg/server/api/signature.go | 19 +- pkg/server/minhttp/http.go | 28 +-- pkg/server/minhttp/listen.go | 6 +- pkg/server/minhttp/net.go | 64 +++---- pkg/server/minhttp/net_test.go | 78 -------- pkg/server/rpc/auth.go | 13 +- pkg/server/rpc/donut.go | 6 +- pkg/server/rpc/sysinfo.go | 18 +- pkg/server/rpc_test.go | 9 +- pkg/server/server.go | 16 +- pkg/utils/atomic/atomic.go | 16 +- 38 files changed, 917 insertions(+), 1339 deletions(-) delete mode 100644 pkg/iodine/iodine.go delete mode 100644 pkg/iodine/iodine_test.go delete mode 100644 pkg/server/minhttp/net_test.go diff --git a/commands.go b/commands.go index 3bdeee4c3..d8890614b 100644 --- a/commands.go +++ b/commands.go @@ -18,7 +18,6 @@ package main import ( "os" - "os/user" "path/filepath" "github.com/minio/cli" @@ -163,22 +162,14 @@ func runServer(c *cli.Context) { if c.Args().Present() { 
cli.ShowCommandHelpAndExit(c, "server", 1) } - _, err := user.Current() - if err != nil { - Fatalf("Unable to determine current user. Reason: %s\n", err) - } apiServerConfig := getServerConfig(c) - err = server.StartServices(apiServerConfig) + err := server.StartServices(apiServerConfig) if err != nil { Fatalln(err) } } func runController(c *cli.Context) { - _, err := user.Current() - if err != nil { - Fatalf("Unable to determine current user. Reason: %s\n", err) - } if len(c.Args()) < 2 || c.Args().First() == "help" { cli.ShowCommandHelpAndExit(c, "controller", 1) // last argument is exit code } diff --git a/disks.go b/disks.go index 438054dd4..5758ec4af 100644 --- a/disks.go +++ b/disks.go @@ -22,34 +22,34 @@ import ( "os" "syscall" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // isUsable provides a comprehensive way of knowing if the provided mountPath is mounted and writable -func isUsable(mountPath string) (bool, error) { +func isUsable(mountPath string) (bool, *probe.Error) { mntpoint, err := os.Stat(mountPath) if err != nil { - return false, iodine.New(err, nil) + return false, probe.New(err) } parent, err := os.Stat("/") if err != nil { - return false, iodine.New(err, nil) + return false, probe.New(err) } mntpointSt := mntpoint.Sys().(*syscall.Stat_t) parentSt := parent.Sys().(*syscall.Stat_t) if mntpointSt.Dev == parentSt.Dev { - return false, iodine.New(fmt.Errorf("Not mounted %s", mountPath), nil) + return false, probe.New(fmt.Errorf("Not mounted %s", mountPath)) } testFile, err := ioutil.TempFile(mountPath, "writetest-") if err != nil { - return false, iodine.New(err, nil) + return false, probe.New(err) } - testFileName := testFile.Name() // close the file, to avoid leaky fd's - testFile.Close() + defer testFile.Close() + testFileName := testFile.Name() if err := os.Remove(testFileName); err != nil { - return false, iodine.New(err, nil) + return false, probe.New(err) } return true, nil } diff --git a/main.go b/main.go 
index 6c716757c..813eeb4b2 100644 --- a/main.go +++ b/main.go @@ -22,11 +22,9 @@ import ( "os/user" "runtime" "strconv" - "time" "github.com/dustin/go-humanize" "github.com/minio/cli" - "github.com/minio/minio/pkg/iodine" ) var globalDebugFlag = false @@ -96,11 +94,13 @@ func getSystemData() map[string]string { } } -func main() { - // set up iodine - iodine.SetGlobalState("minio.version", Version) - iodine.SetGlobalState("minio.starttime", time.Now().Format(time.RFC3339)) +func init() { + if _, err := user.Current(); err != nil { + Fatalf("Unable to determine current user. Reason: %s\n", err) + } +} +func main() { // set up go max processes runtime.GOMAXPROCS(runtime.NumCPU()) diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go index 5ce2a4025..0c8d589ec 100644 --- a/pkg/auth/auth.go +++ b/pkg/auth/auth.go @@ -19,6 +19,8 @@ package auth import ( "crypto/rand" "encoding/base64" + + "github.com/minio/minio/pkg/probe" ) // Static alphaNumeric table used for generating unique keys @@ -26,11 +28,11 @@ var alphaNumericTable = []byte("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ") // GenerateAccessKeyID - generate random alpha numeric value using only uppercase characters // takes input as size in integer -func GenerateAccessKeyID() ([]byte, error) { +func GenerateAccessKeyID() ([]byte, *probe.Error) { alpha := make([]byte, MinioAccessID) _, err := rand.Read(alpha) if err != nil { - return nil, err + return nil, probe.New(err) } for i := 0; i < MinioAccessID; i++ { alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))] @@ -39,11 +41,11 @@ func GenerateAccessKeyID() ([]byte, error) { } // GenerateSecretAccessKey - generate random base64 numeric value from a random seed. 
-func GenerateSecretAccessKey() ([]byte, error) { +func GenerateSecretAccessKey() ([]byte, *probe.Error) { rb := make([]byte, MinioSecretID) _, err := rand.Read(rb) if err != nil { - return nil, err + return nil, probe.New(err) } return []byte(base64.StdEncoding.EncodeToString(rb))[:MinioSecretID], nil } diff --git a/pkg/auth/config.go b/pkg/auth/config.go index 12ace972b..ddbbff3ea 100644 --- a/pkg/auth/config.go +++ b/pkg/auth/config.go @@ -20,7 +20,7 @@ import ( "os/user" "path/filepath" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/quick" ) @@ -38,13 +38,13 @@ type Config struct { } // getAuthConfigPath get donut config file path -func getAuthConfigPath() (string, error) { +func getAuthConfigPath() (string, *probe.Error) { if customConfigPath != "" { return customConfigPath, nil } u, err := user.Current() if err != nil { - return "", iodine.New(err, nil) + return "", probe.New(err) } authConfigPath := filepath.Join(u.HomeDir, ".minio", "users.json") return authConfigPath, nil @@ -59,36 +59,36 @@ func SetAuthConfigPath(configPath string) { } // SaveConfig save donut config -func SaveConfig(a *Config) error { +func SaveConfig(a *Config) *probe.Error { authConfigPath, err := getAuthConfigPath() if err != nil { - return iodine.New(err, nil) + return err.Trace() } qc, err := quick.New(a) if err != nil { - return iodine.New(err, nil) + return err.Trace() } if err := qc.Save(authConfigPath); err != nil { - return iodine.New(err, nil) + return err.Trace() } return nil } // LoadConfig load donut config -func LoadConfig() (*Config, error) { +func LoadConfig() (*Config, *probe.Error) { authConfigPath, err := getAuthConfigPath() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } a := &Config{} a.Version = "0.0.1" a.Users = make(map[string]*User) qc, err := quick.New(a) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } if err := qc.Load(authConfigPath); err != 
nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } return qc.Data().(*Config), nil } diff --git a/pkg/controller/client.go b/pkg/controller/client.go index ddf106849..c7a7bbb6c 100644 --- a/pkg/controller/client.go +++ b/pkg/controller/client.go @@ -22,7 +22,7 @@ import ( jsonrpc "github.com/gorilla/rpc/v2/json" "github.com/minio/minio/pkg/auth" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/server/rpc" ) @@ -33,25 +33,31 @@ func closeResp(resp *http.Response) { } // GetMemStats get memory status of the server at given url -func GetMemStats(url string) ([]byte, error) { +func GetMemStats(url string) ([]byte, *probe.Error) { op := RPCOps{ Method: "MemStats.Get", Request: rpc.Args{Request: ""}, } req, err := NewRequest(url, op, http.DefaultTransport) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } resp, err := req.Do() defer closeResp(resp) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } var reply rpc.MemStatsReply if err := jsonrpc.DecodeClientResponse(resp.Body, &reply); err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) + } + { + jsonRespBytes, err := json.MarshalIndent(reply, "", "\t") + if err != nil { + return nil, probe.New(err) + } + return jsonRespBytes, nil } - return json.MarshalIndent(reply, "", "\t") } // GetSysInfo get system status of the server at given url @@ -62,18 +68,24 @@ func GetSysInfo(url string) ([]byte, error) { } req, err := NewRequest(url, op, http.DefaultTransport) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } resp, err := req.Do() defer closeResp(resp) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } var reply rpc.SysInfoReply if err := jsonrpc.DecodeClientResponse(resp.Body, &reply); err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) + } + { + jsonRespBytes, err := json.MarshalIndent(reply, 
"", "\t") + if err != nil { + return nil, probe.New(err) + } + return jsonRespBytes, nil } - return json.MarshalIndent(reply, "", "\t") } // GetAuthKeys get access key id and secret access key @@ -84,16 +96,16 @@ func GetAuthKeys(url string) ([]byte, error) { } req, err := NewRequest(url, op, http.DefaultTransport) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } resp, err := req.Do() defer closeResp(resp) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } var reply rpc.AuthReply if err := jsonrpc.DecodeClientResponse(resp.Body, &reply); err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } authConfig := &auth.Config{} authConfig.Version = "0.0.1" @@ -104,9 +116,15 @@ func GetAuthKeys(url string) ([]byte, error) { user.SecretAccessKey = reply.SecretAccessKey authConfig.Users[reply.AccessKeyID] = user if err := auth.SaveConfig(authConfig); err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() + } + { + jsonRespBytes, err := json.MarshalIndent(reply, "", "\t") + if err != nil { + return nil, probe.New(err) + } + return jsonRespBytes, nil } - return json.MarshalIndent(reply, "", "\t") } // Add more functions here for other RPC messages diff --git a/pkg/controller/rpc.go b/pkg/controller/rpc.go index fc159186b..ae180c232 100644 --- a/pkg/controller/rpc.go +++ b/pkg/controller/rpc.go @@ -21,7 +21,7 @@ import ( "net/http" "github.com/gorilla/rpc/v2/json" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // RPCOps RPC operation @@ -37,14 +37,14 @@ type RPCRequest struct { } // NewRequest initiate a new client RPC request -func NewRequest(url string, op RPCOps, transport http.RoundTripper) (*RPCRequest, error) { +func NewRequest(url string, op RPCOps, transport http.RoundTripper) (*RPCRequest, *probe.Error) { params, err := json.EncodeClientRequest(op.Method, op.Request) if err != nil { - return nil, iodine.New(err, nil) + return nil, 
probe.New(err) } req, err := http.NewRequest("POST", url, bytes.NewReader(params)) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } rpcReq := &RPCRequest{} rpcReq.req = req @@ -57,10 +57,10 @@ func NewRequest(url string, op RPCOps, transport http.RoundTripper) (*RPCRequest } // Do - make a http connection -func (r RPCRequest) Do() (*http.Response, error) { +func (r RPCRequest) Do() (*http.Response, *probe.Error) { resp, err := r.transport.RoundTrip(r.req) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } return resp, nil } diff --git a/pkg/donut/bucket.go b/pkg/donut/bucket.go index b75e0171f..8b2733f43 100644 --- a/pkg/donut/bucket.go +++ b/pkg/donut/bucket.go @@ -34,7 +34,7 @@ import ( "github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/crypto/sha512" "github.com/minio/minio/pkg/donut/disk" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) const ( @@ -52,15 +52,9 @@ type bucket struct { } // newBucket - instantiate a new bucket -func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bucket, BucketMetadata, error) { - errParams := map[string]string{ - "bucketName": bucketName, - "donutName": donutName, - "aclType": aclType, - } - +func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bucket, BucketMetadata, *probe.Error) { if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" { - return bucket{}, BucketMetadata{}, iodine.New(InvalidArgument{}, errParams) + return bucket{}, BucketMetadata{}, probe.New(InvalidArgument{}) } b := bucket{} @@ -89,14 +83,14 @@ func (b bucket) getBucketName() string { } // getBucketMetadataReaders - -func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, error) { +func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) { readers := make(map[int]io.ReadCloser) var disks map[int]disk.Disk - var err error + var err 
*probe.Error for _, node := range b.nodes { disks, err = node.ListDisks() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } } var bucketMetaDataReader io.ReadCloser @@ -108,40 +102,44 @@ func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, error) { readers[order] = bucketMetaDataReader } if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } return readers, nil } // getBucketMetadata - -func (b bucket) getBucketMetadata() (*AllBuckets, error) { - var err error +func (b bucket) getBucketMetadata() (*AllBuckets, *probe.Error) { metadata := new(AllBuckets) - readers, err := b.getBucketMetadataReaders() - if err != nil { - return nil, iodine.New(err, nil) + var readers map[int]io.ReadCloser + { + var err *probe.Error + readers, err = b.getBucketMetadataReaders() + if err != nil { + return nil, err.Trace() + } } for _, reader := range readers { defer reader.Close() } + var err error for _, reader := range readers { jenc := json.NewDecoder(reader) if err = jenc.Decode(metadata); err == nil { return metadata, nil } } - return nil, iodine.New(err, nil) + return nil, probe.New(err) } // GetObjectMetadata - get metadata for an object -func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, error) { +func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) { b.lock.Lock() defer b.lock.Unlock() return b.readObjectMetadata(objectName) } // ListObjects - list all objects -func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, error) { +func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) { b.lock.Lock() defer b.lock.Unlock() if maxkeys <= 0 { @@ -151,7 +149,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List var objects []string bucketMetadata, err := b.getBucketMetadata() if err != nil { - return ListObjectsResults{}, iodine.New(err, nil) + 
return ListObjectsResults{}, err.Trace() } for objectName := range bucketMetadata.Buckets[b.getBucketName()].Multiparts { if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) { @@ -206,7 +204,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List for _, objectName := range results { objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName)) if err != nil { - return ListObjectsResults{}, iodine.New(err, nil) + return ListObjectsResults{}, err.Trace() } listObjects.Objects[objectName] = objMetadata } @@ -214,22 +212,22 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List } // ReadObject - open an object to read -func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err error) { +func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err *probe.Error) { b.lock.Lock() defer b.lock.Unlock() reader, writer := io.Pipe() // get list of objects bucketMetadata, err := b.getBucketMetadata() if err != nil { - return nil, 0, iodine.New(err, nil) + return nil, 0, err.Trace() } // check if object exists if _, ok := bucketMetadata.Buckets[b.getBucketName()].BucketObjects[objectName]; !ok { - return nil, 0, iodine.New(ObjectNotFound{Object: objectName}, nil) + return nil, 0, probe.New(ObjectNotFound{Object: objectName}) } objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName)) if err != nil { - return nil, 0, iodine.New(err, nil) + return nil, 0, err.Trace() } // read and reply back to GetObject() request in a go-routine go b.readObjectData(normalizeObjectName(objectName), writer, objMetadata) @@ -237,15 +235,15 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, } // WriteObject - write a new object into bucket -func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *Signature) (ObjectMetadata, error) { +func (b 
bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) { b.lock.Lock() defer b.lock.Unlock() if objectName == "" || objectData == nil { - return ObjectMetadata{}, iodine.New(InvalidArgument{}, nil) + return ObjectMetadata{}, probe.New(InvalidArgument{}) } writers, err := b.getObjectWriters(normalizeObjectName(objectName), "data") if err != nil { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } sumMD5 := md5.New() sum512 := sha512.New() @@ -268,7 +266,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, totalLength, err := io.Copy(mw, objectData) if err != nil { CleanupWritersOnError(writers) - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, probe.New(err) } objMetadata.Size = totalLength case false: @@ -276,13 +274,13 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, k, m, err := b.getDataAndParity(len(writers)) if err != nil { CleanupWritersOnError(writers) - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } // write encoded data with k, m and writers chunkCount, totalLength, err := b.writeObjectData(k, m, writers, objectData, size, mwriter) if err != nil { CleanupWritersOnError(writers) - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } /// donutMetadata section objMetadata.BlockSize = blockSize @@ -301,14 +299,14 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, if err != nil { // error occurred while doing signature calculation, we return and also cleanup any temporary writers. 
CleanupWritersOnError(writers) - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } if !ok { // purge all writers, when control flow reaches here // // Signature mismatch occurred all temp files to be removed and all data purged. CleanupWritersOnError(writers) - return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil) + return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{}) } } objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum) @@ -317,7 +315,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objMetadata.MD5Sum); err != nil { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } } objMetadata.Metadata = metadata @@ -325,7 +323,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, if err := b.writeObjectMetadata(normalizeObjectName(objectName), objMetadata); err != nil { // purge all writers, when control flow reaches here CleanupWritersOnError(writers) - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } // close all writers, when control flow reaches here for _, writer := range writers { @@ -335,39 +333,39 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, } // isMD5SumEqual - returns error if md5sum mismatches, other its `nil` -func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error { +func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error { if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" { expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } actualMD5SumBytes, err := 
hex.DecodeString(actualMD5Sum) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) { - return iodine.New(BadDigest{}, nil) + return probe.New(BadDigest{}) } return nil } - return iodine.New(InvalidArgument{}, nil) + return probe.New(InvalidArgument{}) } // writeObjectMetadata - write additional object metadata -func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadata) error { +func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadata) *probe.Error { if objMetadata.Object == "" { - return iodine.New(InvalidArgument{}, nil) + return probe.New(InvalidArgument{}) } objMetadataWriters, err := b.getObjectWriters(objectName, objectMetadataConfig) if err != nil { - return iodine.New(err, nil) + return err.Trace() } for _, objMetadataWriter := range objMetadataWriters { jenc := json.NewEncoder(objMetadataWriter) if err := jenc.Encode(&objMetadata); err != nil { // Close writers and purge all temporary entries CleanupWritersOnError(objMetadataWriters) - return iodine.New(err, nil) + return probe.New(err) } } for _, objMetadataWriter := range objMetadataWriters { @@ -377,26 +375,28 @@ func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadat } // readObjectMetadata - read object metadata -func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, error) { +func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) { if objectName == "" { - return ObjectMetadata{}, iodine.New(InvalidArgument{}, nil) + return ObjectMetadata{}, probe.New(InvalidArgument{}) } - var err error objMetadata := ObjectMetadata{} objMetadataReaders, err := b.getObjectReaders(objectName, objectMetadataConfig) if err != nil { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } for _, objMetadataReader := range objMetadataReaders { defer objMetadataReader.Close() } - for _, 
objMetadataReader := range objMetadataReaders { - jdec := json.NewDecoder(objMetadataReader) - if err = jdec.Decode(&objMetadata); err == nil { - return objMetadata, nil + { + var err error + for _, objMetadataReader := range objMetadataReaders { + jdec := json.NewDecoder(objMetadataReader) + if err = jdec.Decode(&objMetadata); err == nil { + return objMetadata, nil + } } + return ObjectMetadata{}, probe.New(err) } - return ObjectMetadata{}, iodine.New(err, nil) } // TODO - This a temporary normalization of objectNames, need to find a better way @@ -413,14 +413,14 @@ func normalizeObjectName(objectName string) string { } // getDataAndParity - calculate k, m (data and parity) values from number of disks -func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error) { +func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe.Error) { if totalWriters <= 1 { - return 0, 0, iodine.New(InvalidArgument{}, nil) + return 0, 0, probe.New(InvalidArgument{}) } quotient := totalWriters / 2 // not using float or abs to let integer round off to lower value // quotient cannot be bigger than (255 / 2) = 127 if quotient > 127 { - return 0, 0, iodine.New(ParityOverflow{}, nil) + return 0, 0, probe.New(ParityOverflow{}) } remainder := totalWriters % 2 // will be 1 for odd and 0 for even numbers k = uint8(quotient + remainder) @@ -429,11 +429,11 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error) } // writeObjectData - -func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, writer io.Writer) (int, int, error) { +func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, writer io.Writer) (int, int, *probe.Error) { encoder, err := newEncoder(k, m, "Cauchy") chunkSize := int64(10 * 1024 * 1024) if err != nil { - return 0, 0, iodine.New(err, nil) + return 0, 0, err.Trace() } chunkCount := 0 totalLength := 0 @@ -447,11 
+447,10 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData totalLength = totalLength + int(readSize) encodedBlocks, inputData, err := encoder.EncodeStream(objectData, readSize) if err != nil { - return 0, 0, iodine.New(err, nil) + return 0, 0, err.Trace() } - _, err = writer.Write(inputData) - if err != nil { - return 0, 0, iodine.New(err, nil) + if _, err := writer.Write(inputData); err != nil { + return 0, 0, probe.New(err) } for blockIndex, block := range encodedBlocks { errCh := make(chan error, 1) @@ -462,7 +461,7 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData }(writers[blockIndex], bytes.NewReader(block), errCh) if err := <-errCh; err != nil { // Returning error is fine here CleanupErrors() would cleanup writers - return 0, 0, iodine.New(err, nil) + return 0, 0, probe.New(err) } } chunkCount = chunkCount + 1 @@ -474,21 +473,25 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) { readers, err := b.getObjectReaders(objectName, "data") if err != nil { - writer.CloseWithError(iodine.New(err, nil)) + writer.CloseWithError(err.Trace()) return } for _, reader := range readers { defer reader.Close() } - expectedMd5sum, err := hex.DecodeString(objMetadata.MD5Sum) - if err != nil { - writer.CloseWithError(iodine.New(err, nil)) - return - } - expected512Sum, err := hex.DecodeString(objMetadata.SHA512Sum) - if err != nil { - writer.CloseWithError(iodine.New(err, nil)) - return + var expected512Sum, expectedMd5sum []byte + { + var err error + expectedMd5sum, err = hex.DecodeString(objMetadata.MD5Sum) + if err != nil { + writer.CloseWithError(probe.New(err)) + return + } + expected512Sum, err = hex.DecodeString(objMetadata.SHA512Sum) + if err != nil { + writer.CloseWithError(probe.New(err)) + return + } } hasher := md5.New() sum512hasher := sha256.New() @@ -496,24 +499,23 
@@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta switch len(readers) > 1 { case true: if objMetadata.ErasureTechnique == "" { - writer.CloseWithError(iodine.New(MissingErasureTechnique{}, nil)) + writer.CloseWithError(probe.New(MissingErasureTechnique{})) return } encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks, objMetadata.ErasureTechnique) if err != nil { - writer.CloseWithError(iodine.New(err, nil)) + writer.CloseWithError(err.Trace()) return } totalLeft := objMetadata.Size for i := 0; i < objMetadata.ChunkCount; i++ { decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer) if err != nil { - writer.CloseWithError(iodine.New(err, nil)) + writer.CloseWithError(err.Trace()) return } - _, err = io.Copy(mwriter, bytes.NewReader(decodedData)) - if err != nil { - writer.CloseWithError(iodine.New(err, nil)) + if _, err := io.Copy(mwriter, bytes.NewReader(decodedData)); err != nil { + writer.CloseWithError(probe.New(err)) return } totalLeft = totalLeft - int64(objMetadata.BlockSize) @@ -521,17 +523,17 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta case false: _, err := io.Copy(writer, readers[0]) if err != nil { - writer.CloseWithError(iodine.New(err, nil)) + writer.CloseWithError(probe.New(err)) return } } // check if decodedData md5sum matches if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) { - writer.CloseWithError(iodine.New(ChecksumMismatch{}, nil)) + writer.CloseWithError(probe.New(ChecksumMismatch{})) return } if !bytes.Equal(expected512Sum, sum512hasher.Sum(nil)) { - writer.CloseWithError(iodine.New(ChecksumMismatch{}, nil)) + writer.CloseWithError(probe.New(ChecksumMismatch{})) return } writer.Close() @@ -539,7 +541,7 @@ func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMeta } // decodeEncodedData - -func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers 
map[int]io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, error) { +func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, *probe.Error) { var curBlockSize int64 if blockSize < totalLeft { curBlockSize = blockSize @@ -548,34 +550,34 @@ func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io } curChunkSize, err := encoder.GetEncodedBlockLen(int(curBlockSize)) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } encodedBytes := make([][]byte, encoder.k+encoder.m) for i, reader := range readers { var bytesBuffer bytes.Buffer _, err := io.CopyN(&bytesBuffer, reader, int64(curChunkSize)) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } encodedBytes[i] = bytesBuffer.Bytes() } decodedData, err := encoder.Decode(encodedBytes, int(curBlockSize)) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } return decodedData, nil } // getObjectReaders - -func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, error) { +func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, *probe.Error) { readers := make(map[int]io.ReadCloser) var disks map[int]disk.Disk - var err error + var err *probe.Error nodeSlice := 0 for _, node := range b.nodes { disks, err = node.ListDisks() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } for order, disk := range disks { var objectSlice io.ReadCloser @@ -589,19 +591,19 @@ func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.Read nodeSlice = nodeSlice + 1 } if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } return readers, nil } // getObjectWriters - -func (b bucket) getObjectWriters(objectName, objectMeta string) ([]io.WriteCloser, error) { +func (b bucket) getObjectWriters(objectName, objectMeta string) 
([]io.WriteCloser, *probe.Error) { var writers []io.WriteCloser nodeSlice := 0 for _, node := range b.nodes { disks, err := node.ListDisks() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } writers = make([]io.WriteCloser, len(disks)) for order, disk := range disks { @@ -609,7 +611,7 @@ func (b bucket) getObjectWriters(objectName, objectMeta string) ([]io.WriteClose objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta) objectSlice, err := disk.CreateFile(objectPath) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } writers[order] = objectSlice } diff --git a/pkg/donut/config.go b/pkg/donut/config.go index deab470ee..4da2d2122 100644 --- a/pkg/donut/config.go +++ b/pkg/donut/config.go @@ -20,18 +20,18 @@ import ( "os/user" "path/filepath" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/quick" ) // getDonutConfigPath get donut config file path -func getDonutConfigPath() (string, error) { +func getDonutConfigPath() (string, *probe.Error) { if customConfigPath != "" { return customConfigPath, nil } u, err := user.Current() if err != nil { - return "", iodine.New(err, nil) + return "", probe.New(err) } donutConfigPath := filepath.Join(u.HomeDir, ".minio", "donut.json") return donutConfigPath, nil @@ -46,35 +46,35 @@ func SetDonutConfigPath(configPath string) { } // SaveConfig save donut config -func SaveConfig(a *Config) error { +func SaveConfig(a *Config) *probe.Error { donutConfigPath, err := getDonutConfigPath() if err != nil { - return iodine.New(err, nil) + return err.Trace() } qc, err := quick.New(a) if err != nil { - return iodine.New(err, nil) + return err.Trace() } if err := qc.Save(donutConfigPath); err != nil { - return iodine.New(err, nil) + return err.Trace() } return nil } // LoadConfig load donut config -func LoadConfig() (*Config, error) { +func LoadConfig() (*Config, *probe.Error) { donutConfigPath, err := 
getDonutConfigPath() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } a := &Config{} a.Version = "0.0.1" qc, err := quick.New(a) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } if err := qc.Load(donutConfigPath); err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } return qc.Data().(*Config), nil } diff --git a/pkg/donut/disk/disk.go b/pkg/donut/disk/disk.go index 8c450740b..c0882ca52 100644 --- a/pkg/donut/disk/disk.go +++ b/pkg/donut/disk/disk.go @@ -25,7 +25,7 @@ import ( "sync" "syscall" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/utils/atomic" ) @@ -37,21 +37,22 @@ type Disk struct { } // New - instantiate new disk -func New(diskPath string) (Disk, error) { +func New(diskPath string) (Disk, *probe.Error) { if diskPath == "" { - return Disk{}, iodine.New(InvalidArgument{}, nil) + return Disk{}, probe.New(InvalidArgument{}) } st, err := os.Stat(diskPath) if err != nil { - return Disk{}, iodine.New(err, nil) + return Disk{}, probe.New(err) } + if !st.IsDir() { - return Disk{}, iodine.New(syscall.ENOTDIR, nil) + return Disk{}, probe.New(syscall.ENOTDIR) } s := syscall.Statfs_t{} err = syscall.Statfs(diskPath, &s) if err != nil { - return Disk{}, iodine.New(err, nil) + return Disk{}, probe.New(err) } disk := Disk{ lock: &sync.Mutex{}, @@ -63,8 +64,7 @@ func New(diskPath string) (Disk, error) { disk.fsInfo["MountPoint"] = disk.path return disk, nil } - return Disk{}, iodine.New(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)}, - map[string]string{"Type": strconv.FormatInt(int64(s.Type), 10)}) + return Disk{}, probe.New(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)}) } // IsUsable - is disk usable, alive @@ -99,25 +99,28 @@ func (disk Disk) GetFSInfo() map[string]string { } // MakeDir - make a directory inside disk root path -func (disk Disk) MakeDir(dirname string) error { +func (disk Disk) 
MakeDir(dirname string) *probe.Error { disk.lock.Lock() defer disk.lock.Unlock() - return os.MkdirAll(filepath.Join(disk.path, dirname), 0700) + if err := os.MkdirAll(filepath.Join(disk.path, dirname), 0700); err != nil { + return probe.New(err) + } + return nil } // ListDir - list a directory inside disk root path, get only directories -func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) { +func (disk Disk) ListDir(dirname string) ([]os.FileInfo, *probe.Error) { disk.lock.Lock() defer disk.lock.Unlock() dir, err := os.Open(filepath.Join(disk.path, dirname)) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } defer dir.Close() contents, err := dir.Readdir(-1) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } var directories []os.FileInfo for _, content := range contents { @@ -130,18 +133,18 @@ func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) { } // ListFiles - list a directory inside disk root path, get only files -func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) { +func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, *probe.Error) { disk.lock.Lock() defer disk.lock.Unlock() dir, err := os.Open(filepath.Join(disk.path, dirname)) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } defer dir.Close() contents, err := dir.Readdir(-1) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } var files []os.FileInfo for _, content := range contents { @@ -154,48 +157,48 @@ func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) { } // CreateFile - create a file inside disk root path, replies with custome disk.File which provides atomic writes -func (disk Disk) CreateFile(filename string) (*atomic.File, error) { +func (disk Disk) CreateFile(filename string) (*atomic.File, *probe.Error) { disk.lock.Lock() defer disk.lock.Unlock() if filename == "" { - return nil, iodine.New(InvalidArgument{}, nil) + 
return nil, probe.New(InvalidArgument{}) } f, err := atomic.FileCreate(filepath.Join(disk.path, filename)) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } return f, nil } // Open - read a file inside disk root path -func (disk Disk) Open(filename string) (*os.File, error) { +func (disk Disk) Open(filename string) (*os.File, *probe.Error) { disk.lock.Lock() defer disk.lock.Unlock() if filename == "" { - return nil, iodine.New(InvalidArgument{}, nil) + return nil, probe.New(InvalidArgument{}) } dataFile, err := os.Open(filepath.Join(disk.path, filename)) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } return dataFile, nil } // OpenFile - Use with caution -func (disk Disk) OpenFile(filename string, flags int, perm os.FileMode) (*os.File, error) { +func (disk Disk) OpenFile(filename string, flags int, perm os.FileMode) (*os.File, *probe.Error) { disk.lock.Lock() defer disk.lock.Unlock() if filename == "" { - return nil, iodine.New(InvalidArgument{}, nil) + return nil, probe.New(InvalidArgument{}) } dataFile, err := os.OpenFile(filepath.Join(disk.path, filename), flags, perm) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } return dataFile, nil } diff --git a/pkg/donut/donut-v1.go b/pkg/donut/donut-v1.go index 0735e1698..3dc3d13e1 100644 --- a/pkg/donut/donut-v1.go +++ b/pkg/donut/donut-v1.go @@ -35,7 +35,7 @@ import ( "github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/crypto/sha512" "github.com/minio/minio/pkg/donut/disk" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // config files used inside Donut @@ -52,41 +52,41 @@ const ( /// v1 API functions // makeBucket - make a new bucket -func (donut API) makeBucket(bucket string, acl BucketACL) error { +func (donut API) makeBucket(bucket string, acl BucketACL) *probe.Error { if bucket == "" || strings.TrimSpace(bucket) == "" { - return iodine.New(InvalidArgument{}, nil) + 
return probe.New(InvalidArgument{}) } return donut.makeDonutBucket(bucket, acl.String()) } // getBucketMetadata - get bucket metadata -func (donut API) getBucketMetadata(bucketName string) (BucketMetadata, error) { +func (donut API) getBucketMetadata(bucketName string) (BucketMetadata, *probe.Error) { if err := donut.listDonutBuckets(); err != nil { - return BucketMetadata{}, iodine.New(err, nil) + return BucketMetadata{}, err.Trace() } if _, ok := donut.buckets[bucketName]; !ok { - return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucketName}, nil) + return BucketMetadata{}, probe.New(BucketNotFound{Bucket: bucketName}) } metadata, err := donut.getDonutBucketMetadata() if err != nil { - return BucketMetadata{}, iodine.New(err, nil) + return BucketMetadata{}, err.Trace() } return metadata.Buckets[bucketName], nil } // setBucketMetadata - set bucket metadata -func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) error { +func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) *probe.Error { if err := donut.listDonutBuckets(); err != nil { - return iodine.New(err, nil) + return err.Trace() } metadata, err := donut.getDonutBucketMetadata() if err != nil { - return iodine.New(err, nil) + return err.Trace() } oldBucketMetadata := metadata.Buckets[bucketName] acl, ok := bucketMetadata["acl"] if !ok { - return iodine.New(InvalidArgument{}, nil) + return probe.New(InvalidArgument{}) } oldBucketMetadata.ACL = BucketACL(acl) metadata.Buckets[bucketName] = oldBucketMetadata @@ -94,9 +94,9 @@ func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string] } // listBuckets - return list of buckets -func (donut API) listBuckets() (map[string]BucketMetadata, error) { +func (donut API) listBuckets() (map[string]BucketMetadata, *probe.Error) { if err := donut.listDonutBuckets(); err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } metadata, err := 
donut.getDonutBucketMetadata() if err != nil { @@ -112,95 +112,80 @@ func (donut API) listBuckets() (map[string]BucketMetadata, error) { } // listObjects - return list of objects -func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, error) { - errParams := map[string]string{ - "bucket": bucket, - "prefix": prefix, - "marker": marker, - "delimiter": delimiter, - "maxkeys": strconv.Itoa(maxkeys), - } +func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) { if err := donut.listDonutBuckets(); err != nil { - return ListObjectsResults{}, iodine.New(err, errParams) + return ListObjectsResults{}, err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return ListObjectsResults{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams) + return ListObjectsResults{}, probe.New(BucketNotFound{Bucket: bucket}) } listObjects, err := donut.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys) if err != nil { - return ListObjectsResults{}, iodine.New(err, errParams) + return ListObjectsResults{}, err.Trace() } return listObjects, nil } // putObject - put object -func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (ObjectMetadata, error) { - errParams := map[string]string{ - "bucket": bucket, - "object": object, - } +func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) { if bucket == "" || strings.TrimSpace(bucket) == "" { - return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams) + return ObjectMetadata{}, probe.New(InvalidArgument{}) } if object == "" || strings.TrimSpace(object) == "" { - return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams) + return ObjectMetadata{}, probe.New(InvalidArgument{}) } if err := 
donut.listDonutBuckets(); err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } bucketMeta, err := donut.getDonutBucketMetadata() if err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok { - return ObjectMetadata{}, iodine.New(ObjectExists{Object: object}, errParams) + return ObjectMetadata{}, probe.New(ObjectExists{Object: object}) } objMetadata, err := donut.buckets[bucket].WriteObject(object, reader, size, expectedMD5Sum, metadata, signature) if err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } bucketMeta.Buckets[bucket].BucketObjects[object] = struct{}{} if err := donut.setDonutBucketMetadata(bucketMeta); err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } return objMetadata, nil } // putObject - put object -func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (PartMetadata, error) { - errParams := map[string]string{ - "bucket": bucket, - "object": object, - } +func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (PartMetadata, *probe.Error) { if bucket == "" || strings.TrimSpace(bucket) == "" { - return PartMetadata{}, iodine.New(InvalidArgument{}, errParams) + return PartMetadata{}, probe.New(InvalidArgument{}) } if object == "" || strings.TrimSpace(object) == "" { - return PartMetadata{}, iodine.New(InvalidArgument{}, errParams) + return PartMetadata{}, 
probe.New(InvalidArgument{}) } if err := donut.listDonutBuckets(); err != nil { - return PartMetadata{}, iodine.New(err, errParams) + return PartMetadata{}, err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return PartMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return PartMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } bucketMeta, err := donut.getDonutBucketMetadata() if err != nil { - return PartMetadata{}, iodine.New(err, errParams) + return PartMetadata{}, err.Trace() } if _, ok := bucketMeta.Buckets[bucket].Multiparts[object]; !ok { - return PartMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil) + return PartMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID}) } if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok { - return PartMetadata{}, iodine.New(ObjectExists{Object: object}, errParams) + return PartMetadata{}, probe.New(ObjectExists{Object: object}) } objectPart := object + "/" + "multipart" + "/" + strconv.Itoa(partID) objmetadata, err := donut.buckets[bucket].WriteObject(objectPart, reader, size, expectedMD5Sum, metadata, signature) if err != nil { - return PartMetadata{}, iodine.New(err, errParams) + return PartMetadata{}, err.Trace() } partMetadata := PartMetadata{ PartNumber: partID, @@ -212,74 +197,61 @@ func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, multipartSession.Parts[strconv.Itoa(partID)] = partMetadata bucketMeta.Buckets[bucket].Multiparts[object] = multipartSession if err := donut.setDonutBucketMetadata(bucketMeta); err != nil { - return PartMetadata{}, iodine.New(err, errParams) + return PartMetadata{}, err.Trace() } return partMetadata, nil } // getObject - get object -func (donut API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err error) { - errParams := map[string]string{ - "bucket": bucket, - "object": object, - } +func (donut API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err *probe.Error) 
{ if bucket == "" || strings.TrimSpace(bucket) == "" { - return nil, 0, iodine.New(InvalidArgument{}, errParams) + return nil, 0, probe.New(InvalidArgument{}) } if object == "" || strings.TrimSpace(object) == "" { - return nil, 0, iodine.New(InvalidArgument{}, errParams) + return nil, 0, probe.New(InvalidArgument{}) } if err := donut.listDonutBuckets(); err != nil { - return nil, 0, iodine.New(err, nil) + return nil, 0, err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return nil, 0, iodine.New(BucketNotFound{Bucket: bucket}, errParams) + return nil, 0, probe.New(BucketNotFound{Bucket: bucket}) } return donut.buckets[bucket].ReadObject(object) } // getObjectMetadata - get object metadata -func (donut API) getObjectMetadata(bucket, object string) (ObjectMetadata, error) { - errParams := map[string]string{ - "bucket": bucket, - "object": object, - } +func (donut API) getObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) { if err := donut.listDonutBuckets(); err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams) + return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } bucketMeta, err := donut.getDonutBucketMetadata() if err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; !ok { - return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: object}, errParams) + return ObjectMetadata{}, probe.New(ObjectNotFound{Object: object}) } objectMetadata, err := donut.buckets[bucket].GetObjectMetadata(object) if err != nil { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } return objectMetadata, nil } // newMultipartUpload - new multipart upload request -func (donut API) newMultipartUpload(bucket, object, contentType 
string) (string, error) { - errParams := map[string]string{ - "bucket": bucket, - "object": object, - "contentType": contentType, - } +func (donut API) newMultipartUpload(bucket, object, contentType string) (string, *probe.Error) { if err := donut.listDonutBuckets(); err != nil { - return "", iodine.New(err, errParams) + return "", err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return "", iodine.New(BucketNotFound{Bucket: bucket}, errParams) + return "", probe.New(BucketNotFound{Bucket: bucket}) } allbuckets, err := donut.getDonutBucketMetadata() if err != nil { - return "", iodine.New(err, errParams) + return "", err.Trace() } bucketMetadata := allbuckets.Buckets[bucket] multiparts := make(map[string]MultiPartSession) @@ -302,40 +274,36 @@ func (donut API) newMultipartUpload(bucket, object, contentType string) (string, allbuckets.Buckets[bucket] = bucketMetadata if err := donut.setDonutBucketMetadata(allbuckets); err != nil { - return "", iodine.New(err, errParams) + return "", err.Trace() } return uploadID, nil } // listObjectParts list all object parts -func (donut API) listObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) { - errParams := map[string]string{ - "bucket": bucket, - "object": object, - } +func (donut API) listObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) { if bucket == "" || strings.TrimSpace(bucket) == "" { - return ObjectResourcesMetadata{}, iodine.New(InvalidArgument{}, errParams) + return ObjectResourcesMetadata{}, probe.New(InvalidArgument{}) } if object == "" || strings.TrimSpace(object) == "" { - return ObjectResourcesMetadata{}, iodine.New(InvalidArgument{}, errParams) + return ObjectResourcesMetadata{}, probe.New(InvalidArgument{}) } if err := donut.listDonutBuckets(); err != nil { - return ObjectResourcesMetadata{}, iodine.New(err, nil) + return ObjectResourcesMetadata{}, err.Trace() } if _, ok := 
donut.buckets[bucket]; !ok { - return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams) + return ObjectResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } allBuckets, err := donut.getDonutBucketMetadata() if err != nil { - return ObjectResourcesMetadata{}, iodine.New(err, errParams) + return ObjectResourcesMetadata{}, err.Trace() } bucketMetadata := allBuckets.Buckets[bucket] if _, ok := bucketMetadata.Multiparts[object]; !ok { - return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, errParams) + return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID}) } if bucketMetadata.Multiparts[object].UploadID != resources.UploadID { - return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, errParams) + return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID}) } objectResourcesMetadata := resources objectResourcesMetadata.Bucket = bucket @@ -358,7 +326,7 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource } part, ok := bucketMetadata.Multiparts[object].Parts[strconv.Itoa(i)] if !ok { - return ObjectResourcesMetadata{}, iodine.New(InvalidPart{}, nil) + return ObjectResourcesMetadata{}, probe.New(InvalidPart{}) } parts = append(parts, &part) } @@ -368,58 +336,57 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource } // completeMultipartUpload complete an incomplete multipart upload -func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, error) { - errParams := map[string]string{ - "bucket": bucket, - "object": object, - "uploadID": uploadID, - } +func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) { if bucket == "" || strings.TrimSpace(bucket) == "" { - return 
ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams) + return ObjectMetadata{}, probe.New(InvalidArgument{}) } if object == "" || strings.TrimSpace(object) == "" { - return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams) + return ObjectMetadata{}, probe.New(InvalidArgument{}) } if err := donut.listDonutBuckets(); err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams) + return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } allBuckets, err := donut.getDonutBucketMetadata() if err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } bucketMetadata := allBuckets.Buckets[bucket] if _, ok := bucketMetadata.Multiparts[object]; !ok { - return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, errParams) + return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID}) } if bucketMetadata.Multiparts[object].UploadID != uploadID { - return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, errParams) + return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID}) } - partBytes, err := ioutil.ReadAll(data) - if err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + var partBytes []byte + { + var err error + partBytes, err = ioutil.ReadAll(data) + if err != nil { + return ObjectMetadata{}, probe.New(err) + } } if signature != nil { ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256.Sum256(partBytes)[:])) if err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, err.Trace() } if !ok { - return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, errParams) + return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{}) } } parts := &CompleteMultipartUpload{} if err := xml.Unmarshal(partBytes, parts); err != nil 
{ - return ObjectMetadata{}, iodine.New(MalformedXML{}, errParams) + return ObjectMetadata{}, probe.New(MalformedXML{}) } if !sort.IsSorted(completedParts(parts.Part)) { - return ObjectMetadata{}, iodine.New(InvalidPartOrder{}, errParams) + return ObjectMetadata{}, probe.New(InvalidPartOrder{}) } for _, part := range parts.Part { if strings.Trim(part.ETag, "\"") != bucketMetadata.Multiparts[object].Parts[strconv.Itoa(part.PartNumber)].ETag { - return ObjectMetadata{}, iodine.New(InvalidPart{}, errParams) + return ObjectMetadata{}, probe.New(InvalidPart{}) } } var finalETagBytes []byte @@ -428,7 +395,7 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i for _, part := range bucketMetadata.Multiparts[object].Parts { partETagBytes, err := hex.DecodeString(part.ETag) if err != nil { - return ObjectMetadata{}, iodine.New(err, errParams) + return ObjectMetadata{}, probe.New(err) } finalETagBytes = append(finalETagBytes, partETagBytes...) finalSize += part.Size @@ -444,19 +411,16 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i } // listMultipartUploads list all multipart uploads -func (donut API) listMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) { - errParams := map[string]string{ - "bucket": bucket, - } +func (donut API) listMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) { if err := donut.listDonutBuckets(); err != nil { - return BucketMultipartResourcesMetadata{}, iodine.New(err, errParams) + return BucketMultipartResourcesMetadata{}, err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams) + return BucketMultipartResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } allbuckets, err := donut.getDonutBucketMetadata() if err != nil { - return 
BucketMultipartResourcesMetadata{}, iodine.New(err, errParams) + return BucketMultipartResourcesMetadata{}, err.Trace() } bucketMetadata := allbuckets.Buckets[bucket] var uploads []*UploadMetadata @@ -505,34 +469,29 @@ func (donut API) listMultipartUploads(bucket string, resources BucketMultipartRe } // abortMultipartUpload - abort a incomplete multipart upload -func (donut API) abortMultipartUpload(bucket, object, uploadID string) error { - errParams := map[string]string{ - "bucket": bucket, - "object": object, - "uploadID": uploadID, - } +func (donut API) abortMultipartUpload(bucket, object, uploadID string) *probe.Error { if err := donut.listDonutBuckets(); err != nil { - return iodine.New(err, errParams) + return err.Trace() } if _, ok := donut.buckets[bucket]; !ok { - return iodine.New(BucketNotFound{Bucket: bucket}, errParams) + return probe.New(BucketNotFound{Bucket: bucket}) } allbuckets, err := donut.getDonutBucketMetadata() if err != nil { - return iodine.New(err, errParams) + return err.Trace() } bucketMetadata := allbuckets.Buckets[bucket] if _, ok := bucketMetadata.Multiparts[object]; !ok { - return iodine.New(InvalidUploadID{UploadID: uploadID}, errParams) + return probe.New(InvalidUploadID{UploadID: uploadID}) } if bucketMetadata.Multiparts[object].UploadID != uploadID { - return iodine.New(InvalidUploadID{UploadID: uploadID}, errParams) + return probe.New(InvalidUploadID{UploadID: uploadID}) } delete(bucketMetadata.Multiparts, object) allbuckets.Buckets[bucket] = bucketMetadata if err := donut.setDonutBucketMetadata(allbuckets); err != nil { - return iodine.New(err, errParams) + return err.Trace() } return nil @@ -541,18 +500,18 @@ func (donut API) abortMultipartUpload(bucket, object, uploadID string) error { //// internal functions // getBucketMetadataWriters - -func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, error) { +func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, *probe.Error) { var writers []io.WriteCloser for _, 
node := range donut.nodes { disks, err := node.ListDisks() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } writers = make([]io.WriteCloser, len(disks)) for order, disk := range disks { bucketMetaDataWriter, err := disk.CreateFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig)) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } writers[order] = bucketMetaDataWriter } @@ -561,14 +520,14 @@ func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, error) { } // getBucketMetadataReaders - readers are returned in map rather than slice -func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, error) { +func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) { readers := make(map[int]io.ReadCloser) disks := make(map[int]disk.Disk) - var err error + var err *probe.Error for _, node := range donut.nodes { nDisks, err := node.ListDisks() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } for k, v := range nDisks { disks[k] = v @@ -583,22 +542,22 @@ func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, error) { readers[order] = bucketMetaDataReader } if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } return readers, nil } // setDonutBucketMetadata - -func (donut API) setDonutBucketMetadata(metadata *AllBuckets) error { +func (donut API) setDonutBucketMetadata(metadata *AllBuckets) *probe.Error { writers, err := donut.getBucketMetadataWriters() if err != nil { - return iodine.New(err, nil) + return err.Trace() } for _, writer := range writers { jenc := json.NewEncoder(writer) if err := jenc.Encode(metadata); err != nil { CleanupWritersOnError(writers) - return iodine.New(err, nil) + return probe.New(err) } } for _, writer := range writers { @@ -608,83 +567,85 @@ func (donut API) setDonutBucketMetadata(metadata *AllBuckets) error { } // getDonutBucketMetadata - -func (donut API) 
getDonutBucketMetadata() (*AllBuckets, error) { +func (donut API) getDonutBucketMetadata() (*AllBuckets, *probe.Error) { metadata := &AllBuckets{} - var err error readers, err := donut.getBucketMetadataReaders() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } for _, reader := range readers { defer reader.Close() } - for _, reader := range readers { - jenc := json.NewDecoder(reader) - if err = jenc.Decode(metadata); err == nil { - return metadata, nil + { + var err error + for _, reader := range readers { + jenc := json.NewDecoder(reader) + if err = jenc.Decode(metadata); err == nil { + return metadata, nil + } } + return nil, probe.New(err) } - return nil, iodine.New(err, nil) } // makeDonutBucket - -func (donut API) makeDonutBucket(bucketName, acl string) error { +func (donut API) makeDonutBucket(bucketName, acl string) *probe.Error { if err := donut.listDonutBuckets(); err != nil { - return iodine.New(err, nil) + return err.Trace() } if _, ok := donut.buckets[bucketName]; ok { - return iodine.New(BucketExists{Bucket: bucketName}, nil) + return probe.New(BucketExists{Bucket: bucketName}) } bucket, bucketMetadata, err := newBucket(bucketName, acl, donut.config.DonutName, donut.nodes) if err != nil { - return iodine.New(err, nil) + return err.Trace() } nodeNumber := 0 donut.buckets[bucketName] = bucket for _, node := range donut.nodes { disks, err := node.ListDisks() if err != nil { - return iodine.New(err, nil) + return err.Trace() } for order, disk := range disks { bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order) err := disk.MakeDir(filepath.Join(donut.config.DonutName, bucketSlice)) if err != nil { - return iodine.New(err, nil) + return err.Trace() } } nodeNumber = nodeNumber + 1 } metadata, err := donut.getDonutBucketMetadata() if err != nil { - if os.IsNotExist(iodine.ToError(err)) { + if os.IsNotExist(err.ToError()) { metadata := new(AllBuckets) metadata.Buckets = make(map[string]BucketMetadata) 
metadata.Buckets[bucketName] = bucketMetadata err = donut.setDonutBucketMetadata(metadata) if err != nil { - return iodine.New(err, nil) + return err.Trace() } return nil } - return iodine.New(err, nil) + return err.Trace() } metadata.Buckets[bucketName] = bucketMetadata err = donut.setDonutBucketMetadata(metadata) if err != nil { - return iodine.New(err, nil) + return err.Trace() } return nil } // listDonutBuckets - -func (donut API) listDonutBuckets() error { +func (donut API) listDonutBuckets() *probe.Error { var disks map[int]disk.Disk - var err error + var err *probe.Error for _, node := range donut.nodes { disks, err = node.ListDisks() if err != nil { - return iodine.New(err, nil) + return err.Trace() } } var dirs []os.FileInfo @@ -696,18 +657,18 @@ func (donut API) listDonutBuckets() error { } // if all disks are missing then return error if err != nil { - return iodine.New(err, nil) + return err.Trace() } for _, dir := range dirs { splitDir := strings.Split(dir.Name(), "$") if len(splitDir) < 3 { - return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil) + return probe.New(CorruptedBackend{Backend: dir.Name()}) } bucketName := splitDir[0] // we dont need this once we cache from makeDonutBucket() bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes) if err != nil { - return iodine.New(err, nil) + return err.Trace() } donut.buckets[bucketName] = bucket } diff --git a/pkg/donut/donut-v2.go b/pkg/donut/donut-v2.go index c49684900..81efd2180 100644 --- a/pkg/donut/donut-v2.go +++ b/pkg/donut/donut-v2.go @@ -34,7 +34,7 @@ import ( "github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/donut/cache/data" "github.com/minio/minio/pkg/donut/cache/metadata" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/quick" ) @@ -71,9 +71,9 @@ type storedBucket struct { } // New instantiate a new donut -func New() (Interface, error) { +func New() (Interface, *probe.Error) { 
var conf *Config - var err error + var err *probe.Error conf, err = LoadConfig() if err != nil { conf = &Config{ @@ -83,7 +83,7 @@ func New() (Interface, error) { DonutName: "", } if err := quick.CheckData(conf); err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } } a := API{config: conf} @@ -98,17 +98,17 @@ func New() (Interface, error) { if len(a.config.NodeDiskMap) > 0 { for k, v := range a.config.NodeDiskMap { if len(v) == 0 { - return nil, iodine.New(InvalidDisksArgument{}, nil) + return nil, probe.New(InvalidDisksArgument{}) } err := a.AttachNode(k, v) if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } } /// Initialization, populate all buckets into memory buckets, err := a.listBuckets() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } for k, v := range buckets { var newBucket = storedBucket{} @@ -126,58 +126,53 @@ func New() (Interface, error) { /// V2 API functions // GetObject - GET object from cache buffer -func (donut API) GetObject(w io.Writer, bucket string, object string, start, length int64) (int64, error) { +func (donut API) GetObject(w io.Writer, bucket string, object string, start, length int64) (int64, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() - errParams := map[string]string{ - "bucket": bucket, - "object": object, - "start": strconv.FormatInt(start, 10), - "length": strconv.FormatInt(length, 10), - } - if !IsValidBucket(bucket) { - return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams) + return 0, probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(object) { - return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams) + return 0, probe.New(ObjectNameInvalid{Object: object}) } if start < 0 { - return 0, iodine.New(InvalidRange{ + return 0, probe.New(InvalidRange{ Start: start, Length: length, - }, errParams) + }) } if !donut.storedBuckets.Exists(bucket) { - return 0, iodine.New(BucketNotFound{Bucket: bucket}, 
errParams) + return 0, probe.New(BucketNotFound{Bucket: bucket}) } objectKey := bucket + "/" + object data, ok := donut.objects.Get(objectKey) var written int64 - var err error if !ok { if len(donut.config.NodeDiskMap) > 0 { reader, size, err := donut.getObject(bucket, object) if err != nil { - return 0, iodine.New(err, nil) + return 0, err.Trace() } if start > 0 { if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil { - return 0, iodine.New(err, errParams) + return 0, probe.New(err) } } // new proxy writer to capture data read from disk pw := NewProxyWriter(w) - if length > 0 { - written, err = io.CopyN(pw, reader, length) - if err != nil { - return 0, iodine.New(err, errParams) - } - } else { - written, err = io.CopyN(pw, reader, size) - if err != nil { - return 0, iodine.New(err, errParams) + { + var err error + if length > 0 { + written, err = io.CopyN(pw, reader, length) + if err != nil { + return 0, probe.New(err) + } + } else { + written, err = io.CopyN(pw, reader, size) + if err != nil { + return 0, probe.New(err) + } } } /// cache object read from disk @@ -185,83 +180,86 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len pw.writtenBytes = nil go debug.FreeOSMemory() if !ok { - return 0, iodine.New(InternalError{}, errParams) + return 0, probe.New(InternalError{}) } return written, nil } - return 0, iodine.New(ObjectNotFound{Object: object}, errParams) + return 0, probe.New(ObjectNotFound{Object: object}) } - if start == 0 && length == 0 { - written, err = io.CopyN(w, bytes.NewBuffer(data), int64(donut.objects.Len(objectKey))) - if err != nil { - return 0, iodine.New(err, nil) - } - } else { - written, err = io.CopyN(w, bytes.NewBuffer(data[start:]), length) - if err != nil { - return 0, iodine.New(err, nil) + { + var err error + if start == 0 && length == 0 { + written, err = io.CopyN(w, bytes.NewBuffer(data), int64(donut.objects.Len(objectKey))) + if err != nil { + return 0, probe.New(err) + } + } else { + 
written, err = io.CopyN(w, bytes.NewBuffer(data[start:]), length) + if err != nil { + return 0, probe.New(err) + } } + return written, nil } - return written, nil } // GetBucketMetadata - -func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, error) { +func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return BucketMetadata{}, iodine.New(err, nil) + return BucketMetadata{}, err.Trace() } if !ok { - return BucketMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil) + return BucketMetadata{}, probe.New(SignatureDoesNotMatch{}) } } if !IsValidBucket(bucket) { - return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return BucketMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket}) } if !donut.storedBuckets.Exists(bucket) { if len(donut.config.NodeDiskMap) > 0 { bucketMetadata, err := donut.getBucketMetadata(bucket) if err != nil { - return BucketMetadata{}, iodine.New(err, nil) + return BucketMetadata{}, err.Trace() } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) storedBucket.bucketMetadata = bucketMetadata donut.storedBuckets.Set(bucket, storedBucket) } - return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return BucketMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } return donut.storedBuckets.Get(bucket).(storedBucket).bucketMetadata, nil } // SetBucketMetadata - -func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) error { +func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error { donut.lock.Lock() defer donut.lock.Unlock() if signature != nil { ok, err := 
signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return iodine.New(err, nil) + return err.Trace() } if !ok { - return iodine.New(SignatureDoesNotMatch{}, nil) + return probe.New(SignatureDoesNotMatch{}) } } if !IsValidBucket(bucket) { - return iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return probe.New(BucketNameInvalid{Bucket: bucket}) } if !donut.storedBuckets.Exists(bucket) { - return iodine.New(BucketNotFound{Bucket: bucket}, nil) + return probe.New(BucketNotFound{Bucket: bucket}) } if len(donut.config.NodeDiskMap) > 0 { if err := donut.setBucketMetadata(bucket, metadata); err != nil { - return iodine.New(err, nil) + return err.Trace() } } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) @@ -271,26 +269,26 @@ func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, si } // isMD5SumEqual - returns error if md5sum mismatches, success its `nil` -func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error { +func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error { if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" { expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) { - return iodine.New(BadDigest{}, nil) + return probe.New(BadDigest{}) } return nil } - return iodine.New(InvalidArgument{}, nil) + return probe.New(InvalidArgument{}) } // CreateObject - create an object -func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *Signature) (ObjectMetadata, error) { +func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata 
map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() @@ -299,35 +297,35 @@ func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, da // free debug.FreeOSMemory() - return objectMetadata, iodine.New(err, nil) + return objectMetadata, err.Trace() } // createObject - PUT object to cache buffer -func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, error) { +func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) { if len(donut.config.NodeDiskMap) == 0 { if size > int64(donut.config.MaxSize) { generic := GenericObjectError{Bucket: bucket, Object: key} - return ObjectMetadata{}, iodine.New(EntityTooLarge{ + return ObjectMetadata{}, probe.New(EntityTooLarge{ GenericObjectError: generic, Size: strconv.FormatInt(size, 10), MaxSize: strconv.FormatUint(donut.config.MaxSize, 10), - }, nil) + }) } } if !IsValidBucket(bucket) { - return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { - return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) + return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key}) } if !donut.storedBuckets.Exists(bucket) { - return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) // get object key objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { - return ObjectMetadata{}, iodine.New(ObjectExists{Object: key}, nil) + return ObjectMetadata{}, probe.New(ObjectExists{Object: key}) } if contentType == "" { @@ -338,7 +336,7 @@ func (donut API) 
createObject(bucket, key, contentType, expectedMD5Sum string, s expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection - return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil) + return ObjectMetadata{}, probe.New(InvalidDigest{Md5: expectedMD5Sum}) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } @@ -357,7 +355,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s signature, ) if err != nil { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } storedBucket.objectMetadata[objectKey] = objMetadata donut.storedBuckets.Set(bucket, storedBucket) @@ -377,7 +375,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s sha256hash.Write(byteBuffer[0:length]) ok := donut.objects.Append(objectKey, byteBuffer[0:length]) if !ok { - return ObjectMetadata{}, iodine.New(InternalError{}, nil) + return ObjectMetadata{}, probe.New(InternalError{}) } totalLength += int64(length) go debug.FreeOSMemory() @@ -385,26 +383,26 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s if totalLength != size { // Delete perhaps the object is already saved, due to the nature of append() donut.objects.Delete(objectKey) - return ObjectMetadata{}, iodine.New(IncompleteBody{Bucket: bucket, Object: key}, nil) + return ObjectMetadata{}, probe.New(IncompleteBody{Bucket: bucket, Object: key}) } if err != io.EOF { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, probe.New(err) } md5SumBytes := hash.Sum(nil) md5Sum := hex.EncodeToString(md5SumBytes) // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { - return ObjectMetadata{}, iodine.New(BadDigest{}, nil) + return 
ObjectMetadata{}, probe.New(BadDigest{}) } } if signature != nil { ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil))) if err != nil { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } if !ok { - return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil) + return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{}) } } @@ -426,7 +424,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s } // MakeBucket - create bucket in cache -func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *Signature) error { +func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *Signature) *probe.Error { donut.lock.Lock() defer donut.lock.Unlock() @@ -435,7 +433,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur if location != nil { locationConstraintBytes, err := ioutil.ReadAll(location) if err != nil { - return iodine.New(InternalError{}, nil) + return probe.New(InternalError{}) } locationSum = hex.EncodeToString(sha256.Sum256(locationConstraintBytes)[:]) } @@ -443,24 +441,24 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur if signature != nil { ok, err := signature.DoesSignatureMatch(locationSum) if err != nil { - return iodine.New(err, nil) + return err.Trace() } if !ok { - return iodine.New(SignatureDoesNotMatch{}, nil) + return probe.New(SignatureDoesNotMatch{}) } } if donut.storedBuckets.Stats().Items == totalBuckets { - return iodine.New(TooManyBuckets{Bucket: bucketName}, nil) + return probe.New(TooManyBuckets{Bucket: bucketName}) } if !IsValidBucket(bucketName) { - return iodine.New(BucketNameInvalid{Bucket: bucketName}, nil) + return probe.New(BucketNameInvalid{Bucket: bucketName}) } if !IsValidBucketACL(acl) { - return iodine.New(InvalidACL{ACL: acl}, nil) + return probe.New(InvalidACL{ACL: acl}) } if donut.storedBuckets.Exists(bucketName) { - 
return iodine.New(BucketExists{Bucket: bucketName}, nil) + return probe.New(BucketExists{Bucket: bucketName}) } if strings.TrimSpace(acl) == "" { @@ -469,7 +467,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur } if len(donut.config.NodeDiskMap) > 0 { if err := donut.makeBucket(bucketName, BucketACL(acl)); err != nil { - return iodine.New(err, nil) + return err.Trace() } } var newBucket = storedBucket{} @@ -485,28 +483,28 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur } // ListObjects - list objects from cache -func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *Signature) ([]ObjectMetadata, BucketResourcesMetadata, error) { +func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return nil, BucketResourcesMetadata{}, iodine.New(err, nil) + return nil, BucketResourcesMetadata{}, err.Trace() } if !ok { - return nil, BucketResourcesMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil) + return nil, BucketResourcesMetadata{}, probe.New(SignatureDoesNotMatch{}) } } if !IsValidBucket(bucket) { - return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidPrefix(resources.Prefix) { - return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil) + return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(ObjectNameInvalid{Object: resources.Prefix}) } if !donut.storedBuckets.Exists(bucket) { - return nil, BucketResourcesMetadata{IsTruncated: 
false}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(BucketNotFound{Bucket: bucket}) } var results []ObjectMetadata var keys []string @@ -519,7 +517,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, s resources.Maxkeys, ) if err != nil { - return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(err, nil) + return nil, BucketResourcesMetadata{IsTruncated: false}, probe.New(err) } resources.CommonPrefixes = listObjects.CommonPrefixes resources.IsTruncated = listObjects.IsTruncated @@ -588,17 +586,17 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } // ListBuckets - List buckets from cache -func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, error) { +func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } if !ok { - return nil, iodine.New(SignatureDoesNotMatch{}, nil) + return nil, probe.New(SignatureDoesNotMatch{}) } } @@ -606,7 +604,7 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, error) { if len(donut.config.NodeDiskMap) > 0 { buckets, err := donut.listBuckets() if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } for _, bucketMetadata := range buckets { results = append(results, bucketMetadata) @@ -622,29 +620,29 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, error) { } // GetObjectMetadata - get object metadata from cache -func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (ObjectMetadata, error) { +func (donut API) GetObjectMetadata(bucket, key 
string, signature *Signature) (ObjectMetadata, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } if !ok { - return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil) + return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{}) } } // check if bucket exists if !IsValidBucket(bucket) { - return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { - return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) + return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key}) } if !donut.storedBuckets.Exists(bucket) { - return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) objectKey := bucket + "/" + key @@ -654,14 +652,14 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob if len(donut.config.NodeDiskMap) > 0 { objMetadata, err := donut.getObjectMetadata(bucket, key) if err != nil { - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } // update storedBucket.objectMetadata[objectKey] = objMetadata donut.storedBuckets.Set(bucket, storedBucket) return objMetadata, nil } - return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil) + return ObjectMetadata{}, probe.New(ObjectNotFound{Object: key}) } // evictedObject callback function called when an item is evicted from memory diff --git a/pkg/donut/encoder.go b/pkg/donut/encoder.go index ee5005a53..77c954ca1 100644 --- a/pkg/donut/encoder.go +++ b/pkg/donut/encoder.go @@ -18,10 +18,9 @@ package donut import ( "io" - 
"strconv" encoding "github.com/minio/minio/pkg/erasure" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // encoder internal struct @@ -32,74 +31,71 @@ type encoder struct { } // getErasureTechnique - convert technique string into Technique type -func getErasureTechnique(technique string) (encoding.Technique, error) { +func getErasureTechnique(technique string) (encoding.Technique, *probe.Error) { switch true { case technique == "Cauchy": return encoding.Cauchy, nil case technique == "Vandermonde": return encoding.Cauchy, nil default: - return encoding.None, iodine.New(InvalidErasureTechnique{Technique: technique}, nil) + return encoding.None, probe.New(InvalidErasureTechnique{Technique: technique}) } } // newEncoder - instantiate a new encoder -func newEncoder(k, m uint8, technique string) (encoder, error) { - errParams := map[string]string{ - "k": strconv.FormatUint(uint64(k), 10), - "m": strconv.FormatUint(uint64(m), 10), - "technique": technique, - } +func newEncoder(k, m uint8, technique string) (encoder, *probe.Error) { e := encoder{} t, err := getErasureTechnique(technique) if err != nil { - return encoder{}, iodine.New(err, errParams) + return encoder{}, err.Trace() } - params, err := encoding.ValidateParams(k, m, t) - if err != nil { - return encoder{}, iodine.New(err, errParams) + { + params, err := encoding.ValidateParams(k, m, t) + if err != nil { + return encoder{}, probe.New(err) + } + e.encoder = encoding.NewErasure(params) + e.k = k + e.m = m + e.technique = t + return e, nil } - e.encoder = encoding.NewErasure(params) - e.k = k - e.m = m - e.technique = t - return e, nil } // TODO - think again if this is needed // GetEncodedBlockLen - wrapper around erasure function with the same name -func (e encoder) GetEncodedBlockLen(dataLength int) (int, error) { +func (e encoder) GetEncodedBlockLen(dataLength int) (int, *probe.Error) { if dataLength <= 0 { - return 0, iodine.New(InvalidArgument{}, nil) + return 0, 
probe.New(InvalidArgument{}) } return encoding.GetEncodedBlockLen(dataLength, e.k), nil } // Encode - erasure code input bytes -func (e encoder) Encode(data []byte) (encodedData [][]byte, err error) { +func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) { if data == nil { - return nil, iodine.New(InvalidArgument{}, nil) + return nil, probe.New(InvalidArgument{}) } - encodedData, err = e.encoder.Encode(data) + encodedData, err := e.encoder.Encode(data) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } return encodedData, nil } -func (e encoder) EncodeStream(data io.Reader, size int64) (encodedData [][]byte, inputData []byte, err error) { - encodedData, inputData, err = e.encoder.EncodeStream(data, size) +func (e encoder) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, *probe.Error) { + encodedData, inputData, err := e.encoder.EncodeStream(data, size) if err != nil { - return nil, nil, iodine.New(err, nil) + return nil, nil, probe.New(err) } return encodedData, inputData, nil } // Decode - erasure decode input encoded bytes -func (e encoder) Decode(encodedData [][]byte, dataLength int) (data []byte, err error) { +func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) { decodedData, err := e.encoder.Decode(encodedData, dataLength) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } return decodedData, nil } diff --git a/pkg/donut/heal.go b/pkg/donut/heal.go index ed4dad776..c274440c7 100644 --- a/pkg/donut/heal.go +++ b/pkg/donut/heal.go @@ -22,23 +22,23 @@ import ( "path/filepath" "github.com/minio/minio/pkg/donut/disk" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // healBuckets heal bucket slices -func (donut API) healBuckets() error { +func (donut API) healBuckets() *probe.Error { if err := donut.listDonutBuckets(); err != nil { - return iodine.New(err, nil) + return err.Trace() } bucketMetadata, err := 
donut.getDonutBucketMetadata() if err != nil { - return iodine.New(err, nil) + return err.Trace() } disks := make(map[int]disk.Disk) for _, node := range donut.nodes { nDisks, err := node.ListDisks() if err != nil { - return iodine.New(err, nil) + return err.Trace() } for k, v := range nDisks { disks[k] = v @@ -49,18 +49,18 @@ func (donut API) healBuckets() error { disk.MakeDir(donut.config.DonutName) bucketMetadataWriter, err := disk.CreateFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig)) if err != nil { - return iodine.New(err, nil) + return err.Trace() } defer bucketMetadataWriter.Close() jenc := json.NewEncoder(bucketMetadataWriter) if err := jenc.Encode(bucketMetadata); err != nil { - return iodine.New(err, nil) + return probe.New(err) } for bucket := range bucketMetadata.Buckets { bucketSlice := fmt.Sprintf("%s$0$%d", bucket, order) // TODO handle node slices err := disk.MakeDir(filepath.Join(donut.config.DonutName, bucketSlice)) if err != nil { - return iodine.New(err, nil) + return err.Trace() } } } diff --git a/pkg/donut/interfaces.go b/pkg/donut/interfaces.go index 0f3536453..be68dc5ae 100644 --- a/pkg/donut/interfaces.go +++ b/pkg/donut/interfaces.go @@ -16,7 +16,11 @@ package donut -import "io" +import ( + "io" + + "github.com/minio/minio/pkg/probe" +) // Collection of Donut specification interfaces @@ -29,39 +33,39 @@ type Interface interface { // CloudStorage is a donut cloud storage interface type CloudStorage interface { // Storage service operations - GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, error) - SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) error - ListBuckets(signature *Signature) ([]BucketMetadata, error) - MakeBucket(bucket string, ACL string, location io.Reader, signature *Signature) error + GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error) + SetBucketMetadata(bucket string, metadata map[string]string, signature 
*Signature) *probe.Error + ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error) + MakeBucket(bucket string, ACL string, location io.Reader, signature *Signature) *probe.Error // Bucket operations - ListObjects(string, BucketResourcesMetadata, *Signature) ([]ObjectMetadata, BucketResourcesMetadata, error) + ListObjects(string, BucketResourcesMetadata, *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) // Object operations - GetObject(w io.Writer, bucket, object string, start, length int64) (int64, error) - GetObjectMetadata(bucket, object string, signature *Signature) (ObjectMetadata, error) + GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error) + GetObjectMetadata(bucket, object string, signature *Signature) (ObjectMetadata, *probe.Error) // bucket, object, expectedMD5Sum, size, reader, metadata, signature - CreateObject(string, string, string, int64, io.Reader, map[string]string, *Signature) (ObjectMetadata, error) + CreateObject(string, string, string, int64, io.Reader, map[string]string, *Signature) (ObjectMetadata, *probe.Error) Multipart } // Multipart API type Multipart interface { - NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, error) - AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) error - CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *Signature) (string, error) - CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, error) - ListMultipartUploads(string, BucketMultipartResourcesMetadata, *Signature) (BucketMultipartResourcesMetadata, error) - ListObjectParts(string, string, ObjectResourcesMetadata, *Signature) (ObjectResourcesMetadata, error) + NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error) + AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error + 
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *Signature) (string, *probe.Error) + CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) + ListMultipartUploads(string, BucketMultipartResourcesMetadata, *Signature) (BucketMultipartResourcesMetadata, *probe.Error) + ListObjectParts(string, string, ObjectResourcesMetadata, *Signature) (ObjectResourcesMetadata, *probe.Error) } // Management is a donut management system interface type Management interface { - Heal() error - Rebalance() error - Info() (map[string][]string, error) + Heal() *probe.Error + Rebalance() *probe.Error + Info() (map[string][]string, *probe.Error) - AttachNode(hostname string, disks []string) error - DetachNode(hostname string) error + AttachNode(hostname string, disks []string) *probe.Error + DetachNode(hostname string) *probe.Error } diff --git a/pkg/donut/management.go b/pkg/donut/management.go index 38b7ca023..7dafa1329 100644 --- a/pkg/donut/management.go +++ b/pkg/donut/management.go @@ -18,16 +18,16 @@ package donut import ( "github.com/minio/minio/pkg/donut/disk" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // Info - return info about donut configuration -func (donut API) Info() (nodeDiskMap map[string][]string, err error) { +func (donut API) Info() (nodeDiskMap map[string][]string, err *probe.Error) { nodeDiskMap = make(map[string][]string) for nodeName, node := range donut.nodes { disks, err := node.ListDisks() if err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } diskList := make([]string, len(disks)) for diskOrder, disk := range disks { @@ -39,13 +39,13 @@ func (donut API) Info() (nodeDiskMap map[string][]string, err error) { } // AttachNode - attach node -func (donut API) AttachNode(hostname string, disks []string) error { +func (donut API) AttachNode(hostname string, disks []string) *probe.Error { if hostname == "" || 
len(disks) == 0 { - return iodine.New(InvalidArgument{}, nil) + return probe.New(InvalidArgument{}) } node, err := newNode(hostname) if err != nil { - return iodine.New(err, nil) + return err.Trace() } donut.nodes[hostname] = node for i, d := range disks { @@ -54,28 +54,28 @@ func (donut API) AttachNode(hostname string, disks []string) error { continue } if err := newDisk.MakeDir(donut.config.DonutName); err != nil { - return iodine.New(err, nil) + return err.Trace() } if err := node.AttachDisk(newDisk, i); err != nil { - return iodine.New(err, nil) + return err.Trace() } } return nil } // DetachNode - detach node -func (donut API) DetachNode(hostname string) error { +func (donut API) DetachNode(hostname string) *probe.Error { delete(donut.nodes, hostname) return nil } // Rebalance - rebalance an existing donut with new disks and nodes -func (donut API) Rebalance() error { - return iodine.New(APINotImplemented{API: "management.Rebalance"}, nil) +func (donut API) Rebalance() *probe.Error { + return probe.New(APINotImplemented{API: "management.Rebalance"}) } // Heal - heal your donuts -func (donut API) Heal() error { +func (donut API) Heal() *probe.Error { // TODO handle data heal return donut.healBuckets() } diff --git a/pkg/donut/multipart.go b/pkg/donut/multipart.go index 4e316427b..ca7fad101 100644 --- a/pkg/donut/multipart.go +++ b/pkg/donut/multipart.go @@ -34,41 +34,41 @@ import ( "github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/donut/cache/data" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) /// V2 API functions // NewMultipartUpload - initiate a new multipart session -func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, error) { +func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() if !IsValidBucket(bucket) { - return "", 
iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return "", probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { - return "", iodine.New(ObjectNameInvalid{Object: key}, nil) + return "", probe.New(ObjectNameInvalid{Object: key}) } if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return "", iodine.New(err, nil) + return "", err.Trace() } if !ok { - return "", iodine.New(SignatureDoesNotMatch{}, nil) + return "", probe.New(SignatureDoesNotMatch{}) } } if len(donut.config.NodeDiskMap) > 0 { return donut.newMultipartUpload(bucket, key, contentType) } if !donut.storedBuckets.Exists(bucket) { - return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) + return "", probe.New(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { - return "", iodine.New(ObjectExists{Object: key}, nil) + return "", probe.New(ObjectExists{Object: key}) } id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String()) uploadIDSum := sha512.Sum512(id) @@ -88,57 +88,57 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature * } // AbortMultipartUpload - abort an incomplete multipart session -func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) error { +func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error { donut.lock.Lock() defer donut.lock.Unlock() if !IsValidBucket(bucket) { - return iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { - return iodine.New(ObjectNameInvalid{Object: key}, nil) + return probe.New(ObjectNameInvalid{Object: key}) } if signature != nil { ok, err := 
signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return iodine.New(err, nil) + return err.Trace() } if !ok { - return iodine.New(SignatureDoesNotMatch{}, nil) + return probe.New(SignatureDoesNotMatch{}) } } if len(donut.config.NodeDiskMap) > 0 { return donut.abortMultipartUpload(bucket, key, uploadID) } if !donut.storedBuckets.Exists(bucket) { - return iodine.New(BucketNotFound{Bucket: bucket}, nil) + return probe.New(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) if storedBucket.multiPartSession[key].UploadID != uploadID { - return iodine.New(InvalidUploadID{UploadID: uploadID}, nil) + return probe.New(InvalidUploadID{UploadID: uploadID}) } donut.cleanupMultipartSession(bucket, key, uploadID) return nil } // CreateObjectPart - create a part in a multipart session -func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, error) { +func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) { donut.lock.Lock() etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data, signature) donut.lock.Unlock() // possible free debug.FreeOSMemory() - return etag, iodine.New(err, nil) + return etag, err.Trace() } // createObject - internal wrapper function called by CreateObjectPart -func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, error) { +func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) { if !IsValidBucket(bucket) { - return "", 
iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return "", probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { - return "", iodine.New(ObjectNameInvalid{Object: key}, nil) + return "", probe.New(ObjectNameInvalid{Object: key}) } if len(donut.config.NodeDiskMap) > 0 { metadata := make(map[string]string) @@ -151,24 +151,24 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection - return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil) + return "", probe.New(InvalidDigest{Md5: expectedMD5Sum}) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } partMetadata, err := donut.putObjectPart(bucket, key, expectedMD5Sum, uploadID, partID, data, size, metadata, signature) if err != nil { - return "", iodine.New(err, nil) + return "", err.Trace() } return partMetadata.ETag, nil } if !donut.storedBuckets.Exists(bucket) { - return "", iodine.New(BucketNotFound{Bucket: bucket}, nil) + return "", probe.New(BucketNotFound{Bucket: bucket}) } strBucket := donut.storedBuckets.Get(bucket).(storedBucket) // Verify upload id if strBucket.multiPartSession[key].UploadID != uploadID { - return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil) + return "", probe.New(InvalidUploadID{UploadID: uploadID}) } // get object key @@ -185,7 +185,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection - return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil) + return "", probe.New(InvalidDigest{Md5: expectedMD5Sum}) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } @@ -194,27 +194,27 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont hash := md5.New() 
sha256hash := sha256.New() - var err error var totalLength int64 + var err error for err == nil { var length int byteBuffer := make([]byte, 1024*1024) - length, err = data.Read(byteBuffer) + length, err = data.Read(byteBuffer) // do not read error return error here, we will handle this error later hash.Write(byteBuffer[0:length]) sha256hash.Write(byteBuffer[0:length]) ok := donut.multiPartObjects[uploadID].Append(partID, byteBuffer[0:length]) if !ok { - return "", iodine.New(InternalError{}, nil) + return "", probe.New(InternalError{}) } totalLength += int64(length) go debug.FreeOSMemory() } if totalLength != size { donut.multiPartObjects[uploadID].Delete(partID) - return "", iodine.New(IncompleteBody{Bucket: bucket, Object: key}, nil) + return "", probe.New(IncompleteBody{Bucket: bucket, Object: key}) } if err != io.EOF { - return "", iodine.New(err, nil) + return "", probe.New(err) } md5SumBytes := hash.Sum(nil) @@ -222,17 +222,19 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { - return "", iodine.New(BadDigest{}, nil) + return "", err.Trace() } } if signature != nil { - ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil))) - if err != nil { - return "", iodine.New(err, nil) - } - if !ok { - return "", iodine.New(SignatureDoesNotMatch{}, nil) + { + ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil))) + if err != nil { + return "", err.Trace() + } + if !ok { + return "", probe.New(SignatureDoesNotMatch{}) + } } } @@ -264,16 +266,16 @@ func (donut API) cleanupMultipartSession(bucket, key, uploadID string) { } // CompleteMultipartUpload - complete a multipart upload and persist the data -func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, 
data io.Reader, signature *Signature) (ObjectMetadata, error) { +func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) { donut.lock.Lock() if !IsValidBucket(bucket) { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return ObjectMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) + return ObjectMetadata{}, probe.New(ObjectNameInvalid{Object: key}) } if len(donut.config.NodeDiskMap) > 0 { donut.lock.Unlock() @@ -282,38 +284,38 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R if !donut.storedBuckets.Exists(bucket) { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return ObjectMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) // Verify upload id if storedBucket.multiPartSession[key].UploadID != uploadID { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil) + return ObjectMetadata{}, probe.New(InvalidUploadID{UploadID: uploadID}) } partBytes, err := ioutil.ReadAll(data) if err != nil { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, probe.New(err) } if signature != nil { ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256.Sum256(partBytes)[:])) if err != nil { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, err.Trace() } if !ok { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil) + return ObjectMetadata{}, probe.New(SignatureDoesNotMatch{}) } } parts := &CompleteMultipartUpload{} if err := xml.Unmarshal(partBytes, parts); err != nil { donut.lock.Unlock() - return 
ObjectMetadata{}, iodine.New(MalformedXML{}, nil) + return ObjectMetadata{}, probe.New(MalformedXML{}) } if !sort.IsSorted(completedParts(parts.Part)) { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(InvalidPartOrder{}, nil) + return ObjectMetadata{}, probe.New(InvalidPartOrder{}) } var size int64 @@ -323,7 +325,7 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R object, ok := donut.multiPartObjects[uploadID].Get(parts.Part[i].PartNumber) if ok == false { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(InvalidPart{}, nil) + return ObjectMetadata{}, probe.New(InvalidPart{}) } size += int64(len(object)) calcMD5Bytes := md5.Sum(object) @@ -331,16 +333,15 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\"")) if err != nil { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: recvMD5}, nil) + return ObjectMetadata{}, probe.New(InvalidDigest{Md5: recvMD5}) } if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(BadDigest{}, nil) + return ObjectMetadata{}, probe.New(BadDigest{}) } - _, err = io.Copy(&fullObject, bytes.NewBuffer(object)) - if err != nil { + if _, err := io.Copy(&fullObject, bytes.NewBuffer(object)); err != nil { donut.lock.Unlock() - return ObjectMetadata{}, iodine.New(err, nil) + return ObjectMetadata{}, probe.New(err) } object = nil go debug.FreeOSMemory() @@ -350,18 +351,20 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R // this is needed for final verification inside CreateObject, do not convert this to hex md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:]) donut.lock.Unlock() - objectMetadata, err := donut.CreateObject(bucket, key, md5sum, size, &fullObject, nil, nil) - if err != nil { - // No need to call internal cleanup functions here, caller will call AbortMultipartUpload() - 
// which would in-turn cleanup properly in accordance with S3 Spec - return ObjectMetadata{}, iodine.New(err, nil) - } - fullObject.Reset() + { + objectMetadata, err := donut.CreateObject(bucket, key, md5sum, size, &fullObject, nil, nil) + if err != nil { + // No need to call internal cleanup functions here, caller should call AbortMultipartUpload() + // which would in-turn cleanup properly in accordance with S3 Spec + return ObjectMetadata{}, err.Trace() + } + fullObject.Reset() - donut.lock.Lock() - donut.cleanupMultipartSession(bucket, key, uploadID) - donut.lock.Unlock() - return objectMetadata, nil + donut.lock.Lock() + donut.cleanupMultipartSession(bucket, key, uploadID) + donut.lock.Unlock() + return objectMetadata, nil + } } // byKey is a sortable interface for UploadMetadata slice @@ -372,7 +375,7 @@ func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key } // ListMultipartUploads - list incomplete multipart sessions for a given bucket -func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *Signature) (BucketMultipartResourcesMetadata, error) { +func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *Signature) (BucketMultipartResourcesMetadata, *probe.Error) { // TODO handle delimiter donut.lock.Lock() defer donut.lock.Unlock() @@ -380,15 +383,15 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return BucketMultipartResourcesMetadata{}, iodine.New(err, nil) + return BucketMultipartResourcesMetadata{}, err.Trace() } if !ok { - return BucketMultipartResourcesMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil) + return BucketMultipartResourcesMetadata{}, probe.New(SignatureDoesNotMatch{}) } } if 
!IsValidBucket(bucket) { - return BucketMultipartResourcesMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return BucketMultipartResourcesMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket}) } if len(donut.config.NodeDiskMap) > 0 { @@ -396,7 +399,7 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe } if !donut.storedBuckets.Exists(bucket) { - return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return BucketMultipartResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) @@ -454,7 +457,7 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } // ListObjectParts - list parts from incomplete multipart session for a given object -func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *Signature) (ObjectResourcesMetadata, error) { +func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *Signature) (ObjectResourcesMetadata, *probe.Error) { // Verify upload id donut.lock.Lock() defer donut.lock.Unlock() @@ -462,18 +465,18 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { - return ObjectResourcesMetadata{}, iodine.New(err, nil) + return ObjectResourcesMetadata{}, err.Trace() } if !ok { - return ObjectResourcesMetadata{}, iodine.New(SignatureDoesNotMatch{}, nil) + return ObjectResourcesMetadata{}, probe.New(SignatureDoesNotMatch{}) } } if !IsValidBucket(bucket) { - return ObjectResourcesMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil) + return ObjectResourcesMetadata{}, probe.New(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) 
{ - return ObjectResourcesMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil) + return ObjectResourcesMetadata{}, probe.New(ObjectNameInvalid{Object: key}) } if len(donut.config.NodeDiskMap) > 0 { @@ -481,14 +484,14 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe } if !donut.storedBuckets.Exists(bucket) { - return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil) + return ObjectResourcesMetadata{}, probe.New(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) if _, ok := storedBucket.multiPartSession[key]; ok == false { - return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil) + return ObjectResourcesMetadata{}, probe.New(ObjectNotFound{Object: key}) } if storedBucket.multiPartSession[key].UploadID != resources.UploadID { - return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, nil) + return ObjectResourcesMetadata{}, probe.New(InvalidUploadID{UploadID: resources.UploadID}) } storedParts := storedBucket.partMetadata[key] objectResourcesMetadata := resources @@ -512,7 +515,7 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe } part, ok := storedParts[i] if !ok { - return ObjectResourcesMetadata{}, iodine.New(InvalidPart{}, nil) + return ObjectResourcesMetadata{}, probe.New(InvalidPart{}) } parts = append(parts, &part) } diff --git a/pkg/donut/node.go b/pkg/donut/node.go index 1f02431f2..ba3be8d76 100644 --- a/pkg/donut/node.go +++ b/pkg/donut/node.go @@ -18,7 +18,7 @@ package donut import ( "github.com/minio/minio/pkg/donut/disk" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // node struct internal @@ -28,9 +28,9 @@ type node struct { } // newNode - instantiates a new node -func newNode(hostname string) (node, error) { +func newNode(hostname string) (node, *probe.Error) { if hostname == "" { - return node{}, 
iodine.New(InvalidArgument{}, nil) + return node{}, probe.New(InvalidArgument{}) } disks := make(map[int]disk.Disk) n := node{ @@ -46,31 +46,31 @@ func (n node) GetHostname() string { } // ListDisks - return number of disks -func (n node) ListDisks() (map[int]disk.Disk, error) { +func (n node) ListDisks() (map[int]disk.Disk, *probe.Error) { return n.disks, nil } // AttachDisk - attach a disk -func (n node) AttachDisk(disk disk.Disk, diskOrder int) error { +func (n node) AttachDisk(disk disk.Disk, diskOrder int) *probe.Error { if diskOrder < 0 { - return iodine.New(InvalidArgument{}, nil) + return probe.New(InvalidArgument{}) } n.disks[diskOrder] = disk return nil } // DetachDisk - detach a disk -func (n node) DetachDisk(diskOrder int) error { +func (n node) DetachDisk(diskOrder int) *probe.Error { delete(n.disks, diskOrder) return nil } // SaveConfig - save node configuration -func (n node) SaveConfig() error { - return iodine.New(NotImplemented{Function: "SaveConfig"}, nil) +func (n node) SaveConfig() *probe.Error { + return probe.New(NotImplemented{Function: "SaveConfig"}) } // LoadConfig - load node configuration from saved configs -func (n node) LoadConfig() error { - return iodine.New(NotImplemented{Function: "LoadConfig"}, nil) +func (n node) LoadConfig() *probe.Error { + return probe.New(NotImplemented{Function: "LoadConfig"}) } diff --git a/pkg/donut/signature-v4.go b/pkg/donut/signature-v4.go index b344ff798..7d5a1b4e0 100644 --- a/pkg/donut/signature-v4.go +++ b/pkg/donut/signature-v4.go @@ -28,7 +28,7 @@ import ( "unicode/utf8" "github.com/minio/minio/pkg/crypto/sha256" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // Signature - local variables @@ -59,7 +59,7 @@ func sumHMAC(key []byte, data []byte) []byte { // // This function on the other hand is a direct replacement for url.Encode() technique to support // pretty much every UTF-8 character. 
-func urlEncodeName(name string) (string, error) { +func urlEncodeName(name string) (string, *probe.Error) { // if object matches reserved string, no need to encode them reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") if reservedNames.MatchString(name) { @@ -78,7 +78,7 @@ func urlEncodeName(name string) (string, error) { default: len := utf8.RuneLen(s) if len < 0 { - return "", iodine.New(InvalidArgument{}, nil) + return "", probe.New(InvalidArgument{}) } u := make([]byte, len) utf8.EncodeRune(u, s) @@ -212,7 +212,7 @@ func (r *Signature) getSignature(signingKey []byte, stringToSign string) string // DoesSignatureMatch - Verify authorization header with calculated header in accordance with - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // returns true if matches, false other wise if error is not nil then it is always false -func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, error) { +func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) { // set new calulated payload r.Request.Header.Set("X-Amz-Content-Sha256", hashedPayload) @@ -220,12 +220,12 @@ func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, error) { var date string if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" { if date = r.Request.Header.Get("Date"); date == "" { - return false, iodine.New(MissingDateHeader{}, nil) + return false, probe.New(MissingDateHeader{}) } } t, err := time.Parse(iso8601Format, date) if err != nil { - return false, iodine.New(err, nil) + return false, probe.New(err) } canonicalRequest := r.getCanonicalRequest() stringToSign := r.getStringToSign(canonicalRequest, t) diff --git a/pkg/iodine/iodine.go b/pkg/iodine/iodine.go deleted file mode 100644 index 4f78f2eae..000000000 --- a/pkg/iodine/iodine.go +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2015 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iodine - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - - "github.com/dustin/go-humanize" -) - -// Error is the iodine error which contains a pointer to the original error -// and stack traces. -type Error struct { - EmbeddedError error `json:"-"` - ErrorMessage string - ErrorType string - - Stack []StackEntry -} - -// StackEntry contains the entry in the stack trace -type StackEntry struct { - Host string - File string - Func string - Line int - Data map[string]string -} - -var gopathSource string - -var globalState = struct { - sync.RWMutex - m map[string]string -}{m: make(map[string]string)} - -// SetGlobalState - set global state -func SetGlobalState(key, value string) { - globalState.Lock() - globalState.m[key] = value - globalState.Unlock() -} - -// ClearGlobalState - clear info in globalState struct -func ClearGlobalState() { - globalState.Lock() - for k := range globalState.m { - delete(globalState.m, k) - } - globalState.Unlock() -} - -// GetGlobalState - get map from globalState struct -func GetGlobalState() map[string]string { - result := make(map[string]string) - globalState.RLock() - for k, v := range globalState.m { - result[k] = v - } - globalState.RUnlock() - return result -} - -// GetGlobalStateKey - get value for key from globalState struct -func GetGlobalStateKey(k string) string { - 
globalState.RLock() - result, _ := globalState.m[k] - globalState.RUnlock() - return result -} - -// ToError returns the input if it is not an iodine error. It returns the embedded error if it is an iodine error. If nil, returns nil. -func ToError(err error) error { - switch err := err.(type) { - case nil: - { - return nil - } - case Error: - { - if err.EmbeddedError != nil { - return err.EmbeddedError - } - return errors.New(err.ErrorMessage) - } - default: - { - return err - } - } -} - -// New - instantiate an error, turning it into an iodine error. -// Adds an initial stack trace. -func New(err error, data map[string]string) error { - if err != nil { - entry := createStackEntry() - var newErr Error - - // check if error is wrapped - switch typedError := err.(type) { - case Error: - { - newErr = typedError - } - default: - { - newErr = Error{ - EmbeddedError: err, - ErrorMessage: err.Error(), - ErrorType: reflect.TypeOf(err).String(), - Stack: []StackEntry{}, - } - } - } - for k, v := range data { - entry.Data[k] = v - } - newErr.Stack = append(newErr.Stack, entry) - return newErr - } - return nil -} - -// createStackEntry - create stack entries -func createStackEntry() StackEntry { - host, _ := os.Hostname() - pc, file, line, _ := runtime.Caller(2) - function := runtime.FuncForPC(pc).Name() - _, function = filepath.Split(function) - file = strings.TrimPrefix(file, gopathSource) // trim gopathSource from file - - data := GetGlobalState() - for k, v := range getSystemData() { - data[k] = v - } - - entry := StackEntry{ - Host: host, - File: file, - Func: function, - Line: line, - Data: data, - } - return entry -} - -func getSystemData() map[string]string { - host, err := os.Hostname() - if err != nil { - host = "" - } - memstats := &runtime.MemStats{} - runtime.ReadMemStats(memstats) - return map[string]string{ - "sys.host": host, - "sys.os": runtime.GOOS, - "sys.arch": runtime.GOARCH, - "sys.go": runtime.Version(), - "sys.cpus": strconv.Itoa(runtime.NumCPU()), - 
"sys.mem.used": humanize.Bytes(memstats.Alloc), - "sys.mem.allocated": humanize.Bytes(memstats.TotalAlloc), - "sys.mem.heap.used": humanize.Bytes(memstats.HeapAlloc), - "sys.mem.heap.allocated": humanize.Bytes(memstats.HeapSys), - } -} - -// Annotate an error with a stack entry and returns itself -// -// func (err *WrappedError) Annotate(info map[string]string) *WrappedError { -// entry := createStackEntry() -// for k, v := range info { -// entry.Data[k] = v -// } -// err.Stack = append(err.Stack, entry) -// return err -// } - -// EmitJSON writes JSON output for the error -func (err Error) EmitJSON() ([]byte, error) { - return json.MarshalIndent(err, "", "\t") -} - -// EmitHumanReadable returns a human readable error message -func (err Error) EmitHumanReadable() string { - var errorBuffer bytes.Buffer - fmt.Fprintln(&errorBuffer, err.ErrorMessage) - for i, entry := range err.Stack { - prettyData, _ := json.Marshal(entry.Data) - fmt.Fprintln(&errorBuffer, "-", i, entry.Host+":"+entry.File+":"+strconv.Itoa(entry.Line)+" "+entry.Func+"():", string(prettyData)) - } - return string(errorBuffer.Bytes()) -} - -// Emits the original error message -func (err Error) Error() string { - return err.EmitHumanReadable() -} - -func init() { - _, iodineFile, _, _ := runtime.Caller(0) - iodineFile = filepath.Dir(iodineFile) // trim iodine.go - iodineFile = filepath.Dir(iodineFile) // trim iodine - iodineFile = filepath.Dir(iodineFile) // trim pkg - iodineFile = filepath.Dir(iodineFile) // trim minio - iodineFile = filepath.Dir(iodineFile) // trim minio - gopathSource = filepath.Dir(iodineFile) + "/" // trim github.com -} diff --git a/pkg/iodine/iodine_test.go b/pkg/iodine/iodine_test.go deleted file mode 100644 index c7fa0a132..000000000 --- a/pkg/iodine/iodine_test.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Iodine, (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iodine - -import ( - "errors" - "fmt" - "testing" - - "encoding/json" - "os" -) - -func TestIodine(t *testing.T) { - iodineError := New(errors.New("Hello"), nil) - iodineError = New(iodineError, nil) - iodineError = New(iodineError, nil) - iodineError = New(iodineError, nil) - - switch typedError := iodineError.(type) { - case Error: - { - // Visually watch out for formating errors - fmt.Println(typedError.EmitHumanReadable()) - - if len(typedError.Stack) != 4 { - t.Fail() - } - _, err := json.MarshalIndent(typedError, "", " ") - if err != nil { - t.Fail() - } - } - default: - { - t.Fail() - } - } -} - -func TestState(t *testing.T) { - SetGlobalState("hello", "world") - result := GetGlobalStateKey("hello") - if result != "world" { - t.Error("global state not set: hello->world") - t.Fail() - } - ClearGlobalState() - if len(GetGlobalState()) != 0 { - t.Fail() - } - SetGlobalState("foo", "bar") - err := New(errors.New("a simple error"), nil) - switch typedError := err.(type) { - case Error: - { - if res, ok := typedError.Stack[0].Data["foo"]; ok { - if res != "bar" { - t.Error("global state not set: foo->bar") - } - } else { - t.Fail() - } - typedError = New(typedError, map[string]string{"foo2": "bar2"}).(Error) - if res, ok := typedError.Stack[0].Data["foo"]; ok { - if res != "bar" { - t.Error("annotate should not modify previous data entries") - } - } else { - t.Error("annotate should not remove previous data entries") - } - if res, ok := typedError.Stack[1].Data["foo"]; ok { - if res != "bar" { - t.Error("global state should set 
value properly in annotate") - } - } else { - t.Error("global state should set key properly in annotate") - } - if res, ok := typedError.Stack[1].Data["foo2"]; ok { - if res != "bar2" { - // typedError = Error(typedError, nil).(WrappedError) - t.Error("foo2 -> bar should be set") - } - } else { - // typedError = Error(typedError, nil).(WrappedError) - t.Error("foo2 should be set") - } - } - } -} - -func TestToError(t *testing.T) { - _, err := os.Stat("hello") - ierr := New(err, nil) - if ToError(ierr) != err { - t.Error("Error is not the same") - } - ierr = New(ierr, nil) - if ToError(ierr) != err { - t.Error("Stacked Error is not the same") - } -} diff --git a/pkg/probe/probe.go b/pkg/probe/probe.go index 078d23913..957c3384a 100644 --- a/pkg/probe/probe.go +++ b/pkg/probe/probe.go @@ -71,6 +71,9 @@ type Error struct { // trace the return path with Probe.Trace and finally handle reporting or quitting // at the top level. func New(e error) *Error { + if e == nil { + return nil + } Err := Error{sync.RWMutex{}, e, GetSysInfo(), []tracePoint{}} return Err.trace() } diff --git a/pkg/quick/quick.go b/pkg/quick/quick.go index ad77f255e..f10220a9f 100644 --- a/pkg/quick/quick.go +++ b/pkg/quick/quick.go @@ -20,7 +20,6 @@ package quick import ( "encoding/json" - "errors" "fmt" "io/ioutil" "os" @@ -30,19 +29,18 @@ import ( "sync" "github.com/fatih/structs" - "github.com/minio/minio/pkg/iodine" - "github.com/minio/minio/pkg/utils/atomic" + "github.com/minio/minio/pkg/probe" ) // Config - generic config interface functions type Config interface { String() string Version() string - Save(string) error - Load(string) error + Save(string) *probe.Error + Load(string) *probe.Error Data() interface{} - Diff(Config) ([]structs.Field, error) - DeepDiff(Config) ([]structs.Field, error) + Diff(Config) ([]structs.Field, *probe.Error) + DeepDiff(Config) ([]structs.Field, *probe.Error) } // config - implements quick.Config interface @@ -52,29 +50,29 @@ type config struct { } // CheckData - 
checks the validity of config data. Data sould be of type struct and contain a string type field called "Version" -func CheckData(data interface{}) error { +func CheckData(data interface{}) *probe.Error { if !structs.IsStruct(data) { - return iodine.New(errors.New("Invalid argument type. Expecing \"struct\" type."), nil) + return probe.New(fmt.Errorf("Invalid argument type. Expecting \"struct\" type.")) } st := structs.New(data) f, ok := st.FieldOk("Version") if !ok { - return iodine.New(fmt.Errorf("Invalid type of struct argument. No [%s.Version] field found.", st.Name()), nil) + return probe.New(fmt.Errorf("Invalid type of struct argument. No [%s.Version] field found.", st.Name())) } if f.Kind() != reflect.String { - return iodine.New(fmt.Errorf("Invalid type of struct argument. Expecting \"string\" type [%s.Version] field.", st.Name()), nil) + return probe.New(fmt.Errorf("Invalid type of struct argument. Expecting \"string\" type [%s.Version] field.", st.Name())) } return nil } // New - instantiate a new config -func New(data interface{}) (Config, error) { +func New(data interface{}) (Config, *probe.Error) { err := CheckData(data) if err != nil { - return nil, err + return nil, err.Trace() } d := new(config) @@ -107,47 +105,40 @@ func (d config) String() string { } // Save writes config data in JSON format to a file.
-func (d config) Save(filename string) (err error) { +func (d config) Save(filename string) *probe.Error { d.lock.Lock() defer d.lock.Unlock() jsonData, err := json.MarshalIndent(d.data, "", "\t") if err != nil { - return iodine.New(err, nil) - } - - file, err := atomic.FileCreate(filename) - if err != nil { - return iodine.New(err, nil) + return probe.New(err) } if runtime.GOOS == "windows" { jsonData = []byte(strings.Replace(string(jsonData), "\n", "\r\n", -1)) } - _, err = file.Write(jsonData) + + err = ioutil.WriteFile(filename, jsonData, 0600) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } - if err := file.Close(); err != nil { - return iodine.New(err, nil) - } return nil } // Load - loads JSON config from file and merge with currently set values -func (d *config) Load(filename string) (err error) { +func (d *config) Load(filename string) *probe.Error { (*d).lock.Lock() defer (*d).lock.Unlock() - _, err = os.Stat(filename) + _, err := os.Stat(filename) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } fileData, err := ioutil.ReadFile(filename) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } if runtime.GOOS == "windows" { @@ -156,22 +147,21 @@ func (d *config) Load(filename string) (err error) { err = json.Unmarshal(fileData, (*d).data) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } - err = CheckData(*(*d).data) - if err != nil { - return iodine.New(err, nil) + if err := CheckData(*(*d).data); err != nil { + return err.Trace() } st := structs.New(*(*d).data) f, ok := st.FieldOk("Version") if !ok { - return iodine.New(fmt.Errorf("Argument struct [%s] does not contain field \"Version\".", st.Name()), nil) + return probe.New(fmt.Errorf("Argument struct [%s] does not contain field \"Version\".", st.Name())) } if (*d).Version() != f.Value() { - return iodine.New(errors.New("Version mismatch"), nil) + return probe.New(fmt.Errorf("Version mismatch")) } return nil @@ -183,10 
+173,11 @@ func (d config) Data() interface{} { } //Diff - list fields that are in A but not in B -func (d config) Diff(c Config) (fields []structs.Field, err error) { - err = CheckData(c.Data()) +func (d config) Diff(c Config) ([]structs.Field, *probe.Error) { + var fields []structs.Field + err := CheckData(c.Data()) if err != nil { - return []structs.Field{}, iodine.New(err, nil) + return []structs.Field{}, err.Trace() } currFields := structs.Fields(d.Data()) @@ -208,10 +199,11 @@ func (d config) Diff(c Config) (fields []structs.Field, err error) { } //DeepDiff - list fields in A that are missing or not equal to fields in B -func (d config) DeepDiff(c Config) (fields []structs.Field, err error) { - err = CheckData(c.Data()) +func (d config) DeepDiff(c Config) ([]structs.Field, *probe.Error) { + var fields []structs.Field + err := CheckData(c.Data()) if err != nil { - return []structs.Field{}, iodine.New(err, nil) + return []structs.Field{}, err.Trace() } currFields := structs.Fields(d.Data()) diff --git a/pkg/server/api/bucket-handlers.go b/pkg/server/api/bucket-handlers.go index 51866a2c7..ea900c100 100644 --- a/pkg/server/api/bucket-handlers.go +++ b/pkg/server/api/bucket-handlers.go @@ -21,7 +21,7 @@ import ( "github.com/gorilla/mux" "github.com/minio/minio/pkg/donut" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/utils/log" ) @@ -30,18 +30,7 @@ func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsCont bucket := vars["bucket"] bucketMetadata, err := api.Donut.GetBucketMetadata(bucket, nil) - switch iodine.ToError(err).(type) { - case donut.BucketNotFound: - { - writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) - return false - } - case donut.BucketNameInvalid: - { - writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) - return false - } - case nil: + if err == nil { if _, err := StripAccessKeyID(req.Header.Get("Authorization")); 
err != nil { if bucketMetadata.ACL.IsPrivate() { return true @@ -56,13 +45,20 @@ func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsCont //return false } } - default: - { - log.Error.Println(iodine.New(err, nil)) - writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) - } + return true + } + switch err.ToError().(type) { + case donut.BucketNotFound: + writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) + return false + case donut.BucketNameInvalid: + writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) + return false + default: + log.Error.Println(err.Trace()) + writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) + return false } - return true } // ListMultipartUploadsHandler - GET Bucket (List Multipart uploads) @@ -102,7 +98,7 @@ func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Re var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -111,23 +107,23 @@ func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Re } resources, err := api.Donut.ListMultipartUploads(bucket, resources, signature) - switch iodine.ToError(err).(type) { - case nil: // success - { - // generate response - response := generateListMultipartUploadsResponse(bucket, resources) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } + if err == nil { + // generate response + response := generateListMultipartUploadsResponse(bucket, resources) + encodedSuccessResponse := encodeSuccessResponse(response, 
acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.BucketNotFound: writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -173,7 +169,7 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -182,8 +178,7 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { } objects, resources, err := api.Donut.ListObjects(bucket, resources, signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { // generate response response := generateListObjectsResponse(bucket, objects, resources) encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) @@ -191,6 +186,9 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) // write body w.Write(encodedSuccessResponse) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.BucketNameInvalid: @@ -202,7 +200,7 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { case donut.ObjectNameInvalid: writeErrorResponse(w, req, 
NoSuchKey, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -232,7 +230,7 @@ func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -241,8 +239,7 @@ func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { } buckets, err := api.Donut.ListBuckets(signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { // generate response response := generateListBucketsResponse(buckets) encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) @@ -250,10 +247,13 @@ func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) // write response w.Write(encodedSuccessResponse) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -296,7 +296,7 @@ func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) { var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -315,11 +315,13 @@ func (api Minio) PutBucketHandler(w http.ResponseWriter, req 
*http.Request) { } err := api.Donut.MakeBucket(bucket, getACLTypeString(aclType), req.Body, signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { // Make sure to add Location information here only for bucket w.Header().Set("Location", "/"+bucket) writeSuccessResponse(w, acceptsContentType) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.TooManyBuckets: @@ -329,7 +331,7 @@ func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) { case donut.BucketExists: writeErrorResponse(w, req, BucketAlreadyExists, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -362,7 +364,7 @@ func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -371,9 +373,11 @@ func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { } err := api.Donut.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)}, signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { writeSuccessResponse(w, acceptsContentType) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.BucketNameInvalid: @@ -381,7 +385,7 @@ func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { case donut.BucketNotFound: writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) default: - 
log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -410,7 +414,7 @@ func (api Minio) HeadBucketHandler(w http.ResponseWriter, req *http.Request) { var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -419,9 +423,11 @@ func (api Minio) HeadBucketHandler(w http.ResponseWriter, req *http.Request) { } _, err := api.Donut.GetBucketMetadata(bucket, signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { writeSuccessResponse(w, acceptsContentType) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.BucketNotFound: @@ -429,7 +435,7 @@ func (api Minio) HeadBucketHandler(w http.ResponseWriter, req *http.Request) { case donut.BucketNameInvalid: writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } diff --git a/pkg/server/api/logging-handlers.go b/pkg/server/api/logging-handlers.go index 1fe9e2101..92fc30273 100644 --- a/pkg/server/api/logging-handlers.go +++ b/pkg/server/api/logging-handlers.go @@ -24,7 +24,7 @@ import ( "os" "time" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/utils/log" ) @@ -99,12 +99,12 @@ func fileLogger(filename string) (chan<- []byte, error) { ch := make(chan []byte) file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) if err != nil { - return nil, iodine.New(err, map[string]string{"logfile": filename}) + return 
nil, probe.New(err) } go func() { for message := range ch { if _, err := io.Copy(file, bytes.NewBuffer(message)); err != nil { - log.Errorln(iodine.New(err, nil)) + log.Errorln(probe.New(err)) } } }() diff --git a/pkg/server/api/object-handlers.go b/pkg/server/api/object-handlers.go index 1f35cca17..421b6a745 100644 --- a/pkg/server/api/object-handlers.go +++ b/pkg/server/api/object-handlers.go @@ -22,7 +22,7 @@ import ( "github.com/gorilla/mux" "github.com/minio/minio/pkg/donut" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/utils/log" ) @@ -57,7 +57,7 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) { var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -66,21 +66,21 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) { } metadata, err := api.Donut.GetObjectMetadata(bucket, object, signature) - switch iodine.ToError(err).(type) { - case nil: // success - { - httpRange, err := getRequestedRange(req.Header.Get("Range"), metadata.Size) - if err != nil { - writeErrorResponse(w, req, InvalidRange, acceptsContentType, req.URL.Path) - return - } - setObjectHeaders(w, metadata, httpRange) - if _, err := api.Donut.GetObject(w, bucket, object, httpRange.start, httpRange.length); err != nil { - // unable to write headers, we've already printed data. Just close the connection. 
- log.Error.Println(iodine.New(err, nil)) - return - } + if err == nil { + httpRange, err := getRequestedRange(req.Header.Get("Range"), metadata.Size) + if err != nil { + writeErrorResponse(w, req, InvalidRange, acceptsContentType, req.URL.Path) + return } + setObjectHeaders(w, metadata, httpRange) + if _, err := api.Donut.GetObject(w, bucket, object, httpRange.start, httpRange.length); err != nil { + // unable to write headers, we've already printed data. Just close the connection. + log.Error.Println(err.Trace()) + return + } + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.BucketNameInvalid: @@ -92,7 +92,7 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) { case donut.ObjectNameInvalid: writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -123,7 +123,7 @@ func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -132,10 +132,12 @@ func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { } metadata, err := api.Donut.GetObjectMetadata(bucket, object, signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { setObjectHeaders(w, metadata, nil) w.WriteHeader(http.StatusOK) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.BucketNameInvalid: @@ -147,7 +149,7 @@ func (api 
Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { case donut.ObjectNameInvalid: writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -202,16 +204,20 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { // writeErrorResponse(w, req, EntityTooSmall, acceptsContentType, req.URL.Path) // return // } - sizeInt64, err := strconv.ParseInt(size, 10, 64) - if err != nil { - writeErrorResponse(w, req, InvalidRequest, acceptsContentType, req.URL.Path) - return + var sizeInt64 int64 + { + var err error + sizeInt64, err = strconv.ParseInt(size, 10, 64) + if err != nil { + writeErrorResponse(w, req, InvalidRequest, acceptsContentType, req.URL.Path) + return + } } var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -220,10 +226,12 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { } metadata, err := api.Donut.CreateObject(bucket, object, md5, sizeInt64, req.Body, nil, signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { w.Header().Set("ETag", metadata.MD5Sum) writeSuccessResponse(w, acceptsContentType) + return + } + switch err.ToError().(type) { case donut.BucketNotFound: writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path) case donut.BucketNameInvalid: @@ -243,7 +251,7 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) { case donut.InvalidDigest: writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) 
writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -279,7 +287,7 @@ func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Requ var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -288,22 +296,22 @@ func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Requ } uploadID, err := api.Donut.NewMultipartUpload(bucket, object, req.Header.Get("Content-Type"), signature) - switch iodine.ToError(err).(type) { - case nil: - { - response := generateInitiateMultipartUploadResponse(bucket, object, uploadID) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } + if err == nil { + response := generateInitiateMultipartUploadResponse(bucket, object, uploadID) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.ObjectExists: writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -344,10 +352,14 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) return } - sizeInt64, err := strconv.ParseInt(size, 10, 64) - if err != nil 
{ - writeErrorResponse(w, req, InvalidRequest, acceptsContentType, req.URL.Path) - return + var sizeInt64 int64 + { + var err error + sizeInt64, err = strconv.ParseInt(size, 10, 64) + if err != nil { + writeErrorResponse(w, req, InvalidRequest, acceptsContentType, req.URL.Path) + return + } } vars := mux.Vars(req) @@ -357,15 +369,19 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) uploadID := req.URL.Query().Get("uploadId") partIDString := req.URL.Query().Get("partNumber") - partID, err := strconv.Atoi(partIDString) - if err != nil { - writeErrorResponse(w, req, InvalidPart, acceptsContentType, req.URL.Path) + var partID int + { + var err error + partID, err = strconv.Atoi(partIDString) + if err != nil { + writeErrorResponse(w, req, InvalidPart, acceptsContentType, req.URL.Path); return + } } var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -374,10 +390,12 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) } calculatedMD5, err := api.Donut.CreateObjectPart(bucket, object, uploadID, partID, "", md5, sizeInt64, req.Body, signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { w.Header().Set("ETag", calculatedMD5) writeSuccessResponse(w, acceptsContentType) + return + } + switch err.ToError().(type) { case donut.InvalidUploadID: writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) case donut.ObjectExists: @@ -393,7 +411,7 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) case donut.InvalidDigest: writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError,
acceptsContentType, req.URL.Path) } } @@ -423,7 +441,7 @@ func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Re var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -432,16 +450,18 @@ func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Re } err := api.Donut.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, signature) - switch iodine.ToError(err).(type) { - case nil: + if err == nil { setCommonHeaders(w, getContentTypeString(acceptsContentType), 0) w.WriteHeader(http.StatusNoContent) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.InvalidUploadID: writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -482,7 +502,7 @@ func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -491,22 +511,22 @@ func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request } objectResourcesMetadata, err := api.Donut.ListObjectParts(bucket, object, objectResourcesMetadata, signature) - switch iodine.ToError(err).(type) { - case nil: - { - response := generateListPartsResponse(objectResourcesMetadata) - encodedSuccessResponse := encodeSuccessResponse(response, 
acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } + if err == nil { + response := generateListPartsResponse(objectResourcesMetadata) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + return + } + switch err.ToError().(type) { case donut.SignatureDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path) case donut.InvalidUploadID: writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } @@ -536,7 +556,7 @@ func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http var signature *donut.Signature if _, ok := req.Header["Authorization"]; ok { // Init signature V4 verification - var err error + var err *probe.Error signature, err = InitSignatureV4(req) if err != nil { writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) @@ -544,16 +564,16 @@ func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http } } metadata, err := api.Donut.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, req.Body, signature) - switch iodine.ToError(err).(type) { - case nil: - { - response := generateCompleteMultpartUploadResponse(bucket, object, "", metadata.MD5Sum) - encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) - // write headers - setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) - // write body - w.Write(encodedSuccessResponse) - } + if err == nil { + response := 
generateCompleteMultpartUploadResponse(bucket, object, "", metadata.MD5Sum) + encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType) + // write headers + setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse)) + // write body + w.Write(encodedSuccessResponse) + return + } + switch err.ToError().(type) { case donut.InvalidUploadID: writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path) case donut.InvalidPart: @@ -569,7 +589,7 @@ func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http case donut.MalformedXML: writeErrorResponse(w, req, MalformedXML, acceptsContentType, req.URL.Path) default: - log.Error.Println(iodine.New(err, nil)) + log.Error.Println(err.Trace()) writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path) } } diff --git a/pkg/server/api/range.go b/pkg/server/api/range.go index 051dd7524..a267c0e74 100644 --- a/pkg/server/api/range.go +++ b/pkg/server/api/range.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/minio/minio/pkg/donut" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) const ( @@ -51,7 +51,7 @@ func getRequestedRange(hrange string, size int64) (*httpRange, error) { if hrange != "" { err := r.parseRange(hrange) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } } return r, nil @@ -60,7 +60,7 @@ func getRequestedRange(hrange string, size int64) (*httpRange, error) { func (r *httpRange) parse(ra string) error { i := strings.Index(ra, "-") if i < 0 { - return iodine.New(donut.InvalidRange{}, nil) + return probe.New(donut.InvalidRange{}) } start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:]) if start == "" { @@ -68,7 +68,7 @@ func (r *httpRange) parse(ra string) error { // range start relative to the end of the file. 
i, err := strconv.ParseInt(end, 10, 64) if err != nil { - return iodine.New(donut.InvalidRange{}, nil) + return probe.New(donut.InvalidRange{}) } if i > r.size { i = r.size @@ -78,7 +78,7 @@ func (r *httpRange) parse(ra string) error { } else { i, err := strconv.ParseInt(start, 10, 64) if err != nil || i > r.size || i < 0 { - return iodine.New(donut.InvalidRange{}, nil) + return probe.New(donut.InvalidRange{}) } r.start = i if end == "" { @@ -87,7 +87,7 @@ func (r *httpRange) parse(ra string) error { } else { i, err := strconv.ParseInt(end, 10, 64) if err != nil || r.start > i { - return iodine.New(donut.InvalidRange{}, nil) + return probe.New(donut.InvalidRange{}) } if i >= r.size { i = r.size - 1 @@ -101,24 +101,24 @@ func (r *httpRange) parse(ra string) error { // parseRange parses a Range header string as per RFC 2616. func (r *httpRange) parseRange(s string) error { if s == "" { - return iodine.New(errors.New("header not present"), nil) + return probe.New(errors.New("header not present")) } if !strings.HasPrefix(s, b) { - return iodine.New(donut.InvalidRange{}, nil) + return probe.New(donut.InvalidRange{}) } ras := strings.Split(s[len(b):], ",") if len(ras) == 0 { - return iodine.New(errors.New("invalid request"), nil) + return probe.New(errors.New("invalid request")) } // Just pick the first one and ignore the rest, we only support one range per object if len(ras) > 1 { - return iodine.New(errors.New("multiple ranges specified"), nil) + return probe.New(errors.New("multiple ranges specified")) } ra := strings.TrimSpace(ras[0]) if ra == "" { - return iodine.New(donut.InvalidRange{}, nil) + return probe.New(donut.InvalidRange{}) } return r.parse(ra) } diff --git a/pkg/server/api/signature.go b/pkg/server/api/signature.go index 002da4360..0b3feba88 100644 --- a/pkg/server/api/signature.go +++ b/pkg/server/api/signature.go @@ -23,7 +23,7 @@ import ( "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/donut" - "github.com/minio/minio/pkg/iodine" + 
"github.com/minio/minio/pkg/probe" ) const ( @@ -64,16 +64,23 @@ func StripAccessKeyID(ah string) (string, error) { } // InitSignatureV4 initializing signature verification -func InitSignatureV4(req *http.Request) (*donut.Signature, error) { +func InitSignatureV4(req *http.Request) (*donut.Signature, *probe.Error) { // strip auth from authorization header ah := req.Header.Get("Authorization") - accessKeyID, err := StripAccessKeyID(ah) - if err != nil { - return nil, iodine.New(err, nil) + var accessKeyID string + { + var err error + accessKeyID, err = StripAccessKeyID(ah) + if err != nil { + return nil, probe.New(err) + } } authConfig, err := auth.LoadConfig() + if err != nil { + return nil, err.Trace() + } if _, ok := authConfig.Users[accessKeyID]; !ok { - return nil, errors.New("Access ID not found") + return nil, probe.New(errors.New("AccessID not found")) } signature := &donut.Signature{ AccessKeyID: authConfig.Users[accessKeyID].AccessKeyID, diff --git a/pkg/server/minhttp/http.go b/pkg/server/minhttp/http.go index ec794b6f8..97a87e8d5 100644 --- a/pkg/server/minhttp/http.go +++ b/pkg/server/minhttp/http.go @@ -32,7 +32,7 @@ import ( "time" "github.com/facebookgo/httpdown" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // An app contains one or more servers and their associated configuration. 
@@ -45,11 +45,11 @@ type app struct { } // listen initailize listeners -func (a *app) listen() error { +func (a *app) listen() *probe.Error { for _, s := range a.servers { l, err := a.net.Listen("tcp", s.Addr) if err != nil { - return iodine.New(err, nil) + return probe.New(err) } if s.TLSConfig != nil { l = tls.NewListener(l, s.TLSConfig) @@ -79,7 +79,7 @@ func (a *app) wait() { go func(s httpdown.Server) { defer wg.Done() if err := s.Wait(); err != nil { - a.errors <- iodine.New(err, nil) + a.errors <- probe.New(err) } }(s) } @@ -101,7 +101,7 @@ func (a *app) trapSignal(wg *sync.WaitGroup) { go func(s httpdown.Server) { defer wg.Done() if err := s.Stop(); err != nil { - a.errors <- iodine.New(err, nil) + a.errors <- probe.New(err) } }(s) } @@ -112,7 +112,7 @@ func (a *app) trapSignal(wg *sync.WaitGroup) { // we only return here if there's an error, otherwise the new process // will send us a TERM when it's ready to trigger the actual shutdown. if _, err := a.net.StartProcess(); err != nil { - a.errors <- iodine.New(err, nil) + a.errors <- probe.New(err) } } } @@ -120,7 +120,7 @@ func (a *app) trapSignal(wg *sync.WaitGroup) { // ListenAndServe will serve the given http.Servers and will monitor for signals // allowing for graceful termination (SIGTERM) or restart (SIGUSR2/SIGHUP). -func ListenAndServe(servers ...*http.Server) error { +func ListenAndServe(servers ...*http.Server) *probe.Error { // get parent process id ppid := os.Getppid() @@ -134,7 +134,7 @@ func ListenAndServe(servers ...*http.Server) error { // Acquire Listeners if err := a.listen(); err != nil { - return iodine.New(err, nil) + return err.Trace() } // Start serving. @@ -143,7 +143,7 @@ func ListenAndServe(servers ...*http.Server) error { // Close the parent if we inherited and it wasn't init that started us. 
if os.Getenv("LISTEN_FDS") != "" && ppid != 1 { if err := syscall.Kill(ppid, syscall.SIGTERM); err != nil { - return iodine.New(err, nil) + return probe.New(err) } } @@ -160,14 +160,14 @@ func ListenAndServe(servers ...*http.Server) error { if err == nil { panic("unexpected nil error") } - return iodine.New(err, nil) + return probe.New(err) case <-waitdone: return nil } } // ListenAndServeLimited is similar to ListenAndServe but ratelimited with connLimit value -func ListenAndServeLimited(connLimit int, servers ...*http.Server) error { +func ListenAndServeLimited(connLimit int, servers ...*http.Server) *probe.Error { // get parent process id ppid := os.Getppid() @@ -181,7 +181,7 @@ func ListenAndServeLimited(connLimit int, servers ...*http.Server) error { // Acquire Listeners if err := a.listen(); err != nil { - return iodine.New(err, nil) + return err.Trace() } // Start serving. @@ -190,7 +190,7 @@ func ListenAndServeLimited(connLimit int, servers ...*http.Server) error { // Close the parent if we inherited and it wasn't init that started us. 
if os.Getenv("LISTEN_FDS") != "" && ppid != 1 { if err := syscall.Kill(ppid, syscall.SIGTERM); err != nil { - return iodine.New(err, nil) + return probe.New(err) } } @@ -207,7 +207,7 @@ func ListenAndServeLimited(connLimit int, servers ...*http.Server) error { if err == nil { panic("unexpected nil error") } - return iodine.New(err, nil) + return probe.New(err) case <-waitdone: return nil } diff --git a/pkg/server/minhttp/listen.go b/pkg/server/minhttp/listen.go index 3f4ad0d71..59eb30298 100644 --- a/pkg/server/minhttp/listen.go +++ b/pkg/server/minhttp/listen.go @@ -20,8 +20,6 @@ import ( "net" "os" "sync" - - "github.com/minio/minio/pkg/iodine" ) // rateLimitedListener returns a Listener that accepts at most n simultaneous @@ -53,7 +51,7 @@ func (l *rateLimitListener) Accept() (net.Conn, error) { c, err := l.Listener.Accept() if err != nil { l.release() - return nil, iodine.New(err, nil) + return nil, err } return &rateLimitListenerConn{Conn: c, release: l.release}, nil } @@ -67,5 +65,5 @@ type rateLimitListenerConn struct { func (l *rateLimitListenerConn) Close() error { err := l.Conn.Close() l.releaseOnce.Do(l.release) - return iodine.New(err, nil) + return err } diff --git a/pkg/server/minhttp/net.go b/pkg/server/minhttp/net.go index e79bbcb18..5e0db6a1a 100644 --- a/pkg/server/minhttp/net.go +++ b/pkg/server/minhttp/net.go @@ -25,7 +25,7 @@ import ( "strings" "sync" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // This package is a fork https://github.com/facebookgo/grace @@ -71,8 +71,8 @@ type fileListener interface { } // getInheritedListeners - look for LISTEN_FDS in environment variables and populate listeners accordingly -func (n *minNet) getInheritedListeners() error { - var retErr error +func (n *minNet) getInheritedListeners() *probe.Error { + var retErr *probe.Error n.inheritOnce.Do(func() { n.mutex.Lock() defer n.mutex.Unlock() @@ -82,7 +82,7 @@ func (n *minNet) getInheritedListeners() error { } count, err := 
strconv.Atoi(countStr) if err != nil { - retErr = fmt.Errorf("found invalid count value: %s=%s", envCountKey, countStr) + retErr = probe.New(fmt.Errorf("found invalid count value: %s=%s", envCountKey, countStr)) return } @@ -92,37 +92,40 @@ func (n *minNet) getInheritedListeners() error { l, err := net.FileListener(file) if err != nil { file.Close() - retErr = iodine.New(fmt.Errorf("error inheriting socket fd %d: %s", i, err), nil) + retErr = probe.New(err) return } if err := file.Close(); err != nil { - retErr = iodine.New(fmt.Errorf("error closing inherited socket fd %d: %s", i, err), nil) + retErr = probe.New(err) return } n.inheritedListeners = append(n.inheritedListeners, l) } }) - return iodine.New(retErr, nil) + if retErr != nil { + return probe.New(retErr) + } + return nil } // Listen announces on the local network address laddr. The network net must be // a stream-oriented network: "tcp", "tcp4", "tcp6", "unix" or "unixpacket". It // returns an inherited net.Listener for the matching network and address, or // creates a new one using net.Listen() -func (n *minNet) Listen(nett, laddr string) (net.Listener, error) { +func (n *minNet) Listen(nett, laddr string) (net.Listener, *probe.Error) { switch nett { default: - return nil, net.UnknownNetworkError(nett) + return nil, probe.New(net.UnknownNetworkError(nett)) case "tcp", "tcp4", "tcp6": addr, err := net.ResolveTCPAddr(nett, laddr) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } return n.ListenTCP(nett, addr) case "unix", "unixpacket": addr, err := net.ResolveUnixAddr(nett, laddr) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } return n.ListenUnix(nett, addr) } @@ -131,10 +134,9 @@ func (n *minNet) Listen(nett, laddr string) (net.Listener, error) { // ListenTCP announces on the local network address laddr. The network net must // be: "tcp", "tcp4" or "tcp6". 
It returns an inherited net.Listener for the // matching network and address, or creates a new one using net.ListenTCP. -func (n *minNet) ListenTCP(nett string, laddr *net.TCPAddr) (net.Listener, error) { - var err error +func (n *minNet) ListenTCP(nett string, laddr *net.TCPAddr) (net.Listener, *probe.Error) { if err := n.getInheritedListeners(); err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } n.mutex.Lock() @@ -153,11 +155,10 @@ func (n *minNet) ListenTCP(nett string, laddr *net.TCPAddr) (net.Listener, error } } - var l net.Listener // make a fresh listener - l, err = net.ListenTCP(nett, laddr) + l, err := net.ListenTCP(nett, laddr) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } n.activeListeners = append(n.activeListeners, rateLimitedListener(l, n.connLimit)) return l, nil @@ -166,10 +167,9 @@ func (n *minNet) ListenTCP(nett string, laddr *net.TCPAddr) (net.Listener, error // ListenUnix announces on the local network address laddr. The network net // must be a: "unix" or "unixpacket". It returns an inherited net.Listener for // the matching network and address, or creates a new one using net.ListenUnix. -func (n *minNet) ListenUnix(nett string, laddr *net.UnixAddr) (net.Listener, error) { - var err error +func (n *minNet) ListenUnix(nett string, laddr *net.UnixAddr) (net.Listener, *probe.Error) { if err := n.getInheritedListeners(); err != nil { - return nil, iodine.New(err, nil) + return nil, err.Trace() } n.mutex.Lock() @@ -188,23 +188,22 @@ func (n *minNet) ListenUnix(nett string, laddr *net.UnixAddr) (net.Listener, err } } - var l net.Listener // make a fresh listener - l, err = net.ListenUnix(nett, laddr) + l, err := net.ListenUnix(nett, laddr) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } n.activeListeners = append(n.activeListeners, rateLimitedListener(l, n.connLimit)) return l, nil } // activeListeners returns a snapshot copy of the active listeners. 
-func (n *minNet) getActiveListeners() ([]net.Listener, error) { +func (n *minNet) getActiveListeners() []net.Listener { n.mutex.Lock() defer n.mutex.Unlock() ls := make([]net.Listener, len(n.activeListeners)) copy(ls, n.activeListeners) - return ls, nil + return ls } // IsEqual is synonymous with IP.IsEqual() method, here IsEqual matches net.Addr instead of net.IP @@ -233,18 +232,15 @@ func (n1 minAddr) IsEqual(n2 net.Addr) bool { // arguments as when it was originally started. This allows for a newly // deployed binary to be started. It returns the pid of the newly started // process when successful. -func (n *minNet) StartProcess() (int, error) { - listeners, err := n.getActiveListeners() - if err != nil { - return 0, iodine.New(err, nil) - } - +func (n *minNet) StartProcess() (int, *probe.Error) { + listeners := n.getActiveListeners() // Extract the fds from the listeners. files := make([]*os.File, len(listeners)) for i, l := range listeners { + var err error files[i], err = l.(fileListener).File() if err != nil { - return 0, iodine.New(err, nil) + return 0, probe.New(err) } defer files[i].Close() } @@ -253,7 +249,7 @@ func (n *minNet) StartProcess() (int, error) { // the file it points to has been changed we will use the updated symlink. argv0, err := exec.LookPath(os.Args[0]) if err != nil { - return 0, iodine.New(err, nil) + return 0, probe.New(err) } // Pass on the environment and replace the old count key with the new one. @@ -272,7 +268,7 @@ func (n *minNet) StartProcess() (int, error) { Files: allFiles, }) if err != nil { - return 0, iodine.New(err, nil) + return 0, probe.New(err) } return process.Pid, nil } diff --git a/pkg/server/minhttp/net_test.go b/pkg/server/minhttp/net_test.go deleted file mode 100644 index 45942b4cd..000000000 --- a/pkg/server/minhttp/net_test.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2015 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minhttp - -import ( - "os" - "regexp" - "testing" - - "github.com/minio/minio/pkg/iodine" - . "gopkg.in/check.v1" -) - -func Test(t *testing.T) { TestingT(t) } - -type MySuite struct{} - -var _ = Suite(&MySuite{}) - -func (s *MySuite) TestEmptyCountEnvVariable(c *C) { - os.Setenv(envCountKey, "") - n := &minNet{} - c.Assert(n.getInheritedListeners(), IsNil) -} - -func (s *MySuite) TestZeroCountEnvVariable(c *C) { - os.Setenv(envCountKey, "0") - n := &minNet{} - c.Assert(n.getInheritedListeners(), IsNil) -} - -func (s *MySuite) TestInvalidCountEnvVariable(c *C) { - os.Setenv(envCountKey, "a") - n := &minNet{} - expected := regexp.MustCompile("^found invalid count value: LISTEN_FDS=a$") - err := n.getInheritedListeners() - c.Assert(err, Not(IsNil)) - c.Assert(expected.MatchString(iodine.ToError(err).Error()), Equals, true) -} - -func (s *MySuite) TestInheritErrorOnListenTCPWithInvalidCount(c *C) { - os.Setenv(envCountKey, "a") - n := &minNet{} - expected := regexp.MustCompile("^found invalid count value: LISTEN_FDS=a$") - _, err := n.Listen("tcp", ":0") - c.Assert(err, Not(IsNil)) - c.Assert(expected.MatchString(iodine.ToError(err).Error()), Equals, true) -} - -func (s *MySuite) TestInvalidNetwork(c *C) { - os.Setenv(envCountKey, "") - n := &minNet{} - _, err := n.Listen("foo", "") - c.Assert(err, Not(IsNil)) - c.Assert(regexp.MustCompile("^unknown network 
foo$").MatchString(iodine.ToError(err).Error()), Equals, true) -} - -func (s *MySuite) TestInvalidTcpAddr(c *C) { - os.Setenv(envCountKey, "") - n := &minNet{} - _, err := n.Listen("tcp", "abc") - c.Assert(err, Not(IsNil)) - c.Assert(regexp.MustCompile("^missing port in address abc$").MatchString(iodine.ToError(err).Error()), Equals, true) -} diff --git a/pkg/server/rpc/auth.go b/pkg/server/rpc/auth.go index 4cf2288e0..d55a8ad57 100644 --- a/pkg/server/rpc/auth.go +++ b/pkg/server/rpc/auth.go @@ -20,7 +20,7 @@ import ( "net/http" "github.com/minio/minio/pkg/auth" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // AuthService auth service @@ -32,15 +32,15 @@ type AuthReply struct { SecretAccessKey string `json:"secretaccesskey"` } -func getAuth(reply *AuthReply) error { +func getAuth(reply *AuthReply) *probe.Error { accessID, err := auth.GenerateAccessKeyID() if err != nil { - return iodine.New(err, nil) + return err.Trace() } reply.AccessKeyID = string(accessID) secretID, err := auth.GenerateSecretAccessKey() if err != nil { - return iodine.New(err, nil) + return err.Trace() } reply.SecretAccessKey = string(secretID) return nil @@ -48,5 +48,8 @@ func getAuth(reply *AuthReply) error { // Get auth keys func (s *AuthService) Get(r *http.Request, args *Args, reply *AuthReply) error { - return getAuth(reply) + if err := getAuth(reply); err != nil { + return err + } + return nil } diff --git a/pkg/server/rpc/donut.go b/pkg/server/rpc/donut.go index 806f89346..8e3d042a9 100644 --- a/pkg/server/rpc/donut.go +++ b/pkg/server/rpc/donut.go @@ -20,7 +20,7 @@ import ( "net/http" "github.com/minio/minio/pkg/donut" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // DonutService donut service @@ -40,14 +40,14 @@ type Reply struct { Error error `json:"error"` } -func setDonut(args *DonutArgs, reply *Reply) error { +func setDonut(args *DonutArgs, reply *Reply) *probe.Error { conf := &donut.Config{Version: "0.0.1"} 
conf.DonutName = args.Name conf.MaxSize = args.MaxSize conf.NodeDiskMap = make(map[string][]string) conf.NodeDiskMap[args.Hostname] = args.Disks if err := donut.SaveConfig(conf); err != nil { - return iodine.New(err, nil) + return err.Trace() } reply.Message = "success" reply.Error = nil diff --git a/pkg/server/rpc/sysinfo.go b/pkg/server/rpc/sysinfo.go index cc06ac402..c7e0b92bd 100644 --- a/pkg/server/rpc/sysinfo.go +++ b/pkg/server/rpc/sysinfo.go @@ -21,7 +21,7 @@ import ( "os" "runtime" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" ) // SysInfoService - @@ -45,7 +45,7 @@ type MemStatsReply struct { runtime.MemStats `json:"memstats"` } -func setSysInfoReply(sis *SysInfoReply) error { +func setSysInfoReply(sis *SysInfoReply) *probe.Error { sis.SysARCH = runtime.GOARCH sis.SysOS = runtime.GOOS sis.SysCPUS = runtime.NumCPU() @@ -55,12 +55,12 @@ func setSysInfoReply(sis *SysInfoReply) error { var err error sis.Hostname, err = os.Hostname() if err != nil { - return iodine.New(err, nil) + return probe.New(err) } return nil } -func setMemStatsReply(sis *MemStatsReply) error { +func setMemStatsReply(sis *MemStatsReply) *probe.Error { var memStats runtime.MemStats runtime.ReadMemStats(&memStats) sis.MemStats = memStats @@ -69,10 +69,16 @@ func setMemStatsReply(sis *MemStatsReply) error { // Get method func (s *SysInfoService) Get(r *http.Request, args *Args, reply *SysInfoReply) error { - return setSysInfoReply(reply) + if err := setSysInfoReply(reply); err != nil { + return err + } + return nil } // Get method func (s *MemStatsService) Get(r *http.Request, args *Args, reply *MemStatsReply) error { - return setMemStatsReply(reply) + if err := setMemStatsReply(reply); err != nil { + return err + } + return nil } diff --git a/pkg/server/rpc_test.go b/pkg/server/rpc_test.go index a847da0d1..09ea71d5a 100644 --- a/pkg/server/rpc_test.go +++ b/pkg/server/rpc_test.go @@ -56,8 +56,7 @@ func (s *MyRPCSuite) TestMemStats(c *C) { 
c.Assert(resp.StatusCode, Equals, http.StatusOK) var reply rpc.MemStatsReply - err = jsonrpc.DecodeClientResponse(resp.Body, &reply) - c.Assert(err, IsNil) + c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &reply), IsNil) resp.Body.Close() c.Assert(reply, Not(DeepEquals), rpc.MemStatsReply{}) } @@ -75,8 +74,7 @@ func (s *MyRPCSuite) TestSysInfo(c *C) { c.Assert(resp.StatusCode, Equals, http.StatusOK) var reply rpc.SysInfoReply - err = jsonrpc.DecodeClientResponse(resp.Body, &reply) - c.Assert(err, IsNil) + c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &reply), IsNil) resp.Body.Close() c.Assert(reply, Not(DeepEquals), rpc.SysInfoReply{}) } @@ -94,8 +92,7 @@ func (s *MyRPCSuite) TestAuth(c *C) { c.Assert(resp.StatusCode, Equals, http.StatusOK) var reply rpc.AuthReply - err = jsonrpc.DecodeClientResponse(resp.Body, &reply) - c.Assert(err, IsNil) + c.Assert(jsonrpc.DecodeClientResponse(resp.Body, &reply), IsNil) resp.Body.Close() c.Assert(reply, Not(DeepEquals), rpc.AuthReply{}) c.Assert(len(reply.AccessKeyID), Equals, 20) diff --git a/pkg/server/server.go b/pkg/server/server.go index 013426d4d..3f06e0209 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -24,13 +24,13 @@ import ( "os" "strings" - "github.com/minio/minio/pkg/iodine" + "github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/server/api" "github.com/minio/minio/pkg/server/minhttp" ) // getAPI server instance -func getAPIServer(conf api.Config, apiHandler http.Handler) (*http.Server, error) { +func getAPIServer(conf api.Config, apiHandler http.Handler) (*http.Server, *probe.Error) { // Minio server config httpServer := &http.Server{ Addr: conf.Address, @@ -44,13 +44,13 @@ func getAPIServer(conf api.Config, apiHandler http.Handler) (*http.Server, error httpServer.TLSConfig.Certificates = make([]tls.Certificate, 1) httpServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(conf.CertFile, conf.KeyFile) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) 
} } host, port, err := net.SplitHostPort(conf.Address) if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } var hosts []string @@ -60,7 +60,7 @@ func getAPIServer(conf api.Config, apiHandler http.Handler) (*http.Server, error default: addrs, err := net.InterfaceAddrs() if err != nil { - return nil, iodine.New(err, nil) + return nil, probe.New(err) } for _, addr := range addrs { if addr.Network() == "ip+net" { @@ -104,18 +104,18 @@ func startTM(a api.Minio) { } // StartServices starts basic services for a server -func StartServices(conf api.Config) error { +func StartServices(conf api.Config) *probe.Error { apiHandler, minioAPI := getAPIHandler(conf) apiServer, err := getAPIServer(conf, apiHandler) if err != nil { - return iodine.New(err, nil) + return err.Trace() } rpcServer := getRPCServer(getRPCHandler()) // start ticket master go startTM(minioAPI) if err := minhttp.ListenAndServeLimited(conf.RateLimit, apiServer, rpcServer); err != nil { - return iodine.New(err, nil) + return err.Trace() } return nil } diff --git a/pkg/utils/atomic/atomic.go b/pkg/utils/atomic/atomic.go index ba0c61815..2e6d23ed3 100644 --- a/pkg/utils/atomic/atomic.go +++ b/pkg/utils/atomic/atomic.go @@ -23,8 +23,6 @@ import ( "io/ioutil" "os" "path/filepath" - - "github.com/minio/minio/pkg/iodine" ) // File container provided for atomic file writes @@ -37,11 +35,11 @@ type File struct { func (f *File) Close() error { // close the embedded fd if err := f.File.Close(); err != nil { - return iodine.New(err, nil) + return err } // atomic rename to final destination if err := os.Rename(f.Name(), f.file); err != nil { - return iodine.New(err, nil) + return err } return nil } @@ -50,7 +48,7 @@ func (f *File) Close() error { func (f *File) CloseAndPurge() error { // close the embedded fd if err := f.File.Close(); err != nil { - return iodine.New(err, nil) + return err } if err := os.Remove(f.Name()); err != nil { return err @@ -63,17 +61,17 @@ func FileCreate(filePath 
string) (*File, error) { // if parent directories do not exist, ioutil.TempFile doesn't create them // handle such a case with os.MkdirAll() if err := os.MkdirAll(filepath.Dir(filePath), 0700); err != nil { - return nil, iodine.New(err, nil) + return nil, err } f, err := ioutil.TempFile(filepath.Dir(filePath), filepath.Base(filePath)) if err != nil { - return nil, iodine.New(err, nil) + return nil, err } if err := os.Chmod(f.Name(), 0600); err != nil { if err := os.Remove(f.Name()); err != nil { - return nil, iodine.New(err, nil) + return nil, err } - return nil, iodine.New(err, nil) + return nil, err } return &File{File: f, file: filePath}, nil }