Convert errors tracer into a separate package (#5221)

Harshavardhana 2017-11-25 11:58:29 -08:00 committed by GitHub
parent 6e6aeb6a9e
commit 8efa82126b
GPG Key ID: 4AEE18F83AFDEB23
82 changed files with 1117 additions and 896 deletions
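The change is mechanical throughout the diff below: the error-tracing helpers that previously lived inside the cmd package (traceError, traceErrorf, errorCause, isErrIgnored) move into the importable package github.com/minio/minio/pkg/errors and resurface as errors.Trace, errors.Tracef, errors.Cause and errors.IsErrIgnored. A minimal sketch of the call-site rewrite, assuming only the API visible in this commit (errFileNotFound is a stand-in for one of cmd's sentinel errors):

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

var errFileNotFound = stderrors.New("file not found") // stand-in sentinel

func main() {
	// Before this commit: err := traceError(errFileNotFound)
	err := errors.Trace(errFileNotFound)

	// Sentinel comparisons must unwrap the trace first;
	// before this commit: errorCause(err) == errFileNotFound.
	fmt.Println(errors.Cause(err) == errFileNotFound) // prints: true
}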

View File

@@ -33,6 +33,7 @@ import (
 	router "github.com/gorilla/mux"
 	"github.com/minio/minio/pkg/auth"
+	"github.com/minio/minio/pkg/errors"
 )

 var configJSON = []byte(`{
@@ -1033,7 +1034,7 @@ func buildAdminRequest(queryVal url.Values, opHdr, method string,
 	contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
 	req, err := newTestRequest(method, "/?"+queryVal.Encode(), contentLength, bodySeeker)
 	if err != nil {
-		return nil, traceError(err)
+		return nil, errors.Trace(err)
 	}
 	req.Header.Set(minioAdminOpHeader, opHdr)
@@ -1041,7 +1042,7 @@ func buildAdminRequest(queryVal url.Values, opHdr, method string,
 	cred := serverConfig.GetCredential()
 	err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
 	if err != nil {
-		return nil, traceError(err)
+		return nil, errors.Trace(err)
 	}
 	return req, nil

View File

@@ -18,7 +18,6 @@ package cmd

 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"net"
 	"os"
@@ -30,6 +29,7 @@ import (
 	"time"

 	"github.com/minio/minio-go/pkg/set"
+	"github.com/minio/minio/pkg/errors"
 )

 const (
@@ -159,7 +159,7 @@ func (rc remoteAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
 // GetConfig - returns config.json of the local server.
 func (lc localAdminClient) GetConfig() ([]byte, error) {
 	if serverConfig == nil {
-		return nil, errors.New("config not present")
+		return nil, fmt.Errorf("config not present")
 	}
 	return json.Marshal(serverConfig)
@@ -483,7 +483,7 @@ func getPeerConfig(peers adminPeers) ([]byte, error) {
 	configJSON, err := getValidServerConfig(serverConfigs, errs)
 	if err != nil {
 		errorIf(err, "Unable to find a valid server config")
-		return nil, traceError(err)
+		return nil, errors.Trace(err)
 	}

 	// Return the config.json that was present quorum or more
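Note the errors.New → fmt.Errorf swaps above: with the standard library errors import removed so that the identifier can be taken by the new tracer package, fixed-message errors are built with fmt.Errorf instead. For a message without formatting verbs the two constructions are interchangeable, as this small comparison (standard library only) shows:

package main

import (
	"errors"
	"fmt"
)

func main() {
	a := errors.New("config not present") // needs the stdlib errors import
	b := fmt.Errorf("config not present") // needs only fmt
	fmt.Println(a.Error() == b.Error())   // prints: true
}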

View File

@@ -18,7 +18,6 @@ package cmd

 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -26,11 +25,12 @@ import (
 	"time"

 	router "github.com/gorilla/mux"
+	"github.com/minio/minio/pkg/errors"
 )

 const adminPath = "/admin"

-var errUnsupportedBackend = errors.New("not supported for non erasure-code backend")
+var errUnsupportedBackend = fmt.Errorf("not supported for non erasure-code backend")

 // adminCmd - exports RPC methods for service status, stop and
 // restart commands.
@@ -166,7 +166,7 @@ func (s *adminCmd) GetConfig(args *AuthRPCArgs, reply *ConfigReply) error {
 	}

 	if serverConfig == nil {
-		return errors.New("config not present")
+		return fmt.Errorf("config not present")
 	}

 	jsonBytes, err := json.Marshal(serverConfig)
@@ -238,7 +238,7 @@ func registerAdminRPCRouter(mux *router.Router) error {
 	adminRPCServer := newRPCServer()
 	err := adminRPCServer.RegisterName("Admin", adminRPCHandler)
 	if err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}
 	adminRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
 	adminRouter.Path(adminPath).Handler(adminRPCServer)

View File

@@ -21,6 +21,7 @@ import (
 	"net/http"

 	"github.com/minio/minio/pkg/auth"
+	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
 )

@@ -752,7 +753,7 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
 		return ErrNone
 	}

-	err = errorCause(err)
+	err = errors.Cause(err)
 	// Verify if the underlying error is signature mismatch.
 	switch err {
 	case errSignatureMismatch:

View File

@@ -18,6 +18,8 @@ package cmd

 import (
 	router "github.com/gorilla/mux"
+
+	"github.com/minio/minio/pkg/errors"
 )

 // Set up an RPC endpoint that receives browser related calls. The
@@ -40,7 +42,7 @@ func registerBrowserPeerRPCRouter(mux *router.Router) error {
 	bpRPCServer := newRPCServer()
 	err := bpRPCServer.RegisterName("BrowserPeer", bpHandlers)
 	if err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	bpRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()

View File

@@ -32,6 +32,7 @@ import (
 	mux "github.com/gorilla/mux"
 	"github.com/minio/minio-go/pkg/policy"
 	"github.com/minio/minio-go/pkg/set"
+	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
 )

@@ -40,7 +41,7 @@ import (
 func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, queryParams url.Values) (s3Error APIErrorCode) {
 	// Verify if bucket actually exists
 	if err := checkBucketExist(bucket, newObjectLayerFn()); err != nil {
-		err = errorCause(err)
+		err = errors.Cause(err)
 		switch err.(type) {
 		case BucketNameInvalid:
 			// Return error for invalid bucket name.
@@ -328,7 +329,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			deletedObjects = append(deletedObjects, object)
 			continue
 		}
-		if _, ok := errorCause(err).(ObjectNotFound); ok {
+		if _, ok := errors.Cause(err).(ObjectNotFound); ok {
 			// If the object is not found it should be
 			// accounted as deleted as per S3 spec.
 			deletedObjects = append(deletedObjects, object)

View File

@@ -28,6 +28,7 @@ import (
 	"time"

 	"github.com/gorilla/mux"
+	"github.com/minio/minio/pkg/errors"
 )

 const (
@@ -65,13 +66,13 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
 	// Attempt to successfully load notification config.
 	nConfig, err := loadNotificationConfig(bucket, objAPI)
-	if err != nil && errorCause(err) != errNoSuchNotifications {
+	if err != nil && errors.Cause(err) != errNoSuchNotifications {
 		errorIf(err, "Unable to read notification configuration.")
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
 	}
 	// For no notifications we write a dummy XML.
-	if errorCause(err) == errNoSuchNotifications {
+	if errors.Cause(err) == errNoSuchNotifications {
 		// Complies with the s3 behavior in this regard.
 		nConfig = &notificationConfig{}
 	}

View File

@@ -24,6 +24,7 @@ import (
 	"sync"

 	"github.com/minio/minio-go/pkg/policy"
+	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
 )

@@ -88,7 +89,7 @@ func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]policy.Bucke
 	buckets, err := objAPI.ListBuckets()
 	if err != nil {
 		errorIf(err, "Unable to list buckets.")
-		return nil, errorCause(err)
+		return nil, errors.Cause(err)
 	}

 	policies = make(map[string]policy.BucketAccessPolicy)
@@ -99,7 +100,7 @@ func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]policy.Bucke
 		if pErr != nil {
 			// net.Dial fails for rpc client or any
 			// other unexpected errors during net.Dial.
-			if !isErrIgnored(pErr, errDiskNotFound) {
+			if !errors.IsErrIgnored(pErr, errDiskNotFound) {
 				if !isErrBucketPolicyNotFound(pErr) {
 					pErrs = append(pErrs, pErr)
 				}
@@ -162,7 +163,7 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader
 			return nil, BucketPolicyNotFound{Bucket: bucket}
 		}
 		errorIf(err, "Unable to load policy for the bucket %s.", bucket)
-		return nil, errorCause(err)
+		return nil, errors.Cause(err)
 	}

 	return &buffer, nil
@@ -199,7 +200,7 @@ func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
 	err := objAPI.DeleteObject(minioMetaBucket, policyPath)
 	if err != nil {
 		errorIf(err, "Unable to remove bucket-policy on bucket %s.", bucket)
-		err = errorCause(err)
+		err = errors.Cause(err)
 		if _, ok := err.(ObjectNotFound); ok {
 			return BucketPolicyNotFound{Bucket: bucket}
 		}
@@ -226,12 +227,12 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAcces
 	hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
 	if err != nil {
 		errorIf(err, "Unable to set policy for the bucket %s", bucket)
-		return errorCause(err)
+		return errors.Cause(err)
 	}

 	if _, err = objAPI.PutObject(minioMetaBucket, policyPath, hashReader, nil); err != nil {
 		errorIf(err, "Unable to set policy for the bucket %s", bucket)
-		return errorCause(err)
+		return errors.Cause(err)
 	}
 	return nil
 }

View File

@@ -16,11 +16,13 @@

 package cmd

+import "go/build"
+
 // DO NOT EDIT THIS FILE DIRECTLY. These are build-time constants
 // set through buildscripts/gen-ldflags.go.
 var (
 	// GOPATH - GOPATH value at the time of build.
-	GOPATH = ""
+	GOPATH = build.Default.GOPATH

 	// Go get development tag.
 	goGetTag = "DEVELOPMENT.GOGET"
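GOPATH is no longer baked in as an empty ldflags placeholder; it now defaults to whatever go/build resolves at compile time. A one-liner demonstrating the stdlib behavior (since Go 1.8, build.Default.GOPATH falls back to the per-user default such as $HOME/go when the environment variable is unset):

package main

import (
	"fmt"
	"go/build"
)

func main() {
	// Resolves $GOPATH, or the per-user default when it is unset.
	fmt.Println(build.Default.GOPATH)
}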

View File

@@ -19,6 +19,8 @@ package cmd
 import (
 	"hash"
 	"io"
+
+	"github.com/minio/minio/pkg/errors"
 )

 // CreateFile creates a new bitrot encoded file spread over all available disks. CreateFile will create
@@ -26,14 +28,14 @@ import (
 // be used to protect the erasure encoded file.
 func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer []byte, algorithm BitrotAlgorithm, writeQuorum int) (f ErasureFileInfo, err error) {
 	if !algorithm.Available() {
-		return f, traceError(errBitrotHashAlgoInvalid)
+		return f, errors.Trace(errBitrotHashAlgoInvalid)
 	}
 	f.Checksums = make([][]byte, len(s.disks))
 	hashers := make([]hash.Hash, len(s.disks))
 	for i := range hashers {
 		hashers[i] = algorithm.New()
 	}
-	errChans, errors := make([]chan error, len(s.disks)), make([]error, len(s.disks))
+	errChans, errs := make([]chan error, len(s.disks)), make([]error, len(s.disks))
 	for i := range errChans {
 		errChans[i] = make(chan error, 1) // create buffered channel to let finished go-routines die early
 	}
@@ -53,19 +55,19 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
 				return f, err
 			}
 		} else {
-			return f, traceError(err)
+			return f, errors.Trace(err)
 		}

 		for i := range errChans { // span workers
 			go erasureAppendFile(s.disks[i], volume, path, hashers[i], blocks[i], errChans[i])
 		}
 		for i := range errChans { // what until all workers are finished
-			errors[i] = <-errChans[i]
+			errs[i] = <-errChans[i]
 		}
-		if err = reduceWriteQuorumErrs(errors, objectOpIgnoredErrs, writeQuorum); err != nil {
+		if err = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, writeQuorum); err != nil {
 			return f, err
 		}
-		s.disks = evalDisks(s.disks, errors)
+		s.disks = evalDisks(s.disks, errs)
 		f.Size += int64(n)
 	}

@@ -83,7 +85,7 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
 // the hash of the written data. It sends the write error (or nil) over the error channel.
 func erasureAppendFile(disk StorageAPI, volume, path string, hash hash.Hash, buf []byte, errChan chan<- error) {
 	if disk == OfflineDisk {
-		errChan <- traceError(errDiskNotFound)
+		errChan <- errors.Trace(errDiskNotFound)
 		return
 	}
 	err := disk.AppendFile(volume, path, buf)
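Besides swapping the helper calls, the hunk above renames the local slice errors to errs. That rename is forced by the new import: a local variable named errors would shadow the package identifier, and errors.Trace inside the same scope would no longer compile. A tiny illustration of the shadowing rule, using the stdlib errors package for brevity:

package main

import (
	"errors"
	"fmt"
)

func main() {
	errs := []error{errors.New("disk offline")} // fine: package identifier untouched
	// errors := []error{}                      // would shadow the import; any
	//                                          // errors.New below this line
	//                                          // would fail to compile
	fmt.Println(len(errs)) // prints: 1
}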

View File

@@ -20,6 +20,8 @@ import (
 	"fmt"
 	"hash"
 	"strings"
+
+	"github.com/minio/minio/pkg/errors"
 )

 // HealFile tries to reconstruct an erasure-coded file spread over all
@@ -48,7 +50,7 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
 	f ErasureFileInfo, err error) {

 	if !alg.Available() {
-		return f, traceError(errBitrotHashAlgoInvalid)
+		return f, errors.Trace(errBitrotHashAlgoInvalid)
 	}

 	// Initialization
@@ -144,7 +146,7 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
 		// If all disks had write errors we quit.
 		if !writeSucceeded {
 			// build error from all write errors
-			return f, traceError(joinWriteErrors(writeErrors))
+			return f, errors.Trace(joinWriteErrors(writeErrors))
 		}
 	}

View File

@@ -18,6 +18,8 @@ package cmd

 import (
 	"io"
+
+	"github.com/minio/minio/pkg/errors"
 )

 // ReadFile reads as much data as requested from the file under the given volume and path and writes the data to the provided writer.
@@ -25,13 +27,13 @@ import (
 // up to the given length. If parts of the file are corrupted ReadFile tries to reconstruct the data.
 func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset, length int64, totalLength int64, checksums [][]byte, algorithm BitrotAlgorithm, blocksize int64) (f ErasureFileInfo, err error) {
 	if offset < 0 || length < 0 {
-		return f, traceError(errUnexpected)
+		return f, errors.Trace(errUnexpected)
 	}
 	if offset+length > totalLength {
-		return f, traceError(errUnexpected)
+		return f, errors.Trace(errUnexpected)
 	}
 	if !algorithm.Available() {
-		return f, traceError(errBitrotHashAlgoInvalid)
+		return f, errors.Trace(errBitrotHashAlgoInvalid)
 	}

 	f.Checksums = make([][]byte, len(s.disks))
@@ -66,7 +68,7 @@ func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
 		}
 		err = s.readConcurrent(volume, path, blockOffset, blocks, verifiers, errChans)
 		if err != nil {
-			return f, traceError(errXLReadQuorum)
+			return f, errors.Trace(errXLReadQuorum)
 		}

 		writeLength := blocksize - startOffset
@@ -150,7 +152,7 @@ func erasureReadBlocksConcurrent(disks []StorageAPI, volume, path string, offset
 // It sends the returned error through the error channel.
 func erasureReadFromFile(disk StorageAPI, volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier, errChan chan<- error) {
 	if disk == OfflineDisk {
-		errChan <- traceError(errDiskNotFound)
+		errChan <- errors.Trace(errDiskNotFound)
 		return
 	}
 	_, err := disk.ReadFile(volume, path, offset, buffer, verifier)

View File

@@ -21,6 +21,7 @@ import (
 	"io"

 	"github.com/klauspost/reedsolomon"
+	"github.com/minio/minio/pkg/errors"
 )

 // getDataBlockLen - get length of data blocks from encoded blocks.
@@ -38,17 +39,17 @@ func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int {
 func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
 	// Offset and out size cannot be negative.
 	if offset < 0 || length < 0 {
-		return 0, traceError(errUnexpected)
+		return 0, errors.Trace(errUnexpected)
 	}

 	// Do we have enough blocks?
 	if len(enBlocks) < dataBlocks {
-		return 0, traceError(reedsolomon.ErrTooFewShards)
+		return 0, errors.Trace(reedsolomon.ErrTooFewShards)
 	}

 	// Do we have enough data?
 	if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
-		return 0, traceError(reedsolomon.ErrShortData)
+		return 0, errors.Trace(reedsolomon.ErrShortData)
 	}

 	// Counter to decrement total left to write.
@@ -76,7 +77,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
 		if write < int64(len(block)) {
 			n, err := io.Copy(dst, bytes.NewReader(block[:write]))
 			if err != nil {
-				return 0, traceError(err)
+				return 0, errors.Trace(err)
 			}
 			totalWritten += n
 			break
@@ -84,7 +85,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
 		// Copy the block.
 		n, err := io.Copy(dst, bytes.NewReader(block))
 		if err != nil {
-			return 0, traceError(err)
+			return 0, errors.Trace(err)
 		}

 		// Decrement output size.

View File

@@ -21,6 +21,7 @@ import (
 	"hash"

 	"github.com/klauspost/reedsolomon"
+	"github.com/minio/minio/pkg/errors"
 )

 // OfflineDisk represents an unavailable disk.
@@ -46,7 +47,7 @@ type ErasureStorage struct {
 func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int) (s ErasureStorage, err error) {
 	erasure, err := reedsolomon.New(dataBlocks, parityBlocks)
 	if err != nil {
-		return s, traceErrorf("failed to create erasure coding: %v", err)
+		return s, errors.Tracef("failed to create erasure coding: %v", err)
 	}
 	s = ErasureStorage{
 		disks: make([]StorageAPI, len(disks)),
@@ -63,10 +64,10 @@ func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int) (s Eras
 func (s *ErasureStorage) ErasureEncode(data []byte) ([][]byte, error) {
 	encoded, err := s.erasure.Split(data)
 	if err != nil {
-		return nil, traceErrorf("failed to split data: %v", err)
+		return nil, errors.Tracef("failed to split data: %v", err)
 	}
 	if err = s.erasure.Encode(encoded); err != nil {
-		return nil, traceErrorf("failed to encode data: %v", err)
+		return nil, errors.Tracef("failed to encode data: %v", err)
 	}
 	return encoded, nil
 }
@@ -76,7 +77,7 @@ func (s *ErasureStorage) ErasureEncode(data []byte) ([][]byte, error) {
 // It returns an error if the decoding failed.
 func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
 	if err := s.erasure.ReconstructData(data); err != nil {
-		return traceErrorf("failed to reconstruct data: %v", err)
+		return errors.Tracef("failed to reconstruct data: %v", err)
 	}
 	return nil
 }
@@ -85,7 +86,7 @@ func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
 // It returns an error if the decoding failed.
 func (s *ErasureStorage) ErasureDecodeDataAndParityBlocks(data [][]byte) error {
 	if err := s.erasure.Reconstruct(data); err != nil {
-		return traceErrorf("failed to reconstruct data: %v", err)
+		return errors.Tracef("failed to reconstruct data: %v", err)
 	}
 	return nil
 }

View File

@@ -1,158 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package cmd
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-)
-
-// Holds the current directory path. Used for trimming path in traceError()
-var rootPath string
-
-// Figure out the rootPath
-func initError() {
-	// Root path is automatically determined from the calling function's source file location.
-	// Catch the calling function's source file path.
-	_, file, _, _ := runtime.Caller(1)
-	// Save the directory alone.
-	rootPath = filepath.Dir(file)
-}
-
-// Represents a stack frame in the stack trace.
-type traceInfo struct {
-	file string // File where error occurred
-	line int    // Line where error occurred
-	name string // Name of the function where error occurred
-}
-
-// Error - error type containing cause and the stack trace.
-type Error struct {
-	e     error       // Holds the cause error
-	trace []traceInfo // stack trace
-	errs  []error     // Useful for XL to hold errors from all disks
-}
-
-// Implement error interface.
-func (e Error) Error() string {
-	return e.e.Error()
-}
-
-// Trace - returns stack trace.
-func (e Error) Trace() []string {
-	var traceArr []string
-	for _, info := range e.trace {
-		traceArr = append(traceArr, fmt.Sprintf("%s:%d:%s",
-			info.file, info.line, info.name))
-	}
-	return traceArr
-}
-
-// NewStorageError - return new Error type.
-func traceError(e error, errs ...error) error {
-	if e == nil {
-		return nil
-	}
-	err := &Error{}
-	err.e = e
-	err.errs = errs
-
-	stack := make([]uintptr, 40)
-	length := runtime.Callers(2, stack)
-	if length > len(stack) {
-		length = len(stack)
-	}
-	stack = stack[:length]
-
-	for _, pc := range stack {
-		pc = pc - 1
-		fn := runtime.FuncForPC(pc)
-		file, line := fn.FileLine(pc)
-		name := fn.Name()
-		if hasSuffix(name, "ServeHTTP") {
-			break
-		}
-		if hasSuffix(name, "runtime.") {
-			break
-		}
-
-		file = strings.TrimPrefix(file, rootPath+string(os.PathSeparator))
-		name = strings.TrimPrefix(name, "github.com/minio/minio/cmd.")
-		err.trace = append(err.trace, traceInfo{file, line, name})
-	}
-
-	return err
-}
-
-// Returns the underlying cause error.
-func errorCause(err error) error {
-	if e, ok := err.(*Error); ok {
-		err = e.e
-	}
-	return err
-}
-
-// Returns slice of underlying cause error.
-func errorsCause(errs []error) []error {
-	cerrs := make([]error, len(errs))
-	for i, err := range errs {
-		if err == nil {
-			continue
-		}
-		cerrs[i] = errorCause(err)
-	}
-	return cerrs
-}
-
-// Collection of basic errors.
-var baseErrs = []error{
-	errDiskNotFound,
-	errFaultyDisk,
-	errFaultyRemoteDisk,
-}
-
-var baseIgnoredErrs = baseErrs
-
-// isErrIgnored returns whether given error is ignored or not.
-func isErrIgnored(err error, ignoredErrs ...error) bool {
-	err = errorCause(err)
-	for _, ignoredErr := range ignoredErrs {
-		if ignoredErr == err {
-			return true
-		}
-	}
-	return false
-}
-
-// isErr returns whether given error is exact error.
-func isErr(err error, errs ...error) bool {
-	err = errorCause(err)
-	for _, exactErr := range errs {
-		if err == exactErr {
-			return true
-		}
-	}
-	return false
-}
-
-// traceErrorf behaves like fmt.Errorf but also traces the returned error.
-func traceErrorf(format string, args ...interface{}) error {
-	return traceError(fmt.Errorf(format, args...))
-}
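The deleted file above is the whole tracer. Judging by the rewritten call sites in this commit, its helpers map one-to-one onto exported functions of the new package: traceError → errors.Trace, traceErrorf → errors.Tracef, errorCause → errors.Cause, isErrIgnored → errors.IsErrIgnored. The contract the callers rely on is captured by this trimmed-down sketch (a reimplementation for illustration only; the real pkg/errors also records the stack trace, as cmd/errors.go did):

package main

import (
	"errors"
	"fmt"
)

// traced wraps a cause error; it stands in for the package's Error type.
type traced struct{ cause error }

func (t *traced) Error() string { return t.cause.Error() }

// trace mirrors errors.Trace: nil stays nil, anything else gets wrapped.
func trace(err error) error {
	if err == nil {
		return nil
	}
	return &traced{cause: err}
}

// cause mirrors errors.Cause: unwrap when wrapped, pass through otherwise.
func cause(err error) error {
	if t, ok := err.(*traced); ok {
		return t.cause
	}
	return err
}

func main() {
	sentinel := errors.New("disk not found")
	err := trace(sentinel)
	fmt.Println(cause(err) == sentinel) // prints: true
}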

View File

@@ -27,6 +27,7 @@ import (
 	"sync"

 	"github.com/Sirupsen/logrus"
+	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
 )

@@ -378,7 +379,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
 		// 'errNoSuchNotifications'. This is default when no
 		// bucket notifications are found on the bucket.
 		if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
-			return nil, traceError(errNoSuchNotifications)
+			return nil, errors.Trace(errNoSuchNotifications)
 		}
 		errorIf(err, "Unable to load bucket-notification for bucket %s", bucket)
 		// Returns error for other errors.
@@ -387,7 +388,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
 	// if `notifications.xml` is empty we should return NoSuchNotifications.
 	if buffer.Len() == 0 {
-		return nil, traceError(errNoSuchNotifications)
+		return nil, errors.Trace(errNoSuchNotifications)
 	}

 	// Unmarshal notification bytes.
@@ -395,7 +396,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
 	notificationCfg := &notificationConfig{}
 	// Unmarshal notification bytes only if we read data.
 	if err = xml.Unmarshal(notificationConfigBytes, notificationCfg); err != nil {
-		return nil, traceError(err)
+		return nil, errors.Trace(err)
 	}

 	// Return success.
@@ -429,7 +430,7 @@ func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, er
 		// 'errNoSuchNotifications'. This is default when no
 		// bucket listeners are found on the bucket
 		if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
-			return nil, traceError(errNoSuchNotifications)
+			return nil, errors.Trace(errNoSuchNotifications)
 		}
 		errorIf(err, "Unable to load bucket-listeners for bucket %s", bucket)
 		// Returns error for other errors.
@@ -438,14 +439,14 @@ func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, er
 	// if `listener.json` is empty we should return NoSuchNotifications.
 	if buffer.Len() == 0 {
-		return nil, traceError(errNoSuchNotifications)
+		return nil, errors.Trace(errNoSuchNotifications)
 	}

 	var lCfg []listenerConfig
 	lConfigBytes := buffer.Bytes()
 	if err = json.Unmarshal(lConfigBytes, &lCfg); err != nil {
 		errorIf(err, "Unable to unmarshal listener config from JSON.")
-		return nil, traceError(err)
+		return nil, errors.Trace(err)
 	}

 	// Return success.
@@ -552,13 +553,13 @@ func removeListenerConfig(bucket string, objAPI ObjectLayer) error {
 func loadNotificationAndListenerConfig(bucketName string, objAPI ObjectLayer) (nCfg *notificationConfig, lCfg []listenerConfig, err error) {
 	// Loads notification config if any.
 	nCfg, err = loadNotificationConfig(bucketName, objAPI)
-	if err != nil && !isErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
+	if err != nil && !errors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
 		return nil, nil, err
 	}

 	// Loads listener config if any.
 	lCfg, err = loadListenerConfig(bucketName, objAPI)
-	if err != nil && !isErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
+	if err != nil && !errors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
 		return nil, nil, err
 	}
 	return nCfg, lCfg, nil

View File

@@ -23,6 +23,8 @@ import (
 	"reflect"
 	"testing"
 	"time"
+
+	"github.com/minio/minio/pkg/errors"
 )

 // Test InitEventNotifier with faulty disks
@@ -71,7 +73,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
 	}
 	// Test initEventNotifier() with faulty disks
 	for i := 1; i <= 3; i++ {
-		if err := initEventNotifier(xl); errorCause(err) != errFaultyDisk {
+		if err := initEventNotifier(xl); errors.Cause(err) != errFaultyDisk {
 			t.Fatal("Unexpected error:", err)
 		}
 	}

View File

@@ -25,6 +25,7 @@ import (
 	"reflect"
 	"sync"

+	errors2 "github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/lock"
 )

@@ -123,10 +124,10 @@ func (f *formatConfigV1) CheckFS() error {
 // if reading format.json fails with io.EOF.
 func (f *formatConfigV1) LoadFormat(lk *lock.LockedFile) error {
 	_, err := f.ReadFrom(lk)
-	if errorCause(err) == io.EOF {
+	if errors2.Cause(err) == io.EOF {
 		// No data on disk `format.json` still empty
 		// treat it as unformatted disk.
-		return traceError(errUnformattedDisk)
+		return errors2.Trace(errUnformattedDisk)
 	}
 	return err
 }
@@ -136,14 +137,14 @@ func (f *formatConfigV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
 	var fbytes []byte
 	fbytes, err = json.Marshal(f)
 	if err != nil {
-		return 0, traceError(err)
+		return 0, errors2.Trace(err)
 	}
 	if err = lk.Truncate(0); err != nil {
-		return 0, traceError(err)
+		return 0, errors2.Trace(err)
 	}
 	_, err = lk.Write(fbytes)
 	if err != nil {
-		return 0, traceError(err)
+		return 0, errors2.Trace(err)
 	}
 	return int64(len(fbytes)), nil
 }
@@ -152,18 +153,18 @@ func (f *formatConfigV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
 	var fbytes []byte
 	fi, err := lk.Stat()
 	if err != nil {
-		return 0, traceError(err)
+		return 0, errors2.Trace(err)
 	}
 	fbytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
 	if err != nil {
-		return 0, traceError(err)
+		return 0, errors2.Trace(err)
 	}
 	if len(fbytes) == 0 {
-		return 0, traceError(io.EOF)
+		return 0, errors2.Trace(io.EOF)
 	}
 	// Decode `format.json`.
 	if err = json.Unmarshal(fbytes, f); err != nil {
-		return 0, traceError(err)
+		return 0, errors2.Trace(err)
 	}
 	return int64(len(fbytes)), nil
 }
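This file binds the new package under the alias errors2 rather than the plain errors identifier, presumably to keep that name free; Go import aliasing makes the two usable side by side. A minimal sketch, assuming only the errors2.Trace and errors2.Cause calls seen above:

package main

import (
	"errors"
	"fmt"

	errors2 "github.com/minio/minio/pkg/errors"
)

func main() {
	// The stdlib package and the tracer coexist under different names.
	err := errors2.Trace(errors.New("unformatted disk"))
	fmt.Println(errors2.Cause(err)) // prints: unformatted disk
}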

View File

@@ -23,6 +23,7 @@ import (
 	"path/filepath"
 	"testing"

+	errors2 "github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/lock"
 )
@@ -756,7 +757,7 @@ func TestFSCheckFormatFSErr(t *testing.T) {
 			t.Errorf("Test %d: Should fail with expected %s, got nil", i+1, testCase.formatCheckErr)
 		}
 		if err != nil && !testCase.shouldPass {
-			if errorCause(err).Error() != testCase.formatCheckErr.Error() {
+			if errors2.Cause(err).Error() != testCase.formatCheckErr.Error() {
 				t.Errorf("Test %d: Should fail with expected %s, got %s", i+1, testCase.formatCheckErr, err)
 			}
 		}

View File

@@ -22,6 +22,8 @@ import (
 	"os"
 	pathutil "path"
 	"runtime"
+
+	"github.com/minio/minio/pkg/errors"
 )

 // Removes only the file at given path does not remove
@@ -29,11 +31,11 @@ import (
 // windows automatically.
 func fsRemoveFile(filePath string) (err error) {
 	if filePath == "" {
-		return traceError(errInvalidArgument)
+		return errors.Trace(errInvalidArgument)
 	}

 	if err = checkPathLength(filePath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	if err = os.Remove((filePath)); err != nil {
@@ -47,20 +49,20 @@ func fsRemoveFile(filePath string) (err error) {
 // long paths for windows automatically.
 func fsRemoveAll(dirPath string) (err error) {
 	if dirPath == "" {
-		return traceError(errInvalidArgument)
+		return errors.Trace(errInvalidArgument)
 	}

 	if err = checkPathLength(dirPath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	if err = os.RemoveAll(dirPath); err != nil {
 		if os.IsPermission(err) {
-			return traceError(errVolumeAccessDenied)
+			return errors.Trace(errVolumeAccessDenied)
 		} else if isSysErrNotEmpty(err) {
-			return traceError(errVolumeNotEmpty)
+			return errors.Trace(errVolumeNotEmpty)
 		}
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	return nil
@@ -70,20 +72,20 @@ func fsRemoveAll(dirPath string) (err error) {
 // paths for windows automatically.
 func fsRemoveDir(dirPath string) (err error) {
 	if dirPath == "" {
-		return traceError(errInvalidArgument)
+		return errors.Trace(errInvalidArgument)
 	}

 	if err = checkPathLength(dirPath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	if err = os.Remove((dirPath)); err != nil {
 		if os.IsNotExist(err) {
-			return traceError(errVolumeNotFound)
+			return errors.Trace(errVolumeNotFound)
 		} else if isSysErrNotEmpty(err) {
-			return traceError(errVolumeNotEmpty)
+			return errors.Trace(errVolumeNotEmpty)
 		}
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	return nil
@@ -93,15 +95,15 @@ func fsRemoveDir(dirPath string) (err error) {
 // if it doesn't exist.
 func fsMkdirAll(dirPath string) (err error) {
 	if dirPath == "" {
-		return traceError(errInvalidArgument)
+		return errors.Trace(errInvalidArgument)
 	}

 	if err = checkPathLength(dirPath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	if err = os.MkdirAll(dirPath, 0777); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	return nil
@@ -113,27 +115,27 @@ func fsMkdirAll(dirPath string) (err error) {
 // are handled automatically.
 func fsMkdir(dirPath string) (err error) {
 	if dirPath == "" {
-		return traceError(errInvalidArgument)
+		return errors.Trace(errInvalidArgument)
 	}

 	if err = checkPathLength(dirPath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	if err = os.Mkdir((dirPath), 0777); err != nil {
 		if os.IsExist(err) {
-			return traceError(errVolumeExists)
+			return errors.Trace(errVolumeExists)
 		} else if os.IsPermission(err) {
-			return traceError(errDiskAccessDenied)
+			return errors.Trace(errDiskAccessDenied)
 		} else if isSysErrNotDir(err) {
 			// File path cannot be verified since
 			// one of the parents is a file.
-			return traceError(errDiskAccessDenied)
+			return errors.Trace(errDiskAccessDenied)
 		} else if isSysErrPathNotFound(err) {
 			// Add specific case for windows.
-			return traceError(errDiskAccessDenied)
+			return errors.Trace(errDiskAccessDenied)
 		}
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	return nil
@@ -146,14 +148,14 @@ func fsMkdir(dirPath string) (err error) {
 // fsStatFileDir, fsStatFile, fsStatDir.
 func fsStat(statLoc string) (os.FileInfo, error) {
 	if statLoc == "" {
-		return nil, traceError(errInvalidArgument)
+		return nil, errors.Trace(errInvalidArgument)
 	}
 	if err := checkPathLength(statLoc); err != nil {
-		return nil, traceError(err)
+		return nil, errors.Trace(err)
 	}
 	fi, err := os.Stat((statLoc))
 	if err != nil {
-		return nil, traceError(err)
+		return nil, errors.Trace(err)
 	}

 	return fi, nil
@@ -163,17 +165,17 @@ func fsStat(statLoc string) (os.FileInfo, error) {
 func fsStatVolume(volume string) (os.FileInfo, error) {
 	fi, err := fsStat(volume)
 	if err != nil {
-		err = errorCause(err)
+		err = errors.Cause(err)
 		if os.IsNotExist(err) {
-			return nil, traceError(errVolumeNotFound)
+			return nil, errors.Trace(errVolumeNotFound)
 		} else if os.IsPermission(err) {
-			return nil, traceError(errVolumeAccessDenied)
+			return nil, errors.Trace(errVolumeAccessDenied)
 		}
-		return nil, traceError(err)
+		return nil, errors.Trace(err)
 	}

 	if !fi.IsDir() {
-		return nil, traceError(errVolumeAccessDenied)
+		return nil, errors.Trace(errVolumeAccessDenied)
 	}

 	return fi, nil
@@ -187,18 +189,18 @@ func osErrToFSFileErr(err error) error {
 	if err == nil {
 		return nil
 	}
-	err = errorCause(err)
+	err = errors.Cause(err)
 	if os.IsNotExist(err) {
-		return traceError(errFileNotFound)
+		return errors.Trace(errFileNotFound)
 	}
 	if os.IsPermission(err) {
-		return traceError(errFileAccessDenied)
+		return errors.Trace(errFileAccessDenied)
 	}
 	if isSysErrNotDir(err) {
-		return traceError(errFileAccessDenied)
+		return errors.Trace(errFileAccessDenied)
 	}
 	if isSysErrPathNotFound(err) {
-		return traceError(errFileNotFound)
+		return errors.Trace(errFileNotFound)
 	}
 	return err
 }
@@ -210,7 +212,7 @@ func fsStatDir(statDir string) (os.FileInfo, error) {
 		return nil, osErrToFSFileErr(err)
 	}
 	if !fi.IsDir() {
-		return nil, traceError(errFileAccessDenied)
+		return nil, errors.Trace(errFileAccessDenied)
 	}
 	return fi, nil
 }
@@ -222,7 +224,7 @@ func fsStatFile(statFile string) (os.FileInfo, error) {
 		return nil, osErrToFSFileErr(err)
 	}
 	if fi.IsDir() {
-		return nil, traceError(errFileAccessDenied)
+		return nil, errors.Trace(errFileAccessDenied)
 	}
 	return fi, nil
 }
@@ -231,10 +233,10 @@ func fsStatFile(statFile string) (os.FileInfo, error) {
 // a readable stream and the size of the readable stream.
 func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
 	if readPath == "" || offset < 0 {
-		return nil, 0, traceError(errInvalidArgument)
+		return nil, 0, errors.Trace(errInvalidArgument)
 	}
 	if err := checkPathLength(readPath); err != nil {
-		return nil, 0, traceError(err)
+		return nil, 0, errors.Trace(err)
 	}

 	fr, err := os.Open((readPath))
@@ -245,19 +247,19 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
 	// Stat to get the size of the file at path.
 	st, err := os.Stat((readPath))
 	if err != nil {
-		return nil, 0, traceError(err)
+		return nil, 0, errors.Trace(err)
 	}

 	// Verify if its not a regular file, since subsequent Seek is undefined.
 	if !st.Mode().IsRegular() {
-		return nil, 0, traceError(errIsNotRegular)
+		return nil, 0, errors.Trace(errIsNotRegular)
 	}

 	// Seek to the requested offset.
 	if offset > 0 {
 		_, err = fr.Seek(offset, os.SEEK_SET)
 		if err != nil {
-			return nil, 0, traceError(err)
+			return nil, 0, errors.Trace(err)
 		}
 	}

@@ -268,19 +270,19 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
 // Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer.
 func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
 	if filePath == "" || reader == nil {
-		return 0, traceError(errInvalidArgument)
+		return 0, errors.Trace(errInvalidArgument)
 	}

 	if err := checkPathLength(filePath); err != nil {
-		return 0, traceError(err)
+		return 0, errors.Trace(err)
 	}

 	if err := os.MkdirAll(pathutil.Dir(filePath), 0777); err != nil {
-		return 0, traceError(err)
+		return 0, errors.Trace(err)
 	}

 	if err := checkDiskFree(pathutil.Dir(filePath), fallocSize); err != nil {
-		return 0, traceError(err)
+		return 0, errors.Trace(err)
 	}

 	writer, err := os.OpenFile((filePath), os.O_CREATE|os.O_WRONLY, 0666)
@@ -292,7 +294,7 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
 	// Fallocate only if the size is final object is known.
 	if fallocSize > 0 {
 		if err = fsFAllocate(int(writer.Fd()), 0, fallocSize); err != nil {
-			return 0, traceError(err)
+			return 0, errors.Trace(err)
 		}
 	}

@@ -300,12 +302,12 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
 	if buf != nil {
 		bytesWritten, err = io.CopyBuffer(writer, reader, buf)
 		if err != nil {
-			return 0, traceError(err)
+			return 0, errors.Trace(err)
 		}
 	} else {
 		bytesWritten, err = io.Copy(writer, reader)
 		if err != nil {
-			return 0, traceError(err)
+			return 0, errors.Trace(err)
 		}
 	}
 	return bytesWritten, nil
@@ -314,26 +316,26 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
 // Removes uploadID at destination path.
 func fsRemoveUploadIDPath(basePath, uploadIDPath string) error {
 	if basePath == "" || uploadIDPath == "" {
-		return traceError(errInvalidArgument)
+		return errors.Trace(errInvalidArgument)
 	}
 	if err := checkPathLength(basePath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}
 	if err := checkPathLength(uploadIDPath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	// List all the entries in uploadID.
 	entries, err := readDir(uploadIDPath)
 	if err != nil && err != errFileNotFound {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	// Delete all the entries obtained from previous readdir.
 	for _, entryPath := range entries {
 		err = fsDeleteFile(basePath, pathJoin(uploadIDPath, entryPath))
 		if err != nil && err != errFileNotFound {
-			return traceError(err)
+			return errors.Trace(err)
 		}
 	}

@@ -367,23 +369,23 @@ func fsFAllocate(fd int, offset int64, len int64) (err error) {
 // missing parents if they don't exist.
 func fsRenameFile(sourcePath, destPath string) error {
 	if err := checkPathLength(sourcePath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}
 	if err := checkPathLength(destPath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	// Verify if source path exists.
 	if _, err := os.Stat((sourcePath)); err != nil {
 		return osErrToFSFileErr(err)
 	}

 	if err := os.MkdirAll(pathutil.Dir(destPath), 0777); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}
 	if err := os.Rename((sourcePath), (destPath)); err != nil {
 		if isSysErrCrossDevice(err) {
-			return traceError(fmt.Errorf("%s (%s)->(%s)", errCrossDeviceLink, sourcePath, destPath))
+			return errors.Trace(fmt.Errorf("%s (%s)->(%s)", errCrossDeviceLink, sourcePath, destPath))
 		}
-		return traceError(err)
+		return errors.Trace(err)
 	}
 	return nil
 }
@@ -391,11 +393,11 @@ func fsRenameFile(sourcePath, destPath string) error {
 // fsDeleteFile is a wrapper for deleteFile(), after checking the path length.
 func fsDeleteFile(basePath, deletePath string) error {
 	if err := checkPathLength(basePath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}
 	if err := checkPathLength(deletePath); err != nil {
-		return traceError(err)
+		return errors.Trace(err)
 	}

 	return deleteFile(basePath, deletePath)

View File

@@ -24,6 +24,7 @@ import (
 	"path"
 	"testing"

+	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/lock"
 )

@@ -36,11 +37,11 @@ func TestFSMkdirAll(t *testing.T) {
 	}
 	defer os.RemoveAll(path)

-	if err = fsMkdirAll(""); errorCause(err) != errInvalidArgument {
+	if err = fsMkdirAll(""); errors.Cause(err) != errInvalidArgument {
 		t.Fatal("Unexpected error", err)
 	}

-	if err = fsMkdirAll(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
+	if err = fsMkdirAll(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
 		t.Fatal("Unexpected error", err)
 	}

@@ -63,13 +64,13 @@ func TestFSRenameFile(t *testing.T) {
 	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
 		t.Fatal(err)
 	}
-	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errorCause(err) != errFileNotFound {
+	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNotFound {
 		t.Fatal(err)
 	}
-	if err = fsRenameFile(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errorCause(err) != errFileNameTooLong {
+	if err = fsRenameFile(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNameTooLong {
 		t.Fatal("Unexpected error", err)
 	}
-	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
+	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
 		t.Fatal("Unexpected error", err)
 	}
 }
@@ -84,11 +85,11 @@ func TestFSStats(t *testing.T) {
 	// Setup test environment.

-	if err = fsMkdir(""); errorCause(err) != errInvalidArgument {
+	if err = fsMkdir(""); errors.Cause(err) != errInvalidArgument {
 		t.Fatal("Unexpected error", err)
 	}

-	if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
+	if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
 		t.Fatal("Unexpected error", err)
 	}

@@ -103,7 +104,7 @@ func TestFSStats(t *testing.T) {
 	// Seek back.
 	reader.Seek(0, 0)

-	if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); errorCause(err) != errVolumeExists {
+	if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); errors.Cause(err) != errVolumeExists {
 		t.Fatal("Unexpected error", err)
 	}

@@ -191,11 +192,11 @@ func TestFSStats(t *testing.T) {
 	for i, testCase := range testCases {
 		if testCase.srcPath != "" {
 			if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol,
-				testCase.srcPath)); errorCause(err) != testCase.expectedErr {
+				testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
 				t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
 			}
 		} else {
-			if _, err := fsStatVolume(pathJoin(testCase.srcFSPath, testCase.srcVol)); errorCause(err) != testCase.expectedErr {
+			if _, err := fsStatVolume(pathJoin(testCase.srcFSPath, testCase.srcVol)); errors.Cause(err) != testCase.expectedErr {
 				t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
 			}
 		}
@@ -214,11 +215,11 @@ func TestFSCreateAndOpen(t *testing.T) {
 		t.Fatalf("Unable to create directory, %s", err)
 	}

-	if _, err = fsCreateFile("", nil, nil, 0); errorCause(err) != errInvalidArgument {
+	if _, err = fsCreateFile("", nil, nil, 0); errors.Cause(err) != errInvalidArgument {
 		t.Fatal("Unexpected error", err)
 	}

-	if _, _, err = fsOpenFile("", -1); errorCause(err) != errInvalidArgument {
+	if _, _, err = fsOpenFile("", -1); errors.Cause(err) != errInvalidArgument {
 		t.Fatal("Unexpected error", err)
 	}

@@ -252,17 +253,17 @@ func TestFSCreateAndOpen(t *testing.T) {
 	for i, testCase := range testCases {
 		_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, nil, 0)
-		if errorCause(err) != testCase.expectedErr {
+		if errors.Cause(err) != testCase.expectedErr {
 			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
 		}
 		_, _, err = fsOpenFile(pathJoin(path, testCase.srcVol, testCase.srcPath), 0)
-		if errorCause(err) != testCase.expectedErr {
+		if errors.Cause(err) != testCase.expectedErr {
 			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
 		}
 	}

 	// Attempt to open a directory.
-	if _, _, err = fsOpenFile(pathJoin(path), 0); errorCause(err) != errIsNotRegular {
+	if _, _, err = fsOpenFile(pathJoin(path), 0); errors.Cause(err) != errIsNotRegular {
 		t.Fatal("Unexpected error", err)
 	}
 }
@@ -364,7 +365,7 @@ func TestFSDeletes(t *testing.T) {
 	}

 	for i, testCase := range testCases {
-		if err = fsDeleteFile(testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr {
+		if err = fsDeleteFile(testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} }
@ -498,11 +499,11 @@ func TestFSRemoves(t *testing.T) {
for i, testCase := range testCases { for i, testCase := range testCases {
if testCase.srcPath != "" { if testCase.srcPath != "" {
if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr { if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} else { } else {
if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errorCause(err) != testCase.expectedErr { if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Error(err) t.Error(err)
} }
} }
@ -512,11 +513,11 @@ func TestFSRemoves(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if err = fsRemoveAll(""); errorCause(err) != errInvalidArgument { if err = fsRemoveAll(""); errors.Cause(err) != errInvalidArgument {
t.Fatal(err) t.Fatal(err)
} }
if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errorCause(err) != errFileNameTooLong { if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errors.Cause(err) != errFileNameTooLong {
t.Fatal(err) t.Fatal(err)
} }
} }
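
The errFileNameTooLong cases above work because the 300+ character object names exceed the POSIX limit of 255 bytes for a single path component. A rough, self-contained sketch of such a validation; the real helper (checkPathLength) also caps the overall path length:

```go
package main

import (
	"fmt"
	"strings"
)

var errFileNameTooLong = fmt.Errorf("file name too long")

// checkComponentLength rejects any single path component over 255 bytes,
// mirroring the failure mode exercised by the tests above.
func checkComponentLength(path string) error {
	for _, component := range strings.Split(path, "/") {
		if len(component) > 255 {
			return errFileNameTooLong
		}
	}
	return nil
}

func main() {
	long := "my-obj-del-" + strings.Repeat("0", 300)
	fmt.Println(checkComponentLength(long)) // file name too long
	fmt.Println(checkComponentLength("ok")) // <nil>
}
```
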

View File

@ -26,6 +26,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/mimedb"
"github.com/tidwall/gjson" "github.com/tidwall/gjson"
@ -166,15 +167,15 @@ func (m *fsMetaV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
var metadataBytes []byte var metadataBytes []byte
metadataBytes, err = json.Marshal(m) metadataBytes, err = json.Marshal(m)
if err != nil { if err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
if err = lk.Truncate(0); err != nil { if err = lk.Truncate(0); err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
if _, err = lk.Write(metadataBytes); err != nil { if _, err = lk.Write(metadataBytes); err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
// Success. // Success.
@ -222,16 +223,16 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
var fsMetaBuf []byte var fsMetaBuf []byte
fi, err := lk.Stat() fi, err := lk.Stat()
if err != nil { if err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size())) fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil { if err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
if len(fsMetaBuf) == 0 { if len(fsMetaBuf) == 0 {
return 0, traceError(io.EOF) return 0, errors.Trace(io.EOF)
} }
// obtain version. // obtain version.
@ -243,7 +244,7 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
// Verify if the format is valid, return corrupted format // Verify if the format is valid, return corrupted format
// for unrecognized formats. // for unrecognized formats.
if !isFSMetaValid(m.Version, m.Format) { if !isFSMetaValid(m.Version, m.Format) {
return 0, traceError(errCorruptedFormat) return 0, errors.Trace(errCorruptedFormat)
} }
// obtain metadata. // obtain metadata.
@ -278,9 +279,9 @@ func checkLockedValidFormatFS(fsPath string) (*lock.RLockedFile, error) {
if os.IsNotExist(err) { if os.IsNotExist(err) {
// If format.json not found then // If format.json not found then
// it's an unformatted disk. // it's an unformatted disk.
return nil, traceError(errUnformattedDisk) return nil, errors.Trace(errUnformattedDisk)
} }
return nil, traceError(err) return nil, errors.Trace(err)
} }
var format = &formatConfigV1{} var format = &formatConfigV1{}
@ -296,7 +297,7 @@ func checkLockedValidFormatFS(fsPath string) (*lock.RLockedFile, error) {
} }
// Always return read lock here and should be closed by the caller. // Always return read lock here and should be closed by the caller.
return rlk, traceError(err) return rlk, errors.Trace(err)
} }
// Creates a new format.json if unformatted. // Creates a new format.json if unformatted.
@ -307,7 +308,7 @@ func createFormatFS(fsPath string) error {
// file stored in minioMetaBucket(.minio.sys) directory. // file stored in minioMetaBucket(.minio.sys) directory.
lk, err := lock.TryLockedOpenFile((fsFormatPath), os.O_RDWR|os.O_CREATE, 0600) lk, err := lock.TryLockedOpenFile((fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil { if err != nil {
return traceError(err) return errors.Trace(err)
} }
// Close the locked file upon return. // Close the locked file upon return.
defer lk.Close() defer lk.Close()
@ -316,7 +317,7 @@ func createFormatFS(fsPath string) error {
// writes the new format.json // writes the new format.json
var format = &formatConfigV1{} var format = &formatConfigV1{}
err = format.LoadFormat(lk) err = format.LoadFormat(lk)
if errorCause(err) == errUnformattedDisk { if errors.Cause(err) == errUnformattedDisk {
_, err = newFSFormat().WriteTo(lk) _, err = newFSFormat().WriteTo(lk)
return err return err
} }
@ -338,10 +339,10 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
// is blocked if attempted, in turn avoiding corruption on // is blocked if attempted, in turn avoiding corruption on
// the backend disk. // the backend disk.
return rlk, nil return rlk, nil
case errorCause(err) == errUnformattedDisk: case errors.Cause(err) == errUnformattedDisk:
if err = createFormatFS(fsPath); err != nil { if err = createFormatFS(fsPath); err != nil {
// Existing write locks detected. // Existing write locks detected.
if errorCause(err) == lock.ErrAlreadyLocked { if errors.Cause(err) == lock.ErrAlreadyLocked {
// Lock already present, sleep and attempt again. // Lock already present, sleep and attempt again.
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
continue continue
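
ReadFrom above deliberately reads the locked fs.json through an io.NewSectionReader instead of the file handle itself, so the shared descriptor's seek offset never moves while other goroutines hold references via the lock pool. A small runnable illustration of the technique (the temp file and JSON payload are placeholders):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

func main() {
	f, err := ioutil.TempFile("", "fsmeta")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	f.WriteString(`{"version":"1.0.2","format":"fs"}`)

	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}
	// SectionReader reads via ReadAt, so the file's own offset is untouched.
	buf, err := ioutil.ReadAll(io.NewSectionReader(f, 0, fi.Size()))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
}
```
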

View File

@ -25,6 +25,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
) )
@ -41,7 +42,7 @@ func (fs fsObjects) isMultipartUpload(bucket, prefix string) bool {
uploadsIDPath := pathJoin(fs.fsPath, bucket, prefix, uploadsJSONFile) uploadsIDPath := pathJoin(fs.fsPath, bucket, prefix, uploadsJSONFile)
_, err := fsStatFile(uploadsIDPath) _, err := fsStatFile(uploadsIDPath)
if err != nil { if err != nil {
if errorCause(err) == errFileNotFound { if errors.Cause(err) == errFileNotFound {
return false return false
} }
errorIf(err, "Unable to access uploads.json "+uploadsIDPath) errorIf(err, "Unable to access uploads.json "+uploadsIDPath)
@ -91,13 +92,13 @@ func (fs fsObjects) addUploadID(bucket, object, uploadID string, initiated time.
_, err := uploadIDs.ReadFrom(rwlk) _, err := uploadIDs.ReadFrom(rwlk)
// For all unexpected errors, we return. // For all unexpected errors, we return.
if err != nil && errorCause(err) != io.EOF { if err != nil && errors.Cause(err) != io.EOF {
return err return err
} }
// If we couldn't read anything, we assume a default // If we couldn't read anything, we assume a default
// (empty) upload info. // (empty) upload info.
if errorCause(err) == io.EOF { if errors.Cause(err) == io.EOF {
uploadIDs = newUploadsV1("fs") uploadIDs = newUploadsV1("fs")
} }
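
The io.EOF branch above turns an empty uploads.json into a fresh record rather than an error, since a zero-length file simply means no uploads have been registered yet. The same read-with-default pattern in isolation (uploadsV1 reduced to a single field):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
)

type uploadsV1 struct {
	Format string `json:"format"`
}

func readUploads(r io.Reader) (uploadsV1, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return uploadsV1{}, err
	}
	if len(data) == 0 {
		// Equivalent of the io.EOF case above: assume a default record.
		return uploadsV1{Format: "fs"}, nil
	}
	var u uploadsV1
	err = json.Unmarshal(data, &u)
	return u, err
}

func main() {
	u, _ := readUploads(bytes.NewReader(nil))
	fmt.Println(u.Format) // fs
}
```
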
@ -117,7 +118,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke
// do not leave a stale uploads.json behind. // do not leave a stale uploads.json behind.
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucketName, objectName)) objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucketName, objectName))
if err := objectMPartPathLock.GetRLock(globalListingTimeout); err != nil { if err := objectMPartPathLock.GetRLock(globalListingTimeout); err != nil {
return nil, false, traceError(err) return nil, false, errors.Trace(err)
} }
defer objectMPartPathLock.RUnlock() defer objectMPartPathLock.RUnlock()
@ -127,7 +128,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke
if err == errFileNotFound || err == errFileAccessDenied { if err == errFileNotFound || err == errFileAccessDenied {
return nil, true, nil return nil, true, nil
} }
return nil, false, traceError(err) return nil, false, errors.Trace(err)
} }
defer fs.rwPool.Close(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath)) defer fs.rwPool.Close(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath))
@ -235,7 +236,7 @@ func (fs fsObjects) listMultipartUploadsCleanup(bucket, prefix, keyMarker, uploa
// For any walk error return right away. // For any walk error return right away.
if walkResult.err != nil { if walkResult.err != nil {
// File not found or Disk not found is a valid case. // File not found or Disk not found is a valid case.
if isErrIgnored(walkResult.err, fsTreeWalkIgnoredErrs...) { if errors.IsErrIgnored(walkResult.err, fsTreeWalkIgnoredErrs...) {
eof = true eof = true
break break
} }
@ -372,7 +373,7 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
uploadsPath := pathJoin(bucket, object, uploadsJSONFile) uploadsPath := pathJoin(bucket, object, uploadsJSONFile)
rwlk, err := fs.rwPool.Create(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath)) rwlk, err := fs.rwPool.Create(pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadsPath))
if err != nil { if err != nil {
return "", toObjectErr(traceError(err), bucket, object) return "", toObjectErr(errors.Trace(err), bucket, object)
} }
defer rwlk.Close() defer rwlk.Close()
@ -380,7 +381,7 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile)
metaFile, err := fs.rwPool.Create(fsMetaPath) metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
return "", toObjectErr(traceError(err), bucket, object) return "", toObjectErr(errors.Trace(err), bucket, object)
} }
defer metaFile.Close() defer metaFile.Close()
@ -490,7 +491,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Validate input data size and it can never be less than zero. // Validate input data size and it can never be less than zero.
if data.Size() < 0 { if data.Size() < 0 {
return pi, toObjectErr(traceError(errInvalidArgument)) return pi, toObjectErr(errors.Trace(errInvalidArgument))
} }
// Hold the lock so that two parallel complete-multipart-uploads // Hold the lock so that two parallel complete-multipart-uploads
@ -505,9 +506,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
uploadsPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile) uploadsPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile)
if _, err := fs.rwPool.Open(uploadsPath); err != nil { if _, err := fs.rwPool.Open(uploadsPath); err != nil {
if err == errFileNotFound || err == errFileAccessDenied { if err == errFileNotFound || err == errFileAccessDenied {
return pi, traceError(InvalidUploadID{UploadID: uploadID}) return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
return pi, toObjectErr(traceError(err), bucket, object) return pi, toObjectErr(errors.Trace(err), bucket, object)
} }
defer fs.rwPool.Close(uploadsPath) defer fs.rwPool.Close(uploadsPath)
@ -518,9 +519,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
rwlk, err := fs.rwPool.Write(fsMetaPath) rwlk, err := fs.rwPool.Write(fsMetaPath)
if err != nil { if err != nil {
if err == errFileNotFound || err == errFileAccessDenied { if err == errFileNotFound || err == errFileAccessDenied {
return pi, traceError(InvalidUploadID{UploadID: uploadID}) return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
return pi, toObjectErr(traceError(err), bucket, object) return pi, toObjectErr(errors.Trace(err), bucket, object)
} }
defer rwlk.Close() defer rwlk.Close()
@ -550,7 +551,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// bytes than specified in request header. // bytes than specified in request header.
if bytesWritten < data.Size() { if bytesWritten < data.Size() {
fsRemoveFile(fsPartPath) fsRemoveFile(fsPartPath)
return pi, traceError(IncompleteBody{}) return pi, errors.Trace(IncompleteBody{})
} }
// Delete temporary part in case of failure. If // Delete temporary part in case of failure. If
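
The IncompleteBody return above fires when fewer bytes arrive than the client declared, which is detected simply by comparing the copied byte count against the expected size. Standalone, with illustrative sizes:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	declaredSize := int64(10)
	body := bytes.NewReader([]byte("abc")) // only 3 bytes actually arrive

	n, err := io.Copy(ioutil.Discard, io.LimitReader(body, declaredSize))
	if err != nil {
		panic(err)
	}
	if n < declaredSize {
		// This is the condition that maps to IncompleteBody{} above.
		fmt.Printf("incomplete body: got %d of %d bytes\n", n, declaredSize)
	}
}
```
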
@ -618,9 +619,9 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
if err != nil { if err != nil {
if err == errFileNotFound || err == errFileAccessDenied { if err == errFileNotFound || err == errFileAccessDenied {
// On Windows oddly this is returned. // On Windows oddly this is returned.
return lpi, traceError(InvalidUploadID{UploadID: uploadID}) return lpi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
return lpi, toObjectErr(traceError(err), bucket, object) return lpi, toObjectErr(errors.Trace(err), bucket, object)
} }
defer fs.rwPool.Close(fsMetaPath) defer fs.rwPool.Close(fsMetaPath)
@ -695,7 +696,7 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
// do not leave a stale uploads.json behind. // do not leave a stale uploads.json behind.
objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object)) objectMPartPathLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object))
if err := objectMPartPathLock.GetRLock(globalListingTimeout); err != nil { if err := objectMPartPathLock.GetRLock(globalListingTimeout); err != nil {
return lpi, traceError(err) return lpi, errors.Trace(err)
} }
defer objectMPartPathLock.RUnlock() defer objectMPartPathLock.RUnlock()
@ -720,7 +721,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, pathutil.Dir(object)) { if fs.parentDirIsObject(bucket, pathutil.Dir(object)) {
return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object) return oi, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(bucket); err != nil {
@ -747,7 +748,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if removeObjectDir { if removeObjectDir {
basePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket) basePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket)
derr := fsDeleteFile(basePath, pathJoin(basePath, object)) derr := fsDeleteFile(basePath, pathJoin(basePath, object))
if derr = errorCause(derr); derr != nil { if derr = errors.Cause(derr); derr != nil {
// In parallel execution, CompleteMultipartUpload could have deleted temporary // In parallel execution, CompleteMultipartUpload could have deleted temporary
// state files/directory, it is safe to ignore errFileNotFound // state files/directory, it is safe to ignore errFileNotFound
if derr != errFileNotFound { if derr != errFileNotFound {
@ -762,9 +763,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
rlk, err := fs.rwPool.Open(fsMetaPathMultipart) rlk, err := fs.rwPool.Open(fsMetaPathMultipart)
if err != nil { if err != nil {
if err == errFileNotFound || err == errFileAccessDenied { if err == errFileNotFound || err == errFileAccessDenied {
return oi, traceError(InvalidUploadID{UploadID: uploadID}) return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
return oi, toObjectErr(traceError(err), bucket, object) return oi, toObjectErr(errors.Trace(err), bucket, object)
} }
// Disallow any parallel abort or complete multipart operations. // Disallow any parallel abort or complete multipart operations.
@ -772,9 +773,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil { if err != nil {
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
if err == errFileNotFound || err == errFileAccessDenied { if err == errFileNotFound || err == errFileAccessDenied {
return oi, traceError(InvalidUploadID{UploadID: uploadID}) return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
return oi, toObjectErr(traceError(err), bucket, object) return oi, toObjectErr(errors.Trace(err), bucket, object)
} }
defer rwlk.Close() defer rwlk.Close()
@ -792,18 +793,18 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
partIdx := fsMeta.ObjectPartIndex(part.PartNumber) partIdx := fsMeta.ObjectPartIndex(part.PartNumber)
if partIdx == -1 { if partIdx == -1 {
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(InvalidPart{}) return oi, errors.Trace(InvalidPart{})
} }
if fsMeta.Parts[partIdx].ETag != part.ETag { if fsMeta.Parts[partIdx].ETag != part.ETag {
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(InvalidPart{}) return oi, errors.Trace(InvalidPart{})
} }
// All parts except the last part have to be at least 5MB. // All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) { if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) {
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(PartTooSmall{ return oi, errors.Trace(PartTooSmall{
PartNumber: part.PartNumber, PartNumber: part.PartNumber,
PartSize: fsMeta.Parts[partIdx].Size, PartSize: fsMeta.Parts[partIdx].Size,
PartETag: part.ETag, PartETag: part.ETag,
@ -821,7 +822,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// renamed to the main name-space. // renamed to the main name-space.
if (i < len(parts)-1) && partSize != fsMeta.Parts[partIdx].Size { if (i < len(parts)-1) && partSize != fsMeta.Parts[partIdx].Size {
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
return oi, traceError(PartsSizeUnequal{}) return oi, errors.Trace(PartsSizeUnequal{})
} }
} }
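
The loop above checks each client-supplied part against the recorded upload: the ETag must match, and every part except the last must meet the 5MB minimum. A toy version of those checks, with hypothetical types standing in for the fsMeta bookkeeping:

```go
package main

import "fmt"

const minPartSize = 5 * 1024 * 1024 // 5 MiB, per S3 multipart semantics

type part struct {
	Number int
	Size   int64
	ETag   string
}

func validateParts(uploaded map[int]part, completed []part) error {
	for i, p := range completed {
		got, ok := uploaded[p.Number]
		if !ok || got.ETag != p.ETag {
			return fmt.Errorf("invalid part %d", p.Number)
		}
		// Only the final part may be smaller than the minimum.
		if i < len(completed)-1 && got.Size < minPartSize {
			return fmt.Errorf("part %d too small: %d bytes", p.Number, got.Size)
		}
	}
	return nil
}

func main() {
	uploaded := map[int]part{
		1: {Number: 1, Size: minPartSize, ETag: "etag-1"},
		2: {Number: 2, Size: 1024, ETag: "etag-2"}, // small, but last
	}
	completed := []part{
		{Number: 1, Size: minPartSize, ETag: "etag-1"},
		{Number: 2, Size: 1024, ETag: "etag-2"},
	}
	fmt.Println(validateParts(uploaded, completed)) // <nil>
}
```
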
@ -831,7 +832,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
metaFile, err := fs.rwPool.Create(fsMetaPath) metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
return oi, toObjectErr(traceError(err), bucket, object) return oi, toObjectErr(errors.Trace(err), bucket, object)
} }
defer metaFile.Close() defer metaFile.Close()
@ -877,9 +878,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil { if err != nil {
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
if err == errFileNotFound { if err == errFileNotFound {
return oi, traceError(InvalidPart{}) return oi, errors.Trace(InvalidPart{})
} }
return oi, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix) return oi, toObjectErr(errors.Trace(err), minioMetaMultipartBucket, partSuffix)
} }
// No need to hold a lock, this is a unique file and will only be written // No need to hold a lock, this is a unique file and will only be written
@ -889,7 +890,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil { if err != nil {
reader.Close() reader.Close()
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
return oi, toObjectErr(traceError(err), bucket, object) return oi, toObjectErr(errors.Trace(err), bucket, object)
} }
_, err = io.CopyBuffer(wfile, reader, buf) _, err = io.CopyBuffer(wfile, reader, buf)
@ -897,7 +898,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
wfile.Close() wfile.Close()
reader.Close() reader.Close()
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)
return oi, toObjectErr(traceError(err), bucket, object) return oi, toObjectErr(errors.Trace(err), bucket, object)
} }
wfile.Close() wfile.Close()
@ -988,7 +989,7 @@ func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error
if removeObjectDir { if removeObjectDir {
basePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket) basePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket)
derr := fsDeleteFile(basePath, pathJoin(basePath, object)) derr := fsDeleteFile(basePath, pathJoin(basePath, object))
if derr = errorCause(derr); derr != nil { if derr = errors.Cause(derr); derr != nil {
// In parallel execution, AbortMultipartUpload could have deleted temporary // In parallel execution, AbortMultipartUpload could have deleted temporary
// state files/directory, it is safe to ignore errFileNotFound // state files/directory, it is safe to ignore errFileNotFound
if derr != errFileNotFound { if derr != errFileNotFound {
@ -1002,9 +1003,9 @@ func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error
fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, fsMetaJSONFile)
if _, err := fs.rwPool.Open(fsMetaPath); err != nil { if _, err := fs.rwPool.Open(fsMetaPath); err != nil {
if err == errFileNotFound || err == errFileAccessDenied { if err == errFileNotFound || err == errFileAccessDenied {
return traceError(InvalidUploadID{UploadID: uploadID}) return errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
return toObjectErr(traceError(err), bucket, object) return toObjectErr(errors.Trace(err), bucket, object)
} }
uploadsPath := pathJoin(bucket, object, uploadsJSONFile) uploadsPath := pathJoin(bucket, object, uploadsJSONFile)
@ -1012,9 +1013,9 @@ func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error
if err != nil { if err != nil {
fs.rwPool.Close(fsMetaPath) fs.rwPool.Close(fsMetaPath)
if err == errFileNotFound || err == errFileAccessDenied { if err == errFileNotFound || err == errFileAccessDenied {
return traceError(InvalidUploadID{UploadID: uploadID}) return errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
return toObjectErr(traceError(err), bucket, object) return toObjectErr(errors.Trace(err), bucket, object)
} }
defer rwlk.Close() defer rwlk.Close()
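
Nearly every return in this file follows the same idiom: trace the low-level sentinel at the failure site, then let toObjectErr map the unwrapped cause to an API-visible error carrying bucket/object context. A reduced stand-in for that translation; the real helper covers many more sentinels:

```go
package main

import "fmt"

var errFileNotFound = fmt.Errorf("file not found")

type ObjectNotFound struct{ Bucket, Object string }

func (e ObjectNotFound) Error() string {
	return fmt.Sprintf("object not found: %s/%s", e.Bucket, e.Object)
}

// toObjectErr here is a sketch; the real version first unwraps the
// traced error with errors.Cause before matching sentinels.
func toObjectErr(err error, bucket, object string) error {
	switch err {
	case nil:
		return nil
	case errFileNotFound:
		return ObjectNotFound{Bucket: bucket, Object: object}
	}
	return err
}

func main() {
	fmt.Println(toObjectErr(errFileNotFound, "photos", "2017/a.jpg"))
}
```
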

View File

@ -22,6 +22,8 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"time" "time"
"github.com/minio/minio/pkg/errors"
) )
func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) { func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
@ -56,7 +58,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
// Check if upload id was already purged. // Check if upload id was already purged.
if err = obj.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil { if err = obj.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil {
err = errorCause(err) err = errors.Cause(err)
if _, ok := err.(InvalidUploadID); !ok { if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
@ -93,7 +95,7 @@ func TestFSCleanupMultipartUpload(t *testing.T) {
// Check if upload id was already purged. // Check if upload id was already purged.
if err = obj.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil { if err = obj.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil {
err = errorCause(err) err = errors.Cause(err)
if _, ok := err.(InvalidUploadID); !ok { if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
@ -122,7 +124,7 @@ func TestFSWriteUploadJSON(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
_, err = obj.NewMultipartUpload(bucketName, objectName, nil) _, err = obj.NewMultipartUpload(bucketName, objectName, nil)
if err != nil { if err != nil {
if _, ok := errorCause(err).(BucketNotFound); !ok { if _, ok := errors.Cause(err).(BucketNotFound); !ok {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
} }
@ -146,7 +148,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
// Test with disk removed. // Test with disk removed.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil { if _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) { if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
} }
@ -184,7 +186,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
_, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum)) _, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum))
if !isSameType(errorCause(err), BucketNotFound{}) { if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
} }
@ -220,7 +222,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); err != nil { if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) { if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
} }
@ -323,7 +325,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if _, err := fs.ListMultipartUploads(bucketName, objectName, "", "", "", 1000); err != nil { if _, err := fs.ListMultipartUploads(bucketName, objectName, "", "", "", 1000); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) { if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
} }

View File

@ -27,6 +27,7 @@ import (
"sort" "sort"
"syscall" "syscall"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
) )
@ -190,7 +191,7 @@ func (fs fsObjects) StorageInfo() StorageInfo {
func (fs fsObjects) getBucketDir(bucket string) (string, error) { func (fs fsObjects) getBucketDir(bucket string) (string, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", traceError(BucketNameInvalid{Bucket: bucket}) return "", errors.Trace(BucketNameInvalid{Bucket: bucket})
} }
bucketDir := pathJoin(fs.fsPath, bucket) bucketDir := pathJoin(fs.fsPath, bucket)
@ -242,12 +243,12 @@ func (fs fsObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// ListBuckets - list all s3 compatible buckets (directories) at fsPath. // ListBuckets - list all s3 compatible buckets (directories) at fsPath.
func (fs fsObjects) ListBuckets() ([]BucketInfo, error) { func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
if err := checkPathLength(fs.fsPath); err != nil { if err := checkPathLength(fs.fsPath); err != nil {
return nil, traceError(err) return nil, errors.Trace(err)
} }
var bucketInfos []BucketInfo var bucketInfos []BucketInfo
entries, err := readDir((fs.fsPath)) entries, err := readDir((fs.fsPath))
if err != nil { if err != nil {
return nil, toObjectErr(traceError(errDiskNotFound)) return nil, toObjectErr(errors.Trace(errDiskNotFound))
} }
for _, entry := range entries { for _, entry := range entries {
@ -330,7 +331,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
var wlk *lock.LockedFile var wlk *lock.LockedFile
wlk, err = fs.rwPool.Write(fsMetaPath) wlk, err = fs.rwPool.Write(fsMetaPath)
if err != nil { if err != nil {
return oi, toObjectErr(traceError(err), srcBucket, srcObject) return oi, toObjectErr(errors.Trace(err), srcBucket, srcObject)
} }
// This close will allow for locks to be synchronized on `fs.json`. // This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close() defer wlk.Close()
@ -395,25 +396,25 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
// Offset cannot be negative. // Offset cannot be negative.
if offset < 0 { if offset < 0 {
return toObjectErr(traceError(errUnexpected), bucket, object) return toObjectErr(errors.Trace(errUnexpected), bucket, object)
} }
// Writer cannot be nil. // Writer cannot be nil.
if writer == nil { if writer == nil {
return toObjectErr(traceError(errUnexpected), bucket, object) return toObjectErr(errors.Trace(errUnexpected), bucket, object)
} }
// If it's a directory request, we return an empty body. // If it's a directory request, we return an empty body.
if hasSuffix(object, slashSeparator) { if hasSuffix(object, slashSeparator) {
_, err = writer.Write([]byte("")) _, err = writer.Write([]byte(""))
return toObjectErr(traceError(err), bucket, object) return toObjectErr(errors.Trace(err), bucket, object)
} }
if bucket != minioMetaBucket { if bucket != minioMetaBucket {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
_, err = fs.rwPool.Open(fsMetaPath) _, err = fs.rwPool.Open(fsMetaPath)
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
return toObjectErr(traceError(err), bucket, object) return toObjectErr(errors.Trace(err), bucket, object)
} }
defer fs.rwPool.Close(fsMetaPath) defer fs.rwPool.Close(fsMetaPath)
} }
@ -438,7 +439,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
// Reply back invalid range if the input offset and length fall out of range. // Reply back invalid range if the input offset and length fall out of range.
if offset > size || offset+length > size { if offset > size || offset+length > size {
return traceError(InvalidRange{offset, length, size}) return errors.Trace(InvalidRange{offset, length, size})
} }
// Allocate a staging buffer. // Allocate a staging buffer.
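
The InvalidRange guard above admits a request only when the window [offset, offset+length) lies within the object. Shown standalone, with a plain error in place of the InvalidRange type:

```go
package main

import "fmt"

func checkRange(offset, length, size int64) error {
	if offset < 0 || length < 0 {
		return fmt.Errorf("unexpected negative range")
	}
	if offset > size || offset+length > size {
		return fmt.Errorf("invalid range: offset=%d length=%d size=%d",
			offset, length, size)
	}
	return nil
}

func main() {
	fmt.Println(checkRange(0, 4, 10)) // <nil>
	fmt.Println(checkRange(8, 4, 10)) // invalid range: window ends past EOF
}
```
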
@ -446,14 +447,14 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
_, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf) _, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf)
return toObjectErr(traceError(err), bucket, object) return toObjectErr(errors.Trace(err), bucket, object)
} }
// getObjectInfo - wrapper that reads object metadata and constructs ObjectInfo. // getObjectInfo - wrapper that reads object metadata and constructs ObjectInfo.
func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error) { func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
fsMeta := fsMetaV1{} fsMeta := fsMetaV1{}
fi, err := fsStatDir(pathJoin(fs.fsPath, bucket, object)) fi, err := fsStatDir(pathJoin(fs.fsPath, bucket, object))
if err != nil && errorCause(err) != errFileAccessDenied { if err != nil && errors.Cause(err) != errFileAccessDenied {
return oi, toObjectErr(err, bucket, object) return oi, toObjectErr(err, bucket, object)
} }
if fi != nil { if fi != nil {
@ -477,7 +478,7 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error
// `fs.json` can be empty due to previously failed // `fs.json` can be empty due to previously failed
// PutObject() transaction, if we arrive at such // PutObject() transaction, if we arrive at such
// a situation we just ignore and continue. // a situation we just ignore and continue.
if errorCause(rerr) != io.EOF { if errors.Cause(rerr) != io.EOF {
return oi, toObjectErr(rerr, bucket, object) return oi, toObjectErr(rerr, bucket, object)
} }
} }
@ -485,7 +486,7 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error
// Ignore if `fs.json` is not available, this is true for pre-existing data. // Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
return oi, toObjectErr(traceError(err), bucket, object) return oi, toObjectErr(errors.Trace(err), bucket, object)
} }
// Stat the file to get file size. // Stat the file to get file size.
@ -501,14 +502,14 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error
func checkBucketAndObjectNamesFS(bucket, object string) error { func checkBucketAndObjectNamesFS(bucket, object string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket}) return errors.Trace(BucketNameInvalid{Bucket: bucket})
} }
// Verify if object is valid. // Verify if object is valid.
if len(object) == 0 { if len(object) == 0 {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object}) return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
} }
if !IsValidObjectPrefix(object) { if !IsValidObjectPrefix(object) {
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object}) return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
} }
return nil return nil
} }
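
checkBucketAndObjectNamesFS leans on IsValidBucketName, whose exact rules are defined elsewhere in the cmd package. As a rough approximation of S3-style naming (the real validator is stricter, e.g. about dots and IP-like names):

```go
package main

import (
	"fmt"
	"regexp"
)

// Approximation only: 3-63 characters, lowercase letters, digits,
// dots and hyphens, starting and ending with a letter or digit.
var bucketRe = regexp.MustCompile(`^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$`)

func isValidBucketName(name string) bool {
	return bucketRe.MatchString(name)
}

func main() {
	fmt.Println(isValidBucketName("photos"))    // true
	fmt.Println(isValidBucketName("fo"))        // false: too short
	fmt.Println(isValidBucketName("My_Bucket")) // false: invalid characters
}
```
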
@ -572,7 +573,7 @@ func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, m
if isObjectDir(object, data.Size()) { if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, path.Dir(object)) { if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object) return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
} }
if err = fsMkdirAll(pathJoin(fs.fsPath, bucket, object)); err != nil { if err = fsMkdirAll(pathJoin(fs.fsPath, bucket, object)); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
@ -590,12 +591,12 @@ func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, m
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, path.Dir(object)) { if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object) return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
} }
// Validate input data size and it can never be less than zero. // Validate input data size and it can never be less than zero.
if data.Size() < 0 { if data.Size() < 0 {
return ObjectInfo{}, traceError(errInvalidArgument) return ObjectInfo{}, errors.Trace(errInvalidArgument)
} }
var wlk *lock.LockedFile var wlk *lock.LockedFile
@ -604,7 +605,7 @@ func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, m
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fsMetaJSONFile) fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fsMetaJSONFile)
wlk, err = fs.rwPool.Create(fsMetaPath) wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) return ObjectInfo{}, toObjectErr(errors.Trace(err), bucket, object)
} }
// This close will allow for locks to be synchronized on `fs.json`. // This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close() defer wlk.Close()
@ -643,7 +644,7 @@ func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, m
// bytes than specified in request header. // bytes than specified in request header.
if bytesWritten < data.Size() { if bytesWritten < data.Size() {
fsRemoveFile(fsTmpObjPath) fsRemoveFile(fsTmpObjPath)
return ObjectInfo{}, traceError(IncompleteBody{}) return ObjectInfo{}, errors.Trace(IncompleteBody{})
} }
// Delete the temporary object in the case of a // Delete the temporary object in the case of a
@ -694,7 +695,7 @@ func (fs fsObjects) DeleteObject(bucket, object string) error {
defer rwlk.Close() defer rwlk.Close()
} }
if lerr != nil && lerr != errFileNotFound { if lerr != nil && lerr != errFileNotFound {
return toObjectErr(traceError(lerr), bucket, object) return toObjectErr(errors.Trace(lerr), bucket, object)
} }
} }
@ -706,7 +707,7 @@ func (fs fsObjects) DeleteObject(bucket, object string) error {
if bucket != minioMetaBucket { if bucket != minioMetaBucket {
// Delete the metadata object. // Delete the metadata object.
err := fsDeleteFile(minioMetaBucketDir, fsMetaPath) err := fsDeleteFile(minioMetaBucketDir, fsMetaPath)
if err != nil && errorCause(err) != errFileNotFound { if err != nil && errors.Cause(err) != errFileNotFound {
return toObjectErr(err, bucket, object) return toObjectErr(err, bucket, object)
} }
} }
@ -747,7 +748,7 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
rlk, err := fs.rwPool.Open(fsMetaPath) rlk, err := fs.rwPool.Open(fsMetaPath)
// Ignore if `fs.json` is not available, this is true for pre-existing data. // Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
return "", toObjectErr(traceError(err), bucket, entry) return "", toObjectErr(errors.Trace(err), bucket, entry)
} }
// If file is not found, we don't need to proceed forward. // If file is not found, we don't need to proceed forward.
@ -761,7 +762,7 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
// Fetch the size of the underlying file. // Fetch the size of the underlying file.
fi, err := rlk.LockedFile.Stat() fi, err := rlk.LockedFile.Stat()
if err != nil { if err != nil {
return "", toObjectErr(traceError(err), bucket, entry) return "", toObjectErr(errors.Trace(err), bucket, entry)
} }
// `fs.json` can be empty due to previously failed // `fs.json` can be empty due to previously failed
@ -775,12 +776,12 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
// make sure the underlying offsets don't move. // make sure the underlying offsets don't move.
fsMetaBuf, err := ioutil.ReadAll(io.NewSectionReader(rlk.LockedFile, 0, fi.Size())) fsMetaBuf, err := ioutil.ReadAll(io.NewSectionReader(rlk.LockedFile, 0, fi.Size()))
if err != nil { if err != nil {
return "", traceError(err) return "", errors.Trace(err)
} }
// Check if FS metadata is valid, if not return error. // Check if FS metadata is valid, if not return error.
if !isFSMetaValid(parseFSVersion(fsMetaBuf), parseFSFormat(fsMetaBuf)) { if !isFSMetaValid(parseFSVersion(fsMetaBuf), parseFSFormat(fsMetaBuf)) {
return "", toObjectErr(traceError(errCorruptedFormat), bucket, entry) return "", toObjectErr(errors.Trace(errCorruptedFormat), bucket, entry)
} }
return extractETag(parseFSMetaMap(fsMetaBuf)), nil return extractETag(parseFSMetaMap(fsMetaBuf)), nil
@ -902,7 +903,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// For any walk error return right away. // For any walk error return right away.
if walkResult.err != nil { if walkResult.err != nil {
// File not found is a valid case. // File not found is a valid case.
if errorCause(walkResult.err) == errFileNotFound { if errors.Cause(walkResult.err) == errFileNotFound {
return loi, nil return loi, nil
} }
return loi, toObjectErr(walkResult.err, bucket, prefix) return loi, toObjectErr(walkResult.err, bucket, prefix)
@ -943,25 +944,25 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// HealObject - no-op for fs. Valid only for XL. // HealObject - no-op for fs. Valid only for XL.
func (fs fsObjects) HealObject(bucket, object string) (int, int, error) { func (fs fsObjects) HealObject(bucket, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{}) return 0, 0, errors.Trace(NotImplemented{})
} }
// HealBucket - no-op for fs, Valid only for XL. // HealBucket - no-op for fs, Valid only for XL.
func (fs fsObjects) HealBucket(bucket string) error { func (fs fsObjects) HealBucket(bucket string) error {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// ListObjectsHeal - list all objects to be healed. Valid only for XL // ListObjectsHeal - list all objects to be healed. Valid only for XL
func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{}) return loi, errors.Trace(NotImplemented{})
} }
// ListBucketsHeal - list all buckets to be healed. Valid only for XL // ListBucketsHeal - list all buckets to be healed. Valid only for XL
func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) { func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) {
return []BucketInfo{}, traceError(NotImplemented{}) return []BucketInfo{}, errors.Trace(NotImplemented{})
} }
func (fs fsObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker, func (fs fsObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{}) return lmi, errors.Trace(NotImplemented{})
} }
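
These XL-only entry points return a traced NotImplemented{} on the FS backend, and callers detect it the way the tests in the next file do: unwrap with errors.Cause, then type-assert. A self-contained sketch with a local stand-in for the cmd package's error type:

```go
package main

import "fmt"

type NotImplemented struct{}

func (NotImplemented) Error() string { return "not implemented" }

// healBucket mimics the FS stubs above; the real code wraps the
// value with errors.Trace before returning it.
func healBucket(bucket string) error {
	return NotImplemented{}
}

func main() {
	err := healBucket("bucket")
	if _, ok := err.(NotImplemented); ok {
		fmt.Println("heal is valid only for the XL backend")
	}
}
```
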

View File

@ -22,6 +22,8 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/minio/minio/pkg/errors"
) )
// Tests for if parent directory is object // Tests for if parent directory is object
@ -178,7 +180,7 @@ func TestFSGetBucketInfo(t *testing.T) {
// Test with a nonexistent bucket // Test with a nonexistent bucket
_, err = fs.GetBucketInfo("a") _, err = fs.GetBucketInfo("a")
if !isSameType(errorCause(err), BucketNameInvalid{}) { if !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("BucketNameInvalid error not returned") t.Fatal("BucketNameInvalid error not returned")
} }
@ -186,7 +188,7 @@ func TestFSGetBucketInfo(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
_, err = fs.GetBucketInfo(bucketName) _, err = fs.GetBucketInfo(bucketName)
if !isSameType(errorCause(err), BucketNotFound{}) { if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("BucketNotFound error not returned") t.Fatal("BucketNotFound error not returned")
} }
} }
@ -209,7 +211,7 @@ func TestFSPutObject(t *testing.T) {
if err == nil { if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist") t.Fatal("Unexpected should fail here, bucket doesn't exist")
} }
if _, ok := errorCause(err).(BucketNotFound); !ok { if _, ok := errors.Cause(err).(BucketNotFound); !ok {
t.Fatalf("Expected error type BucketNotFound, got %#v", err) t.Fatalf("Expected error type BucketNotFound, got %#v", err)
} }
@ -218,7 +220,7 @@ func TestFSPutObject(t *testing.T) {
if err == nil { if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist") t.Fatal("Unexpected should fail here, bucket doesn't exist")
} }
if _, ok := errorCause(err).(BucketNotFound); !ok { if _, ok := errors.Cause(err).(BucketNotFound); !ok {
t.Fatalf("Expected error type BucketNotFound, got %#v", err) t.Fatalf("Expected error type BucketNotFound, got %#v", err)
} }
@ -230,7 +232,7 @@ func TestFSPutObject(t *testing.T) {
if err == nil { if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred") t.Fatal("Unexpected should fail here, backend corruption occurred")
} }
if nerr, ok := errorCause(err).(PrefixAccessDenied); !ok { if nerr, ok := errors.Cause(err).(PrefixAccessDenied); !ok {
t.Fatalf("Expected PrefixAccessDenied, got %#v", err) t.Fatalf("Expected PrefixAccessDenied, got %#v", err)
} else { } else {
if nerr.Bucket != "bucket" { if nerr.Bucket != "bucket" {
@ -245,7 +247,7 @@ func TestFSPutObject(t *testing.T) {
if err == nil { if err == nil {
t.Fatal("Unexpected should fail here, backned corruption occurred") t.Fatal("Unexpected should fail here, backned corruption occurred")
} }
if nerr, ok := errorCause(err).(PrefixAccessDenied); !ok { if nerr, ok := errors.Cause(err).(PrefixAccessDenied); !ok {
t.Fatalf("Expected PrefixAccessDenied, got %#v", err) t.Fatalf("Expected PrefixAccessDenied, got %#v", err)
} else { } else {
if nerr.Bucket != "bucket" { if nerr.Bucket != "bucket" {
@ -272,19 +274,19 @@ func TestFSDeleteObject(t *testing.T) {
obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
// Test with invalid bucket name // Test with invalid bucket name
if err := fs.DeleteObject("fo", objectName); !isSameType(errorCause(err), BucketNameInvalid{}) { if err := fs.DeleteObject("fo", objectName); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with bucket does not exist // Test with bucket does not exist
if err := fs.DeleteObject("foobucket", "fooobject"); !isSameType(errorCause(err), BucketNotFound{}) { if err := fs.DeleteObject("foobucket", "fooobject"); !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with invalid object name // Test with invalid object name
if err := fs.DeleteObject(bucketName, "\\"); !isSameType(errorCause(err), ObjectNameInvalid{}) { if err := fs.DeleteObject(bucketName, "\\"); !isSameType(errors.Cause(err), ObjectNameInvalid{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with object does not exist. // Test with object does not exist.
if err := fs.DeleteObject(bucketName, "foooobject"); !isSameType(errorCause(err), ObjectNotFound{}) { if err := fs.DeleteObject(bucketName, "foooobject"); !isSameType(errors.Cause(err), ObjectNotFound{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with valid condition // Test with valid condition
@ -295,7 +297,7 @@ func TestFSDeleteObject(t *testing.T) {
// Delete object should err disk not found. // Delete object should err disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if err := fs.DeleteObject(bucketName, objectName); err != nil { if err := fs.DeleteObject(bucketName, objectName); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) { if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
} }
@ -318,11 +320,11 @@ func TestFSDeleteBucket(t *testing.T) {
} }
// Test with an invalid bucket name // Test with an invalid bucket name
if err = fs.DeleteBucket("fo"); !isSameType(errorCause(err), BucketNameInvalid{}) { if err = fs.DeleteBucket("fo"); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with a nonexistent bucket // Test with a nonexistent bucket
if err = fs.DeleteBucket("foobucket"); !isSameType(errorCause(err), BucketNotFound{}) { if err = fs.DeleteBucket("foobucket"); !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with a valid case // Test with a valid case
@ -335,7 +337,7 @@ func TestFSDeleteBucket(t *testing.T) {
// Delete bucket should get error disk not found. // Delete bucket should get error disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if err = fs.DeleteBucket(bucketName); err != nil { if err = fs.DeleteBucket(bucketName); err != nil {
if !isSameType(errorCause(err), BucketNotFound{}) { if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
} }
@ -378,7 +380,7 @@ func TestFSListBuckets(t *testing.T) {
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if _, err := fs.ListBuckets(); err != nil { if _, err := fs.ListBuckets(); err != nil {
if errorCause(err) != errDiskNotFound { if errors.Cause(err) != errDiskNotFound {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
} }
@ -386,7 +388,7 @@ func TestFSListBuckets(t *testing.T) {
longPath := fmt.Sprintf("%0256d", 1) longPath := fmt.Sprintf("%0256d", 1)
fs.fsPath = longPath fs.fsPath = longPath
if _, err := fs.ListBuckets(); err != nil { if _, err := fs.ListBuckets(); err != nil {
if errorCause(err) != errFileNameTooLong { if errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
} }
@ -399,7 +401,7 @@ func TestFSHealObject(t *testing.T) {
obj := initFSObjects(disk, t) obj := initFSObjects(disk, t)
_, _, err := obj.HealObject("bucket", "object") _, _, err := obj.HealObject("bucket", "object")
if err == nil || !isSameType(errorCause(err), NotImplemented{}) { if err == nil || !isSameType(errors.Cause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ") t.Fatalf("Heal Object should return NotImplemented error ")
} }
} }
@ -411,7 +413,7 @@ func TestFSListObjectsHeal(t *testing.T) {
obj := initFSObjects(disk, t) obj := initFSObjects(disk, t)
_, err := obj.ListObjectsHeal("bucket", "prefix", "marker", "delimiter", 1000) _, err := obj.ListObjectsHeal("bucket", "prefix", "marker", "delimiter", 1000)
if err == nil || !isSameType(errorCause(err), NotImplemented{}) { if err == nil || !isSameType(errors.Cause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ") t.Fatalf("Heal Object should return NotImplemented error ")
} }
} }
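
The isSameType helper these tests lean on is not part of this commit; a plausible reflection-based implementation, applied after errors.Cause has stripped the trace wrapper, would be:

```go
package main

import (
	"fmt"
	"reflect"
)

type BucketNotFound struct{ Bucket string }

func (e BucketNotFound) Error() string { return "bucket not found: " + e.Bucket }

// isSameType compares only the dynamic types, ignoring field values,
// which is all the assertions above need.
func isSameType(err, target error) bool {
	return reflect.TypeOf(err) == reflect.TypeOf(target)
}

func main() {
	var err error = BucketNotFound{Bucket: "photos"}
	fmt.Println(isSameType(err, BucketNotFound{})) // true
}
```
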

View File

@ -28,6 +28,7 @@ import (
"time" "time"
"github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/azure-sdk-for-go/storage"
"github.com/minio/minio/pkg/errors"
) )
// Copied from github.com/Azure/azure-sdk-for-go/storage/container.go // Copied from github.com/Azure/azure-sdk-for-go/storage/container.go
@ -116,22 +117,22 @@ func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo,
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL() blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL) url, err := url.Parse(blobURL)
if err != nil { if err != nil {
return bucketInfo, azureToObjectError(traceError(err)) return bucketInfo, azureToObjectError(errors.Trace(err))
} }
url.RawQuery = "restype=container" url.RawQuery = "restype=container"
resp, err := azureAnonRequest(httpHEAD, url.String(), nil) resp, err := azureAnonRequest(httpHEAD, url.String(), nil)
if err != nil { if err != nil {
return bucketInfo, azureToObjectError(traceError(err), bucket) return bucketInfo, azureToObjectError(errors.Trace(err), bucket)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return bucketInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket) return bucketInfo, azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
} }
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil { if err != nil {
return bucketInfo, traceError(err) return bucketInfo, errors.Trace(err)
} }
bucketInfo = BucketInfo{ bucketInfo = BucketInfo{
@ -155,16 +156,16 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l
blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL() blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
resp, err := azureAnonRequest(httpGET, blobURL, h) resp, err := azureAnonRequest(httpGET, blobURL, h)
if err != nil { if err != nil {
return azureToObjectError(traceError(err), bucket, object) return azureToObjectError(errors.Trace(err), bucket, object)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) return azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
} }
_, err = io.Copy(writer, resp.Body) _, err = io.Copy(writer, resp.Body)
return traceError(err) return errors.Trace(err)
} }
// AnonGetObjectInfo - Send HEAD request without authentication and convert the // AnonGetObjectInfo - Send HEAD request without authentication and convert the
@ -173,12 +174,12 @@ func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectI
blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL() blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
resp, err := azureAnonRequest(httpHEAD, blobURL, nil) resp, err := azureAnonRequest(httpHEAD, blobURL, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return objInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) return objInfo, azureToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
} }
var contentLength int64 var contentLength int64
@ -186,13 +187,13 @@ func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectI
if contentLengthStr != "" { if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(errUnexpected), bucket, object) return objInfo, azureToObjectError(errors.Trace(errUnexpected), bucket, object)
} }
} }
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil { if err != nil {
return objInfo, traceError(err) return objInfo, errors.Trace(err)
} }
objInfo.ModTime = t objInfo.ModTime = t
@ -225,13 +226,13 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL() blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL) url, err := url.Parse(blobURL)
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err)) return result, azureToObjectError(errors.Trace(err))
} }
url.RawQuery = q.Encode() url.RawQuery = q.Encode()
resp, err := azureAnonRequest(httpGET, url.String(), nil) resp, err := azureAnonRequest(httpGET, url.String(), nil)
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err)) return result, azureToObjectError(errors.Trace(err))
} }
defer resp.Body.Close() defer resp.Body.Close()
@ -239,11 +240,11 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string,
data, err := ioutil.ReadAll(resp.Body) data, err := ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err)) return result, azureToObjectError(errors.Trace(err))
} }
err = xml.Unmarshal(data, &listResp) err = xml.Unmarshal(data, &listResp)
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err)) return result, azureToObjectError(errors.Trace(err))
} }
result.IsTruncated = listResp.NextMarker != "" result.IsTruncated = listResp.NextMarker != ""
@ -279,13 +280,13 @@ func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, deli
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL() blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL) url, err := url.Parse(blobURL)
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err)) return result, azureToObjectError(errors.Trace(err))
} }
url.RawQuery = q.Encode() url.RawQuery = q.Encode()
resp, err := http.Get(url.String()) resp, err := http.Get(url.String())
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err)) return result, azureToObjectError(errors.Trace(err))
} }
defer resp.Body.Close() defer resp.Body.Close()
@ -293,11 +294,11 @@ func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, deli
data, err := ioutil.ReadAll(resp.Body) data, err := ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err)) return result, azureToObjectError(errors.Trace(err))
} }
err = xml.Unmarshal(data, &listResp) err = xml.Unmarshal(data, &listResp)
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err)) return result, azureToObjectError(errors.Trace(err))
} }
// If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set // If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set
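anonErrToObjectErr appears in every anonymous handler above: it maps the HTTP status of the unsigned backend request to a typed object-layer error before the result is traced. Its body is not part of this diff, so the sketch below, with hypothetical stand-ins for the cmd error types, only covers the 404 case the surrounding code clearly relies on:

```go
package main

import (
	"fmt"
	"net/http"
)

// bucketNotFound / objectNotFound are hypothetical stand-ins for the
// cmd package's typed errors.
type bucketNotFound struct{ Bucket string }
type objectNotFound struct{ Bucket, Object string }

func (e bucketNotFound) Error() string { return "bucket not found: " + e.Bucket }
func (e objectNotFound) Error() string { return e.Bucket + "/" + e.Object + " not found" }

// anonErrToObjectErr maps the status of an anonymous (unsigned)
// backend request to a typed error; params is (bucket[, object]).
func anonErrToObjectErr(statusCode int, params ...string) error {
	bucket, object := "", ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) >= 2 {
		object = params[1]
	}
	switch statusCode {
	case http.StatusNotFound:
		if object != "" {
			return objectNotFound{bucket, object}
		}
		return bucketNotFound{bucket}
	}
	return fmt.Errorf("unexpected status %d", statusCode)
}

func main() {
	fmt.Println(anonErrToObjectErr(http.StatusNotFound, "bucket", "object"))
}
```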

View File

@ -23,7 +23,6 @@ import (
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -36,6 +35,7 @@ import (
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -133,7 +133,7 @@ func s3MetaToAzureProperties(s3Metadata map[string]string) (storage.BlobMetadata
storage.BlobProperties, error) { storage.BlobProperties, error) {
for k := range s3Metadata { for k := range s3Metadata {
if strings.Contains(k, "--") { if strings.Contains(k, "--") {
return storage.BlobMetadata{}, storage.BlobProperties{}, traceError(UnsupportedMetadata{}) return storage.BlobMetadata{}, storage.BlobProperties{}, errors.Trace(UnsupportedMetadata{})
} }
} }
@ -248,15 +248,15 @@ func azureToObjectError(err error, params ...string) error {
return nil return nil
} }
e, ok := err.(*Error) e, ok := err.(*errors.Error)
if !ok { if !ok {
// Code should be fixed if this function is called without doing traceError() // Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated. // Else handling different situations in this function makes this function complicated.
errorIf(err, "Expected type *Error") errorIf(err, "Expected type *Error")
return err return err
} }
err = e.e err = e.Cause
bucket := "" bucket := ""
object := "" object := ""
if len(params) >= 1 { if len(params) >= 1 {
@ -294,7 +294,7 @@ func azureToObjectError(err error, params ...string) error {
err = BucketNameInvalid{Bucket: bucket} err = BucketNameInvalid{Bucket: bucket}
} }
} }
e.e = err e.Cause = err
return e return e
} }
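azureToObjectError (and its b2/gcs siblings later in this diff) all follow the same peel-map-rewrap shape: type-assert the traced wrapper, translate the backend error sitting in Cause, and return the wrapper so the stack survives. A condensed, runnable sketch of that pattern, with backendError and bucketNotFound as hypothetical stand-ins for the real types:

```go
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

// backendError is a hypothetical stand-in for a cloud SDK error.
type backendError struct{ Code string }

func (b backendError) Error() string { return b.Code }

// bucketNotFound is a hypothetical stand-in for the object-layer error.
type bucketNotFound struct{ Bucket string }

func (b bucketNotFound) Error() string { return "bucket not found: " + b.Bucket }

// toObjectError peels the traced wrapper, maps the cause, and rewraps,
// so the stack captured at the errors.Trace() call site is preserved.
func toObjectError(err error, bucket string) error {
	if err == nil {
		return nil
	}
	e, ok := err.(*errors.Error)
	if !ok {
		return err // caller forgot errors.Trace(); return unchanged
	}
	if be, isBackend := e.Cause.(backendError); isBackend && be.Code == "NoSuchBucket" {
		e.Cause = bucketNotFound{Bucket: bucket}
	}
	return e
}

func main() {
	err := toObjectError(errors.Trace(backendError{Code: "NoSuchBucket"}), "photos")
	fmt.Println(errors.Cause(err)) // bucket not found: photos
}
```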
@ -316,11 +316,11 @@ func mustGetAzureUploadID() string {
// checkAzureUploadID - returns error in case of given string is upload ID. // checkAzureUploadID - returns error in case of given string is upload ID.
func checkAzureUploadID(uploadID string) (err error) { func checkAzureUploadID(uploadID string) (err error) {
if len(uploadID) != 16 { if len(uploadID) != 16 {
return traceError(MalformedUploadID{uploadID}) return errors.Trace(MalformedUploadID{uploadID})
} }
if _, err = hex.DecodeString(uploadID); err != nil { if _, err = hex.DecodeString(uploadID); err != nil {
return traceError(MalformedUploadID{uploadID}) return errors.Trace(MalformedUploadID{uploadID})
} }
return nil return nil
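mustGetAzureUploadID and checkAzureUploadID pin Azure upload IDs to exactly 16 hex characters. A standalone sketch of that contract; the generation side (8 random bytes, hex-encoded) is an assumption, not necessarily the gateway's exact source of randomness:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// newUploadID returns a 16-character hex string (8 random bytes).
func newUploadID() (string, error) {
	b := make([]byte, 8)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

// checkUploadID mirrors checkAzureUploadID: exactly 16 chars, all hex.
func checkUploadID(id string) error {
	if len(id) != 16 {
		return fmt.Errorf("malformed upload ID %q", id)
	}
	if _, err := hex.DecodeString(id); err != nil {
		return fmt.Errorf("malformed upload ID %q", id)
	}
	return nil
}

func main() {
	id, _ := newUploadID()
	fmt.Println(id, checkUploadID(id)) // e.g. 3f9c2b7a41d08e55 <nil>
}
```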
@ -403,7 +403,7 @@ func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
err := container.Create(&storage.CreateContainerOptions{ err := container.Create(&storage.CreateContainerOptions{
Access: storage.ContainerAccessTypePrivate, Access: storage.ContainerAccessTypePrivate,
}) })
return azureToObjectError(traceError(err), bucket) return azureToObjectError(errors.Trace(err), bucket)
} }
// GetBucketInfo - Get bucket metadata. // GetBucketInfo - Get bucket metadata.
@ -413,7 +413,7 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// in azure documentation, so we will simply use the same function here. // in azure documentation, so we will simply use the same function here.
// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata // Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return bi, traceError(BucketNameInvalid{Bucket: bucket}) return bi, errors.Trace(BucketNameInvalid{Bucket: bucket})
} }
// Azure does not have an equivalent call, hence use // Azure does not have an equivalent call, hence use
@ -422,7 +422,7 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
Prefix: bucket, Prefix: bucket,
}) })
if err != nil { if err != nil {
return bi, azureToObjectError(traceError(err), bucket) return bi, azureToObjectError(errors.Trace(err), bucket)
} }
for _, container := range resp.Containers { for _, container := range resp.Containers {
if container.Name == bucket { if container.Name == bucket {
@ -435,19 +435,19 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
} // else continue } // else continue
} }
} }
return bi, traceError(BucketNotFound{Bucket: bucket}) return bi, errors.Trace(BucketNotFound{Bucket: bucket})
} }
// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers. // ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) { func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
resp, err := a.client.ListContainers(storage.ListContainersParameters{}) resp, err := a.client.ListContainers(storage.ListContainersParameters{})
if err != nil { if err != nil {
return nil, azureToObjectError(traceError(err)) return nil, azureToObjectError(errors.Trace(err))
} }
for _, container := range resp.Containers { for _, container := range resp.Containers {
t, e := time.Parse(time.RFC1123, container.Properties.LastModified) t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
if e != nil { if e != nil {
return nil, traceError(e) return nil, errors.Trace(e)
} }
buckets = append(buckets, BucketInfo{ buckets = append(buckets, BucketInfo{
Name: container.Name, Name: container.Name,
@ -460,7 +460,7 @@ func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer. // DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
func (a *azureObjects) DeleteBucket(bucket string) error { func (a *azureObjects) DeleteBucket(bucket string) error {
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
return azureToObjectError(traceError(container.Delete(nil)), bucket) return azureToObjectError(errors.Trace(container.Delete(nil)), bucket)
} }
// ListObjects - lists all blobs on azure within a container filtered by prefix // ListObjects - lists all blobs on azure within a container filtered by prefix
@ -477,7 +477,7 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
MaxResults: uint(maxKeys), MaxResults: uint(maxKeys),
}) })
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err), bucket, prefix) return result, azureToObjectError(errors.Trace(err), bucket, prefix)
} }
for _, object := range resp.Blobs { for _, object := range resp.Blobs {
@ -545,7 +545,7 @@ func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimite
func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error { func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
// startOffset cannot be negative. // startOffset cannot be negative.
if startOffset < 0 { if startOffset < 0 {
return toObjectErr(traceError(errUnexpected), bucket, object) return toObjectErr(errors.Trace(errUnexpected), bucket, object)
} }
blobRange := &storage.BlobRange{Start: uint64(startOffset)} blobRange := &storage.BlobRange{Start: uint64(startOffset)}
@ -564,11 +564,11 @@ func (a *azureObjects) GetObject(bucket, object string, startOffset int64, lengt
}) })
} }
if err != nil { if err != nil {
return azureToObjectError(traceError(err), bucket, object) return azureToObjectError(errors.Trace(err), bucket, object)
} }
_, err = io.Copy(writer, rc) _, err = io.Copy(writer, rc)
rc.Close() rc.Close()
return traceError(err) return errors.Trace(err)
} }
// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo, // GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
@ -577,7 +577,7 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo,
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.GetProperties(nil) err = blob.GetProperties(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
meta := azurePropertiesToS3Meta(blob.Metadata, blob.Properties) meta := azurePropertiesToS3Meta(blob.Metadata, blob.Properties)
@ -604,7 +604,7 @@ func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metad
} }
err = blob.CreateBlockBlobFromReader(data, nil) err = blob.CreateBlockBlobFromReader(data, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
return a.GetObjectInfo(bucket, object) return a.GetObjectInfo(bucket, object)
} }
@ -621,12 +621,12 @@ func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject s
destBlob.Metadata = azureMeta destBlob.Metadata = azureMeta
err = destBlob.Copy(srcBlobURL, nil) err = destBlob.Copy(srcBlobURL, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject) return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject)
} }
destBlob.Properties = props destBlob.Properties = props
err = destBlob.SetProperties(nil) err = destBlob.SetProperties(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject) return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject)
} }
return a.GetObjectInfo(destBucket, destObject) return a.GetObjectInfo(destBucket, destObject)
} }
@ -637,7 +637,7 @@ func (a *azureObjects) DeleteObject(bucket, object string) error {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err := blob.Delete(nil) err := blob.Delete(nil)
if err != nil { if err != nil {
return azureToObjectError(traceError(err), bucket, object) return azureToObjectError(errors.Trace(err), bucket, object)
} }
return nil return nil
} }
@ -661,10 +661,10 @@ func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID stri
blob := a.client.GetContainerReference(bucketName).GetBlobReference( blob := a.client.GetContainerReference(bucketName).GetBlobReference(
getAzureMetadataObjectName(objectName, uploadID)) getAzureMetadataObjectName(objectName, uploadID))
err = blob.GetMetadata(nil) err = blob.GetMetadata(nil)
err = azureToObjectError(traceError(err), bucketName, objectName) err = azureToObjectError(errors.Trace(err), bucketName, objectName)
oerr := ObjectNotFound{bucketName, objectName} oerr := ObjectNotFound{bucketName, objectName}
if errorCause(err) == oerr { if errors.Cause(err) == oerr {
err = traceError(InvalidUploadID{}) err = errors.Trace(InvalidUploadID{})
} }
return err return err
} }
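The `errors.Cause(err) == oerr` comparison above works only because ObjectNotFound is a comparable struct and the converter filled in the same bucket/object fields. A toy demonstration of why the unwrap is mandatory, using a hypothetical notFound type:

```go
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

// notFound is a hypothetical comparable error type; struct equality
// holds when both fields match, which is what the == check exploits.
type notFound struct{ Bucket, Object string }

func (n notFound) Error() string { return n.Bucket + "/" + n.Object + " not found" }

func main() {
	err := errors.Trace(notFound{"bucket", "object"})

	// Comparing the traced value directly would always be false;
	// unwrap with errors.Cause first, as checkUploadIDExists does.
	fmt.Println(errors.Cause(err) == notFound{"bucket", "object"}) // true
	fmt.Println(err == error(notFound{"bucket", "object"}))        // false
}
```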
@ -673,19 +673,19 @@ func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID stri
func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) { func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
uploadID = mustGetAzureUploadID() uploadID = mustGetAzureUploadID()
if err = a.checkUploadIDExists(bucket, object, uploadID); err == nil { if err = a.checkUploadIDExists(bucket, object, uploadID); err == nil {
return "", traceError(errors.New("Upload ID name collision")) return "", errors.Trace(fmt.Errorf("Upload ID name collision"))
} }
metadataObject := getAzureMetadataObjectName(object, uploadID) metadataObject := getAzureMetadataObjectName(object, uploadID)
var jsonData []byte var jsonData []byte
if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: metadata}); err != nil { if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: metadata}); err != nil {
return "", traceError(err) return "", errors.Trace(err)
} }
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil) err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil)
if err != nil { if err != nil {
return "", azureToObjectError(traceError(err), bucket, metadataObject) return "", azureToObjectError(errors.Trace(err), bucket, metadataObject)
} }
return uploadID, nil return uploadID, nil
@ -721,7 +721,7 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil) err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
if err != nil { if err != nil {
return info, azureToObjectError(traceError(err), bucket, object) return info, azureToObjectError(errors.Trace(err), bucket, object)
} }
subPartNumber++ subPartNumber++
} }
@ -747,7 +747,7 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object) objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil) resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
if err != nil { if err != nil {
return result, azureToObjectError(traceError(err), bucket, object) return result, azureToObjectError(errors.Trace(err), bucket, object)
} }
// Build a sorted list of parts and return the requested entries. // Build a sorted list of parts and return the requested entries.
partsMap := make(map[int]PartInfo) partsMap := make(map[int]PartInfo)
@ -756,7 +756,7 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
var parsedUploadID string var parsedUploadID string
var md5Hex string var md5Hex string
if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil { if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil {
return result, azureToObjectError(traceError(errUnexpected), bucket, object) return result, azureToObjectError(errors.Trace(errUnexpected), bucket, object)
} }
if parsedUploadID != uploadID { if parsedUploadID != uploadID {
continue continue
@ -773,7 +773,7 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
if part.ETag != md5Hex { if part.ETag != md5Hex {
// If two parts of same partNumber were uploaded with different contents // If two parts of same partNumber were uploaded with different contents
// return error as we won't be able to decide which the latest part is. // return error as we won't be able to decide which the latest part is.
return result, azureToObjectError(traceError(errUnexpected), bucket, object) return result, azureToObjectError(errors.Trace(errUnexpected), bucket, object)
} }
part.Size += block.Size part.Size += block.Size
partsMap[partNumber] = part partsMap[partNumber] = part
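ListObjectParts can rebuild part sizes from Azure's uncommitted block list only because each block name encodes the part number, upload ID, and MD5, which azureParseBlockID recovers. The actual wire format is not shown in this diff; the round-trip below is purely illustrative:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"
)

// encodeBlockID packs part metadata into one Azure block name. The
// "part.uploadID.md5" layout is illustrative only; the gateway's real
// format is not shown in this diff.
func encodeBlockID(partID int, uploadID, md5Hex string) string {
	raw := fmt.Sprintf("%05d.%s.%s", partID, uploadID, md5Hex)
	return base64.StdEncoding.EncodeToString([]byte(raw))
}

// decodeBlockID reverses encodeBlockID, recovering the fields that
// ListObjectParts needs to rebuild PartInfo entries.
func decodeBlockID(blockID string) (partID int, uploadID, md5Hex string, err error) {
	raw, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil {
		return 0, "", "", err
	}
	fields := strings.SplitN(string(raw), ".", 3)
	if len(fields) != 3 {
		return 0, "", "", fmt.Errorf("malformed block id %q", blockID)
	}
	partID, err = strconv.Atoi(fields[0])
	return partID, fields[1], fields[2], err
}

func main() {
	id := encodeBlockID(7, "3f9c2b7a41d08e55", "9e107d9d372bb6826bd81d3542a419d6")
	fmt.Println(decodeBlockID(id))
}
```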
@ -839,12 +839,12 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
var metadataReader io.Reader var metadataReader io.Reader
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
if metadataReader, err = blob.Get(nil); err != nil { if metadataReader, err = blob.Get(nil); err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, metadataObject) return objInfo, azureToObjectError(errors.Trace(err), bucket, metadataObject)
} }
var metadata azureMultipartMetadata var metadata azureMultipartMetadata
if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil { if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, metadataObject) return objInfo, azureToObjectError(errors.Trace(err), bucket, metadataObject)
} }
defer func() { defer func() {
@ -860,7 +860,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object) objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil) resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
getBlocks := func(partNumber int, etag string) (blocks []storage.Block, size int64, err error) { getBlocks := func(partNumber int, etag string) (blocks []storage.Block, size int64, err error) {
@ -896,7 +896,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
var size int64 var size int64
blocks, size, err = getBlocks(part.PartNumber, part.ETag) blocks, size, err = getBlocks(part.PartNumber, part.ETag)
if err != nil { if err != nil {
return objInfo, traceError(err) return objInfo, errors.Trace(err)
} }
allBlocks = append(allBlocks, blocks...) allBlocks = append(allBlocks, blocks...)
@ -906,7 +906,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
// Error out if any part except the last is smaller than 5MiB. // Error out if any part except the last is smaller than 5MiB.
for i, size := range partSizes[:len(partSizes)-1] { for i, size := range partSizes[:len(partSizes)-1] {
if size < globalMinPartSize { if size < globalMinPartSize {
return objInfo, traceError(PartTooSmall{ return objInfo, errors.Trace(PartTooSmall{
PartNumber: uploadedParts[i].PartNumber, PartNumber: uploadedParts[i].PartNumber,
PartSize: size, PartSize: size,
PartETag: uploadedParts[i].ETag, PartETag: uploadedParts[i].ETag,
@ -916,7 +916,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
err = objBlob.PutBlockList(allBlocks, nil) err = objBlob.PutBlockList(allBlocks, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
if len(metadata.Metadata) > 0 { if len(metadata.Metadata) > 0 {
objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(metadata.Metadata) objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(metadata.Metadata)
@ -925,11 +925,11 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
} }
err = objBlob.SetProperties(nil) err = objBlob.SetProperties(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
err = objBlob.SetMetadata(nil) err = objBlob.SetMetadata(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object) return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
} }
} }
return a.GetObjectInfo(bucket, object) return a.GetObjectInfo(bucket, object)
@ -952,13 +952,13 @@ func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.Bucket
} }
prefix := bucket + "/*" // For all objects inside the bucket. prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 { if len(policies) != 1 {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
if policies[0].Prefix != prefix { if policies[0].Prefix != prefix {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
if policies[0].Policy != policy.BucketPolicyReadOnly { if policies[0].Policy != policy.BucketPolicyReadOnly {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
perm := storage.ContainerPermissions{ perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypeContainer, AccessType: storage.ContainerAccessTypeContainer,
@ -966,7 +966,7 @@ func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.Bucket
} }
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
err := container.SetPermissions(perm, nil) err := container.SetPermissions(perm, nil)
return azureToObjectError(traceError(err), bucket) return azureToObjectError(errors.Trace(err), bucket)
} }
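Azure containers support only container-wide public read access, so SetBucketPolicies rejects everything except a single read-only policy on the bucket/* prefix. A sketch of the one policy document that passes this gate, built with the minio-go policy helpers the gateway already imports (the printed map is what I'd expect, not output taken from the source):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	bucket := "photos" // hypothetical bucket name

	// The only shape the gateway accepts: read-only on "photos/*".
	var policyInfo policy.BucketAccessPolicy
	policyInfo.Statements = policy.SetPolicy(nil, policy.BucketPolicyReadOnly, bucket, "")

	// GetPolicies flattens statements back into prefix -> policy,
	// which is the view SetBucketPolicies inspects.
	policies := policy.GetPolicies(policyInfo.Statements, bucket)
	fmt.Println(policies) // expected: map[photos/*:readonly]
}
```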
// GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy // GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
@ -975,15 +975,15 @@ func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPoli
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
perm, err := container.GetPermissions(nil) perm, err := container.GetPermissions(nil)
if err != nil { if err != nil {
return policy.BucketAccessPolicy{}, azureToObjectError(traceError(err), bucket) return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(err), bucket)
} }
switch perm.AccessType { switch perm.AccessType {
case storage.ContainerAccessTypePrivate: case storage.ContainerAccessTypePrivate:
return policy.BucketAccessPolicy{}, traceError(PolicyNotFound{Bucket: bucket}) return policy.BucketAccessPolicy{}, errors.Trace(PolicyNotFound{Bucket: bucket})
case storage.ContainerAccessTypeContainer: case storage.ContainerAccessTypeContainer:
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
default: default:
return policy.BucketAccessPolicy{}, azureToObjectError(traceError(NotImplemented{})) return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(NotImplemented{}))
} }
return policyInfo, nil return policyInfo, nil
} }
@ -996,5 +996,5 @@ func (a *azureObjects) DeleteBucketPolicies(bucket string) error {
} }
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
err := container.SetPermissions(perm, nil) err := container.SetPermissions(perm, nil)
return azureToObjectError(traceError(err)) return azureToObjectError(errors.Trace(err))
} }

View File

@ -23,6 +23,7 @@ import (
"testing" "testing"
"github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/azure-sdk-for-go/storage"
"github.com/minio/minio/pkg/errors"
) )
// Test canonical metadata. // Test canonical metadata.
@ -60,7 +61,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
"invalid--meta": "value", "invalid--meta": "value",
} }
_, _, err = s3MetaToAzureProperties(headers) _, _, err = s3MetaToAzureProperties(headers)
if err = errorCause(err); err != nil { if err = errors.Cause(err); err != nil {
if _, ok := err.(UnsupportedMetadata); !ok { if _, ok := err.(UnsupportedMetadata); !ok {
t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err) t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
} }
@ -118,23 +119,23 @@ func TestAzureToObjectError(t *testing.T) {
nil, nil, "", "", nil, nil, "", "",
}, },
{ {
traceError(errUnexpected), errUnexpected, "", "", errors.Trace(errUnexpected), errUnexpected, "", "",
}, },
{ {
traceError(errUnexpected), traceError(errUnexpected), "", "", errors.Trace(errUnexpected), errors.Trace(errUnexpected), "", "",
}, },
{ {
traceError(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists", Code: "ContainerAlreadyExists",
}), BucketExists{Bucket: "bucket"}, "bucket", "", }), BucketExists{Bucket: "bucket"}, "bucket", "",
}, },
{ {
traceError(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
Code: "InvalidResourceName", Code: "InvalidResourceName",
}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", }), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
}, },
{ {
traceError(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound, StatusCode: http.StatusNotFound,
}), ObjectNotFound{ }), ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
@ -142,12 +143,12 @@ func TestAzureToObjectError(t *testing.T) {
}, "bucket", "object", }, "bucket", "object",
}, },
{ {
traceError(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound, StatusCode: http.StatusNotFound,
}), BucketNotFound{Bucket: "bucket"}, "bucket", "", }), BucketNotFound{Bucket: "bucket"}, "bucket", "",
}, },
{ {
traceError(storage.AzureStorageServiceError{ errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusBadRequest, StatusCode: http.StatusBadRequest,
}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", }), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
}, },

View File

@ -17,7 +17,6 @@
package cmd package cmd
import ( import (
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -25,6 +24,8 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/minio/minio/pkg/errors"
) )
// mkRange converts offset, size into Range header equivalent. // mkRange converts offset, size into Range header equivalent.
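mkRange's body is clipped from this hunk; given that AnonGetObject treats an empty result as "no Range header", a plausible reconstruction converting (offset, size) into an HTTP Range value is:

```go
package main

import "fmt"

// mkRange converts offset and size into an HTTP Range header value;
// an empty return means "no Range header". Reconstructed from the
// call sites, so treat the exact boundaries as an assumption.
func mkRange(offset, size int64) string {
	if offset == 0 && size == 0 {
		return "" // whole object
	}
	if size == 0 {
		return fmt.Sprintf("bytes=%d-", offset) // offset to EOF
	}
	return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1) // inclusive end
}

func main() {
	fmt.Println(mkRange(0, 0))    //
	fmt.Println(mkRange(100, 0))  // bytes=100-
	fmt.Println(mkRange(100, 50)) // bytes=100-149
}
```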
@ -44,7 +45,7 @@ func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int6
uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object) uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
req, err := http.NewRequest("GET", uri, nil) req, err := http.NewRequest("GET", uri, nil)
if err != nil { if err != nil {
return b2ToObjectError(traceError(err), bucket, object) return b2ToObjectError(errors.Trace(err), bucket, object)
} }
rng := mkRange(startOffset, length) rng := mkRange(startOffset, length)
if rng != "" { if rng != "" {
@ -52,14 +53,14 @@ func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int6
} }
resp, err := l.anonClient.Do(req) resp, err := l.anonClient.Do(req)
if err != nil { if err != nil {
return b2ToObjectError(traceError(err), bucket, object) return b2ToObjectError(errors.Trace(err), bucket, object)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return b2ToObjectError(traceError(errors.New(resp.Status)), bucket, object) return b2ToObjectError(errors.Trace(fmt.Errorf(resp.Status)), bucket, object)
} }
_, err = io.Copy(writer, resp.Body) _, err = io.Copy(writer, resp.Body)
return b2ToObjectError(traceError(err), bucket, object) return b2ToObjectError(errors.Trace(err), bucket, object)
} }
// Converts http Header into ObjectInfo. This function looks for all the // Converts http Header into ObjectInfo. This function looks for all the
@ -73,13 +74,13 @@ func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int6
func headerToObjectInfo(bucket, object string, header http.Header) (objInfo ObjectInfo, err error) { func headerToObjectInfo(bucket, object string, header http.Header) (objInfo ObjectInfo, err error) {
clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64) clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
// Converting upload timestamp in milliseconds to a time.Time value for ObjectInfo.ModTime. // Converting upload timestamp in milliseconds to a time.Time value for ObjectInfo.ModTime.
timeStamp, err := strconv.ParseInt(header.Get("X-Bz-Upload-Timestamp"), 10, 64) timeStamp, err := strconv.ParseInt(header.Get("X-Bz-Upload-Timestamp"), 10, 64)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
// Populate user metadata by looking for all the X-Bz-Info-<name> // Populate user metadata by looking for all the X-Bz-Info-<name>
@ -91,12 +92,12 @@ func headerToObjectInfo(bucket, object string, header http.Header) (objInfo Obje
var name string var name string
name, err = url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-")) name, err = url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-"))
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
var val string var val string
val, err = url.QueryUnescape(header.Get(key)) val, err = url.QueryUnescape(header.Get(key))
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
userMetadata[name] = val userMetadata[name] = val
} }
@ -119,15 +120,15 @@ func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo Obj
uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object) uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
req, err := http.NewRequest("HEAD", uri, nil) req, err := http.NewRequest("HEAD", uri, nil)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
resp, err := l.anonClient.Do(req) resp, err := l.anonClient.Do(req)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return objInfo, b2ToObjectError(traceError(errors.New(resp.Status)), bucket, object) return objInfo, b2ToObjectError(errors.Trace(fmt.Errorf(resp.Status)), bucket, object)
} }
return headerToObjectInfo(bucket, object, resp.Header) return headerToObjectInfo(bucket, object, resp.Header)
} }
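Two details in headerToObjectInfo are easy to miss: B2 reports upload time as milliseconds since the Unix epoch, and user metadata arrives as URL-escaped X-Bz-Info-* headers. A standalone sketch of both conversions:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
)

func main() {
	h := http.Header{}
	h.Set("X-Bz-Upload-Timestamp", "1511629109000") // milliseconds
	h.Set("X-Bz-Info-Photographer", "jane%20doe")   // URL-escaped value

	// Millisecond epoch timestamp -> time.Time for ObjectInfo.ModTime.
	ms, _ := strconv.ParseInt(h.Get("X-Bz-Upload-Timestamp"), 10, 64)
	modTime := time.Unix(0, ms*int64(time.Millisecond)).UTC()
	fmt.Println(modTime) // 2017-11-25 16:58:29 +0000 UTC

	// X-Bz-Info-<name> headers -> user metadata map.
	meta := map[string]string{}
	for key := range h {
		if !strings.HasPrefix(key, "X-Bz-Info-") {
			continue
		}
		name, _ := url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-"))
		val, _ := url.QueryUnescape(h.Get(key))
		meta[name] = val
	}
	fmt.Println(meta) // map[Photographer:jane doe]
}
```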

View File

@ -32,6 +32,7 @@ import (
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
h2 "github.com/minio/minio/pkg/hash" h2 "github.com/minio/minio/pkg/hash"
) )
@ -134,15 +135,15 @@ func b2ToObjectError(err error, params ...string) error {
return nil return nil
} }
e, ok := err.(*Error) e, ok := err.(*errors.Error)
if !ok { if !ok {
// Code should be fixed if this function is called without doing traceError() // Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated. // Else handling different situations in this function makes this function complicated.
errorIf(err, "Expected type *Error") errorIf(err, "Expected type *Error")
return err return err
} }
err = e.e err = e.Cause
bucket := "" bucket := ""
object := "" object := ""
uploadID := "" uploadID := ""
@ -189,7 +190,7 @@ func b2ToObjectError(err error, params ...string) error {
err = InvalidUploadID{uploadID} err = InvalidUploadID{uploadID}
} }
e.e = err e.Cause = err
return e return e
} }
@ -211,7 +212,7 @@ func (l *b2Objects) MakeBucketWithLocation(bucket, location string) error {
// All buckets are set to private by default. // All buckets are set to private by default.
_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil) _, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
return b2ToObjectError(traceError(err), bucket) return b2ToObjectError(errors.Trace(err), bucket)
} }
func (l *b2Objects) reAuthorizeAccount() error { func (l *b2Objects) reAuthorizeAccount() error {
@ -252,14 +253,14 @@ func (l *b2Objects) listBuckets(err error) ([]*b2.Bucket, error) {
func (l *b2Objects) Bucket(bucket string) (*b2.Bucket, error) { func (l *b2Objects) Bucket(bucket string) (*b2.Bucket, error) {
bktList, err := l.listBuckets(nil) bktList, err := l.listBuckets(nil)
if err != nil { if err != nil {
return nil, b2ToObjectError(traceError(err), bucket) return nil, b2ToObjectError(errors.Trace(err), bucket)
} }
for _, bkt := range bktList { for _, bkt := range bktList {
if bkt.Name == bucket { if bkt.Name == bucket {
return bkt, nil return bkt, nil
} }
} }
return nil, traceError(BucketNotFound{Bucket: bucket}) return nil, errors.Trace(BucketNotFound{Bucket: bucket})
} }
// GetBucketInfo gets bucket metadata. // GetBucketInfo gets bucket metadata.
@ -296,7 +297,7 @@ func (l *b2Objects) DeleteBucket(bucket string) error {
return err return err
} }
err = bkt.DeleteBucket(l.ctx) err = bkt.DeleteBucket(l.ctx)
return b2ToObjectError(traceError(err), bucket) return b2ToObjectError(errors.Trace(err), bucket)
} }
// ListObjects lists all objects in B2 bucket filtered by prefix, returns up to 1000 entries at a time. // ListObjects lists all objects in B2 bucket filtered by prefix, returns up to 1000 entries at a time.
@ -308,7 +309,7 @@ func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, del
loi = ListObjectsInfo{} loi = ListObjectsInfo{}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter) files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
if lerr != nil { if lerr != nil {
return loi, b2ToObjectError(traceError(lerr), bucket) return loi, b2ToObjectError(errors.Trace(lerr), bucket)
} }
loi.IsTruncated = next != "" loi.IsTruncated = next != ""
loi.NextMarker = next loi.NextMarker = next
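Because each page is capped at maxKeys and continuation is signalled through IsTruncated/NextMarker, a caller drains a listing by feeding NextMarker back in. A self-contained usage sketch with a toy in-memory backend standing in for the gateway:

```go
package main

import "fmt"

// listResult mirrors the IsTruncated/NextMarker shape of ListObjectsInfo.
type listResult struct {
	Objects     []string
	IsTruncated bool
	NextMarker  string
}

// memLister is a toy backend paging over a fixed slice.
type memLister struct{ keys []string }

func (m memLister) ListObjects(bucket, prefix, marker string, maxKeys int) listResult {
	start := 0
	for i, k := range m.keys {
		if k == marker {
			start = i + 1
		}
	}
	end := start + maxKeys
	if end >= len(m.keys) {
		return listResult{Objects: m.keys[start:]}
	}
	return listResult{Objects: m.keys[start:end], IsTruncated: true, NextMarker: m.keys[end-1]}
}

func main() {
	l := memLister{keys: []string{"a", "b", "c", "d", "e"}}
	var all []string
	marker := ""
	for {
		res := l.ListObjects("bucket", "", marker, 2) // page size 2
		all = append(all, res.Objects...)
		if !res.IsTruncated {
			break
		}
		marker = res.NextMarker // feed the marker back in
	}
	fmt.Println(all) // [a b c d e]
}
```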
@ -342,7 +343,7 @@ func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter s
loi = ListObjectsV2Info{} loi = ListObjectsV2Info{}
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter) files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter)
if lerr != nil { if lerr != nil {
return loi, b2ToObjectError(traceError(lerr), bucket) return loi, b2ToObjectError(errors.Trace(lerr), bucket)
} }
loi.IsTruncated = next != "" loi.IsTruncated = next != ""
loi.ContinuationToken = continuationToken loi.ContinuationToken = continuationToken
@ -379,11 +380,11 @@ func (l *b2Objects) GetObject(bucket string, object string, startOffset int64, l
} }
reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length) reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
if err != nil { if err != nil {
return b2ToObjectError(traceError(err), bucket, object) return b2ToObjectError(errors.Trace(err), bucket, object)
} }
defer reader.Close() defer reader.Close()
_, err = io.Copy(writer, reader) _, err = io.Copy(writer, reader)
return b2ToObjectError(traceError(err), bucket, object) return b2ToObjectError(errors.Trace(err), bucket, object)
} }
// GetObjectInfo reads object info and replies back ObjectInfo // GetObjectInfo reads object info and replies back ObjectInfo
@ -394,12 +395,12 @@ func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
} }
f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1) f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
f.Close() f.Close()
fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx) fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
objInfo = ObjectInfo{ objInfo = ObjectInfo{
Bucket: bucket, Bucket: bucket,
@ -491,20 +492,20 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met
var u *b2.URL var u *b2.URL
u, err = bkt.GetUploadURL(l.ctx) u, err = bkt.GetUploadURL(l.ctx)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
hr := newB2Reader(data, data.Size()) hr := newB2Reader(data, data.Size())
var f *b2.File var f *b2.File
f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata) f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
var fi *b2.FileInfo var fi *b2.FileInfo
fi, err = f.GetFileInfo(l.ctx) fi, err = f.GetFileInfo(l.ctx)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(traceError(err), bucket, object) return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
} }
return ObjectInfo{ return ObjectInfo{
@ -521,7 +522,7 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met
// CopyObject copies a blob from source container to destination container. // CopyObject copies a blob from source container to destination container.
func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string,
dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
return objInfo, traceError(NotImplemented{}) return objInfo, errors.Trace(NotImplemented{})
} }
// DeleteObject deletes a blob in bucket // DeleteObject deletes a blob in bucket
@ -532,12 +533,12 @@ func (l *b2Objects) DeleteObject(bucket string, object string) error {
} }
reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1) reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil { if err != nil {
return b2ToObjectError(traceError(err), bucket, object) return b2ToObjectError(errors.Trace(err), bucket, object)
} }
io.Copy(ioutil.Discard, reader) io.Copy(ioutil.Discard, reader)
reader.Close() reader.Close()
err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx) err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
return b2ToObjectError(traceError(err), bucket, object) return b2ToObjectError(errors.Trace(err), bucket, object)
} }
// ListMultipartUploads lists all multipart uploads. // ListMultipartUploads lists all multipart uploads.
@ -556,7 +557,7 @@ func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker
} }
largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads) largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
if err != nil { if err != nil {
return lmi, b2ToObjectError(traceError(err), bucket) return lmi, b2ToObjectError(errors.Trace(err), bucket)
} }
lmi = ListMultipartsInfo{ lmi = ListMultipartsInfo{
MaxUploads: maxUploads, MaxUploads: maxUploads,
@ -591,7 +592,7 @@ func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata ma
delete(metadata, "content-type") delete(metadata, "content-type")
lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata) lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata)
if err != nil { if err != nil {
return uploadID, b2ToObjectError(traceError(err), bucket, object) return uploadID, b2ToObjectError(errors.Trace(err), bucket, object)
} }
return lf.ID, nil return lf.ID, nil
@ -600,7 +601,7 @@ func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata ma
// CopyObjectPart copy part of object to other bucket and object. // CopyObjectPart copy part of object to other bucket and object.
func (l *b2Objects) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, func (l *b2Objects) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string,
uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) { uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
return PartInfo{}, traceError(NotImplemented{}) return PartInfo{}, errors.Trace(NotImplemented{})
} }
// PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API. // PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
@ -612,13 +613,13 @@ func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string,
fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx) fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
if err != nil { if err != nil {
return pi, b2ToObjectError(traceError(err), bucket, object, uploadID) return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
} }
hr := newB2Reader(data, data.Size()) hr := newB2Reader(data, data.Size())
sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID) sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
if err != nil { if err != nil {
return pi, b2ToObjectError(traceError(err), bucket, object, uploadID) return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
} }
return PartInfo{ return PartInfo{
@ -646,7 +647,7 @@ func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID strin
partNumberMarker++ partNumberMarker++
partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts) partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
if err != nil { if err != nil {
return lpi, b2ToObjectError(traceError(err), bucket, object, uploadID) return lpi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
} }
if next != 0 { if next != 0 {
lpi.IsTruncated = true lpi.IsTruncated = true
@ -669,7 +670,7 @@ func (l *b2Objects) AbortMultipartUpload(bucket string, object string, uploadID
return err return err
} }
err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx) err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
return b2ToObjectError(traceError(err), bucket, object, uploadID) return b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
} }
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API. // CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
@ -683,7 +684,7 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload
// B2 requires contiguous part numbers starting with 1; they do not support // B2 requires contiguous part numbers starting with 1; they do not support
// hand-picking part numbers, so we return an S3-compatible error instead. // hand-picking part numbers, so we return an S3-compatible error instead.
if i+1 != uploadedPart.PartNumber { if i+1 != uploadedPart.PartNumber {
return oi, b2ToObjectError(traceError(InvalidPart{}), bucket, object, uploadID) return oi, b2ToObjectError(errors.Trace(InvalidPart{}), bucket, object, uploadID)
} }
// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag. // Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
@ -691,7 +692,7 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload
} }
if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil { if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
return oi, b2ToObjectError(traceError(err), bucket, object, uploadID) return oi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID)
} }
return l.GetObjectInfo(bucket, object) return l.GetObjectInfo(bucket, object)
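The contiguity check deserves a closer look: B2 will only finish a large file whose parts are numbered exactly 1..N, so the gateway rejects gaps up front rather than letting FinishLargeFile fail later. A minimal sketch of the invariant:

```go
package main

import "fmt"

// completePart mirrors the shape of the uploadedParts entries.
type completePart struct {
	PartNumber int
	ETag       string
}

// checkContiguous enforces B2's rule that parts are numbered 1..N
// with no gaps, the same invariant CompleteMultipartUpload checks.
func checkContiguous(parts []completePart) error {
	for i, p := range parts {
		if p.PartNumber != i+1 {
			return fmt.Errorf("invalid part: got %d, want %d", p.PartNumber, i+1)
		}
	}
	return nil
}

func main() {
	ok := []completePart{{1, "a"}, {2, "b"}, {3, "c"}}
	gap := []completePart{{1, "a"}, {3, "c"}}
	fmt.Println(checkContiguous(ok))  // <nil>
	fmt.Println(checkContiguous(gap)) // invalid part: got 3, want 2
}
```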
@ -712,13 +713,13 @@ func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
} }
prefix := bucket + "/*" // For all objects inside the bucket. prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 { if len(policies) != 1 {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
if policies[0].Prefix != prefix { if policies[0].Prefix != prefix {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
if policies[0].Policy != policy.BucketPolicyReadOnly { if policies[0].Policy != policy.BucketPolicyReadOnly {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
bkt, err := l.Bucket(bucket) bkt, err := l.Bucket(bucket)
if err != nil { if err != nil {
@ -726,7 +727,7 @@ func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
} }
bkt.Type = bucketTypeReadOnly bkt.Type = bucketTypeReadOnly
_, err = bkt.Update(l.ctx) _, err = bkt.Update(l.ctx)
return b2ToObjectError(traceError(err)) return b2ToObjectError(errors.Trace(err))
} }
// GetBucketPolicies returns the current bucketType from B2 backend and converts // GetBucketPolicies returns the current bucketType from B2 backend and converts
@ -744,7 +745,7 @@ func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
// bkt.Type can also be snapshot, but it is only allowed through B2 browser console, // bkt.Type can also be snapshot, but it is only allowed through B2 browser console,
// just return back as policy not found for all cases. // just return back as policy not found for all cases.
// CreateBucket always sets the value to allPrivate by default. // CreateBucket always sets the value to allPrivate by default.
return policy.BucketAccessPolicy{}, traceError(PolicyNotFound{Bucket: bucket}) return policy.BucketAccessPolicy{}, errors.Trace(PolicyNotFound{Bucket: bucket})
} }
// DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'. // DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'.
@ -755,5 +756,5 @@ func (l *b2Objects) DeleteBucketPolicies(bucket string) error {
} }
bkt.Type = bucketTypePrivate bkt.Type = bucketTypePrivate
_, err = bkt.Update(l.ctx) _, err = bkt.Update(l.ctx)
return b2ToObjectError(traceError(err)) return b2ToObjectError(errors.Trace(err))
} }

View File

@ -22,6 +22,8 @@ import (
"net/http" "net/http"
"strconv" "strconv"
"time" "time"
"github.com/minio/minio/pkg/errors"
) )
func toGCSPublicURL(bucket, object string) string { func toGCSPublicURL(bucket, object string) string {
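toGCSPublicURL's body is also outside this hunk; since GCS serves anonymous reads from storage.googleapis.com, a reconstruction consistent with how AnonGetObject and AnonGetBucketInfo use it would be (treat the exact host/path as an assumption):

```go
package main

import "fmt"

// toGCSPublicURL builds the unauthenticated download URL for an
// object; the host is GCS's public endpoint. Reconstructed from the
// call sites, so treat it as an assumption rather than the exact body.
func toGCSPublicURL(bucket, object string) string {
	return fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, object)
}

func main() {
	fmt.Println(toGCSPublicURL("bucket", "path/to/object"))
	// https://storage.googleapis.com/bucket/path/to/object
}
```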
@ -32,7 +34,7 @@ func toGCSPublicURL(bucket, object string) string {
func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error { func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
req, err := http.NewRequest("GET", toGCSPublicURL(bucket, object), nil) req, err := http.NewRequest("GET", toGCSPublicURL(bucket, object), nil)
if err != nil { if err != nil {
return gcsToObjectError(traceError(err), bucket, object) return gcsToObjectError(errors.Trace(err), bucket, object)
} }
if length > 0 && startOffset > 0 { if length > 0 && startOffset > 0 {
@ -43,28 +45,28 @@ func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return gcsToObjectError(traceError(err), bucket, object) return gcsToObjectError(errors.Trace(err), bucket, object)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) return gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
} }
_, err = io.Copy(writer, resp.Body) _, err = io.Copy(writer, resp.Body)
return gcsToObjectError(traceError(err), bucket, object) return gcsToObjectError(errors.Trace(err), bucket, object)
} }
// AnonGetObjectInfo - Get object info anonymously // AnonGetObjectInfo - Get object info anonymously
func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
resp, err := http.Head(toGCSPublicURL(bucket, object)) resp, err := http.Head(toGCSPublicURL(bucket, object))
if err != nil { if err != nil {
return objInfo, gcsToObjectError(traceError(err), bucket, object) return objInfo, gcsToObjectError(errors.Trace(err), bucket, object)
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return objInfo, gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object) return objInfo, gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
} }
var contentLength int64 var contentLength int64
@ -72,13 +74,13 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob
if contentLengthStr != "" { if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil { if err != nil {
return objInfo, gcsToObjectError(traceError(errUnexpected), bucket, object) return objInfo, gcsToObjectError(errors.Trace(errUnexpected), bucket, object)
} }
} }
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil { if err != nil {
return objInfo, traceError(err) return objInfo, errors.Trace(err)
} }
objInfo.ModTime = t objInfo.ModTime = t
@ -99,7 +101,7 @@ func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo Ob
func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) { func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys) result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil { if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket) return ListObjectsInfo{}, s3ToObjectError(errors.Trace(err), bucket)
} }
return fromMinioClientListBucketResult(bucket, result), nil return fromMinioClientListBucketResult(bucket, result), nil
@ -110,7 +112,7 @@ func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimi
// Request a V1 object listing from the backend // Request a V1 object listing from the backend
result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys) result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
if err != nil { if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket) return ListObjectsV2Info{}, s3ToObjectError(errors.Trace(err), bucket)
} }
// translate V1 Result to V2Info // translate V1 Result to V2Info
return fromMinioClientListBucketResultToV2Info(bucket, result), nil return fromMinioClientListBucketResultToV2Info(bucket, result), nil
@ -120,18 +122,18 @@ func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimi
func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) { func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
resp, err := http.Head(toGCSPublicURL(bucket, "")) resp, err := http.Head(toGCSPublicURL(bucket, ""))
if err != nil { if err != nil {
return bucketInfo, gcsToObjectError(traceError(err)) return bucketInfo, gcsToObjectError(errors.Trace(err))
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return bucketInfo, gcsToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket) return bucketInfo, gcsToObjectError(errors.Trace(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
} }
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified")) t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil { if err != nil {
return bucketInfo, traceError(err) return bucketInfo, errors.Trace(err)
} }
// Last-Modified date being returned by GCS // Last-Modified date being returned by GCS
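Both anonymous handlers parse the Last-Modified header with time.RFC1123 because that is the form GCS emits. Standalone, with a made-up header value:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative value only; the real one comes from the GCS response header.
	lastModified := "Sat, 25 Nov 2017 11:58:29 GMT"
	t, err := time.Parse(time.RFC1123, lastModified)
	if err != nil {
		fmt.Println("unparseable Last-Modified:", err)
		return
	}
	fmt.Println("modified at:", t.UTC())
}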
@ -38,6 +38,8 @@ import (
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
"google.golang.org/api/iterator" "google.golang.org/api/iterator"
"google.golang.org/api/option" "google.golang.org/api/option"
errors2 "github.com/minio/minio/pkg/errors"
) )
var ( var (
@ -179,15 +181,15 @@ func gcsToObjectError(err error, params ...string) error {
return nil return nil
} }
e, ok := err.(*Error) e, ok := err.(*errors2.Error)
if !ok { if !ok {
// Code should be fixed if this function is called without doing traceError() // Code should be fixed if this function is called without doing errors2.Trace()
// Otherwise, handling all the different situations here would make this function complicated. // Otherwise, handling all the different situations here would make this function complicated.
errorIf(err, "Expected type *Error") errorIf(err, "Expected type *Error")
return err return err
} }
err = e.e err = e.Cause
bucket := "" bucket := ""
object := "" object := ""
@ -208,7 +210,7 @@ func gcsToObjectError(err error, params ...string) error {
err = BucketNotFound{ err = BucketNotFound{
Bucket: bucket, Bucket: bucket,
} }
e.e = err e.Cause = err
return e return e
case "storage: object doesn't exist": case "storage: object doesn't exist":
if uploadID != "" { if uploadID != "" {
@ -221,7 +223,7 @@ func gcsToObjectError(err error, params ...string) error {
Object: object, Object: object,
} }
} }
e.e = err e.Cause = err
return e return e
} }
@ -229,12 +231,12 @@ func gcsToObjectError(err error, params ...string) error {
if !ok { if !ok {
// We don't interpret non-Minio errors, as Minio errors will // have a StatusCode to help convert them to object errors.
// have StatusCode to help to convert to object errors. // have StatusCode to help to convert to object errors.
e.e = err e.Cause = err
return e return e
} }
if len(googleAPIErr.Errors) == 0 { if len(googleAPIErr.Errors) == 0 {
e.e = err e.Cause = err
return e return e
} }
@ -279,7 +281,7 @@ func gcsToObjectError(err error, params ...string) error {
err = fmt.Errorf("Unsupported error reason: %s", reason) err = fmt.Errorf("Unsupported error reason: %s", reason)
} }
e.e = err e.Cause = err
return e return e
} }
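Note the shape of gcsToObjectError after this change: it unwraps the traced error once, rewrites only the Cause field with the mapped object-layer error, and returns the same *errors2.Error, so the stack captured at the original call site survives the translation. A stripped-down sketch of that pattern with one hypothetical mapping (bucketNotFound stands in for the real object-layer type):

package main

import (
	"fmt"

	errors2 "github.com/minio/minio/pkg/errors"
)

// bucketNotFound is a stand-in for the object-layer error type.
type bucketNotFound struct{ Bucket string }

func (e bucketNotFound) Error() string { return "bucket not found: " + e.Bucket }

func toObjectError(err error, bucket string) error {
	e, ok := err.(*errors2.Error)
	if !ok {
		return err // caller forgot to Trace(); pass through unchanged
	}
	if e.Cause.Error() == "storage: bucket doesn't exist" {
		e.Cause = bucketNotFound{Bucket: bucket} // swap the cause, keep the stack
	}
	return e
}

func main() {
	err := toObjectError(errors2.Trace(fmt.Errorf("storage: bucket doesn't exist")), "photos")
	fmt.Println(err)
}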
@ -424,14 +426,14 @@ func (l *gcsGateway) MakeBucketWithLocation(bucket, location string) error {
Location: location, Location: location,
}) })
return gcsToObjectError(traceError(err), bucket) return gcsToObjectError(errors2.Trace(err), bucket)
} }
// GetBucketInfo - Get bucket metadata. // GetBucketInfo - Get bucket metadata.
func (l *gcsGateway) GetBucketInfo(bucket string) (BucketInfo, error) { func (l *gcsGateway) GetBucketInfo(bucket string) (BucketInfo, error) {
attrs, err := l.client.Bucket(bucket).Attrs(l.ctx) attrs, err := l.client.Bucket(bucket).Attrs(l.ctx)
if err != nil { if err != nil {
return BucketInfo{}, gcsToObjectError(traceError(err), bucket) return BucketInfo{}, gcsToObjectError(errors2.Trace(err), bucket)
} }
return BucketInfo{ return BucketInfo{
@ -452,7 +454,7 @@ func (l *gcsGateway) ListBuckets() (buckets []BucketInfo, err error) {
} }
if ierr != nil { if ierr != nil {
return buckets, gcsToObjectError(traceError(ierr)) return buckets, gcsToObjectError(errors2.Trace(ierr))
} }
buckets = append(buckets, BucketInfo{ buckets = append(buckets, BucketInfo{
@ -477,7 +479,7 @@ func (l *gcsGateway) DeleteBucket(bucket string) error {
break break
} }
if err != nil { if err != nil {
return gcsToObjectError(traceError(err)) return gcsToObjectError(errors2.Trace(err))
} }
if objAttrs.Prefix == globalMinioSysTmp { if objAttrs.Prefix == globalMinioSysTmp {
gcsMinioPathFound = true gcsMinioPathFound = true
@ -487,7 +489,7 @@ func (l *gcsGateway) DeleteBucket(bucket string) error {
break break
} }
if nonGCSMinioPathFound { if nonGCSMinioPathFound {
return gcsToObjectError(traceError(BucketNotEmpty{})) return gcsToObjectError(errors2.Trace(BucketNotEmpty{}))
} }
if gcsMinioPathFound { if gcsMinioPathFound {
// Remove minio.sys.tmp before deleting the bucket. // Remove minio.sys.tmp before deleting the bucket.
@ -498,16 +500,16 @@ func (l *gcsGateway) DeleteBucket(bucket string) error {
break break
} }
if err != nil { if err != nil {
return gcsToObjectError(traceError(err)) return gcsToObjectError(errors2.Trace(err))
} }
err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx) err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx)
if err != nil { if err != nil {
return gcsToObjectError(traceError(err)) return gcsToObjectError(errors2.Trace(err))
} }
} }
} }
err := l.client.Bucket(bucket).Delete(l.ctx) err := l.client.Bucket(bucket).Delete(l.ctx)
return gcsToObjectError(traceError(err), bucket) return gcsToObjectError(errors2.Trace(err), bucket)
} }
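DeleteBucket scans the bucket with the standard cloud.google.com/go/storage iterator, treating iterator.Done as the only clean exit. The loop shape, pulled out on its own (the bucket name is a placeholder):

package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// listAll prints every object in a bucket; the loop mirrors the scans above.
func listAll(ctx context.Context, client *storage.Client, bucket string) error {
	it := client.Bucket(bucket).Objects(ctx, &storage.Query{})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return nil // listed everything
		}
		if err != nil {
			return err // the gateway wraps this with errors2.Trace
		}
		fmt.Println(attrs.Name)
	}
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // uses application default credentials
	if err != nil {
		panic(err)
	}
	defer client.Close()
	if err := listAll(ctx, client, "mybucket"); err != nil {
		panic(err)
	}
}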
func toGCSPageToken(name string) string { func toGCSPageToken(name string) string {
@ -589,7 +591,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de
break break
} }
if err != nil { if err != nil {
return ListObjectsInfo{}, gcsToObjectError(traceError(err), bucket, prefix) return ListObjectsInfo{}, gcsToObjectError(errors2.Trace(err), bucket, prefix)
} }
nextMarker = toGCSPageToken(attrs.Name) nextMarker = toGCSPageToken(attrs.Name)
@ -672,7 +674,7 @@ func (l *gcsGateway) ListObjectsV2(bucket, prefix, continuationToken, delimiter
} }
if err != nil { if err != nil {
return ListObjectsV2Info{}, gcsToObjectError(traceError(err), bucket, prefix) return ListObjectsV2Info{}, gcsToObjectError(errors2.Trace(err), bucket, prefix)
} }
if attrs.Prefix == globalMinioSysTmp { if attrs.Prefix == globalMinioSysTmp {
@ -716,18 +718,18 @@ func (l *gcsGateway) GetObject(bucket string, key string, startOffset int64, len
// if we want to mimic S3 behavior exactly, we need to verify that the bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify that the bucket exists first,
// otherwise GCS will just report that the object doesn't exist even when the bucket is missing // otherwise GCS will just report that the object doesn't exist even when the bucket is missing
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return gcsToObjectError(traceError(err), bucket) return gcsToObjectError(errors2.Trace(err), bucket)
} }
object := l.client.Bucket(bucket).Object(key) object := l.client.Bucket(bucket).Object(key)
r, err := object.NewRangeReader(l.ctx, startOffset, length) r, err := object.NewRangeReader(l.ctx, startOffset, length)
if err != nil { if err != nil {
return gcsToObjectError(traceError(err), bucket, key) return gcsToObjectError(errors2.Trace(err), bucket, key)
} }
defer r.Close() defer r.Close()
if _, err := io.Copy(writer, r); err != nil { if _, err := io.Copy(writer, r); err != nil {
return gcsToObjectError(traceError(err), bucket, key) return gcsToObjectError(errors2.Trace(err), bucket, key)
} }
return nil return nil
@ -776,12 +778,12 @@ func (l *gcsGateway) GetObjectInfo(bucket string, object string) (ObjectInfo, er
// if we want to mimic S3 behavior exactly, we need to verify that the bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify that the bucket exists first,
// otherwise GCS will just report that the object doesn't exist even when the bucket is missing // otherwise GCS will just report that the object doesn't exist even when the bucket is missing
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket)
} }
attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx) attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, object) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, object)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
@ -792,7 +794,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
// if we want to mimic S3 behavior exactly, we need to verify that the bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify that the bucket exists first,
// otherwise GCS will just report that the object doesn't exist even when the bucket is missing // otherwise GCS will just report that the object doesn't exist even when the bucket is missing
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket)
} }
object := l.client.Bucket(bucket).Object(key) object := l.client.Bucket(bucket).Object(key)
@ -806,7 +808,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
if _, err := io.Copy(w, data); err != nil { if _, err := io.Copy(w, data); err != nil {
// Close the object writer upon error. // Close the object writer upon error.
w.CloseWithError(err) w.CloseWithError(err)
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
} }
// Close the object writer upon success. // Close the object writer upon success.
@ -814,7 +816,7 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, met
attrs, err := object.Attrs(l.ctx) attrs, err := object.Attrs(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
@ -832,7 +834,7 @@ func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket s
attrs, err := copier.Run(l.ctx) attrs, err := copier.Run(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), destBucket, destObject) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), destBucket, destObject)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
@ -842,7 +844,7 @@ func (l *gcsGateway) CopyObject(srcBucket string, srcObject string, destBucket s
func (l *gcsGateway) DeleteObject(bucket string, object string) error { func (l *gcsGateway) DeleteObject(bucket string, object string) error {
err := l.client.Bucket(bucket).Object(object).Delete(l.ctx) err := l.client.Bucket(bucket).Object(object).Delete(l.ctx)
if err != nil { if err != nil {
return gcsToObjectError(traceError(err), bucket, object) return gcsToObjectError(errors2.Trace(err), bucket, object)
} }
return nil return nil
@ -868,7 +870,7 @@ func (l *gcsGateway) NewMultipartUpload(bucket string, key string, metadata map[
bucket, bucket,
key, key,
}); err != nil { }); err != nil {
return "", gcsToObjectError(traceError(err), bucket, key) return "", gcsToObjectError(errors2.Trace(err), bucket, key)
} }
return uploadID, nil return uploadID, nil
} }
@ -888,7 +890,7 @@ func (l *gcsGateway) ListMultipartUploads(bucket string, prefix string, keyMarke
// an object layer compatible error upon any error. // an object layer compatible error upon any error.
func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID string) error { func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID string) error {
_, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx) _, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx)
return gcsToObjectError(traceError(err), bucket, key, uploadID) return gcsToObjectError(errors2.Trace(err), bucket, key, uploadID)
} }
// PutObjectPart puts a part of object in bucket // PutObjectPart puts a part of object in bucket
@ -909,7 +911,7 @@ func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, p
if _, err := io.Copy(w, data); err != nil { if _, err := io.Copy(w, data); err != nil {
// Make sure to close object writer upon error. // Make sure to close object writer upon error.
w.Close() w.Close()
return PartInfo{}, gcsToObjectError(traceError(err), bucket, key) return PartInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
} }
// Make sure to close the object writer upon success. // Make sure to close the object writer upon success.
w.Close() w.Close()
@ -940,7 +942,7 @@ func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error
break break
} }
if err != nil { if err != nil {
return gcsToObjectError(traceError(err), bucket, key) return gcsToObjectError(errors2.Trace(err), bucket, key)
} }
object := l.client.Bucket(bucket).Object(attrs.Name) object := l.client.Bucket(bucket).Object(attrs.Name)
@ -973,23 +975,23 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
partZeroAttrs, err := object.Attrs(l.ctx) partZeroAttrs, err := object.Attrs(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key, uploadID) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key, uploadID)
} }
r, err := object.NewReader(l.ctx) r, err := object.NewReader(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
} }
defer r.Close() defer r.Close()
// Check version compatibility of the meta file before compose() // Check version compatibility of the meta file before compose()
multipartMeta := gcsMultipartMetaV1{} multipartMeta := gcsMultipartMetaV1{}
if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil { if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
} }
if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion { if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion {
return ObjectInfo{}, gcsToObjectError(traceError(errFormatNotSupported), bucket, key) return ObjectInfo{}, gcsToObjectError(errors2.Trace(errFormatNotSupported), bucket, key)
} }
// Validate if the gcs.json stores valid entries for the bucket and key. // Validate if the gcs.json stores valid entries for the bucket and key.
@ -1006,7 +1008,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
uploadedPart.PartNumber, uploadedPart.ETag))) uploadedPart.PartNumber, uploadedPart.ETag)))
partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(l.ctx) partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(l.ctx)
if pErr != nil { if pErr != nil {
return ObjectInfo{}, gcsToObjectError(traceError(pErr), bucket, key, uploadID) return ObjectInfo{}, gcsToObjectError(errors2.Trace(pErr), bucket, key, uploadID)
} }
partSizes[i] = partAttr.Size partSizes[i] = partAttr.Size
} }
@ -1014,7 +1016,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
// Error out if any part except the last is smaller than 5 MiB. // Error out if any part except the last is smaller than 5 MiB.
for i, size := range partSizes[:len(partSizes)-1] { for i, size := range partSizes[:len(partSizes)-1] {
if size < globalMinPartSize { if size < globalMinPartSize {
return ObjectInfo{}, traceError(PartTooSmall{ return ObjectInfo{}, errors2.Trace(PartTooSmall{
PartNumber: uploadedParts[i].PartNumber, PartNumber: uploadedParts[i].PartNumber,
PartSize: size, PartSize: size,
PartETag: uploadedParts[i].ETag, PartETag: uploadedParts[i].ETag,
@ -1045,7 +1047,7 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
composer.Metadata = partZeroAttrs.Metadata composer.Metadata = partZeroAttrs.Metadata
if _, err = composer.Run(l.ctx); err != nil { if _, err = composer.Run(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
} }
} }
@ -1058,10 +1060,10 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
composer.Metadata = partZeroAttrs.Metadata composer.Metadata = partZeroAttrs.Metadata
attrs, err := composer.Run(l.ctx) attrs, err := composer.Run(l.ctx)
if err != nil { if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
} }
if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil { if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key) return ObjectInfo{}, gcsToObjectError(errors2.Trace(err), bucket, key)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
} }
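The final assembly above is a GCS compose via composer.Run. Reduced to its essentials it looks like the sketch below; a single compose call accepts at most 32 sources, which is why the full implementation composes in stages before this last step (all names are placeholders):

package gcssketch

import (
	"context"

	"cloud.google.com/go/storage"
)

// composeParts stitches uploaded part objects into the final object.
func composeParts(ctx context.Context, client *storage.Client) error {
	bkt := client.Bucket("mybucket")
	srcs := []*storage.ObjectHandle{
		bkt.Object("uploads/part-1"),
		bkt.Object("uploads/part-2"),
	}
	composer := bkt.Object("final/object").ComposerFrom(srcs...)
	composer.ContentType = "application/octet-stream"
	if _, err := composer.Run(ctx); err != nil {
		return err // the gateway wraps and maps this error
	}
	return nil
}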
@ -1080,16 +1082,16 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
prefix := bucket + "/*" // For all objects inside the bucket. prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 { if len(policies) != 1 {
return traceError(NotImplemented{}) return errors2.Trace(NotImplemented{})
} }
if policies[0].Prefix != prefix { if policies[0].Prefix != prefix {
return traceError(NotImplemented{}) return errors2.Trace(NotImplemented{})
} }
acl := l.client.Bucket(bucket).ACL() acl := l.client.Bucket(bucket).ACL()
if policies[0].Policy == policy.BucketPolicyNone { if policies[0].Policy == policy.BucketPolicyNone {
if err := acl.Delete(l.ctx, storage.AllUsers); err != nil { if err := acl.Delete(l.ctx, storage.AllUsers); err != nil {
return gcsToObjectError(traceError(err), bucket) return gcsToObjectError(errors2.Trace(err), bucket)
} }
return nil return nil
} }
@ -1101,11 +1103,11 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
case policy.BucketPolicyWriteOnly: case policy.BucketPolicyWriteOnly:
role = storage.RoleWriter role = storage.RoleWriter
default: default:
return traceError(NotImplemented{}) return errors2.Trace(NotImplemented{})
} }
if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil { if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil {
return gcsToObjectError(traceError(err), bucket) return gcsToObjectError(errors2.Trace(err), bucket)
} }
return nil return nil
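SetBucketPolicies flattens an S3-style policy into a single GCS ACL rule for storage.AllUsers, which is why anything other than one whole-bucket statement is rejected with NotImplemented. The mapping itself is tiny; a sketch using the minio-go policy constants seen above:

package gcssketch

import (
	"fmt"

	"cloud.google.com/go/storage"
	"github.com/minio/minio-go/pkg/policy"
)

// roleFor maps the two supported bucket policies onto GCS ACL roles.
func roleFor(p policy.BucketPolicy) (storage.ACLRole, error) {
	switch p {
	case policy.BucketPolicyReadOnly:
		return storage.RoleReader, nil
	case policy.BucketPolicyWriteOnly:
		return storage.RoleWriter, nil
	default:
		return "", fmt.Errorf("unsupported policy %q", p)
	}
}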
@ -1115,7 +1117,7 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
rules, err := l.client.Bucket(bucket).ACL().List(l.ctx) rules, err := l.client.Bucket(bucket).ACL().List(l.ctx)
if err != nil { if err != nil {
return policy.BucketAccessPolicy{}, gcsToObjectError(traceError(err), bucket) return policy.BucketAccessPolicy{}, gcsToObjectError(errors2.Trace(err), bucket)
} }
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
for _, r := range rules { for _, r := range rules {
@ -1131,7 +1133,7 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy
} }
// Return a NoSuchBucketPolicy error when no policy is set // Return a NoSuchBucketPolicy error when no policy is set
if len(policyInfo.Statements) == 0 { if len(policyInfo.Statements) == 0 {
return policy.BucketAccessPolicy{}, gcsToObjectError(traceError(PolicyNotFound{}), bucket) return policy.BucketAccessPolicy{}, gcsToObjectError(errors2.Trace(PolicyNotFound{}), bucket)
} }
return policyInfo, nil return policyInfo, nil
} }
@ -1140,7 +1142,7 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy
func (l *gcsGateway) DeleteBucketPolicies(bucket string) error { func (l *gcsGateway) DeleteBucketPolicies(bucket string) error {
// This only removes the storage.AllUsers policies // This only removes the storage.AllUsers policies
if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil { if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil {
return gcsToObjectError(traceError(err), bucket) return gcsToObjectError(errors2.Trace(err), bucket)
} }
return nil return nil
@ -17,7 +17,6 @@
package cmd package cmd
import ( import (
"errors"
"fmt" "fmt"
"net/url" "net/url"
"os" "os"
@ -28,6 +27,7 @@ import (
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio/pkg/errors"
miniohttp "github.com/minio/minio/pkg/http" miniohttp "github.com/minio/minio/pkg/http"
) )
@ -115,7 +115,7 @@ func validateGatewayArguments(serverAddr, endpointAddr string) error {
return err return err
} }
if sameTarget { if sameTarget {
return errors.New("endpoint points to the local gateway") return fmt.Errorf("endpoint points to the local gateway")
} }
} }
return nil return nil
@ -144,7 +144,7 @@ func startGateway(ctx *cli.Context, gw Gateway) {
// Validate if we have access, secret set through environment. // Validate if we have access, secret set through environment.
gatewayName := gw.Name() gatewayName := gw.Name()
if !globalIsEnvCreds { if !globalIsEnvCreds {
errorIf(errors.New("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName) errorIf(fmt.Errorf("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName)
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1) cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
} }
@ -158,7 +158,7 @@ func startGateway(ctx *cli.Context, gw Gateway) {
enableLoggers() enableLoggers()
// Init the error tracing module. // Init the error tracing module.
initError() errors.Init(GOPATH, "github.com/minio/minio")
// Check and load SSL certificates. // Check and load SSL certificates.
var err error var err error
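The one-line swap above (initError() to errors.Init) is the heart of this commit: the tracer now lives in pkg/errors and is seeded at startup with the GOPATH root and the project's import path, presumably so recorded stack frames print relative to the source tree. A sketch of the startup call, assuming only the Init and Trace signatures visible in this diff:

package main

import (
	"fmt"
	"os"

	"github.com/minio/minio/pkg/errors"
)

func main() {
	// The gateway passes its own GOPATH variable; os.Getenv is a stand-in here.
	errors.Init(os.Getenv("GOPATH"), "github.com/minio/minio")
	err := errors.Trace(fmt.Errorf("boom"))
	fmt.Println(err)
}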
@ -20,6 +20,7 @@ import (
"io" "io"
minio "github.com/minio/minio-go" minio "github.com/minio/minio-go"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -27,7 +28,7 @@ import (
func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, e error) { func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, e error) {
oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata)) oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata))
if err != nil { if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object) return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
} }
return fromMinioClientObjectInfo(bucket, oi), nil return fromMinioClientObjectInfo(bucket, oi), nil
@ -37,17 +38,17 @@ func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reade
func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error { func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
opts := minio.GetObjectOptions{} opts := minio.GetObjectOptions{}
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
return s3ToObjectError(traceError(err), bucket, key) return s3ToObjectError(errors.Trace(err), bucket, key)
} }
object, _, err := l.anonClient.GetObject(bucket, key, opts) object, _, err := l.anonClient.GetObject(bucket, key, opts)
if err != nil { if err != nil {
return s3ToObjectError(traceError(err), bucket, key) return s3ToObjectError(errors.Trace(err), bucket, key)
} }
defer object.Close() defer object.Close()
if _, err := io.CopyN(writer, object, length); err != nil { if _, err := io.CopyN(writer, object, length); err != nil {
return s3ToObjectError(traceError(err), bucket, key) return s3ToObjectError(errors.Trace(err), bucket, key)
} }
return nil return nil
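The range math above is the important detail: Minio carries (startOffset, length) while minio-go's SetRange expects an inclusive byte range, hence startOffset+length-1. A sketch of a ranged read using the three-value GetObject form this diff uses on the anonymous Core client (endpoint, bucket, and key are placeholders):

package s3sketch

import (
	"io"
	"os"

	minio "github.com/minio/minio-go"
)

// readRange streams length bytes starting at startOffset to stdout.
func readRange(c minio.Core, startOffset, length int64) error {
	opts := minio.GetObjectOptions{}
	if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
		return err // e.g. a negative offset
	}
	obj, _, err := c.GetObject("mybucket", "mykey", opts)
	if err != nil {
		return err
	}
	defer obj.Close()
	_, err = io.CopyN(os.Stdout, obj, length)
	return err
}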
@ -57,7 +58,7 @@ func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64,
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, e error) { func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, e error) {
oi, err := l.anonClient.StatObject(bucket, object, minio.StatObjectOptions{}) oi, err := l.anonClient.StatObject(bucket, object, minio.StatObjectOptions{})
if err != nil { if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object) return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
} }
return fromMinioClientObjectInfo(bucket, oi), nil return fromMinioClientObjectInfo(bucket, oi), nil
@ -67,7 +68,7 @@ func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo Obj
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys) result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil { if err != nil {
return loi, s3ToObjectError(traceError(err), bucket) return loi, s3ToObjectError(errors.Trace(err), bucket)
} }
return fromMinioClientListBucketResult(bucket, result), nil return fromMinioClientListBucketResult(bucket, result), nil
@ -77,7 +78,7 @@ func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string,
func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) { func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) {
result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys) result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil { if err != nil {
return loi, s3ToObjectError(traceError(err), bucket) return loi, s3ToObjectError(errors.Trace(err), bucket)
} }
return fromMinioClientListBucketV2Result(bucket, result), nil return fromMinioClientListBucketV2Result(bucket, result), nil
@ -86,14 +87,14 @@ func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimit
// AnonGetBucketInfo - Get bucket metadata anonymously. // AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) { func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) {
if exists, err := l.anonClient.BucketExists(bucket); err != nil { if exists, err := l.anonClient.BucketExists(bucket); err != nil {
return bi, s3ToObjectError(traceError(err), bucket) return bi, s3ToObjectError(errors.Trace(err), bucket)
} else if !exists { } else if !exists {
return bi, traceError(BucketNotFound{Bucket: bucket}) return bi, errors.Trace(BucketNotFound{Bucket: bucket})
} }
buckets, err := l.anonClient.ListBuckets() buckets, err := l.anonClient.ListBuckets()
if err != nil { if err != nil {
return bi, s3ToObjectError(traceError(err), bucket) return bi, s3ToObjectError(errors.Trace(err), bucket)
} }
for _, bi := range buckets { for _, bi := range buckets {
@ -107,5 +108,5 @@ func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) {
}, nil }, nil
} }
return bi, traceError(BucketNotFound{Bucket: bucket}) return bi, errors.Trace(BucketNotFound{Bucket: bucket})
} }
@ -24,6 +24,7 @@ import (
minio "github.com/minio/minio-go" minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -105,15 +106,15 @@ func s3ToObjectError(err error, params ...string) error {
return nil return nil
} }
e, ok := err.(*Error) e, ok := err.(*errors.Error)
if !ok { if !ok {
// Code should be fixed if this function is called without doing traceError() // Code should be fixed if this function is called without doing errors.Trace()
// Otherwise, handling all the different situations here would make this function complicated. // Otherwise, handling all the different situations here would make this function complicated.
errorIf(err, "Expected type *Error") errorIf(err, "Expected type *Error")
return err return err
} }
err = e.e err = e.Cause
bucket := "" bucket := ""
object := "" object := ""
@ -163,7 +164,7 @@ func s3ToObjectError(err error, params ...string) error {
err = PartTooSmall{} err = PartTooSmall{}
} }
e.e = err e.Cause = err
return e return e
} }
@ -230,7 +231,7 @@ func (l *s3Objects) StorageInfo() (si StorageInfo) {
func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error { func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
err := l.Client.MakeBucket(bucket, location) err := l.Client.MakeBucket(bucket, location)
if err != nil { if err != nil {
return s3ToObjectError(traceError(err), bucket) return s3ToObjectError(errors.Trace(err), bucket)
} }
return err return err
} }
@ -245,12 +246,12 @@ func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// access to these buckets. // access to these buckets.
// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html // Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
if s3utils.CheckValidBucketName(bucket) != nil { if s3utils.CheckValidBucketName(bucket) != nil {
return bi, traceError(BucketNameInvalid{Bucket: bucket}) return bi, errors.Trace(BucketNameInvalid{Bucket: bucket})
} }
buckets, err := l.Client.ListBuckets() buckets, err := l.Client.ListBuckets()
if err != nil { if err != nil {
return bi, s3ToObjectError(traceError(err), bucket) return bi, s3ToObjectError(errors.Trace(err), bucket)
} }
for _, bi := range buckets { for _, bi := range buckets {
@ -264,14 +265,14 @@ func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
}, nil }, nil
} }
return bi, traceError(BucketNotFound{Bucket: bucket}) return bi, errors.Trace(BucketNotFound{Bucket: bucket})
} }
// ListBuckets lists all S3 buckets // ListBuckets lists all S3 buckets
func (l *s3Objects) ListBuckets() ([]BucketInfo, error) { func (l *s3Objects) ListBuckets() ([]BucketInfo, error) {
buckets, err := l.Client.ListBuckets() buckets, err := l.Client.ListBuckets()
if err != nil { if err != nil {
return nil, s3ToObjectError(traceError(err)) return nil, s3ToObjectError(errors.Trace(err))
} }
b := make([]BucketInfo, len(buckets)) b := make([]BucketInfo, len(buckets))
@ -289,7 +290,7 @@ func (l *s3Objects) ListBuckets() ([]BucketInfo, error) {
func (l *s3Objects) DeleteBucket(bucket string) error { func (l *s3Objects) DeleteBucket(bucket string) error {
err := l.Client.RemoveBucket(bucket) err := l.Client.RemoveBucket(bucket)
if err != nil { if err != nil {
return s3ToObjectError(traceError(err), bucket) return s3ToObjectError(errors.Trace(err), bucket)
} }
return nil return nil
} }
@ -298,7 +299,7 @@ func (l *s3Objects) DeleteBucket(bucket string) error {
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys) result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil { if err != nil {
return loi, s3ToObjectError(traceError(err), bucket) return loi, s3ToObjectError(errors.Trace(err), bucket)
} }
return fromMinioClientListBucketResult(bucket, result), nil return fromMinioClientListBucketResult(bucket, result), nil
@ -308,7 +309,7 @@ func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, del
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) { func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys) result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil { if err != nil {
return loi, s3ToObjectError(traceError(err), bucket) return loi, s3ToObjectError(errors.Trace(err), bucket)
} }
return fromMinioClientListBucketV2Result(bucket, result), nil return fromMinioClientListBucketV2Result(bucket, result), nil
@ -366,23 +367,23 @@ func fromMinioClientListBucketResult(bucket string, result minio.ListBucketResul
// length indicates the total length of the object. // length indicates the total length of the object.
func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error { func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
if length < 0 && length != -1 { if length < 0 && length != -1 {
return s3ToObjectError(traceError(errInvalidArgument), bucket, key) return s3ToObjectError(errors.Trace(errInvalidArgument), bucket, key)
} }
opts := minio.GetObjectOptions{} opts := minio.GetObjectOptions{}
if startOffset >= 0 && length >= 0 { if startOffset >= 0 && length >= 0 {
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
return s3ToObjectError(traceError(err), bucket, key) return s3ToObjectError(errors.Trace(err), bucket, key)
} }
} }
object, _, err := l.Client.GetObject(bucket, key, opts) object, _, err := l.Client.GetObject(bucket, key, opts)
if err != nil { if err != nil {
return s3ToObjectError(traceError(err), bucket, key) return s3ToObjectError(errors.Trace(err), bucket, key)
} }
defer object.Close() defer object.Close()
if _, err := io.Copy(writer, object); err != nil { if _, err := io.Copy(writer, object); err != nil {
return s3ToObjectError(traceError(err), bucket, key) return s3ToObjectError(errors.Trace(err), bucket, key)
} }
return nil return nil
} }
@ -408,7 +409,7 @@ func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) { func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
oi, err := l.Client.StatObject(bucket, object, minio.StatObjectOptions{}) oi, err := l.Client.StatObject(bucket, object, minio.StatObjectOptions{})
if err != nil { if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object) return ObjectInfo{}, s3ToObjectError(errors.Trace(err), bucket, object)
} }
return fromMinioClientObjectInfo(bucket, oi), nil return fromMinioClientObjectInfo(bucket, oi), nil
@ -418,7 +419,7 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) { func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata)) oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata))
if err != nil { if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object) return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
} }
return fromMinioClientObjectInfo(bucket, oi), nil return fromMinioClientObjectInfo(bucket, oi), nil
@ -432,7 +433,7 @@ func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket str
// So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API. // So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API.
metadata["x-amz-metadata-directive"] = "REPLACE" metadata["x-amz-metadata-directive"] = "REPLACE"
if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, metadata); err != nil { if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, metadata); err != nil {
return objInfo, s3ToObjectError(traceError(err), srcBucket, srcObject) return objInfo, s3ToObjectError(errors.Trace(err), srcBucket, srcObject)
} }
return l.GetObjectInfo(dstBucket, dstObject) return l.GetObjectInfo(dstBucket, dstObject)
} }
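Without the directive, S3 keeps the source object's metadata on a server-side copy and silently ignores the new set, so CopyObject pins x-amz-metadata-directive to REPLACE before delegating. Condensed, using the five-argument CopyObject form seen in this diff (bucket and object names are placeholders):

package s3sketch

import (
	minio "github.com/minio/minio-go"
)

// copyWithMetadata performs a server-side copy that keeps the
// caller-supplied metadata instead of the source object's.
func copyWithMetadata(c minio.Core, meta map[string]string) error {
	meta["x-amz-metadata-directive"] = "REPLACE"
	_, err := c.CopyObject("src-bucket", "src-key", "dst-bucket", "dst-key", meta)
	return err
}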
@ -441,7 +442,7 @@ func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket str
func (l *s3Objects) DeleteObject(bucket string, object string) error { func (l *s3Objects) DeleteObject(bucket string, object string) error {
err := l.Client.RemoveObject(bucket, object) err := l.Client.RemoveObject(bucket, object)
if err != nil { if err != nil {
return s3ToObjectError(traceError(err), bucket, object) return s3ToObjectError(errors.Trace(err), bucket, object)
} }
return nil return nil
@ -519,7 +520,7 @@ func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata ma
opts := minio.PutObjectOptions{UserMetadata: metadata} opts := minio.PutObjectOptions{UserMetadata: metadata}
uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts) uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts)
if err != nil { if err != nil {
return uploadID, s3ToObjectError(traceError(err), bucket, object) return uploadID, s3ToObjectError(errors.Trace(err), bucket, object)
} }
return uploadID, nil return uploadID, nil
} }
@ -538,7 +539,7 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) { func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5(), data.SHA256()) info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5(), data.SHA256())
if err != nil { if err != nil {
return pi, s3ToObjectError(traceError(err), bucket, object) return pi, s3ToObjectError(errors.Trace(err), bucket, object)
} }
return fromMinioClientObjectPart(info), nil return fromMinioClientObjectPart(info), nil
@ -582,7 +583,7 @@ func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID strin
// AbortMultipartUpload aborts an ongoing multipart upload // AbortMultipartUpload aborts an ongoing multipart upload
func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error { func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
err := l.Client.AbortMultipartUpload(bucket, object, uploadID) err := l.Client.AbortMultipartUpload(bucket, object, uploadID)
return s3ToObjectError(traceError(err), bucket, object) return s3ToObjectError(errors.Trace(err), bucket, object)
} }
// toMinioClientCompletePart converts CompletePart to minio CompletePart // toMinioClientCompletePart converts CompletePart to minio CompletePart
@ -606,7 +607,7 @@ func toMinioClientCompleteParts(parts []CompletePart) []minio.CompletePart {
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, e error) { func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, e error) {
err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts)) err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts))
if err != nil { if err != nil {
return oi, s3ToObjectError(traceError(err), bucket, object) return oi, s3ToObjectError(errors.Trace(err), bucket, object)
} }
return l.GetObjectInfo(bucket, object) return l.GetObjectInfo(bucket, object)
@ -615,7 +616,7 @@ func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, upload
// SetBucketPolicies sets policy on bucket // SetBucketPolicies sets policy on bucket
func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil { if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
return s3ToObjectError(traceError(err), bucket, "") return s3ToObjectError(errors.Trace(err), bucket, "")
} }
return nil return nil
@ -625,7 +626,7 @@ func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
policyInfo, err := l.Client.GetBucketPolicy(bucket) policyInfo, err := l.Client.GetBucketPolicy(bucket)
if err != nil { if err != nil {
return policy.BucketAccessPolicy{}, s3ToObjectError(traceError(err), bucket, "") return policy.BucketAccessPolicy{}, s3ToObjectError(errors.Trace(err), bucket, "")
} }
return policyInfo, nil return policyInfo, nil
} }
@ -633,7 +634,7 @@ func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
// DeleteBucketPolicies deletes all policies on bucket // DeleteBucketPolicies deletes all policies on bucket
func (l *s3Objects) DeleteBucketPolicies(bucket string) error { func (l *s3Objects) DeleteBucketPolicies(bucket string) error {
if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil { if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
return s3ToObjectError(traceError(err), bucket, "") return s3ToObjectError(errors.Trace(err), bucket, "")
} }
return nil return nil
} }
@ -21,6 +21,7 @@ import (
"testing" "testing"
minio "github.com/minio/minio-go" minio "github.com/minio/minio-go"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -114,8 +115,8 @@ func TestS3ToObjectError(t *testing.T) {
for i, tc := range testCases { for i, tc := range testCases {
actualErr := s3ToObjectError(tc.inputErr, tc.bucket, tc.object) actualErr := s3ToObjectError(tc.inputErr, tc.bucket, tc.object)
if e, ok := actualErr.(*Error); ok && e.e != tc.expectedErr { if e, ok := actualErr.(*errors2.Error); ok && e.Cause != tc.expectedErr {
t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e.e) t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e.Cause)
} }
} }
} }
@ -18,7 +18,6 @@ package cmd
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -31,6 +30,7 @@ import (
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/set" "github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -152,7 +152,7 @@ func (s SiaMethodNotSupported) Error() string {
func apiGet(addr, call, apiPassword string) (*http.Response, error) { func apiGet(addr, call, apiPassword string) (*http.Response, error) {
req, err := http.NewRequest("GET", "http://"+addr+call, nil) req, err := http.NewRequest("GET", "http://"+addr+call, nil)
if err != nil { if err != nil {
return nil, traceError(err) return nil, errors.Trace(err)
} }
req.Header.Set("User-Agent", "Sia-Agent") req.Header.Set("User-Agent", "Sia-Agent")
if apiPassword != "" { if apiPassword != "" {
@ -160,7 +160,7 @@ func apiGet(addr, call, apiPassword string) (*http.Response, error) {
} }
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return nil, traceError(err) return nil, errors.Trace(err)
} }
if resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
resp.Body.Close() resp.Body.Close()
@ -225,7 +225,7 @@ func list(addr string, apiPassword string, obj *renterFiles) error {
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode == http.StatusNoContent { if resp.StatusCode == http.StatusNoContent {
return errors.New("Expecting a response, but API returned status code 204 No Content") return fmt.Errorf("Expecting a response, but API returned %s", resp.Status)
} }
return json.NewDecoder(resp.Body).Decode(obj) return json.NewDecoder(resp.Body).Decode(obj)
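apiGet is the single doorway to the Sia daemon: it stamps every request with the Sia-Agent User-Agent (siad rejects unrecognized agents) and attaches the API password when one is set. A compact standalone equivalent against the conventional local address:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// 127.0.0.1:9980 is the customary siad address; adjust for a real setup.
	req, err := http.NewRequest("GET", "http://127.0.0.1:9980/renter/files", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("User-Agent", "Sia-Agent") // required by siad
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s: %s\n", resp.Status, body)
}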
@ -369,7 +369,7 @@ func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, de
func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error { func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
if !isValidObjectName(object) { if !isValidObjectName(object) {
return traceError(ObjectNameInvalid{bucket, object}) return errors.Trace(ObjectNameInvalid{bucket, object})
} }
dstFile := pathJoin(s.TempDir, mustGetUUID()) dstFile := pathJoin(s.TempDir, mustGetUUID())
@ -398,7 +398,7 @@ func (s *siaObjects) GetObject(bucket string, object string, startOffset int64,
// Reply back invalid range if the input offset and length fall out of range. // Reply back invalid range if the input offset and length fall out of range.
if startOffset > size || startOffset+length > size { if startOffset > size || startOffset+length > size {
return traceError(InvalidRange{startOffset, length, size}) return errors.Trace(InvalidRange{startOffset, length, size})
} }
// Allocate a staging buffer. // Allocate a staging buffer.
@ -430,14 +430,14 @@ func (s *siaObjects) GetObjectInfo(bucket string, object string) (objInfo Object
} }
} }
return objInfo, traceError(ObjectNotFound{bucket, object}) return objInfo, errors.Trace(ObjectNotFound{bucket, object})
} }
// PutObject creates a new object with the incoming data. // PutObject creates a new object with the incoming data.
func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) { func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
// Check the object's name first // Check the object's name first
if !isValidObjectName(object) { if !isValidObjectName(object) {
return objInfo, traceError(ObjectNameInvalid{bucket, object}) return objInfo, errors.Trace(ObjectNameInvalid{bucket, object})
} }
bufSize := int64(readSizeV1) bufSize := int64(readSizeV1)
@ -20,6 +20,7 @@ import (
"io" "io"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -27,119 +28,120 @@ type gatewayUnsupported struct{}
// ListMultipartUploads lists all multipart uploads. // ListMultipartUploads lists all multipart uploads.
func (a gatewayUnsupported) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) { func (a gatewayUnsupported) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
return lmi, traceError(NotImplemented{}) return lmi, errors.Trace(NotImplemented{})
} }
// NewMultipartUpload uploads an object in multiple parts // NewMultipartUpload uploads an object in multiple parts
func (a gatewayUnsupported) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) { func (a gatewayUnsupported) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
return "", traceError(NotImplemented{}) return "", errors.Trace(NotImplemented{})
}
// CopyObjectPart copies a part of an object to another bucket and object
func (a gatewayUnsupported) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, err error) {
return pi, traceError(NotImplemented{})
} }
// PutObjectPart puts a part of object in bucket // PutObjectPart puts a part of object in bucket
func (a gatewayUnsupported) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) { func (a gatewayUnsupported) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) {
return pi, traceError(NotImplemented{}) return pi, errors.Trace(NotImplemented{})
} }
// ListObjectParts returns all object parts for specified object in specified bucket // ListObjectParts returns all object parts for specified object in specified bucket
func (a gatewayUnsupported) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) { func (a gatewayUnsupported) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) {
return lpi, traceError(NotImplemented{}) return lpi, errors.Trace(NotImplemented{})
} }
// AbortMultipartUpload aborts an ongoing multipart upload // AbortMultipartUpload aborts an ongoing multipart upload
func (a gatewayUnsupported) AbortMultipartUpload(bucket string, object string, uploadID string) error { func (a gatewayUnsupported) AbortMultipartUpload(bucket string, object string, uploadID string) error {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (a gatewayUnsupported) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) { func (a gatewayUnsupported) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) {
return oi, traceError(NotImplemented{}) return oi, errors.Trace(NotImplemented{})
} }
// SetBucketPolicies sets policy on bucket // SetBucketPolicies sets policy on bucket
func (a gatewayUnsupported) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error { func (a gatewayUnsupported) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// GetBucketPolicies will get policy on bucket // GetBucketPolicies will get policy on bucket
func (a gatewayUnsupported) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, err error) { func (a gatewayUnsupported) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, err error) {
return bal, traceError(NotImplemented{}) return bal, errors.Trace(NotImplemented{})
} }
// DeleteBucketPolicies deletes all policies on bucket // DeleteBucketPolicies deletes all policies on bucket
func (a gatewayUnsupported) DeleteBucketPolicies(bucket string) error { func (a gatewayUnsupported) DeleteBucketPolicies(bucket string) error {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
}
// CopyObjectPart - Not implemented.
func (a gatewayUnsupported) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
partID int, startOffset int64, length int64) (info PartInfo, err error) {
return info, errors.Trace(NotImplemented{})
} }
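These stubs pay off through embedding: a concrete gateway embeds gatewayUnsupported and overrides only the calls its backend can serve, while everything else resolves to a traced NotImplemented. A sketch of the pattern (myGateway and its single override are illustrative, not part of this change):

// myGateway embeds the stubs and overrides only what it supports.
type myGateway struct {
	gatewayUnsupported // NotImplemented fallbacks for everything else
}

// HealBucket shadows the stub with a real (here trivial) implementation.
func (g *myGateway) HealBucket(bucket string) error {
	return nil
}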
// HealBucket - Not relevant. // HealBucket - Not relevant.
func (a gatewayUnsupported) HealBucket(bucket string) error { func (a gatewayUnsupported) HealBucket(bucket string) error {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// ListBucketsHeal - Not relevant. // ListBucketsHeal - Not relevant.
func (a gatewayUnsupported) ListBucketsHeal() (buckets []BucketInfo, err error) { func (a gatewayUnsupported) ListBucketsHeal() (buckets []BucketInfo, err error) {
return nil, traceError(NotImplemented{}) return nil, errors.Trace(NotImplemented{})
} }
// HealObject - Not relevant. // HealObject - Not relevant.
func (a gatewayUnsupported) HealObject(bucket, object string) (int, int, error) { func (a gatewayUnsupported) HealObject(bucket, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{}) return 0, 0, errors.Trace(NotImplemented{})
} }
func (a gatewayUnsupported) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { func (a gatewayUnsupported) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return result, traceError(NotImplemented{}) return result, errors.Trace(NotImplemented{})
} }
// ListObjectsHeal - Not relevant. // ListObjectsHeal - Not relevant.
func (a gatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (a gatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{}) return loi, errors.Trace(NotImplemented{})
} }
// ListUploadsHeal - Not relevant. // ListUploadsHeal - Not relevant.
func (a gatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker, func (a gatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) { delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{}) return lmi, errors.Trace(NotImplemented{})
} }
// AnonListObjects - List objects anonymously // AnonListObjects - List objects anonymously
func (a gatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string, func (a gatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string,
maxKeys int) (loi ListObjectsInfo, err error) { maxKeys int) (loi ListObjectsInfo, err error) {
return loi, traceError(NotImplemented{}) return loi, errors.Trace(NotImplemented{})
} }
// AnonListObjectsV2 - List objects in V2 mode, anonymously // AnonListObjectsV2 - List objects in V2 mode, anonymously
func (a gatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, func (a gatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int,
fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) { fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) {
return loi, traceError(NotImplemented{}) return loi, errors.Trace(NotImplemented{})
} }
// AnonGetBucketInfo - Get bucket metadata anonymously. // AnonGetBucketInfo - Get bucket metadata anonymously.
func (a gatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) { func (a gatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) {
return bi, traceError(NotImplemented{}) return bi, errors.Trace(NotImplemented{})
} }
// AnonPutObject creates a new object anonymously with the incoming data, // AnonPutObject creates a new object anonymously with the incoming data,
func (a gatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader, func (a gatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader,
metadata map[string]string) (ObjectInfo, error) { metadata map[string]string) (ObjectInfo, error) {
return ObjectInfo{}, traceError(NotImplemented{}) return ObjectInfo{}, errors.Trace(NotImplemented{})
} }
// AnonGetObject downloads object anonymously. // AnonGetObject downloads object anonymously.
func (a gatewayUnsupported) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) { func (a gatewayUnsupported) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
return traceError(NotImplemented{}) return errors.Trace(NotImplemented{})
} }
// AnonGetObjectInfo returns stat information about an object anonymously. // AnonGetObjectInfo returns stat information about an object anonymously.
func (a gatewayUnsupported) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { func (a gatewayUnsupported) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
return objInfo, traceError(NotImplemented{}) return objInfo, errors.Trace(NotImplemented{})
} }
// CopyObject copies a blob from source container to destination container. // CopyObject copies a blob from source container to destination container.
func (a gatewayUnsupported) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, func (a gatewayUnsupported) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string,
metadata map[string]string) (objInfo ObjectInfo, err error) { metadata map[string]string) (objInfo ObjectInfo, err error) {
return objInfo, traceError(NotImplemented{}) return objInfo, errors.Trace(NotImplemented{})
} }
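
Every stub in this gateway shim follows the same shape: wrap the NotImplemented sentinel with errors.Trace at the return site, and let the caller recover the sentinel with errors.Cause. Below is a minimal sketch of that round trip; notImplemented and setBucketPolicies are local stand-ins for the cmd types, not the real ones:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

// notImplemented is a local stand-in for cmd.NotImplemented.
type notImplemented struct{}

func (notImplemented) Error() string { return "operation not supported" }

// setBucketPolicies mimics a gatewayUnsupported stub: the sentinel is
// wrapped so the returned error carries the call trace of this line.
func setBucketPolicies() error {
	return errors.Trace(notImplemented{})
}

func main() {
	err := setBucketPolicies()
	// errors.Cause strips the trace wrapper, so the sentinel's concrete
	// type is still visible to a type switch.
	switch errors.Cause(err).(type) {
	case notImplemented:
		fmt.Println("backend does not implement this call")
	default:
		fmt.Println("unexpected error:", err)
	}
}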

View File

@ -25,6 +25,7 @@ import (
"os" "os"
"strings" "strings"
"github.com/minio/minio/pkg/errors"
httptracer "github.com/minio/minio/pkg/handlers" httptracer "github.com/minio/minio/pkg/handlers"
) )
@ -112,7 +113,7 @@ var userMetadataKeyPrefixes = []string{
// extractMetadataFromHeader extracts metadata from HTTP header. // extractMetadataFromHeader extracts metadata from HTTP header.
func extractMetadataFromHeader(header http.Header) (map[string]string, error) { func extractMetadataFromHeader(header http.Header) (map[string]string, error) {
if header == nil { if header == nil {
return nil, traceError(errInvalidArgument) return nil, errors.Trace(errInvalidArgument)
} }
metadata := make(map[string]string) metadata := make(map[string]string)
// Save standard supported headers. // Save standard supported headers.
@ -129,7 +130,7 @@ func extractMetadataFromHeader(header http.Header) (map[string]string, error) {
// Go through all other headers for any additional headers that needs to be saved. // Go through all other headers for any additional headers that needs to be saved.
for key := range header { for key := range header {
if key != http.CanonicalHeaderKey(key) { if key != http.CanonicalHeaderKey(key) {
return nil, traceError(errInvalidArgument) return nil, errors.Trace(errInvalidArgument)
} }
for _, prefix := range userMetadataKeyPrefixes { for _, prefix := range userMetadataKeyPrefixes {
if strings.HasPrefix(key, prefix) { if strings.HasPrefix(key, prefix) {
@ -187,7 +188,7 @@ func validateFormFieldSize(formValues http.Header) error {
for k := range formValues { for k := range formValues {
// Check if value's field exceeds S3 limit // Check if value's field exceeds S3 limit
if int64(len(formValues.Get(k))) > maxFormFieldSize { if int64(len(formValues.Get(k))) > maxFormFieldSize {
return traceError(errSizeUnexpected) return errors.Trace(errSizeUnexpected)
} }
} }
@ -216,7 +217,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
canonicalFormName := http.CanonicalHeaderKey(k) canonicalFormName := http.CanonicalHeaderKey(k)
if canonicalFormName == "File" { if canonicalFormName == "File" {
if len(v) == 0 { if len(v) == 0 {
return nil, "", 0, nil, traceError(errInvalidArgument) return nil, "", 0, nil, errors.Trace(errInvalidArgument)
} }
// Fetch fileHeader which has the uploaded file information // Fetch fileHeader which has the uploaded file information
fileHeader := v[0] fileHeader := v[0]
@ -225,17 +226,17 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
// Open the uploaded part // Open the uploaded part
filePart, err = fileHeader.Open() filePart, err = fileHeader.Open()
if err != nil { if err != nil {
return nil, "", 0, nil, traceError(err) return nil, "", 0, nil, errors.Trace(err)
} }
// Compute file size // Compute file size
fileSize, err = filePart.(io.Seeker).Seek(0, 2) fileSize, err = filePart.(io.Seeker).Seek(0, 2)
if err != nil { if err != nil {
return nil, "", 0, nil, traceError(err) return nil, "", 0, nil, errors.Trace(err)
} }
// Reset Seek to the beginning // Reset Seek to the beginning
_, err = filePart.(io.Seeker).Seek(0, 0) _, err = filePart.(io.Seeker).Seek(0, 0)
if err != nil { if err != nil {
return nil, "", 0, nil, traceError(err) return nil, "", 0, nil, errors.Trace(err)
} }
// File found and ready for reading // File found and ready for reading
break break

View File

@ -25,6 +25,8 @@ import (
"reflect" "reflect"
"strings" "strings"
"testing" "testing"
"github.com/minio/minio/pkg/errors"
) )
// Tests validate bucket LocationConstraint. // Tests validate bucket LocationConstraint.
@ -114,7 +116,7 @@ func TestValidateFormFieldSize(t *testing.T) {
for i, testCase := range testCases { for i, testCase := range testCases {
err := validateFormFieldSize(testCase.header) err := validateFormFieldSize(testCase.header)
if err != nil { if err != nil {
if errorCause(err).Error() != testCase.err.Error() { if errors.Cause(err).Error() != testCase.err.Error() {
t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.err, err) t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.err, err)
} }
} }
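
The test keeps comparing through errors.Cause because the traced value is a distinct *errors.Error: neither == against the sentinel nor a direct comparison on the wrapper is guaranteed to match. A small illustration of the difference, using a locally defined errSizeUnexpected rather than the cmd sentinel:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

var errSizeUnexpected = fmt.Errorf("data size larger than expected")

func main() {
	err := errors.Trace(errSizeUnexpected)

	fmt.Println(err == errSizeUnexpected)               // false: wrapper value
	fmt.Println(errors.Cause(err) == errSizeUnexpected) // true: unwrapped
}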

View File

@ -19,6 +19,8 @@ package cmd
import ( import (
"fmt" "fmt"
"time" "time"
"github.com/minio/minio/pkg/errors"
) )
type statusType string type statusType string
@ -116,23 +118,23 @@ func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockSource, opsID stri
// Check whether the lock info entry for <volume, path> pair already exists. // Check whether the lock info entry for <volume, path> pair already exists.
_, ok := n.debugLockMap[param] _, ok := n.debugLockMap[param]
if !ok { if !ok {
return traceError(LockInfoVolPathMissing{param.volume, param.path}) return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
} }
// Check whether lock info entry for the given `opsID` exists. // Check whether lock info entry for the given `opsID` exists.
lockInfo, ok := n.debugLockMap[param].lockInfo[opsID] lockInfo, ok := n.debugLockMap[param].lockInfo[opsID]
if !ok { if !ok {
return traceError(LockInfoOpsIDNotFound{param.volume, param.path, opsID}) return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
} }
// Check whether lockSource is same. // Check whether lockSource is same.
if lockInfo.lockSource != lockSource { if lockInfo.lockSource != lockSource {
return traceError(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource}) return errors.Trace(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
} }
// Status of the lock should be set to "Blocked". // Status of the lock should be set to "Blocked".
if lockInfo.status != blockedStatus { if lockInfo.status != blockedStatus {
return traceError(LockInfoStateNotBlocked{param.volume, param.path, opsID}) return errors.Trace(LockInfoStateNotBlocked{param.volume, param.path, opsID})
} }
// Change lock status to running and update the time. // Change lock status to running and update the time.
n.debugLockMap[param].lockInfo[opsID] = newDebugLockInfo(lockSource, runningStatus, readLock) n.debugLockMap[param].lockInfo[opsID] = newDebugLockInfo(lockSource, runningStatus, readLock)
@ -181,23 +183,23 @@ func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockSource, opsID string,
func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string, readLock bool) error { func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string, readLock bool) error {
_, ok := n.debugLockMap[param] _, ok := n.debugLockMap[param]
if !ok { if !ok {
return traceError(LockInfoVolPathMissing{param.volume, param.path}) return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
} }
// Check whether lock info entry for the given `opsID` exists. // Check whether lock info entry for the given `opsID` exists.
lockInfo, ok := n.debugLockMap[param].lockInfo[opsID] lockInfo, ok := n.debugLockMap[param].lockInfo[opsID]
if !ok { if !ok {
return traceError(LockInfoOpsIDNotFound{param.volume, param.path, opsID}) return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
} }
// Check whether lockSource is same. // Check whether lockSource is same.
if lockInfo.lockSource != lockSource { if lockInfo.lockSource != lockSource {
return traceError(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource}) return errors.Trace(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
} }
// Status of the lock should be set to "Blocked". // Status of the lock should be set to "Blocked".
if lockInfo.status != blockedStatus { if lockInfo.status != blockedStatus {
return traceError(LockInfoStateNotBlocked{param.volume, param.path, opsID}) return errors.Trace(LockInfoStateNotBlocked{param.volume, param.path, opsID})
} }
// Clear the status by removing the entry for the given `opsID`. // Clear the status by removing the entry for the given `opsID`.
delete(n.debugLockMap[param].lockInfo, opsID) delete(n.debugLockMap[param].lockInfo, opsID)
@ -214,7 +216,7 @@ func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string,
func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error { func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error {
// delete the lock info for the given operation. // delete the lock info for the given operation.
if _, found := n.debugLockMap[param]; !found { if _, found := n.debugLockMap[param]; !found {
return traceError(LockInfoVolPathMissing{param.volume, param.path}) return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
} }
// The following stats update is relevant only in case of a // The following stats update is relevant only in case of a
@ -238,14 +240,14 @@ func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, opsID string) error
// delete the lock info for the given operation. // delete the lock info for the given operation.
infoMap, found := n.debugLockMap[param] infoMap, found := n.debugLockMap[param]
if !found { if !found {
return traceError(LockInfoVolPathMissing{param.volume, param.path}) return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
} }
// The operation finished holding the lock on the resource, remove // The operation finished holding the lock on the resource, remove
// the entry for the given operation with the operation ID. // the entry for the given operation with the operation ID.
opsIDLock, foundInfo := infoMap.lockInfo[opsID] opsIDLock, foundInfo := infoMap.lockInfo[opsID]
if !foundInfo { if !foundInfo {
// Unlock request with invalid operation ID not accepted. // Unlock request with invalid operation ID not accepted.
return traceError(LockInfoOpsIDNotFound{param.volume, param.path, opsID}) return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
} }
// Update global and (volume, path) lock status. // Update global and (volume, path) lock status.
granted := opsIDLock.status == runningStatus granted := opsIDLock.status == runningStatus

View File

@ -16,7 +16,11 @@
package cmd package cmd
import "testing" import (
"testing"
"github.com/minio/minio/pkg/errors"
)
type lockStateCase struct { type lockStateCase struct {
volume string volume string
@ -278,7 +282,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
testCases[0].opsID, testCases[0].readLock) testCases[0].opsID, testCases[0].readLock)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path} expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
if errorCause(actualErr) != expectedErr { if errors.Cause(actualErr) != expectedErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr) t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
} }
@ -298,7 +302,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
testCases[0].opsID, testCases[0].readLock) testCases[0].opsID, testCases[0].readLock)
expectedOpsErr := LockInfoOpsIDNotFound{testCases[0].volume, testCases[0].path, testCases[0].opsID} expectedOpsErr := LockInfoOpsIDNotFound{testCases[0].volume, testCases[0].path, testCases[0].opsID}
if errorCause(actualErr) != expectedOpsErr { if errors.Cause(actualErr) != expectedOpsErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsErr, actualErr) t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsErr, actualErr)
} }
@ -321,7 +325,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
testCases[0].opsID, testCases[0].readLock) testCases[0].opsID, testCases[0].readLock)
expectedBlockErr := LockInfoStateNotBlocked{testCases[0].volume, testCases[0].path, testCases[0].opsID} expectedBlockErr := LockInfoStateNotBlocked{testCases[0].volume, testCases[0].path, testCases[0].opsID}
if errorCause(actualErr) != expectedBlockErr { if errors.Cause(actualErr) != expectedBlockErr {
t.Fatalf("Errors mismatch: Expected: \"%s\", got: \"%s\"", expectedBlockErr, actualErr) t.Fatalf("Errors mismatch: Expected: \"%s\", got: \"%s\"", expectedBlockErr, actualErr)
} }
@ -342,7 +346,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
} }
// invoking the method under test. // invoking the method under test.
actualErr = globalNSMutex.statusBlockedToRunning(param, testCase.lockSource, testCase.opsID, testCase.readLock) actualErr = globalNSMutex.statusBlockedToRunning(param, testCase.lockSource, testCase.opsID, testCase.readLock)
if errorCause(actualErr) != testCase.expectedErr { if errors.Cause(actualErr) != testCase.expectedErr {
t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr) t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr)
} }
// In case of no error proceed with validating the lock state information. // In case of no error proceed with validating the lock state information.
@ -461,7 +465,7 @@ func TestNsLockMapStatusNoneToBlocked(t *testing.T) {
testCases[0].opsID, testCases[0].readLock) testCases[0].opsID, testCases[0].readLock)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path} expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
if errorCause(actualErr) != expectedErr { if errors.Cause(actualErr) != expectedErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr) t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
} }
@ -505,7 +509,7 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
actualErr := globalNSMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID) actualErr := globalNSMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path} expectedErr := LockInfoVolPathMissing{testCases[0].volume, testCases[0].path}
if errorCause(actualErr) != expectedErr { if errors.Cause(actualErr) != expectedErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr) t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
} }
@ -524,7 +528,7 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
actualErr = globalNSMutex.deleteLockInfoEntryForOps(param, "non-existent-OpsID") actualErr = globalNSMutex.deleteLockInfoEntryForOps(param, "non-existent-OpsID")
expectedOpsIDErr := LockInfoOpsIDNotFound{param.volume, param.path, "non-existent-OpsID"} expectedOpsIDErr := LockInfoOpsIDNotFound{param.volume, param.path, "non-existent-OpsID"}
if errorCause(actualErr) != expectedOpsIDErr { if errors.Cause(actualErr) != expectedOpsIDErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsIDErr, actualErr) t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsIDErr, actualErr)
} }
// case - 4. // case - 4.
@ -588,7 +592,7 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
param := nsParam{testCases[0].volume, testCases[0].path} param := nsParam{testCases[0].volume, testCases[0].path}
actualErr := globalNSMutex.deleteLockInfoEntryForVolumePath(param) actualErr := globalNSMutex.deleteLockInfoEntryForVolumePath(param)
expectedNilErr := LockInfoVolPathMissing{param.volume, param.path} expectedNilErr := LockInfoVolPathMissing{param.volume, param.path}
if errorCause(actualErr) != expectedNilErr { if errors.Cause(actualErr) != expectedNilErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr) t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
} }
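
These assertions rely on the lock-info errors being plain comparable structs: after errors.Cause unwraps the trace, == does a field-by-field comparison against the expected value. A reduced sketch of that property; volPathMissing is a hypothetical type shaped like LockInfoVolPathMissing:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

// volPathMissing is a hypothetical comparable error type, shaped like
// cmd.LockInfoVolPathMissing.
type volPathMissing struct{ volume, path string }

func (e volPathMissing) Error() string {
	return fmt.Sprintf("no lock info found for %s/%s", e.volume, e.path)
}

func main() {
	actualErr := errors.Trace(volPathMissing{"bucket", "object"})

	// Struct equality holds after unwrapping, because every field matches.
	if errors.Cause(actualErr) == (volPathMissing{"bucket", "object"}) {
		fmt.Println("got the expected lock error")
	}
}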

View File

@ -25,6 +25,7 @@ import (
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
"github.com/minio/dsync" "github.com/minio/dsync"
"github.com/minio/minio/pkg/errors"
) )
const ( const (
@ -100,7 +101,7 @@ func registerStorageLockers(mux *router.Router, lockServers []*lockServer) error
for _, lockServer := range lockServers { for _, lockServer := range lockServers {
lockRPCServer := newRPCServer() lockRPCServer := newRPCServer()
if err := lockRPCServer.RegisterName(lockServiceName, lockServer); err != nil { if err := lockRPCServer.RegisterName(lockServiceName, lockServer); err != nil {
return traceError(err) return errors.Trace(err)
} }
lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter() lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
lockRouter.Path(path.Join(lockServicePath, lockServer.ll.serviceEndpoint)).Handler(lockRPCServer) lockRouter.Path(path.Join(lockServicePath, lockServer.ll.serviceEndpoint)).Handler(lockRPCServer)

View File

@ -17,7 +17,6 @@
package cmd package cmd
import ( import (
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"path" "path"
@ -27,6 +26,7 @@ import (
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/errors"
) )
var log = NewLogger() var log = NewLogger()
@ -42,7 +42,7 @@ func (l *loggers) Validate() (err error) {
if l != nil { if l != nil {
fileLogger := l.GetFile() fileLogger := l.GetFile()
if fileLogger.Enable && fileLogger.Filename == "" { if fileLogger.Enable && fileLogger.Filename == "" {
err = errors.New("Missing filename for enabled file logger") err = fmt.Errorf("Missing filename for enabled file logger")
} }
} }
@ -186,7 +186,7 @@ func getSource() string {
func logIf(level logrus.Level, source string, err error, msg string, data ...interface{}) { func logIf(level logrus.Level, source string, err error, msg string, data ...interface{}) {
isErrIgnored := func(err error) (ok bool) { isErrIgnored := func(err error) (ok bool) {
err = errorCause(err) err = errors.Cause(err)
switch err.(type) { switch err.(type) {
case BucketNotFound, BucketNotEmpty, BucketExists: case BucketNotFound, BucketNotEmpty, BucketExists:
ok = true ok = true
@ -207,8 +207,8 @@ func logIf(level logrus.Level, source string, err error, msg string, data ...int
"cause": err.Error(), "cause": err.Error(),
} }
if terr, ok := err.(*Error); ok { if terr, ok := err.(*errors.Error); ok {
fields["stack"] = strings.Join(terr.Trace(), " ") fields["stack"] = strings.Join(terr.Stack(), " ")
} }
switch level { switch level {

View File

@ -20,6 +20,7 @@ import (
"sync" "sync"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
) )
const ( const (
@ -107,7 +108,7 @@ func houseKeeping(storageDisks []StorageAPI) error {
// Cleanup all temp entries upon start. // Cleanup all temp entries upon start.
err := cleanupDir(disk, minioMetaTmpBucket, "") err := cleanupDir(disk, minioMetaTmpBucket, "")
if err != nil { if err != nil {
if !isErrIgnored(errorCause(err), errDiskNotFound, errVolumeNotFound, errFileNotFound) { if !errors.IsErrIgnored(errors.Cause(err), errDiskNotFound, errVolumeNotFound, errFileNotFound) {
errs[index] = err errs[index] = err
} }
} }
@ -164,21 +165,21 @@ func initMetaVolume(storageDisks []StorageAPI) error {
// Attempt to create `.minio.sys`. // Attempt to create `.minio.sys`.
err := disk.MakeVol(minioMetaBucket) err := disk.MakeVol(minioMetaBucket)
if err != nil { if err != nil {
if !isErrIgnored(err, initMetaVolIgnoredErrs...) { if !errors.IsErrIgnored(err, initMetaVolIgnoredErrs...) {
errs[index] = err errs[index] = err
return return
} }
} }
err = disk.MakeVol(minioMetaTmpBucket) err = disk.MakeVol(minioMetaTmpBucket)
if err != nil { if err != nil {
if !isErrIgnored(err, initMetaVolIgnoredErrs...) { if !errors.IsErrIgnored(err, initMetaVolIgnoredErrs...) {
errs[index] = err errs[index] = err
return return
} }
} }
err = disk.MakeVol(minioMetaMultipartBucket) err = disk.MakeVol(minioMetaMultipartBucket)
if err != nil { if err != nil {
if !isErrIgnored(err, initMetaVolIgnoredErrs...) { if !errors.IsErrIgnored(err, initMetaVolIgnoredErrs...) {
errs[index] = err errs[index] = err
return return
} }
@ -208,7 +209,7 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
delFunc = func(entryPath string) error { delFunc = func(entryPath string) error {
if !hasSuffix(entryPath, slashSeparator) { if !hasSuffix(entryPath, slashSeparator) {
// Delete the file entry. // Delete the file entry.
return traceError(storage.DeleteFile(volume, entryPath)) return errors.Trace(storage.DeleteFile(volume, entryPath))
} }
// If it's a directory, list and call delFunc() for each entry. // If it's a directory, list and call delFunc() for each entry.
@ -217,7 +218,7 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
if err == errFileNotFound { if err == errFileNotFound {
return nil return nil
} else if err != nil { // For any other errors fail. } else if err != nil { // For any other errors fail.
return traceError(err) return errors.Trace(err)
} // else on success.. } // else on success..
// Recurse and delete all other entries. // Recurse and delete all other entries.
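
Note that delFunc wraps storage.DeleteFile's result unconditionally, so the success path only works if errors.Trace passes nil through unchanged, an assumption this diff makes in several places but never shows. A tiny sketch of that passthrough; deleteFile is a stand-in for StorageAPI.DeleteFile:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

// deleteFile stands in for StorageAPI.DeleteFile and succeeds here.
func deleteFile(volume, entryPath string) error { return nil }

func main() {
	// Safe only if Trace(nil) stays nil; otherwise every successful
	// delete would surface as an error to cleanupDir's callers.
	err := errors.Trace(deleteFile("bucket", "object"))
	fmt.Println(err == nil) // expected: true
}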

View File

@ -19,6 +19,8 @@ package cmd
import ( import (
"sync" "sync"
"testing" "testing"
"github.com/minio/minio/pkg/errors"
) )
func TestHouseKeeping(t *testing.T) { func TestHouseKeeping(t *testing.T) {
@ -90,7 +92,7 @@ func TestHouseKeeping(t *testing.T) {
{nilDiskStorage, nil}, {nilDiskStorage, nil},
} }
for i, test := range testCases { for i, test := range testCases {
actualErr := errorCause(houseKeeping(test.store)) actualErr := errors.Cause(houseKeeping(test.store))
if actualErr != test.expectedErr { if actualErr != test.expectedErr {
t.Errorf("Test %d - actual error is %#v, expected error was %#v", t.Errorf("Test %d - actual error is %#v, expected error was %#v",
i+1, actualErr, test.expectedErr) i+1, actualErr, test.expectedErr)

View File

@ -19,15 +19,17 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"github.com/minio/minio/pkg/errors"
) )
// Converts underlying storage error. Convenience function written to // Converts underlying storage error. Convenience function written to
// handle all cases where we have known types of errors returned by // handle all cases where we have known types of errors returned by
// underlying storage layer. // underlying storage layer.
func toObjectErr(err error, params ...string) error { func toObjectErr(err error, params ...string) error {
e, ok := err.(*Error) e, ok := err.(*errors.Error)
if ok { if ok {
err = e.e err = e.Cause
} }
switch err { switch err {
@ -95,7 +97,7 @@ func toObjectErr(err error, params ...string) error {
err = IncompleteBody{} err = IncompleteBody{}
} }
if ok { if ok {
e.e = err e.Cause = err
return e return e
} }
return err return err
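
toObjectErr is the most invasive user of the new package: it reaches into the wrapper, translates the low-level cause into an API-level error, and writes it back through e.Cause so the original call trace survives the translation. A compressed sketch of that rewrap; errVolumeNotFound and bucketNotFound are stand-ins for the cmd equivalents:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

var errVolumeNotFound = fmt.Errorf("volume not found")

type bucketNotFound struct{ bucket string }

func (e bucketNotFound) Error() string { return "bucket not found: " + e.bucket }

// toObjectErr mirrors the shape of the function above: unwrap, translate
// the cause, and rewrap so the stack is preserved.
func toObjectErr(err error, bucket string) error {
	e, ok := err.(*errors.Error)
	if ok {
		err = e.Cause
	}
	switch err {
	case errVolumeNotFound:
		err = bucketNotFound{bucket: bucket}
	}
	if ok {
		e.Cause = err // same trace, translated cause
		return e
	}
	return err
}

func main() {
	err := toObjectErr(errors.Trace(errVolumeNotFound), "photos")
	fmt.Println(errors.Cause(err)) // bucket not found: photos
}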
@ -377,7 +379,7 @@ func (e UnsupportedMetadata) Error() string {
// isErrIncompleteBody - Check if error type is IncompleteBody. // isErrIncompleteBody - Check if error type is IncompleteBody.
func isErrIncompleteBody(err error) bool { func isErrIncompleteBody(err error) bool {
err = errorCause(err) err = errors.Cause(err)
switch err.(type) { switch err.(type) {
case IncompleteBody: case IncompleteBody:
return true return true
@ -387,7 +389,7 @@ func isErrIncompleteBody(err error) bool {
// isErrBucketPolicyNotFound - Check if error type is BucketPolicyNotFound. // isErrBucketPolicyNotFound - Check if error type is BucketPolicyNotFound.
func isErrBucketPolicyNotFound(err error) bool { func isErrBucketPolicyNotFound(err error) bool {
err = errorCause(err) err = errors.Cause(err)
switch err.(type) { switch err.(type) {
case BucketPolicyNotFound: case BucketPolicyNotFound:
return true return true
@ -397,7 +399,7 @@ func isErrBucketPolicyNotFound(err error) bool {
// isErrObjectNotFound - Check if error type is ObjectNotFound. // isErrObjectNotFound - Check if error type is ObjectNotFound.
func isErrObjectNotFound(err error) bool { func isErrObjectNotFound(err error) bool {
err = errorCause(err) err = errors.Cause(err)
switch err.(type) { switch err.(type) {
case ObjectNotFound: case ObjectNotFound:
return true return true

View File

@ -16,7 +16,10 @@
package cmd package cmd
import "github.com/skyrings/skyring-common/tools/uuid" import (
"github.com/minio/minio/pkg/errors"
"github.com/skyrings/skyring-common/tools/uuid"
)
// Checks on GetObject arguments, bucket and object. // Checks on GetObject arguments, bucket and object.
func checkGetObjArgs(bucket, object string) error { func checkGetObjArgs(bucket, object string) error {
@ -32,15 +35,15 @@ func checkDelObjArgs(bucket, object string) error {
func checkBucketAndObjectNames(bucket, object string) error { func checkBucketAndObjectNames(bucket, object string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket}) return errors.Trace(BucketNameInvalid{Bucket: bucket})
} }
// Verify if object is valid. // Verify if object is valid.
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
// Objects with "/" are invalid, verify to return a different error. // Objects with "/" are invalid, verify to return a different error.
if hasSuffix(object, slashSeparator) || hasPrefix(object, slashSeparator) { if hasSuffix(object, slashSeparator) || hasPrefix(object, slashSeparator) {
return traceError(ObjectNotFound{Bucket: bucket, Object: object}) return errors.Trace(ObjectNotFound{Bucket: bucket, Object: object})
} }
return traceError(ObjectNameInvalid{Bucket: bucket, Object: object}) return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
} }
return nil return nil
} }
@ -53,24 +56,24 @@ func checkListObjsArgs(bucket, prefix, marker, delimiter string, obj ObjectLayer
// happen before we return an error for invalid object name. // happen before we return an error for invalid object name.
// FIXME: should be moved to handler layer. // FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil { if err := checkBucketExist(bucket, obj); err != nil {
return traceError(err) return errors.Trace(err)
} }
// Validates object prefix validity after bucket exists. // Validates object prefix validity after bucket exists.
if !IsValidObjectPrefix(prefix) { if !IsValidObjectPrefix(prefix) {
return traceError(ObjectNameInvalid{ return errors.Trace(ObjectNameInvalid{
Bucket: bucket, Bucket: bucket,
Object: prefix, Object: prefix,
}) })
} }
// Verify if delimiter is anything other than '/', which we do not support. // Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator { if delimiter != "" && delimiter != slashSeparator {
return traceError(UnsupportedDelimiter{ return errors.Trace(UnsupportedDelimiter{
Delimiter: delimiter, Delimiter: delimiter,
}) })
} }
// Verify if marker has prefix. // Verify if marker has prefix.
if marker != "" && !hasPrefix(marker, prefix) { if marker != "" && !hasPrefix(marker, prefix) {
return traceError(InvalidMarkerPrefixCombination{ return errors.Trace(InvalidMarkerPrefixCombination{
Marker: marker, Marker: marker,
Prefix: prefix, Prefix: prefix,
}) })
@ -85,17 +88,17 @@ func checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter
} }
if uploadIDMarker != "" { if uploadIDMarker != "" {
if hasSuffix(keyMarker, slashSeparator) { if hasSuffix(keyMarker, slashSeparator) {
return traceError(InvalidUploadIDKeyCombination{ return errors.Trace(InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker, UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker, KeyMarker: keyMarker,
}) })
} }
id, err := uuid.Parse(uploadIDMarker) id, err := uuid.Parse(uploadIDMarker)
if err != nil { if err != nil {
return traceError(err) return errors.Trace(err)
} }
if id.IsZero() { if id.IsZero() {
return traceError(MalformedUploadID{ return errors.Trace(MalformedUploadID{
UploadID: uploadIDMarker, UploadID: uploadIDMarker,
}) })
} }
@ -136,11 +139,11 @@ func checkPutObjectArgs(bucket, object string, obj ObjectLayer) error {
// happen before we return an error for invalid object name. // happen before we return an error for invalid object name.
// FIXME: should be moved to handler layer. // FIXME: should be moved to handler layer.
if err := checkBucketExist(bucket, obj); err != nil { if err := checkBucketExist(bucket, obj); err != nil {
return traceError(err) return errors.Trace(err)
} }
// Validates object name validity after bucket exists. // Validates object name validity after bucket exists.
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return traceError(ObjectNameInvalid{ return errors.Trace(ObjectNameInvalid{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
}) })
@ -155,7 +158,7 @@ func checkBucketExist(bucket string, obj ObjectLayer) error {
} }
_, err := obj.GetBucketInfo(bucket) _, err := obj.GetBucketInfo(bucket)
if err != nil { if err != nil {
return errorCause(err) return errors.Cause(err)
} }
return nil return nil
} }
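
checkBucketExist is the one spot that deliberately returns the unwrapped value, errors.Cause(err), and lets callers re-wrap with errors.Trace, so the recorded frame is the argument-check site rather than somewhere inside GetBucketInfo. A toy illustration of that unwrap-then-rewrap boundary; getBucketInfo and bucketNotFound are stand-ins:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

type bucketNotFound struct{ bucket string }

func (e bucketNotFound) Error() string { return "bucket not found: " + e.bucket }

// getBucketInfo stands in for ObjectLayer.GetBucketInfo.
func getBucketInfo(bucket string) error {
	return errors.Trace(bucketNotFound{bucket})
}

// checkBucketExist returns the bare cause, like its namesake above.
func checkBucketExist(bucket string) error {
	if err := getBucketInfo(bucket); err != nil {
		return errors.Cause(err)
	}
	return nil
}

func main() {
	// The caller re-traces, so the trace now points at this call site.
	if err := checkBucketExist("missing"); err != nil {
		fmt.Println(errors.Trace(err))
	}
}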

View File

@ -24,6 +24,7 @@ import (
"sort" "sort"
"time" "time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
) )
@ -80,14 +81,14 @@ func (u *uploadsV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
var uplBytes []byte var uplBytes []byte
uplBytes, err = json.Marshal(u) uplBytes, err = json.Marshal(u)
if err != nil { if err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
if err = lk.Truncate(0); err != nil { if err = lk.Truncate(0); err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
_, err = lk.Write(uplBytes) _, err = lk.Write(uplBytes)
if err != nil { if err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
return int64(len(uplBytes)), nil return int64(len(uplBytes)), nil
} }
@ -96,18 +97,18 @@ func (u *uploadsV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
var uploadIDBytes []byte var uploadIDBytes []byte
fi, err := lk.Stat() fi, err := lk.Stat()
if err != nil { if err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
uploadIDBytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size())) uploadIDBytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil { if err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
if len(uploadIDBytes) == 0 { if len(uploadIDBytes) == 0 {
return 0, traceError(io.EOF) return 0, errors.Trace(io.EOF)
} }
// Decode `uploads.json`. // Decode `uploads.json`.
if err = json.Unmarshal(uploadIDBytes, u); err != nil { if err = json.Unmarshal(uploadIDBytes, u); err != nil {
return 0, traceError(err) return 0, errors.Trace(err)
} }
return int64(len(uploadIDBytes)), nil return int64(len(uploadIDBytes)), nil
} }
@ -118,12 +119,12 @@ func readUploadsJSON(bucket, object string, disk StorageAPI) (uploadIDs uploadsV
// Reads entire `uploads.json`. // Reads entire `uploads.json`.
buf, err := disk.ReadAll(minioMetaMultipartBucket, uploadJSONPath) buf, err := disk.ReadAll(minioMetaMultipartBucket, uploadJSONPath)
if err != nil { if err != nil {
return uploadsV1{}, traceError(err) return uploadsV1{}, errors.Trace(err)
} }
// Decode `uploads.json`. // Decode `uploads.json`.
if err = json.Unmarshal(buf, &uploadIDs); err != nil { if err = json.Unmarshal(buf, &uploadIDs); err != nil {
return uploadsV1{}, traceError(err) return uploadsV1{}, errors.Trace(err)
} }
// Success. // Success.
@ -142,20 +143,20 @@ func writeUploadJSON(u *uploadsV1, uploadsPath, tmpPath string, disk StorageAPI)
// Serialize to prepare to write to disk. // Serialize to prepare to write to disk.
uplBytes, wErr := json.Marshal(&u) uplBytes, wErr := json.Marshal(&u)
if wErr != nil { if wErr != nil {
return traceError(wErr) return errors.Trace(wErr)
} }
// Write `uploads.json` to disk. First to tmp location and then rename. // Write `uploads.json` to disk. First to tmp location and then rename.
if wErr = disk.AppendFile(minioMetaTmpBucket, tmpPath, uplBytes); wErr != nil { if wErr = disk.AppendFile(minioMetaTmpBucket, tmpPath, uplBytes); wErr != nil {
return traceError(wErr) return errors.Trace(wErr)
} }
wErr = disk.RenameFile(minioMetaTmpBucket, tmpPath, minioMetaMultipartBucket, uploadsPath) wErr = disk.RenameFile(minioMetaTmpBucket, tmpPath, minioMetaMultipartBucket, uploadsPath)
if wErr != nil { if wErr != nil {
if dErr := disk.DeleteFile(minioMetaTmpBucket, tmpPath); dErr != nil { if dErr := disk.DeleteFile(minioMetaTmpBucket, tmpPath); dErr != nil {
// we return the most recent error. // we return the most recent error.
return traceError(dErr) return errors.Trace(dErr)
} }
return traceError(wErr) return errors.Trace(wErr)
} }
return nil return nil
} }

View File

@ -24,6 +24,7 @@ import (
"testing" "testing"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -122,7 +123,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
if testCase.expectedErrType == nil && err != nil { if testCase.expectedErrType == nil && err != nil {
t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType) t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType)
} }
if testCase.expectedErrType != nil && !isSameType(errorCause(err), testCase.expectedErrType) { if testCase.expectedErrType != nil && !isSameType(errors.Cause(err), testCase.expectedErrType) {
t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType) t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType)
} }
} }
@ -151,7 +152,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
} }
err = obj.AbortMultipartUpload(bucket, object, "abc") err = obj.AbortMultipartUpload(bucket, object, "abc")
err = errorCause(err) err = errors.Cause(err)
switch err.(type) { switch err.(type) {
case InvalidUploadID: case InvalidUploadID:
default: default:

View File

@ -26,6 +26,7 @@ import (
"testing" "testing"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -162,7 +163,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
for i, testCase := range testCases { for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta) objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
actualErr = errorCause(actualErr) actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.expectedError == nil { if actualErr != nil && testCase.expectedError == nil {
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
} }
@ -236,7 +237,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
sha256sum := "" sha256sum := ""
for i, testCase := range testCases { for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta) objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr) actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
} }
@ -286,7 +287,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
} }
_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta) _, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr) actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
} }

View File

@ -24,6 +24,7 @@ import (
"strings" "strings"
"unicode/utf8" "unicode/utf8"
"github.com/minio/minio/pkg/errors"
"github.com/skyrings/skyring-common/tools/uuid" "github.com/skyrings/skyring-common/tools/uuid"
) )
@ -178,7 +179,7 @@ func getCompleteMultipartMD5(parts []CompletePart) (string, error) {
for _, part := range parts { for _, part := range parts {
md5Bytes, err := hex.DecodeString(part.ETag) md5Bytes, err := hex.DecodeString(part.ETag)
if err != nil { if err != nil {
return "", traceError(err) return "", errors.Trace(err)
} }
finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
} }

View File

@ -28,6 +28,7 @@ import (
"strconv" "strconv"
mux "github.com/gorilla/mux" mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/ioutil" "github.com/minio/minio/pkg/ioutil"
) )
@ -1065,7 +1066,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
objInfo, err := objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts) objInfo, err := objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
if err != nil { if err != nil {
errorIf(err, "Unable to complete multipart upload.") errorIf(err, "Unable to complete multipart upload.")
err = errorCause(err) err = errors.Cause(err)
switch oErr := err.(type) { switch oErr := err.(type) {
case PartTooSmall: case PartTooSmall:
// Write part too small error. // Write part too small error.

View File

@ -24,6 +24,7 @@ import (
"testing" "testing"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
) )
// Return pointer to testOneByteReadEOF{} // Return pointer to testOneByteReadEOF{}
@ -754,7 +755,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
for i, testCase := range testCases { for i, testCase := range testCases {
_, expectedErr := obj.GetObjectInfo(bucketName, testCase.dir) _, expectedErr := obj.GetObjectInfo(bucketName, testCase.dir)
if expectedErr != nil { if expectedErr != nil {
expectedErr = errorCause(expectedErr) expectedErr = errors.Cause(expectedErr)
if expectedErr.Error() != testCase.err.Error() { if expectedErr.Error() != testCase.err.Error() {
t.Errorf("Test %d, %s: Expected error %s, got %s", i+1, instanceType, testCase.err, expectedErr) t.Errorf("Test %d, %s: Expected error %s, got %s", i+1, instanceType, testCase.err, expectedErr)
} }

View File

@ -17,11 +17,11 @@
package cmd package cmd
import ( import (
"errors"
"fmt" "fmt"
"time" "time"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/errors"
) )
/* /*
@ -140,7 +140,7 @@ func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions {
// Count errors by error value. // Count errors by error value.
errMap := make(map[error]int) errMap := make(map[error]int)
for _, err := range sErrs { for _, err := range sErrs {
errMap[errorCause(err)]++ errMap[errors.Cause(err)]++
} }
// Validates and converts specific config errors into WaitForConfig. // Validates and converts specific config errors into WaitForConfig.
@ -296,7 +296,7 @@ func retryFormattingXLDisks(firstDisk bool, endpoints EndpointList, storageDisks
console.Printf("Initializing data volume for first time. Waiting for first server to come online (elapsed %s)\n", getElapsedTime()) console.Printf("Initializing data volume for first time. Waiting for first server to come online (elapsed %s)\n", getElapsedTime())
} }
case <-globalServiceDoneCh: case <-globalServiceDoneCh:
return errors.New("Initializing data volumes gracefully stopped") return fmt.Errorf("Initializing data volumes gracefully stopped")
} }
} }
} }
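
The errors.New to fmt.Errorf swaps in this file and elsewhere are forced rather than stylistic: once github.com/minio/minio/pkg/errors takes the bare errors name, the stdlib package has to leave the import block. Aliasing the stdlib import would have been the other way out; a purely hypothetical sketch of that alternative:

package main

import (
	goerrors "errors" // stdlib under an alias
	"fmt"

	"github.com/minio/minio/pkg/errors" // keeps the bare name
)

func main() {
	plain := goerrors.New("plain sentinel")
	traced := errors.Trace(plain)
	fmt.Println(errors.Cause(traced) == plain) // true
}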

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/errors"
) )
const ( const (
@ -40,7 +41,7 @@ func registerS3PeerRPCRouter(mux *router.Router) error {
s3PeerRPCServer := newRPCServer() s3PeerRPCServer := newRPCServer()
err := s3PeerRPCServer.RegisterName("S3", s3PeerHandlers) err := s3PeerRPCServer.RegisterName("S3", s3PeerHandlers)
if err != nil { if err != nil {
return traceError(err) return errors.Trace(err)
} }
s3PeerRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter() s3PeerRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()

View File

@ -25,6 +25,7 @@ import (
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/dsync" "github.com/minio/dsync"
"github.com/minio/minio/pkg/errors"
miniohttp "github.com/minio/minio/pkg/http" miniohttp "github.com/minio/minio/pkg/http"
) )
@ -150,7 +151,7 @@ func serverMain(ctx *cli.Context) {
enableLoggers() enableLoggers()
// Init the error tracing module. // Init the error tracing module.
initError() errors.Init(GOPATH, "github.com/minio/minio")
// Check and load SSL certificates. // Check and load SSL certificates.
var err error var err error
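
errors.Init replaces the old initError call and takes the GOPATH plus the repository's package root, presumably so recorded frames can be trimmed to package-relative paths when Stack() renders them. A minimal program wiring it up the same way; it resolves GOPATH via go/build instead of the cmd-package global used above:

package main

import (
	"fmt"
	"go/build"

	"github.com/minio/minio/pkg/errors"
)

func main() {
	// Same call shape as serverMain: GOPATH first, then the package root.
	errors.Init(build.Default.GOPATH, "github.com/minio/minio")

	err := errors.Trace(fmt.Errorf("boom"))
	if terr, ok := err.(*errors.Error); ok {
		fmt.Println(terr.Stack())
	}
}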

View File

@ -99,3 +99,12 @@ func (h hashMismatchError) Error() string {
"Bitrot verification mismatch - expected %v, received %v", "Bitrot verification mismatch - expected %v, received %v",
h.expected, h.computed) h.expected, h.computed)
} }
// Collection of basic errors.
var baseErrs = []error{
errDiskNotFound,
errFaultyDisk,
errFaultyRemoteDisk,
}
var baseIgnoredErrs = baseErrs
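
These lists feed errors.IsErrIgnored, which replaces the package-local isErrIgnored across this diff. Call sites are inconsistent about unwrapping first (some pass errors.Cause(err), others pass the traced error directly), which suggests IsErrIgnored unwraps internally; the explicit Cause is just the conservative form. A short sketch with stand-in sentinels rather than the cmd ones:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

var (
	errDiskNotFound   = fmt.Errorf("disk not found")
	errVolumeNotFound = fmt.Errorf("volume not found")

	ignoredErrs = []error{errDiskNotFound, errVolumeNotFound}
)

func main() {
	err := errors.Trace(errDiskNotFound)

	// Unwrap, then test membership against the ignore list.
	if errors.IsErrIgnored(errors.Cause(err), ignoredErrs...) {
		fmt.Println("ignorable disk error, continuing")
	}
}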

View File

@ -23,6 +23,7 @@ import (
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/errors"
) )
// Storage server implements rpc primitives to facilitate exporting a // Storage server implements rpc primitives to facilitate exporting a
@ -223,7 +224,7 @@ func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error
// Initialize storage rpc servers for every disk that is hosted on this node. // Initialize storage rpc servers for every disk that is hosted on this node.
storageRPCs, err := newStorageRPCServer(endpoints) storageRPCs, err := newStorageRPCServer(endpoints)
if err != nil { if err != nil {
return traceError(err) return errors.Trace(err)
} }
// Create a unique route for each disk exported from this node. // Create a unique route for each disk exported from this node.
@ -231,7 +232,7 @@ func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error
storageRPCServer := newRPCServer() storageRPCServer := newRPCServer()
err = storageRPCServer.RegisterName("Storage", stServer) err = storageRPCServer.RegisterName("Storage", stServer)
if err != nil { if err != nil {
return traceError(err) return errors.Trace(err)
} }
// Add minio storage routes. // Add minio storage routes.
storageRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter() storageRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()

View File

@ -21,6 +21,7 @@ import (
"testing" "testing"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/errors"
) )
type testStorageRPCServer struct { type testStorageRPCServer struct {
@ -68,7 +69,7 @@ func createTestStorageServer(t *testing.T) *testStorageRPCServer {
} }
func errorIfInvalidToken(t *testing.T, err error) { func errorIfInvalidToken(t *testing.T, err error) {
realErr := errorCause(err) realErr := errors.Cause(err)
if realErr != errInvalidToken { if realErr != errInvalidToken {
t.Errorf("Expected to fail with %s but failed with %s", errInvalidToken, realErr) t.Errorf("Expected to fail with %s but failed with %s", errInvalidToken, realErr)
} }

View File

@ -19,6 +19,8 @@ package cmd
import ( import (
"sort" "sort"
"strings" "strings"
"github.com/minio/minio/pkg/errors"
) )
// Tree walk result carries results of tree walking. // Tree walk result carries results of tree walking.
@ -141,7 +143,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
if err != nil { if err != nil {
select { select {
case <-endWalkCh: case <-endWalkCh:
return traceError(errWalkAbort) return errors.Trace(errWalkAbort)
case resultCh <- treeWalkResult{err: err}: case resultCh <- treeWalkResult{err: err}:
return err return err
} }
@ -203,7 +205,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
isEOF := ((i == len(entries)-1) && isEnd) isEOF := ((i == len(entries)-1) && isEnd)
select { select {
case <-endWalkCh: case <-endWalkCh:
return traceError(errWalkAbort) return errors.Trace(errWalkAbort)
case resultCh <- treeWalkResult{entry: pathJoin(prefixDir, entry), end: isEOF}: case resultCh <- treeWalkResult{entry: pathJoin(prefixDir, entry), end: isEOF}:
} }
} }

View File

@ -19,7 +19,6 @@ package cmd
import ( import (
"archive/zip" "archive/zip"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -37,6 +36,7 @@ import (
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/browser" "github.com/minio/minio/browser"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -433,7 +433,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
// Since the error message may be very long to display // Since the error message may be very long to display
// on the browser, we tell the user to check the // on the browser, we tell the user to check the
// server logs. // server logs.
return toJSONError(errors.New("unexpected error(s) occurred - please check minio server logs")) return toJSONError(fmt.Errorf("unexpected error(s) occurred - please check minio server logs"))
} }
// As we have updated access/secret key, generate new auth token. // As we have updated access/secret key, generate new auth token.
@ -748,7 +748,7 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName) var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil { if err != nil {
_, ok := errorCause(err).(PolicyNotFound) _, ok := errors.Cause(err).(PolicyNotFound)
if !ok { if !ok {
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
} }
@ -790,7 +790,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName) var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil { if err != nil {
_, ok := errorCause(err).(PolicyNotFound) _, ok := errors.Cause(err).(PolicyNotFound)
if !ok { if !ok {
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
} }
@ -834,7 +834,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName) var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil { if err != nil {
if _, ok := errorCause(err).(PolicyNotFound); !ok { if _, ok := errors.Cause(err).(PolicyNotFound); !ok {
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
} }
policyInfo = policy.BucketAccessPolicy{Version: "2012-10-17"} policyInfo = policy.BucketAccessPolicy{Version: "2012-10-17"}
@ -878,7 +878,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
if apiErr.Code == "XMinioPolicyNesting" { if apiErr.Code == "XMinioPolicyNesting" {
err = PolicyNesting{} err = PolicyNesting{}
} else { } else {
err = errors.New(apiErr.Description) err = fmt.Errorf(apiErr.Description)
} }
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
} }
@ -1004,7 +1004,7 @@ func toJSONError(err error, params ...string) (jerr *json2.Error) {
// toWebAPIError - convert into error into APIError. // toWebAPIError - convert into error into APIError.
func toWebAPIError(err error) APIError { func toWebAPIError(err error) APIError {
err = errorCause(err) err = errors.Cause(err)
if err == errAuthentication { if err == errAuthentication {
return APIError{ return APIError{
Code: "AccessDenied", Code: "AccessDenied",

View File

@ -19,6 +19,8 @@ package cmd
import ( import (
"sort" "sort"
"sync" "sync"
"github.com/minio/minio/pkg/errors"
) )
// list all errors that can be ignore in a bucket operation. // list all errors that can be ignore in a bucket operation.
@ -33,7 +35,7 @@ var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)
func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error { func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return traceError(BucketNameInvalid{Bucket: bucket}) return errors.Trace(BucketNameInvalid{Bucket: bucket})
} }
// Initialize sync waitgroup. // Initialize sync waitgroup.
@ -45,7 +47,7 @@ func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
// Make a volume entry on all underlying storage disks. // Make a volume entry on all underlying storage disks.
for index, disk := range xl.storageDisks { for index, disk := range xl.storageDisks {
if disk == nil { if disk == nil {
dErrs[index] = traceError(errDiskNotFound) dErrs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -54,7 +56,7 @@ func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
defer wg.Done() defer wg.Done()
err := disk.MakeVol(bucket) err := disk.MakeVol(bucket)
if err != nil { if err != nil {
dErrs[index] = traceError(err) dErrs[index] = errors.Trace(err)
} }
}(index, disk) }(index, disk)
} }
@ -63,7 +65,7 @@ func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
wg.Wait() wg.Wait()
err := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, xl.writeQuorum) err := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, xl.writeQuorum)
if errorCause(err) == errXLWriteQuorum { if errors.Cause(err) == errXLWriteQuorum {
// Purge successfully created buckets if we don't have writeQuorum. // Purge successfully created buckets if we don't have writeQuorum.
undoMakeBucket(xl.storageDisks, bucket) undoMakeBucket(xl.storageDisks, bucket)
} }
@ -127,9 +129,9 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
} }
return bucketInfo, nil return bucketInfo, nil
} }
err = traceError(serr) err = errors.Trace(serr)
// For any reason disk went offline continue and pick the next one. // For any reason disk went offline continue and pick the next one.
if isErrIgnored(err, bucketMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
bucketErrs = append(bucketErrs, err) bucketErrs = append(bucketErrs, err)
continue continue
} }
@ -187,9 +189,9 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) {
} }
return bucketsInfo, nil return bucketsInfo, nil
} }
err = traceError(err) err = errors.Trace(err)
// Ignore any disks not found. // Ignore any disks not found.
if isErrIgnored(err, bucketMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
continue continue
} }
break break
@ -222,7 +224,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
// Remove a volume entry on all underlying storage disks. // Remove a volume entry on all underlying storage disks.
for index, disk := range xl.storageDisks { for index, disk := range xl.storageDisks {
if disk == nil { if disk == nil {
dErrs[index] = traceError(errDiskNotFound) dErrs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -232,13 +234,13 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
// Attempt to delete bucket. // Attempt to delete bucket.
err := disk.DeleteVol(bucket) err := disk.DeleteVol(bucket)
if err != nil { if err != nil {
dErrs[index] = traceError(err) dErrs[index] = errors.Trace(err)
return return
} }
// Cleanup all the previously incomplete multiparts. // Cleanup all the previously incomplete multiparts.
err = cleanupDir(disk, minioMetaMultipartBucket, bucket) err = cleanupDir(disk, minioMetaMultipartBucket, bucket)
if err != nil { if err != nil {
if errorCause(err) == errVolumeNotFound { if errors.Cause(err) == errVolumeNotFound {
return return
} }
dErrs[index] = err dErrs[index] = err
@ -250,7 +252,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
wg.Wait() wg.Wait()
err := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, xl.writeQuorum) err := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, xl.writeQuorum)
if errorCause(err) == errXLWriteQuorum { if errors.Cause(err) == errXLWriteQuorum {
xl.undoDeleteBucket(bucket) xl.undoDeleteBucket(bucket)
} }
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
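
An aside on the pattern this file establishes: every failure site wraps its sentinel with errors.Trace, and every comparison unwraps with errors.Cause, since the wrapper never compares equal to the sentinel itself. A minimal self-contained sketch of that round trip (the sentinels and the naive two-of-three reduction are illustrative stand-ins, not the cmd package's errDiskNotFound, errXLWriteQuorum, or reduceWriteQuorumErrs):

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/errors"
)

// Illustrative stand-ins for the cmd package's sentinel errors.
var (
    errDiskGone    = fmt.Errorf("disk not found")
    errWriteQuorum = fmt.Errorf("write quorum not met")
)

func main() {
    // Each failure site wraps its sentinel, capturing a stack trace.
    dErrs := []error{errors.Trace(errDiskGone), nil, errors.Trace(errDiskGone)}

    // Naive reduction for illustration: count causes, not wrapped values.
    counts := make(map[error]int)
    for _, e := range errors.Causes(dErrs) {
        if e != nil {
            counts[e]++
        }
    }
    var reduced error
    if counts[errDiskGone] >= 2 {
        reduced = errors.Trace(errWriteQuorum)
    }

    // Comparisons must go through Cause: the wrapper is not the sentinel.
    fmt.Println(reduced == errWriteQuorum)               // false
    fmt.Println(errors.Cause(reduced) == errWriteQuorum) // true
}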

View File

@ -18,6 +18,8 @@ package cmd
import ( import (
"path" "path"
"github.com/minio/minio/pkg/errors"
) )
// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice. // getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
@ -61,7 +63,7 @@ func (xl xlObjects) isObject(bucket, prefix string) (ok bool) {
return true return true
} }
// Ignore for file not found, disk not found or faulty disk. // Ignore for file not found, disk not found or faulty disk.
if isErrIgnored(err, xlTreeWalkIgnoredErrs...) { if errors.IsErrIgnored(err, xlTreeWalkIgnoredErrs...) {
continue continue
} }
errorIf(err, "Unable to stat a file %s/%s/%s", bucket, prefix, xlMetaJSONFile) errorIf(err, "Unable to stat a file %s/%s/%s", bucket, prefix, xlMetaJSONFile)

View File

@ -19,6 +19,8 @@ package cmd
import ( import (
"path/filepath" "path/filepath"
"time" "time"
"github.com/minio/minio/pkg/errors"
) )
// commonTime returns a maximally occurring time from a list of times. // commonTime returns a maximally occurring time from a list of times.
@ -130,7 +132,7 @@ func outDatedDisks(disks, latestDisks []StorageAPI, errs []error, partsMetadata
continue continue
} }
// disk either has an older xl.json or doesn't have one. // disk either has an older xl.json or doesn't have one.
switch errorCause(errs[index]) { switch errors.Cause(errs[index]) {
case nil, errFileNotFound: case nil, errFileNotFound:
outDatedDisks[index] = disks[index] outDatedDisks[index] = disks[index]
} }
@ -210,7 +212,7 @@ func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealObject
// xl.json is not found, which implies the erasure // xl.json is not found, which implies the erasure
// coded blocks are unavailable in the corresponding disk. // coded blocks are unavailable in the corresponding disk.
// First half of the disks are data and the rest are parity. // First half of the disks are data and the rest are parity.
switch realErr := errorCause(err); realErr { switch realErr := errors.Cause(err); realErr {
case errDiskNotFound: case errDiskNotFound:
disksMissing = true disksMissing = true
fallthrough fallthrough
@ -280,7 +282,7 @@ func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs
availableDisks[i] = OfflineDisk availableDisks[i] = OfflineDisk
break break
} }
return nil, nil, traceError(hErr) return nil, nil, errors.Trace(hErr)
} }
} }

View File

@ -21,6 +21,8 @@ import (
"path" "path"
"sort" "sort"
"sync" "sync"
"github.com/minio/minio/pkg/errors"
) )
// healFormatXL - heals missing `format.json` on freshly or corrupted // healFormatXL - heals missing `format.json` on freshly or corrupted
@ -105,7 +107,7 @@ func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int) error
// Make a volume entry on all underlying storage disks. // Make a volume entry on all underlying storage disks.
for index, disk := range storageDisks { for index, disk := range storageDisks {
if disk == nil { if disk == nil {
dErrs[index] = traceError(errDiskNotFound) dErrs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -114,11 +116,11 @@ func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int) error
defer wg.Done() defer wg.Done()
if _, err := disk.StatVol(bucket); err != nil { if _, err := disk.StatVol(bucket); err != nil {
if err != errVolumeNotFound { if err != errVolumeNotFound {
dErrs[index] = traceError(err) dErrs[index] = errors.Trace(err)
return return
} }
if err = disk.MakeVol(bucket); err != nil { if err = disk.MakeVol(bucket); err != nil {
dErrs[index] = traceError(err) dErrs[index] = errors.Trace(err)
} }
} }
}(index, disk) }(index, disk)
@ -128,7 +130,7 @@ func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int) error
wg.Wait() wg.Wait()
reducedErr := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, writeQuorum) reducedErr := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum { if errors.Cause(reducedErr) == errXLWriteQuorum {
// Purge successfully created buckets if we don't have writeQuorum. // Purge successfully created buckets if we don't have writeQuorum.
undoMakeBucket(storageDisks, bucket) undoMakeBucket(storageDisks, bucket)
} }
@ -198,7 +200,7 @@ func listAllBuckets(storageDisks []StorageAPI) (buckets map[string]VolInfo, buck
continue continue
} }
// Ignore any disks not found. // Ignore any disks not found.
if isErrIgnored(err, bucketMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
continue continue
} }
break break
@ -416,7 +418,7 @@ func healObject(storageDisks []StorageAPI, bucket, object string, quorum int) (i
// may have object parts still present in the object // may have object parts still present in the object
// directory. These need to be deleted for the object to // directory. These need to be deleted for the object to
// be healed successfully. // be healed successfully.
if errs[index] != nil && !isErr(errs[index], errFileNotFound) { if errs[index] != nil && !errors.IsErr(errs[index], errFileNotFound) {
continue continue
} }
@ -522,7 +524,7 @@ func healObject(storageDisks []StorageAPI, bucket, object string, quorum int) (i
aErr = disk.RenameFile(minioMetaTmpBucket, retainSlash(tmpID), bucket, aErr = disk.RenameFile(minioMetaTmpBucket, retainSlash(tmpID), bucket,
retainSlash(object)) retainSlash(object))
if aErr != nil { if aErr != nil {
return 0, 0, toObjectErr(traceError(aErr), bucket, object) return 0, 0, toObjectErr(errors.Trace(aErr), bucket, object)
} }
} }
return numOfflineDisks, numHealedDisks, nil return numOfflineDisks, numHealedDisks, nil

View File

@ -22,6 +22,8 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/minio/minio/pkg/errors"
) )
// Tests healing of format XL. // Tests healing of format XL.
@ -289,7 +291,7 @@ func TestUndoMakeBucket(t *testing.T) {
// Validate if bucket was deleted properly. // Validate if bucket was deleted properly.
_, err = obj.GetBucketInfo(bucketName) _, err = obj.GetBucketInfo(bucketName)
if err != nil { if err != nil {
err = errorCause(err) err = errors.Cause(err)
switch err.(type) { switch err.(type) {
case BucketNotFound: case BucketNotFound:
default: default:
@ -531,7 +533,7 @@ func TestHealObjectXL(t *testing.T) {
// Try healing now, expect to receive errDiskNotFound. // Try healing now, expect to receive errDiskNotFound.
_, _, err = obj.HealObject(bucket, object) _, _, err = obj.HealObject(bucket, object)
if errorCause(err) != errDiskNotFound { if errors.Cause(err) != errDiskNotFound {
t.Errorf("Expected %v but received %v", errDiskNotFound, err) t.Errorf("Expected %v but received %v", errDiskNotFound, err)
} }
} }

View File

@ -20,6 +20,8 @@ import (
"path/filepath" "path/filepath"
"sort" "sort"
"strings" "strings"
"github.com/minio/minio/pkg/errors"
) )
func listDirHealFactory(isLeaf isLeafFunc, disks ...StorageAPI) listDirFunc { func listDirHealFactory(isLeaf isLeafFunc, disks ...StorageAPI) listDirFunc {
@ -112,7 +114,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
objInfo, err = xl.getObjectInfo(bucket, entry) objInfo, err = xl.getObjectInfo(bucket, entry)
if err != nil { if err != nil {
// Ignore errFileNotFound // Ignore errFileNotFound
if errorCause(err) == errFileNotFound { if errors.Cause(err) == errFileNotFound {
continue continue
} }
return loi, toObjectErr(err, bucket, prefix) return loi, toObjectErr(err, bucket, prefix)
@ -238,7 +240,7 @@ func fetchMultipartUploadIDs(bucket, keyMarker, uploadIDMarker string,
uploads, end, err = listMultipartUploadIDs(bucket, keyMarker, uploads, end, err = listMultipartUploadIDs(bucket, keyMarker,
uploadIDMarker, maxUploads, disk) uploadIDMarker, maxUploads, disk)
if err == nil || if err == nil ||
!isErrIgnored(err, objMetadataOpIgnoredErrs...) { !errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
break break
} }
} }

View File

@ -16,6 +16,8 @@
package cmd package cmd
import "github.com/minio/minio/pkg/errors"
// Returns function "listDir" of the type listDirFunc. // Returns function "listDir" of the type listDirFunc.
// isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry. // isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry.
// disks - used for doing disk.ListDir(). FS passes single disk argument, XL passes a list of disks. // disks - used for doing disk.ListDir(). FS passes single disk argument, XL passes a list of disks.
@ -30,10 +32,10 @@ func listDirFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, disks ...Sto
if err != nil { if err != nil {
// If for any reason the disk was deleted or went offline, continue // If for any reason the disk was deleted or went offline, continue
// and list from other disks if possible. // and list from other disks if possible.
if isErrIgnored(err, treeWalkIgnoredErrs...) { if errors.IsErrIgnored(err, treeWalkIgnoredErrs...) {
continue continue
} }
return nil, false, traceError(err) return nil, false, errors.Trace(err)
} }
entries, delayIsLeaf = filterListEntries(bucket, prefixDir, entries, prefixEntry, isLeaf) entries, delayIsLeaf = filterListEntries(bucket, prefixDir, entries, prefixEntry, isLeaf)
@ -89,7 +91,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
objInfo, err = xl.getObjectInfo(bucket, entry) objInfo, err = xl.getObjectInfo(bucket, entry)
if err != nil { if err != nil {
// Ignore errFileNotFound // Ignore errFileNotFound
if errorCause(err) == errFileNotFound { if errors.Cause(err) == errFileNotFound {
continue continue
} }
return loi, toObjectErr(err, bucket, prefix) return loi, toObjectErr(err, bucket, prefix)
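
The listing paths above lean on errors.IsErrIgnored (a thin alias over errors.IsErr) to skip disks that are merely offline while still surfacing real failures. A hedged sketch of that skip-or-fail loop, with invented stand-ins for the per-disk results and for xlTreeWalkIgnoredErrs:

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/errors"
)

// Illustrative stand-ins for cmd's sentinels and its ignored-error list.
var (
    errDiskGone  = fmt.Errorf("disk not found")
    errVolGone   = fmt.Errorf("volume not found")
    errCorrupted = fmt.Errorf("corrupted format")

    treeWalkIgnoredErrs = []error{errDiskGone, errVolGone}
)

// listFirstUsable walks per-disk results, skipping ignorable failures.
func listFirstUsable(results map[string]error) (string, error) {
    for disk, err := range results {
        if err == nil {
            return disk, nil
        }
        // IsErrIgnored unwraps via Cause, so traced errors match too.
        if errors.IsErrIgnored(err, treeWalkIgnoredErrs...) {
            continue // disk offline or volume gone: try the next one
        }
        return "", err // a real failure, stop here
    }
    return "", errors.Trace(errDiskGone)
}

func main() {
    disk, err := listFirstUsable(map[string]error{
        "disk1": errors.Trace(errDiskGone),
        "disk2": nil,
    })
    fmt.Println(disk, err) // disk2 <nil>

    _, err = listFirstUsable(map[string]error{"disk1": errors.Trace(errCorrupted)})
    fmt.Println(errors.Cause(err) == errCorrupted) // true
}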

View File

@ -21,7 +21,6 @@ import (
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"hash" "hash"
"path" "path"
@ -30,6 +29,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/minio/minio/pkg/errors"
"golang.org/x/crypto/blake2b" "golang.org/x/crypto/blake2b"
) )
@ -354,7 +354,7 @@ func (m xlMetaV1) ObjectToPartOffset(offset int64) (partIndex int, partOffset in
partOffset -= part.Size partOffset -= part.Size
} }
// Offset beyond the size of the object, return InvalidRange. // Offset beyond the size of the object, return InvalidRange.
return 0, 0, traceError(InvalidRange{}) return 0, 0, errors.Trace(InvalidRange{})
} }
// pickValidXLMeta - picks one valid xlMeta content and returns from a // pickValidXLMeta - picks one valid xlMeta content and returns from a
@ -367,7 +367,7 @@ func pickValidXLMeta(metaArr []xlMetaV1, modTime time.Time) (xmv xlMetaV1, e err
return meta, nil return meta, nil
} }
} }
return xmv, traceError(errors.New("No valid xl.json present")) return xmv, errors.Trace(fmt.Errorf("No valid xl.json present"))
} }
// list of all errors that can be ignored in a metadata operation. // list of all errors that can be ignored in a metadata operation.
@ -387,7 +387,7 @@ func (xl xlObjects) readXLMetaParts(bucket, object string) (xlMetaParts []object
} }
// If for any reason the disk or bucket is not available, continue // If for any reason the disk or bucket is not available, continue
// and read from other disks. // and read from other disks.
if isErrIgnored(err, objMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
ignoredErrs = append(ignoredErrs, err) ignoredErrs = append(ignoredErrs, err)
continue continue
} }
@ -414,7 +414,7 @@ func (xl xlObjects) readXLMetaStat(bucket, object string) (xlStat statInfo, xlMe
} }
// If for any reason the disk or bucket is not available, continue // If for any reason the disk or bucket is not available, continue
// and read from other disks. // and read from other disks.
if isErrIgnored(err, objMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
ignoredErrs = append(ignoredErrs, err) ignoredErrs = append(ignoredErrs, err)
continue continue
} }
@ -429,7 +429,7 @@ func (xl xlObjects) readXLMetaStat(bucket, object string) (xlStat statInfo, xlMe
// deleteXLMetadata - deletes `xl.json` on a single disk. // deleteXLMetadata - deletes `xl.json` on a single disk.
func deleteXLMetdata(disk StorageAPI, bucket, prefix string) error { func deleteXLMetdata(disk StorageAPI, bucket, prefix string) error {
jsonFile := path.Join(prefix, xlMetaJSONFile) jsonFile := path.Join(prefix, xlMetaJSONFile)
return traceError(disk.DeleteFile(bucket, jsonFile)) return errors.Trace(disk.DeleteFile(bucket, jsonFile))
} }
// writeXLMetadata - writes `xl.json` to a single disk. // writeXLMetadata - writes `xl.json` to a single disk.
@ -439,10 +439,10 @@ func writeXLMetadata(disk StorageAPI, bucket, prefix string, xlMeta xlMetaV1) er
// Marshal json. // Marshal json.
metadataBytes, err := json.Marshal(&xlMeta) metadataBytes, err := json.Marshal(&xlMeta)
if err != nil { if err != nil {
return traceError(err) return errors.Trace(err)
} }
// Persist marshalled data. // Persist marshalled data.
return traceError(disk.AppendFile(bucket, jsonFile, metadataBytes)) return errors.Trace(disk.AppendFile(bucket, jsonFile, metadataBytes))
} }
// deleteAllXLMetadata - deletes all partially written `xl.json` depending on errs. // deleteAllXLMetadata - deletes all partially written `xl.json` depending on errs.
@ -482,7 +482,7 @@ func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas []
// Start writing `xl.json` to all disks in parallel. // Start writing `xl.json` to all disks in parallel.
for index, disk := range disks { for index, disk := range disks {
if disk == nil { if disk == nil {
mErrs[index] = traceError(errDiskNotFound) mErrs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -505,7 +505,7 @@ func writeUniqueXLMetadata(disks []StorageAPI, bucket, prefix string, xlMetas []
wg.Wait() wg.Wait()
err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, quorum) err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, quorum)
if errorCause(err) == errXLWriteQuorum { if errors.Cause(err) == errXLWriteQuorum {
// Delete all `xl.json` successfully renamed. // Delete all `xl.json` successfully renamed.
deleteAllXLMetadata(disks, bucket, prefix, mErrs) deleteAllXLMetadata(disks, bucket, prefix, mErrs)
} }
@ -520,7 +520,7 @@ func writeSameXLMetadata(disks []StorageAPI, bucket, prefix string, xlMeta xlMet
// Start writing `xl.json` to all disks in parallel. // Start writing `xl.json` to all disks in parallel.
for index, disk := range disks { for index, disk := range disks {
if disk == nil { if disk == nil {
mErrs[index] = traceError(errDiskNotFound) mErrs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -543,7 +543,7 @@ func writeSameXLMetadata(disks []StorageAPI, bucket, prefix string, xlMeta xlMet
wg.Wait() wg.Wait()
err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, writeQuorum) err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, writeQuorum)
if errorCause(err) == errXLWriteQuorum { if errors.Cause(err) == errXLWriteQuorum {
// Delete all `xl.json` successfully renamed. // Delete all `xl.json` successfully renamed.
deleteAllXLMetadata(disks, bucket, prefix, mErrs) deleteAllXLMetadata(disks, bucket, prefix, mErrs)
} }

View File

@ -26,6 +26,7 @@ import (
"time" "time"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
errors2 "github.com/minio/minio/pkg/errors"
) )
// Tests for reading XL object info. // Tests for reading XL object info.
@ -93,7 +94,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
} }
_, _, err = obj.(*xlObjects).readXLMetaStat(bucketName, objectName) _, _, err = obj.(*xlObjects).readXLMetaStat(bucketName, objectName)
if errorCause(err) != errVolumeNotFound { if errors2.Cause(err) != errVolumeNotFound {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -178,7 +179,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
} }
_, err = obj.(*xlObjects).readXLMetaParts(minioMetaMultipartBucket, uploadIDPath) _, err = obj.(*xlObjects).readXLMetaParts(minioMetaMultipartBucket, uploadIDPath)
if errorCause(err) != errFileNotFound { if errors2.Cause(err) != errFileNotFound {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -297,7 +298,7 @@ func TestObjectToPartOffset(t *testing.T) {
// Test them. // Test them.
for _, testCase := range testCases { for _, testCase := range testCases {
index, offset, err := xlMeta.ObjectToPartOffset(testCase.offset) index, offset, err := xlMeta.ObjectToPartOffset(testCase.offset)
err = errorCause(err) err = errors2.Cause(err)
if err != testCase.expectedErr { if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err) t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
} }
@ -355,7 +356,7 @@ func TestPickValidXLMeta(t *testing.T) {
for i, test := range testCases { for i, test := range testCases {
xlMeta, err := pickValidXLMeta(test.metaArr, test.modTime) xlMeta, err := pickValidXLMeta(test.metaArr, test.modTime)
if test.expectedErr != nil { if test.expectedErr != nil {
if errorCause(err).Error() != test.expectedErr.Error() { if errors2.Cause(err).Error() != test.expectedErr.Error() {
t.Errorf("Test %d: Expected to fail with %v but received %v", t.Errorf("Test %d: Expected to fail with %v but received %v",
i+1, test.expectedErr, err) i+1, test.expectedErr, err)
} }

View File

@ -26,6 +26,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/mimedb"
) )
@ -43,7 +44,7 @@ func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
for index, disk := range xl.storageDisks { for index, disk := range xl.storageDisks {
if disk == nil { if disk == nil {
errs[index] = traceError(errDiskNotFound) errs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
// Update `uploads.json` in a goroutine. // Update `uploads.json` in a goroutine.
@ -53,7 +54,7 @@ func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated
// read and parse uploads.json on this disk // read and parse uploads.json on this disk
uploadsJSON, err := readUploadsJSON(bucket, object, disk) uploadsJSON, err := readUploadsJSON(bucket, object, disk)
if errorCause(err) == errFileNotFound { if errors.Cause(err) == errFileNotFound {
// If file is not found, we assume a // If file is not found, we assume a
// default (empty) upload info. // default (empty) upload info.
uploadsJSON, err = newUploadsV1("xl"), nil uploadsJSON, err = newUploadsV1("xl"), nil
@ -84,7 +85,7 @@ func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated
} else { } else {
wErr := disk.RenameFile(minioMetaMultipartBucket, uploadsPath, minioMetaTmpBucket, tmpUploadsPath) wErr := disk.RenameFile(minioMetaMultipartBucket, uploadsPath, minioMetaTmpBucket, tmpUploadsPath)
if wErr != nil { if wErr != nil {
errs[index] = traceError(wErr) errs[index] = errors.Trace(wErr)
} }
} }
@ -95,7 +96,7 @@ func (xl xlObjects) updateUploadJSON(bucket, object, uploadID string, initiated
wg.Wait() wg.Wait()
err := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum) err := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(err) == errXLWriteQuorum { if errors.Cause(err) == errXLWriteQuorum {
// No quorum. Perform cleanup on the minority of disks // No quorum. Perform cleanup on the minority of disks
// on which the operation succeeded. // on which the operation succeeded.
@ -170,7 +171,7 @@ func (xl xlObjects) isMultipartUpload(bucket, prefix string) bool {
return true return true
} }
// If for any reason the disk was deleted or went offline, continue // If for any reason the disk was deleted or went offline, continue
if isErrIgnored(err, objMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
continue continue
} }
break break
@ -218,12 +219,12 @@ func (xl xlObjects) statPart(bucket, object, uploadID, partName string) (fileInf
return fileInfo, nil return fileInfo, nil
} }
// If for any reason the disk was deleted or went offline, we continue to the next disk. // If for any reason the disk was deleted or went offline, we continue to the next disk.
if isErrIgnored(err, objMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
ignoredErrs = append(ignoredErrs, err) ignoredErrs = append(ignoredErrs, err)
continue continue
} }
// Error is not ignored, return right here. // Error is not ignored, return right here.
return FileInfo{}, traceError(err) return FileInfo{}, errors.Trace(err)
} }
// If all errors were ignored, reduce to maximal occurrence // If all errors were ignored, reduce to maximal occurrence
// based on the read quorum. // based on the read quorum.
@ -241,7 +242,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// Rename `xl.json` to all disks in parallel. // Rename `xl.json` to all disks in parallel.
for index, disk := range disks { for index, disk := range disks {
if disk == nil { if disk == nil {
mErrs[index] = traceError(errDiskNotFound) mErrs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -254,7 +255,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// Renames `xl.json` from source prefix to destination prefix. // Renames `xl.json` from source prefix to destination prefix.
rErr := disk.RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile) rErr := disk.RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile)
if rErr != nil { if rErr != nil {
mErrs[index] = traceError(rErr) mErrs[index] = errors.Trace(rErr)
return return
} }
mErrs[index] = nil mErrs[index] = nil
@ -264,7 +265,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
wg.Wait() wg.Wait()
err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, quorum) err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, quorum)
if errorCause(err) == errXLWriteQuorum { if errors.Cause(err) == errXLWriteQuorum {
// Delete all `xl.json` successfully renamed. // Delete all `xl.json` successfully renamed.
deleteAllXLMetadata(disks, dstBucket, dstPrefix, mErrs) deleteAllXLMetadata(disks, dstBucket, dstPrefix, mErrs)
} }
@ -317,7 +318,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
if err == nil { if err == nil {
break break
} }
if isErrIgnored(err, objMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
continue continue
} }
break break
@ -386,14 +387,14 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
if err == nil { if err == nil {
break break
} }
if isErrIgnored(err, objMetadataOpIgnoredErrs...) { if errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
continue continue
} }
break break
} }
entryLock.RUnlock() entryLock.RUnlock()
if err != nil { if err != nil {
if isErrIgnored(err, xlTreeWalkIgnoredErrs...) { if errors.IsErrIgnored(err, xlTreeWalkIgnoredErrs...) {
continue continue
} }
return lmi, err return lmi, err
@ -585,7 +586,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Validate the input data size; it can never be less than zero. // Validate the input data size; it can never be less than zero.
if data.Size() < 0 { if data.Size() < 0 {
return pi, toObjectErr(traceError(errInvalidArgument)) return pi, toObjectErr(errors.Trace(errInvalidArgument))
} }
var partsMetadata []xlMetaV1 var partsMetadata []xlMetaV1
@ -601,14 +602,14 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Validates if upload ID exists. // Validates if upload ID exists.
if !xl.isUploadIDExists(bucket, object, uploadID) { if !xl.isUploadIDExists(bucket, object, uploadID) {
preUploadIDLock.RUnlock() preUploadIDLock.RUnlock()
return pi, traceError(InvalidUploadID{UploadID: uploadID}) return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
// Read metadata associated with the object from all disks. // Read metadata associated with the object from all disks.
partsMetadata, errs = readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket, partsMetadata, errs = readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket,
uploadIDPath) uploadIDPath)
reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum) reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum { if errors.Cause(reducedErr) == errXLWriteQuorum {
preUploadIDLock.RUnlock() preUploadIDLock.RUnlock()
return pi, toObjectErr(reducedErr, bucket, object) return pi, toObjectErr(reducedErr, bucket, object)
} }
@ -654,7 +655,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Should return IncompleteBody{} error when reader has fewer bytes // Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header. // than specified in request header.
if file.Size < data.Size() { if file.Size < data.Size() {
return pi, traceError(IncompleteBody{}) return pi, errors.Trace(IncompleteBody{})
} }
// post-upload check (write) lock // post-upload check (write) lock
@ -666,7 +667,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Validate again if upload ID still exists. // Validate again if upload ID still exists.
if !xl.isUploadIDExists(bucket, object, uploadID) { if !xl.isUploadIDExists(bucket, object, uploadID) {
return pi, traceError(InvalidUploadID{UploadID: uploadID}) return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
// Rename temporary part file to its final location. // Rename temporary part file to its final location.
@ -679,7 +680,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// Read metadata again because it might have been updated by a parallel upload of another part. // Read metadata again because it might have been updated by a parallel upload of another part.
partsMetadata, errs = readAllXLMetadata(onlineDisks, minioMetaMultipartBucket, uploadIDPath) partsMetadata, errs = readAllXLMetadata(onlineDisks, minioMetaMultipartBucket, uploadIDPath)
reducedErr = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum) reducedErr = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum { if errors.Cause(reducedErr) == errXLWriteQuorum {
return pi, toObjectErr(reducedErr, bucket, object) return pi, toObjectErr(reducedErr, bucket, object)
} }
@ -820,7 +821,7 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
defer uploadIDLock.Unlock() defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) { if !xl.isUploadIDExists(bucket, object, uploadID) {
return lpi, traceError(InvalidUploadID{UploadID: uploadID}) return lpi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
result, err := xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) result, err := xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
return result, err return result, err
@ -851,13 +852,13 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
defer uploadIDLock.Unlock() defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) { if !xl.isUploadIDExists(bucket, object, uploadID) {
return oi, traceError(InvalidUploadID{UploadID: uploadID}) return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
// -- FIXME. (needs a new kind of lock). // -- FIXME. (needs a new kind of lock).
if xl.parentDirIsObject(bucket, path.Dir(object)) { if xl.parentDirIsObject(bucket, path.Dir(object)) {
return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object) return oi, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
} }
// Calculate s3 compatible md5sum for complete multipart. // Calculate s3 compatible md5sum for complete multipart.
@ -871,7 +872,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Read metadata associated with the object from all disks. // Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket, uploadIDPath) partsMetadata, errs := readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket, uploadIDPath)
reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum) reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum { if errors.Cause(reducedErr) == errXLWriteQuorum {
return oi, toObjectErr(reducedErr, bucket, object) return oi, toObjectErr(reducedErr, bucket, object)
} }
@ -903,17 +904,17 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber) partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber)
// All parts should have same part number. // All parts should have same part number.
if partIdx == -1 { if partIdx == -1 {
return oi, traceError(InvalidPart{}) return oi, errors.Trace(InvalidPart{})
} }
// All parts should have same ETag as previously generated. // All parts should have same ETag as previously generated.
if currentXLMeta.Parts[partIdx].ETag != part.ETag { if currentXLMeta.Parts[partIdx].ETag != part.ETag {
return oi, traceError(InvalidPart{}) return oi, errors.Trace(InvalidPart{})
} }
// All parts except the last part have to be at least 5MB. // All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].Size) { if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].Size) {
return oi, traceError(PartTooSmall{ return oi, errors.Trace(PartTooSmall{
PartNumber: part.PartNumber, PartNumber: part.PartNumber,
PartSize: currentXLMeta.Parts[partIdx].Size, PartSize: currentXLMeta.Parts[partIdx].Size,
PartETag: part.ETag, PartETag: part.ETag,
@ -1057,7 +1058,7 @@ func (xl xlObjects) cleanupUploadedParts(bucket, object, uploadID string) error
// Cleanup uploadID for all disks. // Cleanup uploadID for all disks.
for index, disk := range xl.storageDisks { for index, disk := range xl.storageDisks {
if disk == nil { if disk == nil {
errs[index] = traceError(errDiskNotFound) errs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -1131,7 +1132,7 @@ func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error
defer uploadIDLock.Unlock() defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) { if !xl.isUploadIDExists(bucket, object, uploadID) {
return traceError(InvalidUploadID{UploadID: uploadID}) return errors.Trace(InvalidUploadID{UploadID: uploadID})
} }
return xl.abortMultipartUpload(bucket, object, uploadID) return xl.abortMultipartUpload(bucket, object, uploadID)
} }
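
Typed API errors in these hunks (InvalidUploadID, InvalidPart, PartTooSmall) survive the wrapping: errors.Cause returns the concrete value, so type assertions and type switches keep working. A sketch with an invented stand-in type:

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/errors"
)

// InvalidUpload is an illustrative stand-in for cmd's InvalidUploadID.
type InvalidUpload struct{ UploadID string }

func (e InvalidUpload) Error() string {
    return "invalid upload id " + e.UploadID
}

// abortUpload mimics a failure site that traces a typed error.
func abortUpload(uploadID string) error {
    return errors.Trace(InvalidUpload{UploadID: uploadID})
}

func main() {
    err := abortUpload("deadbeef")
    // Unwrap first, then switch on the concrete cause.
    switch cause := errors.Cause(err).(type) {
    case InvalidUpload:
        fmt.Println("typed cause recovered:", cause.UploadID)
    default:
        fmt.Println("unexpected:", err)
    }
}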

View File

@ -24,6 +24,7 @@ import (
"strings" "strings"
"sync" "sync"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/mimedb"
"github.com/minio/minio/pkg/objcache" "github.com/minio/minio/pkg/objcache"
@ -116,7 +117,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
hashReader, err := hash.NewReader(pipeReader, length, "", "") hashReader, err := hash.NewReader(pipeReader, length, "", "")
if err != nil { if err != nil {
return oi, toObjectErr(traceError(err), dstBucket, dstObject) return oi, toObjectErr(errors.Trace(err), dstBucket, dstObject)
} }
objInfo, err := xl.PutObject(dstBucket, dstObject, hashReader, metadata) objInfo, err := xl.PutObject(dstBucket, dstObject, hashReader, metadata)
@ -143,12 +144,12 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Start offset cannot be negative. // Start offset cannot be negative.
if startOffset < 0 { if startOffset < 0 {
return traceError(errUnexpected) return errors.Trace(errUnexpected)
} }
// Writer cannot be nil. // Writer cannot be nil.
if writer == nil { if writer == nil {
return traceError(errUnexpected) return errors.Trace(errUnexpected)
} }
// Read metadata associated with the object from all disks. // Read metadata associated with the object from all disks.
@ -179,13 +180,13 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Reply back invalid range if the input offset and length fall out of range. // Reply back invalid range if the input offset and length fall out of range.
if startOffset > xlMeta.Stat.Size || startOffset+length > xlMeta.Stat.Size { if startOffset > xlMeta.Stat.Size || startOffset+length > xlMeta.Stat.Size {
return traceError(InvalidRange{startOffset, length, xlMeta.Stat.Size}) return errors.Trace(InvalidRange{startOffset, length, xlMeta.Stat.Size})
} }
// Get start part index and offset. // Get start part index and offset.
partIndex, partOffset, err := xlMeta.ObjectToPartOffset(startOffset) partIndex, partOffset, err := xlMeta.ObjectToPartOffset(startOffset)
if err != nil { if err != nil {
return traceError(InvalidRange{startOffset, length, xlMeta.Stat.Size}) return errors.Trace(InvalidRange{startOffset, length, xlMeta.Stat.Size})
} }
// Calculate endOffset according to length // Calculate endOffset according to length
@ -197,7 +198,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Get last part index to read given length. // Get last part index to read given length.
lastPartIndex, _, err := xlMeta.ObjectToPartOffset(endOffset) lastPartIndex, _, err := xlMeta.ObjectToPartOffset(endOffset)
if err != nil { if err != nil {
return traceError(InvalidRange{startOffset, length, xlMeta.Stat.Size}) return errors.Trace(InvalidRange{startOffset, length, xlMeta.Stat.Size})
} }
// Save the writer. // Save the writer.
@ -214,7 +215,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Copy the data out. // Copy the data out.
if _, err = io.Copy(writer, reader); err != nil { if _, err = io.Copy(writer, reader); err != nil {
return traceError(err) return errors.Trace(err)
} }
// Success. // Success.
@ -224,7 +225,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// For an unknown error, return and error out. // For an unknown error, return and error out.
if err != objcache.ErrKeyNotFoundInCache { if err != objcache.ErrKeyNotFoundInCache {
return traceError(err) return errors.Trace(err)
} // Cache has not been found, fill the cache. } // Cache has not been found, fill the cache.
// Cache is only set if whole object is being read. // Cache is only set if whole object is being read.
@ -241,7 +242,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
// Ignore error if cache is full, proceed to write the object. // Ignore error if cache is full, proceed to write the object.
if err != nil && err != objcache.ErrCacheFull { if err != nil && err != objcache.ErrCacheFull {
// For any other error return here. // For any other error return here.
return toObjectErr(traceError(err), bucket, object) return toObjectErr(errors.Trace(err), bucket, object)
} }
} }
} }
@ -390,7 +391,7 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string,
defer wg.Done() defer wg.Done()
err := disk.RenameFile(srcBucket, srcEntry, dstBucket, dstEntry) err := disk.RenameFile(srcBucket, srcEntry, dstBucket, dstEntry)
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
errs[index] = traceError(err) errs[index] = errors.Trace(err)
} }
}(index, disk) }(index, disk)
} }
@ -401,7 +402,7 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string,
// We can safely allow RenameFile errors up to len(xl.storageDisks) - xl.writeQuorum // We can safely allow RenameFile errors up to len(xl.storageDisks) - xl.writeQuorum
// otherwise return failure. Cleanup successful renames. // otherwise return failure. Cleanup successful renames.
err := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, quorum) err := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, quorum)
if errorCause(err) == errXLWriteQuorum { if errors.Cause(err) == errXLWriteQuorum {
// Undo all the partial rename operations. // Undo all the partial rename operations.
undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs) undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs)
} }
@ -439,7 +440,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
// -- FIXME. (needs a new kind of lock). // -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes performance issue when disks are down). // -- FIXME (this also causes performance issue when disks are down).
if xl.parentDirIsObject(bucket, path.Dir(object)) { if xl.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object) return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
} }
return dirObjectInfo(bucket, object, data.Size(), metadata), nil return dirObjectInfo(bucket, object, data.Size(), metadata), nil
} }
@ -451,14 +452,14 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
// Validate the input data size; it can never be less than zero. // Validate the input data size; it can never be less than zero.
if data.Size() < 0 { if data.Size() < 0 {
return ObjectInfo{}, toObjectErr(traceError(errInvalidArgument)) return ObjectInfo{}, toObjectErr(errors.Trace(errInvalidArgument))
} }
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
// -- FIXME. (needs a new kind of lock). // -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes performance issue when disks are down). // -- FIXME (this also causes performance issue when disks are down).
if xl.parentDirIsObject(bucket, path.Dir(object)) { if xl.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object) return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
} }
// No metadata is set, allocate a new one. // No metadata is set, allocate a new one.
@ -488,7 +489,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
} else { } else {
// Return errors other than ErrCacheFull // Return errors other than ErrCacheFull
if err != objcache.ErrCacheFull { if err != objcache.ErrCacheFull {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) return ObjectInfo{}, toObjectErr(errors.Trace(err), bucket, object)
} }
} }
} }
@ -561,7 +562,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
// Should return IncompleteBody{} error when reader has fewer bytes // Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header. // than specified in request header.
if file.Size < curPartSize { if file.Size < curPartSize {
return ObjectInfo{}, traceError(IncompleteBody{}) return ObjectInfo{}, errors.Trace(IncompleteBody{})
} }
// Update the total written size // Update the total written size
@ -663,14 +664,14 @@ func (xl xlObjects) deleteObject(bucket, object string) error {
for index, disk := range xl.storageDisks { for index, disk := range xl.storageDisks {
if disk == nil { if disk == nil {
dErrs[index] = traceError(errDiskNotFound) dErrs[index] = errors.Trace(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
go func(index int, disk StorageAPI) { go func(index int, disk StorageAPI) {
defer wg.Done() defer wg.Done()
err := cleanupDir(disk, bucket, object) err := cleanupDir(disk, bucket, object)
if err != nil && errorCause(err) != errVolumeNotFound { if err != nil && errors.Cause(err) != errVolumeNotFound {
dErrs[index] = err dErrs[index] = err
} }
}(index, disk) }(index, disk)
@ -692,7 +693,7 @@ func (xl xlObjects) DeleteObject(bucket, object string) (err error) {
// Validate object exists. // Validate object exists.
if !xl.isObject(bucket, object) { if !xl.isObject(bucket, object) {
return traceError(ObjectNotFound{bucket, object}) return errors.Trace(ObjectNotFound{bucket, object})
} // else proceed to delete the object. } // else proceed to delete the object.
// Delete the object on all disks. // Delete the object on all disks.

View File

@ -27,6 +27,7 @@ import (
"time" "time"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
) )
func TestRepeatPutObjectPart(t *testing.T) { func TestRepeatPutObjectPart(t *testing.T) {
@ -98,7 +99,7 @@ func TestXLDeleteObjectBasic(t *testing.T) {
} }
for i, test := range testCases { for i, test := range testCases {
actualErr := xl.DeleteObject(test.bucket, test.object) actualErr := xl.DeleteObject(test.bucket, test.object)
actualErr = errorCause(actualErr) actualErr = errors.Cause(actualErr)
if test.expectedErr != nil && actualErr != test.expectedErr { if test.expectedErr != nil && actualErr != test.expectedErr {
t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr) t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr)
} }
@ -152,7 +153,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
xl.storageDisks[7] = nil xl.storageDisks[7] = nil
xl.storageDisks[8] = nil xl.storageDisks[8] = nil
err = obj.DeleteObject(bucket, object) err = obj.DeleteObject(bucket, object)
err = errorCause(err) err = errors.Cause(err)
if err != toObjectErr(errXLWriteQuorum, bucket, object) { if err != toObjectErr(errXLWriteQuorum, bucket, object) {
t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
} }
@ -203,7 +204,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
} }
// Fetch object from store. // Fetch object from store.
err = xl.GetObject(bucket, object, 0, int64(len("abcd")), ioutil.Discard) err = xl.GetObject(bucket, object, 0, int64(len("abcd")), ioutil.Discard)
err = errorCause(err) err = errors.Cause(err)
if err != toObjectErr(errXLReadQuorum, bucket, object) { if err != toObjectErr(errXLReadQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
} }
@ -254,7 +255,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
} }
// Upload new content to same object "object" // Upload new content to same object "object"
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) _, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
err = errorCause(err) err = errors.Cause(err)
if err != toObjectErr(errXLWriteQuorum, bucket, object) { if err != toObjectErr(errXLWriteQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
} }
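
The test changes above all follow one rule: unwrap with errors.Cause before asserting, because the object layer now returns traced wrappers. A standalone illustration of the pattern (the sentinel and the deleteObjectStub helper are invented for the example, not the cmd package's own names):

package example

import (
    "fmt"
    "testing"

    "github.com/minio/minio/pkg/errors"
)

// Illustrative sentinel; not the cmd package's ObjectNotFound.
var errObjectMissing = fmt.Errorf("object not found")

// deleteObjectStub mimics an object-layer call that traces its errors.
func deleteObjectStub(exists bool) error {
    if !exists {
        return errors.Trace(errObjectMissing)
    }
    return nil
}

func TestDeleteObjectStub(t *testing.T) {
    err := deleteObjectStub(false)
    if err == errObjectMissing {
        t.Fatal("never true: err is a *errors.Error wrapper")
    }
    // Unwrap before comparing against the expected sentinel.
    if errors.Cause(err) != errObjectMissing {
        t.Errorf("expected %v, got %v", errObjectMissing, err)
    }
}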

View File

@ -24,6 +24,7 @@ import (
"sync" "sync"
"time" "time"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/tidwall/gjson" "github.com/tidwall/gjson"
) )
@ -35,9 +36,9 @@ import (
// maximal values would occur quorum or more times. // maximal values would occur quorum or more times.
func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) { func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) {
errorCounts := make(map[error]int) errorCounts := make(map[error]int)
errs = errorsCause(errs) errs = errors2.Causes(errs)
for _, err := range errs { for _, err := range errs {
if isErrIgnored(err, ignoredErrs...) { if errors2.IsErrIgnored(err, ignoredErrs...) {
continue continue
} }
errorCounts[err]++ errorCounts[err]++
@ -72,10 +73,10 @@ func reduceQuorumErrs(errs []error, ignoredErrs []error, quorum int, quorumErr e
} }
if maxErr != nil && maxCount >= quorum { if maxErr != nil && maxCount >= quorum {
// Errors in quorum. // Errors in quorum.
return traceError(maxErr, errs...) return errors2.Trace(maxErr, errs...)
} }
// No quorum satisfied. // No quorum satisfied.
maxErr = traceError(quorumErr, errs...) maxErr = errors2.Trace(quorumErr, errs...)
return return
} }
@ -174,11 +175,11 @@ func parseXLErasureInfo(xlMetaBuf []byte) (ErasureInfo, error) {
for i, v := range checkSumsResult { for i, v := range checkSumsResult {
algorithm := BitrotAlgorithmFromString(v.Get("algorithm").String()) algorithm := BitrotAlgorithmFromString(v.Get("algorithm").String())
if !algorithm.Available() { if !algorithm.Available() {
return erasure, traceError(errBitrotHashAlgoInvalid) return erasure, errors2.Trace(errBitrotHashAlgoInvalid)
} }
hash, err := hex.DecodeString(v.Get("hash").String()) hash, err := hex.DecodeString(v.Get("hash").String())
if err != nil { if err != nil {
return erasure, traceError(err) return erasure, errors2.Trace(err)
} }
checkSums[i] = ChecksumInfo{Name: v.Get("name").String(), Algorithm: algorithm, Hash: hash} checkSums[i] = ChecksumInfo{Name: v.Get("name").String(), Algorithm: algorithm, Hash: hash}
} }
@ -245,7 +246,7 @@ func readXLMetaParts(disk StorageAPI, bucket string, object string) ([]objectPar
// Reads entire `xl.json`. // Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil { if err != nil {
return nil, traceError(err) return nil, errors2.Trace(err)
} }
// obtain xlMetaV1{}.Parts using `github.com/tidwall/gjson`. // obtain xlMetaV1{}.Parts using `github.com/tidwall/gjson`.
xlMetaParts := parseXLParts(xlMetaBuf) xlMetaParts := parseXLParts(xlMetaBuf)
@ -258,7 +259,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo,
// Reads entire `xl.json`. // Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil { if err != nil {
return si, nil, traceError(err) return si, nil, errors2.Trace(err)
} }
// obtain version. // obtain version.
@ -270,7 +271,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo,
// Validate if the xl.json we read is sane, return corrupted format. // Validate if the xl.json we read is sane, return corrupted format.
if !isXLMetaValid(xlVersion, xlFormat) { if !isXLMetaValid(xlVersion, xlFormat) {
// For version mismatches and unrecognized format, return corrupted format. // For version mismatches and unrecognized format, return corrupted format.
return si, nil, traceError(errCorruptedFormat) return si, nil, errors2.Trace(errCorruptedFormat)
} }
// obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`. // obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
@ -279,7 +280,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo,
// obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`. // obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
xlStat, err := parseXLStat(xlMetaBuf) xlStat, err := parseXLStat(xlMetaBuf)
if err != nil { if err != nil {
return si, nil, traceError(err) return si, nil, errors2.Trace(err)
} }
// Return structured `xl.json`. // Return structured `xl.json`.
@ -291,12 +292,12 @@ func readXLMeta(disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1,
// Reads entire `xl.json`. // Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil { if err != nil {
return xlMetaV1{}, traceError(err) return xlMetaV1{}, errors2.Trace(err)
} }
// obtain xlMetaV1{} using `github.com/tidwall/gjson`. // obtain xlMetaV1{} using `github.com/tidwall/gjson`.
xlMeta, err = xlMetaV1UnmarshalJSON(xlMetaBuf) xlMeta, err = xlMetaV1UnmarshalJSON(xlMetaBuf)
if err != nil { if err != nil {
return xlMetaV1{}, traceError(err) return xlMetaV1{}, errors2.Trace(err)
} }
// Return structured `xl.json`. // Return structured `xl.json`.
return xlMeta, nil return xlMeta, nil
@ -392,13 +393,13 @@ var (
// returns error if totalSize is negative, partSize is 0, or partIndex is less than 1. // returns error if totalSize is negative, partSize is 0, or partIndex is less than 1.
func calculatePartSizeFromIdx(totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) { func calculatePartSizeFromIdx(totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
if totalSize < 0 { if totalSize < 0 {
return 0, traceError(errInvalidArgument) return 0, errors2.Trace(errInvalidArgument)
} }
if partSize == 0 { if partSize == 0 {
return 0, traceError(errPartSizeZero) return 0, errors2.Trace(errPartSizeZero)
} }
if partIndex < 1 { if partIndex < 1 {
return 0, traceError(errPartSizeIndex) return 0, errors2.Trace(errPartSizeIndex)
} }
if totalSize > 0 { if totalSize > 0 {
// Compute the total count of parts // Compute the total count of parts

View File

@ -25,6 +25,7 @@ import (
"testing" "testing"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/errors"
) )
// Tests calculating disk count. // Tests calculating disk count.
@ -91,11 +92,11 @@ func TestReduceErrs(t *testing.T) {
// Validates all the test cases for returning valid errors. // Validates all the test cases for returning valid errors.
for i, testCase := range testCases { for i, testCase := range testCases {
gotErr := reduceReadQuorumErrs(testCase.errs, testCase.ignoredErrs, 5) gotErr := reduceReadQuorumErrs(testCase.errs, testCase.ignoredErrs, 5)
if errorCause(gotErr) != testCase.err { if errors.Cause(gotErr) != testCase.err {
t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr) t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr)
} }
gotNewErr := reduceWriteQuorumErrs(testCase.errs, testCase.ignoredErrs, 6) gotNewErr := reduceWriteQuorumErrs(testCase.errs, testCase.ignoredErrs, 6)
if errorCause(gotNewErr) != errXLWriteQuorum { if errors.Cause(gotNewErr) != errXLWriteQuorum {
t.Errorf("Test %d : expected %s, got %s", i+1, errXLWriteQuorum, gotErr) t.Errorf("Test %d : expected %s, got %s", i+1, errXLWriteQuorum, gotErr)
} }
} }
@ -382,8 +383,8 @@ func TestGetPartSizeFromIdx(t *testing.T) {
if err == nil { if err == nil {
t.Errorf("Test %d: Expected to fail but passed. %s", i+1, err) t.Errorf("Test %d: Expected to fail but passed. %s", i+1, err)
} }
if err != nil && errorCause(err) != testCaseFailure.err { if err != nil && errors.Cause(err) != testCaseFailure.err {
t.Errorf("Test %d: Expected err %s, but got %s", i+1, testCaseFailure.err, errorCause(err)) t.Errorf("Test %d: Expected err %s, but got %s", i+1, testCaseFailure.err, errors.Cause(err))
} }
} }
} }

View File

@ -24,6 +24,7 @@ import (
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/objcache" "github.com/minio/minio/pkg/objcache"
) )
@ -197,7 +198,7 @@ func getDisksInfo(disks []StorageAPI) (disksInfo []disk.Info, onlineDisks int, o
info, err := storageDisk.DiskInfo() info, err := storageDisk.DiskInfo()
if err != nil { if err != nil {
errorIf(err, "Unable to fetch disk info for %#v", storageDisk) errorIf(err, "Unable to fetch disk info for %#v", storageDisk)
if isErr(err, baseErrs...) { if errors.IsErr(err, baseErrs...) {
offlineDisks++ offlineDisks++
continue continue
} }

View File

@ -84,7 +84,7 @@ GetObject() holds a read lock on `fs.json`.
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
rlk, err := fs.rwPool.Open(fsMetaPath) rlk, err := fs.rwPool.Open(fsMetaPath)
if err != nil { if err != nil {
return toObjectErr(traceError(err), bucket, object) return toObjectErr(errors.Trace(err), bucket, object)
} }
defer rlk.Close() defer rlk.Close()

View File

@ -88,7 +88,7 @@ GetObject() holds a read lock on `fs.json`.
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
rlk, err := fs.rwPool.Open(fsMetaPath) rlk, err := fs.rwPool.Open(fsMetaPath)
if err != nil { if err != nil {
return toObjectErr(traceError(err), bucket, object) return toObjectErr(errors.Trace(err), bucket, object)
} }
defer rlk.Close() defer rlk.Close()

154
pkg/errors/errors.go Normal file
View File

@ -0,0 +1,154 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package errors
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
var (
// Package path of the project.
pkgPath string
)
// Init - initialize package path.
func Init(gopath string, p string) {
pkgPath = filepath.Join(gopath, "src", p) + string(os.PathSeparator)
}
// stackInfo - Represents a stack frame in the stack trace.
type stackInfo struct {
Filename string `json:"fileName"` // File where error occurred
Line int `json:"line"` // Line where error occurred
Name string `json:"name"` // Name of the function where error occurred
}
// Error - error type containing cause and the stack trace.
type Error struct {
Cause error // Holds the cause error
stack []stackInfo // Stack trace info.
errs []error // Useful for XL to hold errors from all disks
}
// Implement error interface.
func (e Error) Error() string {
return e.Cause.Error()
}
// Stack - returns the stack trace as a slice of formatted strings.
func (e Error) Stack() []string {
var stack []string
for _, info := range e.stack {
stack = append(stack, fmt.Sprintf("%s:%d:%s()", info.Filename, info.Line, info.Name))
}
return stack
}
// Trace - return new Error type.
func Trace(e error, errs ...error) error {
// Error is nil, nothing to do, return nil.
if e == nil {
return nil
}
// Already a traced error, return it as is.
if _, ok := e.(*Error); ok {
return e
}
err := &Error{}
err.Cause = e
err.errs = errs
stack := make([]uintptr, 40)
length := runtime.Callers(2, stack)
if length > len(stack) {
length = len(stack)
}
stack = stack[:length]
for _, pc := range stack {
pc = pc - 1
fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc)
var suffixFound bool
for _, ignoreName := range []string{
"runtime.",
"testing.",
} {
if strings.HasPrefix(fn.Name(), ignoreName) {
suffixFound = true
break
}
}
if suffixFound {
continue
}
_, name := filepath.Split(fn.Name())
name = strings.SplitN(name, ".", 2)[1]
file = filepath.FromSlash(strings.TrimPrefix(filepath.ToSlash(file), filepath.ToSlash(pkgPath)))
err.stack = append(err.stack, stackInfo{
Filename: file,
Line: line,
Name: name,
})
}
return err
}
// Cause - Returns the underlying cause error.
func Cause(err error) error {
if e, ok := err.(*Error); ok {
err = e.Cause
}
return err
}
// Causes - Returns slice of underlying cause error.
func Causes(errs []error) (cerrs []error) {
for _, err := range errs {
cerrs = append(cerrs, Cause(err))
}
return cerrs
}
// IsErrIgnored returns whether given error is ignored or not.
func IsErrIgnored(err error, ignoredErrs ...error) bool {
return IsErr(err, ignoredErrs...)
}
// IsErr returns whether given error is exact error.
func IsErr(err error, errs ...error) bool {
err = Cause(err)
for _, exactErr := range errs {
if err == exactErr {
return true
}
}
return false
}
// Tracef behaves like fmt.Errorf but adds traces to the returned error.
func Tracef(format string, args ...interface{}) error {
return Trace(fmt.Errorf(format, args...))
}
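
Taken together, Trace, Cause, and Stack give call sites a wrapped error that still prints and compares like its cause, plus a readable stack trace. A minimal usage sketch, assuming Init is called with the project path as the server does at startup; the frame text in the comment is illustrative:

package main

import (
	"fmt"
	"go/build"

	"github.com/minio/minio/pkg/errors"
)

func main() {
	// Trim GOPATH-relative prefixes from reported file names.
	errors.Init(build.Default.GOPATH, "github.com/minio/minio")

	err := errors.Trace(fmt.Errorf("disk offline"))
	if terr, ok := err.(*errors.Error); ok {
		fmt.Println("cause:", errors.Cause(terr)) // prints "cause: disk offline"
		for _, frame := range terr.Stack() {
			fmt.Println(frame) // e.g. "main.go:17:main()"
		}
	}
}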

pkg/errors/errors_test.go (new file)

@@ -0,0 +1,120 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package errors

import (
	"fmt"
	"go/build"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
)

// Test trace errors.
func TestTrace(t *testing.T) {
	var errExpectedCause = fmt.Errorf("traceable error")
	var testCases = []struct {
		expectedCauseErr error
	}{
		{
			expectedCauseErr: nil,
		},
		{
			expectedCauseErr: errExpectedCause,
		},
		{
			expectedCauseErr: Trace(errExpectedCause),
		},
	}
	for i, testCase := range testCases {
		if err := Trace(testCase.expectedCauseErr); err != nil {
			if errGotCause := Cause(err); errGotCause != Cause(testCase.expectedCauseErr) {
				t.Errorf("Test: %d Expected %s, got %s", i+1, testCase.expectedCauseErr, errGotCause)
			}
		}
	}
}

// Test if IsErrIgnored works correctly.
func TestIsErrIgnored(t *testing.T) {
	var errIgnored = fmt.Errorf("ignored error")
	var testCases = []struct {
		err     error
		ignored bool
	}{
		{
			err:     nil,
			ignored: false,
		},
		{
			err:     errIgnored,
			ignored: true,
		},
		{
			err:     Trace(errIgnored),
			ignored: true,
		},
	}
	for i, testCase := range testCases {
		if ok := IsErrIgnored(testCase.err, errIgnored); ok != testCase.ignored {
			t.Errorf("Test: %d, Expected %t, got %t", i+1, testCase.ignored, ok)
		}
	}
}

// Tests if pkgPath is set properly by Init.
func TestInit(t *testing.T) {
	Init("/home/test/go", "test")
	if filepath.ToSlash(pkgPath) != "/home/test/go/src/test/" {
		t.Fatalf("Expected pkgPath to be \"/home/test/go/src/test/\", found %s", pkgPath)
	}
}

// Tests stack output.
func TestStack(t *testing.T) {
	Init(build.Default.GOPATH, "github.com/minio/minio")
	err := Trace(fmt.Errorf("traceable error"))
	if terr, ok := err.(*Error); ok {
		if !strings.HasSuffix(terr.Stack()[0], "TestStack()") {
			t.Errorf("Expected suffix \"TestStack()\", got %s", terr.Stack()[0])
		}
	}
	// Test if the cause error is returned properly with the underlying string.
	if err.Error() != "traceable error" {
		t.Errorf("Expected \"traceable error\", got %s", err.Error())
	}
}

// Tests converting error causes.
func TestErrCauses(t *testing.T) {
	errTraceableError := fmt.Errorf("traceable error")
	var errs = []error{
		errTraceableError,
		errTraceableError,
		errTraceableError,
	}
	var terrs []error
	for _, err := range errs {
		terrs = append(terrs, Trace(err))
	}
	cerrs := Causes(terrs)
	if !reflect.DeepEqual(errs, cerrs) {
		t.Errorf("Expected %#v, got %#v", errs, cerrs)
	}
}
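
The tests cover Trace, Cause, Causes, IsErrIgnored, Init, and the stack format; Tracef is the one entry point not exercised above. A short sketch of Tracef combined with Causes, the slice-unwrapping pattern the errs field comment anticipates for per-disk errors in XL; the messages are illustrative:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/errors"
)

func main() {
	// Tracef wraps the fmt.Errorf result in a traced error.
	errs := []error{
		errors.Tracef("disk %d: %s", 1, "offline"),
		errors.Tracef("disk %d: %s", 2, "faulty"),
	}

	// Causes unwraps a whole slice at once, handy where one error is
	// collected per disk.
	for _, cause := range errors.Causes(errs) {
		fmt.Println(cause) // "disk 1: offline", then "disk 2: faulty"
	}
}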