xl/fs: Split object layer into interface. (#1415)

This commit is contained in:
Harshavardhana 2016-04-29 14:24:10 -07:00 committed by Anand Babu (AB) Periasamy
parent 4d1b3d5e9a
commit 4e34e03dd4
61 changed files with 1815 additions and 1037 deletions

View File

@ -21,8 +21,6 @@ import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"regexp" "regexp"
"github.com/minio/minio/pkg/probe"
) )
// credential container for access and secret keys. // credential container for access and secret keys.
@ -52,19 +50,19 @@ var isValidAccessKey = regexp.MustCompile(`^[a-zA-Z0-9\\-\\.\\_\\~]{5,20}$`)
// mustGenAccessKeys - must generate access credentials. // mustGenAccessKeys - must generate access credentials.
func mustGenAccessKeys() (creds credential) { func mustGenAccessKeys() (creds credential) {
creds, err := genAccessKeys() creds, err := genAccessKeys()
fatalIf(err.Trace(), "Unable to generate access keys.", nil) fatalIf(err, "Unable to generate access keys.", nil)
return creds return creds
} }
// genAccessKeys - generate access credentials. // genAccessKeys - generate access credentials.
func genAccessKeys() (credential, *probe.Error) { func genAccessKeys() (credential, error) {
accessKeyID, err := genAccessKeyID() accessKeyID, err := genAccessKeyID()
if err != nil { if err != nil {
return credential{}, err.Trace() return credential{}, err
} }
secretAccessKey, err := genSecretAccessKey() secretAccessKey, err := genSecretAccessKey()
if err != nil { if err != nil {
return credential{}, err.Trace() return credential{}, err
} }
creds := credential{ creds := credential{
AccessKeyID: string(accessKeyID), AccessKeyID: string(accessKeyID),
@ -75,10 +73,10 @@ func genAccessKeys() (credential, *probe.Error) {
// genAccessKeyID - generate random alpha numeric value using only uppercase characters // genAccessKeyID - generate random alpha numeric value using only uppercase characters
// takes input as size in integer // takes input as size in integer
func genAccessKeyID() ([]byte, *probe.Error) { func genAccessKeyID() ([]byte, error) {
alpha := make([]byte, minioAccessID) alpha := make([]byte, minioAccessID)
if _, e := rand.Read(alpha); e != nil { if _, err := rand.Read(alpha); err != nil {
return nil, probe.NewError(e) return nil, err
} }
for i := 0; i < minioAccessID; i++ { for i := 0; i < minioAccessID; i++ {
alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))] alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))]
@ -87,10 +85,10 @@ func genAccessKeyID() ([]byte, *probe.Error) {
} }
// genSecretAccessKey - generate random base64 numeric value from a random seed. // genSecretAccessKey - generate random base64 numeric value from a random seed.
func genSecretAccessKey() ([]byte, *probe.Error) { func genSecretAccessKey() ([]byte, error) {
rb := make([]byte, minioSecretID) rb := make([]byte, minioSecretID)
if _, e := rand.Read(rb); e != nil { if _, err := rand.Read(rb); err != nil {
return nil, probe.NewError(e) return nil, err
} }
return []byte(base64.StdEncoding.EncodeToString(rb))[:minioSecretID], nil return []byte(base64.StdEncoding.EncodeToString(rb))[:minioSecretID], nil
} }

View File

@ -22,8 +22,6 @@ import (
"net/url" "net/url"
"os" "os"
"time" "time"
"github.com/minio/minio/pkg/probe"
) )
type accessLogHandler struct { type accessLogHandler struct {
@ -60,14 +58,14 @@ type LogMessage struct {
func (h *accessLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { func (h *accessLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
message, err := getLogMessage(w, req) message, err := getLogMessage(w, req)
fatalIf(err.Trace(), "Unable to extract http message.", nil) fatalIf(err, "Unable to extract http message.", nil)
_, e := h.accessLogFile.Write(message) _, err = h.accessLogFile.Write(message)
fatalIf(probe.NewError(e), "Writing to log file failed.", nil) fatalIf(err, "Writing to log file failed.", nil)
h.Handler.ServeHTTP(w, req) h.Handler.ServeHTTP(w, req)
} }
func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, *probe.Error) { func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, error) {
logMessage := &LogMessage{ logMessage := &LogMessage{
StartTime: time.Now().UTC(), StartTime: time.Now().UTC(),
} }
@ -103,9 +101,9 @@ func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, *probe.Err
// logMessage.HTTP.Request = req // logMessage.HTTP.Request = req
logMessage.Duration = time.Now().UTC().Sub(logMessage.StartTime) logMessage.Duration = time.Now().UTC().Sub(logMessage.StartTime)
js, e := json.Marshal(logMessage) js, err := json.Marshal(logMessage)
if e != nil { if err != nil {
return nil, probe.NewError(e) return nil, err
} }
js = append(js, byte('\n')) // append a new line js = append(js, byte('\n')) // append a new line
return js, nil return js, nil
@ -113,8 +111,8 @@ func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, *probe.Err
// setAccessLogHandler logs requests // setAccessLogHandler logs requests
func setAccessLogHandler(h http.Handler) http.Handler { func setAccessLogHandler(h http.Handler) http.Handler {
file, e := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) file, err := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
fatalIf(probe.NewError(e), "Unable to open access log.", nil) fatalIf(err, "Unable to open access log.", nil)
return &accessLogHandler{Handler: h, accessLogFile: file} return &accessLogHandler{Handler: h, accessLogFile: file}
} }

View File

@ -20,7 +20,7 @@ import router "github.com/gorilla/mux"
// objectAPIHandler implements and provides http handlers for S3 API. // objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct { type objectAPIHandlers struct {
ObjectAPI objectAPI ObjectAPI ObjectLayer
} }
// registerAPIRouter - registers S3 compatible APIs. // registerAPIRouter - registers S3 compatible APIs.

View File

@ -26,7 +26,6 @@ import (
"strings" "strings"
fastSha256 "github.com/minio/minio/pkg/crypto/sha256" fastSha256 "github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
) )
// Verify if request has JWT. // Verify if request has JWT.
@ -113,12 +112,12 @@ func sumMD5(data []byte) []byte {
// Verify if request has valid AWS Signature Version '4'. // Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(r *http.Request) (s3Error APIErrorCode) { func isReqAuthenticated(r *http.Request) (s3Error APIErrorCode) {
if r == nil { if r == nil {
errorIf(probe.NewError(errInvalidArgument), "HTTP request cannot be empty.", nil) errorIf(errInvalidArgument, "HTTP request cannot be empty.", nil)
return ErrInternalError return ErrInternalError
} }
payload, e := ioutil.ReadAll(r.Body) payload, err := ioutil.ReadAll(r.Body)
if e != nil { if err != nil {
errorIf(probe.NewError(e), "Unable to read HTTP body.", nil) errorIf(err, "Unable to read HTTP body.", nil)
return ErrInternalError return ErrInternalError
} }
// Verify Content-Md5, if payload is set. // Verify Content-Md5, if payload is set.

View File

@ -29,7 +29,6 @@ import (
"strings" "strings"
mux "github.com/gorilla/mux" mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/probe"
) )
// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
@ -37,8 +36,8 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
// Read saved bucket policy. // Read saved bucket policy.
policy, err := readBucketPolicy(bucket) policy, err := readBucketPolicy(bucket)
if err != nil { if err != nil {
errorIf(err.Trace(bucket), "GetBucketPolicy failed.", nil) errorIf(err, "GetBucketPolicy failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNotFound: case BucketNotFound:
return ErrNoSuchBucket return ErrNoSuchBucket
case BucketNameInvalid: case BucketNameInvalid:
@ -49,9 +48,9 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
} }
} }
// Parse the saved policy. // Parse the saved policy.
bucketPolicy, e := parseBucketPolicy(policy) bucketPolicy, err := parseBucketPolicy(policy)
if e != nil { if err != nil {
errorIf(probe.NewError(e), "Parse policy failed.", nil) errorIf(err, "Parse policy failed.", nil)
return ErrAccessDenied return ErrAccessDenied
} }
@ -90,8 +89,8 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
return return
} }
case authTypeSigned, authTypePresigned: case authTypeSigned, authTypePresigned:
payload, e := ioutil.ReadAll(r.Body) payload, err := ioutil.ReadAll(r.Body)
if e != nil { if err != nil {
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
@ -117,10 +116,9 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
} }
} }
_, err := api.ObjectAPI.GetBucketInfo(bucket) if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
if err != nil { errorIf(err, "GetBucketInfo failed.", nil)
errorIf(err.Trace(), "GetBucketInfo failed.", nil) switch err.(type) {
switch err.ToGoError().(type) {
case BucketNotFound: case BucketNotFound:
writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
case BucketNameInvalid: case BucketNameInvalid:
@ -181,26 +179,24 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
} }
if keyMarker != "" { if keyMarker != "" {
// Unescape keyMarker string // Unescape keyMarker string
keyMarkerUnescaped, e := url.QueryUnescape(keyMarker) keyMarkerUnescaped, err := url.QueryUnescape(keyMarker)
if e != nil { if err != nil {
if e != nil { // Return 'NoSuchKey' to indicate invalid marker key.
// Return 'NoSuchKey' to indicate invalid marker key. writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path) return
return }
} keyMarker = keyMarkerUnescaped
keyMarker = keyMarkerUnescaped // Marker not common with prefix is not implemented.
// Marker not common with prefix is not implemented. if !strings.HasPrefix(keyMarker, prefix) {
if !strings.HasPrefix(keyMarker, prefix) { writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path)
writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path) return
return
}
} }
} }
listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil { if err != nil {
errorIf(err.Trace(), "ListMultipartUploads failed.", nil) errorIf(err, "ListMultipartUploads failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNotFound: case BucketNotFound:
writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
default: default:
@ -259,8 +255,8 @@ func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.R
// If marker is set unescape. // If marker is set unescape.
if marker != "" { if marker != "" {
// Try to unescape marker. // Try to unescape marker.
markerUnescaped, e := url.QueryUnescape(marker) markerUnescaped, err := url.QueryUnescape(marker)
if e != nil { if err != nil {
// Return 'NoSuchKey' to indicate invalid marker key. // Return 'NoSuchKey' to indicate invalid marker key.
writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
return return
@ -284,7 +280,8 @@ func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.R
writeSuccessResponse(w, encodedSuccessResponse) writeSuccessResponse(w, encodedSuccessResponse)
return return
} }
switch err.ToGoError().(type) { errorIf(err, "ListObjects failed.", nil)
switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -292,7 +289,6 @@ func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.R
case ObjectNameInvalid: case ObjectNameInvalid:
writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
default: default:
errorIf(err.Trace(), "ListObjects failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
} }
} }
@ -347,8 +343,8 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
writeSuccessResponse(w, encodedSuccessResponse) writeSuccessResponse(w, encodedSuccessResponse)
return return
} }
errorIf(err.Trace(), "ListBuckets failed.", nil) errorIf(err, "ListBuckets failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case StorageInsufficientReadResources: case StorageInsufficientReadResources:
writeErrorResponse(w, r, ErrInsufficientReadResources, r.URL.Path) writeErrorResponse(w, r, ErrInsufficientReadResources, r.URL.Path)
default: default:
@ -398,16 +394,16 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteXMLBytes := make([]byte, r.ContentLength) deleteXMLBytes := make([]byte, r.ContentLength)
// Read incoming body XML bytes. // Read incoming body XML bytes.
_, e := io.ReadFull(r.Body, deleteXMLBytes) if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
if e != nil { errorIf(err, "DeleteMultipleObjects failed.", nil)
errorIf(probe.NewError(e), "DeleteMultipleObjects failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
// Unmarshal list of keys to be deleted. // Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{} deleteObjects := &DeleteObjectsRequest{}
if e := xml.Unmarshal(deleteXMLBytes, deleteObjects); e != nil { if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
errorIf(err, "DeleteMultipartObjects xml decoding failed.", nil)
writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
return return
} }
@ -422,8 +418,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
ObjectName: object.ObjectName, ObjectName: object.ObjectName,
}) })
} else { } else {
errorIf(err.Trace(object.ObjectName), "DeleteObject failed.", nil) errorIf(err, "DeleteObject failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
deleteErrors = append(deleteErrors, DeleteError{ deleteErrors = append(deleteErrors, DeleteError{
Code: errorCodeResponse[ErrInvalidBucketName].Code, Code: errorCodeResponse[ErrInvalidBucketName].Code,
@ -498,8 +494,8 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// Make bucket. // Make bucket.
err := api.ObjectAPI.MakeBucket(bucket) err := api.ObjectAPI.MakeBucket(bucket)
if err != nil { if err != nil {
errorIf(err.Trace(), "MakeBucket failed.", nil) errorIf(err, "MakeBucket failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketExists: case BucketExists:
@ -514,24 +510,25 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
writeSuccessResponse(w, nil) writeSuccessResponse(w, nil)
} }
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) { func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, error) {
/// HTML Form values /// HTML Form values
formValues := make(map[string]string) formValues := make(map[string]string)
filePart := new(bytes.Buffer) filePart := new(bytes.Buffer)
var e error var err error
for e == nil { for err == nil {
var part *multipart.Part var part *multipart.Part
part, e = reader.NextPart() part, err = reader.NextPart()
if part != nil { if part != nil {
if part.FileName() == "" { if part.FileName() == "" {
buffer, e := ioutil.ReadAll(part) var buffer []byte
if e != nil { buffer, err = ioutil.ReadAll(part)
return nil, nil, probe.NewError(e) if err != nil {
return nil, nil, err
} }
formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer) formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
} else { } else {
if _, e := io.Copy(filePart, part); e != nil { if _, err = io.Copy(filePart, part); err != nil {
return nil, nil, probe.NewError(e) return nil, nil, err
} }
} }
} }
@ -546,16 +543,16 @@ func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]stri
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) { func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
// Here the parameter is the size of the form data that should // Here the parameter is the size of the form data that should
// be loaded in memory, the remaining being put in temporary files. // be loaded in memory, the remaining being put in temporary files.
reader, e := r.MultipartReader() reader, err := r.MultipartReader()
if e != nil { if err != nil {
errorIf(probe.NewError(e), "Unable to initialize multipart reader.", nil) errorIf(err, "Unable to initialize multipart reader.", nil)
writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
return return
} }
fileBody, formValues, err := extractHTTPFormValues(reader) fileBody, formValues, err := extractHTTPFormValues(reader)
if err != nil { if err != nil {
errorIf(err.Trace(), "Unable to parse form values.", nil) errorIf(err, "Unable to parse form values.", nil)
writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
return return
} }
@ -575,8 +572,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
} }
md5Sum, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil) md5Sum, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil)
if err != nil { if err != nil {
errorIf(err.Trace(), "PutObject failed.", nil) errorIf(err, "PutObject failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case StorageFull: case StorageFull:
writeErrorResponse(w, r, ErrStorageFull, r.URL.Path) writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -626,10 +623,9 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
} }
} }
_, err := api.ObjectAPI.GetBucketInfo(bucket) if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
if err != nil { errorIf(err, "GetBucketInfo failed.", nil)
errorIf(err.Trace(), "GetBucketInfo failed.", nil) switch err.(type) {
switch err.ToGoError().(type) {
case BucketNotFound: case BucketNotFound:
writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
case BucketNameInvalid: case BucketNameInvalid:
@ -661,10 +657,9 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
} }
} }
err := api.ObjectAPI.DeleteBucket(bucket) if err := api.ObjectAPI.DeleteBucket(bucket); err != nil {
if err != nil { errorIf(err, "DeleteBucket failed.", nil)
errorIf(err.Trace(), "DeleteBucket failed.", nil) switch err.(type) {
switch err.ToGoError().(type) {
case BucketNotFound: case BucketNotFound:
writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
case BucketNotEmpty: case BucketNotEmpty:

View File

@ -25,7 +25,6 @@ import (
"strings" "strings"
mux "github.com/gorilla/mux" mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/probe"
) )
// maximum supported access policy size. // maximum supported access policy size.
@ -67,8 +66,8 @@ func bucketPolicyMatchStatement(action string, resource string, conditions map[s
func bucketPolicyActionMatch(action string, statement policyStatement) bool { func bucketPolicyActionMatch(action string, statement policyStatement) bool {
for _, policyAction := range statement.Actions { for _, policyAction := range statement.Actions {
// Policy action can be a regex, validate the action with matching string. // Policy action can be a regex, validate the action with matching string.
matched, e := regexp.MatchString(policyAction, action) matched, err := regexp.MatchString(policyAction, action)
fatalIf(probe.NewError(e), "Invalid pattern, please verify the pattern string.", nil) fatalIf(err, "Invalid pattern, please verify the pattern string.", nil)
if matched { if matched {
return true return true
} }
@ -79,8 +78,8 @@ func bucketPolicyActionMatch(action string, statement policyStatement) bool {
// Verify if given resource matches with policy statement. // Verify if given resource matches with policy statement.
func bucketPolicyResourceMatch(resource string, statement policyStatement) bool { func bucketPolicyResourceMatch(resource string, statement policyStatement) bool {
for _, presource := range statement.Resources { for _, presource := range statement.Resources {
matched, e := regexp.MatchString(presource, strings.TrimPrefix(resource, "/")) matched, err := regexp.MatchString(presource, strings.TrimPrefix(resource, "/"))
fatalIf(probe.NewError(e), "Invalid pattern, please verify the pattern string.", nil) fatalIf(err, "Invalid pattern, please verify the pattern string.", nil)
// For any path matches, we return quickly and the let the caller continue. // For any path matches, we return quickly and the let the caller continue.
if matched { if matched {
return true return true
@ -161,17 +160,17 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// Read access policy up to maxAccessPolicySize. // Read access policy up to maxAccessPolicySize.
// http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
// bucket policies are limited to 20KB in size, using a limit reader. // bucket policies are limited to 20KB in size, using a limit reader.
bucketPolicyBuf, e := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize)) bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
if e != nil { if err != nil {
errorIf(probe.NewError(e).Trace(bucket), "Reading policy failed.", nil) errorIf(err, "Reading policy failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
// Parse bucket policy. // Parse bucket policy.
bucketPolicy, e := parseBucketPolicy(bucketPolicyBuf) bucketPolicy, err := parseBucketPolicy(bucketPolicyBuf)
if e != nil { if err != nil {
errorIf(probe.NewError(e), "Unable to parse bucket policy.", nil) errorIf(err, "Unable to parse bucket policy.", nil)
writeErrorResponse(w, r, ErrInvalidPolicyDocument, r.URL.Path) writeErrorResponse(w, r, ErrInvalidPolicyDocument, r.URL.Path)
return return
} }
@ -183,10 +182,9 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
} }
// Save bucket policy. // Save bucket policy.
err := writeBucketPolicy(bucket, bucketPolicyBuf) if err := writeBucketPolicy(bucket, bucketPolicyBuf); err != nil {
if err != nil { errorIf(err, "SaveBucketPolicy failed.", nil)
errorIf(err.Trace(bucket, string(bucketPolicyBuf)), "SaveBucketPolicy failed.", nil) switch err.(type) {
switch err.ToGoError().(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
default: default:
@ -218,10 +216,9 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
} }
// Delete bucket access policy. // Delete bucket access policy.
err := removeBucketPolicy(bucket) if err := removeBucketPolicy(bucket); err != nil {
if err != nil { errorIf(err, "DeleteBucketPolicy failed.", nil)
errorIf(err.Trace(bucket), "DeleteBucketPolicy failed.", nil) switch err.(type) {
switch err.ToGoError().(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketPolicyNotFound: case BucketPolicyNotFound:
@ -257,8 +254,8 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
// Read bucket access policy. // Read bucket access policy.
p, err := readBucketPolicy(bucket) p, err := readBucketPolicy(bucket)
if err != nil { if err != nil {
errorIf(err.Trace(bucket), "GetBucketPolicy failed.", nil) errorIf(err, "GetBucketPolicy failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketPolicyNotFound: case BucketPolicyNotFound:

View File

@ -20,132 +20,115 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"github.com/minio/minio/pkg/probe"
) )
// getBucketsConfigPath - get buckets path. // getBucketsConfigPath - get buckets path.
func getBucketsConfigPath() (string, *probe.Error) { func getBucketsConfigPath() (string, error) {
configPath, err := getConfigPath() configPath, err := getConfigPath()
if err != nil { if err != nil {
return "", err.Trace() return "", err
} }
return filepath.Join(configPath, "buckets"), nil return filepath.Join(configPath, "buckets"), nil
} }
// createBucketsConfigPath - create buckets directory. // createBucketsConfigPath - create buckets directory.
func createBucketsConfigPath() *probe.Error { func createBucketsConfigPath() error {
bucketsConfigPath, err := getBucketsConfigPath() bucketsConfigPath, err := getBucketsConfigPath()
if err != nil { if err != nil {
return err return err
} }
if e := os.MkdirAll(bucketsConfigPath, 0700); e != nil { return os.MkdirAll(bucketsConfigPath, 0700)
return probe.NewError(e)
}
return nil
} }
// getBucketConfigPath - get bucket config path. // getBucketConfigPath - get bucket config path.
func getBucketConfigPath(bucket string) (string, *probe.Error) { func getBucketConfigPath(bucket string) (string, error) {
bucketsConfigPath, err := getBucketsConfigPath() bucketsConfigPath, err := getBucketsConfigPath()
if err != nil { if err != nil {
return "", err.Trace() return "", err
} }
return filepath.Join(bucketsConfigPath, bucket), nil return filepath.Join(bucketsConfigPath, bucket), nil
} }
// createBucketConfigPath - create bucket config directory. // createBucketConfigPath - create bucket config directory.
func createBucketConfigPath(bucket string) *probe.Error { func createBucketConfigPath(bucket string) error {
bucketConfigPath, err := getBucketConfigPath(bucket) bucketConfigPath, err := getBucketConfigPath(bucket)
if err != nil { if err != nil {
return err return err
} }
if e := os.MkdirAll(bucketConfigPath, 0700); e != nil { return os.MkdirAll(bucketConfigPath, 0700)
return probe.NewError(e)
}
return nil
} }
// readBucketPolicy - read bucket policy. // readBucketPolicy - read bucket policy.
func readBucketPolicy(bucket string) ([]byte, *probe.Error) { func readBucketPolicy(bucket string) ([]byte, error) {
// Verify bucket is valid. // Verify bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return nil, probe.NewError(BucketNameInvalid{Bucket: bucket}) return nil, BucketNameInvalid{Bucket: bucket}
} }
bucketConfigPath, err := getBucketConfigPath(bucket) bucketConfigPath, err := getBucketConfigPath(bucket)
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
// Get policy file. // Get policy file.
bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json") bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
if _, e := os.Stat(bucketPolicyFile); e != nil { if _, err = os.Stat(bucketPolicyFile); err != nil {
if os.IsNotExist(e) { if os.IsNotExist(err) {
return nil, probe.NewError(BucketPolicyNotFound{Bucket: bucket}) return nil, BucketPolicyNotFound{Bucket: bucket}
} }
return nil, probe.NewError(e) return nil, err
} }
return ioutil.ReadFile(bucketPolicyFile)
accessPolicyBytes, e := ioutil.ReadFile(bucketPolicyFile)
if e != nil {
return nil, probe.NewError(e)
}
return accessPolicyBytes, nil
} }
// removeBucketPolicy - remove bucket policy. // removeBucketPolicy - remove bucket policy.
func removeBucketPolicy(bucket string) *probe.Error { func removeBucketPolicy(bucket string) error {
// Verify bucket is valid. // Verify bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket}) return BucketNameInvalid{Bucket: bucket}
} }
bucketConfigPath, err := getBucketConfigPath(bucket) bucketConfigPath, err := getBucketConfigPath(bucket)
if err != nil { if err != nil {
return err.Trace(bucket) return err
} }
// Get policy file. // Get policy file.
bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json") bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
if _, e := os.Stat(bucketPolicyFile); e != nil { if _, err = os.Stat(bucketPolicyFile); err != nil {
if os.IsNotExist(e) { if os.IsNotExist(err) {
return probe.NewError(BucketPolicyNotFound{Bucket: bucket}) return BucketPolicyNotFound{Bucket: bucket}
} }
return probe.NewError(e) return err
} }
return nil return nil
} }
// writeBucketPolicy - save bucket policy. // writeBucketPolicy - save bucket policy.
func writeBucketPolicy(bucket string, accessPolicyBytes []byte) *probe.Error { func writeBucketPolicy(bucket string, accessPolicyBytes []byte) error {
// Verify if bucket path legal // Verify if bucket path legal
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket}) return BucketNameInvalid{Bucket: bucket}
} }
// Create bucket config path. // Create bucket config path.
if err := createBucketConfigPath(bucket); err != nil { if err := createBucketConfigPath(bucket); err != nil {
return err.Trace() return err
} }
bucketConfigPath, err := getBucketConfigPath(bucket) bucketConfigPath, err := getBucketConfigPath(bucket)
if err != nil { if err != nil {
return err.Trace() return err
} }
// Get policy file. // Get policy file.
bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json") bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
if _, e := os.Stat(bucketPolicyFile); e != nil { if _, err := os.Stat(bucketPolicyFile); err != nil {
if !os.IsNotExist(e) { if !os.IsNotExist(err) {
return probe.NewError(e) return err
} }
} }
// Write bucket policy. // Write bucket policy.
if e := ioutil.WriteFile(bucketPolicyFile, accessPolicyBytes, 0600); e != nil { return ioutil.WriteFile(bucketPolicyFile, accessPolicyBytes, 0600)
return probe.NewError(e)
}
return nil
} }

View File

@ -21,26 +21,22 @@ import (
"path/filepath" "path/filepath"
"github.com/minio/go-homedir" "github.com/minio/go-homedir"
"github.com/minio/minio/pkg/probe"
) )
// createCertsPath create certs path. // createCertsPath create certs path.
func createCertsPath() *probe.Error { func createCertsPath() error {
certsPath, err := getCertsPath() certsPath, err := getCertsPath()
if err != nil { if err != nil {
return err.Trace() return err
} }
if err := os.MkdirAll(certsPath, 0700); err != nil { return os.MkdirAll(certsPath, 0700)
return probe.NewError(err)
}
return nil
} }
// getCertsPath get certs path. // getCertsPath get certs path.
func getCertsPath() (string, *probe.Error) { func getCertsPath() (string, error) {
homeDir, e := homedir.Dir() homeDir, err := homedir.Dir()
if e != nil { if err != nil {
return "", probe.NewError(e) return "", err
} }
certsPath := filepath.Join(homeDir, globalMinioCertsDir) certsPath := filepath.Join(homeDir, globalMinioCertsDir)
return certsPath, nil return certsPath, nil
@ -49,7 +45,7 @@ func getCertsPath() (string, *probe.Error) {
// mustGetCertsPath must get certs path. // mustGetCertsPath must get certs path.
func mustGetCertsPath() string { func mustGetCertsPath() string {
certsPath, err := getCertsPath() certsPath, err := getCertsPath()
fatalIf(err.Trace(), "Unable to retrieve certs path.", nil) fatalIf(err, "Unable to retrieve certs path.", nil)
return certsPath return certsPath
} }

View File

@ -22,7 +22,6 @@ import (
"path/filepath" "path/filepath"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick" "github.com/minio/minio/pkg/quick"
) )
@ -38,35 +37,31 @@ func migrateConfig() {
// Version '1' is not supported anymore and deprecated, safe to delete. // Version '1' is not supported anymore and deprecated, safe to delete.
func purgeV1() { func purgeV1() {
cv1, err := loadConfigV1() cv1, err := loadConfigV1()
if err != nil { if err != nil && os.IsNotExist(err) {
if os.IsNotExist(err.ToGoError()) { return
return
}
} }
fatalIf(err.Trace(), "Unable to load config version 1.", nil) fatalIf(err, "Unable to load config version 1.", nil)
if cv1.Version == "1" { if cv1.Version == "1" {
console.Println("Unsupported config version 1 found, removed successfully.") console.Println("Unsupported config version 1 found, removed successfully.")
/// Purge old fsUsers.json file /// Purge old fsUsers.json file
configPath, err := getConfigPath() configPath, err := getConfigPath()
fatalIf(err.Trace(), "Unable to retrieve config path.", nil) fatalIf(err, "Unable to retrieve config path.", nil)
configFile := filepath.Join(configPath, "fsUsers.json") configFile := filepath.Join(configPath, "fsUsers.json")
os.RemoveAll(configFile) os.RemoveAll(configFile)
} }
fatalIf(probe.NewError(errors.New("")), "Unexpected version found "+cv1.Version+", cannot migrate.", nil) fatalIf(errors.New(""), "Unexpected version found "+cv1.Version+", cannot migrate.", nil)
} }
// Version '2' to '3' config migration adds new fields and re-orders // Version '2' to '3' config migration adds new fields and re-orders
// previous fields. Simplifies config for future additions. // previous fields. Simplifies config for future additions.
func migrateV2ToV3() { func migrateV2ToV3() {
cv2, err := loadConfigV2() cv2, err := loadConfigV2()
if err != nil { if err != nil && os.IsNotExist(err) {
if os.IsNotExist(err.ToGoError()) { return
return
}
} }
fatalIf(err.Trace(), "Unable to load config version 2.", nil) fatalIf(err, "Unable to load config version 2.", nil)
if cv2.Version != "2" { if cv2.Version != "2" {
return return
} }
@ -99,14 +94,14 @@ func migrateV2ToV3() {
srvConfig.Logger.Syslog = slogger srvConfig.Logger.Syslog = slogger
qc, err := quick.New(srvConfig) qc, err := quick.New(srvConfig)
fatalIf(err.Trace(), "Unable to initialize config.", nil) fatalIf(err, "Unable to initialize config.", nil)
configFile, err := getConfigFile() configFile, err := getConfigFile()
fatalIf(err.Trace(), "Unable to get config file.", nil) fatalIf(err, "Unable to get config file.", nil)
// Migrate the config. // Migrate the config.
err = qc.Save(configFile) err = qc.Save(configFile)
fatalIf(err.Trace(), "Migrating from version "+cv2.Version+" to "+srvConfig.Version+" failed.", nil) fatalIf(err, "Migrating from version "+cv2.Version+" to "+srvConfig.Version+" failed.", nil)
console.Println("Migration from version " + cv2.Version + " to " + srvConfig.Version + " completed successfully.") console.Println("Migration from version " + cv2.Version + " to " + srvConfig.Version + " completed successfully.")
} }
@ -116,12 +111,10 @@ func migrateV2ToV3() {
// the config for future additions. // the config for future additions.
func migrateV3ToV4() { func migrateV3ToV4() {
cv3, err := loadConfigV3() cv3, err := loadConfigV3()
if err != nil { if err != nil && os.IsNotExist(err) {
if os.IsNotExist(err.ToGoError()) { return
return
}
} }
fatalIf(err.Trace(), "Unable to load config version 3.", nil) fatalIf(err, "Unable to load config version 3.", nil)
if cv3.Version != "3" { if cv3.Version != "3" {
return return
} }
@ -136,12 +129,12 @@ func migrateV3ToV4() {
srvConfig.Logger.Syslog = cv3.Logger.Syslog srvConfig.Logger.Syslog = cv3.Logger.Syslog
qc, err := quick.New(srvConfig) qc, err := quick.New(srvConfig)
fatalIf(err.Trace(), "Unable to initialize the quick config.", nil) fatalIf(err, "Unable to initialize the quick config.", nil)
configFile, err := getConfigFile() configFile, err := getConfigFile()
fatalIf(err.Trace(), "Unable to get config file.", nil) fatalIf(err, "Unable to get config file.", nil)
err = qc.Save(configFile) err = qc.Save(configFile)
fatalIf(err.Trace(), "Migrating from version "+cv3.Version+" to "+srvConfig.Version+" failed.", nil) fatalIf(err, "Migrating from version "+cv3.Version+" to "+srvConfig.Version+" failed.", nil)
console.Println("Migration from version " + cv3.Version + " to " + srvConfig.Version + " completed successfully.") console.Println("Migration from version " + cv3.Version + " to " + srvConfig.Version + " completed successfully.")
} }

View File

@ -4,7 +4,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick" "github.com/minio/minio/pkg/quick"
) )
@ -16,23 +15,23 @@ type configV1 struct {
} }
// loadConfigV1 load config // loadConfigV1 load config
func loadConfigV1() (*configV1, *probe.Error) { func loadConfigV1() (*configV1, error) {
configPath, err := getConfigPath() configPath, err := getConfigPath()
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
configFile := filepath.Join(configPath, "fsUsers.json") configFile := filepath.Join(configPath, "fsUsers.json")
if _, err := os.Stat(configFile); err != nil { if _, err = os.Stat(configFile); err != nil {
return nil, probe.NewError(err) return nil, err
} }
a := &configV1{} a := &configV1{}
a.Version = "1" a.Version = "1"
qc, err := quick.New(a) qc, err := quick.New(a)
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
if err := qc.Load(configFile); err != nil { if err := qc.Load(configFile); err != nil {
return nil, err.Trace() return nil, err
} }
return qc.Data().(*configV1), nil return qc.Data().(*configV1), nil
} }
@ -60,22 +59,22 @@ type configV2 struct {
} }
// loadConfigV2 load config version '2'. // loadConfigV2 load config version '2'.
func loadConfigV2() (*configV2, *probe.Error) { func loadConfigV2() (*configV2, error) {
configFile, err := getConfigFile() configFile, err := getConfigFile()
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
if _, err := os.Stat(configFile); err != nil { if _, err = os.Stat(configFile); err != nil {
return nil, probe.NewError(err) return nil, err
} }
a := &configV2{} a := &configV2{}
a.Version = "2" a.Version = "2"
qc, err := quick.New(a) qc, err := quick.New(a)
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
if err := qc.Load(configFile); err != nil { if err := qc.Load(configFile); err != nil {
return nil, err.Trace() return nil, err
} }
return qc.Data().(*configV2), nil return qc.Data().(*configV2), nil
} }
@ -127,22 +126,22 @@ type configV3 struct {
} }
// loadConfigV3 load config version '3'. // loadConfigV3 load config version '3'.
func loadConfigV3() (*configV3, *probe.Error) { func loadConfigV3() (*configV3, error) {
configFile, err := getConfigFile() configFile, err := getConfigFile()
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
if _, err := os.Stat(configFile); err != nil { if _, err = os.Stat(configFile); err != nil {
return nil, probe.NewError(err) return nil, err
} }
a := &configV3{} a := &configV3{}
a.Version = "3" a.Version = "3"
qc, err := quick.New(a) qc, err := quick.New(a)
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
if err := qc.Load(configFile); err != nil { if err := qc.Load(configFile); err != nil {
return nil, err.Trace() return nil, err
} }
return qc.Data().(*configV3), nil return qc.Data().(*configV3), nil
} }

View File

@ -20,7 +20,6 @@ import (
"os" "os"
"sync" "sync"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick" "github.com/minio/minio/pkg/quick"
) )
@ -40,7 +39,7 @@ type serverConfigV4 struct {
} }
// initConfig - initialize server config. config version (called only once). // initConfig - initialize server config. config version (called only once).
func initConfig() *probe.Error { func initConfig() error {
if !isConfigFileExists() { if !isConfigFileExists() {
srvCfg := &serverConfigV4{} srvCfg := &serverConfigV4{}
srvCfg.Version = globalMinioConfigVersion srvCfg.Version = globalMinioConfigVersion
@ -55,41 +54,37 @@ func initConfig() *probe.Error {
// Create config path. // Create config path.
err := createConfigPath() err := createConfigPath()
if err != nil { if err != nil {
return err.Trace() return err
} }
// Create certs path. // Create certs path.
err = createCertsPath() err = createCertsPath()
if err != nil { if err != nil {
return err.Trace() return err
} }
// Save the new config globally. // Save the new config globally.
serverConfig = srvCfg serverConfig = srvCfg
// Save config into file. // Save config into file.
err = serverConfig.Save() return serverConfig.Save()
if err != nil {
return err.Trace()
}
return nil
} }
configFile, err := getConfigFile() configFile, err := getConfigFile()
if err != nil { if err != nil {
return err.Trace() return err
} }
if _, e := os.Stat(configFile); err != nil { if _, err = os.Stat(configFile); err != nil {
return probe.NewError(e) return err
} }
srvCfg := &serverConfigV4{} srvCfg := &serverConfigV4{}
srvCfg.Version = globalMinioConfigVersion srvCfg.Version = globalMinioConfigVersion
srvCfg.rwMutex = &sync.RWMutex{} srvCfg.rwMutex = &sync.RWMutex{}
qc, err := quick.New(srvCfg) qc, err := quick.New(srvCfg)
if err != nil { if err != nil {
return err.Trace() return err
} }
if err := qc.Load(configFile); err != nil { if err := qc.Load(configFile); err != nil {
return err.Trace() return err
} }
// Save the loaded config globally. // Save the loaded config globally.
serverConfig = qc.Data().(*serverConfigV4) serverConfig = qc.Data().(*serverConfigV4)
@ -181,27 +176,22 @@ func (s serverConfigV4) GetCredential() credential {
} }
// Save config. // Save config.
func (s serverConfigV4) Save() *probe.Error { func (s serverConfigV4) Save() error {
s.rwMutex.RLock() s.rwMutex.RLock()
defer s.rwMutex.RUnlock() defer s.rwMutex.RUnlock()
// get config file. // get config file.
configFile, err := getConfigFile() configFile, err := getConfigFile()
if err != nil { if err != nil {
return err.Trace() return err
} }
// initialize quick. // initialize quick.
qc, err := quick.New(&s) qc, err := quick.New(&s)
if err != nil { if err != nil {
return err.Trace() return err
} }
// Save config file. // Save config file.
if err := qc.Save(configFile); err != nil { return qc.Save(configFile)
return err.Trace()
}
// Return success.
return nil
} }

View File

@ -21,7 +21,6 @@ import (
"path/filepath" "path/filepath"
"github.com/minio/go-homedir" "github.com/minio/go-homedir"
"github.com/minio/minio/pkg/probe"
) )
// configPath for custom config path only for testing purposes // configPath for custom config path only for testing purposes
@ -33,13 +32,13 @@ func setGlobalConfigPath(configPath string) {
} }
// getConfigPath get server config path // getConfigPath get server config path
func getConfigPath() (string, *probe.Error) { func getConfigPath() (string, error) {
if customConfigPath != "" { if customConfigPath != "" {
return customConfigPath, nil return customConfigPath, nil
} }
homeDir, e := homedir.Dir() homeDir, err := homedir.Dir()
if e != nil { if err != nil {
return "", probe.NewError(e) return "", err
} }
configPath := filepath.Join(homeDir, globalMinioConfigDir) configPath := filepath.Join(homeDir, globalMinioConfigDir)
return configPath, nil return configPath, nil
@ -48,27 +47,24 @@ func getConfigPath() (string, *probe.Error) {
// mustGetConfigPath must get server config path. // mustGetConfigPath must get server config path.
func mustGetConfigPath() string { func mustGetConfigPath() string {
configPath, err := getConfigPath() configPath, err := getConfigPath()
fatalIf(err.Trace(), "Unable to get config path.", nil) fatalIf(err, "Unable to get config path.", nil)
return configPath return configPath
} }
// createConfigPath create server config path. // createConfigPath create server config path.
func createConfigPath() *probe.Error { func createConfigPath() error {
configPath, err := getConfigPath() configPath, err := getConfigPath()
if err != nil { if err != nil {
return err.Trace() return err
} }
if err := os.MkdirAll(configPath, 0700); err != nil { return os.MkdirAll(configPath, 0700)
return probe.NewError(err)
}
return nil
} }
// isConfigFileExists - returns true if config file exists. // isConfigFileExists - returns true if config file exists.
func isConfigFileExists() bool { func isConfigFileExists() bool {
st, e := os.Stat(mustGetConfigFile()) st, err := os.Stat(mustGetConfigFile())
// If file exists and is regular return true. // If file exists and is regular return true.
if e == nil && st.Mode().IsRegular() { if err == nil && st.Mode().IsRegular() {
return true return true
} }
return false return false
@ -77,16 +73,16 @@ func isConfigFileExists() bool {
// mustGetConfigFile must get server config file. // mustGetConfigFile must get server config file.
func mustGetConfigFile() string { func mustGetConfigFile() string {
configFile, err := getConfigFile() configFile, err := getConfigFile()
fatalIf(err.Trace(), "Unable to get config file.", nil) fatalIf(err, "Unable to get config file.", nil)
return configFile return configFile
} }
// getConfigFile get server config file. // getConfigFile get server config file.
func getConfigFile() (string, *probe.Error) { func getConfigFile() (string, error) {
configPath, err := getConfigPath() configPath, err := getConfigPath()
if err != nil { if err != nil {
return "", err.Trace() return "", err
} }
return filepath.Join(configPath, globalMinioConfigFile), nil return filepath.Join(configPath, globalMinioConfigFile), nil
} }

View File

@ -27,28 +27,22 @@ import (
"strings" "strings"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
"github.com/skyrings/skyring-common/tools/uuid" "github.com/skyrings/skyring-common/tools/uuid"
) )
const (
// Minio meta volume.
minioMetaVolume = ".minio"
)
// listLeafEntries - lists all entries if a given prefixPath is a leaf // listLeafEntries - lists all entries if a given prefixPath is a leaf
// directory, returns error if any - returns empty list if prefixPath // directory, returns error if any - returns empty list if prefixPath
// is not a leaf directory. // is not a leaf directory.
func (o objectAPI) listLeafEntries(prefixPath string) (entries []FileInfo, e error) { func (fs fsObjects) listLeafEntries(prefixPath string) (entries []FileInfo, e error) {
var markerPath string var markerPath string
for { for {
fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, false, 1000) fileInfos, eof, err := fs.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, false, 1000)
if e != nil { if err != nil {
log.WithFields(logrus.Fields{ log.WithFields(logrus.Fields{
"prefixPath": prefixPath, "prefixPath": prefixPath,
"markerPath": markerPath, "markerPath": markerPath,
}).Errorf("%s", e) }).Errorf("%s", err)
return nil, e return nil, err
} }
for _, fileInfo := range fileInfos { for _, fileInfo := range fileInfos {
// Set marker for next batch of ListFiles. // Set marker for next batch of ListFiles.
@ -72,7 +66,7 @@ func (o objectAPI) listLeafEntries(prefixPath string) (entries []FileInfo, e err
} }
// listMetaVolumeFiles - list all files at a given prefix inside minioMetaVolume. // listMetaVolumeFiles - list all files at a given prefix inside minioMetaVolume.
func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, recursive bool, maxKeys int) (allFileInfos []FileInfo, eof bool, e error) { func (fs fsObjects) listMetaVolumeFiles(prefixPath string, markerPath string, recursive bool, maxKeys int) (allFileInfos []FileInfo, eof bool, err error) {
// newMaxKeys tracks the size of entries which are going to be // newMaxKeys tracks the size of entries which are going to be
// returned back. // returned back.
var newMaxKeys int var newMaxKeys int
@ -82,15 +76,15 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
for { for {
var fileInfos []FileInfo var fileInfos []FileInfo
// List files up to maxKeys-newMaxKeys, since we are skipping entries for special files. // List files up to maxKeys-newMaxKeys, since we are skipping entries for special files.
fileInfos, eof, e = o.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, recursive, maxKeys-newMaxKeys) fileInfos, eof, err = fs.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, recursive, maxKeys-newMaxKeys)
if e != nil { if err != nil {
log.WithFields(logrus.Fields{ log.WithFields(logrus.Fields{
"prefixPath": prefixPath, "prefixPath": prefixPath,
"markerPath": markerPath, "markerPath": markerPath,
"recursive": recursive, "recursive": recursive,
"maxKeys": maxKeys, "maxKeys": maxKeys,
}).Errorf("%s", e) }).Errorf("%s", err)
return nil, true, e return nil, true, err
} }
// Loop through and validate individual file. // Loop through and validate individual file.
for _, fi := range fileInfos { for _, fi := range fileInfos {
@ -99,20 +93,18 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
// List all the entries if fi.Name is a leaf directory, if // List all the entries if fi.Name is a leaf directory, if
// fi.Name is not a leaf directory then the resulting // fi.Name is not a leaf directory then the resulting
// entries are empty. // entries are empty.
entries, e = o.listLeafEntries(fi.Name) entries, err = fs.listLeafEntries(fi.Name)
if e != nil { if err != nil {
log.WithFields(logrus.Fields{ log.WithFields(logrus.Fields{
"prefixPath": fi.Name, "prefixPath": fi.Name,
}).Errorf("%s", e) }).Errorf("%s", err)
return nil, false, e return nil, false, err
} }
} }
// Set markerPath for next batch of listing. // Set markerPath for next batch of listing.
markerPath = fi.Name markerPath = fi.Name
if len(entries) > 0 { if len(entries) > 0 {
// We reach here for non-recursive case and a leaf entry. // We reach here for non-recursive case and a leaf entry.
for _, entry := range entries { for _, entry := range entries {
allFileInfos = append(allFileInfos, entry) allFileInfos = append(allFileInfos, entry)
newMaxKeys++ newMaxKeys++
@ -121,24 +113,22 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
if newMaxKeys == maxKeys { if newMaxKeys == maxKeys {
// Return values: // Return values:
// allFileInfos : "maxKeys" number of entries. // allFileInfos : "maxKeys" number of entries.
// eof : eof returned by o.storage.ListFiles() // eof : eof returned by fs.storage.ListFiles()
// error : nil // error : nil
return return
} }
} }
continue } else {
} // We reach here for a non-recursive case non-leaf entry
// OR recursive case with fi.Name matching pattern bucket/object/uploadID[.partNum.md5sum]
// We reach here for a non-recursive case non-leaf entry if !fi.Mode.IsDir() { // Do not skip non-recursive case directory entries.
// OR recursive case with fi.Name matching pattern bucket/object/uploadID[.partNum.md5sum] // Skip files matching pattern bucket/object/uploadID.partNum.md5sum
// and retain files matching pattern bucket/object/uploadID
if !fi.Mode.IsDir() { // Do not skip non-recursive case directory entries. specialFile := path.Base(fi.Name)
// Skip files matching pattern bucket/object/uploadID.partNum.md5sum if strings.Contains(specialFile, ".") {
// and retain files matching pattern bucket/object/uploadID // Contains partnumber and md5sum info, skip this.
specialFile := path.Base(fi.Name) continue
if strings.Contains(specialFile, ".") { }
// Contains partnumber and md5sum info, skip this.
continue
} }
} }
allFileInfos = append(allFileInfos, fi) allFileInfos = append(allFileInfos, fi)
@ -148,7 +138,7 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
if newMaxKeys == maxKeys { if newMaxKeys == maxKeys {
// Return values: // Return values:
// allFileInfos : "maxKeys" number of entries. // allFileInfos : "maxKeys" number of entries.
// eof : eof returned by o.storage.ListFiles() // eof : eof returned by fs.storage.ListFiles()
// error : nil // error : nil
return return
} }
@ -164,41 +154,41 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
} }
// ListMultipartUploads - list multipart uploads. // ListMultipartUploads - list multipart uploads.
func (o objectAPI) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error) { func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
result := ListMultipartsInfo{} result := ListMultipartsInfo{}
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ListMultipartsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) return ListMultipartsInfo{}, (BucketNameInvalid{Bucket: bucket})
} }
if !IsValidObjectPrefix(prefix) { if !IsValidObjectPrefix(prefix) {
return ListMultipartsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: prefix}) return ListMultipartsInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: prefix})
} }
// Verify if delimiter is anything other than '/', which we do not support. // Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator { if delimiter != "" && delimiter != slashSeparator {
return ListMultipartsInfo{}, probe.NewError(UnsupportedDelimiter{ return ListMultipartsInfo{}, (UnsupportedDelimiter{
Delimiter: delimiter, Delimiter: delimiter,
}) })
} }
// Verify if marker has prefix. // Verify if marker has prefix.
if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) { if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
return ListMultipartsInfo{}, probe.NewError(InvalidMarkerPrefixCombination{ return ListMultipartsInfo{}, (InvalidMarkerPrefixCombination{
Marker: keyMarker, Marker: keyMarker,
Prefix: prefix, Prefix: prefix,
}) })
} }
if uploadIDMarker != "" { if uploadIDMarker != "" {
if strings.HasSuffix(keyMarker, slashSeparator) { if strings.HasSuffix(keyMarker, slashSeparator) {
return result, probe.NewError(InvalidUploadIDKeyCombination{ return result, (InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker, UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker, KeyMarker: keyMarker,
}) })
} }
id, e := uuid.Parse(uploadIDMarker) id, err := uuid.Parse(uploadIDMarker)
if e != nil { if err != nil {
return result, probe.NewError(e) return result, err
} }
if id.IsZero() { if id.IsZero() {
return result, probe.NewError(MalformedUploadID{ return result, (MalformedUploadID{
UploadID: uploadIDMarker, UploadID: uploadIDMarker,
}) })
} }
@ -220,15 +210,15 @@ func (o objectAPI) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarke
keyMarkerPath = pathJoin(pathJoin(bucket, keyMarker), uploadIDMarker) keyMarkerPath = pathJoin(pathJoin(bucket, keyMarker), uploadIDMarker)
} }
// List all the multipart files at prefixPath, starting with marker keyMarkerPath. // List all the multipart files at prefixPath, starting with marker keyMarkerPath.
fileInfos, eof, e := o.listMetaVolumeFiles(prefixPath, keyMarkerPath, recursive, maxUploads) fileInfos, eof, err := fs.listMetaVolumeFiles(prefixPath, keyMarkerPath, recursive, maxUploads)
if e != nil { if err != nil {
log.WithFields(logrus.Fields{ log.WithFields(logrus.Fields{
"prefixPath": prefixPath, "prefixPath": prefixPath,
"markerPath": keyMarkerPath, "markerPath": keyMarkerPath,
"recursive": recursive, "recursive": recursive,
"maxUploads": maxUploads, "maxUploads": maxUploads,
}).Errorf("listMetaVolumeFiles failed with %s", e) }).Errorf("listMetaVolumeFiles failed with %s", err)
return ListMultipartsInfo{}, probe.NewError(e) return ListMultipartsInfo{}, err
} }
// Loop through all the received files fill in the multiparts result. // Loop through all the received files fill in the multiparts result.
@ -260,55 +250,50 @@ func (o objectAPI) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarke
return result, nil return result, nil
} }
func (o objectAPI) NewMultipartUpload(bucket, object string) (string, *probe.Error) { func (fs fsObjects) NewMultipartUpload(bucket, object string) (string, error) {
// Verify if bucket name is valid. // Verify if bucket name is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket}) return "", (BucketNameInvalid{Bucket: bucket})
} }
// Verify if object name is valid. // Verify if object name is valid.
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object}) return "", ObjectNameInvalid{Bucket: bucket, Object: object}
} }
// Verify whether the bucket exists. // Verify whether the bucket exists.
isExist, err := o.isBucketExist(bucket) if isExist, err := isBucketExist(fs.storage, bucket); err != nil {
if err != nil { return "", err
return "", probe.NewError(err) } else if !isExist {
} return "", BucketNotFound{Bucket: bucket}
if !isExist {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
} }
if _, e := o.storage.StatVol(minioMetaVolume); e != nil { if _, err := fs.storage.StatVol(minioMetaVolume); err != nil {
if e == errVolumeNotFound { if err == errVolumeNotFound {
e = o.storage.MakeVol(minioMetaVolume) err = fs.storage.MakeVol(minioMetaVolume)
if e != nil { if err != nil {
if e == errDiskFull { return "", toObjectErr(err)
return "", probe.NewError(StorageFull{})
}
return "", probe.NewError(e)
} }
} }
} }
for { for {
uuid, e := uuid.New() uuid, err := uuid.New()
if e != nil { if err != nil {
return "", probe.NewError(e) return "", err
} }
uploadID := uuid.String() uploadID := uuid.String()
uploadIDPath := path.Join(bucket, object, uploadID) uploadIDPath := path.Join(bucket, object, uploadID)
if _, e = o.storage.StatFile(minioMetaVolume, uploadIDPath); e != nil { if _, err = fs.storage.StatFile(minioMetaVolume, uploadIDPath); err != nil {
if e != errFileNotFound { if err != errFileNotFound {
return "", probe.NewError(toObjectErr(e, minioMetaVolume, uploadIDPath)) return "", (toObjectErr(err, minioMetaVolume, uploadIDPath))
} }
// uploadIDPath doesn't exist, so create empty file to reserve the name // uploadIDPath doesn't exist, so create empty file to reserve the name
var w io.WriteCloser var w io.WriteCloser
if w, e = o.storage.CreateFile(minioMetaVolume, uploadIDPath); e == nil { if w, err = fs.storage.CreateFile(minioMetaVolume, uploadIDPath); err == nil {
// Close the writer. // Close the writer.
if e = w.Close(); e != nil { if err = w.Close(); err != nil {
return "", probe.NewError(e) return "", err
} }
} else { } else {
return "", probe.NewError(toObjectErr(e, minioMetaVolume, uploadIDPath)) return "", toObjectErr(err, minioMetaVolume, uploadIDPath)
} }
return uploadID, nil return uploadID, nil
} }
@ -318,48 +303,46 @@ func (o objectAPI) NewMultipartUpload(bucket, object string) (string, *probe.Err
} }
// isUploadIDExists - verify if a given uploadID exists and is valid. // isUploadIDExists - verify if a given uploadID exists and is valid.
func (o objectAPI) isUploadIDExists(bucket, object, uploadID string) (bool, error) { func isUploadIDExists(storage StorageAPI, bucket, object, uploadID string) (bool, error) {
uploadIDPath := path.Join(bucket, object, uploadID) uploadIDPath := path.Join(bucket, object, uploadID)
st, e := o.storage.StatFile(minioMetaVolume, uploadIDPath) st, err := storage.StatFile(minioMetaVolume, uploadIDPath)
if e != nil { if err != nil {
// Upload id does not exist. // Upload id does not exist.
if e == errFileNotFound { if err == errFileNotFound {
return false, nil return false, nil
} }
return false, e return false, err
} }
// Upload id exists and is a regular file. // Upload id exists and is a regular file.
return st.Mode.IsRegular(), nil return st.Mode.IsRegular(), nil
} }
// PutObjectPart - writes the multipart upload chunks. // PutObjectPart - writes the multipart upload chunks.
func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, *probe.Error) { func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket}) return "", BucketNameInvalid{Bucket: bucket}
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object}) return "", ObjectNameInvalid{Bucket: bucket, Object: object}
} }
// Verify whether the bucket exists. // Verify whether the bucket exists.
isExist, err := o.isBucketExist(bucket) if isExist, err := isBucketExist(fs.storage, bucket); err != nil {
if err != nil { return "", err
return "", probe.NewError(err) } else if !isExist {
} return "", BucketNotFound{Bucket: bucket}
if !isExist {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
} }
if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil { if status, err := isUploadIDExists(fs.storage, bucket, object, uploadID); err != nil {
return "", probe.NewError(e) return "", err
} else if !status { } else if !status {
return "", probe.NewError(InvalidUploadID{UploadID: uploadID}) return "", InvalidUploadID{UploadID: uploadID}
} }
partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, partID, md5Hex) partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, partID, md5Hex)
fileWriter, e := o.storage.CreateFile(minioMetaVolume, path.Join(bucket, object, partSuffix)) fileWriter, err := fs.storage.CreateFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
if e != nil { if err != nil {
return "", probe.NewError(toObjectErr(e, bucket, object)) return "", toObjectErr(err, bucket, object)
} }
// Initialize md5 writer. // Initialize md5 writer.
@ -370,21 +353,21 @@ func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, si
// Instantiate checksum hashers and create a multiwriter. // Instantiate checksum hashers and create a multiwriter.
if size > 0 { if size > 0 {
if _, e = io.CopyN(multiWriter, data, size); e != nil { if _, err = io.CopyN(multiWriter, data, size); err != nil {
safeCloseAndRemove(fileWriter) safeCloseAndRemove(fileWriter)
return "", probe.NewError(toObjectErr(e)) return "", (toObjectErr(err))
} }
// Reader shouldn't have more data what mentioned in size argument. // Reader shouldn't have more data what mentioned in size argument.
// reading one more byte from the reader to validate it. // reading one more byte from the reader to validate it.
// expected to fail, success validates existence of more data in the reader. // expected to fail, success validates existence of more data in the reader.
if _, e = io.CopyN(ioutil.Discard, data, 1); e == nil { if _, err = io.CopyN(ioutil.Discard, data, 1); err == nil {
safeCloseAndRemove(fileWriter) safeCloseAndRemove(fileWriter)
return "", probe.NewError(UnExpectedDataSize{Size: int(size)}) return "", (UnExpectedDataSize{Size: int(size)})
} }
} else { } else {
if _, e = io.Copy(multiWriter, data); e != nil { if _, err = io.Copy(multiWriter, data); err != nil {
safeCloseAndRemove(fileWriter) safeCloseAndRemove(fileWriter)
return "", probe.NewError(toObjectErr(e)) return "", (toObjectErr(err))
} }
} }
@ -392,28 +375,28 @@ func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, si
if md5Hex != "" { if md5Hex != "" {
if newMD5Hex != md5Hex { if newMD5Hex != md5Hex {
safeCloseAndRemove(fileWriter) safeCloseAndRemove(fileWriter)
return "", probe.NewError(BadDigest{md5Hex, newMD5Hex}) return "", (BadDigest{md5Hex, newMD5Hex})
} }
} }
e = fileWriter.Close() err = fileWriter.Close()
if e != nil { if err != nil {
return "", probe.NewError(e) return "", err
} }
return newMD5Hex, nil return newMD5Hex, nil
} }
func (o objectAPI) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error) { func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ListPartsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) return ListPartsInfo{}, (BucketNameInvalid{Bucket: bucket})
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return ListPartsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object}) return ListPartsInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: object})
} }
if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil { if status, err := isUploadIDExists(fs.storage, bucket, object, uploadID); err != nil {
return ListPartsInfo{}, probe.NewError(e) return ListPartsInfo{}, err
} else if !status { } else if !status {
return ListPartsInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID}) return ListPartsInfo{}, (InvalidUploadID{UploadID: uploadID})
} }
result := ListPartsInfo{} result := ListPartsInfo{}
var markerPath string var markerPath string
@ -423,26 +406,26 @@ func (o objectAPI) ListObjectParts(bucket, object, uploadID string, partNumberMa
// partNumberMarker is already set. // partNumberMarker is already set.
if partNumberMarker > 0 { if partNumberMarker > 0 {
partNumberMarkerPath := uploadIDPath + "." + strconv.Itoa(partNumberMarker) + "." partNumberMarkerPath := uploadIDPath + "." + strconv.Itoa(partNumberMarker) + "."
fileInfos, _, e := o.storage.ListFiles(minioMetaVolume, partNumberMarkerPath, "", false, 1) fileInfos, _, err := fs.storage.ListFiles(minioMetaVolume, partNumberMarkerPath, "", false, 1)
if e != nil { if err != nil {
return result, probe.NewError(toObjectErr(e, minioMetaVolume, partNumberMarkerPath)) return result, toObjectErr(err, minioMetaVolume, partNumberMarkerPath)
} }
if len(fileInfos) == 0 { if len(fileInfos) == 0 {
return result, probe.NewError(InvalidPart{}) return result, (InvalidPart{})
} }
markerPath = fileInfos[0].Name markerPath = fileInfos[0].Name
} }
uploadIDPrefix := uploadIDPath + "." uploadIDPrefix := uploadIDPath + "."
fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, uploadIDPrefix, markerPath, false, maxParts) fileInfos, eof, err := fs.storage.ListFiles(minioMetaVolume, uploadIDPrefix, markerPath, false, maxParts)
if e != nil { if err != nil {
return result, probe.NewError(InvalidPart{}) return result, InvalidPart{}
} }
for _, fileInfo := range fileInfos { for _, fileInfo := range fileInfos {
fileName := path.Base(fileInfo.Name) fileName := path.Base(fileInfo.Name)
splitResult := strings.Split(fileName, ".") splitResult := strings.Split(fileName, ".")
partNum, e := strconv.Atoi(splitResult[1]) partNum, err := strconv.Atoi(splitResult[1])
if e != nil { if err != nil {
return result, probe.NewError(e) return result, err
} }
md5sum := splitResult[2] md5sum := splitResult[2]
result.Parts = append(result.Parts, partInfo{ result.Parts = append(result.Parts, partInfo{
@ -463,86 +446,90 @@ func (o objectAPI) ListObjectParts(bucket, object, uploadID string, partNumberMa
return result, nil return result, nil
} }
// Create an s3 compatible MD5sum for complete multipart transaction. func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) {
func makeS3MD5(md5Strs ...string) (string, *probe.Error) {
var finalMD5Bytes []byte
for _, md5Str := range md5Strs {
md5Bytes, e := hex.DecodeString(md5Str)
if e != nil {
return "", probe.NewError(e)
}
finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
}
md5Hasher := md5.New()
md5Hasher.Write(finalMD5Bytes)
s3MD5 := fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(md5Strs))
return s3MD5, nil
}
func (o objectAPI) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, *probe.Error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket}) return "", (BucketNameInvalid{Bucket: bucket})
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return "", probe.NewError(ObjectNameInvalid{ return "", (ObjectNameInvalid{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
}) })
} }
if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil { if status, err := isUploadIDExists(fs.storage, bucket, object, uploadID); err != nil {
return "", probe.NewError(e) return "", err
} else if !status { } else if !status {
return "", probe.NewError(InvalidUploadID{UploadID: uploadID}) return "", (InvalidUploadID{UploadID: uploadID})
}
fileWriter, err := fs.storage.CreateFile(bucket, object)
if err != nil {
return "", toObjectErr(err, bucket, object)
} }
var md5Sums []string var md5Sums []string
for _, part := range parts { for _, part := range parts {
// Construct part suffix. // Construct part suffix.
partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, part.PartNumber, part.ETag) partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, part.PartNumber, part.ETag)
e := o.storage.RenameFile(minioMetaVolume, path.Join(bucket, object, partSuffix), bucket, path.Join(object, fmt.Sprint(part.PartNumber))) var fileReader io.ReadCloser
if e != nil { fileReader, err = fs.storage.ReadFile(minioMetaVolume, path.Join(bucket, object, partSuffix), 0)
return "", probe.NewError(e) if err != nil {
if err == errFileNotFound {
return "", (InvalidPart{})
}
return "", err
}
_, err = io.Copy(fileWriter, fileReader)
if err != nil {
return "", err
}
err = fileReader.Close()
if err != nil {
return "", err
} }
md5Sums = append(md5Sums, part.ETag) md5Sums = append(md5Sums, part.ETag)
} }
fileWriter, e := o.storage.CreateFile(bucket, path.Join(object, "multipart.json"))
if e != nil { err = fileWriter.Close()
return "", probe.NewError(e) if err != nil {
return "", err
} }
fileWriter.Close()
// Save the s3 md5. // Save the s3 md5.
s3MD5, err := makeS3MD5(md5Sums...) s3MD5, err := makeS3MD5(md5Sums...)
if err != nil { if err != nil {
return "", err.Trace(md5Sums...) return "", err
} }
// Cleanup all the parts. // Cleanup all the parts.
// o.removeMultipartUpload(bucket, object, uploadID) fs.removeMultipartUpload(bucket, object, uploadID)
// Return md5sum. // Return md5sum.
return s3MD5, nil return s3MD5, nil
} }
func (o objectAPI) removeMultipartUpload(bucket, object, uploadID string) *probe.Error { func (fs fsObjects) removeMultipartUpload(bucket, object, uploadID string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket}) return (BucketNameInvalid{Bucket: bucket})
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object}) return (ObjectNameInvalid{Bucket: bucket, Object: object})
} }
marker := "" marker := ""
for { for {
uploadIDPath := path.Join(bucket, object, uploadID) uploadIDPath := path.Join(bucket, object, uploadID)
fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, uploadIDPath, marker, false, 1000) fileInfos, eof, err := fs.storage.ListFiles(minioMetaVolume, uploadIDPath, marker, false, 1000)
if e != nil { if err != nil {
if err == errFileNotFound {
return probe.NewError(InvalidUploadID{UploadID: uploadID}) return (InvalidUploadID{UploadID: uploadID})
}
return toObjectErr(err)
} }
for _, fileInfo := range fileInfos { for _, fileInfo := range fileInfos {
o.storage.DeleteFile(minioMetaVolume, fileInfo.Name) fs.storage.DeleteFile(minioMetaVolume, fileInfo.Name)
marker = fileInfo.Name marker = fileInfo.Name
} }
if eof { if eof {
@ -552,22 +539,18 @@ func (o objectAPI) removeMultipartUpload(bucket, object, uploadID string) *probe
return nil return nil
} }
func (o objectAPI) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error { func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket}) return (BucketNameInvalid{Bucket: bucket})
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object}) return (ObjectNameInvalid{Bucket: bucket, Object: object})
} }
if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil { if status, err := isUploadIDExists(fs.storage, bucket, object, uploadID); err != nil {
return probe.NewError(e) return err
} else if !status { } else if !status {
return probe.NewError(InvalidUploadID{UploadID: uploadID}) return (InvalidUploadID{UploadID: uploadID})
} }
err := o.removeMultipartUpload(bucket, object, uploadID) return fs.removeMultipartUpload(bucket, object, uploadID)
if err != nil {
return err.Trace(bucket, object, uploadID)
}
return nil
} }

333
fs-objects.go Normal file
View File

@ -0,0 +1,333 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"crypto/md5"
"encoding/hex"
"io"
"path/filepath"
"sort"
"strings"
"github.com/minio/minio/pkg/mimedb"
)
// fsObjects - Implements fs object layer. Satisfies the ObjectLayer
// interface using a single StorageAPI backend (local posix disk or a
// remote rpc storage client — see newFSObjects).
type fsObjects struct {
	storage StorageAPI // backend used by every bucket/object operation
}
// newFSObjects - initialize new fs object layer.
//
// exportPath selects the backend: a plain directory path (no ':', or
// a Windows volume-qualified path such as "C:\data") yields the posix
// storage API, anything else is treated as a network address for the
// rpc storage client.
func newFSObjects(exportPath string) (ObjectLayer, error) {
	isLocalPath := !strings.ContainsRune(exportPath, ':') || filepath.VolumeName(exportPath) != ""

	var storage StorageAPI
	var err error
	if isLocalPath {
		// Initialize filesystem storage API.
		storage, err = newPosix(exportPath)
	} else {
		// Initialize rpc client storage API.
		storage, err = newRPCClient(exportPath)
	}
	if err != nil {
		return nil, err
	}
	return fsObjects{storage}, nil
}
// isBucketExist - checks whether the named bucket's backing volume
// exists. A missing volume reports (false, nil); any other stat
// failure is returned as an error.
func isBucketExist(storage StorageAPI, bucketName string) (bool, error) {
	_, err := storage.StatVol(bucketName)
	if err == nil {
		return true, nil
	}
	if err == errVolumeNotFound {
		return false, nil
	}
	return false, err
}
/// Bucket operations
// MakeBucket - make a bucket by creating its backing storage volume.
func (fs fsObjects) MakeBucket(bucket string) error {
	// Reject invalid bucket names before touching storage.
	if !IsValidBucketName(bucket) {
		return BucketNameInvalid{Bucket: bucket}
	}
	if err := fs.storage.MakeVol(bucket); err != nil {
		return toObjectErr(err, bucket)
	}
	// Lazily create the minio meta volume here. It only actually
	// happens on the first bucket creation, which keeps this check
	// out of every other (hotter) call path.
	err := fs.storage.MakeVol(minioMetaVolume)
	if err != nil && err != errVolumeExists {
		return toObjectErr(err, minioMetaVolume)
	}
	return nil
}
// GetBucketInfo - get bucket info by statting the backing volume.
func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
	// Reject invalid bucket names up front.
	if !IsValidBucketName(bucket) {
		return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
	}
	vi, err := fs.storage.StatVol(bucket)
	if err != nil {
		return BucketInfo{}, toObjectErr(err, bucket)
	}
	info := BucketInfo{
		Name:    bucket,
		Created: vi.Created,
		Total:   vi.Total,
		Free:    vi.Free,
	}
	return info, nil
}
// ListBuckets - list all volumes that map to valid bucket names,
// sorted by bucket name.
func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
	vols, err := fs.storage.ListVols()
	if err != nil {
		return nil, toObjectErr(err)
	}
	var bucketInfos []BucketInfo
	for _, vol := range vols {
		// StorageAPI can send volume names which are incompatible
		// with bucket naming rules; silently skip those.
		if !IsValidBucketName(vol.Name) {
			continue
		}
		bucketInfos = append(bucketInfos, BucketInfo{
			Name:    vol.Name,
			Created: vol.Created,
			Total:   vol.Total,
			Free:    vol.Free,
		})
	}
	sort.Sort(byBucketName(bucketInfos))
	return bucketInfos, nil
}
// DeleteBucket - delete a bucket by removing its backing volume.
func (fs fsObjects) DeleteBucket(bucket string) error {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return BucketNameInvalid{Bucket: bucket}
	}
	if err := fs.storage.DeleteVol(bucket); err != nil {
		// Pass the bucket name so the storage error is translated to
		// a bucket-scoped object error (e.g. BucketNotFound),
		// consistent with MakeBucket and GetBucketInfo above.
		return toObjectErr(err, bucket)
	}
	return nil
}
/// Object Operations
// GetObject - get a reader streaming the object's contents starting
// at startOffset. The caller is responsible for closing the reader.
func (fs fsObjects) GetObject(bucket, object string, startOffset int64) (io.ReadCloser, error) {
	// Validate bucket and object names before touching storage.
	if !IsValidBucketName(bucket) {
		return nil, BucketNameInvalid{Bucket: bucket}
	}
	if !IsValidObjectName(object) {
		return nil, ObjectNameInvalid{Bucket: bucket, Object: object}
	}
	r, err := fs.storage.ReadFile(bucket, object, startOffset)
	if err != nil {
		return nil, toObjectErr(err, bucket, object)
	}
	return r, nil
}
// GetObjectInfo - get object info by statting the object file and
// deriving a content type from its extension.
func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
	// Validate bucket and object names before touching storage.
	if !IsValidBucketName(bucket) {
		return ObjectInfo{}, BucketNameInvalid{Bucket: bucket}
	}
	if !IsValidObjectName(object) {
		return ObjectInfo{}, ObjectNameInvalid{Bucket: bucket, Object: object}
	}
	fi, err := fs.storage.StatFile(bucket, object)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	// Look up a content type for the extension; fall back to a
	// generic binary type when the extension is unknown or absent.
	contentType := "application/octet-stream"
	if ext := filepath.Ext(object); ext != "" {
		if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok {
			contentType = content.ContentType
		}
	}
	return ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ModTime:     fi.ModTime,
		Size:        fi.Size,
		IsDir:       fi.Mode.IsDir(),
		ContentType: contentType,
		MD5Sum:      "", // Read from metadata.
	}, nil
}
// PutObject - write an object's data to storage and return its hex
// md5sum. If metadata carries an "md5Sum" entry, the computed
// checksum must match it or the partially written object is removed
// and BadDigest is returned. size <= 0 means "copy until EOF".
func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (string, error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return "", BucketNameInvalid{Bucket: bucket}
	}
	if !IsValidObjectName(object) {
		return "", ObjectNameInvalid{
			Bucket: bucket,
			Object: object,
		}
	}
	// Check whether the bucket exists.
	if isExist, err := isBucketExist(fs.storage, bucket); err != nil {
		return "", err
	} else if !isExist {
		return "", BucketNotFound{Bucket: bucket}
	}
	fileWriter, err := fs.storage.CreateFile(bucket, object)
	if err != nil {
		return "", toObjectErr(err, bucket, object)
	}

	// Initialize md5 writer.
	md5Writer := md5.New()

	// Tee everything written to the file into the md5 hasher.
	multiWriter := io.MultiWriter(md5Writer, fileWriter)

	if size > 0 {
		// Known size: copy exactly `size` bytes.
		if _, err = io.CopyN(multiWriter, data, size); err != nil {
			if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
				return "", clErr
			}
			// Attach bucket/object context, consistent with the
			// CreateFile error above (was bare toObjectErr(err)).
			return "", toObjectErr(err, bucket, object)
		}
	} else {
		// Unknown size: copy until EOF.
		if _, err = io.Copy(multiWriter, data); err != nil {
			if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
				return "", clErr
			}
			// Attach bucket/object context here too (was bare err).
			return "", toObjectErr(err, bucket, object)
		}
	}

	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))

	// Client-supplied md5Hex, if present in the metadata.
	var md5Hex string
	if len(metadata) != 0 {
		md5Hex = metadata["md5Sum"]
	}
	if md5Hex != "" && newMD5Hex != md5Hex {
		// Checksum mismatch: discard the partially written object.
		if err = safeCloseAndRemove(fileWriter); err != nil {
			return "", err
		}
		return "", BadDigest{md5Hex, newMD5Hex}
	}
	if err = fileWriter.Close(); err != nil {
		return "", err
	}

	// Return md5sum, successfully wrote object.
	return newMD5Hex, nil
}
// DeleteObject - remove an object from its bucket.
func (fs fsObjects) DeleteObject(bucket, object string) error {
	// Validate names before issuing the delete.
	if !IsValidBucketName(bucket) {
		return BucketNameInvalid{Bucket: bucket}
	}
	if !IsValidObjectName(object) {
		return ObjectNameInvalid{Bucket: bucket, Object: object}
	}
	err := fs.storage.DeleteFile(bucket, object)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}
	return nil
}
// ListObjects - list up to maxKeys objects under prefix, resuming
// after marker. Only the empty delimiter or "/" is supported; "/"
// disables recursion and reports directories as common prefixes.
func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	// Validate the request parameters.
	if !IsValidBucketName(bucket) {
		return ListObjectsInfo{}, BucketNameInvalid{Bucket: bucket}
	}
	if !IsValidObjectPrefix(prefix) {
		return ListObjectsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix}
	}
	// Only '/' (or no delimiter at all) is supported.
	if delimiter != "" && delimiter != slashSeparator {
		return ListObjectsInfo{}, UnsupportedDelimiter{Delimiter: delimiter}
	}
	// A non-empty marker must fall under the requested prefix.
	if marker != "" && !strings.HasPrefix(marker, prefix) {
		return ListObjectsInfo{}, InvalidMarkerPrefixCombination{
			Marker: marker,
			Prefix: prefix,
		}
	}

	// Recursive listing unless the slash delimiter was requested.
	recursive := delimiter != slashSeparator

	fileInfos, eof, err := fs.storage.ListFiles(bucket, prefix, marker, recursive, maxKeys)
	if err != nil {
		return ListObjectsInfo{}, toObjectErr(err, bucket)
	}
	if maxKeys == 0 {
		return ListObjectsInfo{}, nil
	}

	result := ListObjectsInfo{IsTruncated: !eof}
	for _, fileInfo := range fileInfos {
		// With a delimiter set, directories become common prefixes
		// and NextMarker tracks the last entry handed out.
		if delimiter == slashSeparator {
			result.NextMarker = fileInfo.Name
			if fileInfo.Mode.IsDir() {
				result.Prefixes = append(result.Prefixes, fileInfo.Name)
				continue
			}
		}
		result.Objects = append(result.Objects, ObjectInfo{
			Name:    fileInfo.Name,
			ModTime: fileInfo.ModTime,
			Size:    fileInfo.Size,
			IsDir:   false,
		})
	}
	return result, nil
}

View File

@ -21,8 +21,6 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
"github.com/minio/minio/pkg/probe"
) )
const ( const (
@ -50,7 +48,7 @@ func (r *httpRange) String() string {
} }
// Grab new range from request header // Grab new range from request header
func getRequestedRange(hrange string, size int64) (*httpRange, *probe.Error) { func getRequestedRange(hrange string, size int64) (*httpRange, error) {
r := &httpRange{ r := &httpRange{
start: 0, start: 0,
length: 0, length: 0,
@ -60,16 +58,16 @@ func getRequestedRange(hrange string, size int64) (*httpRange, *probe.Error) {
if hrange != "" { if hrange != "" {
err := r.parseRange(hrange) err := r.parseRange(hrange)
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
} }
return r, nil return r, nil
} }
func (r *httpRange) parse(ra string) *probe.Error { func (r *httpRange) parse(ra string) error {
i := strings.Index(ra, "-") i := strings.Index(ra, "-")
if i < 0 { if i < 0 {
return probe.NewError(InvalidRange{}) return InvalidRange{}
} }
start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:]) start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
if start == "" { if start == "" {
@ -77,7 +75,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
// range start relative to the end of the file. // range start relative to the end of the file.
i, err := strconv.ParseInt(end, 10, 64) i, err := strconv.ParseInt(end, 10, 64)
if err != nil { if err != nil {
return probe.NewError(InvalidRange{}) return InvalidRange{}
} }
if i > r.size { if i > r.size {
i = r.size i = r.size
@ -87,7 +85,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
} else { } else {
i, err := strconv.ParseInt(start, 10, 64) i, err := strconv.ParseInt(start, 10, 64)
if err != nil || i > r.size || i < 0 { if err != nil || i > r.size || i < 0 {
return probe.NewError(InvalidRange{}) return InvalidRange{}
} }
r.start = i r.start = i
if end == "" { if end == "" {
@ -96,7 +94,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
} else { } else {
i, err := strconv.ParseInt(end, 10, 64) i, err := strconv.ParseInt(end, 10, 64)
if err != nil || r.start > i { if err != nil || r.start > i {
return probe.NewError(InvalidRange{}) return InvalidRange{}
} }
if i >= r.size { if i >= r.size {
i = r.size - 1 i = r.size - 1
@ -108,26 +106,26 @@ func (r *httpRange) parse(ra string) *probe.Error {
} }
// parseRange parses a Range header string as per RFC 2616. // parseRange parses a Range header string as per RFC 2616.
func (r *httpRange) parseRange(s string) *probe.Error { func (r *httpRange) parseRange(s string) error {
if s == "" { if s == "" {
return probe.NewError(errors.New("header not present")) return errors.New("header not present")
} }
if !strings.HasPrefix(s, b) { if !strings.HasPrefix(s, b) {
return probe.NewError(InvalidRange{}) return InvalidRange{}
} }
ras := strings.Split(s[len(b):], ",") ras := strings.Split(s[len(b):], ",")
if len(ras) == 0 { if len(ras) == 0 {
return probe.NewError(errors.New("invalid request")) return errors.New("invalid request")
} }
// Just pick the first one and ignore the rest, we only support one range per object // Just pick the first one and ignore the rest, we only support one range per object
if len(ras) > 1 { if len(ras) > 1 {
return probe.NewError(errors.New("multiple ranges specified")) return errors.New("multiple ranges specified")
} }
ra := strings.TrimSpace(ras[0]) ra := strings.TrimSpace(ras[0])
if ra == "" { if ra == "" {
return probe.NewError(InvalidRange{}) return InvalidRange{}
} }
return r.parse(ra) return r.parse(ra)
} }

View File

@ -20,7 +20,6 @@ import (
"io/ioutil" "io/ioutil"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
) )
// consoleLogger - default logger if not other logging is enabled. // consoleLogger - default logger if not other logging is enabled.
@ -39,8 +38,8 @@ func enableConsoleLogger() {
} }
// log.Out and log.Formatter use the default versions. // log.Out and log.Formatter use the default versions.
// Only set specific log level. // Only set specific log level.
lvl, e := logrus.ParseLevel(clogger.Level) lvl, err := logrus.ParseLevel(clogger.Level)
fatalIf(probe.NewError(e), "Unknown log level detected, please fix your console logger configuration.", nil) fatalIf(err, "Unknown log level detected, please fix your console logger configuration.", nil)
log.Level = lvl log.Level = lvl
} }

View File

@ -21,7 +21,6 @@ import (
"os" "os"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
) )
type fileLogger struct { type fileLogger struct {
@ -40,14 +39,14 @@ func enableFileLogger() {
return return
} }
file, e := os.OpenFile(flogger.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) file, err := os.OpenFile(flogger.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
fatalIf(probe.NewError(e), "Unable to open log file.", nil) fatalIf(err, "Unable to open log file.", nil)
// Add a local file hook. // Add a local file hook.
log.Hooks.Add(&localFile{file}) log.Hooks.Add(&localFile{file})
lvl, e := logrus.ParseLevel(flogger.Level) lvl, err := logrus.ParseLevel(flogger.Level)
fatalIf(probe.NewError(e), "Unknown log level detected, please fix your console logger configuration.", nil) fatalIf(err, "Unknown log level detected, please fix your console logger configuration.", nil)
// Set default JSON formatter. // Set default JSON formatter.
log.Formatter = new(logrus.JSONFormatter) log.Formatter = new(logrus.JSONFormatter)

View File

@ -23,7 +23,6 @@ import (
"log/syslog" "log/syslog"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
) )
type syslogLogger struct { type syslogLogger struct {
@ -41,8 +40,8 @@ type syslogHook struct {
// enableSyslogLogger - enable logger at raddr. // enableSyslogLogger - enable logger at raddr.
func enableSyslogLogger(raddr string) { func enableSyslogLogger(raddr string) {
syslogHook, e := newSyslog("udp", raddr, syslog.LOG_ERR, "MINIO") syslogHook, err := newSyslog("udp", raddr, syslog.LOG_ERR, "MINIO")
fatalIf(probe.NewError(e), "Unable to instantiate syslog.", nil) fatalIf(err, "Unable to instantiate syslog.", nil)
log.Hooks.Add(syslogHook) // Add syslog hook. log.Hooks.Add(syslogHook) // Add syslog hook.
log.Formatter = &logrus.JSONFormatter{} // JSON formatted log. log.Formatter = &logrus.JSONFormatter{} // JSON formatted log.
@ -51,15 +50,15 @@ func enableSyslogLogger(raddr string) {
// newSyslog - Creates a hook to be added to an instance of logger. // newSyslog - Creates a hook to be added to an instance of logger.
func newSyslog(network, raddr string, priority syslog.Priority, tag string) (*syslogHook, error) { func newSyslog(network, raddr string, priority syslog.Priority, tag string) (*syslogHook, error) {
w, e := syslog.Dial(network, raddr, priority, tag) w, err := syslog.Dial(network, raddr, priority, tag)
return &syslogHook{w, network, raddr}, e return &syslogHook{w, network, raddr}, err
} }
// Fire - fire the log event // Fire - fire the log event
func (hook *syslogHook) Fire(entry *logrus.Entry) error { func (hook *syslogHook) Fire(entry *logrus.Entry) error {
line, e := entry.String() line, err := entry.String()
if e != nil { if err != nil {
return fmt.Errorf("Unable to read entry, %v", e) return fmt.Errorf("Unable to read entry, %v", err)
} }
switch entry.Level { switch entry.Level {
case logrus.PanicLevel: case logrus.PanicLevel:

View File

@ -18,8 +18,6 @@
package main package main
import "github.com/minio/minio/pkg/probe"
type syslogLogger struct { type syslogLogger struct {
Enable bool `json:"enable"` Enable bool `json:"enable"`
Addr string `json:"address"` Addr string `json:"address"`
@ -28,5 +26,5 @@ type syslogLogger struct {
// enableSyslogLogger - unsupported on windows. // enableSyslogLogger - unsupported on windows.
func enableSyslogLogger(raddr string) { func enableSyslogLogger(raddr string) {
fatalIf(probe.NewError(errSyslogNotSupported), "Unable to enable syslog.", nil) fatalIf(errSyslogNotSupported, "Unable to enable syslog.", nil)
} }

View File

@ -17,11 +17,9 @@
package main package main
import ( import (
"encoding/json"
"reflect" "reflect"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
) )
type fields map[string]interface{} type fields map[string]interface{}
@ -43,7 +41,7 @@ type logger struct {
} }
// errorIf synonymous with fatalIf but doesn't exit on error != nil // errorIf synonymous with fatalIf but doesn't exit on error != nil
func errorIf(err *probe.Error, msg string, fields logrus.Fields) { func errorIf(err error, msg string, fields logrus.Fields) {
if err == nil { if err == nil {
return return
} }
@ -51,21 +49,17 @@ func errorIf(err *probe.Error, msg string, fields logrus.Fields) {
fields = make(logrus.Fields) fields = make(logrus.Fields)
} }
fields["Error"] = struct { fields["Error"] = struct {
Cause string `json:"cause,omitempty"` Cause string `json:"cause,omitempty"`
Type string `json:"type,omitempty"` Type string `json:"type,omitempty"`
CallTrace []probe.TracePoint `json:"trace,omitempty"`
SysInfo map[string]string `json:"sysinfo,omitempty"`
}{ }{
err.Cause.Error(), err.Error(),
reflect.TypeOf(err.Cause).String(), reflect.TypeOf(err).String(),
err.CallTrace,
err.SysInfo,
} }
log.WithFields(fields).Error(msg) log.WithFields(fields).Error(msg)
} }
// fatalIf wrapper function which takes error and prints jsonic error messages. // fatalIf wrapper function which takes error and prints jsonic error messages.
func fatalIf(err *probe.Error, msg string, fields logrus.Fields) { func fatalIf(err error, msg string, fields logrus.Fields) {
if err == nil { if err == nil {
return return
} }
@ -73,9 +67,12 @@ func fatalIf(err *probe.Error, msg string, fields logrus.Fields) {
fields = make(logrus.Fields) fields = make(logrus.Fields)
} }
fields["error"] = err.ToGoError() fields["Error"] = struct {
if jsonErr, e := json.Marshal(err); e == nil { Cause string `json:"cause,omitempty"`
fields["probe"] = string(jsonErr) Type string `json:"type,omitempty"`
}{
err.Error(),
reflect.TypeOf(err).String(),
} }
log.WithFields(fields).Fatal(msg) log.WithFields(fields).Fatal(msg)
} }

View File

@ -22,7 +22,6 @@ import (
"errors" "errors"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -37,7 +36,7 @@ func (s *LoggerSuite) TestLogger(c *C) {
log.Out = &buffer log.Out = &buffer
log.Formatter = new(logrus.JSONFormatter) log.Formatter = new(logrus.JSONFormatter)
errorIf(probe.NewError(errors.New("Fake error")), "Failed with error.", nil) errorIf(errors.New("Fake error"), "Failed with error.", nil)
err := json.Unmarshal(buffer.Bytes(), &fields) err := json.Unmarshal(buffer.Bytes(), &fields)
c.Assert(err, IsNil) c.Assert(err, IsNil)
c.Assert(fields["level"], Equals, "error") c.Assert(fields["level"], Equals, "error")

View File

@ -197,7 +197,7 @@ func main() {
// Initialize config. // Initialize config.
err := initConfig() err := initConfig()
fatalIf(err.Trace(), "Unable to initialize minio config.", nil) fatalIf(err, "Unable to initialize minio config.", nil)
// Enable all loggers by now. // Enable all loggers by now.
enableLoggers() enableLoggers()

View File

@ -22,12 +22,11 @@ import (
"strings" "strings"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/minio/minio/pkg/probe"
"github.com/olekukonko/ts" "github.com/olekukonko/ts"
) )
// colorizeUpdateMessage - inspired from Yeoman project npm package https://github.com/yeoman/update-notifier // colorizeUpdateMessage - inspired from Yeoman project npm package https://github.com/yeoman/update-notifier
func colorizeUpdateMessage(updateString string) (string, *probe.Error) { func colorizeUpdateMessage(updateString string) (string, error) {
// Initialize coloring. // Initialize coloring.
cyan := color.New(color.FgCyan, color.Bold).SprintFunc() cyan := color.New(color.FgCyan, color.Bold).SprintFunc()
yellow := color.New(color.FgYellow, color.Bold).SprintfFunc() yellow := color.New(color.FgYellow, color.Bold).SprintfFunc()
@ -47,7 +46,7 @@ func colorizeUpdateMessage(updateString string) (string, *probe.Error) {
terminal, err := ts.GetSize() terminal, err := ts.GetSize()
if err != nil { if err != nil {
return "", probe.NewError(err) return "", err
} }
var message string var message string

View File

@ -25,27 +25,22 @@ import (
"os" "os"
"strconv" "strconv"
"testing" "testing"
"github.com/minio/minio/pkg/probe"
) )
// Testing GetObjectInfo(). // Testing GetObjectInfo().
func TestGetObjectInfo(t *testing.T) { func TestGetObjectInfo(t *testing.T) {
directory, e := ioutil.TempDir("", "minio-get-objinfo-test") directory, err := ioutil.TempDir("", "minio-get-objinfo-test")
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
defer os.RemoveAll(directory) defer os.RemoveAll(directory)
// Create the obj. // Create the obj.
fs, e := newFS(directory) obj, err := newFSObjects(directory)
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
obj := newObjectLayer(fs)
var err *probe.Error
// This bucket is used for testing getObjectInfo operations. // This bucket is used for testing getObjectInfo operations.
err = obj.MakeBucket("test-getobjectinfo") err = obj.MakeBucket("test-getobjectinfo")
if err != nil { if err != nil {
@ -93,15 +88,15 @@ func TestGetObjectInfo(t *testing.T) {
for i, testCase := range testCases { for i, testCase := range testCases {
result, err := obj.GetObjectInfo(testCase.bucketName, testCase.objectName) result, err := obj.GetObjectInfo(testCase.bucketName, testCase.objectName)
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error()) t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
} }
if err == nil && !testCase.shouldPass { if err == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error()) t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
} }
// Failed as expected, but does it fail for the expected reason. // Failed as expected, but does it fail for the expected reason.
if err != nil && !testCase.shouldPass { if err != nil && !testCase.shouldPass {
if testCase.err.Error() != err.Cause.Error() { if testCase.err.Error() != err.Error() {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Cause.Error()) t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
} }
} }
@ -125,21 +120,18 @@ func TestGetObjectInfo(t *testing.T) {
func BenchmarkGetObject(b *testing.B) { func BenchmarkGetObject(b *testing.B) {
// Make a temporary directory to use as the obj. // Make a temporary directory to use as the obj.
directory, e := ioutil.TempDir("", "minio-benchmark-getobject") directory, err := ioutil.TempDir("", "minio-benchmark-getobject")
if e != nil { if err != nil {
b.Fatal(e) b.Fatal(err)
} }
defer os.RemoveAll(directory) defer os.RemoveAll(directory)
// Create the obj. // Create the obj.
fs, e := newFS(directory) obj, err := newFSObjects(directory)
if e != nil { if err != nil {
b.Fatal(e) b.Fatal(err)
} }
obj := newObjectLayer(fs)
var err *probe.Error
// Make a bucket and put in a few objects. // Make a bucket and put in a few objects.
err = obj.MakeBucket("bucket") err = obj.MakeBucket("bucket")
if err != nil { if err != nil {
@ -165,8 +157,8 @@ func BenchmarkGetObject(b *testing.B) {
if err != nil { if err != nil {
b.Error(err) b.Error(err)
} }
if _, e := io.Copy(buffer, r); e != nil { if _, err := io.Copy(buffer, r); err != nil {
b.Error(e) b.Error(err)
} }
if buffer.Len() != len(text) { if buffer.Len() != len(text) {
b.Errorf("GetObject returned incorrect length %d (should be %d)\n", buffer.Len(), len(text)) b.Errorf("GetObject returned incorrect length %d (should be %d)\n", buffer.Len(), len(text))

View File

@ -24,26 +24,22 @@ import (
"strconv" "strconv"
"strings" "strings"
"testing" "testing"
"github.com/minio/minio/pkg/probe"
) )
func TestListObjects(t *testing.T) { func TestListObjects(t *testing.T) {
// Make a temporary directory to use as the obj. // Make a temporary directory to use as the obj.
directory, e := ioutil.TempDir("", "minio-list-object-test") directory, err := ioutil.TempDir("", "minio-list-object-test")
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
defer os.RemoveAll(directory) defer os.RemoveAll(directory)
// Create the obj. // Create the obj.
fs, e := newFS(directory) obj, err := newFSObjects(directory)
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
obj := newObjectLayer(fs)
var err *probe.Error
// This bucket is used for testing ListObject operations. // This bucket is used for testing ListObject operations.
err = obj.MakeBucket("test-bucket-list-object") err = obj.MakeBucket("test-bucket-list-object")
if err != nil { if err != nil {
@ -56,25 +52,25 @@ func TestListObjects(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
tmpfile, e := ioutil.TempFile("", "simple-file.txt") tmpfile, err := ioutil.TempFile("", "simple-file.txt")
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
defer os.Remove(tmpfile.Name()) // clean up defer os.Remove(tmpfile.Name()) // clean up
_, err = obj.PutObject("test-bucket-list-object", "Asia-maps", int64(len("asia-maps")), bytes.NewBufferString("asia-maps"), nil) _, err = obj.PutObject("test-bucket-list-object", "Asia-maps", int64(len("asia-maps")), bytes.NewBufferString("asia-maps"), nil)
if err != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
_, err = obj.PutObject("test-bucket-list-object", "Asia/India/India-summer-photos-1", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil) _, err = obj.PutObject("test-bucket-list-object", "Asia/India/India-summer-photos-1", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
if err != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
_, err = obj.PutObject("test-bucket-list-object", "Asia/India/Karnataka/Bangalore/Koramangala/pics", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil) _, err = obj.PutObject("test-bucket-list-object", "Asia/India/Karnataka/Bangalore/Koramangala/pics", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
if err != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
@ -86,7 +82,7 @@ func TestListObjects(t *testing.T) {
} }
_, err = obj.PutObject("test-bucket-list-object", "newzen/zen/recurse/again/again/again/pics", int64(len("recurse")), bytes.NewBufferString("recurse"), nil) _, err = obj.PutObject("test-bucket-list-object", "newzen/zen/recurse/again/again/again/pics", int64(len("recurse")), bytes.NewBufferString("recurse"), nil)
if err != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
@ -536,15 +532,15 @@ func TestListObjects(t *testing.T) {
for i, testCase := range testCases { for i, testCase := range testCases {
result, err := obj.ListObjects(testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, testCase.maxKeys) result, err := obj.ListObjects(testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, testCase.maxKeys)
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error()) t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
} }
if err == nil && !testCase.shouldPass { if err == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error()) t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
} }
// Failed as expected, but does it fail for the expected reason. // Failed as expected, but does it fail for the expected reason.
if err != nil && !testCase.shouldPass { if err != nil && !testCase.shouldPass {
if !strings.Contains(err.Cause.Error(), testCase.err.Error()) { if !strings.Contains(err.Error(), testCase.err.Error()) {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Cause.Error()) t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
} }
} }
// Since there are cases for which ListObjects fails, this is necessary. // Since there are cases for which ListObjects fails, this is necessary.
@ -571,21 +567,18 @@ func TestListObjects(t *testing.T) {
func BenchmarkListObjects(b *testing.B) { func BenchmarkListObjects(b *testing.B) {
// Make a temporary directory to use as the obj. // Make a temporary directory to use as the obj.
directory, e := ioutil.TempDir("", "minio-list-benchmark") directory, err := ioutil.TempDir("", "minio-list-benchmark")
if e != nil { if err != nil {
b.Fatal(e) b.Fatal(err)
} }
defer os.RemoveAll(directory) defer os.RemoveAll(directory)
// Create the obj. // Create the obj.
fs, e := newFS(directory) obj, err := newFSObjects(directory)
if e != nil { if err != nil {
b.Fatal(e) b.Fatal(err)
} }
obj := newObjectLayer(fs)
var err *probe.Error
// Create a bucket. // Create a bucket.
err = obj.MakeBucket("ls-benchmark-bucket") err = obj.MakeBucket("ls-benchmark-bucket")
if err != nil { if err != nil {

View File

@ -21,27 +21,23 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path"
"testing" "testing"
) )
// Tests validate creation of new multipart upload instance. // Tests validate creation of new multipart upload instance.
func TestObjectNewMultipartUpload(t *testing.T) { func TestObjectNewMultipartUpload(t *testing.T) {
directory, e := ioutil.TempDir("", "minio-multipart-1-test") directory, err := ioutil.TempDir("", "minio-multipart-1-test")
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
defer os.RemoveAll(directory) defer os.RemoveAll(directory)
// Initialize fs layer. // Initialize fs object layer.
fs, e := newFS(directory) obj, err := newFSObjects(directory)
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
// Initialize object layer.
obj := newObjectLayer(fs)
bucket := "minio-bucket" bucket := "minio-bucket"
object := "minio-object" object := "minio-object"
@ -49,77 +45,68 @@ func TestObjectNewMultipartUpload(t *testing.T) {
// opearation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist. // opearation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
uploadID, err := obj.NewMultipartUpload(bucket, object) uploadID, err := obj.NewMultipartUpload(bucket, object)
if err == nil { if err == nil {
t.Fatalf("Expcected to fail since the NewMultipartUpload is intialized on a non-existant bucket.") t.Fatalf("Expected to fail since the NewMultipartUpload is intialized on a non-existant bucket.")
} }
if errMsg != err.ToGoError().Error() { if errMsg != err.Error() {
t.Errorf("Expected to fail with Error \"%s\", but instead found \"%s\".", errMsg, err.ToGoError().Error()) t.Errorf("Expected to fail with Error \"%s\", but instead found \"%s\".", errMsg, err.Error())
} }
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err = obj.MakeBucket(bucket) err = obj.MakeBucket(bucket)
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatal(err.ToGoError()) t.Fatal(err)
} }
uploadID, err = obj.NewMultipartUpload(bucket, object) uploadID, err = obj.NewMultipartUpload(bucket, object)
if err != nil { if err != nil {
t.Fatal(err.ToGoError()) t.Fatal(err)
} }
uploadIDPath := path.Join(bucket, object, uploadID) err = obj.AbortMultipartUpload(bucket, object, uploadID)
_, e = obj.storage.StatFile(minioMetaVolume, uploadIDPath) if err != nil {
if e != nil { switch err.(type) {
if e == errFileNotFound { case InvalidUploadID:
t.Fatalf("New Multipart upload failed to create uuid file.") t.Fatalf("New Multipart upload failed to create uuid file.")
default:
t.Fatalf(err.Error())
} }
t.Fatalf(e.Error())
} }
} }
// Tests validates the validator for existence of uploadID. // Tests validates the validator for existence of uploadID.
func TestObjectAPIIsUploadIDExists(t *testing.T) { func TestObjectAPIIsUploadIDExists(t *testing.T) {
directory, e := ioutil.TempDir("", "minio-multipart-2-test") directory, err := ioutil.TempDir("", "minio-multipart-2-test")
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
defer os.RemoveAll(directory) defer os.RemoveAll(directory)
// Initialize fs layer. // Initialize fs object layer.
fs, e := newFS(directory) obj, err := newFSObjects(directory)
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
// Initialize object layer.
obj := newObjectLayer(fs)
bucket := "minio-bucket" bucket := "minio-bucket"
object := "minio-object" object := "minio-object"
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err := obj.MakeBucket(bucket) err = obj.MakeBucket(bucket)
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatal(err.ToGoError()) t.Fatal(err)
} }
// UploadID file shouldn't exist. _, err = obj.NewMultipartUpload(bucket, object)
isExists, e := obj.isUploadIDExists(bucket, object, "abc")
if e == nil && isExists {
t.Fatal("Expected uploadIDPath to not to exist.")
}
uploadID, err := obj.NewMultipartUpload(bucket, object)
if err != nil { if err != nil {
t.Fatal(err.ToGoError()) t.Fatal(err)
} }
// UploadID file should exist.
isExists, e = obj.isUploadIDExists(bucket, object, uploadID) err = obj.AbortMultipartUpload(bucket, object, "abc")
if e != nil { switch err.(type) {
t.Fatal(e.Error()) case InvalidUploadID:
} default:
if !isExists {
t.Fatal("Expected uploadIDPath to exist.") t.Fatal("Expected uploadIDPath to exist.")
} }
} }
@ -127,40 +114,38 @@ func TestObjectAPIIsUploadIDExists(t *testing.T) {
// Tests validate correctness of PutObjectPart. // Tests validate correctness of PutObjectPart.
func TestObjectAPIPutObjectPart(t *testing.T) { func TestObjectAPIPutObjectPart(t *testing.T) {
// Generating cases for which the PutObjectPart fails. // Generating cases for which the PutObjectPart fails.
directory, e := ioutil.TempDir("", "minio-multipart-3-test") directory, err := ioutil.TempDir("", "minio-multipart-3-test")
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
defer os.RemoveAll(directory) defer os.RemoveAll(directory)
// Initializing fs layer. // Initializing fs object layer.
fs, e := newFS(directory) obj, err := newFSObjects(directory)
if e != nil { if err != nil {
t.Fatal(e) t.Fatal(err)
} }
bucket := "minio-bucket" bucket := "minio-bucket"
object := "minio-object" object := "minio-object"
// Initializing object layer.
obj := newObjectLayer(fs)
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err := obj.MakeBucket(bucket) err = obj.MakeBucket(bucket)
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatal(err.ToGoError()) t.Fatal(err)
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(bucket, object) uploadID, err := obj.NewMultipartUpload(bucket, object)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatal(err.ToGoError()) t.Fatal(err)
} }
// Creating a dummy bucket for tests. // Creating a dummy bucket for tests.
err = obj.MakeBucket("unused-bucket") err = obj.MakeBucket("unused-bucket")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatal(err.ToGoError()) t.Fatal(err)
} }
failCases := []struct { failCases := []struct {
@ -235,31 +220,24 @@ func TestObjectAPIPutObjectPart(t *testing.T) {
// All are test cases above are expected to fail. // All are test cases above are expected to fail.
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s.", i+1, actualErr.ToGoError().Error()) t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s.", i+1, actualErr.Error())
} }
if actualErr == nil && !testCase.shouldPass { if actualErr == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, testCase.expectedError.Error()) t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, testCase.expectedError.Error())
} }
// Failed as expected, but does it fail for the expected reason. // Failed as expected, but does it fail for the expected reason.
if actualErr != nil && !testCase.shouldPass { if actualErr != nil && !testCase.shouldPass {
if testCase.expectedError.Error() != actualErr.ToGoError().Error() { if testCase.expectedError.Error() != actualErr.Error() {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1,
testCase.expectedError.Error(), actualErr.ToGoError().Error()) testCase.expectedError.Error(), actualErr.Error())
} }
} }
// Since there are cases for which ListObjects fails, this is necessary.
// Test passes as expected, but the output values are verified for correctness here. // Test passes as expected, but the output values are verified for correctness here.
if actualErr == nil && testCase.shouldPass { if actualErr == nil && testCase.shouldPass {
// Asserting whether the md5 output is correct. // Asserting whether the md5 output is correct.
if testCase.inputMd5 != actualMd5Hex { if testCase.inputMd5 != actualMd5Hex {
t.Errorf("Test %d: Calculated Md5 different from the actual one %s.", i+1, actualMd5Hex) t.Errorf("Test %d: Calculated Md5 different from the actual one %s.", i+1, actualMd5Hex)
} }
partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, testCase.PartID, testCase.inputMd5)
// Verifying whether the part file is created.
_, e := obj.storage.StatFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
if e != nil {
t.Errorf("Test %d: Failed to create the Part file.", i+1)
}
} }
} }
} }

View File

@ -29,12 +29,11 @@ var _ = Suite(&MySuite{})
func (s *MySuite) TestFSAPISuite(c *C) { func (s *MySuite) TestFSAPISuite(c *C) {
var storageList []string var storageList []string
create := func() objectAPI { create := func() ObjectLayer {
path, err := ioutil.TempDir(os.TempDir(), "minio-") path, err := ioutil.TempDir(os.TempDir(), "minio-")
c.Check(err, IsNil) c.Check(err, IsNil)
storageAPI, err := newStorageAPI(path) objAPI, err := newFSObjects(path)
c.Check(err, IsNil) c.Check(err, IsNil)
objAPI := newObjectLayer(storageAPI)
storageList = append(storageList, path) storageList = append(storageList, path)
return objAPI return objAPI
} }
@ -48,7 +47,7 @@ func (s *MySuite) TestXLAPISuite(c *C) {
// Initialize name space lock. // Initialize name space lock.
initNSLock() initNSLock()
create := func() objectAPI { create := func() ObjectLayer {
var nDisks = 16 // Maximum disks. var nDisks = 16 // Maximum disks.
var erasureDisks []string var erasureDisks []string
for i := 0; i < nDisks; i++ { for i := 0; i < nDisks; i++ {
@ -56,10 +55,8 @@ func (s *MySuite) TestXLAPISuite(c *C) {
c.Check(err, IsNil) c.Check(err, IsNil)
erasureDisks = append(erasureDisks, path) erasureDisks = append(erasureDisks, path)
} }
storageList = append(storageList, erasureDisks...) objAPI, err := newXLObjects(erasureDisks...)
storageAPI, err := newStorageAPI(erasureDisks...)
c.Check(err, IsNil) c.Check(err, IsNil)
objAPI := newObjectLayer(storageAPI)
return objAPI return objAPI
} }
APITestSuite(c, create) APITestSuite(c, create)

View File

@ -32,7 +32,6 @@ import (
fastSha256 "github.com/minio/minio/pkg/crypto/sha256" fastSha256 "github.com/minio/minio/pkg/crypto/sha256"
mux "github.com/gorilla/mux" mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/probe"
) )
// supportedGetReqParams - supported request parameters for GET presigned request. // supportedGetReqParams - supported request parameters for GET presigned request.
@ -100,7 +99,8 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// Fetch object stat info. // Fetch object stat info.
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
switch err.ToGoError().(type) { errorIf(err, "GetObjectInfo failed.", nil)
switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -110,7 +110,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
case ObjectNameInvalid: case ObjectNameInvalid:
writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
default: default:
errorIf(err.Trace(), "GetObjectInfo failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
} }
return return
@ -137,13 +136,13 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
startOffset := hrange.start startOffset := hrange.start
readCloser, err := api.ObjectAPI.GetObject(bucket, object, startOffset) readCloser, err := api.ObjectAPI.GetObject(bucket, object, startOffset)
if err != nil { if err != nil {
switch err.ToGoError().(type) { switch err.(type) {
case BucketNotFound: case BucketNotFound:
writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
case ObjectNotFound: case ObjectNotFound:
writeErrorResponse(w, r, errAllowableObjectNotFound(bucket, r), r.URL.Path) writeErrorResponse(w, r, errAllowableObjectNotFound(bucket, r), r.URL.Path)
default: default:
errorIf(err.Trace(), "GetObject failed.", nil) errorIf(err, "GetObject failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
} }
return return
@ -157,14 +156,14 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
setGetRespHeaders(w, r.URL.Query()) setGetRespHeaders(w, r.URL.Query())
if hrange.length > 0 { if hrange.length > 0 {
if _, e := io.CopyN(w, readCloser, hrange.length); e != nil { if _, err := io.CopyN(w, readCloser, hrange.length); err != nil {
errorIf(probe.NewError(e), "Writing to client failed", nil) errorIf(err, "Writing to client failed", nil)
// Do not send error response here, since client could have died. // Do not send error response here, since client could have died.
return return
} }
} else { } else {
if _, e := io.Copy(w, readCloser); e != nil { if _, err := io.Copy(w, readCloser); err != nil {
errorIf(probe.NewError(e), "Writing to client failed", nil) errorIf(err, "Writing to client failed", nil)
// Do not send error response here, since client could have died. // Do not send error response here, since client could have died.
return return
} }
@ -294,8 +293,8 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err.Trace(bucket, object), "GetObjectInfo failed.", nil) errorIf(err, "GetObjectInfo failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -387,8 +386,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
objInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject) objInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject)
if err != nil { if err != nil {
errorIf(err.Trace(), "GetObjectInfo failed.", nil) errorIf(err, "GetObjectInfo failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, objectSource) writeErrorResponse(w, r, ErrInvalidBucketName, objectSource)
case BucketNotFound: case BucketNotFound:
@ -425,10 +424,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
var md5Bytes []byte var md5Bytes []byte
if objInfo.MD5Sum != "" { if objInfo.MD5Sum != "" {
var e error md5Bytes, err = hex.DecodeString(objInfo.MD5Sum)
md5Bytes, e = hex.DecodeString(objInfo.MD5Sum) if err != nil {
if e != nil { errorIf(err, "Decoding md5 failed.", nil)
errorIf(probe.NewError(e), "Decoding md5 failed.", nil)
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return return
} }
@ -436,10 +434,10 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
startOffset := int64(0) // Read the whole file. startOffset := int64(0) // Read the whole file.
// Get the object. // Get the object.
readCloser, getErr := api.ObjectAPI.GetObject(sourceBucket, sourceObject, startOffset) readCloser, err := api.ObjectAPI.GetObject(sourceBucket, sourceObject, startOffset)
if getErr != nil { if err != nil {
errorIf(getErr.Trace(sourceBucket, sourceObject), "Reading "+objectSource+" failed.", nil) errorIf(err, "Reading "+objectSource+" failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNotFound: case BucketNotFound:
writeErrorResponse(w, r, ErrNoSuchBucket, objectSource) writeErrorResponse(w, r, ErrNoSuchBucket, objectSource)
case ObjectNotFound: case ObjectNotFound:
@ -459,7 +457,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Create the object. // Create the object.
md5Sum, err := api.ObjectAPI.PutObject(bucket, object, size, readCloser, metadata) md5Sum, err := api.ObjectAPI.PutObject(bucket, object, size, readCloser, metadata)
if err != nil { if err != nil {
switch err.ToGoError().(type) { errorIf(err, "PutObject failed.", nil)
switch err.(type) {
case StorageFull: case StorageFull:
writeErrorResponse(w, r, ErrStorageFull, r.URL.Path) writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -473,7 +472,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
case ObjectExistsAsPrefix: case ObjectExistsAsPrefix:
writeErrorResponse(w, r, ErrObjectExistsAsPrefix, r.URL.Path) writeErrorResponse(w, r, ErrObjectExistsAsPrefix, r.URL.Path)
default: default:
errorIf(err.Trace(), "PutObject failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
} }
return return
@ -481,7 +479,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
objInfo, err = api.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err = api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err.Trace(), "GetObjectInfo failed.", nil) errorIf(err, "GetObjectInfo failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
@ -605,7 +603,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
// Get Content-Md5 sent by client and verify if valid // Get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5")) md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil { if err != nil {
errorIf(err.Trace(r.Header.Get("Content-Md5")), "Decoding md5 failed.", nil) errorIf(err, "Decoding md5 failed.", nil)
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return return
} }
@ -643,9 +641,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
go func() { go func() {
shaWriter := fastSha256.New() shaWriter := fastSha256.New()
multiWriter := io.MultiWriter(shaWriter, writer) multiWriter := io.MultiWriter(shaWriter, writer)
if _, e := io.CopyN(multiWriter, r.Body, size); e != nil { if _, cerr := io.CopyN(multiWriter, r.Body, size); cerr != nil {
errorIf(probe.NewError(e), "Unable to read HTTP body.", nil) errorIf(cerr, "Unable to read HTTP body.", nil)
writer.CloseWithError(e) writer.CloseWithError(err)
return return
} }
shaPayload := shaWriter.Sum(nil) shaPayload := shaWriter.Sum(nil)
@ -676,14 +674,13 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata) md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata)
} }
if err != nil { if err != nil {
errorIf(err.Trace(), "PutObject failed.", nil) errorIf(err, "PutObject failed.", nil)
e := err.ToGoError()
// Verify if the underlying error is signature mismatch. // Verify if the underlying error is signature mismatch.
if e == errSignatureMismatch { if err == errSignatureMismatch {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path) writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return return
} }
switch e.(type) { switch err.(type) {
case StorageFull: case StorageFull:
writeErrorResponse(w, r, ErrStorageFull, r.URL.Path) writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -736,8 +733,8 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
uploadID, err := api.ObjectAPI.NewMultipartUpload(bucket, object) uploadID, err := api.ObjectAPI.NewMultipartUpload(bucket, object)
if err != nil { if err != nil {
errorIf(err.Trace(), "NewMultipartUpload failed.", nil) errorIf(err, "NewMultipartUpload failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case StorageFull: case StorageFull:
writeErrorResponse(w, r, ErrStorageFull, r.URL.Path) writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
case BucketNameInvalid: case BucketNameInvalid:
@ -771,7 +768,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
// get Content-Md5 sent by client and verify if valid // get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5")) md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil { if err != nil {
errorIf(err.Trace(r.Header.Get("Content-Md5")), "Decoding md5 failed.", nil) errorIf(err, "Decoding md5 failed.", nil)
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return return
} }
@ -792,8 +789,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
uploadID := r.URL.Query().Get("uploadId") uploadID := r.URL.Query().Get("uploadId")
partIDString := r.URL.Query().Get("partNumber") partIDString := r.URL.Query().Get("partNumber")
partID, e := strconv.Atoi(partIDString) partID, err := strconv.Atoi(partIDString)
if e != nil { if err != nil {
writeErrorResponse(w, r, ErrInvalidPart, r.URL.Path) writeErrorResponse(w, r, ErrInvalidPart, r.URL.Path)
return return
} }
@ -822,9 +819,9 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
go func() { go func() {
shaWriter := fastSha256.New() shaWriter := fastSha256.New()
multiWriter := io.MultiWriter(shaWriter, writer) multiWriter := io.MultiWriter(shaWriter, writer)
if _, e := io.CopyN(multiWriter, r.Body, size); e != nil { if _, err = io.CopyN(multiWriter, r.Body, size); err != nil {
errorIf(probe.NewError(e), "Unable to read HTTP body.", nil) errorIf(err, "Unable to read HTTP body.", nil)
writer.CloseWithError(e) writer.CloseWithError(err)
return return
} }
shaPayload := shaWriter.Sum(nil) shaPayload := shaWriter.Sum(nil)
@ -848,14 +845,13 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, hex.EncodeToString(md5Bytes)) partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, hex.EncodeToString(md5Bytes))
} }
if err != nil { if err != nil {
errorIf(err.Trace(), "PutObjectPart failed.", nil) errorIf(err, "PutObjectPart failed.", nil)
e := err.ToGoError()
// Verify if the underlying error is signature mismatch. // Verify if the underlying error is signature mismatch.
if e == errSignatureMismatch { if err == errSignatureMismatch {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path) writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return return
} }
switch e.(type) { switch err.(type) {
case StorageFull: case StorageFull:
writeErrorResponse(w, r, ErrStorageFull, r.URL.Path) writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
case InvalidUploadID: case InvalidUploadID:
@ -900,10 +896,9 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
} }
uploadID, _, _, _ := getObjectResources(r.URL.Query()) uploadID, _, _, _ := getObjectResources(r.URL.Query())
err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID) if err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil {
if err != nil { errorIf(err, "AbortMutlipartUpload failed.", nil)
errorIf(err.Trace(), "AbortMutlipartUpload failed.", nil) switch err.(type) {
switch err.ToGoError().(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -961,8 +956,8 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
listPartsInfo, err := api.ObjectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) listPartsInfo, err := api.ObjectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil { if err != nil {
errorIf(err.Trace(), "ListObjectParts failed.", nil) errorIf(err, "ListObjectParts failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -998,7 +993,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
uploadID, _, _, _ := getObjectResources(r.URL.Query()) uploadID, _, _, _ := getObjectResources(r.URL.Query())
var md5Sum string var md5Sum string
var err *probe.Error var err error
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -1016,15 +1011,15 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
return return
} }
} }
completeMultipartBytes, e := ioutil.ReadAll(r.Body) completeMultipartBytes, err := ioutil.ReadAll(r.Body)
if e != nil { if err != nil {
errorIf(probe.NewError(e), "CompleteMultipartUpload failed.", nil) errorIf(err, "CompleteMultipartUpload failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
complMultipartUpload := &completeMultipartUpload{} complMultipartUpload := &completeMultipartUpload{}
if e = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); e != nil { if err = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); err != nil {
errorIf(probe.NewError(e), "XML Unmarshal failed", nil) errorIf(err, "XML Unmarshal failed", nil)
writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
return return
} }
@ -1042,8 +1037,8 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
// Complete multipart upload. // Complete multipart upload.
md5Sum, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts) md5Sum, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
if err != nil { if err != nil {
errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil) errorIf(err, "CompleteMultipartUpload failed.", nil)
switch err.ToGoError().(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketNotFound: case BucketNotFound:
@ -1099,10 +1094,9 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
return return
} }
} }
err := api.ObjectAPI.DeleteObject(bucket, object) if err := api.ObjectAPI.DeleteObject(bucket, object); err != nil {
if err != nil { errorIf(err, "DeleteObject failed.", nil)
errorIf(err.Trace(), "DeleteObject failed.", nil) switch err.(type) {
switch err.ToGoError().(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case BucketNotFound: case BucketNotFound:

43
object-interface.go Normal file
View File

@ -0,0 +1,43 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import "io"
// ObjectLayer implements primitives for object API layer.
type ObjectLayer interface {
// Bucket operations.
MakeBucket(bucket string) error
GetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
ListBuckets() (buckets []BucketInfo, err error)
DeleteBucket(bucket string) error
ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
// Object operations.
GetObject(bucket, object string, startOffset int64) (reader io.ReadCloser, err error)
GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string) (md5 string, err error)
DeleteObject(bucket, object string) error
// Multipart operations.
ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
NewMultipartUpload(bucket, object string) (uploadID string, err error)
PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (md5 string, err error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
AbortMultipartUpload(bucket, object, uploadID string) error
CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (md5 string, err error)
}

View File

@ -17,9 +17,21 @@
package main package main
import ( import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"regexp" "regexp"
"strings" "strings"
"unicode/utf8" "unicode/utf8"
"github.com/minio/minio/pkg/safe"
)
const (
// Minio meta volume.
minioMetaVolume = ".minio"
) )
// validBucket regexp. // validBucket regexp.
@ -96,3 +108,41 @@ func retainSlash(s string) string {
func pathJoin(s1 string, s2 string) string { func pathJoin(s1 string, s2 string) string {
return retainSlash(s1) + s2 return retainSlash(s1) + s2
} }
// Create an s3 compatible MD5sum for complete multipart transaction.
func makeS3MD5(md5Strs ...string) (string, error) {
var finalMD5Bytes []byte
for _, md5Str := range md5Strs {
md5Bytes, err := hex.DecodeString(md5Str)
if err != nil {
return "", err
}
finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
}
md5Hasher := md5.New()
md5Hasher.Write(finalMD5Bytes)
s3MD5 := fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(md5Strs))
return s3MD5, nil
}
// byBucketName is a collection satisfying sort.Interface.
type byBucketName []BucketInfo
func (d byBucketName) Len() int { return len(d) }
func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name }
// safeCloseAndRemove - safely closes and removes underlying temporary
// file writer if possible.
func safeCloseAndRemove(writer io.WriteCloser) error {
// If writer is a safe file, Attempt to close and remove.
safeWriter, ok := writer.(*safe.File)
if ok {
return safeWriter.CloseAndRemove()
}
pipeWriter, ok := writer.(*io.PipeWriter)
if ok {
return pipeWriter.CloseWithError(errors.New("Close and error out."))
}
return nil
}

View File

@ -31,7 +31,7 @@ import (
// TODO - enable all the commented tests. // TODO - enable all the commented tests.
// APITestSuite - collection of API tests. // APITestSuite - collection of API tests.
func APITestSuite(c *check.C, create func() objectAPI) { func APITestSuite(c *check.C, create func() ObjectLayer) {
testMakeBucket(c, create) testMakeBucket(c, create)
testMultipleObjectCreation(c, create) testMultipleObjectCreation(c, create)
testPaging(c, create) testPaging(c, create)
@ -50,7 +50,7 @@ func APITestSuite(c *check.C, create func() objectAPI) {
} }
// Tests validate bucket creation. // Tests validate bucket creation.
func testMakeBucket(c *check.C, create func() objectAPI) { func testMakeBucket(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("bucket-unknown") err := obj.MakeBucket("bucket-unknown")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -58,7 +58,7 @@ func testMakeBucket(c *check.C, create func() objectAPI) {
} }
// Tests validate creation of part files during Multipart operation. // Tests validate creation of part files during Multipart operation.
func testMultipartObjectCreation(c *check.C, create func() objectAPI) { func testMultipartObjectCreation(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("bucket") err := obj.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -83,7 +83,7 @@ func testMultipartObjectCreation(c *check.C, create func() objectAPI) {
} }
// Tests validate abortion of Multipart operation. // Tests validate abortion of Multipart operation.
func testMultipartObjectAbort(c *check.C, create func() objectAPI) { func testMultipartObjectAbort(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("bucket") err := obj.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -115,7 +115,7 @@ func testMultipartObjectAbort(c *check.C, create func() objectAPI) {
} }
// Tests validate object creation. // Tests validate object creation.
func testMultipleObjectCreation(c *check.C, create func() objectAPI) { func testMultipleObjectCreation(c *check.C, create func() ObjectLayer) {
objects := make(map[string][]byte) objects := make(map[string][]byte)
obj := create() obj := create()
err := obj.MakeBucket("bucket") err := obj.MakeBucket("bucket")
@ -157,7 +157,7 @@ func testMultipleObjectCreation(c *check.C, create func() objectAPI) {
} }
// Tests validate creation of objects and the order of listing using various filters for ListObjects operation. // Tests validate creation of objects and the order of listing using various filters for ListObjects operation.
func testPaging(c *check.C, create func() objectAPI) { func testPaging(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
obj.MakeBucket("bucket") obj.MakeBucket("bucket")
result, err := obj.ListObjects("bucket", "", "", "", 0) result, err := obj.ListObjects("bucket", "", "", "", 0)
@ -261,7 +261,7 @@ func testPaging(c *check.C, create func() objectAPI) {
} }
// Tests validate overwriting of an existing object. // Tests validate overwriting of an existing object.
func testObjectOverwriteWorks(c *check.C, create func() objectAPI) { func testObjectOverwriteWorks(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("bucket") err := obj.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -284,25 +284,25 @@ func testObjectOverwriteWorks(c *check.C, create func() objectAPI) {
} }
// Tests validate that bucket operation on non-existent bucket fails. // Tests validate that bucket operation on non-existent bucket fails.
func testNonExistantBucketOperations(c *check.C, create func() objectAPI) { func testNonExistantBucketOperations(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
_, err := obj.PutObject("bucket1", "object", int64(len("one")), bytes.NewBufferString("one"), nil) _, err := obj.PutObject("bucket1", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
c.Assert(err, check.Not(check.IsNil)) c.Assert(err, check.Not(check.IsNil))
c.Assert(err.ToGoError().Error(), check.Equals, "Bucket not found: bucket1") c.Assert(err.Error(), check.Equals, "Bucket not found: bucket1")
} }
// Tests validate that recreation of the bucket fails. // Tests validate that recreation of the bucket fails.
func testBucketRecreateFails(c *check.C, create func() objectAPI) { func testBucketRecreateFails(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("string") err := obj.MakeBucket("string")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
err = obj.MakeBucket("string") err = obj.MakeBucket("string")
c.Assert(err, check.Not(check.IsNil)) c.Assert(err, check.Not(check.IsNil))
c.Assert(err.ToGoError().Error(), check.Equals, "Bucket exists: string") c.Assert(err.Error(), check.Equals, "Bucket exists: string")
} }
// Tests validate PutObject with subdirectory prefix. // Tests validate PutObject with subdirectory prefix.
func testPutObjectInSubdir(c *check.C, create func() objectAPI) { func testPutObjectInSubdir(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("bucket") err := obj.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -321,7 +321,7 @@ func testPutObjectInSubdir(c *check.C, create func() objectAPI) {
} }
// Tests validate ListBuckets. // Tests validate ListBuckets.
func testListBuckets(c *check.C, create func() objectAPI) { func testListBuckets(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
// test empty list. // test empty list.
@ -354,7 +354,7 @@ func testListBuckets(c *check.C, create func() objectAPI) {
} }
// Tests validate the order of result of ListBuckets. // Tests validate the order of result of ListBuckets.
func testListBucketsOrder(c *check.C, create func() objectAPI) { func testListBucketsOrder(c *check.C, create func() ObjectLayer) {
// if implementation contains a map, order of map keys will vary. // if implementation contains a map, order of map keys will vary.
// this ensures they return in the same order each time. // this ensures they return in the same order each time.
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
@ -373,24 +373,24 @@ func testListBucketsOrder(c *check.C, create func() objectAPI) {
} }
// Tests validate that ListObjects operation on a non-existent bucket fails as expected. // Tests validate that ListObjects operation on a non-existent bucket fails as expected.
func testListObjectsTestsForNonExistantBucket(c *check.C, create func() objectAPI) { func testListObjectsTestsForNonExistantBucket(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
result, err := obj.ListObjects("bucket", "", "", "", 1000) result, err := obj.ListObjects("bucket", "", "", "", 1000)
c.Assert(err, check.Not(check.IsNil)) c.Assert(err, check.Not(check.IsNil))
c.Assert(result.IsTruncated, check.Equals, false) c.Assert(result.IsTruncated, check.Equals, false)
c.Assert(len(result.Objects), check.Equals, 0) c.Assert(len(result.Objects), check.Equals, 0)
c.Assert(err.ToGoError().Error(), check.Equals, "Bucket not found: bucket") c.Assert(err.Error(), check.Equals, "Bucket not found: bucket")
} }
// Tests validate that GetObject fails on a non-existent bucket as expected. // Tests validate that GetObject fails on a non-existent bucket as expected.
func testNonExistantObjectInBucket(c *check.C, create func() objectAPI) { func testNonExistantObjectInBucket(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("bucket") err := obj.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
_, err = obj.GetObject("bucket", "dir1", 0) _, err = obj.GetObject("bucket", "dir1", 0)
c.Assert(err, check.Not(check.IsNil)) c.Assert(err, check.Not(check.IsNil))
switch err := err.ToGoError().(type) { switch err := err.(type) {
case ObjectNotFound: case ObjectNotFound:
c.Assert(err, check.ErrorMatches, "Object not found: bucket#dir1") c.Assert(err, check.ErrorMatches, "Object not found: bucket#dir1")
default: default:
@ -399,7 +399,7 @@ func testNonExistantObjectInBucket(c *check.C, create func() objectAPI) {
} }
// Tests validate that GetObject on an existing directory fails as expected. // Tests validate that GetObject on an existing directory fails as expected.
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() objectAPI) { func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("bucket") err := obj.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -408,7 +408,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() objectAPI)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
_, err = obj.GetObject("bucket", "dir1", 0) _, err = obj.GetObject("bucket", "dir1", 0)
switch err := err.ToGoError().(type) { switch err := err.(type) {
case ObjectNotFound: case ObjectNotFound:
c.Assert(err.Bucket, check.Equals, "bucket") c.Assert(err.Bucket, check.Equals, "bucket")
c.Assert(err.Object, check.Equals, "dir1") c.Assert(err.Object, check.Equals, "dir1")
@ -418,7 +418,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() objectAPI)
} }
_, err = obj.GetObject("bucket", "dir1/", 0) _, err = obj.GetObject("bucket", "dir1/", 0)
switch err := err.ToGoError().(type) { switch err := err.(type) {
case ObjectNotFound: case ObjectNotFound:
c.Assert(err.Bucket, check.Equals, "bucket") c.Assert(err.Bucket, check.Equals, "bucket")
c.Assert(err.Object, check.Equals, "dir1/") c.Assert(err.Object, check.Equals, "dir1/")
@ -429,7 +429,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() objectAPI)
} }
// Tests valdiate the default ContentType. // Tests valdiate the default ContentType.
func testDefaultContentType(c *check.C, create func() objectAPI) { func testDefaultContentType(c *check.C, create func() ObjectLayer) {
obj := create() obj := create()
err := obj.MakeBucket("bucket") err := obj.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)

View File

@ -30,7 +30,6 @@ import (
"sync" "sync"
"github.com/fatih/structs" "github.com/fatih/structs"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/safe" "github.com/minio/minio/pkg/safe"
) )
@ -38,11 +37,11 @@ import (
type Config interface { type Config interface {
String() string String() string
Version() string Version() string
Save(string) *probe.Error Save(string) error
Load(string) *probe.Error Load(string) error
Data() interface{} Data() interface{}
Diff(Config) ([]structs.Field, *probe.Error) Diff(Config) ([]structs.Field, error)
DeepDiff(Config) ([]structs.Field, *probe.Error) DeepDiff(Config) ([]structs.Field, error)
} }
// config - implements quick.Config interface // config - implements quick.Config interface
@ -53,28 +52,28 @@ type config struct {
// CheckData - checks the validity of config data. Data should be of // CheckData - checks the validity of config data. Data should be of
// type struct and contain a string type field called "Version". // type struct and contain a string type field called "Version".
func CheckData(data interface{}) *probe.Error { func CheckData(data interface{}) error {
if !structs.IsStruct(data) { if !structs.IsStruct(data) {
return probe.NewError(fmt.Errorf("Invalid argument type. Expecing \"struct\" type.")) return fmt.Errorf("Invalid argument type. Expecing \"struct\" type.")
} }
st := structs.New(data) st := structs.New(data)
f, ok := st.FieldOk("Version") f, ok := st.FieldOk("Version")
if !ok { if !ok {
return probe.NewError(fmt.Errorf("Invalid type of struct argument. No [%s.Version] field found.", st.Name())) return fmt.Errorf("Invalid type of struct argument. No [%s.Version] field found.", st.Name())
} }
if f.Kind() != reflect.String { if f.Kind() != reflect.String {
return probe.NewError(fmt.Errorf("Invalid type of struct argument. Expecting \"string\" type [%s.Version] field.", st.Name())) return fmt.Errorf("Invalid type of struct argument. Expecting \"string\" type [%s.Version] field.", st.Name())
} }
return nil return nil
} }
// New - instantiate a new config // New - instantiate a new config
func New(data interface{}) (Config, *probe.Error) { func New(data interface{}) (Config, error) {
if err := CheckData(data); err != nil { if err := CheckData(data); err != nil {
return nil, err.Trace() return nil, err
} }
d := new(config) d := new(config)
@ -85,15 +84,15 @@ func New(data interface{}) (Config, *probe.Error) {
// CheckVersion - loads json and compares the version number provided returns back true or false - any failure // CheckVersion - loads json and compares the version number provided returns back true or false - any failure
// is returned as error. // is returned as error.
func CheckVersion(filename string, version string) (bool, *probe.Error) { func CheckVersion(filename string, version string) (bool, error) {
_, e := os.Stat(filename) _, err := os.Stat(filename)
if e != nil { if err != nil {
return false, probe.NewError(e) return false, err
} }
fileData, e := ioutil.ReadFile(filename) fileData, err := ioutil.ReadFile(filename)
if e != nil { if err != nil {
return false, probe.NewError(e) return false, err
} }
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
@ -104,18 +103,18 @@ func CheckVersion(filename string, version string) (bool, *probe.Error) {
}{ }{
Version: "", Version: "",
} }
e = json.Unmarshal(fileData, &data) err = json.Unmarshal(fileData, &data)
if e != nil { if err != nil {
switch e := e.(type) { switch err := err.(type) {
case *json.SyntaxError: case *json.SyntaxError:
return false, probe.NewError(FormatJSONSyntaxError(bytes.NewReader(fileData), e)) return false, FormatJSONSyntaxError(bytes.NewReader(fileData), err)
default: default:
return false, probe.NewError(e) return false, err
} }
} }
config, err := New(data) config, err := New(data)
if err != nil { if err != nil {
return false, err.Trace() return false, err
} }
if config.Version() != version { if config.Version() != version {
return false, nil return false, nil
@ -124,34 +123,34 @@ func CheckVersion(filename string, version string) (bool, *probe.Error) {
} }
// Load - loads json config from filename for the a given struct data // Load - loads json config from filename for the a given struct data
func Load(filename string, data interface{}) (Config, *probe.Error) { func Load(filename string, data interface{}) (Config, error) {
_, e := os.Stat(filename) _, err := os.Stat(filename)
if e != nil { if err != nil {
return nil, probe.NewError(e) return nil, err
} }
fileData, e := ioutil.ReadFile(filename) fileData, err := ioutil.ReadFile(filename)
if e != nil { if err != nil {
return nil, probe.NewError(e) return nil, err
} }
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
fileData = []byte(strings.Replace(string(fileData), "\r\n", "\n", -1)) fileData = []byte(strings.Replace(string(fileData), "\r\n", "\n", -1))
} }
e = json.Unmarshal(fileData, &data) err = json.Unmarshal(fileData, &data)
if e != nil { if err != nil {
switch e := e.(type) { switch err := err.(type) {
case *json.SyntaxError: case *json.SyntaxError:
return nil, probe.NewError(FormatJSONSyntaxError(bytes.NewReader(fileData), e)) return nil, FormatJSONSyntaxError(bytes.NewReader(fileData), err)
default: default:
return nil, probe.NewError(e) return nil, err
} }
} }
config, err := New(data) config, err := New(data)
if err != nil { if err != nil {
return nil, err.Trace() return nil, err
} }
return config, nil return config, nil
@ -177,20 +176,16 @@ func (d config) Version() string {
// writeFile writes data to a file named by filename. // writeFile writes data to a file named by filename.
// If the file does not exist, writeFile creates it; // If the file does not exist, writeFile creates it;
// otherwise writeFile truncates it before writing. // otherwise writeFile truncates it before writing.
func writeFile(filename string, data []byte) *probe.Error { func writeFile(filename string, data []byte) error {
safeFile, e := safe.CreateFile(filename) safeFile, err := safe.CreateFile(filename)
if e != nil { if err != nil {
return probe.NewError(e) return err
} }
_, e = safeFile.Write(data) _, err = safeFile.Write(data)
if e != nil { if err != nil {
return probe.NewError(e) return err
} }
e = safeFile.Close() return safeFile.Close()
if e != nil {
return probe.NewError(e)
}
return nil
} }
// String converts JSON config to printable string // String converts JSON config to printable string
@ -200,37 +195,37 @@ func (d config) String() string {
} }
// Save writes config data in JSON format to a file. // Save writes config data in JSON format to a file.
func (d config) Save(filename string) *probe.Error { func (d config) Save(filename string) error {
d.lock.Lock() d.lock.Lock()
defer d.lock.Unlock() defer d.lock.Unlock()
// Check for existing file, if yes create a backup. // Check for existing file, if yes create a backup.
st, e := os.Stat(filename) st, err := os.Stat(filename)
// If file exists and stat failed return here. // If file exists and stat failed return here.
if e != nil && !os.IsNotExist(e) { if err != nil && !os.IsNotExist(err) {
return probe.NewError(e) return err
} }
// File exists and proceed to take backup. // File exists and proceed to take backup.
if e == nil { if err == nil {
// File exists and is not a regular file return error. // File exists and is not a regular file return error.
if !st.Mode().IsRegular() { if !st.Mode().IsRegular() {
return probe.NewError(fmt.Errorf("%s is not a regular file", filename)) return fmt.Errorf("%s is not a regular file", filename)
} }
// Read old data. // Read old data.
var oldData []byte var oldData []byte
oldData, e = ioutil.ReadFile(filename) oldData, err = ioutil.ReadFile(filename)
if e != nil { if err != nil {
return probe.NewError(e) return err
} }
// Save read data to the backup file. // Save read data to the backup file.
if err := writeFile(filename+".old", oldData); err != nil { if err = writeFile(filename+".old", oldData); err != nil {
return err.Trace(filename + ".old") return err
} }
} }
// Proceed to create or overwrite file. // Proceed to create or overwrite file.
jsonData, e := json.MarshalIndent(d.data, "", "\t") jsonData, err := json.MarshalIndent(d.data, "", "\t")
if e != nil { if err != nil {
return probe.NewError(e) return err
} }
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
@ -238,23 +233,22 @@ func (d config) Save(filename string) *probe.Error {
} }
// Save data. // Save data.
err := writeFile(filename, jsonData) return writeFile(filename, jsonData)
return err.Trace(filename)
} }
// Load - loads JSON config from file and merge with currently set values // Load - loads JSON config from file and merge with currently set values
func (d *config) Load(filename string) *probe.Error { func (d *config) Load(filename string) error {
d.lock.Lock() d.lock.Lock()
defer d.lock.Unlock() defer d.lock.Unlock()
_, e := os.Stat(filename) _, err := os.Stat(filename)
if e != nil { if err != nil {
return probe.NewError(e) return err
} }
fileData, e := ioutil.ReadFile(filename) fileData, err := ioutil.ReadFile(filename)
if e != nil { if err != nil {
return probe.NewError(e) return err
} }
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
@ -264,25 +258,25 @@ func (d *config) Load(filename string) *probe.Error {
st := structs.New(d.data) st := structs.New(d.data)
f, ok := st.FieldOk("Version") f, ok := st.FieldOk("Version")
if !ok { if !ok {
return probe.NewError(fmt.Errorf("Argument struct [%s] does not contain field \"Version\".", st.Name())) return fmt.Errorf("Argument struct [%s] does not contain field \"Version\".", st.Name())
} }
e = json.Unmarshal(fileData, d.data) err = json.Unmarshal(fileData, d.data)
if e != nil { if err != nil {
switch e := e.(type) { switch err := err.(type) {
case *json.SyntaxError: case *json.SyntaxError:
return probe.NewError(FormatJSONSyntaxError(bytes.NewReader(fileData), e)) return FormatJSONSyntaxError(bytes.NewReader(fileData), err)
default: default:
return probe.NewError(e) return err
} }
} }
if err := CheckData(d.data); err != nil { if err := CheckData(d.data); err != nil {
return err.Trace(filename) return err
} }
if (*d).Version() != f.Value() { if (*d).Version() != f.Value() {
return probe.NewError(fmt.Errorf("Version mismatch")) return fmt.Errorf("Version mismatch")
} }
return nil return nil
@ -294,11 +288,11 @@ func (d config) Data() interface{} {
} }
//Diff - list fields that are in A but not in B //Diff - list fields that are in A but not in B
func (d config) Diff(c Config) ([]structs.Field, *probe.Error) { func (d config) Diff(c Config) ([]structs.Field, error) {
var fields []structs.Field var fields []structs.Field
err := CheckData(c.Data()) err := CheckData(c.Data())
if err != nil { if err != nil {
return []structs.Field{}, err.Trace() return []structs.Field{}, err
} }
currFields := structs.Fields(d.Data()) currFields := structs.Fields(d.Data())
@ -320,11 +314,11 @@ func (d config) Diff(c Config) ([]structs.Field, *probe.Error) {
} }
//DeepDiff - list fields in A that are missing or not equal to fields in B //DeepDiff - list fields in A that are missing or not equal to fields in B
func (d config) DeepDiff(c Config) ([]structs.Field, *probe.Error) { func (d config) DeepDiff(c Config) ([]structs.Field, error) {
var fields []structs.Field var fields []structs.Field
err := CheckData(c.Data()) err := CheckData(c.Data())
if err != nil { if err != nil {
return []structs.Field{}, err.Trace() return []structs.Field{}, err
} }
currFields := structs.Fields(d.Data()) currFields := structs.Fields(d.Data())

View File

@ -78,7 +78,7 @@ func isDirExist(dirname string) (bool, error) {
} }
// Initialize a new storage disk. // Initialize a new storage disk.
func newFS(diskPath string) (StorageAPI, error) { func newPosix(diskPath string) (StorageAPI, error) {
if diskPath == "" { if diskPath == "" {
log.Debug("Disk cannot be empty") log.Debug("Disk cannot be empty")
return nil, errInvalidArgument return nil, errInvalidArgument

View File

@ -18,38 +18,30 @@ package main
import ( import (
"net/http" "net/http"
"path/filepath"
"strings"
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/probe"
) )
// newStorageAPI - initialize any storage API depending on the export path style. // newObjectLayer - initialize any object layer depending on the
func newStorageAPI(exportPaths ...string) (StorageAPI, error) { // number of export paths.
func newObjectLayer(exportPaths ...string) (ObjectLayer, error) {
if len(exportPaths) == 1 { if len(exportPaths) == 1 {
exportPath := exportPaths[0] exportPath := exportPaths[0]
if !strings.ContainsRune(exportPath, ':') || filepath.VolumeName(exportPath) != "" { // Initialize FS object layer.
// Initialize filesystem storage API. return newFSObjects(exportPath)
return newFS(exportPath)
}
// Initialize network storage API.
return newNetworkFS(exportPath)
} }
// Initialize XL storage API. // Initialize XL object layer.
return newXL(exportPaths...) return newXLObjects(exportPaths...)
} }
// configureServer handler returns final handler for the http server. // configureServer handler returns final handler for the http server.
func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler { func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
storageAPI, e := newStorageAPI(srvCmdConfig.exportPaths...) objAPI, err := newObjectLayer(srvCmdConfig.exportPaths...)
fatalIf(probe.NewError(e), "Initializing storage API failed.", nil) fatalIf(err, "Initializing object layer failed.", nil)
// Initialize object layer. // Initialize storage rpc server.
objAPI := newObjectLayer(storageAPI) storageRPC, err := newRPCServer(srvCmdConfig.exportPaths[0]) // FIXME: should only have one path.
fatalIf(err, "Initializing storage rpc server failed.", nil)
// Initialize storage rpc.
storageRPC := newStorageRPC(storageAPI)
// Initialize API. // Initialize API.
apiHandlers := objectAPIHandlers{ apiHandlers := objectAPIHandlers{

View File

@ -76,8 +76,8 @@ func toStorageErr(err error) error {
return err return err
} }
// Initialize new network file system. // Initialize new rpc client.
func newNetworkFS(networkPath string) (StorageAPI, error) { func newRPCClient(networkPath string) (StorageAPI, error) {
// Input validation. // Input validation.
if networkPath == "" || strings.LastIndex(networkPath, ":") == -1 { if networkPath == "" || strings.LastIndex(networkPath, ":") == -1 {
log.WithFields(logrus.Fields{ log.WithFields(logrus.Fields{

View File

@ -119,10 +119,15 @@ func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericRep
} }
// Initialize new storage rpc. // Initialize new storage rpc.
func newStorageRPC(storageAPI StorageAPI) *storageServer { func newRPCServer(exportPath string) (*storageServer, error) {
return &storageServer{ // Initialize posix storage API.
storage: storageAPI, storage, err := newPosix(exportPath)
if err != nil {
return nil, err
} }
return &storageServer{
storage: storage,
}, nil
} }
// registerStorageRPCRouter - register storage rpc router. // registerStorageRPCRouter - register storage rpc router.

View File

@ -31,7 +31,6 @@ import (
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/minhttp" "github.com/minio/minio/pkg/minhttp"
"github.com/minio/minio/pkg/probe"
) )
var serverCmd = cli.Command{ var serverCmd = cli.Command{
@ -89,11 +88,11 @@ func configureServer(srvCmdConfig serverCmdConfig) *http.Server {
// Configure TLS if certs are available. // Configure TLS if certs are available.
if isSSL() { if isSSL() {
var e error var err error
apiServer.TLSConfig = &tls.Config{} apiServer.TLSConfig = &tls.Config{}
apiServer.TLSConfig.Certificates = make([]tls.Certificate, 1) apiServer.TLSConfig.Certificates = make([]tls.Certificate, 1)
apiServer.TLSConfig.Certificates[0], e = tls.LoadX509KeyPair(mustGetCertFile(), mustGetKeyFile()) apiServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(mustGetCertFile(), mustGetKeyFile())
fatalIf(probe.NewError(e), "Unable to load certificates.", nil) fatalIf(err, "Unable to load certificates.", nil)
} }
// Returns configured HTTP server. // Returns configured HTTP server.
@ -102,16 +101,16 @@ func configureServer(srvCmdConfig serverCmdConfig) *http.Server {
// Print listen ips. // Print listen ips.
func printListenIPs(httpServerConf *http.Server) { func printListenIPs(httpServerConf *http.Server) {
host, port, e := net.SplitHostPort(httpServerConf.Addr) host, port, err := net.SplitHostPort(httpServerConf.Addr)
fatalIf(probe.NewError(e), "Unable to split host port.", nil) fatalIf(err, "Unable to split host port.", nil)
var hosts []string var hosts []string
switch { switch {
case host != "": case host != "":
hosts = append(hosts, host) hosts = append(hosts, host)
default: default:
addrs, e := net.InterfaceAddrs() addrs, err := net.InterfaceAddrs()
fatalIf(probe.NewError(e), "Unable to get interface address.", nil) fatalIf(err, "Unable to get interface address.", nil)
for _, addr := range addrs { for _, addr := range addrs {
if addr.Network() == "ip+net" { if addr.Network() == "ip+net" {
host := strings.Split(addr.String(), "/")[0] host := strings.Split(addr.String(), "/")[0]
@ -134,7 +133,7 @@ func printListenIPs(httpServerConf *http.Server) {
func initServerConfig(c *cli.Context) { func initServerConfig(c *cli.Context) {
// Save new config. // Save new config.
err := serverConfig.Save() err := serverConfig.Save()
fatalIf(err.Trace(), "Unable to save config.", nil) fatalIf(err, "Unable to save config.", nil)
// Fetch access keys from environment variables if any and update the config. // Fetch access keys from environment variables if any and update the config.
accessKey := os.Getenv("MINIO_ACCESS_KEY") accessKey := os.Getenv("MINIO_ACCESS_KEY")
@ -143,10 +142,10 @@ func initServerConfig(c *cli.Context) {
// Validate if both keys are specified and they are valid save them. // Validate if both keys are specified and they are valid save them.
if accessKey != "" && secretKey != "" { if accessKey != "" && secretKey != "" {
if !isValidAccessKey.MatchString(accessKey) { if !isValidAccessKey.MatchString(accessKey) {
fatalIf(probe.NewError(errInvalidArgument), "Access key does not have required length", nil) fatalIf(errInvalidArgument, "Access key does not have required length", nil)
} }
if !isValidSecretKey.MatchString(secretKey) { if !isValidSecretKey.MatchString(secretKey) {
fatalIf(probe.NewError(errInvalidArgument), "Secret key does not have required length", nil) fatalIf(errInvalidArgument, "Secret key does not have required length", nil)
} }
serverConfig.SetCredential(credential{ serverConfig.SetCredential(credential{
AccessKeyID: accessKey, AccessKeyID: accessKey,
@ -169,10 +168,10 @@ func checkServerSyntax(c *cli.Context) {
// Extract port number from address address should be of the form host:port. // Extract port number from address address should be of the form host:port.
func getPort(address string) int { func getPort(address string) int {
_, portStr, e := net.SplitHostPort(address) _, portStr, err := net.SplitHostPort(address)
fatalIf(probe.NewError(e), "Unable to split host port.", nil) fatalIf(err, "Unable to split host port.", nil)
portInt, e := strconv.Atoi(portStr) portInt, err := strconv.Atoi(portStr)
fatalIf(probe.NewError(e), "Invalid port number.", nil) fatalIf(err, "Invalid port number.", nil)
return portInt return portInt
} }
@ -186,11 +185,11 @@ func getPort(address string) int {
// on 127.0.0.1 even though minio server is running. So before we start // on 127.0.0.1 even though minio server is running. So before we start
// the minio server we make sure that the port is free on all the IPs. // the minio server we make sure that the port is free on all the IPs.
func checkPortAvailability(port int) { func checkPortAvailability(port int) {
isAddrInUse := func(e error) bool { isAddrInUse := func(err error) bool {
// Check if the syscall error is EADDRINUSE. // Check if the syscall error is EADDRINUSE.
// EADDRINUSE is the system call error if another process is // EADDRINUSE is the system call error if another process is
// already listening at the specified port. // already listening at the specified port.
neterr, ok := e.(*net.OpError) neterr, ok := err.(*net.OpError)
if !ok { if !ok {
return false return false
} }
@ -207,19 +206,19 @@ func checkPortAvailability(port int) {
} }
return true return true
} }
ifcs, e := net.Interfaces() ifcs, err := net.Interfaces()
if e != nil { if err != nil {
fatalIf(probe.NewError(e), "Unable to list interfaces.", nil) fatalIf(err, "Unable to list interfaces.", nil)
} }
for _, ifc := range ifcs { for _, ifc := range ifcs {
addrs, e := ifc.Addrs() addrs, err := ifc.Addrs()
if e != nil { if err != nil {
fatalIf(probe.NewError(e), fmt.Sprintf("Unable to list addresses on interface %s.", ifc.Name), nil) fatalIf(err, fmt.Sprintf("Unable to list addresses on interface %s.", ifc.Name), nil)
} }
for _, addr := range addrs { for _, addr := range addrs {
ipnet, ok := addr.(*net.IPNet) ipnet, ok := addr.(*net.IPNet)
if !ok { if !ok {
errorIf(probe.NewError(errors.New("")), "Interface type assertion to (*net.IPNet) failed.", nil) errorIf(errors.New(""), "Interface type assertion to (*net.IPNet) failed.", nil)
continue continue
} }
ip := ipnet.IP ip := ipnet.IP
@ -228,19 +227,18 @@ func checkPortAvailability(port int) {
network = "tcp6" network = "tcp6"
} }
tcpAddr := net.TCPAddr{IP: ip, Port: port, Zone: ifc.Name} tcpAddr := net.TCPAddr{IP: ip, Port: port, Zone: ifc.Name}
l, e := net.ListenTCP(network, &tcpAddr) l, err := net.ListenTCP(network, &tcpAddr)
if e != nil { if err != nil {
if isAddrInUse(e) { if isAddrInUse(err) {
// Fail if port is already in use. // Fail if port is already in use.
fatalIf(probe.NewError(e), fmt.Sprintf("Unable to listen on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil) fatalIf(err, fmt.Sprintf("Unable to listen on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
} else { } else {
// Ignore other errors. // Ignore other errors.
continue continue
} }
} }
e = l.Close() if err = l.Close(); err != nil {
if e != nil { fatalIf(err, fmt.Sprintf("Unable to close listener on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
fatalIf(probe.NewError(e), fmt.Sprintf("Unable to close listener on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
} }
} }
} }
@ -308,5 +306,5 @@ func serverMain(c *cli.Context) {
// Start server. // Start server.
err := minhttp.ListenAndServe(apiServer) err := minhttp.ListenAndServe(apiServer)
errorIf(err.Trace(), "Failed to start the minio server.", nil) errorIf(err.Cause, "Failed to start the minio server.", nil)
} }

View File

@ -21,7 +21,6 @@ import (
"time" "time"
jwtgo "github.com/dgrijalva/jwt-go" jwtgo "github.com/dgrijalva/jwt-go"
"github.com/minio/minio/pkg/probe"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
) )
@ -49,17 +48,13 @@ func initJWT() *JWT {
} }
// GenerateToken - generates a new Json Web Token based on the incoming user id. // GenerateToken - generates a new Json Web Token based on the incoming user id.
func (jwt *JWT) GenerateToken(userName string) (string, *probe.Error) { func (jwt *JWT) GenerateToken(userName string) (string, error) {
token := jwtgo.New(jwtgo.SigningMethodHS512) token := jwtgo.New(jwtgo.SigningMethodHS512)
// Token expires in 10hrs. // Token expires in 10hrs.
token.Claims["exp"] = time.Now().Add(time.Hour * tokenExpires).Unix() token.Claims["exp"] = time.Now().Add(time.Hour * tokenExpires).Unix()
token.Claims["iat"] = time.Now().Unix() token.Claims["iat"] = time.Now().Unix()
token.Claims["sub"] = userName token.Claims["sub"] = userName
tokenString, e := token.SignedString([]byte(jwt.SecretAccessKey)) return token.SignedString([]byte(jwt.SecretAccessKey))
if e != nil {
return "", probe.NewError(e)
}
return tokenString, nil
} }
// Authenticate - authenticates incoming username and password. // Authenticate - authenticates incoming username and password.

View File

@ -23,8 +23,6 @@ import (
"reflect" "reflect"
"strings" "strings"
"time" "time"
"github.com/minio/minio/pkg/probe"
) )
// toString - Safely convert interface to string without causing panic. // toString - Safely convert interface to string without causing panic.
@ -70,7 +68,7 @@ type PostPolicyForm struct {
} }
// parsePostPolicyFormV4 - Parse JSON policy string into typed POostPolicyForm structure. // parsePostPolicyFormV4 - Parse JSON policy string into typed POostPolicyForm structure.
func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) { func parsePostPolicyFormV4(policy string) (PostPolicyForm, error) {
// Convert po into interfaces and // Convert po into interfaces and
// perform strict type conversion using reflection. // perform strict type conversion using reflection.
var rawPolicy struct { var rawPolicy struct {
@ -78,17 +76,17 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
Conditions []interface{} `json:"conditions"` Conditions []interface{} `json:"conditions"`
} }
e := json.Unmarshal([]byte(policy), &rawPolicy) err := json.Unmarshal([]byte(policy), &rawPolicy)
if e != nil { if err != nil {
return PostPolicyForm{}, probe.NewError(e) return PostPolicyForm{}, err
} }
parsedPolicy := PostPolicyForm{} parsedPolicy := PostPolicyForm{}
// Parse expiry time. // Parse expiry time.
parsedPolicy.Expiration, e = time.Parse(time.RFC3339Nano, rawPolicy.Expiration) parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
if e != nil { if err != nil {
return PostPolicyForm{}, probe.NewError(e) return PostPolicyForm{}, err
} }
parsedPolicy.Conditions.Policies = make(map[string]struct { parsedPolicy.Conditions.Policies = make(map[string]struct {
Operator string Operator string
@ -102,8 +100,7 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
for k, v := range condt { for k, v := range condt {
if !isString(v) { // Pre-check value type. if !isString(v) { // Pre-check value type.
// All values must be of type string. // All values must be of type string.
return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", reflect.TypeOf(condt).String(), condt)
reflect.TypeOf(condt).String(), condt))
} }
// {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ] // {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ]
// In this case we will just collapse this into "eq" for all use cases. // In this case we will just collapse this into "eq" for all use cases.
@ -117,16 +114,14 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
} }
case []interface{}: // Handle array types. case []interface{}: // Handle array types.
if len(condt) != 3 { // Return error if we have insufficient elements. if len(condt) != 3 { // Return error if we have insufficient elements.
return parsedPolicy, probe.NewError(fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form.", return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form.", condt, reflect.TypeOf(condt).String())
condt, reflect.TypeOf(condt).String()))
} }
switch toString(condt[0]) { switch toString(condt[0]) {
case "eq", "starts-with": case "eq", "starts-with":
for _, v := range condt { // Pre-check all values for type. for _, v := range condt { // Pre-check all values for type.
if !isString(v) { if !isString(v) {
// All values must be of type string. // All values must be of type string.
return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", reflect.TypeOf(condt).String(), condt)
reflect.TypeOf(condt).String(), condt))
} }
} }
operator, matchType, value := toString(condt[0]), toString(condt[1]), toString(condt[2]) operator, matchType, value := toString(condt[0]), toString(condt[1]), toString(condt[2])
@ -147,12 +142,10 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
} }
default: default:
// Condition should be valid. // Condition should be valid.
return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", reflect.TypeOf(condt).String(), condt)
reflect.TypeOf(condt).String(), condt))
} }
default: default:
return parsedPolicy, probe.NewError(fmt.Errorf("Unknown field %s of type %s found in POST policy form.", return parsedPolicy, fmt.Errorf("Unknown field %s of type %s found in POST policy form.", condt, reflect.TypeOf(condt).String())
condt, reflect.TypeOf(condt).String()))
} }
} }
return parsedPolicy, nil return parsedPolicy, nil

View File

@ -28,7 +28,6 @@ import (
"github.com/fatih/color" "github.com/fatih/color"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/probe"
) )
// command specific flags. // command specific flags.
@ -90,7 +89,7 @@ func (u updateMessage) String() string {
return updateMessage("You are already running the most recent version of minio.") return updateMessage("You are already running the most recent version of minio.")
} }
msg, err := colorizeUpdateMessage(u.Download) msg, err := colorizeUpdateMessage(u.Download)
fatalIf(err.Trace(msg), "Unable to colorize experimental update notification string "+msg+".", nil) fatalIf(err, "Unable to colorize experimental update notification string "+msg+".", nil)
return msg return msg
} }
@ -98,38 +97,38 @@ func (u updateMessage) String() string {
func (u updateMessage) JSON() string { func (u updateMessage) JSON() string {
u.Status = "success" u.Status = "success"
updateMessageJSONBytes, err := json.Marshal(u) updateMessageJSONBytes, err := json.Marshal(u)
fatalIf(probe.NewError(err), "Unable to marshal into JSON.", nil) fatalIf((err), "Unable to marshal into JSON.", nil)
return string(updateMessageJSONBytes) return string(updateMessageJSONBytes)
} }
func parseReleaseData(data string) (time.Time, *probe.Error) { func parseReleaseData(data string) (time.Time, error) {
releaseStr := strings.Fields(data) releaseStr := strings.Fields(data)
if len(releaseStr) < 2 { if len(releaseStr) < 2 {
return time.Time{}, probe.NewError(errors.New("Update data malformed")) return time.Time{}, errors.New("Update data malformed")
} }
releaseDate := releaseStr[1] releaseDate := releaseStr[1]
releaseDateSplits := strings.SplitN(releaseDate, ".", 3) releaseDateSplits := strings.SplitN(releaseDate, ".", 3)
if len(releaseDateSplits) < 3 { if len(releaseDateSplits) < 3 {
return time.Time{}, probe.NewError(errors.New("Update data malformed")) return time.Time{}, (errors.New("Update data malformed"))
} }
if releaseDateSplits[0] != "minio" { if releaseDateSplits[0] != "minio" {
return time.Time{}, probe.NewError(errors.New("Update data malformed, missing minio tag")) return time.Time{}, (errors.New("Update data malformed, missing minio tag"))
} }
// "OFFICIAL" tag is still kept for backward compatibility, we should remove this for the next release. // "OFFICIAL" tag is still kept for backward compatibility, we should remove this for the next release.
if releaseDateSplits[1] != "RELEASE" && releaseDateSplits[1] != "OFFICIAL" { if releaseDateSplits[1] != "RELEASE" && releaseDateSplits[1] != "OFFICIAL" {
return time.Time{}, probe.NewError(errors.New("Update data malformed, missing RELEASE tag")) return time.Time{}, (errors.New("Update data malformed, missing RELEASE tag"))
} }
dateSplits := strings.SplitN(releaseDateSplits[2], "T", 2) dateSplits := strings.SplitN(releaseDateSplits[2], "T", 2)
if len(dateSplits) < 2 { if len(dateSplits) < 2 {
return time.Time{}, probe.NewError(errors.New("Update data malformed, not in modified RFC3359 form")) return time.Time{}, (errors.New("Update data malformed, not in modified RFC3359 form"))
} }
dateSplits[1] = strings.Replace(dateSplits[1], "-", ":", -1) dateSplits[1] = strings.Replace(dateSplits[1], "-", ":", -1)
date := strings.Join(dateSplits, "T") date := strings.Join(dateSplits, "T")
parsedDate, e := time.Parse(time.RFC3339, date) parsedDate, err := time.Parse(time.RFC3339, date)
if e != nil { if err != nil {
return time.Time{}, probe.NewError(e) return time.Time{}, err
} }
return parsedDate, nil return parsedDate, nil
} }
@ -159,32 +158,32 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
// Instantiate a new client with 1 sec timeout. // Instantiate a new client with 1 sec timeout.
client := &http.Client{ client := &http.Client{
Timeout: 500 * time.Millisecond, Timeout: 1 * time.Millisecond,
} }
// Fetch new update. // Fetch new update.
data, e := client.Get(newUpdateURL) data, err := client.Get(newUpdateURL)
if e != nil && noError { if err != nil && noError {
return updateMsg return updateMsg
} }
fatalIf(probe.NewError(e), "Unable to read from update URL "+newUpdateURL+".", nil) fatalIf((err), "Unable to read from update URL "+newUpdateURL+".", nil)
// Error out if 'update' command is issued for development based builds. // Error out if 'update' command is issued for development based builds.
if minioVersion == "DEVELOPMENT.GOGET" && !noError { if minioVersion == "DEVELOPMENT.GOGET" && !noError {
fatalIf(probe.NewError(errors.New("")), fatalIf((errors.New("")),
"Update mechanism is not supported for go get based binary builds. Please download official releases from https://minio.io/#minio", nil) "Update mechanism is not supported for go get based binary builds. Please download official releases from https://minio.io/#minio", nil)
} }
// Parse current minio version into RFC3339. // Parse current minio version into RFC3339.
current, e := time.Parse(time.RFC3339, minioVersion) current, err := time.Parse(time.RFC3339, minioVersion)
if e != nil && noError { if err != nil && noError {
return updateMsg return updateMsg
} }
fatalIf(probe.NewError(e), "Unable to parse version string as time.", nil) fatalIf((err), "Unable to parse version string as time.", nil)
// Verify if current minio version is zero. // Verify if current minio version is zero.
if current.IsZero() && !noError { if current.IsZero() && !noError {
fatalIf(probe.NewError(errors.New("")), fatalIf((errors.New("")),
"Updates not supported for custom builds. Version field is empty. Please download official releases from https://minio.io/#minio", nil) "Updates not supported for custom builds. Version field is empty. Please download official releases from https://minio.io/#minio", nil)
} }
@ -195,27 +194,27 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
if noError { if noError {
return updateMsg return updateMsg
} }
fatalIf(probe.NewError(errors.New("")), "Update server responsed with "+data.Status, nil) fatalIf((errors.New("")), "Update server responsed with "+data.Status, nil)
} }
} }
// Read the response body. // Read the response body.
updateBody, e := ioutil.ReadAll(data.Body) updateBody, err := ioutil.ReadAll(data.Body)
if e != nil && noError { if err != nil && noError {
return updateMsg return updateMsg
} }
fatalIf(probe.NewError(e), "Fetching updates failed. Please try again.", nil) fatalIf((err), "Fetching updates failed. Please try again.", nil)
// Parse the date if its valid. // Parse the date if its valid.
latest, err := parseReleaseData(string(updateBody)) latest, err := parseReleaseData(string(updateBody))
if err != nil && noError { if err != nil && noError {
return updateMsg return updateMsg
} }
fatalIf(err.Trace(updateURL), "Please report this issue at https://github.com/minio/minio/issues.", nil) fatalIf(err, "Please report this issue at https://github.com/minio/minio/issues.", nil)
// Verify if the date is not zero. // Verify if the date is not zero.
if latest.IsZero() && !noError { if latest.IsZero() && !noError {
fatalIf(probe.NewError(errors.New("")), fatalIf((errors.New("")),
"Unable to validate any update available at this time. Please open an issue at https://github.com/minio/minio/issues", nil) "Unable to validate any update available at this time. Please open an issue at https://github.com/minio/minio/issues", nil)
} }

View File

@ -19,7 +19,6 @@ package main
import ( import (
"encoding/base64" "encoding/base64"
"encoding/xml" "encoding/xml"
"github.com/minio/minio/pkg/probe"
"io" "io"
"strings" "strings"
) )
@ -31,12 +30,8 @@ func xmlDecoder(body io.Reader, v interface{}) error {
} }
// checkValidMD5 - verify if valid md5, returns md5 in bytes. // checkValidMD5 - verify if valid md5, returns md5 in bytes.
func checkValidMD5(md5 string) ([]byte, *probe.Error) { func checkValidMD5(md5 string) ([]byte, error) {
md5Bytes, e := base64.StdEncoding.DecodeString(strings.TrimSpace(md5)) return base64.StdEncoding.DecodeString(strings.TrimSpace(md5))
if e != nil {
return nil, probe.NewError(e)
}
return md5Bytes, nil
} }
/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html /// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
@ -56,6 +51,5 @@ func contains(stringList []string, element string) bool {
return true return true
} }
} }
return false return false
} }

View File

@ -109,9 +109,8 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
reply.UIVersion = miniobrowser.UIVersion reply.UIVersion = miniobrowser.UIVersion
e := web.ObjectAPI.MakeBucket(args.BucketName) if err := web.ObjectAPI.MakeBucket(args.BucketName); err != nil {
if e != nil { return &json2.Error{Message: err.Error()}
return &json2.Error{Message: e.Cause.Error()}
} }
return nil return nil
} }
@ -139,9 +138,9 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
if !isJWTReqAuthenticated(r) { if !isJWTReqAuthenticated(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
buckets, e := web.ObjectAPI.ListBuckets() buckets, err := web.ObjectAPI.ListBuckets()
if e != nil { if err != nil {
return &json2.Error{Message: e.Cause.Error()} return &json2.Error{Message: err.Error()}
} }
for _, bucket := range buckets { for _, bucket := range buckets {
// List all buckets which are not private. // List all buckets which are not private.
@ -191,7 +190,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
for { for {
lo, err := web.ObjectAPI.ListObjects(args.BucketName, args.Prefix, marker, "/", 1000) lo, err := web.ObjectAPI.ListObjects(args.BucketName, args.Prefix, marker, "/", 1000)
if err != nil { if err != nil {
return &json2.Error{Message: err.Cause.Error()} return &json2.Error{Message: err.Error()}
} }
marker = lo.NextMarker marker = lo.NextMarker
for _, obj := range lo.Objects { for _, obj := range lo.Objects {
@ -227,9 +226,8 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs,
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
reply.UIVersion = miniobrowser.UIVersion reply.UIVersion = miniobrowser.UIVersion
e := web.ObjectAPI.DeleteObject(args.BucketName, args.ObjectName) if err := web.ObjectAPI.DeleteObject(args.BucketName, args.ObjectName); err != nil {
if e != nil { return &json2.Error{Message: err.Error()}
return &json2.Error{Message: e.Cause.Error()}
} }
return nil return nil
} }
@ -252,7 +250,7 @@ func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginR
if jwt.Authenticate(args.Username, args.Password) { if jwt.Authenticate(args.Username, args.Password) {
token, err := jwt.GenerateToken(args.Username) token, err := jwt.GenerateToken(args.Username)
if err != nil { if err != nil {
return &json2.Error{Message: err.Cause.Error(), Data: err.String()} return &json2.Error{Message: err.Error()}
} }
reply.Token = token reply.Token = token
reply.UIVersion = miniobrowser.UIVersion reply.UIVersion = miniobrowser.UIVersion
@ -305,7 +303,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
cred := credential{args.AccessKey, args.SecretKey} cred := credential{args.AccessKey, args.SecretKey}
serverConfig.SetCredential(cred) serverConfig.SetCredential(cred)
if err := serverConfig.Save(); err != nil { if err := serverConfig.Save(); err != nil {
return &json2.Error{Message: err.Cause.Error()} return &json2.Error{Message: err.Error()}
} }
jwt := initJWT() jwt := initJWT()
@ -314,7 +312,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
} }
token, err := jwt.GenerateToken(args.AccessKey) token, err := jwt.GenerateToken(args.AccessKey)
if err != nil { if err != nil {
return &json2.Error{Message: err.Cause.Error()} return &json2.Error{Message: err.Error()}
} }
reply.Token = token reply.Token = token
reply.UIVersion = miniobrowser.UIVersion reply.UIVersion = miniobrowser.UIVersion
@ -350,7 +348,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
if _, err := web.ObjectAPI.PutObject(bucket, object, -1, r.Body, nil); err != nil { if _, err := web.ObjectAPI.PutObject(bucket, object, -1, r.Body, nil); err != nil {
writeWebErrorResponse(w, err.ToGoError()) writeWebErrorResponse(w, err)
} }
} }
@ -377,10 +375,10 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
objReader, err := web.ObjectAPI.GetObject(bucket, object, 0) objReader, err := web.ObjectAPI.GetObject(bucket, object, 0)
if err != nil { if err != nil {
writeWebErrorResponse(w, err.ToGoError()) writeWebErrorResponse(w, err)
return return
} }
if _, e := io.Copy(w, objReader); e != nil { if _, err := io.Copy(w, objReader); err != nil {
/// No need to print error, response writer already written to. /// No need to print error, response writer already written to.
return return
} }

View File

@ -30,7 +30,7 @@ import (
// webAPI container for Web API. // webAPI container for Web API.
type webAPIHandlers struct { type webAPIHandlers struct {
ObjectAPI objectAPI ObjectAPI ObjectLayer
} }
// indexHandler - Handler to serve index.html // indexHandler - Handler to serve index.html

View File

@ -83,7 +83,7 @@ func newXL(disks ...string) (StorageAPI, error) {
storageDisks := make([]StorageAPI, len(disks)) storageDisks := make([]StorageAPI, len(disks))
for index, disk := range disks { for index, disk := range disks {
var err error var err error
storageDisks[index], err = newFS(disk) storageDisks[index], err = newPosix(disk)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -573,6 +573,7 @@ func (xl XL) DeleteFile(volume, path string) error {
// RenameFile - rename file. // RenameFile - rename file.
func (xl XL) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error { func (xl XL) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
// Validate inputs.
if !isValidVolname(srcVolume) { if !isValidVolname(srcVolume) {
return errInvalidArgument return errInvalidArgument
} }
@ -587,6 +588,12 @@ func (xl XL) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
} }
for _, disk := range xl.storageDisks { for _, disk := range xl.storageDisks {
if err := disk.RenameFile(srcVolume, srcPath, dstVolume, dstPath); err != nil { if err := disk.RenameFile(srcVolume, srcPath, dstVolume, dstPath); err != nil {
log.WithFields(logrus.Fields{
"srcVolume": srcVolume,
"srcPath": srcPath,
"dstVolume": dstVolume,
"dstPath": dstPath,
}).Errorf("RenameFile failed with %s", err)
return err return err
} }
} }

531
xl-objects-multipart.go Normal file
View File

@ -0,0 +1,531 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"crypto/md5"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"path"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/skyrings/skyring-common/tools/uuid"
)
// listLeafEntries - lists all entries if a given prefixPath is a leaf
// directory, returns error if any - returns empty list if prefixPath
// is not a leaf directory.
func (xl xlObjects) listLeafEntries(prefixPath string) (entries []FileInfo, e error) {
var markerPath string
for {
fileInfos, eof, err := xl.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, false, 1000)
if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": prefixPath,
"markerPath": markerPath,
}).Errorf("%s", err)
return nil, err
}
for _, fileInfo := range fileInfos {
// Set marker for next batch of ListFiles.
markerPath = fileInfo.Name
if fileInfo.Mode.IsDir() {
// If a directory is found, doesn't return anything.
return nil, nil
}
fileName := path.Base(fileInfo.Name)
if !strings.Contains(fileName, ".") {
// Skip the entry if it is of the pattern bucket/object/uploadID.partNum.md5sum
// and retain entries of the pattern bucket/object/uploadID
entries = append(entries, fileInfo)
}
}
if eof {
break
}
}
return entries, nil
}
// listMetaVolumeFiles - list all files at a given prefix inside minioMetaVolume.
// listMetaVolumeFiles - list all files at a given prefix inside minioMetaVolume.
//
// Files are fetched in batches from the storage layer. Leaf directories are
// expanded through listLeafEntries, and files matching the special pattern
// bucket/object/uploadID.partNum.md5sum are filtered out. At most maxKeys
// entries are returned, together with the storage layer's eof indicator.
func (xl xlObjects) listMetaVolumeFiles(prefixPath string, markerPath string, recursive bool, maxKeys int) (allFileInfos []FileInfo, eof bool, err error) {
	// newMaxKeys tracks the size of entries which are going to be
	// returned back.
	var newMaxKeys int
	// Following loop gathers and filters out special files inside
	// minio meta volume.
	for {
		var fileInfos []FileInfo
		// List files up to maxKeys-newMaxKeys, since we are skipping entries for special files.
		fileInfos, eof, err = xl.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, recursive, maxKeys-newMaxKeys)
		if err != nil {
			log.WithFields(logrus.Fields{
				"prefixPath": prefixPath,
				"markerPath": markerPath,
				"recursive":  recursive,
				"maxKeys":    maxKeys,
			}).Errorf("%s", err)
			return nil, true, err
		}
		// Loop through and validate individual file.
		for _, fi := range fileInfos {
			var entries []FileInfo
			if fi.Mode.IsDir() {
				// List all the entries if fi.Name is a leaf directory, if
				// fi.Name is not a leaf directory then the resulting
				// entries are empty.
				entries, err = xl.listLeafEntries(fi.Name)
				if err != nil {
					log.WithFields(logrus.Fields{
						"prefixPath": fi.Name,
					}).Errorf("%s", err)
					return nil, false, err
				}
			}
			// Set markerPath for next batch of listing.
			markerPath = fi.Name
			if len(entries) > 0 {
				// We reach here for non-recursive case and a leaf entry.
				for _, entry := range entries {
					allFileInfos = append(allFileInfos, entry)
					newMaxKeys++
					// If we have reached the maxKeys, it means we have listed
					// everything that was requested. Return right here.
					if newMaxKeys == maxKeys {
						// Return values:
						// allFileInfos : "maxKeys" number of entries.
						// eof : eof returned by xl.storage.ListFiles()
						// error : nil
						return
					}
				}
			} else {
				// We reach here for a non-recursive case non-leaf entry
				// OR recursive case with fi.Name matching pattern bucket/object/uploadID[.partNum.md5sum]
				if !fi.Mode.IsDir() { // Do not skip non-recursive case directory entries.
					// Skip files matching pattern bucket/object/uploadID.partNum.md5sum
					// and retain files matching pattern bucket/object/uploadID
					specialFile := path.Base(fi.Name)
					if strings.Contains(specialFile, ".") {
						// Contains partnumber and md5sum info, skip this.
						continue
					}
				}
			}
			// NOTE(review): this append runs after BOTH branches above — when a
			// leaf directory's entries were just added, the directory entry
			// itself is appended as well. Confirm this double listing is
			// intended before relying on it.
			allFileInfos = append(allFileInfos, fi)
			newMaxKeys++
			// If we have reached the maxKeys, it means we have listed
			// everything that was requested. Return right here.
			if newMaxKeys == maxKeys {
				// Return values:
				// allFileInfos : "maxKeys" number of entries.
				// eof : eof returned by xl.storage.ListFiles()
				// error : nil
				return
			}
		}
		// If we have reached eof then we break out.
		if eof {
			break
		}
	}
	// Return entries here.
	return allFileInfos, eof, nil
}
// ListMultipartUploads - list multipart uploads.
// ListMultipartUploads - list multipart uploads.
//
// Validates the bucket/prefix/delimiter/marker arguments, then lists
// entries under minioMetaVolume at "bucket/prefix": directory entries
// become CommonPrefixes, upload-id files become upload records. Only ""
// and "/" are supported as delimiters. Returns at most maxUploads
// entries; NextKeyMarker/NextUploadIDMarker are populated only when the
// result is truncated.
func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
	result := ListMultipartsInfo{}
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return ListMultipartsInfo{}, (BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectPrefix(prefix) {
		return ListMultipartsInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: prefix})
	}
	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != slashSeparator {
		return ListMultipartsInfo{}, (UnsupportedDelimiter{
			Delimiter: delimiter,
		})
	}
	// Verify if marker has prefix.
	if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
		return ListMultipartsInfo{}, (InvalidMarkerPrefixCombination{
			Marker: keyMarker,
			Prefix: prefix,
		})
	}
	if uploadIDMarker != "" {
		if strings.HasSuffix(keyMarker, slashSeparator) {
			return result, (InvalidUploadIDKeyCombination{
				UploadIDMarker: uploadIDMarker,
				KeyMarker:      keyMarker,
			})
		}
		id, err := uuid.Parse(uploadIDMarker)
		if err != nil {
			// An unparseable upload id marker is malformed; report it as
			// such instead of leaking the raw uuid library error, keeping
			// this branch consistent with the IsZero check below.
			return result, (MalformedUploadID{
				UploadID: uploadIDMarker,
			})
		}
		if id.IsZero() {
			return result, (MalformedUploadID{
				UploadID: uploadIDMarker,
			})
		}
	}
	// A '/' delimiter disables recursion into object name components.
	recursive := true
	if delimiter == slashSeparator {
		recursive = false
	}
	result.MaxUploads = maxUploads
	// Not using path.Join() as it strips off the trailing '/'.
	// Also bucket should always be followed by '/' even if prefix is empty.
	prefixPath := pathJoin(bucket, prefix)
	keyMarkerPath := ""
	if keyMarker != "" {
		keyMarkerPath = pathJoin(pathJoin(bucket, keyMarker), uploadIDMarker)
	}
	// List all the multipart files at prefixPath, starting with marker keyMarkerPath.
	fileInfos, eof, err := xl.listMetaVolumeFiles(prefixPath, keyMarkerPath, recursive, maxUploads)
	if err != nil {
		log.WithFields(logrus.Fields{
			"prefixPath": prefixPath,
			"markerPath": keyMarkerPath,
			"recursive":  recursive,
			"maxUploads": maxUploads,
		}).Errorf("listMetaVolumeFiles failed with %s", err)
		return ListMultipartsInfo{}, err
	}
	// Loop through all the received files fill in the multiparts result.
	for _, fi := range fileInfos {
		var objectName string
		var uploadID string
		if fi.Mode.IsDir() {
			// All directory entries are common prefixes.
			uploadID = "" // Upload ids are empty for CommonPrefixes.
			objectName = strings.TrimPrefix(fi.Name, retainSlash(bucket))
			result.CommonPrefixes = append(result.CommonPrefixes, objectName)
		} else {
			// Upload id files are stored as bucket/object/uploadID.
			uploadID = path.Base(fi.Name)
			objectName = strings.TrimPrefix(path.Dir(fi.Name), retainSlash(bucket))
			result.Uploads = append(result.Uploads, uploadMetadata{
				Object:    objectName,
				UploadID:  uploadID,
				Initiated: fi.ModTime,
			})
		}
		result.NextKeyMarker = objectName
		result.NextUploadIDMarker = uploadID
	}
	result.IsTruncated = !eof
	if !result.IsTruncated {
		// Nothing more to list; clear the continuation markers.
		result.NextKeyMarker = ""
		result.NextUploadIDMarker = ""
	}
	return result, nil
}
// NewMultipartUpload - initialize a new multipart upload.
//
// Reserves a fresh upload id by creating an empty placeholder file at
// minioMetaVolume/bucket/object/uploadID and returns the upload id.
// Creates minioMetaVolume lazily on first use.
func (xl xlObjects) NewMultipartUpload(bucket, object string) (string, error) {
	// Verify if bucket name is valid.
	if !IsValidBucketName(bucket) {
		return "", (BucketNameInvalid{Bucket: bucket})
	}
	// Verify if object name is valid.
	if !IsValidObjectName(object) {
		return "", ObjectNameInvalid{Bucket: bucket, Object: object}
	}
	// Verify whether the bucket exists.
	if isExist, err := xl.isBucketExist(bucket); err != nil {
		return "", err
	} else if !isExist {
		return "", BucketNotFound{Bucket: bucket}
	}
	// Create minio meta volume if it doesn't exist yet.
	if _, err := xl.storage.StatVol(minioMetaVolume); err != nil {
		if err != errVolumeNotFound {
			// Previously any stat error other than "not found" was
			// silently swallowed; surface it instead.
			return "", toObjectErr(err, minioMetaVolume)
		}
		if err = xl.storage.MakeVol(minioMetaVolume); err != nil {
			return "", toObjectErr(err)
		}
	}
	// Loop until an unused upload id is found.
	for {
		// Named uid so the uuid package isn't shadowed inside the loop.
		uid, err := uuid.New()
		if err != nil {
			return "", err
		}
		uploadID := uid.String()
		uploadIDPath := path.Join(bucket, object, uploadID)
		if _, err = xl.storage.StatFile(minioMetaVolume, uploadIDPath); err != nil {
			if err != errFileNotFound {
				return "", (toObjectErr(err, minioMetaVolume, uploadIDPath))
			}
			// uploadIDPath doesn't exist, so create empty file to reserve the name.
			var w io.WriteCloser
			if w, err = xl.storage.CreateFile(minioMetaVolume, uploadIDPath); err == nil {
				// Close the writer.
				if err = w.Close(); err != nil {
					return "", err
				}
			} else {
				return "", toObjectErr(err, minioMetaVolume, uploadIDPath)
			}
			return uploadID, nil
		}
		// uploadIDPath already exists.
		// loop again to try with different uuid generated.
	}
}
// isUploadIDExists - verify if a given uploadID exists and is valid.
func (xl xlObjects) isUploadIDExists(bucket, object, uploadID string) (bool, error) {
uploadIDPath := path.Join(bucket, object, uploadID)
st, err := xl.storage.StatFile(minioMetaVolume, uploadIDPath)
if err != nil {
// Upload id does not exist.
if err == errFileNotFound {
return false, nil
}
return false, err
}
// Upload id exists and is a regular file.
return st.Mode.IsRegular(), nil
}
// PutObjectPart - writes the multipart upload chunks.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", BucketNameInvalid{Bucket: bucket}
}
if !IsValidObjectName(object) {
return "", ObjectNameInvalid{Bucket: bucket, Object: object}
}
// Verify whether the bucket exists.
if isExist, err := xl.isBucketExist(bucket); err != nil {
return "", err
} else if !isExist {
return "", BucketNotFound{Bucket: bucket}
}
if status, err := xl.isUploadIDExists(bucket, object, uploadID); err != nil {
return "", err
} else if !status {
return "", InvalidUploadID{UploadID: uploadID}
}
partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, partID, md5Hex)
fileWriter, err := xl.storage.CreateFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
if err != nil {
return "", toObjectErr(err, bucket, object)
}
// Initialize md5 writer.
md5Writer := md5.New()
// Instantiate a new multi writer.
multiWriter := io.MultiWriter(md5Writer, fileWriter)
// Instantiate checksum hashers and create a multiwriter.
if size > 0 {
if _, err = io.CopyN(multiWriter, data, size); err != nil {
safeCloseAndRemove(fileWriter)
return "", (toObjectErr(err))
}
// Reader shouldn't have more data what mentioned in size argument.
// reading one more byte from the reader to validate it.
// expected to fail, success validates existence of more data in the reader.
if _, err = io.CopyN(ioutil.Discard, data, 1); err == nil {
safeCloseAndRemove(fileWriter)
return "", (UnExpectedDataSize{Size: int(size)})
}
} else {
if _, err = io.Copy(multiWriter, data); err != nil {
safeCloseAndRemove(fileWriter)
return "", (toObjectErr(err))
}
}
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if md5Hex != "" {
if newMD5Hex != md5Hex {
safeCloseAndRemove(fileWriter)
return "", (BadDigest{md5Hex, newMD5Hex})
}
}
err = fileWriter.Close()
if err != nil {
return "", err
}
return newMD5Hex, nil
}
// ListObjectParts - list uploaded parts of a given upload id.
//
// Parts are stored as minioMetaVolume/bucket/object/uploadID.partNum.md5sum;
// the part number and etag are recovered from the file name. Returns up to
// maxParts entries starting after partNumberMarker.
func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return ListPartsInfo{}, (BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return ListPartsInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	if status, err := xl.isUploadIDExists(bucket, object, uploadID); err != nil {
		return ListPartsInfo{}, err
	} else if !status {
		return ListPartsInfo{}, (InvalidUploadID{UploadID: uploadID})
	}
	result := ListPartsInfo{}
	var markerPath string
	nextPartNumberMarker := 0
	uploadIDPath := path.Join(bucket, object, uploadID)
	// Figure out the marker for the next subsequent calls, if the
	// partNumberMarker is already set.
	if partNumberMarker > 0 {
		partNumberMarkerPath := uploadIDPath + "." + strconv.Itoa(partNumberMarker) + "."
		fileInfos, _, err := xl.storage.ListFiles(minioMetaVolume, partNumberMarkerPath, "", false, 1)
		if err != nil {
			return result, toObjectErr(err, minioMetaVolume, partNumberMarkerPath)
		}
		if len(fileInfos) == 0 {
			return result, (InvalidPart{})
		}
		markerPath = fileInfos[0].Name
	}
	uploadIDPrefix := uploadIDPath + "."
	fileInfos, eof, err := xl.storage.ListFiles(minioMetaVolume, uploadIDPrefix, markerPath, false, maxParts)
	if err != nil {
		// NOTE(review): a storage error is reported as InvalidPart here,
		// which loses the real cause; confirm callers depend on this
		// mapping before changing it.
		return result, InvalidPart{}
	}
	for _, fileInfo := range fileInfos {
		fileName := path.Base(fileInfo.Name)
		splitResult := strings.Split(fileName, ".")
		if len(splitResult) < 3 {
			// A name not matching uploadID.partNum.md5sum would previously
			// panic with an index out of range; treat it as an invalid part.
			return result, InvalidPart{}
		}
		partNum, err := strconv.Atoi(splitResult[1])
		if err != nil {
			return result, err
		}
		md5sum := splitResult[2]
		result.Parts = append(result.Parts, partInfo{
			PartNumber:   partNum,
			LastModified: fileInfo.ModTime,
			ETag:         md5sum,
			Size:         fileInfo.Size,
		})
		nextPartNumberMarker = partNum
	}
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.PartNumberMarker = partNumberMarker
	result.NextPartNumberMarker = nextPartNumberMarker
	result.MaxParts = maxParts
	result.IsTruncated = !eof
	return result, nil
}
// CompleteMultipartUpload - complete a multipart upload by renaming the
// uploaded parts from minioMetaVolume into their final positions under
// bucket/object. Returns the aggregated s3-style md5 of the parts.
func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return "", (BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return "", (ObjectNameInvalid{
			Bucket: bucket,
			Object: object,
		})
	}
	if status, err := xl.isUploadIDExists(bucket, object, uploadID); err != nil {
		return "", err
	} else if !status {
		return "", (InvalidUploadID{UploadID: uploadID})
	}
	fileWriter, err := xl.storage.CreateFile(bucket, object)
	if err != nil {
		return "", toObjectErr(err, bucket, object)
	}
	var md5Sums []string
	for _, part := range parts {
		// Construct part suffix.
		partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, part.PartNumber, part.ETag)
		err = xl.storage.RenameFile(minioMetaVolume, path.Join(bucket, object, partSuffix), bucket, path.Join(object, fmt.Sprint(part.PartNumber)))
		if err != nil {
			// Previously the object writer leaked on rename failure;
			// close and remove it before returning.
			if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
				return "", clErr
			}
			return "", err
		}
		md5Sums = append(md5Sums, part.ETag)
	}
	err = fileWriter.Close()
	if err != nil {
		return "", err
	}
	// Save the s3 md5.
	s3MD5, err := makeS3MD5(md5Sums...)
	if err != nil {
		return "", err
	}
	// Return md5sum.
	return s3MD5, nil
}
// AbortMultipartUpload - abort multipart upload.
func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return (BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return (ObjectNameInvalid{Bucket: bucket, Object: object})
}
if status, err := xl.isUploadIDExists(bucket, object, uploadID); err != nil {
return err
} else if !status {
return (InvalidUploadID{UploadID: uploadID})
}
markerPath := ""
for {
uploadIDPath := path.Join(bucket, object, uploadID)
fileInfos, eof, err := xl.storage.ListFiles(minioMetaVolume, uploadIDPath, markerPath, false, 1000)
if err != nil {
if err == errFileNotFound {
return (InvalidUploadID{UploadID: uploadID})
}
return toObjectErr(err)
}
for _, fileInfo := range fileInfos {
xl.storage.DeleteFile(minioMetaVolume, fileInfo.Name)
markerPath = fileInfo.Name
}
if eof {
break
}
}
return nil
}

View File

@ -27,26 +27,30 @@ import (
"strings" "strings"
"github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/mimedb"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/safe"
) )
const ( const (
multipartMetaFile = "multipart.json" multipartMetaFile = "multipart.json"
) )
type objectAPI struct { // xlObjects - Implements fs object layer.
type xlObjects struct {
storage StorageAPI storage StorageAPI
} }
func newObjectLayer(storage StorageAPI) objectAPI { // newXLObjects - initialize new xl object layer.
return objectAPI{storage} func newXLObjects(exportPaths ...string) (ObjectLayer, error) {
storage, err := newXL(exportPaths...)
if err != nil {
return nil, err
}
return xlObjects{storage}, nil
} }
// checks whether bucket exists. // checks whether bucket exists.
func (o objectAPI) isBucketExist(bucketName string) (bool, error) { func (xl xlObjects) isBucketExist(bucketName string) (bool, error) {
// Check whether bucket exists. // Check whether bucket exists.
if _, e := o.storage.StatVol(bucketName); e != nil { if _, e := xl.storage.StatVol(bucketName); e != nil {
if e == errVolumeNotFound { if e == errVolumeNotFound {
return false, nil return false, nil
} }
@ -58,35 +62,35 @@ func (o objectAPI) isBucketExist(bucketName string) (bool, error) {
/// Bucket operations /// Bucket operations
// MakeBucket - make a bucket. // MakeBucket - make a bucket.
func (o objectAPI) MakeBucket(bucket string) *probe.Error { func (xl xlObjects) MakeBucket(bucket string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket}) return BucketNameInvalid{Bucket: bucket}
} }
if e := o.storage.MakeVol(bucket); e != nil { if err := xl.storage.MakeVol(bucket); err != nil {
return probe.NewError(toObjectErr(e, bucket)) return toObjectErr(err, bucket)
} }
// This happens for the first time, but keep this here since this // This happens for the first time, but keep this here since this
// is the only place where it can be made expensive optimizing all // is the only place where it can be made expensive optimizing all
// other calls. // other calls.
// Create minio meta volume, if it doesn't exist yet. // Create minio meta volume, if it doesn't exist yet.
if e := o.storage.MakeVol(minioMetaVolume); e != nil { if err := xl.storage.MakeVol(minioMetaVolume); err != nil {
if e != errVolumeExists { if err != errVolumeExists {
return probe.NewError(toObjectErr(e, minioMetaVolume)) return toObjectErr(err, minioMetaVolume)
} }
} }
return nil return nil
} }
// GetBucketInfo - get bucket info. // GetBucketInfo - get bucket info.
func (o objectAPI) GetBucketInfo(bucket string) (BucketInfo, *probe.Error) { func (xl xlObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return BucketInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
} }
vi, e := o.storage.StatVol(bucket) vi, err := xl.storage.StatVol(bucket)
if e != nil { if err != nil {
return BucketInfo{}, probe.NewError(toObjectErr(e, bucket)) return BucketInfo{}, toObjectErr(err, bucket)
} }
return BucketInfo{ return BucketInfo{
Name: bucket, Name: bucket,
@ -96,19 +100,12 @@ func (o objectAPI) GetBucketInfo(bucket string) (BucketInfo, *probe.Error) {
}, nil }, nil
} }
// byBucketName is a collection satisfying sort.Interface.
type byBucketName []BucketInfo
func (d byBucketName) Len() int { return len(d) }
func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name }
// ListBuckets - list buckets. // ListBuckets - list buckets.
func (o objectAPI) ListBuckets() ([]BucketInfo, *probe.Error) { func (xl xlObjects) ListBuckets() ([]BucketInfo, error) {
var bucketInfos []BucketInfo var bucketInfos []BucketInfo
vols, e := o.storage.ListVols() vols, err := xl.storage.ListVols()
if e != nil { if err != nil {
return nil, probe.NewError(toObjectErr(e)) return nil, toObjectErr(err)
} }
for _, vol := range vols { for _, vol := range vols {
// StorageAPI can send volume names which are incompatible // StorageAPI can send volume names which are incompatible
@ -128,13 +125,13 @@ func (o objectAPI) ListBuckets() ([]BucketInfo, *probe.Error) {
} }
// DeleteBucket - delete a bucket. // DeleteBucket - delete a bucket.
func (o objectAPI) DeleteBucket(bucket string) *probe.Error { func (xl xlObjects) DeleteBucket(bucket string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket}) return BucketNameInvalid{Bucket: bucket}
} }
if e := o.storage.DeleteVol(bucket); e != nil { if err := xl.storage.DeleteVol(bucket); err != nil {
return probe.NewError(toObjectErr(e)) return toObjectErr(err)
} }
return nil return nil
} }
@ -142,12 +139,12 @@ func (o objectAPI) DeleteBucket(bucket string) *probe.Error {
/// Object Operations /// Object Operations
// GetObject - get an object. // GetObject - get an object.
func (o objectAPI) GetObject(bucket, object string, startOffset int64) (io.ReadCloser, *probe.Error) { func (xl xlObjects) GetObject(bucket, object string, startOffset int64) (io.ReadCloser, error) {
findPathOffset := func() (i int, partOffset int64, err error) { findPathOffset := func() (i int, partOffset int64, err error) {
partOffset = startOffset partOffset = startOffset
for i = 1; i < 10000; i++ { for i = 1; i < 10000; i++ {
var fileInfo FileInfo var fileInfo FileInfo
fileInfo, err = o.storage.StatFile(bucket, pathJoin(object, fmt.Sprint(i))) fileInfo, err = xl.storage.StatFile(bucket, pathJoin(object, fmt.Sprint(i)))
if err != nil { if err != nil {
if err == errFileNotFound { if err == errFileNotFound {
continue continue
@ -167,36 +164,31 @@ func (o objectAPI) GetObject(bucket, object string, startOffset int64) (io.ReadC
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return nil, probe.NewError(BucketNameInvalid{Bucket: bucket}) return nil, BucketNameInvalid{Bucket: bucket}
} }
// Verify if object is valid. // Verify if object is valid.
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return nil, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object}) return nil, ObjectNameInvalid{Bucket: bucket, Object: object}
} }
_, err := o.storage.StatFile(bucket, object) if _, err := xl.storage.StatFile(bucket, pathJoin(object, multipartMetaFile)); err != nil {
if err == nil { if _, err = xl.storage.StatFile(bucket, object); err == nil {
fmt.Println("1", err) var reader io.ReadCloser
r, e := o.storage.ReadFile(bucket, object, startOffset) reader, err = xl.storage.ReadFile(bucket, object, startOffset)
if e != nil { if err != nil {
fmt.Println("1.5", err) return nil, toObjectErr(err, bucket, object)
return nil, probe.NewError(toObjectErr(e, bucket, object)) }
return reader, nil
} }
return r, nil return nil, toObjectErr(err, bucket, object)
}
_, err = o.storage.StatFile(bucket, pathJoin(object, multipartMetaFile))
if err != nil {
fmt.Println("2", err)
return nil, probe.NewError(toObjectErr(err, bucket, object))
} }
fileReader, fileWriter := io.Pipe() fileReader, fileWriter := io.Pipe()
partNum, offset, err := findPathOffset() partNum, offset, err := findPathOffset()
if err != nil { if err != nil {
fmt.Println("3", err) return nil, toObjectErr(err, bucket, object)
return nil, probe.NewError(toObjectErr(err, bucket, object))
} }
go func() { go func() {
for ; partNum < 10000; partNum++ { for ; partNum < 10000; partNum++ {
r, err := o.storage.ReadFile(bucket, pathJoin(object, fmt.Sprint(partNum)), offset) r, err := xl.storage.ReadFile(bucket, pathJoin(object, fmt.Sprint(partNum)), offset)
if err != nil { if err != nil {
if err == errFileNotFound { if err == errFileNotFound {
continue continue
@ -215,10 +207,10 @@ func (o objectAPI) GetObject(bucket, object string, startOffset int64) (io.ReadC
} }
// GetObjectInfo - get object info. // GetObjectInfo - get object info.
func (o objectAPI) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error) { func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
getMultpartFileSize := func() (size int64) { getMultpartFileSize := func() (size int64) {
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {
fi, err := o.storage.StatFile(bucket, pathJoin(object, fmt.Sprint(i))) fi, err := xl.storage.StatFile(bucket, pathJoin(object, fmt.Sprint(i)))
if err != nil { if err != nil {
continue continue
} }
@ -228,17 +220,17 @@ func (o objectAPI) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Erro
} }
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) return ObjectInfo{}, BucketNameInvalid{Bucket: bucket}
} }
// Verify if object is valid. // Verify if object is valid.
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object}) return ObjectInfo{}, ObjectNameInvalid{Bucket: bucket, Object: object}
} }
fi, e := o.storage.StatFile(bucket, object) fi, err := xl.storage.StatFile(bucket, object)
if e != nil { if err != nil {
fi, e = o.storage.StatFile(bucket, pathJoin(object, multipartMetaFile)) fi, err = xl.storage.StatFile(bucket, pathJoin(object, multipartMetaFile))
if e != nil { if err != nil {
return ObjectInfo{}, probe.NewError(toObjectErr(e, bucket, object)) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
fi.Size = getMultpartFileSize() fi.Size = getMultpartFileSize()
} }
@ -260,44 +252,29 @@ func (o objectAPI) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Erro
}, nil }, nil
} }
// safeCloseAndRemove - safely closes and removes underlying temporary func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (string, error) {
// file writer if possible.
func safeCloseAndRemove(writer io.WriteCloser) error {
// If writer is a safe file, Attempt to close and remove.
safeWriter, ok := writer.(*safe.File)
if ok {
return safeWriter.CloseAndRemove()
}
pipeWriter, ok := writer.(*io.PipeWriter)
if ok {
return pipeWriter.CloseWithError(errors.New("Close and error out."))
}
return nil
}
func (o objectAPI) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (string, *probe.Error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket}) return "", (BucketNameInvalid{Bucket: bucket})
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return "", probe.NewError(ObjectNameInvalid{ return "", (ObjectNameInvalid{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
}) })
} }
// Check whether the bucket exists. // Check whether the bucket exists.
isExist, err := o.isBucketExist(bucket) isExist, err := xl.isBucketExist(bucket)
if err != nil { if err != nil {
return "", probe.NewError(err) return "", err
} }
if !isExist { if !isExist {
return "", probe.NewError(BucketNotFound{Bucket: bucket}) return "", BucketNotFound{Bucket: bucket}
} }
fileWriter, e := o.storage.CreateFile(bucket, object) fileWriter, err := xl.storage.CreateFile(bucket, object)
if e != nil { if err != nil {
return "", probe.NewError(toObjectErr(e, bucket, object)) return "", toObjectErr(err, bucket, object)
} }
// Initialize md5 writer. // Initialize md5 writer.
@ -308,18 +285,18 @@ func (o objectAPI) PutObject(bucket string, object string, size int64, data io.R
// Instantiate checksum hashers and create a multiwriter. // Instantiate checksum hashers and create a multiwriter.
if size > 0 { if size > 0 {
if _, e = io.CopyN(multiWriter, data, size); e != nil { if _, err = io.CopyN(multiWriter, data, size); err != nil {
if clErr := safeCloseAndRemove(fileWriter); clErr != nil { if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
return "", probe.NewError(clErr) return "", clErr
} }
return "", probe.NewError(toObjectErr(e)) return "", toObjectErr(err)
} }
} else { } else {
if _, e = io.Copy(multiWriter, data); e != nil { if _, err = io.Copy(multiWriter, data); err != nil {
if clErr := safeCloseAndRemove(fileWriter); clErr != nil { if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
return "", probe.NewError(clErr) return "", clErr
} }
return "", probe.NewError(e) return "", err
} }
} }
@ -331,56 +308,56 @@ func (o objectAPI) PutObject(bucket string, object string, size int64, data io.R
} }
if md5Hex != "" { if md5Hex != "" {
if newMD5Hex != md5Hex { if newMD5Hex != md5Hex {
if e = safeCloseAndRemove(fileWriter); e != nil { if err = safeCloseAndRemove(fileWriter); err != nil {
return "", probe.NewError(e) return "", err
} }
return "", probe.NewError(BadDigest{md5Hex, newMD5Hex}) return "", BadDigest{md5Hex, newMD5Hex}
} }
} }
e = fileWriter.Close() err = fileWriter.Close()
if e != nil { if err != nil {
return "", probe.NewError(e) return "", err
} }
// Return md5sum, successfully wrote object. // Return md5sum, successfully wrote object.
return newMD5Hex, nil return newMD5Hex, nil
} }
func (o objectAPI) DeleteObject(bucket, object string) *probe.Error { func (xl xlObjects) DeleteObject(bucket, object string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket}) return BucketNameInvalid{Bucket: bucket}
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object}) return ObjectNameInvalid{Bucket: bucket, Object: object}
} }
if e := o.storage.DeleteFile(bucket, object); e != nil { if err := xl.storage.DeleteFile(bucket, object); err != nil {
return probe.NewError(toObjectErr(e, bucket, object)) return toObjectErr(err, bucket, object)
} }
return nil return nil
} }
func (o objectAPI) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error) { func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ListObjectsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) return ListObjectsInfo{}, BucketNameInvalid{Bucket: bucket}
} }
if !IsValidObjectPrefix(prefix) { if !IsValidObjectPrefix(prefix) {
return ListObjectsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: prefix}) return ListObjectsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix}
} }
// Verify if delimiter is anything other than '/', which we do not support. // Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator { if delimiter != "" && delimiter != slashSeparator {
return ListObjectsInfo{}, probe.NewError(UnsupportedDelimiter{ return ListObjectsInfo{}, UnsupportedDelimiter{
Delimiter: delimiter, Delimiter: delimiter,
}) }
} }
// Verify if marker has prefix. // Verify if marker has prefix.
if marker != "" { if marker != "" {
if !strings.HasPrefix(marker, prefix) { if !strings.HasPrefix(marker, prefix) {
return ListObjectsInfo{}, probe.NewError(InvalidMarkerPrefixCombination{ return ListObjectsInfo{}, InvalidMarkerPrefixCombination{
Marker: marker, Marker: marker,
Prefix: prefix, Prefix: prefix,
}) }
} }
} }
@ -389,9 +366,9 @@ func (o objectAPI) ListObjects(bucket, prefix, marker, delimiter string, maxKeys
if delimiter == slashSeparator { if delimiter == slashSeparator {
recursive = false recursive = false
} }
fileInfos, eof, e := o.storage.ListFiles(bucket, prefix, marker, recursive, maxKeys) fileInfos, eof, err := xl.storage.ListFiles(bucket, prefix, marker, recursive, maxKeys)
if e != nil { if err != nil {
return ListObjectsInfo{}, probe.NewError(toObjectErr(e, bucket)) return ListObjectsInfo{}, toObjectErr(err, bucket)
} }
if maxKeys == 0 { if maxKeys == 0 {
return ListObjectsInfo{}, nil return ListObjectsInfo{}, nil