mirror of https://github.com/minio/minio.git
xl/fs: Split object layer into interface. (#1415)
This commit is contained in:
parent 4d1b3d5e9a
commit 4e34e03dd4
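The handlers in this diff stop depending on the concrete objectAPI type and program against an ObjectLayer interface instead. The interface definition itself is not part of the hunks shown below; as a rough sketch inferred only from the call sites in this commit (api.ObjectAPI.GetBucketInfo, MakeBucket, DeleteBucket, PutObject, and the fsObjects multipart methods further down), it might look like the following. Return types such as BucketInfo are assumptions, not confirmed by the diff.

    // Hypothetical sketch of ObjectLayer, reconstructed from call sites in
    // this diff. The real definition lives elsewhere in the tree and may differ.
    type ObjectLayer interface {
        // Bucket operations.
        MakeBucket(bucket string) error
        GetBucketInfo(bucket string) (BucketInfo, error) // BucketInfo is assumed
        DeleteBucket(bucket string) error

        // Object operations.
        PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string) (string, error)

        // Multipart operations, signatures matching the fsObjects methods below.
        ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error)
        NewMultipartUpload(bucket, object string) (string, error)
        PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error)
        ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error)
    }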
@@ -21,8 +21,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"regexp"
-
-	"github.com/minio/minio/pkg/probe"
 )
 
 // credential container for access and secret keys.
@@ -52,19 +50,19 @@ var isValidAccessKey = regexp.MustCompile(`^[a-zA-Z0-9\\-\\.\\_\\~]{5,20}$`)
 // mustGenAccessKeys - must generate access credentials.
 func mustGenAccessKeys() (creds credential) {
 	creds, err := genAccessKeys()
-	fatalIf(err.Trace(), "Unable to generate access keys.", nil)
+	fatalIf(err, "Unable to generate access keys.", nil)
 	return creds
 }
 
 // genAccessKeys - generate access credentials.
-func genAccessKeys() (credential, *probe.Error) {
+func genAccessKeys() (credential, error) {
 	accessKeyID, err := genAccessKeyID()
 	if err != nil {
-		return credential{}, err.Trace()
+		return credential{}, err
 	}
 	secretAccessKey, err := genSecretAccessKey()
 	if err != nil {
-		return credential{}, err.Trace()
+		return credential{}, err
 	}
 	creds := credential{
 		AccessKeyID: string(accessKeyID),
@@ -75,10 +73,10 @@ func genAccessKeys() (credential, *probe.Error) {
 
 // genAccessKeyID - generate random alpha numeric value using only uppercase characters
 // takes input as size in integer
-func genAccessKeyID() ([]byte, *probe.Error) {
+func genAccessKeyID() ([]byte, error) {
 	alpha := make([]byte, minioAccessID)
-	if _, e := rand.Read(alpha); e != nil {
-		return nil, probe.NewError(e)
+	if _, err := rand.Read(alpha); err != nil {
+		return nil, err
 	}
 	for i := 0; i < minioAccessID; i++ {
 		alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))]
@@ -87,10 +85,10 @@ func genAccessKeyID() ([]byte, *probe.Error) {
 }
 
 // genSecretAccessKey - generate random base64 numeric value from a random seed.
-func genSecretAccessKey() ([]byte, *probe.Error) {
+func genSecretAccessKey() ([]byte, error) {
 	rb := make([]byte, minioSecretID)
-	if _, e := rand.Read(rb); e != nil {
-		return nil, probe.NewError(e)
+	if _, err := rand.Read(rb); err != nil {
+		return nil, err
 	}
 	return []byte(base64.StdEncoding.EncodeToString(rb))[:minioSecretID], nil
 }
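Across every file in this commit the error-handling change is the same mechanical rewrite: functions stop returning *probe.Error and return Go's built-in error, call sites drop the probe.NewError wrapping and the err.Trace() calls, and the local name e becomes err. A minimal sketch of the before/after shape (function names here are illustrative, not from the commit):

    package example

    import (
        "crypto/rand"

        "github.com/minio/minio/pkg/probe"
    )

    // before: the error is wrapped into *probe.Error to carry a trace.
    func genBytesOld(n int) ([]byte, *probe.Error) {
        b := make([]byte, n)
        if _, e := rand.Read(b); e != nil {
            return nil, probe.NewError(e)
        }
        return b, nil
    }

    // after: the plain error is propagated unchanged.
    func genBytesNew(n int) ([]byte, error) {
        b := make([]byte, n)
        if _, err := rand.Read(b); err != nil {
            return nil, err
        }
        return b, nil
    }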
@@ -22,8 +22,6 @@ import (
 	"net/url"
 	"os"
 	"time"
-
-	"github.com/minio/minio/pkg/probe"
 )
 
 type accessLogHandler struct {
@@ -60,14 +58,14 @@ type LogMessage struct {
 
 func (h *accessLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	message, err := getLogMessage(w, req)
-	fatalIf(err.Trace(), "Unable to extract http message.", nil)
-	_, e := h.accessLogFile.Write(message)
-	fatalIf(probe.NewError(e), "Writing to log file failed.", nil)
+	fatalIf(err, "Unable to extract http message.", nil)
+	_, err = h.accessLogFile.Write(message)
+	fatalIf(err, "Writing to log file failed.", nil)
 
 	h.Handler.ServeHTTP(w, req)
 }
 
-func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, *probe.Error) {
+func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, error) {
 	logMessage := &LogMessage{
 		StartTime: time.Now().UTC(),
 	}
@@ -103,9 +101,9 @@ func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, *probe.Err
 
 	// logMessage.HTTP.Request = req
 	logMessage.Duration = time.Now().UTC().Sub(logMessage.StartTime)
-	js, e := json.Marshal(logMessage)
-	if e != nil {
-		return nil, probe.NewError(e)
+	js, err := json.Marshal(logMessage)
+	if err != nil {
+		return nil, err
 	}
 	js = append(js, byte('\n')) // append a new line
 	return js, nil
@@ -113,8 +111,8 @@ func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, *probe.Err
 
 // setAccessLogHandler logs requests
 func setAccessLogHandler(h http.Handler) http.Handler {
-	file, e := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
-	fatalIf(probe.NewError(e), "Unable to open access log.", nil)
+	file, err := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+	fatalIf(err, "Unable to open access log.", nil)
 
 	return &accessLogHandler{Handler: h, accessLogFile: file}
 }
@@ -20,7 +20,7 @@ import router "github.com/gorilla/mux"
 
 // objectAPIHandler implements and provides http handlers for S3 API.
 type objectAPIHandlers struct {
-	ObjectAPI objectAPI
+	ObjectAPI ObjectLayer
 }
 
 // registerAPIRouter - registers S3 compatible APIs.
@@ -26,7 +26,6 @@ import (
 	"strings"
 
 	fastSha256 "github.com/minio/minio/pkg/crypto/sha256"
-	"github.com/minio/minio/pkg/probe"
 )
 
 // Verify if request has JWT.
@@ -113,12 +112,12 @@ func sumMD5(data []byte) []byte {
 // Verify if request has valid AWS Signature Version '4'.
 func isReqAuthenticated(r *http.Request) (s3Error APIErrorCode) {
 	if r == nil {
-		errorIf(probe.NewError(errInvalidArgument), "HTTP request cannot be empty.", nil)
+		errorIf(errInvalidArgument, "HTTP request cannot be empty.", nil)
 		return ErrInternalError
 	}
-	payload, e := ioutil.ReadAll(r.Body)
-	if e != nil {
-		errorIf(probe.NewError(e), "Unable to read HTTP body.", nil)
+	payload, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		errorIf(err, "Unable to read HTTP body.", nil)
 		return ErrInternalError
 	}
 	// Verify Content-Md5, if payload is set.
@@ -29,7 +29,6 @@ import (
 	"strings"
 
 	mux "github.com/gorilla/mux"
-	"github.com/minio/minio/pkg/probe"
 )
 
 // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
@@ -37,8 +36,8 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
 	// Read saved bucket policy.
 	policy, err := readBucketPolicy(bucket)
 	if err != nil {
-		errorIf(err.Trace(bucket), "GetBucketPolicy failed.", nil)
-		switch err.ToGoError().(type) {
+		errorIf(err, "GetBucketPolicy failed.", nil)
+		switch err.(type) {
 		case BucketNotFound:
 			return ErrNoSuchBucket
 		case BucketNameInvalid:
@@ -49,9 +48,9 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
 		}
 	}
 	// Parse the saved policy.
-	bucketPolicy, e := parseBucketPolicy(policy)
-	if e != nil {
-		errorIf(probe.NewError(e), "Parse policy failed.", nil)
+	bucketPolicy, err := parseBucketPolicy(policy)
+	if err != nil {
+		errorIf(err, "Parse policy failed.", nil)
 		return ErrAccessDenied
 	}
 
@@ -90,8 +89,8 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
 			return
 		}
 	case authTypeSigned, authTypePresigned:
-		payload, e := ioutil.ReadAll(r.Body)
-		if e != nil {
+		payload, err := ioutil.ReadAll(r.Body)
+		if err != nil {
 			writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 			return
 		}
@@ -117,10 +116,9 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
 		}
 	}
 
-	_, err := api.ObjectAPI.GetBucketInfo(bucket)
-	if err != nil {
-		errorIf(err.Trace(), "GetBucketInfo failed.", nil)
-		switch err.ToGoError().(type) {
+	if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
+		errorIf(err, "GetBucketInfo failed.", nil)
+		switch err.(type) {
 		case BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
 		case BucketNameInvalid:
@@ -181,9 +179,8 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
 	}
 	if keyMarker != "" {
 		// Unescape keyMarker string
-		keyMarkerUnescaped, e := url.QueryUnescape(keyMarker)
-		if e != nil {
-			if e != nil {
+		keyMarkerUnescaped, err := url.QueryUnescape(keyMarker)
+		if err != nil {
 			// Return 'NoSuchKey' to indicate invalid marker key.
 			writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
 			return
@@ -195,12 +192,11 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
 			return
 		}
 		}
 	}
 
 	listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 	if err != nil {
-		errorIf(err.Trace(), "ListMultipartUploads failed.", nil)
-		switch err.ToGoError().(type) {
+		errorIf(err, "ListMultipartUploads failed.", nil)
+		switch err.(type) {
 		case BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
 		default:
@@ -259,8 +255,8 @@ func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.R
 	// If marker is set unescape.
 	if marker != "" {
 		// Try to unescape marker.
-		markerUnescaped, e := url.QueryUnescape(marker)
-		if e != nil {
+		markerUnescaped, err := url.QueryUnescape(marker)
+		if err != nil {
 			// Return 'NoSuchKey' to indicate invalid marker key.
 			writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
 			return
@@ -284,7 +280,8 @@ func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.R
 		writeSuccessResponse(w, encodedSuccessResponse)
 		return
 	}
-	switch err.ToGoError().(type) {
+	errorIf(err, "ListObjects failed.", nil)
+	switch err.(type) {
 	case BucketNameInvalid:
 		writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
 	case BucketNotFound:
@@ -292,7 +289,6 @@ func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.R
 	case ObjectNameInvalid:
 		writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
 	default:
-		errorIf(err.Trace(), "ListObjects failed.", nil)
 		writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 	}
 }
@@ -347,8 +343,8 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
 		writeSuccessResponse(w, encodedSuccessResponse)
 		return
 	}
-	errorIf(err.Trace(), "ListBuckets failed.", nil)
-	switch err.ToGoError().(type) {
+	errorIf(err, "ListBuckets failed.", nil)
+	switch err.(type) {
 	case StorageInsufficientReadResources:
 		writeErrorResponse(w, r, ErrInsufficientReadResources, r.URL.Path)
 	default:
@@ -398,16 +394,16 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 	deleteXMLBytes := make([]byte, r.ContentLength)
 
 	// Read incoming body XML bytes.
-	_, e := io.ReadFull(r.Body, deleteXMLBytes)
-	if e != nil {
-		errorIf(probe.NewError(e), "DeleteMultipleObjects failed.", nil)
+	if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
+		errorIf(err, "DeleteMultipleObjects failed.", nil)
 		writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		return
 	}
 
 	// Unmarshal list of keys to be deleted.
 	deleteObjects := &DeleteObjectsRequest{}
-	if e := xml.Unmarshal(deleteXMLBytes, deleteObjects); e != nil {
+	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
+		errorIf(err, "DeleteMultipartObjects xml decoding failed.", nil)
 		writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
 		return
 	}
@@ -422,8 +418,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 				ObjectName: object.ObjectName,
 			})
 		} else {
-			errorIf(err.Trace(object.ObjectName), "DeleteObject failed.", nil)
-			switch err.ToGoError().(type) {
+			errorIf(err, "DeleteObject failed.", nil)
+			switch err.(type) {
 			case BucketNameInvalid:
 				deleteErrors = append(deleteErrors, DeleteError{
 					Code: errorCodeResponse[ErrInvalidBucketName].Code,
@@ -498,8 +494,8 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	// Make bucket.
 	err := api.ObjectAPI.MakeBucket(bucket)
 	if err != nil {
-		errorIf(err.Trace(), "MakeBucket failed.", nil)
-		switch err.ToGoError().(type) {
+		errorIf(err, "MakeBucket failed.", nil)
+		switch err.(type) {
 		case BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
 		case BucketExists:
@@ -514,24 +510,25 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	writeSuccessResponse(w, nil)
 }
 
-func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) {
+func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, error) {
 	/// HTML Form values
 	formValues := make(map[string]string)
 	filePart := new(bytes.Buffer)
-	var e error
-	for e == nil {
+	var err error
+	for err == nil {
 		var part *multipart.Part
-		part, e = reader.NextPart()
+		part, err = reader.NextPart()
 		if part != nil {
 			if part.FileName() == "" {
-				buffer, e := ioutil.ReadAll(part)
-				if e != nil {
-					return nil, nil, probe.NewError(e)
+				var buffer []byte
+				buffer, err = ioutil.ReadAll(part)
+				if err != nil {
+					return nil, nil, err
 				}
 				formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
 			} else {
-				if _, e := io.Copy(filePart, part); e != nil {
-					return nil, nil, probe.NewError(e)
+				if _, err = io.Copy(filePart, part); err != nil {
+					return nil, nil, err
 				}
 			}
 		}
@@ -546,16 +543,16 @@ func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]stri
 func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
 	// Here the parameter is the size of the form data that should
 	// be loaded in memory, the remaining being put in temporary files.
-	reader, e := r.MultipartReader()
-	if e != nil {
-		errorIf(probe.NewError(e), "Unable to initialize multipart reader.", nil)
+	reader, err := r.MultipartReader()
+	if err != nil {
+		errorIf(err, "Unable to initialize multipart reader.", nil)
 		writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
 		return
 	}
 
 	fileBody, formValues, err := extractHTTPFormValues(reader)
 	if err != nil {
-		errorIf(err.Trace(), "Unable to parse form values.", nil)
+		errorIf(err, "Unable to parse form values.", nil)
 		writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
 		return
 	}
@@ -575,8 +572,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	}
 	md5Sum, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil)
 	if err != nil {
-		errorIf(err.Trace(), "PutObject failed.", nil)
-		switch err.ToGoError().(type) {
+		errorIf(err, "PutObject failed.", nil)
+		switch err.(type) {
 		case StorageFull:
 			writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
 		case BucketNotFound:
@@ -626,10 +623,9 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
 		}
 	}
 
-	_, err := api.ObjectAPI.GetBucketInfo(bucket)
-	if err != nil {
-		errorIf(err.Trace(), "GetBucketInfo failed.", nil)
-		switch err.ToGoError().(type) {
+	if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
+		errorIf(err, "GetBucketInfo failed.", nil)
+		switch err.(type) {
 		case BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
 		case BucketNameInvalid:
@@ -661,10 +657,9 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 		}
 	}
 
-	err := api.ObjectAPI.DeleteBucket(bucket)
-	if err != nil {
-		errorIf(err.Trace(), "DeleteBucket failed.", nil)
-		switch err.ToGoError().(type) {
+	if err := api.ObjectAPI.DeleteBucket(bucket); err != nil {
+		errorIf(err, "DeleteBucket failed.", nil)
+		switch err.(type) {
 		case BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
 		case BucketNotEmpty:
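A pattern repeated throughout these handlers: with plain error values, the S3 error code is chosen by a type switch directly on err, where the old code first unwrapped the probe error with err.ToGoError(). A condensed, illustrative sketch of that mapping (the real handlers write the response inline rather than through a helper like this):

    // mapBucketError is a hypothetical helper condensing the switch pattern
    // above; the error types and API codes are the ones used in this diff.
    func mapBucketError(err error) APIErrorCode {
        switch err.(type) {
        case BucketNameInvalid:
            return ErrInvalidBucketName
        case BucketNotFound:
            return ErrNoSuchBucket
        default:
            return ErrInternalError
        }
    }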
@@ -25,7 +25,6 @@ import (
 	"strings"
 
 	mux "github.com/gorilla/mux"
-	"github.com/minio/minio/pkg/probe"
 )
 
 // maximum supported access policy size.
@@ -67,8 +66,8 @@ func bucketPolicyMatchStatement(action string, resource string, conditions map[s
 func bucketPolicyActionMatch(action string, statement policyStatement) bool {
 	for _, policyAction := range statement.Actions {
 		// Policy action can be a regex, validate the action with matching string.
-		matched, e := regexp.MatchString(policyAction, action)
-		fatalIf(probe.NewError(e), "Invalid pattern, please verify the pattern string.", nil)
+		matched, err := regexp.MatchString(policyAction, action)
+		fatalIf(err, "Invalid pattern, please verify the pattern string.", nil)
 		if matched {
 			return true
 		}
@@ -79,8 +78,8 @@ func bucketPolicyActionMatch(action string, statement policyStatement) bool {
 // Verify if given resource matches with policy statement.
 func bucketPolicyResourceMatch(resource string, statement policyStatement) bool {
 	for _, presource := range statement.Resources {
-		matched, e := regexp.MatchString(presource, strings.TrimPrefix(resource, "/"))
-		fatalIf(probe.NewError(e), "Invalid pattern, please verify the pattern string.", nil)
+		matched, err := regexp.MatchString(presource, strings.TrimPrefix(resource, "/"))
+		fatalIf(err, "Invalid pattern, please verify the pattern string.", nil)
 		// For any path matches, we return quickly and the let the caller continue.
 		if matched {
 			return true
@@ -161,17 +160,17 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
 	// Read access policy up to maxAccessPolicySize.
 	// http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
 	// bucket policies are limited to 20KB in size, using a limit reader.
-	bucketPolicyBuf, e := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
-	if e != nil {
-		errorIf(probe.NewError(e).Trace(bucket), "Reading policy failed.", nil)
+	bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
+	if err != nil {
+		errorIf(err, "Reading policy failed.", nil)
 		writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		return
 	}
 
 	// Parse bucket policy.
-	bucketPolicy, e := parseBucketPolicy(bucketPolicyBuf)
-	if e != nil {
-		errorIf(probe.NewError(e), "Unable to parse bucket policy.", nil)
+	bucketPolicy, err := parseBucketPolicy(bucketPolicyBuf)
+	if err != nil {
+		errorIf(err, "Unable to parse bucket policy.", nil)
 		writeErrorResponse(w, r, ErrInvalidPolicyDocument, r.URL.Path)
 		return
 	}
@@ -183,10 +182,9 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
 	}
 
 	// Save bucket policy.
-	err := writeBucketPolicy(bucket, bucketPolicyBuf)
-	if err != nil {
-		errorIf(err.Trace(bucket, string(bucketPolicyBuf)), "SaveBucketPolicy failed.", nil)
-		switch err.ToGoError().(type) {
+	if err := writeBucketPolicy(bucket, bucketPolicyBuf); err != nil {
+		errorIf(err, "SaveBucketPolicy failed.", nil)
+		switch err.(type) {
 		case BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
 		default:
@@ -218,10 +216,9 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
 	}
 
 	// Delete bucket access policy.
-	err := removeBucketPolicy(bucket)
-	if err != nil {
-		errorIf(err.Trace(bucket), "DeleteBucketPolicy failed.", nil)
-		switch err.ToGoError().(type) {
+	if err := removeBucketPolicy(bucket); err != nil {
+		errorIf(err, "DeleteBucketPolicy failed.", nil)
+		switch err.(type) {
 		case BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
 		case BucketPolicyNotFound:
@@ -257,8 +254,8 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
 	// Read bucket access policy.
 	p, err := readBucketPolicy(bucket)
 	if err != nil {
-		errorIf(err.Trace(bucket), "GetBucketPolicy failed.", nil)
-		switch err.ToGoError().(type) {
+		errorIf(err, "GetBucketPolicy failed.", nil)
+		switch err.(type) {
 		case BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
 		case BucketPolicyNotFound:
@@ -20,132 +20,115 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
-
-	"github.com/minio/minio/pkg/probe"
 )
 
 // getBucketsConfigPath - get buckets path.
-func getBucketsConfigPath() (string, *probe.Error) {
+func getBucketsConfigPath() (string, error) {
 	configPath, err := getConfigPath()
 	if err != nil {
-		return "", err.Trace()
+		return "", err
 	}
 	return filepath.Join(configPath, "buckets"), nil
 }
 
 // createBucketsConfigPath - create buckets directory.
-func createBucketsConfigPath() *probe.Error {
+func createBucketsConfigPath() error {
 	bucketsConfigPath, err := getBucketsConfigPath()
 	if err != nil {
 		return err
 	}
-	if e := os.MkdirAll(bucketsConfigPath, 0700); e != nil {
-		return probe.NewError(e)
-	}
-	return nil
+	return os.MkdirAll(bucketsConfigPath, 0700)
 }
 
 // getBucketConfigPath - get bucket config path.
-func getBucketConfigPath(bucket string) (string, *probe.Error) {
+func getBucketConfigPath(bucket string) (string, error) {
 	bucketsConfigPath, err := getBucketsConfigPath()
 	if err != nil {
-		return "", err.Trace()
+		return "", err
 	}
 	return filepath.Join(bucketsConfigPath, bucket), nil
 }
 
 // createBucketConfigPath - create bucket config directory.
-func createBucketConfigPath(bucket string) *probe.Error {
+func createBucketConfigPath(bucket string) error {
 	bucketConfigPath, err := getBucketConfigPath(bucket)
 	if err != nil {
 		return err
 	}
-	if e := os.MkdirAll(bucketConfigPath, 0700); e != nil {
-		return probe.NewError(e)
-	}
-	return nil
+	return os.MkdirAll(bucketConfigPath, 0700)
 }
 
 // readBucketPolicy - read bucket policy.
-func readBucketPolicy(bucket string) ([]byte, *probe.Error) {
+func readBucketPolicy(bucket string) ([]byte, error) {
 	// Verify bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return nil, BucketNameInvalid{Bucket: bucket}
 	}
 
 	bucketConfigPath, err := getBucketConfigPath(bucket)
 	if err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
 
 	// Get policy file.
 	bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
-	if _, e := os.Stat(bucketPolicyFile); e != nil {
-		if os.IsNotExist(e) {
-			return nil, probe.NewError(BucketPolicyNotFound{Bucket: bucket})
+	if _, err = os.Stat(bucketPolicyFile); err != nil {
+		if os.IsNotExist(err) {
+			return nil, BucketPolicyNotFound{Bucket: bucket}
 		}
-		return nil, probe.NewError(e)
+		return nil, err
 	}
 
-	accessPolicyBytes, e := ioutil.ReadFile(bucketPolicyFile)
-	if e != nil {
-		return nil, probe.NewError(e)
-	}
-	return accessPolicyBytes, nil
+	return ioutil.ReadFile(bucketPolicyFile)
 }
 
 // removeBucketPolicy - remove bucket policy.
-func removeBucketPolicy(bucket string) *probe.Error {
+func removeBucketPolicy(bucket string) error {
 	// Verify bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return BucketNameInvalid{Bucket: bucket}
 	}
 
 	bucketConfigPath, err := getBucketConfigPath(bucket)
 	if err != nil {
-		return err.Trace(bucket)
+		return err
 	}
 
 	// Get policy file.
 	bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
-	if _, e := os.Stat(bucketPolicyFile); e != nil {
-		if os.IsNotExist(e) {
-			return probe.NewError(BucketPolicyNotFound{Bucket: bucket})
+	if _, err = os.Stat(bucketPolicyFile); err != nil {
+		if os.IsNotExist(err) {
+			return BucketPolicyNotFound{Bucket: bucket}
 		}
-		return probe.NewError(e)
+		return err
 	}
 	return nil
 }
 
 // writeBucketPolicy - save bucket policy.
-func writeBucketPolicy(bucket string, accessPolicyBytes []byte) *probe.Error {
+func writeBucketPolicy(bucket string, accessPolicyBytes []byte) error {
 	// Verify if bucket path legal
 	if !IsValidBucketName(bucket) {
-		return probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return BucketNameInvalid{Bucket: bucket}
 	}
 
 	// Create bucket config path.
 	if err := createBucketConfigPath(bucket); err != nil {
-		return err.Trace()
+		return err
 	}
 
 	bucketConfigPath, err := getBucketConfigPath(bucket)
 	if err != nil {
-		return err.Trace()
+		return err
 	}
 
 	// Get policy file.
 	bucketPolicyFile := filepath.Join(bucketConfigPath, "access-policy.json")
-	if _, e := os.Stat(bucketPolicyFile); e != nil {
-		if !os.IsNotExist(e) {
-			return probe.NewError(e)
+	if _, err := os.Stat(bucketPolicyFile); err != nil {
+		if !os.IsNotExist(err) {
+			return err
 		}
 	}
 
 	// Write bucket policy.
-	if e := ioutil.WriteFile(bucketPolicyFile, accessPolicyBytes, 0600); e != nil {
-		return probe.NewError(e)
-	}
-
-	return nil
+	return ioutil.WriteFile(bucketPolicyFile, accessPolicyBytes, 0600)
 }
certs.go (20 changes)

@@ -21,26 +21,22 @@ import (
 	"path/filepath"
 
 	"github.com/minio/go-homedir"
-	"github.com/minio/minio/pkg/probe"
 )
 
 // createCertsPath create certs path.
-func createCertsPath() *probe.Error {
+func createCertsPath() error {
 	certsPath, err := getCertsPath()
 	if err != nil {
-		return err.Trace()
+		return err
 	}
-	if err := os.MkdirAll(certsPath, 0700); err != nil {
-		return probe.NewError(err)
-	}
-	return nil
+	return os.MkdirAll(certsPath, 0700)
 }
 
 // getCertsPath get certs path.
-func getCertsPath() (string, *probe.Error) {
-	homeDir, e := homedir.Dir()
-	if e != nil {
-		return "", probe.NewError(e)
+func getCertsPath() (string, error) {
+	homeDir, err := homedir.Dir()
+	if err != nil {
+		return "", err
 	}
 	certsPath := filepath.Join(homeDir, globalMinioCertsDir)
 	return certsPath, nil
@@ -49,7 +45,7 @@ func getCertsPath() (string, *probe.Error) {
 // mustGetCertsPath must get certs path.
 func mustGetCertsPath() string {
 	certsPath, err := getCertsPath()
-	fatalIf(err.Trace(), "Unable to retrieve certs path.", nil)
+	fatalIf(err, "Unable to retrieve certs path.", nil)
 	return certsPath
 }
 
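Several create*Path helpers in this commit collapse a wrap-and-return tail into a single statement, which is safe because os.MkdirAll already returns nil on success, so its result can be the function's result directly. The shape of the simplification, excerpted from the hunks above:

    // before: wrap the error, then return nil on the success path.
    if e := os.MkdirAll(certsPath, 0700); e != nil {
        return probe.NewError(e)
    }
    return nil

    // after: MkdirAll's own error value (nil on success) is returned as-is.
    return os.MkdirAll(certsPath, 0700)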
@@ -22,7 +22,6 @@ import (
 	"path/filepath"
 
 	"github.com/minio/mc/pkg/console"
-	"github.com/minio/minio/pkg/probe"
 	"github.com/minio/minio/pkg/quick"
 )
 
@@ -38,35 +37,31 @@ func migrateConfig() {
 // Version '1' is not supported anymore and deprecated, safe to delete.
 func purgeV1() {
 	cv1, err := loadConfigV1()
-	if err != nil {
-		if os.IsNotExist(err.ToGoError()) {
+	if err != nil && os.IsNotExist(err) {
 		return
 	}
-	}
-	fatalIf(err.Trace(), "Unable to load config version ‘1’.", nil)
+	fatalIf(err, "Unable to load config version ‘1’.", nil)
 
 	if cv1.Version == "1" {
 		console.Println("Unsupported config version ‘1’ found, removed successfully.")
 		/// Purge old fsUsers.json file
 		configPath, err := getConfigPath()
-		fatalIf(err.Trace(), "Unable to retrieve config path.", nil)
+		fatalIf(err, "Unable to retrieve config path.", nil)
 
 		configFile := filepath.Join(configPath, "fsUsers.json")
 		os.RemoveAll(configFile)
 	}
-	fatalIf(probe.NewError(errors.New("")), "Unexpected version found ‘"+cv1.Version+"’, cannot migrate.", nil)
+	fatalIf(errors.New(""), "Unexpected version found ‘"+cv1.Version+"’, cannot migrate.", nil)
 }
 
 // Version '2' to '3' config migration adds new fields and re-orders
 // previous fields. Simplifies config for future additions.
 func migrateV2ToV3() {
 	cv2, err := loadConfigV2()
-	if err != nil {
-		if os.IsNotExist(err.ToGoError()) {
+	if err != nil && os.IsNotExist(err) {
 		return
 	}
-	}
-	fatalIf(err.Trace(), "Unable to load config version ‘2’.", nil)
+	fatalIf(err, "Unable to load config version ‘2’.", nil)
 	if cv2.Version != "2" {
 		return
 	}
@@ -99,14 +94,14 @@ func migrateV2ToV3() {
 	srvConfig.Logger.Syslog = slogger
 
 	qc, err := quick.New(srvConfig)
-	fatalIf(err.Trace(), "Unable to initialize config.", nil)
+	fatalIf(err, "Unable to initialize config.", nil)
 
 	configFile, err := getConfigFile()
-	fatalIf(err.Trace(), "Unable to get config file.", nil)
+	fatalIf(err, "Unable to get config file.", nil)
 
 	// Migrate the config.
 	err = qc.Save(configFile)
-	fatalIf(err.Trace(), "Migrating from version ‘"+cv2.Version+"’ to ‘"+srvConfig.Version+"’ failed.", nil)
+	fatalIf(err, "Migrating from version ‘"+cv2.Version+"’ to ‘"+srvConfig.Version+"’ failed.", nil)
 
 	console.Println("Migration from version ‘" + cv2.Version + "’ to ‘" + srvConfig.Version + "’ completed successfully.")
 }
@@ -116,12 +111,10 @@ func migrateV2ToV3() {
 // the config for future additions.
 func migrateV3ToV4() {
 	cv3, err := loadConfigV3()
-	if err != nil {
-		if os.IsNotExist(err.ToGoError()) {
+	if err != nil && os.IsNotExist(err) {
 		return
 	}
-	}
-	fatalIf(err.Trace(), "Unable to load config version ‘3’.", nil)
+	fatalIf(err, "Unable to load config version ‘3’.", nil)
 	if cv3.Version != "3" {
 		return
 	}
@@ -136,12 +129,12 @@ func migrateV3ToV4() {
 	srvConfig.Logger.Syslog = cv3.Logger.Syslog
 
 	qc, err := quick.New(srvConfig)
-	fatalIf(err.Trace(), "Unable to initialize the quick config.", nil)
+	fatalIf(err, "Unable to initialize the quick config.", nil)
 	configFile, err := getConfigFile()
-	fatalIf(err.Trace(), "Unable to get config file.", nil)
+	fatalIf(err, "Unable to get config file.", nil)
 
 	err = qc.Save(configFile)
-	fatalIf(err.Trace(), "Migrating from version ‘"+cv3.Version+"’ to ‘"+srvConfig.Version+"’ failed.", nil)
+	fatalIf(err, "Migrating from version ‘"+cv3.Version+"’ to ‘"+srvConfig.Version+"’ failed.", nil)
 
 	console.Println("Migration from version ‘" + cv3.Version + "’ to ‘" + srvConfig.Version + "’ completed successfully.")
 }
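The migration guards above flatten a nested existence check into one condition: a missing config file simply means there is nothing to migrate, while any other load error is fatal. A sketch of the flattened pattern (loadConfigV1 and fatalIf are the functions from this diff; the body after the guard is elided):

    func purgeV1Sketch() {
        cv1, err := loadConfigV1()
        if err != nil && os.IsNotExist(err) {
            return // no V1 config on disk, nothing to purge
        }
        fatalIf(err, "Unable to load config version ‘1’.", nil)
        _ = cv1 // version checks and file removal follow in the real code
    }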
@@ -4,7 +4,6 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/minio/minio/pkg/probe"
 	"github.com/minio/minio/pkg/quick"
 )
 
@@ -16,23 +15,23 @@ type configV1 struct {
 }
 
 // loadConfigV1 load config
-func loadConfigV1() (*configV1, *probe.Error) {
+func loadConfigV1() (*configV1, error) {
 	configPath, err := getConfigPath()
 	if err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
 	configFile := filepath.Join(configPath, "fsUsers.json")
-	if _, err := os.Stat(configFile); err != nil {
-		return nil, probe.NewError(err)
+	if _, err = os.Stat(configFile); err != nil {
+		return nil, err
 	}
 	a := &configV1{}
 	a.Version = "1"
 	qc, err := quick.New(a)
 	if err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
 	if err := qc.Load(configFile); err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
 	return qc.Data().(*configV1), nil
 }
@@ -60,22 +59,22 @@ type configV2 struct {
 }
 
 // loadConfigV2 load config version '2'.
-func loadConfigV2() (*configV2, *probe.Error) {
+func loadConfigV2() (*configV2, error) {
 	configFile, err := getConfigFile()
 	if err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
-	if _, err := os.Stat(configFile); err != nil {
-		return nil, probe.NewError(err)
+	if _, err = os.Stat(configFile); err != nil {
+		return nil, err
 	}
 	a := &configV2{}
 	a.Version = "2"
 	qc, err := quick.New(a)
 	if err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
 	if err := qc.Load(configFile); err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
 	return qc.Data().(*configV2), nil
 }
@@ -127,22 +126,22 @@ type configV3 struct {
 }
 
 // loadConfigV3 load config version '3'.
-func loadConfigV3() (*configV3, *probe.Error) {
+func loadConfigV3() (*configV3, error) {
 	configFile, err := getConfigFile()
 	if err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
-	if _, err := os.Stat(configFile); err != nil {
-		return nil, probe.NewError(err)
+	if _, err = os.Stat(configFile); err != nil {
+		return nil, err
 	}
 	a := &configV3{}
 	a.Version = "3"
 	qc, err := quick.New(a)
 	if err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
 	if err := qc.Load(configFile); err != nil {
-		return nil, err.Trace()
+		return nil, err
 	}
 	return qc.Data().(*configV3), nil
 }
config-v4.go (36 changes)

@@ -20,7 +20,6 @@ import (
 	"os"
 	"sync"
 
-	"github.com/minio/minio/pkg/probe"
 	"github.com/minio/minio/pkg/quick"
 )
 
@@ -40,7 +39,7 @@ type serverConfigV4 struct {
 }
 
 // initConfig - initialize server config. config version (called only once).
-func initConfig() *probe.Error {
+func initConfig() error {
 	if !isConfigFileExists() {
 		srvCfg := &serverConfigV4{}
 		srvCfg.Version = globalMinioConfigVersion
@@ -55,41 +54,37 @@ func initConfig() *probe.Error {
 		// Create config path.
 		err := createConfigPath()
 		if err != nil {
-			return err.Trace()
+			return err
 		}
 
 		// Create certs path.
 		err = createCertsPath()
 		if err != nil {
-			return err.Trace()
+			return err
 		}
 
 		// Save the new config globally.
 		serverConfig = srvCfg
 
 		// Save config into file.
-		err = serverConfig.Save()
-		if err != nil {
-			return err.Trace()
-		}
-		return nil
+		return serverConfig.Save()
 	}
 	configFile, err := getConfigFile()
 	if err != nil {
-		return err.Trace()
+		return err
 	}
-	if _, e := os.Stat(configFile); err != nil {
-		return probe.NewError(e)
+	if _, err = os.Stat(configFile); err != nil {
+		return err
 	}
 	srvCfg := &serverConfigV4{}
 	srvCfg.Version = globalMinioConfigVersion
 	srvCfg.rwMutex = &sync.RWMutex{}
 	qc, err := quick.New(srvCfg)
 	if err != nil {
-		return err.Trace()
+		return err
 	}
 	if err := qc.Load(configFile); err != nil {
-		return err.Trace()
+		return err
 	}
 	// Save the loaded config globally.
 	serverConfig = qc.Data().(*serverConfigV4)
@@ -181,27 +176,22 @@ func (s serverConfigV4) GetCredential() credential {
 }
 
 // Save config.
-func (s serverConfigV4) Save() *probe.Error {
+func (s serverConfigV4) Save() error {
 	s.rwMutex.RLock()
 	defer s.rwMutex.RUnlock()
 
 	// get config file.
 	configFile, err := getConfigFile()
 	if err != nil {
-		return err.Trace()
+		return err
 	}
 
 	// initialize quick.
 	qc, err := quick.New(&s)
 	if err != nil {
-		return err.Trace()
+		return err
 	}
 
 	// Save config file.
-	if err := qc.Save(configFile); err != nil {
-		return err.Trace()
-	}
-
-	// Return success.
-	return nil
+	return qc.Save(configFile)
 }
config.go (30 changes)

@@ -21,7 +21,6 @@ import (
 	"path/filepath"
 
 	"github.com/minio/go-homedir"
-	"github.com/minio/minio/pkg/probe"
 )
 
 // configPath for custom config path only for testing purposes
@@ -33,13 +32,13 @@ func setGlobalConfigPath(configPath string) {
 }
 
 // getConfigPath get server config path
-func getConfigPath() (string, *probe.Error) {
+func getConfigPath() (string, error) {
 	if customConfigPath != "" {
 		return customConfigPath, nil
 	}
-	homeDir, e := homedir.Dir()
-	if e != nil {
-		return "", probe.NewError(e)
+	homeDir, err := homedir.Dir()
+	if err != nil {
+		return "", err
 	}
 	configPath := filepath.Join(homeDir, globalMinioConfigDir)
 	return configPath, nil
@@ -48,27 +47,24 @@ func getConfigPath() (string, *probe.Error) {
 // mustGetConfigPath must get server config path.
 func mustGetConfigPath() string {
 	configPath, err := getConfigPath()
-	fatalIf(err.Trace(), "Unable to get config path.", nil)
+	fatalIf(err, "Unable to get config path.", nil)
 	return configPath
 }
 
 // createConfigPath create server config path.
-func createConfigPath() *probe.Error {
+func createConfigPath() error {
 	configPath, err := getConfigPath()
 	if err != nil {
-		return err.Trace()
+		return err
 	}
-	if err := os.MkdirAll(configPath, 0700); err != nil {
-		return probe.NewError(err)
-	}
-	return nil
+	return os.MkdirAll(configPath, 0700)
 }
 
 // isConfigFileExists - returns true if config file exists.
 func isConfigFileExists() bool {
-	st, e := os.Stat(mustGetConfigFile())
+	st, err := os.Stat(mustGetConfigFile())
 	// If file exists and is regular return true.
-	if e == nil && st.Mode().IsRegular() {
+	if err == nil && st.Mode().IsRegular() {
 		return true
 	}
 	return false
@@ -77,16 +73,16 @@ func isConfigFileExists() bool {
 // mustGetConfigFile must get server config file.
 func mustGetConfigFile() string {
 	configFile, err := getConfigFile()
-	fatalIf(err.Trace(), "Unable to get config file.", nil)
+	fatalIf(err, "Unable to get config file.", nil)
 
 	return configFile
 }
 
 // getConfigFile get server config file.
-func getConfigFile() (string, *probe.Error) {
+func getConfigFile() (string, error) {
 	configPath, err := getConfigPath()
 	if err != nil {
-		return "", err.Trace()
+		return "", err
 	}
 	return filepath.Join(configPath, globalMinioConfigFile), nil
 }
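The remaining hunks move the multipart code from methods on the old objectAPI type to methods on fsObjects, the FS-backed implementation of ObjectLayer. The fsObjects struct itself is not defined in the hunks shown; judging only from the fs.storage calls below, it must hold at least a StorageAPI, roughly:

    // Hypothetical minimal shape of fsObjects, inferred from fs.storage
    // usage in this diff; the real struct may carry more fields.
    type fsObjects struct {
        storage StorageAPI
    }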
@@ -27,28 +27,22 @@ import (
 	"strings"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/minio/minio/pkg/probe"
 	"github.com/skyrings/skyring-common/tools/uuid"
 )
-
-const (
-	// Minio meta volume.
-	minioMetaVolume = ".minio"
-)
 
 // listLeafEntries - lists all entries if a given prefixPath is a leaf
 // directory, returns error if any - returns empty list if prefixPath
 // is not a leaf directory.
-func (o objectAPI) listLeafEntries(prefixPath string) (entries []FileInfo, e error) {
+func (fs fsObjects) listLeafEntries(prefixPath string) (entries []FileInfo, e error) {
 	var markerPath string
 	for {
-		fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, false, 1000)
-		if e != nil {
+		fileInfos, eof, err := fs.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, false, 1000)
+		if err != nil {
 			log.WithFields(logrus.Fields{
 				"prefixPath": prefixPath,
 				"markerPath": markerPath,
-			}).Errorf("%s", e)
-			return nil, e
+			}).Errorf("%s", err)
+			return nil, err
 		}
 		for _, fileInfo := range fileInfos {
 			// Set marker for next batch of ListFiles.
@@ -72,7 +66,7 @@ func (o objectAPI) listLeafEntries(prefixPath string) (entries []FileInfo, e err
 }
 
 // listMetaVolumeFiles - list all files at a given prefix inside minioMetaVolume.
-func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, recursive bool, maxKeys int) (allFileInfos []FileInfo, eof bool, e error) {
+func (fs fsObjects) listMetaVolumeFiles(prefixPath string, markerPath string, recursive bool, maxKeys int) (allFileInfos []FileInfo, eof bool, err error) {
 	// newMaxKeys tracks the size of entries which are going to be
 	// returned back.
 	var newMaxKeys int
@@ -82,15 +76,15 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
 	for {
 		var fileInfos []FileInfo
 		// List files up to maxKeys-newMaxKeys, since we are skipping entries for special files.
-		fileInfos, eof, e = o.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, recursive, maxKeys-newMaxKeys)
-		if e != nil {
+		fileInfos, eof, err = fs.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, recursive, maxKeys-newMaxKeys)
+		if err != nil {
 			log.WithFields(logrus.Fields{
 				"prefixPath": prefixPath,
 				"markerPath": markerPath,
 				"recursive":  recursive,
 				"maxKeys":    maxKeys,
-			}).Errorf("%s", e)
-			return nil, true, e
+			}).Errorf("%s", err)
+			return nil, true, err
 		}
 		// Loop through and validate individual file.
 		for _, fi := range fileInfos {
@@ -99,20 +93,18 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
 			// List all the entries if fi.Name is a leaf directory, if
 			// fi.Name is not a leaf directory then the resulting
 			// entries are empty.
-			entries, e = o.listLeafEntries(fi.Name)
-			if e != nil {
+			entries, err = fs.listLeafEntries(fi.Name)
+			if err != nil {
 				log.WithFields(logrus.Fields{
 					"prefixPath": fi.Name,
-				}).Errorf("%s", e)
-				return nil, false, e
+				}).Errorf("%s", err)
+				return nil, false, err
 			}
 		}
 		// Set markerPath for next batch of listing.
 		markerPath = fi.Name
 		if len(entries) > 0 {
-
 			// We reach here for non-recursive case and a leaf entry.
-
 			for _, entry := range entries {
 				allFileInfos = append(allFileInfos, entry)
 				newMaxKeys++
@@ -121,17 +113,14 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
 				if newMaxKeys == maxKeys {
 					// Return values:
 					// allFileInfos : "maxKeys" number of entries.
-					// eof : eof returned by o.storage.ListFiles()
+					// eof : eof returned by fs.storage.ListFiles()
 					// error : nil
 					return
 				}
 			}
 			continue
-		}
-
-		} else {
+		} else {
 			// We reach here for a non-recursive case non-leaf entry
 			// OR recursive case with fi.Name matching pattern bucket/object/uploadID[.partNum.md5sum]
-
 			if !fi.Mode.IsDir() { // Do not skip non-recursive case directory entries.
 				// Skip files matching pattern bucket/object/uploadID.partNum.md5sum
 				// and retain files matching pattern bucket/object/uploadID
@@ -141,6 +130,7 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
 					continue
 				}
 			}
+		}
 		allFileInfos = append(allFileInfos, fi)
 		newMaxKeys++
 		// If we have reached the maxKeys, it means we have listed
@@ -148,7 +138,7 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
 		if newMaxKeys == maxKeys {
 			// Return values:
 			// allFileInfos : "maxKeys" number of entries.
-			// eof : eof returned by o.storage.ListFiles()
+			// eof : eof returned by fs.storage.ListFiles()
 			// error : nil
 			return
 		}
@ -164,41 +154,41 @@ func (o objectAPI) listMetaVolumeFiles(prefixPath string, markerPath string, rec
|
|||
}
|
||||
|
||||
// ListMultipartUploads - list multipart uploads.
|
||||
func (o objectAPI) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error) {
|
||||
func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
|
||||
result := ListMultipartsInfo{}
|
||||
// Verify if bucket is valid.
|
||||
if !IsValidBucketName(bucket) {
|
||||
return ListMultipartsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
|
||||
return ListMultipartsInfo{}, (BucketNameInvalid{Bucket: bucket})
|
||||
}
|
||||
if !IsValidObjectPrefix(prefix) {
|
||||
return ListMultipartsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
|
||||
return ListMultipartsInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: prefix})
|
||||
}
|
||||
// Verify if delimiter is anything other than '/', which we do not support.
|
||||
if delimiter != "" && delimiter != slashSeparator {
|
||||
return ListMultipartsInfo{}, probe.NewError(UnsupportedDelimiter{
|
||||
return ListMultipartsInfo{}, (UnsupportedDelimiter{
|
||||
Delimiter: delimiter,
|
||||
})
|
||||
}
|
||||
// Verify if marker has prefix.
|
||||
if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
|
||||
return ListMultipartsInfo{}, probe.NewError(InvalidMarkerPrefixCombination{
|
||||
return ListMultipartsInfo{}, (InvalidMarkerPrefixCombination{
|
||||
Marker: keyMarker,
|
||||
Prefix: prefix,
|
||||
})
|
||||
}
|
||||
if uploadIDMarker != "" {
|
||||
if strings.HasSuffix(keyMarker, slashSeparator) {
|
||||
return result, probe.NewError(InvalidUploadIDKeyCombination{
|
||||
return result, (InvalidUploadIDKeyCombination{
|
||||
UploadIDMarker: uploadIDMarker,
|
||||
KeyMarker: keyMarker,
|
||||
})
|
||||
}
|
||||
id, e := uuid.Parse(uploadIDMarker)
|
||||
if e != nil {
|
||||
return result, probe.NewError(e)
|
||||
id, err := uuid.Parse(uploadIDMarker)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if id.IsZero() {
|
||||
return result, probe.NewError(MalformedUploadID{
|
||||
return result, (MalformedUploadID{
|
||||
UploadID: uploadIDMarker,
|
||||
})
|
||||
}
|
||||
|
@ -220,15 +210,15 @@ func (o objectAPI) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarke
|
|||
keyMarkerPath = pathJoin(pathJoin(bucket, keyMarker), uploadIDMarker)
|
||||
}
|
||||
// List all the multipart files at prefixPath, starting with marker keyMarkerPath.
|
||||
fileInfos, eof, e := o.listMetaVolumeFiles(prefixPath, keyMarkerPath, recursive, maxUploads)
|
||||
if e != nil {
|
||||
fileInfos, eof, err := fs.listMetaVolumeFiles(prefixPath, keyMarkerPath, recursive, maxUploads)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"prefixPath": prefixPath,
|
||||
"markerPath": keyMarkerPath,
|
||||
"recursive": recursive,
|
||||
"maxUploads": maxUploads,
|
||||
}).Errorf("listMetaVolumeFiles failed with %s", e)
|
||||
return ListMultipartsInfo{}, probe.NewError(e)
|
||||
}).Errorf("listMetaVolumeFiles failed with %s", err)
|
||||
return ListMultipartsInfo{}, err
|
||||
}
|
||||
|
||||
// Loop through all the received files fill in the multiparts result.
|
||||
|
@ -260,55 +250,50 @@ func (o objectAPI) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarke
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func (o objectAPI) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
|
||||
func (fs fsObjects) NewMultipartUpload(bucket, object string) (string, error) {
|
||||
// Verify if bucket name is valid.
|
||||
if !IsValidBucketName(bucket) {
|
||||
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
|
||||
return "", (BucketNameInvalid{Bucket: bucket})
|
||||
}
|
||||
// Verify if object name is valid.
|
||||
if !IsValidObjectName(object) {
|
||||
return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
|
||||
return "", ObjectNameInvalid{Bucket: bucket, Object: object}
|
||||
}
|
||||
// Verify whether the bucket exists.
|
||||
isExist, err := o.isBucketExist(bucket)
|
||||
if err != nil {
|
||||
return "", probe.NewError(err)
|
||||
}
|
||||
if !isExist {
|
||||
return "", probe.NewError(BucketNotFound{Bucket: bucket})
|
||||
if isExist, err := isBucketExist(fs.storage, bucket); err != nil {
|
||||
return "", err
|
||||
} else if !isExist {
|
||||
return "", BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
if _, e := o.storage.StatVol(minioMetaVolume); e != nil {
|
||||
if e == errVolumeNotFound {
|
||||
e = o.storage.MakeVol(minioMetaVolume)
|
||||
if e != nil {
|
||||
if e == errDiskFull {
|
||||
return "", probe.NewError(StorageFull{})
|
||||
}
|
||||
return "", probe.NewError(e)
|
||||
if _, err := fs.storage.StatVol(minioMetaVolume); err != nil {
|
||||
if err == errVolumeNotFound {
|
||||
err = fs.storage.MakeVol(minioMetaVolume)
|
||||
if err != nil {
|
||||
return "", toObjectErr(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
for {
|
||||
uuid, e := uuid.New()
|
||||
if e != nil {
|
||||
return "", probe.NewError(e)
|
||||
uuid, err := uuid.New()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
uploadID := uuid.String()
|
||||
uploadIDPath := path.Join(bucket, object, uploadID)
|
||||
if _, e = o.storage.StatFile(minioMetaVolume, uploadIDPath); e != nil {
|
||||
if e != errFileNotFound {
|
||||
return "", probe.NewError(toObjectErr(e, minioMetaVolume, uploadIDPath))
|
||||
if _, err = fs.storage.StatFile(minioMetaVolume, uploadIDPath); err != nil {
|
||||
if err != errFileNotFound {
|
||||
return "", (toObjectErr(err, minioMetaVolume, uploadIDPath))
|
||||
}
|
||||
// uploadIDPath doesn't exist, so create empty file to reserve the name
|
||||
var w io.WriteCloser
|
||||
if w, e = o.storage.CreateFile(minioMetaVolume, uploadIDPath); e == nil {
|
||||
if w, err = fs.storage.CreateFile(minioMetaVolume, uploadIDPath); err == nil {
|
||||
// Close the writer.
|
||||
if e = w.Close(); e != nil {
|
||||
return "", probe.NewError(e)
|
||||
if err = w.Close(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
return "", probe.NewError(toObjectErr(e, minioMetaVolume, uploadIDPath))
|
||||
return "", toObjectErr(err, minioMetaVolume, uploadIDPath)
|
||||
}
|
||||
return uploadID, nil
|
||||
}
|
||||
|
@ -318,48 +303,46 @@ func (o objectAPI) NewMultipartUpload(bucket, object string) (string, *probe.Err
|
|||
}
|
||||
|
||||
// isUploadIDExists - verify if a given uploadID exists and is valid.
|
||||
func (o objectAPI) isUploadIDExists(bucket, object, uploadID string) (bool, error) {
|
||||
func isUploadIDExists(storage StorageAPI, bucket, object, uploadID string) (bool, error) {
|
||||
uploadIDPath := path.Join(bucket, object, uploadID)
|
||||
st, e := o.storage.StatFile(minioMetaVolume, uploadIDPath)
|
||||
if e != nil {
|
||||
st, err := storage.StatFile(minioMetaVolume, uploadIDPath)
|
||||
if err != nil {
|
||||
// Upload id does not exist.
|
||||
if e == errFileNotFound {
|
||||
if err == errFileNotFound {
|
||||
return false, nil
|
||||
}
|
||||
return false, e
|
||||
return false, err
|
||||
}
|
||||
// Upload id exists and is a regular file.
|
||||
return st.Mode.IsRegular(), nil
|
||||
}
|
||||
|
||||
// PutObjectPart - writes the multipart upload chunks.
|
||||
func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, *probe.Error) {
|
||||
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
|
||||
// Verify if bucket is valid.
|
||||
if !IsValidBucketName(bucket) {
|
||||
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
|
||||
return "", BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
if !IsValidObjectName(object) {
|
||||
return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
|
||||
return "", ObjectNameInvalid{Bucket: bucket, Object: object}
|
||||
}
|
||||
// Verify whether the bucket exists.
|
||||
isExist, err := o.isBucketExist(bucket)
|
||||
if err != nil {
|
||||
return "", probe.NewError(err)
|
||||
}
|
||||
if !isExist {
|
||||
return "", probe.NewError(BucketNotFound{Bucket: bucket})
|
||||
if isExist, err := isBucketExist(fs.storage, bucket); err != nil {
|
||||
return "", err
|
||||
} else if !isExist {
|
||||
return "", BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
|
||||
return "", probe.NewError(e)
|
||||
if status, err := isUploadIDExists(fs.storage, bucket, object, uploadID); err != nil {
|
||||
return "", err
|
||||
} else if !status {
|
||||
return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
|
||||
return "", InvalidUploadID{UploadID: uploadID}
|
||||
}
|
||||
|
||||
partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, partID, md5Hex)
|
||||
fileWriter, e := o.storage.CreateFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
|
||||
if e != nil {
|
||||
return "", probe.NewError(toObjectErr(e, bucket, object))
|
||||
fileWriter, err := fs.storage.CreateFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
|
||||
if err != nil {
|
||||
return "", toObjectErr(err, bucket, object)
|
||||
}
|
||||
|
||||
// Initialize md5 writer.
|
||||
|
@ -370,21 +353,21 @@ func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, si
|
|||
|
||||
// Instantiate checksum hashers and create a multiwriter.
|
||||
if size > 0 {
|
||||
if _, e = io.CopyN(multiWriter, data, size); e != nil {
|
||||
if _, err = io.CopyN(multiWriter, data, size); err != nil {
|
||||
safeCloseAndRemove(fileWriter)
|
||||
return "", probe.NewError(toObjectErr(e))
|
||||
return "", (toObjectErr(err))
|
||||
}
|
||||
// Reader shouldn't have more data what mentioned in size argument.
|
||||
// reading one more byte from the reader to validate it.
|
||||
// expected to fail, success validates existence of more data in the reader.
|
||||
if _, e = io.CopyN(ioutil.Discard, data, 1); e == nil {
|
||||
if _, err = io.CopyN(ioutil.Discard, data, 1); err == nil {
|
||||
safeCloseAndRemove(fileWriter)
|
||||
return "", probe.NewError(UnExpectedDataSize{Size: int(size)})
|
||||
return "", (UnExpectedDataSize{Size: int(size)})
|
||||
}
|
||||
} else {
|
||||
if _, e = io.Copy(multiWriter, data); e != nil {
|
||||
if _, err = io.Copy(multiWriter, data); err != nil {
|
||||
safeCloseAndRemove(fileWriter)
|
||||
return "", probe.NewError(toObjectErr(e))
|
||||
return "", (toObjectErr(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -392,28 +375,28 @@ func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, si
 	if md5Hex != "" {
 		if newMD5Hex != md5Hex {
 			safeCloseAndRemove(fileWriter)
-			return "", probe.NewError(BadDigest{md5Hex, newMD5Hex})
+			return "", (BadDigest{md5Hex, newMD5Hex})
 		}
 	}
-	e = fileWriter.Close()
-	if e != nil {
-		return "", probe.NewError(e)
+	err = fileWriter.Close()
+	if err != nil {
+		return "", err
 	}
 	return newMD5Hex, nil
 }

-func (o objectAPI) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error) {
+func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return ListPartsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return ListPartsInfo{}, (BucketNameInvalid{Bucket: bucket})
 	}
 	if !IsValidObjectName(object) {
-		return ListPartsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
+		return ListPartsInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
-	if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
-		return ListPartsInfo{}, probe.NewError(e)
+	if status, err := isUploadIDExists(fs.storage, bucket, object, uploadID); err != nil {
+		return ListPartsInfo{}, err
 	} else if !status {
-		return ListPartsInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
+		return ListPartsInfo{}, (InvalidUploadID{UploadID: uploadID})
 	}
 	result := ListPartsInfo{}
 	var markerPath string
@@ -423,26 +406,26 @@ func (o objectAPI) ListObjectParts(bucket, object, uploadID string, partNumberMa
 	// partNumberMarker is already set.
 	if partNumberMarker > 0 {
 		partNumberMarkerPath := uploadIDPath + "." + strconv.Itoa(partNumberMarker) + "."
-		fileInfos, _, e := o.storage.ListFiles(minioMetaVolume, partNumberMarkerPath, "", false, 1)
-		if e != nil {
-			return result, probe.NewError(toObjectErr(e, minioMetaVolume, partNumberMarkerPath))
+		fileInfos, _, err := fs.storage.ListFiles(minioMetaVolume, partNumberMarkerPath, "", false, 1)
+		if err != nil {
+			return result, toObjectErr(err, minioMetaVolume, partNumberMarkerPath)
 		}
 		if len(fileInfos) == 0 {
-			return result, probe.NewError(InvalidPart{})
+			return result, (InvalidPart{})
 		}
 		markerPath = fileInfos[0].Name
 	}
 	uploadIDPrefix := uploadIDPath + "."
-	fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, uploadIDPrefix, markerPath, false, maxParts)
-	if e != nil {
-		return result, probe.NewError(InvalidPart{})
+	fileInfos, eof, err := fs.storage.ListFiles(minioMetaVolume, uploadIDPrefix, markerPath, false, maxParts)
+	if err != nil {
+		return result, InvalidPart{}
 	}
 	for _, fileInfo := range fileInfos {
 		fileName := path.Base(fileInfo.Name)
 		splitResult := strings.Split(fileName, ".")
-		partNum, e := strconv.Atoi(splitResult[1])
-		if e != nil {
-			return result, probe.NewError(e)
+		partNum, err := strconv.Atoi(splitResult[1])
+		if err != nil {
+			return result, err
 		}
 		md5sum := splitResult[2]
 		result.Parts = append(result.Parts, partInfo{
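ListObjectParts depends on the part-file naming convention used by PutObjectPart: each part is stored under the minio meta volume as "<uploadID>.<partNumber>.<md5sum>", so the part number and checksum can be recovered by splitting the name on dots. A minimal illustration with hypothetical values:

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func main() {
		// Hypothetical part file name: "<uploadID>.<partNumber>.<md5sum>".
		partName := "a3f2c9d4.5.9e107d9d372bb6826bd81d3542a419d6"
		fields := strings.Split(partName, ".")
		partNum, err := strconv.Atoi(fields[1]) // fields[0] is the uploadID, fields[2] the md5sum
		if err != nil {
			fmt.Println("unexpected part name:", err)
			return
		}
		fmt.Printf("part %d, md5 %s\n", partNum, fields[2])
	}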
@@ -463,86 +446,90 @@ func (o objectAPI) ListObjectParts(bucket, object, uploadID string, partNumberMa
 	return result, nil
 }

-// Create an s3 compatible MD5sum for complete multipart transaction.
-func makeS3MD5(md5Strs ...string) (string, *probe.Error) {
-	var finalMD5Bytes []byte
-	for _, md5Str := range md5Strs {
-		md5Bytes, e := hex.DecodeString(md5Str)
-		if e != nil {
-			return "", probe.NewError(e)
-		}
-		finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
-	}
-	md5Hasher := md5.New()
-	md5Hasher.Write(finalMD5Bytes)
-	s3MD5 := fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(md5Strs))
-	return s3MD5, nil
-}
-
-func (o objectAPI) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, *probe.Error) {
+func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return "", (BucketNameInvalid{Bucket: bucket})
 	}
 	if !IsValidObjectName(object) {
-		return "", probe.NewError(ObjectNameInvalid{
+		return "", (ObjectNameInvalid{
 			Bucket: bucket,
 			Object: object,
 		})
 	}
-	if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
-		return "", probe.NewError(e)
+	if status, err := isUploadIDExists(fs.storage, bucket, object, uploadID); err != nil {
+		return "", err
 	} else if !status {
-		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
+		return "", (InvalidUploadID{UploadID: uploadID})
 	}

+	fileWriter, err := fs.storage.CreateFile(bucket, object)
+	if err != nil {
+		return "", toObjectErr(err, bucket, object)
+	}

 	var md5Sums []string
 	for _, part := range parts {
 		// Construct part suffix.
 		partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, part.PartNumber, part.ETag)
-		e := o.storage.RenameFile(minioMetaVolume, path.Join(bucket, object, partSuffix), bucket, path.Join(object, fmt.Sprint(part.PartNumber)))
-		if e != nil {
-			return "", probe.NewError(e)
+		var fileReader io.ReadCloser
+		fileReader, err = fs.storage.ReadFile(minioMetaVolume, path.Join(bucket, object, partSuffix), 0)
+		if err != nil {
+			if err == errFileNotFound {
+				return "", (InvalidPart{})
+			}
+			return "", err
+		}
+		_, err = io.Copy(fileWriter, fileReader)
+		if err != nil {
+			return "", err
+		}
+		err = fileReader.Close()
+		if err != nil {
+			return "", err
 		}
 		md5Sums = append(md5Sums, part.ETag)
 	}
-	fileWriter, e := o.storage.CreateFile(bucket, path.Join(object, "multipart.json"))
-	if e != nil {
-		return "", probe.NewError(e)
+	err = fileWriter.Close()
+	if err != nil {
+		return "", err
 	}
-	fileWriter.Close()

 	// Save the s3 md5.
 	s3MD5, err := makeS3MD5(md5Sums...)
 	if err != nil {
-		return "", err.Trace(md5Sums...)
+		return "", err
 	}

 	// Cleanup all the parts.
-	// o.removeMultipartUpload(bucket, object, uploadID)
+	fs.removeMultipartUpload(bucket, object, uploadID)

 	// Return md5sum.
 	return s3MD5, nil
 }

-func (o objectAPI) removeMultipartUpload(bucket, object, uploadID string) *probe.Error {
+func (fs fsObjects) removeMultipartUpload(bucket, object, uploadID string) error {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return (BucketNameInvalid{Bucket: bucket})
 	}
 	if !IsValidObjectName(object) {
-		return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
+		return (ObjectNameInvalid{Bucket: bucket, Object: object})
 	}

 	marker := ""
 	for {
 		uploadIDPath := path.Join(bucket, object, uploadID)
-		fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, uploadIDPath, marker, false, 1000)
-		if e != nil {
-			return probe.NewError(InvalidUploadID{UploadID: uploadID})
+		fileInfos, eof, err := fs.storage.ListFiles(minioMetaVolume, uploadIDPath, marker, false, 1000)
+		if err != nil {
+			if err == errFileNotFound {
+				return (InvalidUploadID{UploadID: uploadID})
+			}
+			return toObjectErr(err)
 		}
 		for _, fileInfo := range fileInfos {
-			o.storage.DeleteFile(minioMetaVolume, fileInfo.Name)
+			fs.storage.DeleteFile(minioMetaVolume, fileInfo.Name)
 			marker = fileInfo.Name
 		}
 		if eof {

@@ -552,22 +539,18 @@ func (o objectAPI) removeMultipartUpload(bucket, object, uploadID string) *probe
 	return nil
 }

-func (o objectAPI) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
+func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return (BucketNameInvalid{Bucket: bucket})
 	}
 	if !IsValidObjectName(object) {
-		return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
+		return (ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
-	if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
-		return probe.NewError(e)
+	if status, err := isUploadIDExists(fs.storage, bucket, object, uploadID); err != nil {
+		return err
 	} else if !status {
-		return probe.NewError(InvalidUploadID{UploadID: uploadID})
+		return (InvalidUploadID{UploadID: uploadID})
 	}
-	err := o.removeMultipartUpload(bucket, object, uploadID)
-	if err != nil {
-		return err.Trace(bucket, object, uploadID)
-	}
-	return nil
+	return fs.removeMultipartUpload(bucket, object, uploadID)
 }
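CompleteMultipartUpload still derives its final checksum from makeS3MD5, which this commit moves out of this file as a plain-error function: the S3 convention is the md5 of the concatenated raw part md5s, suffixed with "-<part count>". A standalone re-derivation for illustration (s3MD5 here is a hypothetical stand-in, not the repository's function):

	package main

	import (
		"crypto/md5"
		"encoding/hex"
		"fmt"
	)

	func s3MD5(partMD5s ...string) (string, error) {
		var all []byte
		for _, h := range partMD5s {
			b, err := hex.DecodeString(h)
			if err != nil {
				return "", err
			}
			all = append(all, b...)
		}
		sum := md5.Sum(all)
		return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partMD5s)), nil
	}

	func main() {
		etag, err := s3MD5(
			"9e107d9d372bb6826bd81d3542a419d6",
			"e4d909c290d0fb1ca068ffaddf22cbd0",
		)
		if err != nil {
			panic(err)
		}
		fmt.Println(etag) // "<hex md5>-2"
	}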
@@ -0,0 +1,333 @@
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"io"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/minio/minio/pkg/mimedb"
+)
+
+// fsObjects - Implements fs object layer.
+type fsObjects struct {
+	storage StorageAPI
+}
+
+// newFSObjects - initialize new fs object layer.
+func newFSObjects(exportPath string) (ObjectLayer, error) {
+	var storage StorageAPI
+	var err error
+	if !strings.ContainsRune(exportPath, ':') || filepath.VolumeName(exportPath) != "" {
+		// Initialize filesystem storage API.
+		storage, err = newPosix(exportPath)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// Initialize rpc client storage API.
+		storage, err = newRPCClient(exportPath)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fsObjects{storage}, nil
+}
+
+// checks whether bucket exists.
+func isBucketExist(storage StorageAPI, bucketName string) (bool, error) {
+	// Check whether bucket exists.
+	if _, e := storage.StatVol(bucketName); e != nil {
+		if e == errVolumeNotFound {
+			return false, nil
+		}
+		return false, e
+	}
+	return true, nil
+}
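newFSObjects picks a backend from the shape of exportPath: a path without ':' is local posix storage, and a ':' is tolerated only when it is just a Windows drive letter; anything else is handed to the RPC client as a network endpoint. A minimal sketch of that heuristic in isolation (isLocalPath is a hypothetical name):

	package main

	import (
		"fmt"
		"path/filepath"
		"strings"
	)

	func isLocalPath(exportPath string) bool {
		// filepath.VolumeName reports "C:" for `C:\data` on Windows, "" elsewhere.
		return !strings.ContainsRune(exportPath, ':') || filepath.VolumeName(exportPath) != ""
	}

	func main() {
		fmt.Println(isLocalPath("/mnt/export"))          // true: plain posix path
		fmt.Println(isLocalPath(`C:\export`))            // true on Windows: drive letter, not an address
		fmt.Println(isLocalPath("10.0.0.5:/mnt/export")) // false: handed to the RPC client
	}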
+
+/// Bucket operations
+
+// MakeBucket - make a bucket.
+func (fs fsObjects) MakeBucket(bucket string) error {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return BucketNameInvalid{Bucket: bucket}
+	}
+	if err := fs.storage.MakeVol(bucket); err != nil {
+		return toObjectErr(err, bucket)
+	}
+	// This happens for the first time, but keep this here since this
+	// is the only place where it can be made expensive optimizing all
+	// other calls.
+	// Create minio meta volume, if it doesn't exist yet.
+	if err := fs.storage.MakeVol(minioMetaVolume); err != nil {
+		if err != errVolumeExists {
+			return toObjectErr(err, minioMetaVolume)
+		}
+	}
+	return nil
+}
+
+// GetBucketInfo - get bucket info.
+func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
+	}
+	vi, err := fs.storage.StatVol(bucket)
+	if err != nil {
+		return BucketInfo{}, toObjectErr(err, bucket)
+	}
+	return BucketInfo{
+		Name:    bucket,
+		Created: vi.Created,
+		Total:   vi.Total,
+		Free:    vi.Free,
+	}, nil
+}
+
+// ListBuckets - list buckets.
+func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
+	var bucketInfos []BucketInfo
+	vols, err := fs.storage.ListVols()
+	if err != nil {
+		return nil, toObjectErr(err)
+	}
+	for _, vol := range vols {
+		// StorageAPI can send volume names which are incompatible
+		// with buckets, handle it and skip them.
+		if !IsValidBucketName(vol.Name) {
+			continue
+		}
+		bucketInfos = append(bucketInfos, BucketInfo{
+			Name:    vol.Name,
+			Created: vol.Created,
+			Total:   vol.Total,
+			Free:    vol.Free,
+		})
+	}
+	sort.Sort(byBucketName(bucketInfos))
+	return bucketInfos, nil
+}
+
+// DeleteBucket - delete a bucket.
+func (fs fsObjects) DeleteBucket(bucket string) error {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return BucketNameInvalid{Bucket: bucket}
+	}
+	if err := fs.storage.DeleteVol(bucket); err != nil {
+		return toObjectErr(err)
+	}
+	return nil
+}
+
+/// Object Operations
+
+// GetObject - get an object.
+func (fs fsObjects) GetObject(bucket, object string, startOffset int64) (io.ReadCloser, error) {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return nil, (BucketNameInvalid{Bucket: bucket})
+	}
+	// Verify if object is valid.
+	if !IsValidObjectName(object) {
+		return nil, (ObjectNameInvalid{Bucket: bucket, Object: object})
+	}
+	fileReader, err := fs.storage.ReadFile(bucket, object, startOffset)
+	if err != nil {
+		return nil, toObjectErr(err, bucket, object)
+	}
+	return fileReader, nil
+}
+
+// GetObjectInfo - get object info.
+func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return ObjectInfo{}, (BucketNameInvalid{Bucket: bucket})
+	}
+	// Verify if object is valid.
+	if !IsValidObjectName(object) {
+		return ObjectInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: object})
+	}
+	fi, err := fs.storage.StatFile(bucket, object)
+	if err != nil {
+		return ObjectInfo{}, toObjectErr(err, bucket, object)
+	}
+	contentType := "application/octet-stream"
+	if objectExt := filepath.Ext(object); objectExt != "" {
+		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
+		if ok {
+			contentType = content.ContentType
+		}
+	}
+	return ObjectInfo{
+		Bucket:      bucket,
+		Name:        object,
+		ModTime:     fi.ModTime,
+		Size:        fi.Size,
+		IsDir:       fi.Mode.IsDir(),
+		ContentType: contentType,
+		MD5Sum:      "", // Read from metadata.
+	}, nil
+}
+
+func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (string, error) {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return "", (BucketNameInvalid{Bucket: bucket})
+	}
+	if !IsValidObjectName(object) {
+		return "", (ObjectNameInvalid{
+			Bucket: bucket,
+			Object: object,
+		})
+	}
+	// Check whether the bucket exists.
+	if isExist, err := isBucketExist(fs.storage, bucket); err != nil {
+		return "", err
+	} else if !isExist {
+		return "", BucketNotFound{Bucket: bucket}
+	}
+
+	fileWriter, err := fs.storage.CreateFile(bucket, object)
+	if err != nil {
+		return "", toObjectErr(err, bucket, object)
+	}
+
+	// Initialize md5 writer.
+	md5Writer := md5.New()
+
+	// Instantiate a new multi writer.
+	multiWriter := io.MultiWriter(md5Writer, fileWriter)
+
+	// Instantiate checksum hashers and create a multiwriter.
+	if size > 0 {
+		if _, err = io.CopyN(multiWriter, data, size); err != nil {
+			if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
+				return "", clErr
+			}
+			return "", toObjectErr(err)
+		}
+	} else {
+		if _, err = io.Copy(multiWriter, data); err != nil {
+			if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
+				return "", clErr
+			}
+			return "", err
+		}
+	}
+
+	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
+	// md5Hex representation.
+	var md5Hex string
+	if len(metadata) != 0 {
+		md5Hex = metadata["md5Sum"]
+	}
+	if md5Hex != "" {
+		if newMD5Hex != md5Hex {
+			if err = safeCloseAndRemove(fileWriter); err != nil {
+				return "", err
+			}
+			return "", BadDigest{md5Hex, newMD5Hex}
+		}
+	}
+	err = fileWriter.Close()
+	if err != nil {
+		return "", err
+	}
+
+	// Return md5sum, successfully wrote object.
+	return newMD5Hex, nil
+}
+
+func (fs fsObjects) DeleteObject(bucket, object string) error {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return BucketNameInvalid{Bucket: bucket}
+	}
+	if !IsValidObjectName(object) {
+		return ObjectNameInvalid{Bucket: bucket, Object: object}
+	}
+	if err := fs.storage.DeleteFile(bucket, object); err != nil {
+		return toObjectErr(err, bucket, object)
+	}
+	return nil
+}
+
+func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return ListObjectsInfo{}, BucketNameInvalid{Bucket: bucket}
+	}
+	if !IsValidObjectPrefix(prefix) {
+		return ListObjectsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix}
+	}
+	// Verify if delimiter is anything other than '/', which we do not support.
+	if delimiter != "" && delimiter != slashSeparator {
+		return ListObjectsInfo{}, UnsupportedDelimiter{
+			Delimiter: delimiter,
+		}
+	}
+	// Verify if marker has prefix.
+	if marker != "" {
+		if !strings.HasPrefix(marker, prefix) {
+			return ListObjectsInfo{}, InvalidMarkerPrefixCombination{
+				Marker: marker,
+				Prefix: prefix,
+			}
+		}
+	}
+
+	// Default is recursive, if delimiter is set then list non recursive.
+	recursive := true
+	if delimiter == slashSeparator {
+		recursive = false
+	}
+	fileInfos, eof, err := fs.storage.ListFiles(bucket, prefix, marker, recursive, maxKeys)
+	if err != nil {
+		return ListObjectsInfo{}, toObjectErr(err, bucket)
+	}
+	if maxKeys == 0 {
+		return ListObjectsInfo{}, nil
+	}
+
+	result := ListObjectsInfo{IsTruncated: !eof}
+	for _, fileInfo := range fileInfos {
+		// With delimiter set we fill in NextMarker and Prefixes.
+		if delimiter == slashSeparator {
+			result.NextMarker = fileInfo.Name
+			if fileInfo.Mode.IsDir() {
+				result.Prefixes = append(result.Prefixes, fileInfo.Name)
+				continue
+			}
+		}
+		result.Objects = append(result.Objects, ObjectInfo{
+			Name:    fileInfo.Name,
+			ModTime: fileInfo.ModTime,
+			Size:    fileInfo.Size,
+			IsDir:   false,
+		})
+	}
+	return result, nil
+}
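With the layer split, callers construct an ObjectLayer once and drive everything through its methods; the StorageAPI behind it becomes an implementation detail. A hedged usage sketch that would sit alongside this file in the same package (bucket and object names are illustrative, error handling minimal):

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"os"
	)

	func main() {
		objAPI, err := newFSObjects("/tmp/minio-export") // constructor from the new file above
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		if err = objAPI.MakeBucket("photos"); err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		data := []byte("hello object layer")
		if _, err = objAPI.PutObject("photos", "greeting.txt", int64(len(data)), bytes.NewReader(data), nil); err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		rc, err := objAPI.GetObject("photos", "greeting.txt", 0)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		defer rc.Close()
		io.Copy(os.Stdout, rc) // prints "hello object layer"
	}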
httprange.go

@@ -21,8 +21,6 @@ import (
 	"fmt"
 	"strconv"
 	"strings"
-
-	"github.com/minio/minio/pkg/probe"
 )

 const (

@@ -50,7 +48,7 @@ func (r *httpRange) String() string {
 }

 // Grab new range from request header
-func getRequestedRange(hrange string, size int64) (*httpRange, *probe.Error) {
+func getRequestedRange(hrange string, size int64) (*httpRange, error) {
 	r := &httpRange{
 		start:  0,
 		length: 0,

@@ -60,16 +58,16 @@ func getRequestedRange(hrange string, size int64) (*httpRange, *probe.Error) {
 	if hrange != "" {
 		err := r.parseRange(hrange)
 		if err != nil {
-			return nil, err.Trace()
+			return nil, err
 		}
 	}
 	return r, nil
 }

-func (r *httpRange) parse(ra string) *probe.Error {
+func (r *httpRange) parse(ra string) error {
 	i := strings.Index(ra, "-")
 	if i < 0 {
-		return probe.NewError(InvalidRange{})
+		return InvalidRange{}
 	}
 	start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
 	if start == "" {

@@ -77,7 +75,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
 		// range start relative to the end of the file.
 		i, err := strconv.ParseInt(end, 10, 64)
 		if err != nil {
-			return probe.NewError(InvalidRange{})
+			return InvalidRange{}
 		}
 		if i > r.size {
 			i = r.size

@@ -87,7 +85,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
 	} else {
 		i, err := strconv.ParseInt(start, 10, 64)
 		if err != nil || i > r.size || i < 0 {
-			return probe.NewError(InvalidRange{})
+			return InvalidRange{}
 		}
 		r.start = i
 		if end == "" {

@@ -96,7 +94,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
 		} else {
 			i, err := strconv.ParseInt(end, 10, 64)
 			if err != nil || r.start > i {
-				return probe.NewError(InvalidRange{})
+				return InvalidRange{}
 			}
 			if i >= r.size {
 				i = r.size - 1

@@ -108,26 +106,26 @@ func (r *httpRange) parse(ra string) *probe.Error {
 }

 // parseRange parses a Range header string as per RFC 2616.
-func (r *httpRange) parseRange(s string) *probe.Error {
+func (r *httpRange) parseRange(s string) error {
 	if s == "" {
-		return probe.NewError(errors.New("header not present"))
+		return errors.New("header not present")
 	}
 	if !strings.HasPrefix(s, b) {
-		return probe.NewError(InvalidRange{})
+		return InvalidRange{}
 	}

 	ras := strings.Split(s[len(b):], ",")
 	if len(ras) == 0 {
-		return probe.NewError(errors.New("invalid request"))
+		return errors.New("invalid request")
 	}
 	// Just pick the first one and ignore the rest, we only support one range per object
 	if len(ras) > 1 {
-		return probe.NewError(errors.New("multiple ranges specified"))
+		return errors.New("multiple ranges specified")
 	}

 	ra := strings.TrimSpace(ras[0])
 	if ra == "" {
-		return probe.NewError(InvalidRange{})
+		return InvalidRange{}
 	}
 	return r.parse(ra)
 }
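For reference, the single-range semantics accepted above cover "bytes=a-b", the open-ended "bytes=a-", and the suffix form "bytes=-n". The following standalone sketch re-implements those rules against a known object size; it is an illustration only, not the httpRange type itself:

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func parseOneRange(spec string, size int64) (start, length int64, err error) {
		const prefix = "bytes="
		if !strings.HasPrefix(spec, prefix) {
			return 0, 0, fmt.Errorf("invalid range %q", spec)
		}
		ra := strings.TrimSpace(spec[len(prefix):])
		i := strings.Index(ra, "-")
		if i < 0 {
			return 0, 0, fmt.Errorf("invalid range %q", spec)
		}
		first, last := ra[:i], ra[i+1:]
		if first == "" { // suffix form: last n bytes of the object
			n, perr := strconv.ParseInt(last, 10, 64)
			if perr != nil || n <= 0 {
				return 0, 0, fmt.Errorf("invalid range %q", spec)
			}
			if n > size {
				n = size
			}
			return size - n, n, nil
		}
		start, err = strconv.ParseInt(first, 10, 64)
		if err != nil || start >= size {
			return 0, 0, fmt.Errorf("invalid range %q", spec)
		}
		end := size - 1
		if last != "" {
			if end, err = strconv.ParseInt(last, 10, 64); err != nil || end < start {
				return 0, 0, fmt.Errorf("invalid range %q", spec)
			}
			if end >= size {
				end = size - 1
			}
		}
		return start, end - start + 1, nil
	}

	func main() {
		fmt.Println(parseOneRange("bytes=0-99", 1000)) // 0 100 <nil>
		fmt.Println(parseOneRange("bytes=-100", 1000)) // 900 100 <nil>
		fmt.Println(parseOneRange("bytes=900-", 1000)) // 900 100 <nil>
	}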
@@ -20,7 +20,6 @@ import (
 	"io/ioutil"

 	"github.com/Sirupsen/logrus"
-	"github.com/minio/minio/pkg/probe"
 )

 // consoleLogger - default logger if no other logging is enabled.

@@ -39,8 +38,8 @@ func enableConsoleLogger() {
 	}
 	// log.Out and log.Formatter use the default versions.
 	// Only set specific log level.
-	lvl, e := logrus.ParseLevel(clogger.Level)
-	fatalIf(probe.NewError(e), "Unknown log level detected, please fix your console logger configuration.", nil)
+	lvl, err := logrus.ParseLevel(clogger.Level)
+	fatalIf(err, "Unknown log level detected, please fix your console logger configuration.", nil)

 	log.Level = lvl
 }
@@ -21,7 +21,6 @@ import (
 	"os"

 	"github.com/Sirupsen/logrus"
-	"github.com/minio/minio/pkg/probe"
 )

 type fileLogger struct {

@@ -40,14 +39,14 @@ func enableFileLogger() {
 		return
 	}

-	file, e := os.OpenFile(flogger.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
-	fatalIf(probe.NewError(e), "Unable to open log file.", nil)
+	file, err := os.OpenFile(flogger.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+	fatalIf(err, "Unable to open log file.", nil)

 	// Add a local file hook.
 	log.Hooks.Add(&localFile{file})

-	lvl, e := logrus.ParseLevel(flogger.Level)
-	fatalIf(probe.NewError(e), "Unknown log level detected, please fix your console logger configuration.", nil)
+	lvl, err := logrus.ParseLevel(flogger.Level)
+	fatalIf(err, "Unknown log level detected, please fix your console logger configuration.", nil)

 	// Set default JSON formatter.
 	log.Formatter = new(logrus.JSONFormatter)
@@ -23,7 +23,6 @@ import (
 	"log/syslog"

 	"github.com/Sirupsen/logrus"
-	"github.com/minio/minio/pkg/probe"
 )

 type syslogLogger struct {

@@ -41,8 +40,8 @@ type syslogHook struct {

 // enableSyslogLogger - enable logger at raddr.
 func enableSyslogLogger(raddr string) {
-	syslogHook, e := newSyslog("udp", raddr, syslog.LOG_ERR, "MINIO")
-	fatalIf(probe.NewError(e), "Unable to instantiate syslog.", nil)
+	syslogHook, err := newSyslog("udp", raddr, syslog.LOG_ERR, "MINIO")
+	fatalIf(err, "Unable to instantiate syslog.", nil)

 	log.Hooks.Add(syslogHook)               // Add syslog hook.
 	log.Formatter = &logrus.JSONFormatter{} // JSON formatted log.

@@ -51,15 +50,15 @@ func enableSyslogLogger(raddr string) {

 // newSyslog - Creates a hook to be added to an instance of logger.
 func newSyslog(network, raddr string, priority syslog.Priority, tag string) (*syslogHook, error) {
-	w, e := syslog.Dial(network, raddr, priority, tag)
-	return &syslogHook{w, network, raddr}, e
+	w, err := syslog.Dial(network, raddr, priority, tag)
+	return &syslogHook{w, network, raddr}, err
 }

 // Fire - fire the log event
 func (hook *syslogHook) Fire(entry *logrus.Entry) error {
-	line, e := entry.String()
-	if e != nil {
-		return fmt.Errorf("Unable to read entry, %v", e)
+	line, err := entry.String()
+	if err != nil {
+		return fmt.Errorf("Unable to read entry, %v", err)
 	}
 	switch entry.Level {
 	case logrus.PanicLevel:
@@ -18,8 +18,6 @@

 package main

-import "github.com/minio/minio/pkg/probe"

 type syslogLogger struct {
 	Enable bool   `json:"enable"`
 	Addr   string `json:"address"`

@@ -28,5 +26,5 @@ type syslogLogger struct {

 // enableSyslogLogger - unsupported on windows.
 func enableSyslogLogger(raddr string) {
-	fatalIf(probe.NewError(errSyslogNotSupported), "Unable to enable syslog.", nil)
+	fatalIf(errSyslogNotSupported, "Unable to enable syslog.", nil)
 }
logger.go

@@ -17,11 +17,9 @@
 package main

 import (
-	"encoding/json"
 	"reflect"

 	"github.com/Sirupsen/logrus"
-	"github.com/minio/minio/pkg/probe"
 )

 type fields map[string]interface{}

@@ -43,7 +41,7 @@ type logger struct {
 }

 // errorIf synonymous with fatalIf but doesn't exit on error != nil
-func errorIf(err *probe.Error, msg string, fields logrus.Fields) {
+func errorIf(err error, msg string, fields logrus.Fields) {
 	if err == nil {
 		return
 	}

@@ -53,19 +51,15 @@ func errorIf(err *probe.Error, msg string, fields logrus.Fields) {
 	fields["Error"] = struct {
 		Cause     string             `json:"cause,omitempty"`
 		Type      string             `json:"type,omitempty"`
-		CallTrace []probe.TracePoint `json:"trace,omitempty"`
-		SysInfo   map[string]string  `json:"sysinfo,omitempty"`
 	}{
-		err.Cause.Error(),
-		reflect.TypeOf(err.Cause).String(),
-		err.CallTrace,
-		err.SysInfo,
+		err.Error(),
+		reflect.TypeOf(err).String(),
 	}
 	log.WithFields(fields).Error(msg)
 }

 // fatalIf wrapper function which takes error and prints jsonic error messages.
-func fatalIf(err *probe.Error, msg string, fields logrus.Fields) {
+func fatalIf(err error, msg string, fields logrus.Fields) {
 	if err == nil {
 		return
 	}

@@ -73,9 +67,12 @@ func fatalIf(err *probe.Error, msg string, fields logrus.Fields) {
 		fields = make(logrus.Fields)
 	}

-	fields["error"] = err.ToGoError()
-	if jsonErr, e := json.Marshal(err); e == nil {
-		fields["probe"] = string(jsonErr)
+	fields["Error"] = struct {
+		Cause string `json:"cause,omitempty"`
+		Type  string `json:"type,omitempty"`
+	}{
+		err.Error(),
+		reflect.TypeOf(err).String(),
 	}
 	log.WithFields(fields).Fatal(msg)
 }
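After this change a call site passes any plain error value directly, and nil errors are no-ops, so callers no longer wrap values in probe.NewError before logging. A small sketch, assuming the errorIf/fatalIf helpers above are in scope in the same package:

	package main

	import (
		"errors"

		"github.com/Sirupsen/logrus"
	)

	func logExample() {
		var err error
		errorIf(err, "Never printed, err is nil.", nil) // nil errors short-circuit

		err = errors.New("disk unreachable")
		errorIf(err, "Listing disks failed.", logrus.Fields{"disk": "/mnt/disk1"}) // logs and returns
		// fatalIf(err, "Cannot continue without disks.", nil)                    // would log and exit
	}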
@@ -22,7 +22,6 @@ import (
 	"errors"

 	"github.com/Sirupsen/logrus"
-	"github.com/minio/minio/pkg/probe"

 	. "gopkg.in/check.v1"
 )

@@ -37,7 +36,7 @@ func (s *LoggerSuite) TestLogger(c *C) {
 	log.Out = &buffer
 	log.Formatter = new(logrus.JSONFormatter)

-	errorIf(probe.NewError(errors.New("Fake error")), "Failed with error.", nil)
+	errorIf(errors.New("Fake error"), "Failed with error.", nil)
 	err := json.Unmarshal(buffer.Bytes(), &fields)
 	c.Assert(err, IsNil)
 	c.Assert(fields["level"], Equals, "error")
main.go

@@ -197,7 +197,7 @@ func main() {

 	// Initialize config.
 	err := initConfig()
-	fatalIf(err.Trace(), "Unable to initialize minio config.", nil)
+	fatalIf(err, "Unable to initialize minio config.", nil)

 	// Enable all loggers by now.
 	enableLoggers()
@@ -22,12 +22,11 @@ import (
 	"strings"

 	"github.com/fatih/color"
-	"github.com/minio/minio/pkg/probe"
 	"github.com/olekukonko/ts"
 )

 // colorizeUpdateMessage - inspired from Yeoman project npm package https://github.com/yeoman/update-notifier
-func colorizeUpdateMessage(updateString string) (string, *probe.Error) {
+func colorizeUpdateMessage(updateString string) (string, error) {
 	// Initialize coloring.
 	cyan := color.New(color.FgCyan, color.Bold).SprintFunc()
 	yellow := color.New(color.FgYellow, color.Bold).SprintfFunc()

@@ -47,7 +46,7 @@ func colorizeUpdateMessage(updateString string) (string, *probe.Error) {

 	terminal, err := ts.GetSize()
 	if err != nil {
-		return "", probe.NewError(err)
+		return "", err
 	}

 	var message string
@@ -25,27 +25,22 @@ import (
 	"os"
 	"strconv"
 	"testing"

-	"github.com/minio/minio/pkg/probe"
 )

 // Testing GetObjectInfo().
 func TestGetObjectInfo(t *testing.T) {
-	directory, e := ioutil.TempDir("", "minio-get-objinfo-test")
-	if e != nil {
-		t.Fatal(e)
+	directory, err := ioutil.TempDir("", "minio-get-objinfo-test")
+	if err != nil {
+		t.Fatal(err)
 	}
 	defer os.RemoveAll(directory)

 	// Create the obj.
-	fs, e := newFS(directory)
-	if e != nil {
-		t.Fatal(e)
+	obj, err := newFSObjects(directory)
+	if err != nil {
+		t.Fatal(err)
 	}

-	obj := newObjectLayer(fs)
-	var err *probe.Error

 	// This bucket is used for testing getObjectInfo operations.
 	err = obj.MakeBucket("test-getobjectinfo")
 	if err != nil {

@@ -93,15 +88,15 @@ func TestGetObjectInfo(t *testing.T) {
 	for i, testCase := range testCases {
 		result, err := obj.GetObjectInfo(testCase.bucketName, testCase.objectName)
 		if err != nil && testCase.shouldPass {
-			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
+			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
 		}
 		if err == nil && !testCase.shouldPass {
 			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
 		}
 		// Failed as expected, but does it fail for the expected reason.
 		if err != nil && !testCase.shouldPass {
-			if testCase.err.Error() != err.Cause.Error() {
-				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Cause.Error())
+			if testCase.err.Error() != err.Error() {
+				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
 			}
 		}

@@ -125,21 +120,18 @@ func TestGetObjectInfo(t *testing.T) {

 func BenchmarkGetObject(b *testing.B) {
 	// Make a temporary directory to use as the obj.
-	directory, e := ioutil.TempDir("", "minio-benchmark-getobject")
-	if e != nil {
-		b.Fatal(e)
+	directory, err := ioutil.TempDir("", "minio-benchmark-getobject")
+	if err != nil {
+		b.Fatal(err)
 	}
 	defer os.RemoveAll(directory)

 	// Create the obj.
-	fs, e := newFS(directory)
-	if e != nil {
-		b.Fatal(e)
+	obj, err := newFSObjects(directory)
+	if err != nil {
+		b.Fatal(err)
 	}

-	obj := newObjectLayer(fs)
-	var err *probe.Error

 	// Make a bucket and put in a few objects.
 	err = obj.MakeBucket("bucket")
 	if err != nil {

@@ -165,8 +157,8 @@ func BenchmarkGetObject(b *testing.B) {
 	if err != nil {
 		b.Error(err)
 	}
-	if _, e := io.Copy(buffer, r); e != nil {
-		b.Error(e)
+	if _, err := io.Copy(buffer, r); err != nil {
+		b.Error(err)
 	}
 	if buffer.Len() != len(text) {
 		b.Errorf("GetObject returned incorrect length %d (should be %d)\n", buffer.Len(), len(text))
@@ -24,26 +24,22 @@ import (
 	"strconv"
 	"strings"
 	"testing"

-	"github.com/minio/minio/pkg/probe"
 )

 func TestListObjects(t *testing.T) {
 	// Make a temporary directory to use as the obj.
-	directory, e := ioutil.TempDir("", "minio-list-object-test")
-	if e != nil {
-		t.Fatal(e)
+	directory, err := ioutil.TempDir("", "minio-list-object-test")
+	if err != nil {
+		t.Fatal(err)
 	}
 	defer os.RemoveAll(directory)

 	// Create the obj.
-	fs, e := newFS(directory)
-	if e != nil {
-		t.Fatal(e)
+	obj, err := newFSObjects(directory)
+	if err != nil {
+		t.Fatal(err)
 	}

-	obj := newObjectLayer(fs)
-	var err *probe.Error
 	// This bucket is used for testing ListObject operations.
 	err = obj.MakeBucket("test-bucket-list-object")
 	if err != nil {

@@ -56,25 +52,25 @@ func TestListObjects(t *testing.T) {
 		t.Fatal(err)
 	}

-	tmpfile, e := ioutil.TempFile("", "simple-file.txt")
-	if e != nil {
-		t.Fatal(e)
+	tmpfile, err := ioutil.TempFile("", "simple-file.txt")
+	if err != nil {
+		t.Fatal(err)
 	}
 	defer os.Remove(tmpfile.Name()) // clean up

 	_, err = obj.PutObject("test-bucket-list-object", "Asia-maps", int64(len("asia-maps")), bytes.NewBufferString("asia-maps"), nil)
 	if err != nil {
-		t.Fatal(e)
+		t.Fatal(err)
 	}

 	_, err = obj.PutObject("test-bucket-list-object", "Asia/India/India-summer-photos-1", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
 	if err != nil {
-		t.Fatal(e)
+		t.Fatal(err)
 	}

 	_, err = obj.PutObject("test-bucket-list-object", "Asia/India/Karnataka/Bangalore/Koramangala/pics", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
 	if err != nil {
-		t.Fatal(e)
+		t.Fatal(err)
 	}

 	for i := 0; i < 2; i++ {

@@ -86,7 +82,7 @@ func TestListObjects(t *testing.T) {
 	}
 	_, err = obj.PutObject("test-bucket-list-object", "newzen/zen/recurse/again/again/again/pics", int64(len("recurse")), bytes.NewBufferString("recurse"), nil)
 	if err != nil {
-		t.Fatal(e)
+		t.Fatal(err)
 	}

 	for i := 0; i < 3; i++ {

@@ -536,15 +532,15 @@ func TestListObjects(t *testing.T) {
 	for i, testCase := range testCases {
 		result, err := obj.ListObjects(testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, testCase.maxKeys)
 		if err != nil && testCase.shouldPass {
-			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
+			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
 		}
 		if err == nil && !testCase.shouldPass {
 			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
 		}
 		// Failed as expected, but does it fail for the expected reason.
 		if err != nil && !testCase.shouldPass {
-			if !strings.Contains(err.Cause.Error(), testCase.err.Error()) {
-				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Cause.Error())
+			if !strings.Contains(err.Error(), testCase.err.Error()) {
+				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
 			}
 		}
 		// Since there are cases for which ListObjects fails, this is necessary.

@@ -571,21 +567,18 @@ func TestListObjects(t *testing.T) {

 func BenchmarkListObjects(b *testing.B) {
 	// Make a temporary directory to use as the obj.
-	directory, e := ioutil.TempDir("", "minio-list-benchmark")
-	if e != nil {
-		b.Fatal(e)
+	directory, err := ioutil.TempDir("", "minio-list-benchmark")
+	if err != nil {
+		b.Fatal(err)
 	}
 	defer os.RemoveAll(directory)

 	// Create the obj.
-	fs, e := newFS(directory)
-	if e != nil {
-		b.Fatal(e)
+	obj, err := newFSObjects(directory)
+	if err != nil {
+		b.Fatal(err)
 	}

-	obj := newObjectLayer(fs)
-	var err *probe.Error

 	// Create a bucket.
 	err = obj.MakeBucket("ls-benchmark-bucket")
 	if err != nil {
@@ -21,27 +21,23 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path"
 	"testing"
 )

 // Tests validate creation of new multipart upload instance.
 func TestObjectNewMultipartUpload(t *testing.T) {
-	directory, e := ioutil.TempDir("", "minio-multipart-1-test")
-	if e != nil {
-		t.Fatal(e)
+	directory, err := ioutil.TempDir("", "minio-multipart-1-test")
+	if err != nil {
+		t.Fatal(err)
 	}
 	defer os.RemoveAll(directory)

-	// Initialize fs layer.
-	fs, e := newFS(directory)
-	if e != nil {
-		t.Fatal(e)
+	// Initialize fs object layer.
+	obj, err := newFSObjects(directory)
+	if err != nil {
+		t.Fatal(err)
 	}

-	// Initialize object layer.
-	obj := newObjectLayer(fs)

 	bucket := "minio-bucket"
 	object := "minio-object"

@@ -49,77 +45,68 @@ func TestObjectNewMultipartUpload(t *testing.T) {
 	// operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
 	uploadID, err := obj.NewMultipartUpload(bucket, object)
 	if err == nil {
-		t.Fatalf("Expcected to fail since the NewMultipartUpload is intialized on a non-existant bucket.")
+		t.Fatalf("Expected to fail since the NewMultipartUpload is intialized on a non-existant bucket.")
 	}
-	if errMsg != err.ToGoError().Error() {
-		t.Errorf("Expected to fail with Error \"%s\", but instead found \"%s\".", errMsg, err.ToGoError().Error())
+	if errMsg != err.Error() {
+		t.Errorf("Expected to fail with Error \"%s\", but instead found \"%s\".", errMsg, err.Error())
 	}

 	// Create bucket before initiating NewMultipartUpload.
 	err = obj.MakeBucket(bucket)
 	if err != nil {
 		// failed to create newbucket, abort.
-		t.Fatal(err.ToGoError())
+		t.Fatal(err)
 	}

 	uploadID, err = obj.NewMultipartUpload(bucket, object)
 	if err != nil {
-		t.Fatal(err.ToGoError())
+		t.Fatal(err)
 	}

-	uploadIDPath := path.Join(bucket, object, uploadID)
-	_, e = obj.storage.StatFile(minioMetaVolume, uploadIDPath)
-	if e != nil {
-		if e == errFileNotFound {
+	err = obj.AbortMultipartUpload(bucket, object, uploadID)
+	if err != nil {
+		switch err.(type) {
+		case InvalidUploadID:
 			t.Fatalf("New Multipart upload failed to create uuid file.")
+		default:
+			t.Fatalf(err.Error())
 		}
-		t.Fatalf(e.Error())
 	}
 }

 // Tests validate the validator for existence of uploadID.
 func TestObjectAPIIsUploadIDExists(t *testing.T) {
-	directory, e := ioutil.TempDir("", "minio-multipart-2-test")
-	if e != nil {
-		t.Fatal(e)
+	directory, err := ioutil.TempDir("", "minio-multipart-2-test")
+	if err != nil {
+		t.Fatal(err)
 	}
 	defer os.RemoveAll(directory)

-	// Initialize fs layer.
-	fs, e := newFS(directory)
-	if e != nil {
-		t.Fatal(e)
+	// Initialize fs object layer.
+	obj, err := newFSObjects(directory)
+	if err != nil {
+		t.Fatal(err)
 	}

-	// Initialize object layer.
-	obj := newObjectLayer(fs)

 	bucket := "minio-bucket"
 	object := "minio-object"

 	// Create bucket before initiating NewMultipartUpload.
-	err := obj.MakeBucket(bucket)
+	err = obj.MakeBucket(bucket)
 	if err != nil {
 		// Failed to create newbucket, abort.
-		t.Fatal(err.ToGoError())
+		t.Fatal(err)
 	}

-	// UploadID file shouldn't exist.
-	isExists, e := obj.isUploadIDExists(bucket, object, "abc")
-	if e == nil && isExists {
-		t.Fatal("Expected uploadIDPath to not to exist.")
-	}

-	uploadID, err := obj.NewMultipartUpload(bucket, object)
+	_, err = obj.NewMultipartUpload(bucket, object)
 	if err != nil {
-		t.Fatal(err.ToGoError())
+		t.Fatal(err)
 	}
-	// UploadID file should exist.
-	isExists, e = obj.isUploadIDExists(bucket, object, uploadID)
-	if e != nil {
-		t.Fatal(e.Error())
-	}
-	if !isExists {

+	err = obj.AbortMultipartUpload(bucket, object, "abc")
+	switch err.(type) {
+	case InvalidUploadID:
+	default:
 		t.Fatal("Expected uploadIDPath to exist.")
+	}
 	}

@@ -127,40 +114,38 @@ func TestObjectAPIIsUploadIDExists(t *testing.T) {
 // Tests validate correctness of PutObjectPart.
 func TestObjectAPIPutObjectPart(t *testing.T) {
 	// Generating cases for which the PutObjectPart fails.
-	directory, e := ioutil.TempDir("", "minio-multipart-3-test")
-	if e != nil {
-		t.Fatal(e)
+	directory, err := ioutil.TempDir("", "minio-multipart-3-test")
+	if err != nil {
+		t.Fatal(err)
 	}
 	defer os.RemoveAll(directory)

-	// Initializing fs layer.
-	fs, e := newFS(directory)
-	if e != nil {
-		t.Fatal(e)
+	// Initializing fs object layer.
+	obj, err := newFSObjects(directory)
+	if err != nil {
+		t.Fatal(err)
 	}

 	bucket := "minio-bucket"
 	object := "minio-object"

-	// Initializing object layer.
-	obj := newObjectLayer(fs)

 	// Create bucket before initiating NewMultipartUpload.
-	err := obj.MakeBucket(bucket)
+	err = obj.MakeBucket(bucket)
 	if err != nil {
 		// Failed to create newbucket, abort.
-		t.Fatal(err.ToGoError())
+		t.Fatal(err)
 	}
 	// Initiate Multipart Upload on the above created bucket.
 	uploadID, err := obj.NewMultipartUpload(bucket, object)
 	if err != nil {
 		// Failed to create NewMultipartUpload, abort.
-		t.Fatal(err.ToGoError())
+		t.Fatal(err)
 	}
 	// Creating a dummy bucket for tests.
 	err = obj.MakeBucket("unused-bucket")
 	if err != nil {
 		// Failed to create newbucket, abort.
-		t.Fatal(err.ToGoError())
+		t.Fatal(err)
 	}

 	failCases := []struct {

@@ -235,31 +220,24 @@ func TestObjectAPIPutObjectPart(t *testing.T) {
 		// All the test cases above are expected to fail.

 		if actualErr != nil && testCase.shouldPass {
-			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s.", i+1, actualErr.ToGoError().Error())
+			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s.", i+1, actualErr.Error())
 		}
 		if actualErr == nil && !testCase.shouldPass {
 			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, testCase.expectedError.Error())
 		}
 		// Failed as expected, but does it fail for the expected reason.
 		if actualErr != nil && !testCase.shouldPass {
-			if testCase.expectedError.Error() != actualErr.ToGoError().Error() {
+			if testCase.expectedError.Error() != actualErr.Error() {
 				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1,
-					testCase.expectedError.Error(), actualErr.ToGoError().Error())
+					testCase.expectedError.Error(), actualErr.Error())
 			}
 		}
 		// Since there are cases for which ListObjects fails, this is necessary.
 		// Test passes as expected, but the output values are verified for correctness here.
 		if actualErr == nil && testCase.shouldPass {
 			// Asserting whether the md5 output is correct.
 			if testCase.inputMd5 != actualMd5Hex {
 				t.Errorf("Test %d: Calculated Md5 different from the actual one %s.", i+1, actualMd5Hex)
 			}
-			partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, testCase.PartID, testCase.inputMd5)
-			// Verifying whether the part file is created.
-			_, e := obj.storage.StatFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
-			if e != nil {
-				t.Errorf("Test %d: Failed to create the Part file.", i+1)
-			}
 		}
 	}
 }
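These tests all follow the same shouldPass table pattern: each case records the expected error and the loop matches failures by message. A generic, self-contained sketch of the pattern with hypothetical names (it would live in a _test.go file):

	package main

	import (
		"errors"
		"testing"
	)

	func doWork(input string) error {
		if input == "" {
			return errors.New("empty input")
		}
		return nil
	}

	func TestDoWork(t *testing.T) {
		testCases := []struct {
			input      string
			err        error
			shouldPass bool
		}{
			{"payload", nil, true},
			{"", errors.New("empty input"), false},
		}
		for i, testCase := range testCases {
			err := doWork(testCase.input)
			if err != nil && testCase.shouldPass {
				t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
			}
			if err == nil && !testCase.shouldPass {
				t.Errorf("Test %d: Expected to fail with <ERROR> %q, but passed instead", i+1, testCase.err.Error())
			}
			// Failed as expected, but does it fail for the expected reason.
			if err != nil && !testCase.shouldPass && testCase.err.Error() != err.Error() {
				t.Errorf("Test %d: Expected error %q, got %q", i+1, testCase.err.Error(), err.Error())
			}
		}
	}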
@@ -29,12 +29,11 @@ var _ = Suite(&MySuite{})

 func (s *MySuite) TestFSAPISuite(c *C) {
 	var storageList []string
-	create := func() objectAPI {
+	create := func() ObjectLayer {
 		path, err := ioutil.TempDir(os.TempDir(), "minio-")
 		c.Check(err, IsNil)
-		storageAPI, err := newStorageAPI(path)
+		objAPI, err := newFSObjects(path)
 		c.Check(err, IsNil)
-		objAPI := newObjectLayer(storageAPI)
 		storageList = append(storageList, path)
 		return objAPI
 	}

@@ -48,7 +47,7 @@ func (s *MySuite) TestXLAPISuite(c *C) {
 	// Initialize name space lock.
 	initNSLock()

-	create := func() objectAPI {
+	create := func() ObjectLayer {
 		var nDisks = 16 // Maximum disks.
 		var erasureDisks []string
 		for i := 0; i < nDisks; i++ {

@@ -56,10 +55,8 @@ func (s *MySuite) TestXLAPISuite(c *C) {
 			c.Check(err, IsNil)
 			erasureDisks = append(erasureDisks, path)
 		}
 		storageList = append(storageList, erasureDisks...)
-		storageAPI, err := newStorageAPI(erasureDisks...)
+		objAPI, err := newXLObjects(erasureDisks...)
 		c.Check(err, IsNil)
-		objAPI := newObjectLayer(storageAPI)
 		return objAPI
 	}
 	APITestSuite(c, create)
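Both suites hand APITestSuite a factory returning the ObjectLayer interface, which is what lets the FS and XL backends share one conformance suite. A condensed sketch of the idea (runSuiteAgainst is hypothetical, the constructors are the ones introduced in this commit):

	package main

	// runSuiteAgainst only sees a constructor for the ObjectLayer interface,
	// never a concrete backend.
	func runSuiteAgainst(create func() (ObjectLayer, error)) error {
		obj, err := create()
		if err != nil {
			return err
		}
		// ... exercise MakeBucket / PutObject / GetObject purely through the interface.
		return obj.MakeBucket("suite-bucket")
	}

	// Mirroring TestFSAPISuite and TestXLAPISuite, both backends plug into the
	// same driver:
	//
	//	runSuiteAgainst(func() (ObjectLayer, error) { return newFSObjects("/tmp/fs-export") })
	//	runSuiteAgainst(func() (ObjectLayer, error) { return newXLObjects("/tmp/xl-1", "/tmp/xl-2") })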
@@ -32,7 +32,6 @@ import (
 	fastSha256 "github.com/minio/minio/pkg/crypto/sha256"

 	mux "github.com/gorilla/mux"
-	"github.com/minio/minio/pkg/probe"
 )

 // supportedGetReqParams - supported request parameters for GET presigned request.

@@ -100,7 +99,8 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 	// Fetch object stat info.
 	objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
 	if err != nil {
-		switch err.ToGoError().(type) {
+		errorIf(err, "GetObjectInfo failed.", nil)
+		switch err.(type) {
 		case BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
 		case BucketNotFound:

@@ -110,7 +110,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 		case ObjectNameInvalid:
 			writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
 		default:
-			errorIf(err.Trace(), "GetObjectInfo failed.", nil)
 			writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		}
 		return

@@ -137,13 +136,13 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 	startOffset := hrange.start
 	readCloser, err := api.ObjectAPI.GetObject(bucket, object, startOffset)
 	if err != nil {
-		switch err.ToGoError().(type) {
+		switch err.(type) {
 		case BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
 		case ObjectNotFound:
 			writeErrorResponse(w, r, errAllowableObjectNotFound(bucket, r), r.URL.Path)
 		default:
-			errorIf(err.Trace(), "GetObject failed.", nil)
+			errorIf(err, "GetObject failed.", nil)
 			writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		}
 		return

@@ -157,14 +156,14 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 	setGetRespHeaders(w, r.URL.Query())

 	if hrange.length > 0 {
-		if _, e := io.CopyN(w, readCloser, hrange.length); e != nil {
-			errorIf(probe.NewError(e), "Writing to client failed", nil)
+		if _, err := io.CopyN(w, readCloser, hrange.length); err != nil {
+			errorIf(err, "Writing to client failed", nil)
 			// Do not send error response here, since client could have died.
 			return
 		}
 	} else {
-		if _, e := io.Copy(w, readCloser); e != nil {
-			errorIf(probe.NewError(e), "Writing to client failed", nil)
+		if _, err := io.Copy(w, readCloser); err != nil {
+			errorIf(err, "Writing to client failed", nil)
 			// Do not send error response here, since client could have died.
 			return
 		}
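With probe gone, the handlers map object-layer failures to S3 error codes with a plain type switch on the error value, as seen above. A minimal sketch of that mapping, assuming the package-local error types and APIErrorCode constants used in these handlers:

	package main

	// toAPIErrorCode is a hypothetical consolidation of the per-handler
	// switches; the error types and APIErrorCode constants are the ones
	// defined elsewhere in this package.
	func toAPIErrorCode(err error) APIErrorCode {
		switch err.(type) {
		case BucketNameInvalid:
			return ErrInvalidBucketName
		case BucketNotFound:
			return ErrNoSuchBucket
		case ObjectNotFound, ObjectNameInvalid:
			return ErrNoSuchKey
		default:
			return ErrInternalError
		}
	}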
@@ -294,8 +293,8 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re

 	objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
 	if err != nil {
-		errorIf(err.Trace(bucket, object), "GetObjectInfo failed.", nil)
-		switch err.ToGoError().(type) {
+		errorIf(err, "GetObjectInfo failed.", nil)
+		switch err.(type) {
 		case BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
 		case BucketNotFound:

@@ -387,8 +386,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re

 	objInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject)
 	if err != nil {
-		errorIf(err.Trace(), "GetObjectInfo failed.", nil)
-		switch err.ToGoError().(type) {
+		errorIf(err, "GetObjectInfo failed.", nil)
+		switch err.(type) {
 		case BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, objectSource)
 		case BucketNotFound:

@@ -425,10 +424,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re

 	var md5Bytes []byte
 	if objInfo.MD5Sum != "" {
-		var e error
-		md5Bytes, e = hex.DecodeString(objInfo.MD5Sum)
-		if e != nil {
-			errorIf(probe.NewError(e), "Decoding md5 failed.", nil)
+		md5Bytes, err = hex.DecodeString(objInfo.MD5Sum)
+		if err != nil {
+			errorIf(err, "Decoding md5 failed.", nil)
 			writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
 			return
 		}

@@ -436,10 +434,10 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re

 	startOffset := int64(0) // Read the whole file.
 	// Get the object.
-	readCloser, getErr := api.ObjectAPI.GetObject(sourceBucket, sourceObject, startOffset)
-	if getErr != nil {
-		errorIf(getErr.Trace(sourceBucket, sourceObject), "Reading "+objectSource+" failed.", nil)
-		switch err.ToGoError().(type) {
+	readCloser, err := api.ObjectAPI.GetObject(sourceBucket, sourceObject, startOffset)
+	if err != nil {
+		errorIf(err, "Reading "+objectSource+" failed.", nil)
+		switch err.(type) {
 		case BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, objectSource)
 		case ObjectNotFound:

@@ -459,7 +457,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	// Create the object.
 	md5Sum, err := api.ObjectAPI.PutObject(bucket, object, size, readCloser, metadata)
 	if err != nil {
-		switch err.ToGoError().(type) {
+		errorIf(err, "PutObject failed.", nil)
+		switch err.(type) {
 		case StorageFull:
 			writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
 		case BucketNotFound:

@@ -473,7 +472,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		case ObjectExistsAsPrefix:
 			writeErrorResponse(w, r, ErrObjectExistsAsPrefix, r.URL.Path)
 		default:
-			errorIf(err.Trace(), "PutObject failed.", nil)
 			writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		}
 		return

@@ -481,7 +479,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re

 	objInfo, err = api.ObjectAPI.GetObjectInfo(bucket, object)
 	if err != nil {
-		errorIf(err.Trace(), "GetObjectInfo failed.", nil)
+		errorIf(err, "GetObjectInfo failed.", nil)
 		writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		return
 	}

@@ -605,7 +603,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	// Get Content-Md5 sent by client and verify if valid
 	md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
 	if err != nil {
-		errorIf(err.Trace(r.Header.Get("Content-Md5")), "Decoding md5 failed.", nil)
+		errorIf(err, "Decoding md5 failed.", nil)
 		writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
 		return
 	}

@@ -643,9 +641,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	go func() {
 		shaWriter := fastSha256.New()
 		multiWriter := io.MultiWriter(shaWriter, writer)
-		if _, e := io.CopyN(multiWriter, r.Body, size); e != nil {
-			errorIf(probe.NewError(e), "Unable to read HTTP body.", nil)
-			writer.CloseWithError(e)
+		if _, cerr := io.CopyN(multiWriter, r.Body, size); cerr != nil {
+			errorIf(cerr, "Unable to read HTTP body.", nil)
+			writer.CloseWithError(cerr)
 			return
 		}
 		shaPayload := shaWriter.Sum(nil)
@ -676,14 +674,13 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
		md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata)
	}
	if err != nil {
		errorIf(err.Trace(), "PutObject failed.", nil)
		e := err.ToGoError()
		errorIf(err, "PutObject failed.", nil)
		// Verify if the underlying error is signature mismatch.
		if e == errSignatureMismatch {
		if err == errSignatureMismatch {
			writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
			return
		}
		switch e.(type) {
		switch err.(type) {
		case StorageFull:
			writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
		case BucketNotFound:
@ -736,8 +733,8 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r

	uploadID, err := api.ObjectAPI.NewMultipartUpload(bucket, object)
	if err != nil {
		errorIf(err.Trace(), "NewMultipartUpload failed.", nil)
		switch err.ToGoError().(type) {
		errorIf(err, "NewMultipartUpload failed.", nil)
		switch err.(type) {
		case StorageFull:
			writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
		case BucketNameInvalid:
@ -771,7 +768,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
	// get Content-Md5 sent by client and verify if valid
	md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
	if err != nil {
		errorIf(err.Trace(r.Header.Get("Content-Md5")), "Decoding md5 failed.", nil)
		errorIf(err, "Decoding md5 failed.", nil)
		writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
		return
	}
@ -792,8 +789,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
	uploadID := r.URL.Query().Get("uploadId")
	partIDString := r.URL.Query().Get("partNumber")

	partID, e := strconv.Atoi(partIDString)
	if e != nil {
	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		writeErrorResponse(w, r, ErrInvalidPart, r.URL.Path)
		return
	}
@ -822,9 +819,9 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
	go func() {
		shaWriter := fastSha256.New()
		multiWriter := io.MultiWriter(shaWriter, writer)
		if _, e := io.CopyN(multiWriter, r.Body, size); e != nil {
			errorIf(probe.NewError(e), "Unable to read HTTP body.", nil)
			writer.CloseWithError(e)
		if _, err = io.CopyN(multiWriter, r.Body, size); err != nil {
			errorIf(err, "Unable to read HTTP body.", nil)
			writer.CloseWithError(err)
			return
		}
		shaPayload := shaWriter.Sum(nil)
@ -848,14 +845,13 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
		partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, hex.EncodeToString(md5Bytes))
	}
	if err != nil {
		errorIf(err.Trace(), "PutObjectPart failed.", nil)
		e := err.ToGoError()
		errorIf(err, "PutObjectPart failed.", nil)
		// Verify if the underlying error is signature mismatch.
		if e == errSignatureMismatch {
		if err == errSignatureMismatch {
			writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
			return
		}
		switch e.(type) {
		switch err.(type) {
		case StorageFull:
			writeErrorResponse(w, r, ErrStorageFull, r.URL.Path)
		case InvalidUploadID:
@ -900,10 +896,9 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
	}

	uploadID, _, _, _ := getObjectResources(r.URL.Query())
	err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID)
	if err != nil {
		errorIf(err.Trace(), "AbortMultipartUpload failed.", nil)
		switch err.ToGoError().(type) {
	if err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil {
		errorIf(err, "AbortMultipartUpload failed.", nil)
		switch err.(type) {
		case BucketNameInvalid:
			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
		case BucketNotFound:
@ -961,8 +956,8 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht

	listPartsInfo, err := api.ObjectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
	if err != nil {
		errorIf(err.Trace(), "ListObjectParts failed.", nil)
		switch err.ToGoError().(type) {
		errorIf(err, "ListObjectParts failed.", nil)
		switch err.(type) {
		case BucketNameInvalid:
			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
		case BucketNotFound:
@ -998,7 +993,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
	uploadID, _, _, _ := getObjectResources(r.URL.Query())

	var md5Sum string
	var err *probe.Error
	var err error
	switch getRequestAuthType(r) {
	default:
		// For all unknown auth types return error.
@ -1016,15 +1011,15 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
			return
		}
	}
	completeMultipartBytes, e := ioutil.ReadAll(r.Body)
	if e != nil {
		errorIf(probe.NewError(e), "CompleteMultipartUpload failed.", nil)
	completeMultipartBytes, err := ioutil.ReadAll(r.Body)
	if err != nil {
		errorIf(err, "CompleteMultipartUpload failed.", nil)
		writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
		return
	}
	complMultipartUpload := &completeMultipartUpload{}
	if e = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); e != nil {
		errorIf(probe.NewError(e), "XML Unmarshal failed", nil)
	if err = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); err != nil {
		errorIf(err, "XML Unmarshal failed", nil)
		writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
		return
	}
@ -1042,8 +1037,8 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
	// Complete multipart upload.
	md5Sum, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
	if err != nil {
		errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
		switch err.ToGoError().(type) {
		errorIf(err, "CompleteMultipartUpload failed.", nil)
		switch err.(type) {
		case BucketNameInvalid:
			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
		case BucketNotFound:
@ -1099,10 +1094,9 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
			return
		}
	}
	err := api.ObjectAPI.DeleteObject(bucket, object)
	if err != nil {
		errorIf(err.Trace(), "DeleteObject failed.", nil)
		switch err.ToGoError().(type) {
	if err := api.ObjectAPI.DeleteObject(bucket, object); err != nil {
		errorIf(err, "DeleteObject failed.", nil)
		switch err.(type) {
		case BucketNameInvalid:
			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
		case BucketNotFound:
@ -0,0 +1,43 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import "io"

// ObjectLayer implements primitives for object API layer.
type ObjectLayer interface {
	// Bucket operations.
	MakeBucket(bucket string) error
	GetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
	ListBuckets() (buckets []BucketInfo, err error)
	DeleteBucket(bucket string) error
	ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)

	// Object operations.
	GetObject(bucket, object string, startOffset int64) (reader io.ReadCloser, err error)
	GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
	PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string) (md5 string, err error)
	DeleteObject(bucket, object string) error

	// Multipart operations.
	ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
	NewMultipartUpload(bucket, object string) (uploadID string, err error)
	PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (md5 string, err error)
	ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
	AbortMultipartUpload(bucket, object, uploadID string) error
	CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (md5 string, err error)
}
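Any backend satisfying this interface (FS, XL, or a test stub) can sit behind the API handlers. A small sketch of a caller written purely against the interface; the bucket and object names are illustrative:

// putAndStat exercises an ObjectLayer without knowing which backend implements it.
func putAndStat(obj ObjectLayer, data io.Reader, size int64) (ObjectInfo, error) {
	if err := obj.MakeBucket("bucket"); err != nil {
		return ObjectInfo{}, err
	}
	if _, err := obj.PutObject("bucket", "object", size, data, nil); err != nil {
		return ObjectInfo{}, err
	}
	return obj.GetObjectInfo("bucket", "object")
}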
@ -17,9 +17,21 @@
package main

import (
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"regexp"
	"strings"
	"unicode/utf8"

	"github.com/minio/minio/pkg/safe"
)

const (
	// Minio meta volume.
	minioMetaVolume = ".minio"
)

// validBucket regexp.
@ -96,3 +108,41 @@ func retainSlash(s string) string {
func pathJoin(s1 string, s2 string) string {
	return retainSlash(s1) + s2
}

// Create an s3 compatible MD5sum for complete multipart transaction.
func makeS3MD5(md5Strs ...string) (string, error) {
	var finalMD5Bytes []byte
	for _, md5Str := range md5Strs {
		md5Bytes, err := hex.DecodeString(md5Str)
		if err != nil {
			return "", err
		}
		finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
	}
	md5Hasher := md5.New()
	md5Hasher.Write(finalMD5Bytes)
	s3MD5 := fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(md5Strs))
	return s3MD5, nil
}
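The resulting ETag follows S3's multipart convention: the MD5 of all part digests concatenated, suffixed with the part count. A short usage sketch of makeS3MD5 above; the two inputs are the well-known MD5 digests of "hello" and "world":

partMD5s := []string{
	"5d41402abc4b2a76b9719d911017c592", // md5("hello")
	"7d793037a0760186574b0282f2f435e7", // md5("world")
}
etag, err := makeS3MD5(partMD5s...)
if err != nil {
	// Only reachable when a part digest is not valid hex.
	return err
}
fmt.Println(etag) // prints "<md5-of-concatenated-digests>-2"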
// byBucketName is a collection satisfying sort.Interface.
type byBucketName []BucketInfo

func (d byBucketName) Len() int           { return len(d) }
func (d byBucketName) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name }
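ListBuckets implementations can return results in a stable order by sorting through the standard sort package:

buckets := []BucketInfo{{Name: "zeta"}, {Name: "alpha"}}
sort.Sort(byBucketName(buckets))
// buckets is now ordered by name: alpha, zeta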
// safeCloseAndRemove - safely closes and removes underlying temporary
// file writer if possible.
func safeCloseAndRemove(writer io.WriteCloser) error {
	// If writer is a safe file, attempt to close and remove.
	safeWriter, ok := writer.(*safe.File)
	if ok {
		return safeWriter.CloseAndRemove()
	}
	pipeWriter, ok := writer.(*io.PipeWriter)
	if ok {
		return pipeWriter.CloseWithError(errors.New("Close and error out."))
	}
	return nil
}
@ -31,7 +31,7 @@ import (
// TODO - enable all the commented tests.

// APITestSuite - collection of API tests.
func APITestSuite(c *check.C, create func() objectAPI) {
func APITestSuite(c *check.C, create func() ObjectLayer) {
	testMakeBucket(c, create)
	testMultipleObjectCreation(c, create)
	testPaging(c, create)
@ -50,7 +50,7 @@ func APITestSuite(c *check.C, create func() objectAPI) {
}

// Tests validate bucket creation.
func testMakeBucket(c *check.C, create func() objectAPI) {
func testMakeBucket(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("bucket-unknown")
	c.Assert(err, check.IsNil)
@ -58,7 +58,7 @@ func testMakeBucket(c *check.C, create func() objectAPI) {
}

// Tests validate creation of part files during Multipart operation.
func testMultipartObjectCreation(c *check.C, create func() objectAPI) {
func testMultipartObjectCreation(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
@ -83,7 +83,7 @@ func testMultipartObjectCreation(c *check.C, create func() objectAPI) {
}

// Tests validate abortion of Multipart operation.
func testMultipartObjectAbort(c *check.C, create func() objectAPI) {
func testMultipartObjectAbort(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
@ -115,7 +115,7 @@ func testMultipartObjectAbort(c *check.C, create func() objectAPI) {
}

// Tests validate object creation.
func testMultipleObjectCreation(c *check.C, create func() objectAPI) {
func testMultipleObjectCreation(c *check.C, create func() ObjectLayer) {
	objects := make(map[string][]byte)
	obj := create()
	err := obj.MakeBucket("bucket")
@ -157,7 +157,7 @@ func testMultipleObjectCreation(c *check.C, create func() objectAPI) {
}

// Tests validate creation of objects and the order of listing using various filters for ListObjects operation.
func testPaging(c *check.C, create func() objectAPI) {
func testPaging(c *check.C, create func() ObjectLayer) {
	obj := create()
	obj.MakeBucket("bucket")
	result, err := obj.ListObjects("bucket", "", "", "", 0)
@ -261,7 +261,7 @@ func testPaging(c *check.C, create func() objectAPI) {
}

// Tests validate overwriting of an existing object.
func testObjectOverwriteWorks(c *check.C, create func() objectAPI) {
func testObjectOverwriteWorks(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
@ -284,25 +284,25 @@ func testObjectOverwriteWorks(c *check.C, create func() objectAPI) {
}

// Tests validate that bucket operation on non-existent bucket fails.
func testNonExistantBucketOperations(c *check.C, create func() objectAPI) {
func testNonExistantBucketOperations(c *check.C, create func() ObjectLayer) {
	obj := create()
	_, err := obj.PutObject("bucket1", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(err.ToGoError().Error(), check.Equals, "Bucket not found: bucket1")
	c.Assert(err.Error(), check.Equals, "Bucket not found: bucket1")
}
// Tests validate that recreation of the bucket fails.
func testBucketRecreateFails(c *check.C, create func() objectAPI) {
func testBucketRecreateFails(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("string")
	c.Assert(err, check.IsNil)
	err = obj.MakeBucket("string")
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(err.ToGoError().Error(), check.Equals, "Bucket exists: string")
	c.Assert(err.Error(), check.Equals, "Bucket exists: string")
}

// Tests validate PutObject with subdirectory prefix.
func testPutObjectInSubdir(c *check.C, create func() objectAPI) {
func testPutObjectInSubdir(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
@ -321,7 +321,7 @@ func testPutObjectInSubdir(c *check.C, create func() objectAPI) {
}

// Tests validate ListBuckets.
func testListBuckets(c *check.C, create func() objectAPI) {
func testListBuckets(c *check.C, create func() ObjectLayer) {
	obj := create()

	// test empty list.
@ -354,7 +354,7 @@ func testListBuckets(c *check.C, create func() objectAPI) {
}

// Tests validate the order of result of ListBuckets.
func testListBucketsOrder(c *check.C, create func() objectAPI) {
func testListBucketsOrder(c *check.C, create func() ObjectLayer) {
	// if implementation contains a map, order of map keys will vary.
	// this ensures they return in the same order each time.
	for i := 0; i < 10; i++ {
@ -373,24 +373,24 @@ func testListBucketsOrder(c *check.C, create func() objectAPI) {
}

// Tests validate that ListObjects operation on a non-existent bucket fails as expected.
func testListObjectsTestsForNonExistantBucket(c *check.C, create func() objectAPI) {
func testListObjectsTestsForNonExistantBucket(c *check.C, create func() ObjectLayer) {
	obj := create()
	result, err := obj.ListObjects("bucket", "", "", "", 1000)
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(result.IsTruncated, check.Equals, false)
	c.Assert(len(result.Objects), check.Equals, 0)
	c.Assert(err.ToGoError().Error(), check.Equals, "Bucket not found: bucket")
	c.Assert(err.Error(), check.Equals, "Bucket not found: bucket")
}

// Tests validate that GetObject fails on a non-existent object as expected.
func testNonExistantObjectInBucket(c *check.C, create func() objectAPI) {
func testNonExistantObjectInBucket(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("bucket")
	c.Assert(err, check.IsNil)

	_, err = obj.GetObject("bucket", "dir1", 0)
	c.Assert(err, check.Not(check.IsNil))
	switch err := err.ToGoError().(type) {
	switch err := err.(type) {
	case ObjectNotFound:
		c.Assert(err, check.ErrorMatches, "Object not found: bucket#dir1")
	default:
@ -399,7 +399,7 @@ func testNonExistantObjectInBucket(c *check.C, create func() objectAPI) {
}

// Tests validate that GetObject on an existing directory fails as expected.
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() objectAPI) {
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
@ -408,7 +408,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() objectAPI)
	c.Assert(err, check.IsNil)

	_, err = obj.GetObject("bucket", "dir1", 0)
	switch err := err.ToGoError().(type) {
	switch err := err.(type) {
	case ObjectNotFound:
		c.Assert(err.Bucket, check.Equals, "bucket")
		c.Assert(err.Object, check.Equals, "dir1")
@ -418,7 +418,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() objectAPI)
	}

	_, err = obj.GetObject("bucket", "dir1/", 0)
	switch err := err.ToGoError().(type) {
	switch err := err.(type) {
	case ObjectNotFound:
		c.Assert(err.Bucket, check.Equals, "bucket")
		c.Assert(err.Object, check.Equals, "dir1/")
@ -429,7 +429,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() objectAPI)
}

// Tests validate the default ContentType.
func testDefaultContentType(c *check.C, create func() objectAPI) {
func testDefaultContentType(c *check.C, create func() ObjectLayer) {
	obj := create()
	err := obj.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
@ -30,7 +30,6 @@ import (
	"sync"

	"github.com/fatih/structs"
	"github.com/minio/minio/pkg/probe"
	"github.com/minio/minio/pkg/safe"
)

@ -38,11 +37,11 @@ import (
type Config interface {
	String() string
	Version() string
	Save(string) *probe.Error
	Load(string) *probe.Error
	Save(string) error
	Load(string) error
	Data() interface{}
	Diff(Config) ([]structs.Field, *probe.Error)
	DeepDiff(Config) ([]structs.Field, *probe.Error)
	Diff(Config) ([]structs.Field, error)
	DeepDiff(Config) ([]structs.Field, error)
}

// config - implements quick.Config interface
@ -53,28 +52,28 @@ type config struct {

// CheckData - checks the validity of config data. Data should be of
// type struct and contain a string type field called "Version".
func CheckData(data interface{}) *probe.Error {
func CheckData(data interface{}) error {
	if !structs.IsStruct(data) {
		return probe.NewError(fmt.Errorf("Invalid argument type. Expecting \"struct\" type."))
		return fmt.Errorf("Invalid argument type. Expecting \"struct\" type.")
	}

	st := structs.New(data)
	f, ok := st.FieldOk("Version")
	if !ok {
		return probe.NewError(fmt.Errorf("Invalid type of struct argument. No [%s.Version] field found.", st.Name()))
		return fmt.Errorf("Invalid type of struct argument. No [%s.Version] field found.", st.Name())
	}

	if f.Kind() != reflect.String {
		return probe.NewError(fmt.Errorf("Invalid type of struct argument. Expecting \"string\" type [%s.Version] field.", st.Name()))
		return fmt.Errorf("Invalid type of struct argument. Expecting \"string\" type [%s.Version] field.", st.Name())
	}

	return nil
}
// New - instantiate a new config
func New(data interface{}) (Config, *probe.Error) {
func New(data interface{}) (Config, error) {
	if err := CheckData(data); err != nil {
		return nil, err.Trace()
		return nil, err
	}

	d := new(config)
@ -85,15 +84,15 @@ func New(data interface{}) (Config, *probe.Error) {

// CheckVersion - loads json and compares the version number provided returns back true or false - any failure
// is returned as error.
func CheckVersion(filename string, version string) (bool, *probe.Error) {
	_, e := os.Stat(filename)
	if e != nil {
		return false, probe.NewError(e)
func CheckVersion(filename string, version string) (bool, error) {
	_, err := os.Stat(filename)
	if err != nil {
		return false, err
	}

	fileData, e := ioutil.ReadFile(filename)
	if e != nil {
		return false, probe.NewError(e)
	fileData, err := ioutil.ReadFile(filename)
	if err != nil {
		return false, err
	}

	if runtime.GOOS == "windows" {
@ -104,18 +103,18 @@ func CheckVersion(filename string, version string) (bool, *probe.Error) {
	}{
		Version: "",
	}
	e = json.Unmarshal(fileData, &data)
	if e != nil {
		switch e := e.(type) {
	err = json.Unmarshal(fileData, &data)
	if err != nil {
		switch err := err.(type) {
		case *json.SyntaxError:
			return false, probe.NewError(FormatJSONSyntaxError(bytes.NewReader(fileData), e))
			return false, FormatJSONSyntaxError(bytes.NewReader(fileData), err)
		default:
			return false, probe.NewError(e)
			return false, err
		}
	}
	config, err := New(data)
	if err != nil {
		return false, err.Trace()
		return false, err
	}
	if config.Version() != version {
		return false, nil
@ -124,34 +123,34 @@ func CheckVersion(filename string, version string) (bool, *probe.Error) {
}

// Load - loads json config from filename for a given struct data
func Load(filename string, data interface{}) (Config, *probe.Error) {
	_, e := os.Stat(filename)
	if e != nil {
		return nil, probe.NewError(e)
func Load(filename string, data interface{}) (Config, error) {
	_, err := os.Stat(filename)
	if err != nil {
		return nil, err
	}

	fileData, e := ioutil.ReadFile(filename)
	if e != nil {
		return nil, probe.NewError(e)
	fileData, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		fileData = []byte(strings.Replace(string(fileData), "\r\n", "\n", -1))
	}

	e = json.Unmarshal(fileData, &data)
	if e != nil {
		switch e := e.(type) {
	err = json.Unmarshal(fileData, &data)
	if err != nil {
		switch err := err.(type) {
		case *json.SyntaxError:
			return nil, probe.NewError(FormatJSONSyntaxError(bytes.NewReader(fileData), e))
			return nil, FormatJSONSyntaxError(bytes.NewReader(fileData), err)
		default:
			return nil, probe.NewError(e)
			return nil, err
		}
	}

	config, err := New(data)
	if err != nil {
		return nil, err.Trace()
		return nil, err
	}

	return config, nil
@ -177,20 +176,16 @@ func (d config) Version() string {
// writeFile writes data to a file named by filename.
// If the file does not exist, writeFile creates it;
// otherwise writeFile truncates it before writing.
func writeFile(filename string, data []byte) *probe.Error {
	safeFile, e := safe.CreateFile(filename)
	if e != nil {
		return probe.NewError(e)
func writeFile(filename string, data []byte) error {
	safeFile, err := safe.CreateFile(filename)
	if err != nil {
		return err
	}
	_, e = safeFile.Write(data)
	if e != nil {
		return probe.NewError(e)
	_, err = safeFile.Write(data)
	if err != nil {
		return err
	}
	e = safeFile.Close()
	if e != nil {
		return probe.NewError(e)
	}
	return nil
	return safeFile.Close()
}

// String converts JSON config to printable string
@ -200,37 +195,37 @@ func (d config) String() string {
}

// Save writes config data in JSON format to a file.
func (d config) Save(filename string) *probe.Error {
func (d config) Save(filename string) error {
	d.lock.Lock()
	defer d.lock.Unlock()

	// Check for existing file, if yes create a backup.
	st, e := os.Stat(filename)
	st, err := os.Stat(filename)
	// If file exists and stat failed return here.
	if e != nil && !os.IsNotExist(e) {
		return probe.NewError(e)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	// File exists and proceed to take backup.
	if e == nil {
	if err == nil {
		// File exists and is not a regular file return error.
		if !st.Mode().IsRegular() {
			return probe.NewError(fmt.Errorf("%s is not a regular file", filename))
			return fmt.Errorf("%s is not a regular file", filename)
		}
		// Read old data.
		var oldData []byte
		oldData, e = ioutil.ReadFile(filename)
		if e != nil {
			return probe.NewError(e)
		oldData, err = ioutil.ReadFile(filename)
		if err != nil {
			return err
		}
		// Save read data to the backup file.
		if err := writeFile(filename+".old", oldData); err != nil {
			return err.Trace(filename + ".old")
		if err = writeFile(filename+".old", oldData); err != nil {
			return err
		}
	}
	// Proceed to create or overwrite file.
	jsonData, e := json.MarshalIndent(d.data, "", "\t")
	if e != nil {
		return probe.NewError(e)
	jsonData, err := json.MarshalIndent(d.data, "", "\t")
	if err != nil {
		return err
	}

	if runtime.GOOS == "windows" {
@ -238,23 +233,22 @@ func (d config) Save(filename string) *probe.Error {
	}

	// Save data.
	err := writeFile(filename, jsonData)
	return err.Trace(filename)
	return writeFile(filename, jsonData)
}

// Load - loads JSON config from file and merge with currently set values
func (d *config) Load(filename string) *probe.Error {
func (d *config) Load(filename string) error {
	d.lock.Lock()
	defer d.lock.Unlock()

	_, e := os.Stat(filename)
	if e != nil {
		return probe.NewError(e)
	_, err := os.Stat(filename)
	if err != nil {
		return err
	}

	fileData, e := ioutil.ReadFile(filename)
	if e != nil {
		return probe.NewError(e)
	fileData, err := ioutil.ReadFile(filename)
	if err != nil {
		return err
	}

	if runtime.GOOS == "windows" {
@ -264,25 +258,25 @@ func (d *config) Load(filename string) *probe.Error {
	st := structs.New(d.data)
	f, ok := st.FieldOk("Version")
	if !ok {
		return probe.NewError(fmt.Errorf("Argument struct [%s] does not contain field \"Version\".", st.Name()))
		return fmt.Errorf("Argument struct [%s] does not contain field \"Version\".", st.Name())
	}

	e = json.Unmarshal(fileData, d.data)
	if e != nil {
		switch e := e.(type) {
	err = json.Unmarshal(fileData, d.data)
	if err != nil {
		switch err := err.(type) {
		case *json.SyntaxError:
			return probe.NewError(FormatJSONSyntaxError(bytes.NewReader(fileData), e))
			return FormatJSONSyntaxError(bytes.NewReader(fileData), err)
		default:
			return probe.NewError(e)
			return err
		}
	}

	if err := CheckData(d.data); err != nil {
		return err.Trace(filename)
		return err
	}

	if (*d).Version() != f.Value() {
		return probe.NewError(fmt.Errorf("Version mismatch"))
		return fmt.Errorf("Version mismatch")
	}

	return nil
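With probe removed, the package speaks plain error end to end. A hedged sketch of typical usage; the struct and file path are illustrative, and only the string Version field is mandatory (CheckData enforces it):

// myConfig carries the Version field that CheckData requires.
type myConfig struct {
	Version string
	Alias   string
}

func saveAndReload() error {
	cfg, err := New(&myConfig{Version: "1", Alias: "home"})
	if err != nil {
		return err
	}
	// Save writes indented JSON, backing up any existing file as <name>.old.
	if err = cfg.Save("/tmp/myconfig.json"); err != nil {
		return err
	}
	// Load re-reads the JSON and returns a plain error on version mismatch.
	return cfg.Load("/tmp/myconfig.json")
}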
@ -294,11 +288,11 @@ func (d config) Data() interface{} {
}

// Diff - list fields that are in A but not in B
func (d config) Diff(c Config) ([]structs.Field, *probe.Error) {
func (d config) Diff(c Config) ([]structs.Field, error) {
	var fields []structs.Field
	err := CheckData(c.Data())
	if err != nil {
		return []structs.Field{}, err.Trace()
		return []structs.Field{}, err
	}

	currFields := structs.Fields(d.Data())
@ -320,11 +314,11 @@ func (d config) Diff(c Config) ([]structs.Field, *probe.Error) {
}

// DeepDiff - list fields in A that are missing or not equal to fields in B
func (d config) DeepDiff(c Config) ([]structs.Field, *probe.Error) {
func (d config) DeepDiff(c Config) ([]structs.Field, error) {
	var fields []structs.Field
	err := CheckData(c.Data())
	if err != nil {
		return []structs.Field{}, err.Trace()
		return []structs.Field{}, err
	}

	currFields := structs.Fields(d.Data())
@ -78,7 +78,7 @@ func isDirExist(dirname string) (bool, error) {
}

// Initialize a new storage disk.
func newFS(diskPath string) (StorageAPI, error) {
func newPosix(diskPath string) (StorageAPI, error) {
	if diskPath == "" {
		log.Debug("Disk cannot be empty")
		return nil, errInvalidArgument

routers.go
@ -18,38 +18,30 @@ package main

import (
	"net/http"
	"path/filepath"
	"strings"

	router "github.com/gorilla/mux"
	"github.com/minio/minio/pkg/probe"
)

// newStorageAPI - initialize any storage API depending on the export path style.
func newStorageAPI(exportPaths ...string) (StorageAPI, error) {
// newObjectLayer - initialize any object layer depending on the
// number of export paths.
func newObjectLayer(exportPaths ...string) (ObjectLayer, error) {
	if len(exportPaths) == 1 {
		exportPath := exportPaths[0]
		if !strings.ContainsRune(exportPath, ':') || filepath.VolumeName(exportPath) != "" {
			// Initialize filesystem storage API.
			return newFS(exportPath)
			// Initialize FS object layer.
			return newFSObjects(exportPath)
		}
		// Initialize network storage API.
		return newNetworkFS(exportPath)
	}
	// Initialize XL storage API.
	return newXL(exportPaths...)
	// Initialize XL object layer.
	return newXLObjects(exportPaths...)
}
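The shape of the export paths picks the backend: a single local path (no ':' outside a Windows volume name) selects the FS layer, a single host:port path selects the network backend, and multiple paths select XL. Illustrative calls with hypothetical paths:

// One local path: FS object layer.
objFS, err := newObjectLayer("/mnt/disk1")

// Several export paths: XL object layer spread across the disks.
objXL, err := newObjectLayer("/mnt/disk1", "/mnt/disk2", "/mnt/disk3", "/mnt/disk4")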
// configureServer handler returns final handler for the http server.
func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
	storageAPI, e := newStorageAPI(srvCmdConfig.exportPaths...)
	fatalIf(probe.NewError(e), "Initializing storage API failed.", nil)
	objAPI, err := newObjectLayer(srvCmdConfig.exportPaths...)
	fatalIf(err, "Initializing object layer failed.", nil)

	// Initialize object layer.
	objAPI := newObjectLayer(storageAPI)

	// Initialize storage rpc.
	storageRPC := newStorageRPC(storageAPI)
	// Initialize storage rpc server.
	storageRPC, err := newRPCServer(srvCmdConfig.exportPaths[0]) // FIXME: should only have one path.
	fatalIf(err, "Initializing storage rpc server failed.", nil)

	// Initialize API.
	apiHandlers := objectAPIHandlers{
@ -76,8 +76,8 @@ func toStorageErr(err error) error {
	return err
}

// Initialize new network file system.
func newNetworkFS(networkPath string) (StorageAPI, error) {
// Initialize new rpc client.
func newRPCClient(networkPath string) (StorageAPI, error) {
	// Input validation.
	if networkPath == "" || strings.LastIndex(networkPath, ":") == -1 {
		log.WithFields(logrus.Fields{
@ -119,10 +119,15 @@ func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericRep
}

// Initialize new storage rpc.
func newStorageRPC(storageAPI StorageAPI) *storageServer {
	return &storageServer{
		storage: storageAPI,
func newRPCServer(exportPath string) (*storageServer, error) {
	// Initialize posix storage API.
	storage, err := newPosix(exportPath)
	if err != nil {
		return nil, err
	}
	return &storageServer{
		storage: storage,
	}, nil
}

// registerStorageRPCRouter - register storage rpc router.
@ -31,7 +31,6 @@ import (
	"github.com/minio/cli"
	"github.com/minio/mc/pkg/console"
	"github.com/minio/minio/pkg/minhttp"
	"github.com/minio/minio/pkg/probe"
)

var serverCmd = cli.Command{
@ -89,11 +88,11 @@ func configureServer(srvCmdConfig serverCmdConfig) *http.Server {

	// Configure TLS if certs are available.
	if isSSL() {
		var e error
		var err error
		apiServer.TLSConfig = &tls.Config{}
		apiServer.TLSConfig.Certificates = make([]tls.Certificate, 1)
		apiServer.TLSConfig.Certificates[0], e = tls.LoadX509KeyPair(mustGetCertFile(), mustGetKeyFile())
		fatalIf(probe.NewError(e), "Unable to load certificates.", nil)
		apiServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(mustGetCertFile(), mustGetKeyFile())
		fatalIf(err, "Unable to load certificates.", nil)
	}

	// Returns configured HTTP server.
@ -102,16 +101,16 @@ func configureServer(srvCmdConfig serverCmdConfig) *http.Server {

// Print listen ips.
func printListenIPs(httpServerConf *http.Server) {
	host, port, e := net.SplitHostPort(httpServerConf.Addr)
	fatalIf(probe.NewError(e), "Unable to split host port.", nil)
	host, port, err := net.SplitHostPort(httpServerConf.Addr)
	fatalIf(err, "Unable to split host port.", nil)

	var hosts []string
	switch {
	case host != "":
		hosts = append(hosts, host)
	default:
		addrs, e := net.InterfaceAddrs()
		fatalIf(probe.NewError(e), "Unable to get interface address.", nil)
		addrs, err := net.InterfaceAddrs()
		fatalIf(err, "Unable to get interface address.", nil)
		for _, addr := range addrs {
			if addr.Network() == "ip+net" {
				host := strings.Split(addr.String(), "/")[0]
@ -134,7 +133,7 @@ func printListenIPs(httpServerConf *http.Server) {
func initServerConfig(c *cli.Context) {
	// Save new config.
	err := serverConfig.Save()
	fatalIf(err.Trace(), "Unable to save config.", nil)
	fatalIf(err, "Unable to save config.", nil)

	// Fetch access keys from environment variables if any and update the config.
	accessKey := os.Getenv("MINIO_ACCESS_KEY")
@ -143,10 +142,10 @@ func initServerConfig(c *cli.Context) {
	// Validate if both keys are specified and they are valid save them.
	if accessKey != "" && secretKey != "" {
		if !isValidAccessKey.MatchString(accessKey) {
			fatalIf(probe.NewError(errInvalidArgument), "Access key does not have required length", nil)
			fatalIf(errInvalidArgument, "Access key does not have required length", nil)
		}
		if !isValidSecretKey.MatchString(secretKey) {
			fatalIf(probe.NewError(errInvalidArgument), "Secret key does not have required length", nil)
			fatalIf(errInvalidArgument, "Secret key does not have required length", nil)
		}
		serverConfig.SetCredential(credential{
			AccessKeyID: accessKey,
@ -169,10 +168,10 @@ func checkServerSyntax(c *cli.Context) {

// Extract port number from address, address should be of the form host:port.
func getPort(address string) int {
	_, portStr, e := net.SplitHostPort(address)
	fatalIf(probe.NewError(e), "Unable to split host port.", nil)
	portInt, e := strconv.Atoi(portStr)
	fatalIf(probe.NewError(e), "Invalid port number.", nil)
	_, portStr, err := net.SplitHostPort(address)
	fatalIf(err, "Unable to split host port.", nil)
	portInt, err := strconv.Atoi(portStr)
	fatalIf(err, "Invalid port number.", nil)
	return portInt
}
@ -186,11 +185,11 @@ func getPort(address string) int {
// on 127.0.0.1 even though minio server is running. So before we start
// the minio server we make sure that the port is free on all the IPs.
func checkPortAvailability(port int) {
	isAddrInUse := func(e error) bool {
	isAddrInUse := func(err error) bool {
		// Check if the syscall error is EADDRINUSE.
		// EADDRINUSE is the system call error if another process is
		// already listening at the specified port.
		neterr, ok := e.(*net.OpError)
		neterr, ok := err.(*net.OpError)
		if !ok {
			return false
		}
@ -207,19 +206,19 @@ func checkPortAvailability(port int) {
		}
		return true
	}
	ifcs, e := net.Interfaces()
	if e != nil {
		fatalIf(probe.NewError(e), "Unable to list interfaces.", nil)
	ifcs, err := net.Interfaces()
	if err != nil {
		fatalIf(err, "Unable to list interfaces.", nil)
	}
	for _, ifc := range ifcs {
		addrs, e := ifc.Addrs()
		if e != nil {
			fatalIf(probe.NewError(e), fmt.Sprintf("Unable to list addresses on interface %s.", ifc.Name), nil)
		addrs, err := ifc.Addrs()
		if err != nil {
			fatalIf(err, fmt.Sprintf("Unable to list addresses on interface %s.", ifc.Name), nil)
		}
		for _, addr := range addrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok {
				errorIf(probe.NewError(errors.New("")), "Interface type assertion to (*net.IPNet) failed.", nil)
				errorIf(errors.New(""), "Interface type assertion to (*net.IPNet) failed.", nil)
				continue
			}
			ip := ipnet.IP
@ -228,19 +227,18 @@ func checkPortAvailability(port int) {
				network = "tcp6"
			}
			tcpAddr := net.TCPAddr{IP: ip, Port: port, Zone: ifc.Name}
			l, e := net.ListenTCP(network, &tcpAddr)
			if e != nil {
				if isAddrInUse(e) {
			l, err := net.ListenTCP(network, &tcpAddr)
			if err != nil {
				if isAddrInUse(err) {
					// Fail if port is already in use.
					fatalIf(probe.NewError(e), fmt.Sprintf("Unable to listen on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
					fatalIf(err, fmt.Sprintf("Unable to listen on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
				} else {
					// Ignore other errors.
					continue
				}
			}
			e = l.Close()
			if e != nil {
				fatalIf(probe.NewError(e), fmt.Sprintf("Unable to close listener on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
			if err = l.Close(); err != nil {
				fatalIf(err, fmt.Sprintf("Unable to close listener on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
			}
		}
	}
@ -308,5 +306,5 @@ func serverMain(c *cli.Context) {

	// Start server.
	err := minhttp.ListenAndServe(apiServer)
	errorIf(err.Trace(), "Failed to start the minio server.", nil)
	errorIf(err.Cause, "Failed to start the minio server.", nil)
}
@ -21,7 +21,6 @@ import (
	"time"

	jwtgo "github.com/dgrijalva/jwt-go"
	"github.com/minio/minio/pkg/probe"
	"golang.org/x/crypto/bcrypt"
)
@ -49,17 +48,13 @@ func initJWT() *JWT {
}

// GenerateToken - generates a new Json Web Token based on the incoming user id.
func (jwt *JWT) GenerateToken(userName string) (string, *probe.Error) {
func (jwt *JWT) GenerateToken(userName string) (string, error) {
	token := jwtgo.New(jwtgo.SigningMethodHS512)
	// Token expires in 10hrs.
	token.Claims["exp"] = time.Now().Add(time.Hour * tokenExpires).Unix()
	token.Claims["iat"] = time.Now().Unix()
	token.Claims["sub"] = userName
	tokenString, e := token.SignedString([]byte(jwt.SecretAccessKey))
	if e != nil {
		return "", probe.NewError(e)
	}
	return tokenString, nil
	return token.SignedString([]byte(jwt.SecretAccessKey))
}
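Since SignedString already returns (string, error), GenerateToken can return its result directly and callers propagate the error unchanged. An illustrative call; the user name is hypothetical:

jwt := initJWT()
token, err := jwt.GenerateToken("admin")
if err != nil {
	return err
}
// token is a signed HS512 JWT carrying the exp/iat/sub claims set above.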
// Authenticate - authenticates incoming username and password.
@ -23,8 +23,6 @@ import (
	"reflect"
	"strings"
	"time"

	"github.com/minio/minio/pkg/probe"
)

// toString - Safely convert interface to string without causing panic.
@ -70,7 +68,7 @@ type PostPolicyForm struct {
}

// parsePostPolicyFormV4 - Parse JSON policy string into typed PostPolicyForm structure.
func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
func parsePostPolicyFormV4(policy string) (PostPolicyForm, error) {
	// Convert policy into interfaces and
	// perform strict type conversion using reflection.
	var rawPolicy struct {
@ -78,17 +76,17 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
		Conditions []interface{} `json:"conditions"`
	}

	e := json.Unmarshal([]byte(policy), &rawPolicy)
	if e != nil {
		return PostPolicyForm{}, probe.NewError(e)
	err := json.Unmarshal([]byte(policy), &rawPolicy)
	if err != nil {
		return PostPolicyForm{}, err
	}

	parsedPolicy := PostPolicyForm{}

	// Parse expiry time.
	parsedPolicy.Expiration, e = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
	if e != nil {
		return PostPolicyForm{}, probe.NewError(e)
	parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
	if err != nil {
		return PostPolicyForm{}, err
	}
	parsedPolicy.Conditions.Policies = make(map[string]struct {
		Operator string
@ -102,8 +100,7 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
			for k, v := range condt {
				if !isString(v) { // Pre-check value type.
					// All values must be of type string.
					return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.",
						reflect.TypeOf(condt).String(), condt))
					return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", reflect.TypeOf(condt).String(), condt)
				}
				// {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ]
				// In this case we will just collapse this into "eq" for all use cases.
@ -117,16 +114,14 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
			}
		case []interface{}: // Handle array types.
			if len(condt) != 3 { // Return error if we have insufficient elements.
				return parsedPolicy, probe.NewError(fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form.",
					condt, reflect.TypeOf(condt).String()))
				return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form.", condt, reflect.TypeOf(condt).String())
			}
			switch toString(condt[0]) {
			case "eq", "starts-with":
				for _, v := range condt { // Pre-check all values for type.
					if !isString(v) {
						// All values must be of type string.
						return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.",
							reflect.TypeOf(condt).String(), condt))
						return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", reflect.TypeOf(condt).String(), condt)
					}
				}
				operator, matchType, value := toString(condt[0]), toString(condt[1]), toString(condt[2])
@ -147,12 +142,10 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
			}
			default:
				// Condition should be valid.
				return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.",
					reflect.TypeOf(condt).String(), condt))
				return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form.", reflect.TypeOf(condt).String(), condt)
			}
		default:
			return parsedPolicy, probe.NewError(fmt.Errorf("Unknown field %s of type %s found in POST policy form.",
				condt, reflect.TypeOf(condt).String()))
			return parsedPolicy, fmt.Errorf("Unknown field %s of type %s found in POST policy form.", condt, reflect.TypeOf(condt).String())
		}
	}
	return parsedPolicy, nil
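For reference, the policy document being parsed is JSON with an expiration and a conditions array, where each condition is either a key/value map (collapsed into "eq") or an [operator, field, value] triple. A hypothetical document this parser accepts:

policy := `{
  "expiration": "2016-04-10T12:00:00.000Z",
  "conditions": [
    {"acl": "public-read"},
    ["eq", "$bucket", "photos"],
    ["starts-with", "$key", "user/"]
  ]
}`
form, err := parsePostPolicyFormV4(policy)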
@ -28,7 +28,6 @@ import (
	"github.com/fatih/color"
	"github.com/minio/cli"
	"github.com/minio/mc/pkg/console"
	"github.com/minio/minio/pkg/probe"
)

// command specific flags.
@ -90,7 +89,7 @@ func (u updateMessage) String() string {
		return updateMessage("You are already running the most recent version of ‘minio’.")
	}
	msg, err := colorizeUpdateMessage(u.Download)
	fatalIf(err.Trace(msg), "Unable to colorize experimental update notification string ‘"+msg+"’.", nil)
	fatalIf(err, "Unable to colorize experimental update notification string ‘"+msg+"’.", nil)
	return msg
}
@ -98,38 +97,38 @@ func (u updateMessage) String() string {
func (u updateMessage) JSON() string {
	u.Status = "success"
	updateMessageJSONBytes, err := json.Marshal(u)
	fatalIf(probe.NewError(err), "Unable to marshal into JSON.", nil)
	fatalIf(err, "Unable to marshal into JSON.", nil)

	return string(updateMessageJSONBytes)
}

func parseReleaseData(data string) (time.Time, *probe.Error) {
func parseReleaseData(data string) (time.Time, error) {
	releaseStr := strings.Fields(data)
	if len(releaseStr) < 2 {
		return time.Time{}, probe.NewError(errors.New("Update data malformed"))
		return time.Time{}, errors.New("Update data malformed")
	}
	releaseDate := releaseStr[1]
	releaseDateSplits := strings.SplitN(releaseDate, ".", 3)
	if len(releaseDateSplits) < 3 {
		return time.Time{}, probe.NewError(errors.New("Update data malformed"))
		return time.Time{}, errors.New("Update data malformed")
	}
	if releaseDateSplits[0] != "minio" {
		return time.Time{}, probe.NewError(errors.New("Update data malformed, missing minio tag"))
		return time.Time{}, errors.New("Update data malformed, missing minio tag")
	}
	// "OFFICIAL" tag is still kept for backward compatibility, we should remove this for the next release.
	if releaseDateSplits[1] != "RELEASE" && releaseDateSplits[1] != "OFFICIAL" {
		return time.Time{}, probe.NewError(errors.New("Update data malformed, missing RELEASE tag"))
		return time.Time{}, errors.New("Update data malformed, missing RELEASE tag")
	}
	dateSplits := strings.SplitN(releaseDateSplits[2], "T", 2)
	if len(dateSplits) < 2 {
		return time.Time{}, probe.NewError(errors.New("Update data malformed, not in modified RFC3339 form"))
		return time.Time{}, errors.New("Update data malformed, not in modified RFC3339 form")
	}
	dateSplits[1] = strings.Replace(dateSplits[1], "-", ":", -1)
	date := strings.Join(dateSplits, "T")

	parsedDate, e := time.Parse(time.RFC3339, date)
	if e != nil {
		return time.Time{}, probe.NewError(e)
	parsedDate, err := time.Parse(time.RFC3339, date)
	if err != nil {
		return time.Time{}, err
	}
	return parsedDate, nil
}
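The release line parsed here has the shape "<checksum> minio.RELEASE.<timestamp>", where the timestamp is RFC3339 with its colons replaced by dashes; the parser swaps them back before calling time.Parse. An illustrative input with a hypothetical checksum:

data := "f0ad20fe minio.RELEASE.2016-04-08T20-59-08Z"
released, err := parseReleaseData(data)
// released is 2016-04-08 20:59:08 +0000 UTC when err is nil.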
@ -159,32 +158,32 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {

	// Instantiate a new client with 1 sec timeout.
	client := &http.Client{
		Timeout: 500 * time.Millisecond,
		Timeout: 1 * time.Second,
	}

	// Fetch new update.
	data, e := client.Get(newUpdateURL)
	if e != nil && noError {
	data, err := client.Get(newUpdateURL)
	if err != nil && noError {
		return updateMsg
	}
	fatalIf(probe.NewError(e), "Unable to read from update URL ‘"+newUpdateURL+"’.", nil)
	fatalIf(err, "Unable to read from update URL ‘"+newUpdateURL+"’.", nil)

	// Error out if 'update' command is issued for development based builds.
	if minioVersion == "DEVELOPMENT.GOGET" && !noError {
		fatalIf(probe.NewError(errors.New("")),
		fatalIf(errors.New(""),
			"Update mechanism is not supported for ‘go get’ based binary builds. Please download official releases from https://minio.io/#minio", nil)
	}

	// Parse current minio version into RFC3339.
	current, e := time.Parse(time.RFC3339, minioVersion)
	if e != nil && noError {
	current, err := time.Parse(time.RFC3339, minioVersion)
	if err != nil && noError {
		return updateMsg
	}
	fatalIf(probe.NewError(e), "Unable to parse version string as time.", nil)
	fatalIf(err, "Unable to parse version string as time.", nil)

	// Verify if current minio version is zero.
	if current.IsZero() && !noError {
		fatalIf(probe.NewError(errors.New("")),
		fatalIf(errors.New(""),
			"Updates not supported for custom builds. Version field is empty. Please download official releases from https://minio.io/#minio", nil)
	}
@ -195,27 +194,27 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
			if noError {
				return updateMsg
			}
			fatalIf(probe.NewError(errors.New("")), "Update server responded with "+data.Status, nil)
			fatalIf(errors.New(""), "Update server responded with "+data.Status, nil)
		}
	}

	// Read the response body.
	updateBody, e := ioutil.ReadAll(data.Body)
	if e != nil && noError {
	updateBody, err := ioutil.ReadAll(data.Body)
	if err != nil && noError {
		return updateMsg
	}
	fatalIf(probe.NewError(e), "Fetching updates failed. Please try again.", nil)
	fatalIf(err, "Fetching updates failed. Please try again.", nil)

	// Parse the date if it's valid.
	latest, err := parseReleaseData(string(updateBody))
	if err != nil && noError {
		return updateMsg
	}
	fatalIf(err.Trace(updateURL), "Please report this issue at https://github.com/minio/minio/issues.", nil)
	fatalIf(err, "Please report this issue at https://github.com/minio/minio/issues.", nil)

	// Verify if the date is not zero.
	if latest.IsZero() && !noError {
		fatalIf(probe.NewError(errors.New("")),
		fatalIf(errors.New(""),
			"Unable to validate any update available at this time. Please open an issue at https://github.com/minio/minio/issues", nil)
	}
utils.go
@ -19,7 +19,6 @@ package main
import (
	"encoding/base64"
	"encoding/xml"
	"github.com/minio/minio/pkg/probe"
	"io"
	"strings"
)

@ -31,12 +30,8 @@ func xmlDecoder(body io.Reader, v interface{}) error {
}

// checkValidMD5 - verify if valid md5, returns md5 in bytes.
func checkValidMD5(md5 string) ([]byte, *probe.Error) {
	md5Bytes, e := base64.StdEncoding.DecodeString(strings.TrimSpace(md5))
	if e != nil {
		return nil, probe.NewError(e)
	}
	return md5Bytes, nil
func checkValidMD5(md5 string) ([]byte, error) {
	return base64.StdEncoding.DecodeString(strings.TrimSpace(md5))
}
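Content-Md5 arrives base64-encoded per RFC 1864, so the helper reduces to trim plus decode. For example:

// Base64 encoding of the 16 raw MD5 bytes of "hello".
md5Bytes, err := checkValidMD5("XUFAKrxLKna5cZ2REBfFkg==")
// hex.EncodeToString(md5Bytes) == "5d41402abc4b2a76b9719d911017c592"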
/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html

@ -56,6 +51,5 @@ func contains(stringList []string, element string) bool {
			return true
		}
	}

	return false
}
@ -109,9 +109,8 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
		return &json2.Error{Message: "Unauthorized request"}
	}
	reply.UIVersion = miniobrowser.UIVersion
	e := web.ObjectAPI.MakeBucket(args.BucketName)
	if e != nil {
		return &json2.Error{Message: e.Cause.Error()}
	if err := web.ObjectAPI.MakeBucket(args.BucketName); err != nil {
		return &json2.Error{Message: err.Error()}
	}
	return nil
}
@ -139,9 +138,9 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}
	buckets, e := web.ObjectAPI.ListBuckets()
	if e != nil {
		return &json2.Error{Message: e.Cause.Error()}
	buckets, err := web.ObjectAPI.ListBuckets()
	if err != nil {
		return &json2.Error{Message: err.Error()}
	}
	for _, bucket := range buckets {
		// List all buckets which are not private.
@ -191,7 +190,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
	for {
		lo, err := web.ObjectAPI.ListObjects(args.BucketName, args.Prefix, marker, "/", 1000)
		if err != nil {
			return &json2.Error{Message: err.Cause.Error()}
			return &json2.Error{Message: err.Error()}
		}
		marker = lo.NextMarker
		for _, obj := range lo.Objects {
@ -227,9 +226,8 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs,
		return &json2.Error{Message: "Unauthorized request"}
	}
	reply.UIVersion = miniobrowser.UIVersion
	e := web.ObjectAPI.DeleteObject(args.BucketName, args.ObjectName)
	if e != nil {
		return &json2.Error{Message: e.Cause.Error()}
	if err := web.ObjectAPI.DeleteObject(args.BucketName, args.ObjectName); err != nil {
		return &json2.Error{Message: err.Error()}
	}
	return nil
}
@ -252,7 +250,7 @@ func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginR
|
|||
if jwt.Authenticate(args.Username, args.Password) {
|
||||
token, err := jwt.GenerateToken(args.Username)
|
||||
if err != nil {
|
||||
return &json2.Error{Message: err.Cause.Error(), Data: err.String()}
|
||||
return &json2.Error{Message: err.Error()}
|
||||
}
|
||||
reply.Token = token
|
||||
reply.UIVersion = miniobrowser.UIVersion
|
||||
|
@ -305,7 +303,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
|
|||
cred := credential{args.AccessKey, args.SecretKey}
|
||||
serverConfig.SetCredential(cred)
|
||||
if err := serverConfig.Save(); err != nil {
|
||||
return &json2.Error{Message: err.Cause.Error()}
|
||||
return &json2.Error{Message: err.Error()}
|
||||
}
|
||||
|
||||
jwt := initJWT()
|
||||
|
@ -314,7 +312,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
|
|||
}
|
||||
token, err := jwt.GenerateToken(args.AccessKey)
|
||||
if err != nil {
|
||||
return &json2.Error{Message: err.Cause.Error()}
|
||||
return &json2.Error{Message: err.Error()}
|
||||
}
|
||||
reply.Token = token
|
||||
reply.UIVersion = miniobrowser.UIVersion
|
||||
|
@ -350,7 +348,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
|
|||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
if _, err := web.ObjectAPI.PutObject(bucket, object, -1, r.Body, nil); err != nil {
|
||||
writeWebErrorResponse(w, err.ToGoError())
|
||||
writeWebErrorResponse(w, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -377,10 +375,10 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
objReader, err := web.ObjectAPI.GetObject(bucket, object, 0)
|
||||
if err != nil {
|
||||
writeWebErrorResponse(w, err.ToGoError())
|
||||
writeWebErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
if _, e := io.Copy(w, objReader); e != nil {
|
||||
if _, err := io.Copy(w, objReader); err != nil {
|
||||
/// No need to print error, response writer already written to.
|
||||
return
|
||||
}
|
||||
|
|
|
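All of the handler changes above follow one mechanical pattern: the probe.Error wrapper, with its .Trace(), .Cause, and .ToGoError() accessors, is dropped in favor of Go's builtin error interface. A restatement of the pattern, distilled from the MakeBucket hunk above:

// Before (probe-based): errors had to be unwrapped at every boundary.
//     e := web.ObjectAPI.MakeBucket(args.BucketName) // *probe.Error
//     if e != nil {
//         return &json2.Error{Message: e.Cause.Error()}
//     }
// After: plain errors flow through directly.
if err := web.ObjectAPI.MakeBucket(args.BucketName); err != nil {
	return &json2.Error{Message: err.Error()}
}
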
@ -30,7 +30,7 @@ import (

// webAPI container for Web API.
type webAPIHandlers struct {
ObjectAPI objectAPI
ObjectAPI ObjectLayer
}

// indexHandler - Handler to serve index.html
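The ObjectAPI field is now typed against an interface rather than the concrete objectAPI struct. The diff does not include the ObjectLayer definition itself; pieced together from the call sites in this commit, it plausibly looks like the following sketch (signatures inferred, so treat this as an approximation rather than the real definition):

// Sketch of the ObjectLayer interface, inferred from the call sites in
// this commit; the actual definition lives elsewhere in the tree.
type ObjectLayer interface {
	// Bucket operations.
	MakeBucket(bucket string) error
	GetBucketInfo(bucket string) (BucketInfo, error)
	ListBuckets() ([]BucketInfo, error)
	DeleteBucket(bucket string) error
	// Object operations.
	GetObject(bucket, object string, startOffset int64) (io.ReadCloser, error)
	GetObjectInfo(bucket, object string) (ObjectInfo, error)
	PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string) (string, error)
	DeleteObject(bucket, object string) error
	ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error)
	// Multipart operations.
	ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error)
	NewMultipartUpload(bucket, object string) (string, error)
	PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error)
	ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error)
	CompleteMultipartUpload(bucket, object, uploadID string, parts []completePart) (string, error)
	AbortMultipartUpload(bucket, object, uploadID string) error
}
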
@ -83,7 +83,7 @@ func newXL(disks ...string) (StorageAPI, error) {
storageDisks := make([]StorageAPI, len(disks))
for index, disk := range disks {
var err error
storageDisks[index], err = newFS(disk)
storageDisks[index], err = newPosix(disk)
if err != nil {
return nil, err
}

@ -573,6 +573,7 @@ func (xl XL) DeleteFile(volume, path string) error {

// RenameFile - rename file.
func (xl XL) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
// Validate inputs.
if !isValidVolname(srcVolume) {
return errInvalidArgument
}

@ -587,6 +588,12 @@ func (xl XL) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
}
for _, disk := range xl.storageDisks {
if err := disk.RenameFile(srcVolume, srcPath, dstVolume, dstPath); err != nil {
log.WithFields(logrus.Fields{
"srcVolume": srcVolume,
"srcPath": srcPath,
"dstVolume": dstVolume,
"dstPath": dstPath,
}).Errorf("RenameFile failed with %s", err)
return err
}
}
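For orientation, a hedged sketch of how this storage layer might be wired up and exercised; the disk paths are invented, while newXL and RenameFile are as above:

// Hypothetical wiring (paths invented): newXL builds one posix-backed
// StorageAPI per disk; RenameFile then fans the rename out to every disk
// and aborts on the first failure, as shown above.
func renameOnAllDisks() error {
	storage, err := newXL("/mnt/disk1", "/mnt/disk2", "/mnt/disk3")
	if err != nil {
		return err
	}
	return storage.RenameFile("bucket", "tmp/object", "bucket", "object")
}
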
@ -0,0 +1,531 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package main

import (
"crypto/md5"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"path"
"strconv"
"strings"

"github.com/Sirupsen/logrus"
"github.com/skyrings/skyring-common/tools/uuid"
)

// listLeafEntries - lists all entries if a given prefixPath is a leaf
// directory, returns error if any - returns empty list if prefixPath
// is not a leaf directory.
func (xl xlObjects) listLeafEntries(prefixPath string) (entries []FileInfo, e error) {
var markerPath string
for {
fileInfos, eof, err := xl.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, false, 1000)
if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": prefixPath,
"markerPath": markerPath,
}).Errorf("%s", err)
return nil, err
}
for _, fileInfo := range fileInfos {
// Set marker for next batch of ListFiles.
markerPath = fileInfo.Name
if fileInfo.Mode.IsDir() {
// If a directory is found, return nothing.
return nil, nil
}
fileName := path.Base(fileInfo.Name)
if !strings.Contains(fileName, ".") {
// Skip the entry if it is of the pattern bucket/object/uploadID.partNum.md5sum
// and retain entries of the pattern bucket/object/uploadID
entries = append(entries, fileInfo)
}
}
if eof {
break
}
}
return entries, nil
}

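The listing logic above hinges on a file-name convention inside minioMetaVolume: an in-progress upload is tracked as bucket/object/uploadID, while each uploaded part is named bucket/object/uploadID.partNum.md5sum, so a dot in the base name marks a part file. A tiny sketch of that test (the helper name is invented):

// isPartFile reports whether a meta-volume entry names an uploaded part
// (uploadID.partNum.md5sum) rather than an upload marker (uploadID).
// Mirrors the strings.Contains check used in listLeafEntries above.
func isPartFile(name string) bool {
	return strings.Contains(path.Base(name), ".")
}
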
// listMetaVolumeFiles - list all files at a given prefix inside minioMetaVolume.
func (xl xlObjects) listMetaVolumeFiles(prefixPath string, markerPath string, recursive bool, maxKeys int) (allFileInfos []FileInfo, eof bool, err error) {
// newMaxKeys tracks the number of entries which are going to be returned.
var newMaxKeys int

// The following loop gathers and filters out special files inside
// minio meta volume.
for {
var fileInfos []FileInfo
// List files up to maxKeys-newMaxKeys, since we are skipping entries for special files.
fileInfos, eof, err = xl.storage.ListFiles(minioMetaVolume, prefixPath, markerPath, recursive, maxKeys-newMaxKeys)
if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": prefixPath,
"markerPath": markerPath,
"recursive": recursive,
"maxKeys": maxKeys,
}).Errorf("%s", err)
return nil, true, err
}
// Loop through and validate individual file.
for _, fi := range fileInfos {
var entries []FileInfo
if fi.Mode.IsDir() {
// List all the entries if fi.Name is a leaf directory, if
// fi.Name is not a leaf directory then the resulting
// entries are empty.
entries, err = xl.listLeafEntries(fi.Name)
if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": fi.Name,
}).Errorf("%s", err)
return nil, false, err
}
}
// Set markerPath for next batch of listing.
markerPath = fi.Name
if len(entries) > 0 {
// We reach here for non-recursive case and a leaf entry.
for _, entry := range entries {
allFileInfos = append(allFileInfos, entry)
newMaxKeys++
// If we have reached the maxKeys, it means we have listed
// everything that was requested. Return right here.
if newMaxKeys == maxKeys {
// Return values:
// allFileInfos : "maxKeys" number of entries.
// eof : eof returned by xl.storage.ListFiles()
// error : nil
return
}
}
} else {
// We reach here for a non-recursive case non-leaf entry
// OR recursive case with fi.Name matching pattern bucket/object/uploadID[.partNum.md5sum]
if !fi.Mode.IsDir() { // Do not skip non-recursive case directory entries.
// Skip files matching pattern bucket/object/uploadID.partNum.md5sum
// and retain files matching pattern bucket/object/uploadID
specialFile := path.Base(fi.Name)
if strings.Contains(specialFile, ".") {
// Contains partnumber and md5sum info, skip this.
continue
}
}
}
allFileInfos = append(allFileInfos, fi)
newMaxKeys++
// If we have reached the maxKeys, it means we have listed
// everything that was requested. Return right here.
if newMaxKeys == maxKeys {
// Return values:
// allFileInfos : "maxKeys" number of entries.
// eof : eof returned by xl.storage.ListFiles()
// error : nil
return
}
}
// If we have reached eof then we break out.
if eof {
break
}
}

// Return entries here.
return allFileInfos, eof, nil
}

// ListMultipartUploads - list multipart uploads.
func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
result := ListMultipartsInfo{}
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListMultipartsInfo{}, (BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectPrefix(prefix) {
return ListMultipartsInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: prefix})
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return ListMultipartsInfo{}, (UnsupportedDelimiter{
Delimiter: delimiter,
})
}
// Verify if marker has prefix.
if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
return ListMultipartsInfo{}, (InvalidMarkerPrefixCombination{
Marker: keyMarker,
Prefix: prefix,
})
}
if uploadIDMarker != "" {
if strings.HasSuffix(keyMarker, slashSeparator) {
return result, (InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker,
})
}
id, err := uuid.Parse(uploadIDMarker)
if err != nil {
return result, err
}
if id.IsZero() {
return result, (MalformedUploadID{
UploadID: uploadIDMarker,
})
}
}

recursive := true
if delimiter == slashSeparator {
recursive = false
}

result.IsTruncated = true
result.MaxUploads = maxUploads

// Not using path.Join() as it strips off the trailing '/'.
// Also bucket should always be followed by '/' even if prefix is empty.
prefixPath := pathJoin(bucket, prefix)
keyMarkerPath := ""
if keyMarker != "" {
keyMarkerPath = pathJoin(pathJoin(bucket, keyMarker), uploadIDMarker)
}
// List all the multipart files at prefixPath, starting with marker keyMarkerPath.
fileInfos, eof, err := xl.listMetaVolumeFiles(prefixPath, keyMarkerPath, recursive, maxUploads)
if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": prefixPath,
"markerPath": keyMarkerPath,
"recursive": recursive,
"maxUploads": maxUploads,
}).Errorf("listMetaVolumeFiles failed with %s", err)
return ListMultipartsInfo{}, err
}

// Loop through all the received files and fill in the multiparts result.
for _, fi := range fileInfos {
var objectName string
var uploadID string
if fi.Mode.IsDir() {
// All directory entries are common prefixes.
uploadID = "" // Upload ids are empty for CommonPrefixes.
objectName = strings.TrimPrefix(fi.Name, retainSlash(bucket))
result.CommonPrefixes = append(result.CommonPrefixes, objectName)
} else {
uploadID = path.Base(fi.Name)
objectName = strings.TrimPrefix(path.Dir(fi.Name), retainSlash(bucket))
result.Uploads = append(result.Uploads, uploadMetadata{
Object: objectName,
UploadID: uploadID,
Initiated: fi.ModTime,
})
}
result.NextKeyMarker = objectName
result.NextUploadIDMarker = uploadID
}
result.IsTruncated = !eof
if !result.IsTruncated {
result.NextKeyMarker = ""
result.NextUploadIDMarker = ""
}
return result, nil
}

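A hedged usage sketch (bucket and prefix values are illustrative): with delimiter "/" the walk is non-recursive, directory entries come back as CommonPrefixes while leaf entries become Uploads, and the Next* markers page through truncated results.

// Hypothetical caller paging through in-progress uploads under a prefix.
func printUploads(xl xlObjects) error {
	keyMarker, uploadIDMarker := "", ""
	for {
		res, err := xl.ListMultipartUploads("photos", "2016/", keyMarker, uploadIDMarker, "/", 1000)
		if err != nil {
			return err
		}
		for _, u := range res.Uploads {
			fmt.Println(u.Object, u.UploadID, u.Initiated)
		}
		for _, p := range res.CommonPrefixes {
			fmt.Println("prefix:", p)
		}
		if !res.IsTruncated {
			return nil
		}
		keyMarker, uploadIDMarker = res.NextKeyMarker, res.NextUploadIDMarker
	}
}
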
func (xl xlObjects) NewMultipartUpload(bucket, object string) (string, error) {
// Verify if bucket name is valid.
if !IsValidBucketName(bucket) {
return "", (BucketNameInvalid{Bucket: bucket})
}
// Verify if object name is valid.
if !IsValidObjectName(object) {
return "", ObjectNameInvalid{Bucket: bucket, Object: object}
}
// Verify whether the bucket exists.
if isExist, err := xl.isBucketExist(bucket); err != nil {
return "", err
} else if !isExist {
return "", BucketNotFound{Bucket: bucket}
}

if _, err := xl.storage.StatVol(minioMetaVolume); err != nil {
if err == errVolumeNotFound {
err = xl.storage.MakeVol(minioMetaVolume)
if err != nil {
return "", toObjectErr(err)
}
}
}
for {
uuid, err := uuid.New()
if err != nil {
return "", err
}
uploadID := uuid.String()
uploadIDPath := path.Join(bucket, object, uploadID)
if _, err = xl.storage.StatFile(minioMetaVolume, uploadIDPath); err != nil {
if err != errFileNotFound {
return "", (toObjectErr(err, minioMetaVolume, uploadIDPath))
}
// uploadIDPath doesn't exist, so create an empty file to reserve the name.
var w io.WriteCloser
if w, err = xl.storage.CreateFile(minioMetaVolume, uploadIDPath); err == nil {
// Close the writer.
if err = w.Close(); err != nil {
return "", err
}
} else {
return "", toObjectErr(err, minioMetaVolume, uploadIDPath)
}
return uploadID, nil
}
// uploadIDPath already exists.
// Loop again to try with a different generated uuid.
}
}

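The loop above reserves a fresh uploadID by stat-then-create, retrying with a new uuid on collision. On a plain filesystem the same reservation is usually expressed atomically with an exclusive create; a standalone sketch of that idiom (this is an analogy, not the code above, which goes through StorageAPI):

package main

import (
	"fmt"
	"os"
)

// reserveName creates path exclusively; os.O_EXCL makes the create fail
// if the name already exists, so a success atomically claims the name.
func reserveName(path string) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	return f.Close()
}

func main() {
	fmt.Println(reserveName("/tmp/upload-id-reservation"))
}
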
// isUploadIDExists - verify if a given uploadID exists and is valid.
func (xl xlObjects) isUploadIDExists(bucket, object, uploadID string) (bool, error) {
uploadIDPath := path.Join(bucket, object, uploadID)
st, err := xl.storage.StatFile(minioMetaVolume, uploadIDPath)
if err != nil {
// Upload id does not exist.
if err == errFileNotFound {
return false, nil
}
return false, err
}
// Upload id exists and is a regular file.
return st.Mode.IsRegular(), nil
}

// PutObjectPart - writes the multipart upload chunks.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", BucketNameInvalid{Bucket: bucket}
}
if !IsValidObjectName(object) {
return "", ObjectNameInvalid{Bucket: bucket, Object: object}
}
// Verify whether the bucket exists.
if isExist, err := xl.isBucketExist(bucket); err != nil {
return "", err
} else if !isExist {
return "", BucketNotFound{Bucket: bucket}
}

if status, err := xl.isUploadIDExists(bucket, object, uploadID); err != nil {
return "", err
} else if !status {
return "", InvalidUploadID{UploadID: uploadID}
}

partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, partID, md5Hex)
fileWriter, err := xl.storage.CreateFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
if err != nil {
return "", toObjectErr(err, bucket, object)
}

// Initialize md5 writer.
md5Writer := md5.New()

// Instantiate a new multi writer.
multiWriter := io.MultiWriter(md5Writer, fileWriter)

// Instantiate checksum hashers and create a multiwriter.
if size > 0 {
if _, err = io.CopyN(multiWriter, data, size); err != nil {
safeCloseAndRemove(fileWriter)
return "", (toObjectErr(err))
}
// The reader shouldn't have more data than mentioned in the size argument.
// Reading one more byte from the reader validates this: the read is
// expected to fail, and success indicates extra data in the reader.
if _, err = io.CopyN(ioutil.Discard, data, 1); err == nil {
safeCloseAndRemove(fileWriter)
return "", (UnExpectedDataSize{Size: int(size)})
}
} else {
if _, err = io.Copy(multiWriter, data); err != nil {
safeCloseAndRemove(fileWriter)
return "", (toObjectErr(err))
}
}

newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if md5Hex != "" {
if newMD5Hex != md5Hex {
safeCloseAndRemove(fileWriter)
return "", (BadDigest{md5Hex, newMD5Hex})
}
}
err = fileWriter.Close()
if err != nil {
return "", err
}
return newMD5Hex, nil
}

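The size check above is a small but subtle idiom: copy exactly size bytes, then probe for one more; if the extra read succeeds, the client sent more data than declared. A self-contained sketch of the same technique (all names invented):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

// copyExactly writes exactly size bytes from r to w and fails if r
// holds more than size bytes, mirroring the CopyN-then-probe check above.
func copyExactly(w io.Writer, r io.Reader, size int64) error {
	if _, err := io.CopyN(w, r, size); err != nil {
		return err // short read: less data than declared
	}
	if _, err := io.CopyN(ioutil.Discard, r, 1); err == nil {
		return errors.New("reader has more data than the declared size")
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	fmt.Println(copyExactly(&buf, bytes.NewReader([]byte("hello")), 5))  // <nil>
	fmt.Println(copyExactly(&buf, bytes.NewReader([]byte("hello!")), 5)) // error
}
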
func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListPartsInfo{}, (BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return ListPartsInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: object})
}
if status, err := xl.isUploadIDExists(bucket, object, uploadID); err != nil {
return ListPartsInfo{}, err
} else if !status {
return ListPartsInfo{}, (InvalidUploadID{UploadID: uploadID})
}
result := ListPartsInfo{}
var markerPath string
nextPartNumberMarker := 0
uploadIDPath := path.Join(bucket, object, uploadID)
// Figure out the marker for the next subsequent calls, if the
// partNumberMarker is already set.
if partNumberMarker > 0 {
partNumberMarkerPath := uploadIDPath + "." + strconv.Itoa(partNumberMarker) + "."
fileInfos, _, err := xl.storage.ListFiles(minioMetaVolume, partNumberMarkerPath, "", false, 1)
if err != nil {
return result, toObjectErr(err, minioMetaVolume, partNumberMarkerPath)
}
if len(fileInfos) == 0 {
return result, (InvalidPart{})
}
markerPath = fileInfos[0].Name
}
uploadIDPrefix := uploadIDPath + "."
fileInfos, eof, err := xl.storage.ListFiles(minioMetaVolume, uploadIDPrefix, markerPath, false, maxParts)
if err != nil {
return result, InvalidPart{}
}
for _, fileInfo := range fileInfos {
fileName := path.Base(fileInfo.Name)
splitResult := strings.Split(fileName, ".")
partNum, err := strconv.Atoi(splitResult[1])
if err != nil {
return result, err
}
md5sum := splitResult[2]
result.Parts = append(result.Parts, partInfo{
PartNumber: partNum,
LastModified: fileInfo.ModTime,
ETag: md5sum,
Size: fileInfo.Size,
})
nextPartNumberMarker = partNum
}
result.Bucket = bucket
result.Object = object
result.UploadID = uploadID
result.PartNumberMarker = partNumberMarker
result.NextPartNumberMarker = nextPartNumberMarker
result.MaxParts = maxParts
result.IsTruncated = !eof
return result, nil
}

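Part files encode their metadata in the name itself, which is what the strings.Split above unpacks. A hedged sketch of that parse (the helper is invented; it assumes the uploadID contains no '.', which holds since it is a UUID):

// splitPartName unpacks "uploadID.partNum.md5sum", the partSuffix format
// produced by PutObjectPart above. UUIDs use '-' rather than '.', so the
// two dots are unambiguous separators.
func splitPartName(fileName string) (uploadID string, partNum int, md5sum string, err error) {
	fields := strings.Split(fileName, ".")
	if len(fields) != 3 {
		return "", 0, "", fmt.Errorf("malformed part name %q", fileName)
	}
	partNum, err = strconv.Atoi(fields[1])
	return fields[0], partNum, fields[2], err
}
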
func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", (BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return "", (ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
}
if status, err := xl.isUploadIDExists(bucket, object, uploadID); err != nil {
return "", err
} else if !status {
return "", (InvalidUploadID{UploadID: uploadID})
}

fileWriter, err := xl.storage.CreateFile(bucket, object)
if err != nil {
return "", toObjectErr(err, bucket, object)
}

var md5Sums []string
for _, part := range parts {
// Construct part suffix.
partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, part.PartNumber, part.ETag)
err = xl.storage.RenameFile(minioMetaVolume, path.Join(bucket, object, partSuffix), bucket, path.Join(object, fmt.Sprint(part.PartNumber)))
if err != nil {
return "", err
}
md5Sums = append(md5Sums, part.ETag)
}

err = fileWriter.Close()
if err != nil {
return "", err
}

// Save the s3 md5.
s3MD5, err := makeS3MD5(md5Sums...)
if err != nil {
return "", err
}

// Return md5sum.
return s3MD5, nil
}

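Taken together, the methods above form the usual S3 multipart flow. A hedged end-to-end sketch against this layer (bucket, object, and data are illustrative; a single part is used for brevity):

// Hypothetical multipart round trip using the methods defined above;
// xl is an xlObjects value and data is the object's single chunk.
func uploadSinglePart(xl xlObjects, data []byte) (string, error) {
	uploadID, err := xl.NewMultipartUpload("photos", "2016/trip.jpg")
	if err != nil {
		return "", err
	}
	etag, err := xl.PutObjectPart("photos", "2016/trip.jpg", uploadID, 1, int64(len(data)), bytes.NewReader(data), "")
	if err != nil {
		xl.AbortMultipartUpload("photos", "2016/trip.jpg", uploadID)
		return "", err
	}
	// CompleteMultipartUpload stitches the parts and returns the S3-style md5.
	return xl.CompleteMultipartUpload("photos", "2016/trip.jpg", uploadID, []completePart{
		{PartNumber: 1, ETag: etag},
	})
}
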
// AbortMultipartUpload - abort multipart upload.
func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return (BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return (ObjectNameInvalid{Bucket: bucket, Object: object})
}
if status, err := xl.isUploadIDExists(bucket, object, uploadID); err != nil {
return err
} else if !status {
return (InvalidUploadID{UploadID: uploadID})
}

markerPath := ""
for {
uploadIDPath := path.Join(bucket, object, uploadID)
fileInfos, eof, err := xl.storage.ListFiles(minioMetaVolume, uploadIDPath, markerPath, false, 1000)
if err != nil {
if err == errFileNotFound {
return (InvalidUploadID{UploadID: uploadID})
}
return toObjectErr(err)
}
for _, fileInfo := range fileInfos {
xl.storage.DeleteFile(minioMetaVolume, fileInfo.Name)
markerPath = fileInfo.Name
}
if eof {
break
}
}
return nil
}

@ -27,26 +27,30 @@ import (
"strings"

"github.com/minio/minio/pkg/mimedb"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/safe"
)

const (
multipartMetaFile = "multipart.json"
)

type objectAPI struct {
// xlObjects - Implements fs object layer.
type xlObjects struct {
storage StorageAPI
}

func newObjectLayer(storage StorageAPI) objectAPI {
return objectAPI{storage}
// newXLObjects - initialize new xl object layer.
func newXLObjects(exportPaths ...string) (ObjectLayer, error) {
storage, err := newXL(exportPaths...)
if err != nil {
return nil, err
}
return xlObjects{storage}, nil
}

// checks whether bucket exists.
func (o objectAPI) isBucketExist(bucketName string) (bool, error) {
func (xl xlObjects) isBucketExist(bucketName string) (bool, error) {
// Check whether bucket exists.
if _, e := o.storage.StatVol(bucketName); e != nil {
if _, e := xl.storage.StatVol(bucketName); e != nil {
if e == errVolumeNotFound {
return false, nil
}
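newXLObjects is now a constructor returning the ObjectLayer interface, which is what lets callers (like the web handlers earlier in this diff) stay backend-agnostic. A hedged wiring sketch; the matching single-disk fs constructor is assumed from the commit's xl/fs split and its actual name may differ:

// Hypothetical server bootstrap choosing a backend behind ObjectLayer.
// newFSObjects is an assumed name for the fs counterpart, not from this diff.
func newObjectLayerFor(exportPaths ...string) (ObjectLayer, error) {
	if len(exportPaths) > 1 {
		return newXLObjects(exportPaths...) // multi-disk xl backend
	}
	return newFSObjects(exportPaths[0]) // single-disk fs backend (assumed)
}
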
@ -58,35 +62,35 @@ func (o objectAPI) isBucketExist(bucketName string) (bool, error) {
/// Bucket operations

// MakeBucket - make a bucket.
func (o objectAPI) MakeBucket(bucket string) *probe.Error {
func (xl xlObjects) MakeBucket(bucket string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
return BucketNameInvalid{Bucket: bucket}
}
if e := o.storage.MakeVol(bucket); e != nil {
return probe.NewError(toObjectErr(e, bucket))
if err := xl.storage.MakeVol(bucket); err != nil {
return toObjectErr(err, bucket)
}
// This happens for the first time, but keep this here since this
// is the only place where it can be made expensive optimizing all
// other calls.
// Create minio meta volume, if it doesn't exist yet.
if e := o.storage.MakeVol(minioMetaVolume); e != nil {
if e != errVolumeExists {
return probe.NewError(toObjectErr(e, minioMetaVolume))
if err := xl.storage.MakeVol(minioMetaVolume); err != nil {
if err != errVolumeExists {
return toObjectErr(err, minioMetaVolume)
}
}
return nil
}

// GetBucketInfo - get bucket info.
func (o objectAPI) GetBucketInfo(bucket string) (BucketInfo, *probe.Error) {
func (xl xlObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return BucketInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
}
vi, e := o.storage.StatVol(bucket)
if e != nil {
return BucketInfo{}, probe.NewError(toObjectErr(e, bucket))
vi, err := xl.storage.StatVol(bucket)
if err != nil {
return BucketInfo{}, toObjectErr(err, bucket)
}
return BucketInfo{
Name: bucket,
@ -96,19 +100,12 @@ func (o objectAPI) GetBucketInfo(bucket string) (BucketInfo, *probe.Error) {
}, nil
}

// byBucketName is a collection satisfying sort.Interface.
type byBucketName []BucketInfo

func (d byBucketName) Len() int { return len(d) }
func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name }

// ListBuckets - list buckets.
func (o objectAPI) ListBuckets() ([]BucketInfo, *probe.Error) {
func (xl xlObjects) ListBuckets() ([]BucketInfo, error) {
var bucketInfos []BucketInfo
vols, e := o.storage.ListVols()
if e != nil {
return nil, probe.NewError(toObjectErr(e))
vols, err := xl.storage.ListVols()
if err != nil {
return nil, toObjectErr(err)
}
for _, vol := range vols {
// StorageAPI can send volume names which are incompatible
@ -128,13 +125,13 @@ func (o objectAPI) ListBuckets() ([]BucketInfo, *probe.Error) {
}

// DeleteBucket - delete a bucket.
func (o objectAPI) DeleteBucket(bucket string) *probe.Error {
func (xl xlObjects) DeleteBucket(bucket string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
return BucketNameInvalid{Bucket: bucket}
}
if e := o.storage.DeleteVol(bucket); e != nil {
return probe.NewError(toObjectErr(e))
if err := xl.storage.DeleteVol(bucket); err != nil {
return toObjectErr(err)
}
return nil
}
@ -142,12 +139,12 @@ func (o objectAPI) DeleteBucket(bucket string) *probe.Error {
/// Object Operations

// GetObject - get an object.
func (o objectAPI) GetObject(bucket, object string, startOffset int64) (io.ReadCloser, *probe.Error) {
func (xl xlObjects) GetObject(bucket, object string, startOffset int64) (io.ReadCloser, error) {
findPathOffset := func() (i int, partOffset int64, err error) {
partOffset = startOffset
for i = 1; i < 10000; i++ {
var fileInfo FileInfo
fileInfo, err = o.storage.StatFile(bucket, pathJoin(object, fmt.Sprint(i)))
fileInfo, err = xl.storage.StatFile(bucket, pathJoin(object, fmt.Sprint(i)))
if err != nil {
if err == errFileNotFound {
continue
@ -167,36 +164,31 @@ func (o objectAPI) GetObject(bucket, object string, startOffset int64) (io.ReadC

// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
return nil, BucketNameInvalid{Bucket: bucket}
}
// Verify if object is valid.
if !IsValidObjectName(object) {
return nil, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
return nil, ObjectNameInvalid{Bucket: bucket, Object: object}
}
_, err := o.storage.StatFile(bucket, object)
if err == nil {
fmt.Println("1", err)
r, e := o.storage.ReadFile(bucket, object, startOffset)
if e != nil {
fmt.Println("1.5", err)
return nil, probe.NewError(toObjectErr(e, bucket, object))
}
return r, nil
}
_, err = o.storage.StatFile(bucket, pathJoin(object, multipartMetaFile))
if _, err := xl.storage.StatFile(bucket, pathJoin(object, multipartMetaFile)); err != nil {
if _, err = xl.storage.StatFile(bucket, object); err == nil {
var reader io.ReadCloser
reader, err = xl.storage.ReadFile(bucket, object, startOffset)
if err != nil {
fmt.Println("2", err)
return nil, probe.NewError(toObjectErr(err, bucket, object))
return nil, toObjectErr(err, bucket, object)
}
return reader, nil
}
return nil, toObjectErr(err, bucket, object)
}
fileReader, fileWriter := io.Pipe()
partNum, offset, err := findPathOffset()
if err != nil {
fmt.Println("3", err)
return nil, probe.NewError(toObjectErr(err, bucket, object))
return nil, toObjectErr(err, bucket, object)
}
go func() {
for ; partNum < 10000; partNum++ {
r, err := o.storage.ReadFile(bucket, pathJoin(object, fmt.Sprint(partNum)), offset)
r, err := xl.storage.ReadFile(bucket, pathJoin(object, fmt.Sprint(partNum)), offset)
if err != nil {
if err == errFileNotFound {
continue
@ -215,10 +207,10 @@ func (o objectAPI) GetObject(bucket, object string, startOffset int64) (io.ReadC
}

// GetObjectInfo - get object info.
func (o objectAPI) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error) {
func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
getMultpartFileSize := func() (size int64) {
for i := 0; i < 10000; i++ {
fi, err := o.storage.StatFile(bucket, pathJoin(object, fmt.Sprint(i)))
fi, err := xl.storage.StatFile(bucket, pathJoin(object, fmt.Sprint(i)))
if err != nil {
continue
}
@ -228,17 +220,17 @@ func (o objectAPI) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Erro
}
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
return ObjectInfo{}, BucketNameInvalid{Bucket: bucket}
}
// Verify if object is valid.
if !IsValidObjectName(object) {
return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
return ObjectInfo{}, ObjectNameInvalid{Bucket: bucket, Object: object}
}
fi, e := o.storage.StatFile(bucket, object)
if e != nil {
fi, e = o.storage.StatFile(bucket, pathJoin(object, multipartMetaFile))
if e != nil {
return ObjectInfo{}, probe.NewError(toObjectErr(e, bucket, object))
fi, err := xl.storage.StatFile(bucket, object)
if err != nil {
fi, err = xl.storage.StatFile(bucket, pathJoin(object, multipartMetaFile))
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
fi.Size = getMultpartFileSize()
}
@ -260,44 +252,29 @@ func (o objectAPI) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Erro
}, nil
}

// safeCloseAndRemove - safely closes and removes underlying temporary
// file writer if possible.
func safeCloseAndRemove(writer io.WriteCloser) error {
// If writer is a safe file, attempt to close and remove.
safeWriter, ok := writer.(*safe.File)
if ok {
return safeWriter.CloseAndRemove()
}
pipeWriter, ok := writer.(*io.PipeWriter)
if ok {
return pipeWriter.CloseWithError(errors.New("Close and error out."))
}
return nil
}

func (o objectAPI) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (string, *probe.Error) {
func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (string, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
return "", (BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return "", probe.NewError(ObjectNameInvalid{
return "", (ObjectNameInvalid{
Bucket: bucket,
Object: object,
})
}
// Check whether the bucket exists.
isExist, err := o.isBucketExist(bucket)
isExist, err := xl.isBucketExist(bucket)
if err != nil {
return "", probe.NewError(err)
return "", err
}
if !isExist {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
return "", BucketNotFound{Bucket: bucket}
}

fileWriter, e := o.storage.CreateFile(bucket, object)
if e != nil {
return "", probe.NewError(toObjectErr(e, bucket, object))
fileWriter, err := xl.storage.CreateFile(bucket, object)
if err != nil {
return "", toObjectErr(err, bucket, object)
}

// Initialize md5 writer.
@ -308,18 +285,18 @@ func (o objectAPI) PutObject(bucket string, object string, size int64, data io.R

// Instantiate checksum hashers and create a multiwriter.
if size > 0 {
if _, e = io.CopyN(multiWriter, data, size); e != nil {
if _, err = io.CopyN(multiWriter, data, size); err != nil {
if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
return "", probe.NewError(clErr)
return "", clErr
}
return "", probe.NewError(toObjectErr(e))
return "", toObjectErr(err)
}
} else {
if _, e = io.Copy(multiWriter, data); e != nil {
if _, err = io.Copy(multiWriter, data); err != nil {
if clErr := safeCloseAndRemove(fileWriter); clErr != nil {
return "", probe.NewError(clErr)
return "", clErr
}
return "", probe.NewError(e)
return "", err
}
}

@ -331,56 +308,56 @@ func (o objectAPI) PutObject(bucket string, object string, size int64, data io.R
}
if md5Hex != "" {
if newMD5Hex != md5Hex {
if e = safeCloseAndRemove(fileWriter); e != nil {
return "", probe.NewError(e)
if err = safeCloseAndRemove(fileWriter); err != nil {
return "", err
}
return "", probe.NewError(BadDigest{md5Hex, newMD5Hex})
return "", BadDigest{md5Hex, newMD5Hex}
}
}
e = fileWriter.Close()
if e != nil {
return "", probe.NewError(e)
err = fileWriter.Close()
if err != nil {
return "", err
}

// Return md5sum, successfully wrote object.
return newMD5Hex, nil
}

func (o objectAPI) DeleteObject(bucket, object string) *probe.Error {
func (xl xlObjects) DeleteObject(bucket, object string) error {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
return BucketNameInvalid{Bucket: bucket}
}
if !IsValidObjectName(object) {
return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
return ObjectNameInvalid{Bucket: bucket, Object: object}
}
if e := o.storage.DeleteFile(bucket, object); e != nil {
return probe.NewError(toObjectErr(e, bucket, object))
if err := xl.storage.DeleteFile(bucket, object); err != nil {
return toObjectErr(err, bucket, object)
}
return nil
}

func (o objectAPI) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error) {
func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return ListObjectsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
return ListObjectsInfo{}, BucketNameInvalid{Bucket: bucket}
}
if !IsValidObjectPrefix(prefix) {
return ListObjectsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
return ListObjectsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix}
}
// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator {
return ListObjectsInfo{}, probe.NewError(UnsupportedDelimiter{
return ListObjectsInfo{}, UnsupportedDelimiter{
Delimiter: delimiter,
})
}
}
// Verify if marker has prefix.
if marker != "" {
if !strings.HasPrefix(marker, prefix) {
return ListObjectsInfo{}, probe.NewError(InvalidMarkerPrefixCombination{
return ListObjectsInfo{}, InvalidMarkerPrefixCombination{
Marker: marker,
Prefix: prefix,
})
}
}
}

@ -389,9 +366,9 @@ func (o objectAPI) ListObjects(bucket, prefix, marker, delimiter string, maxKeys
if delimiter == slashSeparator {
recursive = false
}
fileInfos, eof, e := o.storage.ListFiles(bucket, prefix, marker, recursive, maxKeys)
if e != nil {
return ListObjectsInfo{}, probe.NewError(toObjectErr(e, bucket))
fileInfos, eof, err := xl.storage.ListFiles(bucket, prefix, marker, recursive, maxKeys)
if err != nil {
return ListObjectsInfo{}, toObjectErr(err, bucket)
}
if maxKeys == 0 {
return ListObjectsInfo{}, nil
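Finally, ListObjects is marker-paginated, which is exactly how the web handler earlier in this diff consumes it (marker = lo.NextMarker). A hedged driver sketch; the IsTruncated and Name fields are assumed by analogy with ListPartsInfo and are not shown in this diff:

// Hypothetical pagination over a bucket, 1000 keys per page.
func printObjects(xl xlObjects) error {
	marker := ""
	for {
		lo, err := xl.ListObjects("photos", "2016/", marker, "/", 1000)
		if err != nil {
			return err
		}
		for _, obj := range lo.Objects {
			fmt.Println(obj.Name) // ObjectInfo's name field is assumed here
		}
		if !lo.IsTruncated { // IsTruncated assumed by analogy
			return nil
		}
		marker = lo.NextMarker
	}
}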