Merge pull request #2657 from minio/distributed

Distributed XL support
Harshavardhana 2016-09-13 22:34:49 -07:00 committed by GitHub
commit 16e4a7c200
125 changed files with 9871 additions and 1410 deletions

View File

@@ -134,6 +134,7 @@ const (
 ErrObjectExistsAsDirectory
 ErrPolicyNesting
 ErrInvalidObjectName
+ErrServerNotInitialized
 // Add new extended error codes here.
 // Please open a https://github.com/minio/minio/issues before adding
 // new error codes here.
@@ -454,7 +455,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
 Description: "Request is not valid yet",
 HTTPStatusCode: http.StatusForbidden,
 },
-// FIXME: Actual XML error response also contains the header which missed in lsit of signed header parameters.
+// FIXME: Actual XML error response also contains the header which missed in list of signed header parameters.
 ErrUnsignedHeaders: {
 Code: "AccessDenied",
 Description: "There were headers present in the request which were not signed",
@@ -556,6 +557,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
 Description: "Object name contains unsupported characters. Unsupported characters are `^*|\\\"",
 HTTPStatusCode: http.StatusBadRequest,
 },
+ErrServerNotInitialized: {
+Code: "XMinioServerNotInitialized",
+Description: "Server not initialized, please try again.",
+HTTPStatusCode: http.StatusServiceUnavailable,
+},
 // Add your error structure here.
 }
@@ -566,6 +572,7 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
 if err == nil {
 return ErrNone
 }
+err = errorCause(err)
 // Verify if the underlying error is signature mismatch.
 switch err {
 case errSignatureMismatch:
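
Note: toAPIErrorCode now unwraps with errorCause before the switch, because the distributed XL changes wrap object-layer errors with trace context. A minimal sketch of what such a wrapper and errorCause could look like, assuming a trace-carrying error type; the type name and fields are illustrative, not from this commit:

// Sketch only: a traced-error wrapper whose cause errorCause recovers.
type Error struct {
	e     error    // Underlying cause.
	trace []string // Call-stack annotations (details elided).
}

func (e *Error) Error() string { return e.e.Error() }

// errorCause returns the underlying cause so callers can compare against
// sentinel errors such as errSignatureMismatch.
func errorCause(err error) error {
	if e, ok := err.(*Error); ok {
		return e.e
	}
	return err
}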

View File

@@ -24,10 +24,10 @@ import (
 )
 const (
-timeFormatAMZ = "2006-01-02T15:04:05.000Z" // Reply date format
+timeFormatAMZ = "2006-01-02T15:04:05Z" // Reply date format
 maxObjectList = 1000 // Limit number of objects in a listObjectsResponse.
 maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse.
 maxPartsList = 1000 // Limit number of parts in a listPartsResponse.
 )
 // LocationResponse - format for location response.

View File

@@ -20,7 +20,7 @@ import router "github.com/gorilla/mux"
 // objectAPIHandler implements and provides http handlers for S3 API.
 type objectAPIHandlers struct {
-ObjectAPI ObjectLayer
+ObjectAPI func() ObjectLayer
 }
 // registerAPIRouter - registers S3 compatible APIs.

cmd/auth-rpc-client.go Normal file (163 lines added)
View File

@@ -0,0 +1,163 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net/rpc"
"time"
jwtgo "github.com/dgrijalva/jwt-go"
)
// GenericReply represents any generic RPC reply.
type GenericReply struct{}
// GenericArgs represents any generic RPC arguments.
type GenericArgs struct {
Token string // Used to authenticate every RPC call.
Timestamp time.Time // Used to verify if the RPC call was issued between the same Login() and disconnect event pair.
}
// SetToken - sets the token to the supplied value.
func (ga *GenericArgs) SetToken(token string) {
ga.Token = token
}
// SetTimestamp - sets the timestamp to the supplied value.
func (ga *GenericArgs) SetTimestamp(tstamp time.Time) {
ga.Timestamp = tstamp
}
// RPCLoginArgs - login username and password for RPC.
type RPCLoginArgs struct {
Username string
Password string
}
// RPCLoginReply - login reply provides generated token to be used
// with subsequent requests.
type RPCLoginReply struct {
Token string
ServerVersion string
Timestamp time.Time
}
// Validates if incoming token is valid.
func isRPCTokenValid(tokenStr string) bool {
jwt, err := newJWT(defaultTokenExpiry) // Expiry set to 100yrs.
if err != nil {
errorIf(err, "Unable to initialize JWT")
return false
}
token, err := jwtgo.Parse(tokenStr, func(token *jwtgo.Token) (interface{}, error) {
if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return []byte(jwt.SecretAccessKey), nil
})
if err != nil {
errorIf(err, "Unable to parse JWT token string")
return false
}
// Return if token is valid.
return token.Valid
}
// Auth config represents authentication credentials and Login method name to be used
// for fetching JWT tokens from the RPC server.
type authConfig struct {
accessKey string // Username for the server.
secretKey string // Password for the server.
address string // Network address path of RPC server.
path string // Network path for HTTP dial.
loginMethod string // RPC service name for authenticating using JWT
}
// AuthRPCClient is a wrapper type for RPCClient which provides JWT based authentication across reconnects.
type AuthRPCClient struct {
config *authConfig
rpc *RPCClient // reconnect'able rpc client built on top of net/rpc Client
isLoggedIn bool // Indicates if the auth client has been logged in and token is valid.
token string // JWT based token
tstamp time.Time // Timestamp as received on Login RPC.
}
// newAuthClient - returns a jwt based authenticated (go) rpc client, which does automatic reconnect.
func newAuthClient(cfg *authConfig) *AuthRPCClient {
return &AuthRPCClient{
// Save the config.
config: cfg,
// Initialize a new reconnectable rpc client.
rpc: newClient(cfg.address, cfg.path),
// Allocated auth client not logged in yet.
isLoggedIn: false,
}
}
// Close - closes underlying rpc connection.
func (authClient *AuthRPCClient) Close() error {
// reset token on closing a connection
authClient.isLoggedIn = false
return authClient.rpc.Close()
}
// Login - a jwt based authentication is performed with rpc server.
func (authClient *AuthRPCClient) Login() error {
// Return if already logged in.
if authClient.isLoggedIn {
return nil
}
reply := RPCLoginReply{}
if err := authClient.rpc.Call(authClient.config.loginMethod, RPCLoginArgs{
Username: authClient.config.accessKey,
Password: authClient.config.secretKey,
}, &reply); err != nil {
return err
}
// Set token, time stamp as received from a successful login call.
authClient.token = reply.Token
authClient.tstamp = reply.Timestamp
authClient.isLoggedIn = true
return nil
}
// Call - If rpc connection isn't established yet since previous disconnect,
// connection is established, a jwt authenticated login is performed and then
// the call is performed.
func (authClient *AuthRPCClient) Call(serviceMethod string, args interface {
SetToken(token string)
SetTimestamp(tstamp time.Time)
}, reply interface{}) (err error) {
// On successful login, attempt the call.
if err = authClient.Login(); err == nil {
// Set token and timestamp before the rpc call.
args.SetToken(authClient.token)
args.SetTimestamp(authClient.tstamp)
// Call the underlying rpc.
err = authClient.rpc.Call(serviceMethod, args, reply)
// Invalidate token to mark for re-login on subsequent reconnect.
if err != nil {
if err.Error() == rpc.ErrShutdown.Error() {
authClient.isLoggedIn = false
}
}
}
return err
}
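
Note: a short usage sketch for the client above, assuming illustrative endpoint and RPC method names ("Storage.LoginHandler" and "Storage.StatHandler" are placeholders, not from this commit). Any args type embedding GenericArgs satisfies the SetToken/SetTimestamp interface that Call expects:

// Sketch only: composing authConfig, newAuthClient and Call.
func exampleAuthRPCUsage() error {
	client := newAuthClient(&authConfig{
		accessKey:   "minio",                // Assumed credentials.
		secretKey:   "miniostorage",
		address:     "localhost:9000",       // Assumed server address.
		path:        "/minio/rpc/storage",   // Assumed HTTP dial path.
		loginMethod: "Storage.LoginHandler", // Assumed login RPC name.
	})
	defer client.Close()

	// *GenericArgs implements SetToken and SetTimestamp, so Call performs
	// a lazy Login and stamps token and timestamp before dialing out.
	args := GenericArgs{}
	reply := GenericReply{}
	return client.Call("Storage.StatHandler", &args, &reply)
}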

View File

@@ -28,6 +28,20 @@ import (
 "time"
 )
+// Prepare benchmark backend
+func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
+nDisks := 16
+disks, err := getRandomDisks(nDisks)
+if err != nil {
+return nil, nil, err
+}
+obj, err := makeTestBackend(disks, instanceType)
+if err != nil {
+return nil, nil, err
+}
+return obj, disks, nil
+}
 // Benchmark utility functions for ObjectLayer.PutObject().
 // Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
 func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
@@ -40,9 +54,6 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 b.Fatal(err)
 }
-// PutObject returns md5Sum of the object inserted.
-// md5Sum variable is assigned with that value.
-var md5Sum string
 // get text data generated for number of bytes equal to object size.
 textData := generateBytesData(objSize)
 // generate md5sum for the generated data.
@@ -57,12 +68,12 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 b.ResetTimer()
 for i := 0; i < b.N; i++ {
 // insert the object.
-md5Sum, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata)
+objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata)
 if err != nil {
 b.Fatal(err)
 }
-if md5Sum != metadata["md5Sum"] {
-b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, md5Sum, metadata["md5Sum"])
+if objInfo.MD5Sum != metadata["md5Sum"] {
+b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.MD5Sum, metadata["md5Sum"])
 }
 }
 // Benchmark ends here. Stop timer.
@@ -135,7 +146,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 // creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
 func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
 // create a temp XL/FS backend.
-objLayer, disks, err := makeTestBackend(instanceType)
+objLayer, disks, err := prepareBenchmarkBackend(instanceType)
 if err != nil {
 b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
 }
@@ -148,7 +159,7 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
 // creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
 func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
 // create a temp XL/FS backend.
-objLayer, disks, err := makeTestBackend(instanceType)
+objLayer, disks, err := prepareBenchmarkBackend(instanceType)
 if err != nil {
 b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
 }
@@ -161,7 +172,7 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
 // creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
 func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
 // create a temp XL/FS backend.
-objLayer, disks, err := makeTestBackend(instanceType)
+objLayer, disks, err := prepareBenchmarkBackend(instanceType)
 if err != nil {
 b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
 }
@@ -183,9 +194,6 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 b.Fatal(err)
 }
-// PutObject returns md5Sum of the object inserted.
-// md5Sum variable is assigned with that value.
-var md5Sum string
 for i := 0; i < 10; i++ {
 // get text data generated for number of bytes equal to object size.
 textData := generateBytesData(objSize)
@@ -197,12 +205,13 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 metadata := make(map[string]string)
 metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil))
 // insert the object.
-md5Sum, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata)
+var objInfo ObjectInfo
+objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata)
 if err != nil {
 b.Fatal(err)
 }
-if md5Sum != metadata["md5Sum"] {
-b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, md5Sum, metadata["md5Sum"])
+if objInfo.MD5Sum != metadata["md5Sum"] {
+b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.MD5Sum, metadata["md5Sum"])
 }
 }
@@ -242,7 +251,7 @@ func generateBytesData(size int) []byte {
 // creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
 func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
 // create a temp XL/FS backend.
-objLayer, disks, err := makeTestBackend(instanceType)
+objLayer, disks, err := prepareBenchmarkBackend(instanceType)
 if err != nil {
 b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
 }
@@ -255,7 +264,7 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
 // creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
 func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
 // create a temp XL/FS backend.
-objLayer, disks, err := makeTestBackend(instanceType)
+objLayer, disks, err := prepareBenchmarkBackend(instanceType)
 if err != nil {
 b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
 }
@@ -277,9 +286,6 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 b.Fatal(err)
 }
-// PutObject returns md5Sum of the object inserted.
-// md5Sum variable is assigned with that value.
-var md5Sum string
 // get text data generated for number of bytes equal to object size.
 textData := generateBytesData(objSize)
 // generate md5sum for the generated data.
@@ -297,12 +303,12 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 i := 0
 for pb.Next() {
 // insert the object.
-md5Sum, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata)
+objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata)
 if err != nil {
 b.Fatal(err)
 }
-if md5Sum != metadata["md5Sum"] {
-b.Fatalf("Write no: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", md5Sum, metadata["md5Sum"])
+if objInfo.MD5Sum != metadata["md5Sum"] {
+b.Fatalf("Write no: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", objInfo.MD5Sum, metadata["md5Sum"])
 }
 i++
 }
@@ -324,9 +330,6 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 b.Fatal(err)
 }
-// PutObject returns md5Sum of the object inserted.
-// md5Sum variable is assigned with that value.
-var md5Sum string
 for i := 0; i < 10; i++ {
 // get text data generated for number of bytes equal to object size.
 textData := generateBytesData(objSize)
@@ -338,12 +341,13 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 metadata := make(map[string]string)
 metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil))
 // insert the object.
-md5Sum, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata)
+var objInfo ObjectInfo
+objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata)
 if err != nil {
 b.Fatal(err)
 }
-if md5Sum != metadata["md5Sum"] {
-b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, md5Sum, metadata["md5Sum"])
+if objInfo.MD5Sum != metadata["md5Sum"] {
+b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.MD5Sum, metadata["md5Sum"])
 }
 }
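
Note: the common thread in these benchmark edits is an interface change: ObjectLayer.PutObject now returns an ObjectInfo instead of the bare md5Sum string. A sketch of the implied shapes, assuming io and time are imported; only the MD5Sum field is confirmed by the call sites above, the other fields are assumptions:

// Sketch only: the types implied by the updated call sites.
type ObjectInfo struct {
	Bucket  string
	Name    string
	ModTime time.Time
	Size    int64
	MD5Sum  string
}

type ObjectLayer interface {
	// ... other methods elided ...
	PutObject(bucket, object string, size int64, data io.Reader,
		metadata map[string]string) (ObjectInfo, error)
}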

View File

@@ -64,6 +64,12 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
 vars := mux.Vars(r)
 bucket := vars["bucket"]
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 switch getRequestAuthType(r) {
 default:
 // For all unknown auth types return error.
@@ -100,7 +106,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
 // Inititate a list objects operation based on the input params.
 // On success would return back ListObjectsInfo object to be
 // marshalled into S3 compatible XML header.
-listObjectsInfo, err := api.ObjectAPI.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
+listObjectsInfo, err := objectAPI.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
 if err != nil {
 errorIf(err, "Unable to list objects.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@@ -124,6 +130,12 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
 vars := mux.Vars(r)
 bucket := vars["bucket"]
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 switch getRequestAuthType(r) {
 default:
 // For all unknown auth types return error.
@@ -154,7 +166,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
 // Inititate a list objects operation based on the input params.
 // On success would return back ListObjectsInfo object to be
 // marshalled into S3 compatible XML header.
-listObjectsInfo, err := api.ObjectAPI.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
+listObjectsInfo, err := objectAPI.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
 if err != nil {
 errorIf(err, "Unable to list objects.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
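
Note: this nil check recurs in every handler in this commit. Since a distributed setup can accept HTTP connections before all nodes agree on the backend format, the router now holds a func() ObjectLayer getter instead of a concrete value, and handlers answer XMinioServerNotInitialized (HTTP 503) until it yields one. A sketch of one plausible backing for the getter; the names are assumptions, not from this diff:

// Sketch only: a lock-guarded global that stays nil until the
// distributed backend finishes initializing.
var globalObjLayerMutex sync.RWMutex
var globalObjectAPI ObjectLayer

func newObjectLayerFn() ObjectLayer {
	globalObjLayerMutex.RLock()
	defer globalObjLayerMutex.RUnlock()
	return globalObjectAPI
}

// Registration would then wire the getter, not a value:
//   apiHandlers := objectAPIHandlers{ObjectAPI: newObjectLayerFn}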

View File

@@ -21,7 +21,6 @@ import (
 "io"
 "net/http"
 "net/url"
-"path"
 "strings"
 "sync"
@@ -64,6 +63,12 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
 vars := mux.Vars(r)
 bucket := vars["bucket"]
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 switch getRequestAuthType(r) {
 default:
 // For all unknown auth types return error.
@@ -82,7 +87,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
 }
 }
-if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
+if _, err := objectAPI.GetBucketInfo(bucket); err != nil {
 errorIf(err, "Unable to fetch bucket info.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
 return
@@ -113,6 +118,12 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
 vars := mux.Vars(r)
 bucket := vars["bucket"]
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 switch getRequestAuthType(r) {
 default:
 // For all unknown auth types return error.
@@ -144,7 +155,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
 }
 }
-listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
+listMultipartsInfo, err := objectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 if err != nil {
 errorIf(err, "Unable to list multipart uploads.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@@ -164,13 +175,20 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
 // This implementation of the GET operation returns a list of all buckets
 // owned by the authenticated sender of the request.
 func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 // List buckets does not support bucket policies, no need to enforce it.
 if s3Error := checkAuth(r); s3Error != ErrNone {
 writeErrorResponse(w, r, s3Error, r.URL.Path)
 return
 }
-bucketsInfo, err := api.ObjectAPI.ListBuckets()
+// Invoke the list buckets.
+bucketsInfo, err := objectAPI.ListBuckets()
 if err != nil {
 errorIf(err, "Unable to list buckets.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@@ -191,6 +209,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 vars := mux.Vars(r)
 bucket := vars["bucket"]
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 switch getRequestAuthType(r) {
 default:
 // For all unknown auth types return error.
@@ -249,7 +273,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 wg.Add(1)
 go func(i int, obj ObjectIdentifier) {
 defer wg.Done()
-dErr := api.ObjectAPI.DeleteObject(bucket, obj.ObjectName)
+dErr := objectAPI.DeleteObject(bucket, obj.ObjectName)
 if dErr != nil {
 dErrs[i] = dErr
 }
@@ -267,7 +291,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 deletedObjects = append(deletedObjects, object)
 continue
 }
-if _, ok := err.(ObjectNotFound); ok {
+if _, ok := errorCause(err).(ObjectNotFound); ok {
 // If the object is not found it should be
 // accounted as deleted as per S3 spec.
 deletedObjects = append(deletedObjects, object)
@@ -311,6 +335,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 // ----------
 // This implementation of the PUT operation creates a new bucket for authenticated request
 func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 // PutBucket does not support policies, use checkAuth to validate signature.
 if s3Error := checkAuth(r); s3Error != ErrNone {
 writeErrorResponse(w, r, s3Error, r.URL.Path)
@@ -328,7 +358,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 }
 // Proceed to creating a bucket.
-err := api.ObjectAPI.MakeBucket(bucket)
+err := objectAPI.MakeBucket(bucket)
 if err != nil {
 errorIf(err, "Unable to create a bucket.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@@ -344,6 +374,12 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 // This implementation of the POST operation handles object creation with a specified
 // signature policy in multipart/form-data
 func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 // Here the parameter is the size of the form data that should
 // be loaded in memory, the remaining being put in temporary files.
 reader, err := r.MultipartReader()
@@ -384,17 +420,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 metadata := make(map[string]string)
 // Nothing to store right now.
-md5Sum, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, metadata)
+objInfo, err := objectAPI.PutObject(bucket, object, -1, fileBody, metadata)
 if err != nil {
 errorIf(err, "Unable to create object.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
 return
 }
-if md5Sum != "" {
-w.Header().Set("ETag", "\""+md5Sum+"\"")
-}
-// TODO full URL is preferred.
+w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
 w.Header().Set("Location", getObjectLocation(bucket, object))
 // Set common headers.
@@ -404,13 +436,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 writeSuccessNoContent(w)
 if globalEventNotifier.IsBucketNotificationSet(bucket) {
-// Fetch object info for notifications.
-objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
-if err != nil {
-errorIf(err, "Unable to fetch object info for \"%s\"", path.Join(bucket, object))
-return
-}
 // Notify object created event.
 eventNotify(eventData{
 Type: ObjectCreatedPost,
@@ -433,6 +458,12 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
 vars := mux.Vars(r)
 bucket := vars["bucket"]
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 switch getRequestAuthType(r) {
 default:
 // For all unknown auth types return error.
@@ -451,7 +482,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
 }
 }
-if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
+if _, err := objectAPI.GetBucketInfo(bucket); err != nil {
 errorIf(err, "Unable to fetch bucket info.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
 return
@@ -461,6 +492,12 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
 // DeleteBucketHandler - Delete bucket
 func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 // DeleteBucket does not support bucket policies, use checkAuth to validate signature.
 if s3Error := checkAuth(r); s3Error != ErrNone {
 writeErrorResponse(w, r, s3Error, r.URL.Path)
@@ -471,17 +508,17 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 bucket := vars["bucket"]
 // Attempt to delete bucket.
-if err := api.ObjectAPI.DeleteBucket(bucket); err != nil {
+if err := objectAPI.DeleteBucket(bucket); err != nil {
 errorIf(err, "Unable to delete a bucket.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
 return
 }
 // Delete bucket access policy, if present - ignore any errors.
-removeBucketPolicy(bucket, api.ObjectAPI)
+removeBucketPolicy(bucket, objectAPI)
 // Delete notification config, if present - ignore any errors.
-removeNotificationConfig(bucket, api.ObjectAPI)
+removeNotificationConfig(bucket, objectAPI)
 // Write success response.
 writeSuccessNoContent(w)

cmd/bucket-handlers_test.go Normal file (302 lines added)
View File

@@ -0,0 +1,302 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"encoding/xml"
"net/http"
"net/http/httptest"
"testing"
)
// Wrapper for calling GetBucketLocation HTTP handler tests for both XL multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) {
ExecObjectLayerTest(t, testGetBucketLocationHandler)
}
func testGetBucketLocationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
initBucketPolicies(obj)
// get random bucket name.
bucketName := getRandomBucketName()
// Create bucket.
err := obj.MakeBucket(bucketName)
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
}
// Register the API end points with XL/FS object layer.
apiRouter := initTestAPIEndPoints(obj, []string{"GetBucketLocation"})
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root folder after the test ends.
defer removeAll(rootPath)
credentials := serverConfig.GetCredential()
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
locationResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// Tests for authenticated request and proper response.
{
bucketName,
credentials.AccessKeyID,
credentials.SecretAccessKey,
http.StatusOK,
[]byte(`<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></LocationConstraint>`),
APIErrorResponse{},
true,
},
// Tests for anonymous requests.
{
bucketName,
"",
"",
http.StatusForbidden,
[]byte(""),
APIErrorResponse{
Resource: "/" + bucketName + "/",
Code: "AccessDenied",
Message: "Access Denied.",
},
false,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for Get bucket location.
req, err := newTestSignedRequest("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
if !bytes.Equal(testCase.locationResponse, rec.Body.Bytes()) && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), string(rec.Body.Bytes()))
}
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, string(rec.Body.Bytes()))
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
}
}
// Wrapper for calling HeadBucket HTTP handler tests for both XL multiple disks and single node setup.
func TestHeadBucketHandler(t *testing.T) {
ExecObjectLayerTest(t, testHeadBucketHandler)
}
func testHeadBucketHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
initBucketPolicies(obj)
// get random bucket name.
bucketName := getRandomBucketName()
// Create bucket.
err := obj.MakeBucket(bucketName)
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
}
// Register the API end points with XL/FS object layer.
apiRouter := initTestAPIEndPoints(obj, []string{"HeadBucket"})
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root folder after the test ends.
defer removeAll(rootPath)
credentials := serverConfig.GetCredential()
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
}{
// Bucket exists.
{
bucketName: bucketName,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
expectedRespStatus: http.StatusOK,
},
// Non-existent bucket name.
{
bucketName: "2333",
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
expectedRespStatus: http.StatusNotFound,
},
// Un-authenticated request.
{
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for HEAD bucket.
req, err := newTestSignedRequest("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
}
}
// Wrapper for calling TestListMultipartUploadsHandler tests for both XL multiple disks and single node setup.
func TestListMultipartUploadsHandler(t *testing.T) {
ExecObjectLayerTest(t, testListMultipartUploadsHandler)
}
// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
func testListMultipartUploadsHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
initBucketPolicies(obj)
// get random bucket name.
bucketName := getRandomBucketName()
// Register the API end points with XL/FS object layer.
apiRouter := initTestAPIEndPoints(obj, []string{"ListMultipartUploads"})
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root folder after the test ends.
defer removeAll(rootPath)
credentials := serverConfig.GetCredential()
// bucketnames[0].
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucket(bucketName)
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
// and success responses.
testCases := []struct {
// Inputs to ListMultipartUploads.
bucket string
prefix string
keyMarker string
uploadIDMarker string
delimiter string
maxUploads string
expectedRespStatus int
shouldPass bool
}{
// 1 - invalid bucket name.
{".test", "", "", "", "", "0", http.StatusBadRequest, false},
// 2 - bucket not found.
{"volatile-bucket-1", "", "", "", "", "0", http.StatusNotFound, false},
// 3 - invalid delimiter.
{bucketName, "", "", "", "-", "0", http.StatusBadRequest, false},
// 4 - invalid prefix and marker combination.
{bucketName, "asia", "europe-object", "", "", "0", http.StatusNotImplemented, false},
// 5 - invalid upload id and marker combination.
{bucketName, "asia", "asia/europe/", "abc", "", "0", http.StatusBadRequest, false},
// 6 - invalid max upload id.
{bucketName, "", "", "", "", "-1", http.StatusBadRequest, false},
// 7 - good case delimiter.
{bucketName, "", "", "", "/", "100", http.StatusOK, true},
// 8 - good case without delimiter.
{bucketName, "", "", "", "", "100", http.StatusOK, true},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
req, gerr := newTestSignedRequest("GET", u, 0, nil, credentials.AccessKeyID, credentials.SecretAccessKey)
if gerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
}
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
req, err := newTestSignedRequest("GET", u, 0, nil, "", "") // Generate an anonymous request.
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != http.StatusForbidden {
t.Errorf("Test %s: Expected the response status to be `http.StatusForbidden`, but instead found `%d`", instanceType, rec.Code)
}
}

View File

@@ -39,6 +39,12 @@ const (
 // not enabled on the bucket, the operation returns an empty
 // NotificationConfiguration element.
 func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
+objAPI := api.ObjectAPI()
+if objAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 // Validate request authorization.
 if s3Error := checkAuth(r); s3Error != ErrNone {
 writeErrorResponse(w, r, s3Error, r.URL.Path)
@@ -47,7 +53,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
 vars := mux.Vars(r)
 bucket := vars["bucket"]
 // Attempt to successfully load notification config.
-nConfig, err := loadNotificationConfig(bucket, api.ObjectAPI)
+nConfig, err := loadNotificationConfig(bucket, objAPI)
 if err != nil && err != errNoSuchNotifications {
 errorIf(err, "Unable to read notification configuration.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@@ -78,6 +84,12 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
 // By default, your bucket has no event notifications configured. That is,
 // the notification configuration will be an empty NotificationConfiguration.
 func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
+objectAPI := api.ObjectAPI()
+if objectAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 // Validate request authorization.
 if s3Error := checkAuth(r); s3Error != ErrNone {
 writeErrorResponse(w, r, s3Error, r.URL.Path)
@@ -86,7 +98,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
 vars := mux.Vars(r)
 bucket := vars["bucket"]
-_, err := api.ObjectAPI.GetBucketInfo(bucket)
+_, err := objectAPI.GetBucketInfo(bucket)
 if err != nil {
 errorIf(err, "Unable to find bucket info.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@@ -133,7 +145,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
 // Proceed to save notification configuration.
 notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
-_, err = api.ObjectAPI.PutObject(minioMetaBucket, notificationConfigPath, bufferSize, bytes.NewReader(buffer.Bytes()), nil)
+_, err = objectAPI.PutObject(minioMetaBucket, notificationConfigPath, bufferSize, bytes.NewReader(buffer.Bytes()), nil)
 if err != nil {
 errorIf(err, "Unable to write bucket notification configuration.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@@ -204,6 +216,13 @@ func sendBucketNotification(w http.ResponseWriter, arnListenerCh <-chan []Notifi
 // ListenBucketNotificationHandler - list bucket notifications.
 func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
+// Validate if bucket exists.
+objAPI := api.ObjectAPI()
+if objAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 // Validate request authorization.
 if s3Error := checkAuth(r); s3Error != ErrNone {
 writeErrorResponse(w, r, s3Error, r.URL.Path)
@@ -219,8 +238,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
 return
 }
-// Validate if bucket exists.
-_, err := api.ObjectAPI.GetBucketInfo(bucket)
+_, err := objAPI.GetBucketInfo(bucket)
 if err != nil {
 errorIf(err, "Unable to bucket info.")
 writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)

View File

@@ -268,18 +268,17 @@ func validateTopicConfigs(topicConfigs []topicConfig) APIErrorCode {
 // Check all the queue configs for any duplicates.
 func checkDuplicateQueueConfigs(configs []queueConfig) APIErrorCode {
-configMaps := make(map[string]int)
+var queueConfigARNS []string
 // Navigate through each configs and count the entries.
 for _, config := range configs {
-configMaps[config.QueueARN]++
+queueConfigARNS = append(queueConfigARNS, config.QueueARN)
 }
-// Validate if there are any duplicate counts.
-for _, count := range configMaps {
-if count != 1 {
-return ErrOverlappingConfigs
-}
-}
+// Check if there are any duplicate counts.
+if err := checkDuplicates(queueConfigARNS); err != nil {
+errorIf(err, "Invalid queue configs found.")
+return ErrOverlappingConfigs
+}
 // Success.
@@ -288,18 +287,17 @@ func checkDuplicateQueueConfigs(configs []queueConfig) APIErrorCode {
 // Check all the topic configs for any duplicates.
 func checkDuplicateTopicConfigs(configs []topicConfig) APIErrorCode {
-configMaps := make(map[string]int)
+var topicConfigARNS []string
 // Navigate through each configs and count the entries.
 for _, config := range configs {
-configMaps[config.TopicARN]++
+topicConfigARNS = append(topicConfigARNS, config.TopicARN)
 }
-// Validate if there are any duplicate counts.
-for _, count := range configMaps {
-if count != 1 {
-return ErrOverlappingConfigs
-}
-}
+// Check if there are any duplicate counts.
+if err := checkDuplicates(topicConfigARNS); err != nil {
+errorIf(err, "Invalid topic configs found.")
+return ErrOverlappingConfigs
+}
 // Success.
@@ -320,12 +318,17 @@ func validateNotificationConfig(nConfig notificationConfig) APIErrorCode {
 }
 // Check for duplicate queue configs.
-if s3Error := checkDuplicateQueueConfigs(nConfig.QueueConfigs); s3Error != ErrNone {
-return s3Error
+if len(nConfig.QueueConfigs) > 1 {
+if s3Error := checkDuplicateQueueConfigs(nConfig.QueueConfigs); s3Error != ErrNone {
+return s3Error
+}
 }
 // Check for duplicate topic configs.
-if s3Error := checkDuplicateTopicConfigs(nConfig.TopicConfigs); s3Error != ErrNone {
-return s3Error
+if len(nConfig.TopicConfigs) > 1 {
+if s3Error := checkDuplicateTopicConfigs(nConfig.TopicConfigs); s3Error != ErrNone {
+return s3Error
+}
 }
 // Add validation for other configurations.
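
Note: both duplicate checks above now delegate to a shared checkDuplicates(list []string) error helper whose body is not part of this diff. A sketch consistent with the call sites, assuming fmt is imported and that the returned error names the repeated entry:

// Sketch only: reports the first repeated entry in the list as an error.
func checkDuplicates(list []string) error {
	seen := make(map[string]struct{})
	for _, entry := range list {
		if _, ok := seen[entry]; ok {
			return fmt.Errorf("duplicate entry found: %s", entry)
		}
		seen[entry] = struct{}{}
	}
	return nil
}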

View File

@@ -126,6 +126,12 @@ func bucketPolicyConditionMatch(conditions map[string]set.StringSet, statement p
 // This implementation of the PUT operation uses the policy
 // subresource to add to or replace a policy on a bucket
 func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+objAPI := api.ObjectAPI()
+if objAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 vars := mux.Vars(r)
 bucket := vars["bucket"]
 switch getRequestAuthType(r) {
@@ -180,8 +186,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
 }
 // Save bucket policy.
-if err = writeBucketPolicy(bucket, api.ObjectAPI, bytes.NewReader(policyBytes), int64(len(policyBytes))); err != nil {
-errorIf(err, "Unable to write bucket policy.")
+if err = writeBucketPolicy(bucket, objAPI, bytes.NewReader(policyBytes), int64(len(policyBytes))); err != nil {
 switch err.(type) {
 case BucketNameInvalid:
 writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
@@ -203,6 +208,12 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
 // This implementation of the DELETE operation uses the policy
 // subresource to add to remove a policy on a bucket.
 func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+objAPI := api.ObjectAPI()
+if objAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 vars := mux.Vars(r)
 bucket := vars["bucket"]
@@ -219,8 +230,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
 }
 // Delete bucket access policy.
-if err := removeBucketPolicy(bucket, api.ObjectAPI); err != nil {
-errorIf(err, "Unable to remove bucket policy.")
+if err := removeBucketPolicy(bucket, objAPI); err != nil {
 switch err.(type) {
 case BucketNameInvalid:
 writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
@@ -244,6 +254,12 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
 // This operation uses the policy
 // subresource to return the policy of a specified bucket.
 func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+objAPI := api.ObjectAPI()
+if objAPI == nil {
+writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
+return
+}
 vars := mux.Vars(r)
 bucket := vars["bucket"]
@@ -260,7 +276,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
 }
 // Read bucket access policy.
-policy, err := readBucketPolicy(bucket, api.ObjectAPI)
+policy, err := readBucketPolicy(bucket, objAPI)
 if err != nil {
 errorIf(err, "Unable to read bucket policy.")
 switch err.(type) {

View File

@ -1,5 +1,5 @@
/* /*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. * Minio Cloud Storage, (C) 2016 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -294,13 +294,13 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestErrH
req, err := newTestSignedRequest("PUT", getPutPolicyURL("", testCase.bucketName), req, err := newTestSignedRequest("PUT", getPutPolicyURL("", testCase.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testCase.accessKey, testCase.secretKey) int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testCase.accessKey, testCase.secretKey)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
} }
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler. // Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.

// Call the ServeHTTP to execute the handler. // Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req) apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus { if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code) t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
} }
} }
} }

View File

@ -66,27 +66,38 @@ func (bp *bucketPolicies) RemoveBucketPolicy(bucket string) {
func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]*bucketPolicy, err error) { func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]*bucketPolicy, err error) {
// List buckets to proceed loading all notification configuration. // List buckets to proceed loading all notification configuration.
buckets, err := objAPI.ListBuckets() buckets, err := objAPI.ListBuckets()
errorIf(err, "Unable to list buckets.")
err = errorCause(err)
if err != nil { if err != nil {
return nil, err return nil, err
} }
policies = make(map[string]*bucketPolicy) policies = make(map[string]*bucketPolicy)
var pErrs []error
// Loads bucket policy. // Loads bucket policy.
for _, bucket := range buckets { for _, bucket := range buckets {
var policy *bucketPolicy policy, pErr := readBucketPolicy(bucket.Name, objAPI)
policy, err = readBucketPolicy(bucket.Name, objAPI) if pErr != nil {
if err != nil { switch pErr.(type) {
switch err.(type) {
case BucketPolicyNotFound: case BucketPolicyNotFound:
continue continue
} }
return nil, err pErrs = append(pErrs, pErr)
// Continue to load other bucket policies if possible.
continue
} }
policies[bucket.Name] = policy policies[bucket.Name] = policy
} }
// Look for any errors that occurred while reading bucket policies.
for _, pErr := range pErrs {
if pErr != nil {
return policies, pErr
}
}
// Success. // Success.
return policies, nil return policies, nil
} }
// Initialize all bucket policies. // Initialize all bucket policies.
@ -94,16 +105,20 @@ func initBucketPolicies(objAPI ObjectLayer) error {
if objAPI == nil { if objAPI == nil {
return errInvalidArgument return errInvalidArgument
} }
// Read all bucket policies. // Read all bucket policies.
policies, err := loadAllBucketPolicies(objAPI) policies, err := loadAllBucketPolicies(objAPI)
if err != nil { if err != nil {
return err return err
} }
// Populate global bucket collection.
globalBucketPolicies = &bucketPolicies{ globalBucketPolicies = &bucketPolicies{
rwMutex: &sync.RWMutex{}, rwMutex: &sync.RWMutex{},
bucketPolicyConfigs: policies, bucketPolicyConfigs: policies,
} }
// Success.
return nil return nil
} }
@ -125,18 +140,22 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader
} }
policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON) policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON)
objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, policyPath) objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, policyPath)
err = errorCause(err)
if err != nil { if err != nil {
if _, ok := err.(ObjectNotFound); ok { if _, ok := err.(ObjectNotFound); ok {
return nil, BucketPolicyNotFound{Bucket: bucket} return nil, BucketPolicyNotFound{Bucket: bucket}
} }
errorIf(err, "Unable to load policy for the bucket %s.", bucket)
return nil, err return nil, err
} }
var buffer bytes.Buffer var buffer bytes.Buffer
err = objAPI.GetObject(minioMetaBucket, policyPath, 0, objInfo.Size, &buffer) err = objAPI.GetObject(minioMetaBucket, policyPath, 0, objInfo.Size, &buffer)
err = errorCause(err)
if err != nil { if err != nil {
if _, ok := err.(ObjectNotFound); ok { if _, ok := err.(ObjectNotFound); ok {
return nil, BucketPolicyNotFound{Bucket: bucket} return nil, BucketPolicyNotFound{Bucket: bucket}
} }
errorIf(err, "Unable to load policy for the bucket %s.", bucket)
return nil, err return nil, err
} }
@ -169,9 +188,10 @@ func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return BucketNameInvalid{Bucket: bucket} return BucketNameInvalid{Bucket: bucket}
} }
policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON) policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON)
if err := objAPI.DeleteObject(minioMetaBucket, policyPath); err != nil { if err := objAPI.DeleteObject(minioMetaBucket, policyPath); err != nil {
errorIf(err, "Unable to remove bucket-policy on bucket %s.", bucket)
err = errorCause(err)
if _, ok := err.(ObjectNotFound); ok { if _, ok := err.(ObjectNotFound); ok {
return BucketPolicyNotFound{Bucket: bucket} return BucketPolicyNotFound{Bucket: bucket}
} }
@ -188,6 +208,9 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, reader io.Reader, size
} }
policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON) policyPath := pathJoin(bucketConfigPrefix, bucket, policyJSON)
_, err := objAPI.PutObject(minioMetaBucket, policyPath, size, reader, nil) if _, err := objAPI.PutObject(minioMetaBucket, policyPath, size, reader, nil); err != nil {
return err errorIf(err, "Unable to set policy for the bucket %s", bucket)
return errorCause(err)
}
return nil
} }

View File

@ -24,19 +24,6 @@ var commands = []cli.Command{}
// Collection of minio commands currently supported in a trie tree. // Collection of minio commands currently supported in a trie tree.
var commandsTree = newTrie() var commandsTree = newTrie()
// Collection of minio flags currently supported.
var globalFlags = []cli.Flag{
cli.StringFlag{
Name: "config-dir, C",
Value: mustGetConfigPath(),
Usage: "Path to configuration folder.",
},
cli.BoolFlag{
Name: "quiet",
Usage: "Suppress chatty output.",
},
}
// registerCommand registers a cli command. // registerCommand registers a cli command.
func registerCommand(command cli.Command) { func registerCommand(command cli.Command) {
commands = append(commands, command) commands = append(commands, command)

View File

@ -18,7 +18,6 @@ package cmd
import ( import (
"fmt" "fmt"
"net/rpc"
"net/url" "net/url"
"path" "path"
"strings" "strings"
@ -30,13 +29,18 @@ var healCmd = cli.Command{
Name: "heal", Name: "heal",
Usage: "To heal objects.", Usage: "To heal objects.",
Action: healControl, Action: healControl,
Flags: globalFlags,
CustomHelpTemplate: `NAME: CustomHelpTemplate: `NAME:
minio control {{.Name}} - {{.Usage}} minio control {{.Name}} - {{.Usage}}
USAGE: USAGE:
minio control {{.Name}} minio control {{.Name}}
EAMPLES: FLAGS:
{{range .Flags}}{{.}}
{{end}}
EXAMPLES:
1. Heal an object. 1. Heal an object.
$ minio control {{.Name}} http://localhost:9000/songs/classical/western/piano.mp3 $ minio control {{.Name}} http://localhost:9000/songs/classical/western/piano.mp3
@ -48,8 +52,17 @@ EAMPLES:
`, `,
} }
func checkHealControlSyntax(ctx *cli.Context) {
if len(ctx.Args()) != 1 {
cli.ShowCommandHelpAndExit(ctx, "heal", 1)
}
}
// "minio control heal" entry point. // "minio control heal" entry point.
func healControl(ctx *cli.Context) { func healControl(ctx *cli.Context) {
checkHealControlSyntax(ctx)
// Parse bucket and object from url.URL.Path // Parse bucket and object from url.URL.Path
parseBucketObject := func(path string) (bucketName string, objectName string) { parseBucketObject := func(path string) (bucketName string, objectName string) {
splits := strings.SplitN(path, string(slashSeparator), 3) splits := strings.SplitN(path, string(slashSeparator), 3)
@ -68,29 +81,38 @@ func healControl(ctx *cli.Context) {
return bucketName, objectName return bucketName, objectName
} }
if len(ctx.Args()) != 1 {
cli.ShowCommandHelpAndExit(ctx, "heal", 1)
}
parsedURL, err := url.Parse(ctx.Args()[0]) parsedURL, err := url.Parse(ctx.Args()[0])
fatalIf(err, "Unable to parse URL") fatalIf(err, "Unable to parse URL")
authCfg := &authConfig{
accessKey: serverConfig.GetCredential().AccessKeyID,
secretKey: serverConfig.GetCredential().SecretAccessKey,
address: parsedURL.Host,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Controller.LoginHandler",
}
client := newAuthClient(authCfg)
// Always try to fix disk metadata
fmt.Print("Checking and healing disk metadata..")
args := &GenericArgs{}
reply := &GenericReply{}
err = client.Call("Controller.HealDiskMetadataHandler", args, reply)
fatalIf(err, "Unable to heal disk metadata.")
fmt.Println(" ok")
bucketName, objectName := parseBucketObject(parsedURL.Path) bucketName, objectName := parseBucketObject(parsedURL.Path)
if bucketName == "" { if bucketName == "" {
cli.ShowCommandHelpAndExit(ctx, "heal", 1) return
} }
client, err := rpc.DialHTTPPath("tcp", parsedURL.Host, path.Join(reservedBucket, controlPath))
fatalIf(err, "Unable to connect to %s", parsedURL.Host)
// If object does not have trailing "/" then it's an object, hence heal it. // If object does not have trailing "/" then it's an object, hence heal it.
if objectName != "" && !strings.HasSuffix(objectName, slashSeparator) { if objectName != "" && !strings.HasSuffix(objectName, slashSeparator) {
fmt.Printf("Healing : /%s/%s", bucketName, objectName) fmt.Printf("Healing : /%s/%s\n", bucketName, objectName)
args := &HealObjectArgs{bucketName, objectName} args := &HealObjectArgs{Bucket: bucketName, Object: objectName}
reply := &HealObjectReply{} reply := &HealObjectReply{}
err = client.Call("Control.HealObject", args, reply) err = client.Call("Controller.HealObjectHandler", args, reply)
fatalIf(err, "RPC Control.HealObject call failed") errorIf(err, "Healing object %s failed.", objectName)
fmt.Println()
return return
} }
@ -98,23 +120,32 @@ func healControl(ctx *cli.Context) {
prefix := objectName prefix := objectName
marker := "" marker := ""
for { for {
args := HealListArgs{bucketName, prefix, marker, "", 1000} args := &HealListArgs{
Bucket: bucketName,
Prefix: prefix,
Marker: marker,
Delimiter: "",
MaxKeys: 1000,
}
reply := &HealListReply{} reply := &HealListReply{}
err = client.Call("Control.ListObjectsHeal", args, reply) err = client.Call("Controller.ListObjectsHealHandler", args, reply)
fatalIf(err, "RPC Heal.ListObjects call failed") fatalIf(err, "Unable to list objects for healing.")
// Heal the objects returned in the ListObjects reply. // Heal the objects returned in the ListObjects reply.
for _, obj := range reply.Objects { for _, obj := range reply.Objects {
fmt.Printf("Healing : /%s/%s", bucketName, obj) fmt.Printf("Healing : /%s/%s\n", bucketName, obj)
reply := &HealObjectReply{} reply := &GenericReply{}
err = client.Call("Control.HealObject", HealObjectArgs{bucketName, obj}, reply) healArgs := &HealObjectArgs{Bucket: bucketName, Object: obj}
fatalIf(err, "RPC Heal.HealObject call failed") err = client.Call("Controller.HealObjectHandler", healArgs, reply)
fmt.Println() errorIf(err, "Healing object %s failed.", obj)
} }
if !reply.IsTruncated { if !reply.IsTruncated {
// End of listing. // End of listing.
break break
} }
// Set the marker to list the next set of keys.
marker = reply.NextMarker marker = reply.NextMarker
} }
} }
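The heal command above switches from a bare rpc.DialHTTPPath to newAuthClient, which authenticates before issuing calls. A rough sketch of what that handshake amounts to, using the Login types shown later in this diff; accessKey and secretKey are placeholders, the token is assumed to travel in an exported GenericArgs.Token field (as the server-side isRPCTokenValid(args.Token) suggests), and the real client may additionally handle reconnects:

// Sketch: dial, login, then carry the JWT token on subsequent calls.
client, err := rpc.DialHTTPPath("tcp", "localhost:9000", "/minio/controller")
fatalIf(err, "Unable to connect.")
login := &RPCLoginArgs{Username: accessKey, Password: secretKey}
loginReply := &RPCLoginReply{}
fatalIf(client.Call("Controller.LoginHandler", login, loginReply), "Login failed.")
args := &GenericArgs{Token: loginReply.Token}
err = client.Call("Controller.HealDiskMetadataHandler", args, &GenericReply{})
errorIf(err, "Unable to heal disk metadata.")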

144
cmd/control-lock-main.go Normal file
View File

@ -0,0 +1,144 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"net/url"
"path"
"time"
"github.com/minio/cli"
)
// SystemLockState - Structure holding the lock state of the entire object storage:
// the total locks held, total calls blocked on locks, and the state of all locks in the system.
type SystemLockState struct {
TotalLocks int64 `json:"totalLocks"`
TotalBlockedLocks int64 `json:"totalBlockedLocks"` // count of operations which are blocked waiting for the lock to be released.
TotalAcquiredLocks int64 `json:"totalAcquiredLocks"` // count of operations which have successfully acquired the lock but haven't unlocked yet (operation in progress).
LocksInfoPerObject []VolumeLockInfo `json:"locksInfoPerObject"`
}
// VolumeLockInfo - Structure to contain the lock state info for volume, path pair.
type VolumeLockInfo struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
LocksOnObject int64 `json:"locksOnObject"` // All locks blocked + running for given <volume,path> pair.
LocksAcquiredOnObject int64 `json:"locksAcquiredOnObject"` // count of operations which have successfully acquired the lock but haven't unlocked yet (operation in progress).
TotalBlockedLocks int64 `json:"locksBlockedOnObject"` // count of operations which are blocked waiting for the lock to be released.
LockDetailsOnObject []OpsLockState `json:"lockDetailsOnObject"` // state information containing state of the locks for all operations on given <volume,path> pair.
}
// OpsLockState - structure holding the state of a lock for a single operation,
// i.e. status information for each operation with a given operation ID.
type OpsLockState struct {
OperationID string `json:"opsID"` // string containing operation ID.
LockOrigin string `json:"lockOrigin"` // constant which mentions the operation type (GetObject, PutObject...)
LockType string `json:"lockType"`
Status string `json:"status"` // status can be running/ready/blocked.
StatusSince string `json:"statusSince"` // duration for which the current status has held true, e.g. "2.5s".
}
// Read entire state of the locks in the system and return.
func generateSystemLockResponse() (SystemLockState, error) {
nsMutex.lockMapMutex.Lock()
defer nsMutex.lockMapMutex.Unlock()
if nsMutex.debugLockMap == nil {
return SystemLockState{}, LockInfoNil{}
}
lockState := SystemLockState{}
lockState.TotalBlockedLocks = nsMutex.blockedCounter
lockState.TotalLocks = nsMutex.globalLockCounter
lockState.TotalAcquiredLocks = nsMutex.runningLockCounter
for param := range nsMutex.debugLockMap {
volLockInfo := VolumeLockInfo{}
volLockInfo.Bucket = param.volume
volLockInfo.Object = param.path
volLockInfo.TotalBlockedLocks = nsMutex.debugLockMap[param].blocked
volLockInfo.LocksAcquiredOnObject = nsMutex.debugLockMap[param].running
volLockInfo.LocksOnObject = nsMutex.debugLockMap[param].ref
for opsID := range nsMutex.debugLockMap[param].lockInfo {
opsState := OpsLockState{}
opsState.OperationID = opsID
opsState.LockOrigin = nsMutex.debugLockMap[param].lockInfo[opsID].lockOrigin
opsState.LockType = nsMutex.debugLockMap[param].lockInfo[opsID].lockType
opsState.Status = nsMutex.debugLockMap[param].lockInfo[opsID].status
opsState.StatusSince = time.Now().Sub(nsMutex.debugLockMap[param].lockInfo[opsID].since).String()
volLockInfo.LockDetailsOnObject = append(volLockInfo.LockDetailsOnObject, opsState)
}
lockState.LocksInfoPerObject = append(lockState.LocksInfoPerObject, volLockInfo)
}
return lockState, nil
}
var lockCmd = cli.Command{
Name: "lock",
Usage: "info about the locks in the node.",
Action: lockControl,
Flags: globalFlags,
CustomHelpTemplate: `NAME:
minio control {{.Name}} - {{.Usage}}
USAGE:
minio control {{.Name}} http://localhost:9000/
FLAGS:
{{range .Flags}}{{.}}
{{end}}
EXAMPLES:
1. Get all the info about the blocked/held locks in the node:
$ minio control lock http://localhost:9000/
`,
}
// "minio control lock" entry point.
func lockControl(c *cli.Context) {
if len(c.Args()) != 1 {
cli.ShowCommandHelpAndExit(c, "lock", 1)
}
parsedURL, err := url.Parse(c.Args()[0])
fatalIf(err, "Unable to parse URL.")
authCfg := &authConfig{
accessKey: serverConfig.GetCredential().AccessKeyID,
secretKey: serverConfig.GetCredential().SecretAccessKey,
address: parsedURL.Host,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Controller.LoginHandler",
}
client := newAuthClient(authCfg)
args := &GenericArgs{}
reply := &SystemLockState{}
err = client.Call("Controller.LockInfo", args, reply)
// logs the error and exits if err != nil.
fatalIf(err, "RPC Controller.LockInfo call failed")
// print the lock info on the console.
b, err := json.MarshalIndent(*reply, "", " ")
fatalIf(err, "Failed to parse the RPC lock info response")
fmt.Print(string(b))
}

View File

@ -22,8 +22,10 @@ import "github.com/minio/cli"
var controlCmd = cli.Command{ var controlCmd = cli.Command{
Name: "control", Name: "control",
Usage: "Control and manage minio server.", Usage: "Control and manage minio server.",
Flags: globalFlags,
Action: mainControl, Action: mainControl,
Subcommands: []cli.Command{ Subcommands: []cli.Command{
lockCmd,
healCmd, healCmd,
shutdownCmd, shutdownCmd,
}, },

View File

@ -17,30 +17,35 @@
package cmd package cmd
import ( import (
"net/rpc"
"net/url" "net/url"
"path" "path"
"github.com/minio/cli" "github.com/minio/cli"
) )
var shutdownFlags = []cli.Flag{
cli.BoolFlag{
Name: "restart",
Usage: "Restart the server.",
},
}
var shutdownCmd = cli.Command{ var shutdownCmd = cli.Command{
Name: "shutdown", Name: "shutdown",
Usage: "Shutdown or restart the server.", Usage: "Shutdown or restart the server.",
Action: shutdownControl, Action: shutdownControl,
Flags: []cli.Flag{ Flags: append(shutdownFlags, globalFlags...),
cli.BoolFlag{
Name: "restart",
Usage: "Restart the server.",
},
},
CustomHelpTemplate: `NAME: CustomHelpTemplate: `NAME:
minio control {{.Name}} - {{.Usage}} minio control {{.Name}} - {{.Usage}}
USAGE: USAGE:
minio control {{.Name}} http://localhost:9000/ minio control {{.Name}} http://localhost:9000/
EAMPLES: FLAGS:
{{range .Flags}}{{.}}
{{end}}
EXAMPLES:
1. Shutdown the server: 1. Shutdown the server:
$ minio control shutdown http://localhost:9000/ $ minio control shutdown http://localhost:9000/
@ -55,14 +60,19 @@ func shutdownControl(c *cli.Context) {
cli.ShowCommandHelpAndExit(c, "shutdown", 1) cli.ShowCommandHelpAndExit(c, "shutdown", 1)
} }
parsedURL, err := url.ParseRequestURI(c.Args()[0]) parsedURL, err := url.Parse(c.Args()[0])
fatalIf(err, "Unable to parse URL") fatalIf(err, "Unable to parse URL.")
client, err := rpc.DialHTTPPath("tcp", parsedURL.Host, path.Join(reservedBucket, controlPath)) authCfg := &authConfig{
fatalIf(err, "Unable to connect to %s", parsedURL.Host) accessKey: serverConfig.GetCredential().AccessKeyID,
secretKey: serverConfig.GetCredential().SecretAccessKey,
address: parsedURL.Host,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Controller.LoginHandler",
}
client := newAuthClient(authCfg)
args := &ShutdownArgs{Reboot: c.Bool("restart")} args := &ShutdownArgs{Restart: c.Bool("restart")}
reply := &ShutdownReply{} err = client.Call("Controller.ShutdownHandler", args, &GenericReply{})
err = client.Call("Control.Shutdown", args, reply) errorIf(err, "Shutting down Minio server at %s failed.", parsedURL.Host)
fatalIf(err, "RPC Control.Shutdown call failed")
} }

View File

@ -16,8 +16,31 @@
package cmd package cmd
/// Auth operations
// Login - login handler.
func (c *controllerAPIHandlers) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultTokenExpiry)
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.ServerVersion = Version
return nil
}
// HealListArgs - argument for ListObjects RPC. // HealListArgs - argument for ListObjects RPC.
type HealListArgs struct { type HealListArgs struct {
// Authentication token generated by Login.
GenericArgs
Bucket string Bucket string
Prefix string Prefix string
Marker string Marker string
@ -25,7 +48,7 @@ type HealListArgs struct {
MaxKeys int MaxKeys int
} }
// HealListReply - reply by ListObjects RPC. // HealListReply - reply object by ListObjects RPC.
type HealListReply struct { type HealListReply struct {
IsTruncated bool IsTruncated bool
NextMarker string NextMarker string
@ -33,12 +56,15 @@ type HealListReply struct {
} }
// ListObjects - list all objects that need healing. // ListObjects - list all objects that need healing.
func (c *controllerAPIHandlers) ListObjectsHeal(arg *HealListArgs, reply *HealListReply) error { func (c *controllerAPIHandlers) ListObjectsHealHandler(args *HealListArgs, reply *HealListReply) error {
objAPI := c.ObjectAPI objAPI := c.ObjectAPI()
if objAPI == nil { if objAPI == nil {
return errInvalidArgument return errVolumeBusy
} }
info, err := objAPI.ListObjectsHeal(arg.Bucket, arg.Prefix, arg.Marker, arg.Delimiter, arg.MaxKeys) if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
info, err := objAPI.ListObjectsHeal(args.Bucket, args.Prefix, args.Marker, args.Delimiter, args.MaxKeys)
if err != nil { if err != nil {
return err return err
} }
@ -52,7 +78,13 @@ func (c *controllerAPIHandlers) ListObjectsHeal(arg *HealListArgs, reply *HealLi
// HealObjectArgs - argument for HealObject RPC. // HealObjectArgs - argument for HealObject RPC.
type HealObjectArgs struct { type HealObjectArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of the bucket.
Bucket string Bucket string
// Name of the object.
Object string Object string
} }
@ -60,29 +92,78 @@ type HealObjectArgs struct {
type HealObjectReply struct{} type HealObjectReply struct{}
// HealObject - heal the object. // HealObject - heal the object.
func (c *controllerAPIHandlers) HealObject(arg *HealObjectArgs, reply *HealObjectReply) error { func (c *controllerAPIHandlers) HealObjectHandler(args *HealObjectArgs, reply *GenericReply) error {
objAPI := c.ObjectAPI objAPI := c.ObjectAPI()
if objAPI == nil { if objAPI == nil {
return errInvalidArgument return errVolumeBusy
} }
return objAPI.HealObject(arg.Bucket, arg.Object) if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return objAPI.HealObject(args.Bucket, args.Object)
}
// HealDiskMetadataHandler - heal disk metadata.
func (c *controllerAPIHandlers) HealDiskMetadataHandler(args *GenericArgs, reply *GenericReply) error {
objAPI := c.ObjectAPI()
if objAPI == nil {
return errVolumeBusy
}
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
err := objAPI.HealDiskMetadata()
if err != nil {
return err
}
go func() {
globalWakeupCh <- struct{}{}
}()
return err
} }
// ShutdownArgs - argument for Shutdown RPC. // ShutdownArgs - argument for Shutdown RPC.
type ShutdownArgs struct { type ShutdownArgs struct {
Reboot bool // Authentication token generated by Login.
GenericArgs
// Should the server be restarted, all active connections are served before the server
// is restarted.
Restart bool
} }
// ShutdownReply - reply by Shutdown RPC. // Shuts down the server.
type ShutdownReply struct{} func (c *controllerAPIHandlers) ShutdownHandler(args *ShutdownArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
// Shutdown - Shutdown the server. return errInvalidToken
}
func (c *controllerAPIHandlers) Shutdown(arg *ShutdownArgs, reply *ShutdownReply) error { if args.Restart {
if arg.Reboot {
globalShutdownSignalCh <- shutdownRestart globalShutdownSignalCh <- shutdownRestart
} else { } else {
globalShutdownSignalCh <- shutdownHalt globalShutdownSignalCh <- shutdownHalt
} }
return nil return nil
} }
func (c *controllerAPIHandlers) TryInitHandler(args *GenericArgs, reply *GenericReply) error {
go func() {
globalWakeupCh <- struct{}{}
}()
*reply = GenericReply{}
return nil
}
// LockInfo - RPC control handler for `minio control lock`.
// Returns the info of the locks held in the system.
func (c *controllerAPIHandlers) LockInfo(arg *GenericArgs, reply *SystemLockState) error {
// obtain the lock state information.
lockInfo, err := generateSystemLockResponse()
// in case of error, return err to the RPC client.
if err != nil {
return err
}
// the response containing the lock info.
*reply = lockInfo
return nil
}

View File

@ -27,10 +27,10 @@ const (
controlPath = "/controller" controlPath = "/controller"
) )
// Register control RPC handlers. // Register controller RPC handlers.
func registerControlRPCRouter(mux *router.Router, ctrlHandlers *controllerAPIHandlers) { func registerControllerRPCRouter(mux *router.Router, ctrlHandlers *controllerAPIHandlers) {
ctrlRPCServer := rpc.NewServer() ctrlRPCServer := rpc.NewServer()
ctrlRPCServer.RegisterName("Control", ctrlHandlers) ctrlRPCServer.RegisterName("Controller", ctrlHandlers)
ctrlRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter() ctrlRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter()
ctrlRouter.Path(controlPath).Handler(ctrlRPCServer) ctrlRouter.Path(controlPath).Handler(ctrlRPCServer)
@ -38,5 +38,5 @@ func registerControlRPCRouter(mux *router.Router, ctrlHandlers *controllerAPIHan
// Handler for object healing. // Handler for object healing.
type controllerAPIHandlers struct { type controllerAPIHandlers struct {
ObjectAPI ObjectLayer ObjectAPI func() ObjectLayer
} }
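With controllerAPIHandlers now holding a func() ObjectLayer, the router can be registered before the backend finishes initializing. A hedged sketch of the wiring (the server-side call site is outside this diff; newObjectLayerFn is the illustrative accessor sketched earlier):

// Sketch: register the controller RPC router with a lazy getter.
mux := router.NewRouter()
registerControllerRPCRouter(mux, &controllerAPIHandlers{
	ObjectAPI: newObjectLayerFn,
})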

298
cmd/controller_test.go Normal file
View File

@ -0,0 +1,298 @@
/*
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"path"
"strconv"
"sync"
"time"
. "gopkg.in/check.v1"
)
// API suite container common to both FS and XL.
type TestRPCControllerSuite struct {
serverType string
testServer TestServer
endPoint string
accessKey string
secretKey string
}
// Init and run test on XL backend.
var _ = Suite(&TestRPCControllerSuite{serverType: "XL"})
// Setting up the test suite.
// Starting the Test server with a temporary XL backend.
func (s *TestRPCControllerSuite) SetUpSuite(c *C) {
s.testServer = StartTestRPCServer(c, s.serverType)
s.endPoint = s.testServer.Server.Listener.Addr().String()
s.accessKey = s.testServer.AccessKey
s.secretKey = s.testServer.SecretKey
}
// Called implicitly by "gopkg.in/check.v1" after all tests are run.
func (s *TestRPCControllerSuite) TearDownSuite(c *C) {
s.testServer.Stop()
}
// Tests to validate the correctness of lock instrumentation control RPC end point.
func (s *TestRPCControllerSuite) TestRPCControlLock(c *C) {
// enabling lock instrumentation.
globalDebugLock = true
// initializing the locks.
initNSLock(false)
// set debug lock info to `nil` so that the next tests have to initialize them again.
defer func() {
globalDebugLock = false
nsMutex.debugLockMap = nil
}()
expectedResult := []lockStateCase{
// Test case - 1.
// Case where 10 read locks are held.
// Entry for any of the 10 read locks has to be found.
// Since they are held in a loop, the lock origin for the first 10 read locks (opsID 0-9) should be the same.
{
volume: "my-bucket",
path: "my-object",
opsID: "0",
readLock: true,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 10,
expectedRunningLockCount: 10,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 10,
expectedVolPathRunningCount: 10,
expectedVolPathBlockCount: 0,
},
// Test case 2.
// Testing the existence of entry for the last read lock (read lock with opsID "9").
{
volume: "my-bucket",
path: "my-object",
opsID: "9",
readLock: true,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 10,
expectedRunningLockCount: 10,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 10,
expectedVolPathRunningCount: 10,
expectedVolPathBlockCount: 0,
},
// Test case 3.
// Hold a write lock, and it should block since 10 read locks
// on <"my-bucket", "my-object"> are still held.
{
volume: "my-bucket",
path: "my-object",
opsID: "10",
readLock: false,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 11,
expectedRunningLockCount: 10,
expectedBlockedLockCount: 1,
expectedVolPathLockCount: 11,
expectedVolPathRunningCount: 10,
expectedVolPathBlockCount: 1,
},
// Test case 4.
// Expected result when all the read locks are released and the blocked write lock acquires the lock.
{
volume: "my-bucket",
path: "my-object",
opsID: "10",
readLock: false,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 1,
expectedRunningLockCount: 1,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 1,
expectedVolPathRunningCount: 1,
expectedVolPathBlockCount: 0,
},
// Test case - 5.
// At the end, after locks are released, it's verified whether the counters are set to 0.
{
volume: "my-bucket",
path: "my-object",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 0,
expectedRunningLockCount: 0,
expectedBlockedLockCount: 0,
},
}
// used to make sure that the tests don't end till locks held in other goroutines are released.
var wg sync.WaitGroup
// Hold 10 read locks. We should find the info about these in the RPC response.
// Then call the RPC control end point for obtaining lock instrumentation info.
for i := 0; i < 10; i++ {
nsMutex.RLock("my-bucket", "my-object", strconv.Itoa(i))
}
authCfg := &authConfig{
accessKey: s.accessKey,
secretKey: s.secretKey,
address: s.endPoint,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Controller.LoginHandler",
}
client := newAuthClient(authCfg)
defer client.Close()
args := &GenericArgs{}
reply := &SystemLockState{}
// Call the lock instrumentation RPC end point.
err := client.Call("Controller.LockInfo", args, reply)
if err != nil {
c.Errorf("Add: expected no error but got string %q", err.Error())
}
// expected lock info.
expectedLockStats := expectedResult[0]
// verify the actual lock info with the expected one.
// verify the existence entry for first read lock (read lock with opsID "0").
verifyRPCLockInfoResponse(expectedLockStats, *reply, c, 1)
expectedLockStats = expectedResult[1]
// verify the actual lock info with the expected one.
// verify the existence entry for last read lock (read lock with opsID "9").
verifyRPCLockInfoResponse(expectedLockStats, *reply, c, 2)
// now hold a write lock in a different goroutine and it should block since 10 read locks are
// still held.
wg.Add(1)
go func() {
defer wg.Done()
// blocks till all read locks are released.
nsMutex.Lock("my-bucket", "my-object", strconv.Itoa(10))
// Once the above attempt to lock is unblocked/acquired, we verify the stats and release the lock.
expectedWLockStats := expectedResult[3]
// Since the write lock is acquired here, the number of blocked locks should reduce by 1 and
// count of running locks should increase by 1.
// Call the RPC control handle to fetch the lock instrumentation info.
reply = &SystemLockState{}
// Call the lock instrumentation RPC end point.
err = client.Call("Controller.LockInfo", args, reply)
if err != nil {
c.Errorf("Add: expected no error but got string %q", err.Error())
}
verifyRPCLockInfoResponse(expectedWLockStats, *reply, c, 4)
// release the write lock.
nsMutex.Unlock("my-bucket", "my-object", strconv.Itoa(10))
}()
// waiting for a second so that the attempt to acquire the write lock in
// the above goroutine gets blocked.
time.Sleep(1 * time.Second)
// The write lock should have got blocked by now,
// check whether the entry for one blocked lock exists.
expectedLockStats = expectedResult[2]
// Call the RPC control handle to fetch the lock instrumentation info.
reply = &SystemLockState{}
// Call the lock instrumentation RPC end point.
err = client.Call("Controller.LockInfo", args, reply)
if err != nil {
c.Errorf("Add: expected no error but got string %q", err.Error())
}
verifyRPCLockInfoResponse(expectedLockStats, *reply, c, 3)
// Release all the read locks held.
// the blocked write lock in the above goroutine should get unblocked.
for i := 0; i < 10; i++ {
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i))
}
wg.Wait()
// Since all the locks are released, there should not be any entry in the lock info
// and all the counters should be set to 0.
reply = &SystemLockState{}
// Call the lock instrumentation RPC end point.
err = client.Call("Controller.LockInfo", args, reply)
if err != nil {
c.Errorf("Add: expected no error but got string %q", err.Error())
}
if reply.TotalAcquiredLocks != 0 && reply.TotalLocks != 0 && reply.TotalBlockedLocks != 0 {
c.Fatalf("The counters are not reset properly after all locks are released")
}
if len(reply.LocksInfoPerObject) != 0 {
c.Fatalf("Since all locks are released there shouldn't have been any lock info entry, but found %d", len(reply.LocksInfoPerObject))
}
}
// TestControllerHandlerHealDiskMetadata - Registers and calls the `HealDiskMetadataHandler`,
// and asserts that the call succeeds.
func (s *TestRPCControllerSuite) TestControllerHandlerHealDiskMetadata(c *C) {
// The suite has already started the test RPC server, just send RPC calls.
authCfg := &authConfig{
accessKey: s.accessKey,
secretKey: s.secretKey,
address: s.endPoint,
path: path.Join(reservedBucket, controlPath),
loginMethod: "Controller.LoginHandler",
}
client := newAuthClient(authCfg)
defer client.Close()
args := &GenericArgs{}
reply := &GenericReply{}
err := client.Call("Controller.HealDiskMetadataHandler", args, reply)
if err != nil {
c.Errorf("Heal Meta Disk Handler test failed with <ERROR> %s", err.Error())
}
}

View File

@ -44,9 +44,9 @@ func DamerauLevenshteinDistance(a string, b string) int {
for j := 0; j <= len(b); j++ { for j := 0; j <= len(b); j++ {
d[0][j] = j d[0][j] = j
} }
var cost int
for i := 1; i <= len(a); i++ { for i := 1; i <= len(a); i++ {
for j := 1; j <= len(b); j++ { for j := 1; j <= len(b); j++ {
cost := 0
if a[i-1] == b[j-1] { if a[i-1] == b[j-1] {
cost = 0 cost = 0
} else { } else {

View File

@ -41,7 +41,7 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader
// FIXME: this is a bug in Golang, n == 0 and err == // FIXME: this is a bug in Golang, n == 0 and err ==
// io.ErrUnexpectedEOF for io.ReadFull function. // io.ErrUnexpectedEOF for io.ReadFull function.
if n == 0 && rErr == io.ErrUnexpectedEOF { if n == 0 && rErr == io.ErrUnexpectedEOF {
return 0, nil, rErr return 0, nil, traceError(rErr)
} }
if rErr == io.EOF { if rErr == io.EOF {
// We have reached EOF on the first byte read, io.Reader // We have reached EOF on the first byte read, io.Reader
@ -58,7 +58,7 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader
break break
} }
if rErr != nil && rErr != io.ErrUnexpectedEOF { if rErr != nil && rErr != io.ErrUnexpectedEOF {
return 0, nil, rErr return 0, nil, traceError(rErr)
} }
if n > 0 { if n > 0 {
// Returns encoded blocks. // Returns encoded blocks.
@ -88,19 +88,19 @@ func erasureCreateFile(disks []StorageAPI, volume, path string, reader io.Reader
func encodeData(dataBuffer []byte, dataBlocks, parityBlocks int) ([][]byte, error) { func encodeData(dataBuffer []byte, dataBlocks, parityBlocks int) ([][]byte, error) {
rs, err := reedsolomon.New(dataBlocks, parityBlocks) rs, err := reedsolomon.New(dataBlocks, parityBlocks)
if err != nil { if err != nil {
return nil, err return nil, traceError(err)
} }
// Split the input buffer into data and parity blocks. // Split the input buffer into data and parity blocks.
var blocks [][]byte var blocks [][]byte
blocks, err = rs.Split(dataBuffer) blocks, err = rs.Split(dataBuffer)
if err != nil { if err != nil {
return nil, err return nil, traceError(err)
} }
// Encode parity blocks using data blocks. // Encode parity blocks using data blocks.
err = rs.Encode(blocks) err = rs.Encode(blocks)
if err != nil { if err != nil {
return nil, err return nil, traceError(err)
} }
// Return encoded blocks. // Return encoded blocks.
@ -122,7 +122,7 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash
defer wg.Done() defer wg.Done()
wErr := disk.AppendFile(volume, path, enBlocks[index]) wErr := disk.AppendFile(volume, path, enBlocks[index])
if wErr != nil { if wErr != nil {
wErrs[index] = wErr wErrs[index] = traceError(wErr)
return return
} }
@ -139,7 +139,7 @@ func appendFile(disks []StorageAPI, volume, path string, enBlocks [][]byte, hash
// Do we have write quorum?. // Do we have write quorum?.
if !isDiskQuorum(wErrs, writeQuorum) { if !isDiskQuorum(wErrs, writeQuorum) {
return errXLWriteQuorum return traceError(errXLWriteQuorum)
} }
return nil return nil
} }
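appendFile above succeeds only when enough disks report success; isDiskQuorum itself is not part of this hunk. A minimal sketch of what such a quorum check can look like (the repository's actual implementation may treat particular errors specially):

// Sketch: a write meets quorum when at least `quorum` disks
// returned no error for the operation.
func isDiskQuorum(errs []error, quorum int) bool {
	count := 0
	for _, err := range errs {
		if err == nil {
			count++
		}
	}
	return count >= quorum
}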

View File

@ -93,8 +93,8 @@ func TestErasureCreateFile(t *testing.T) {
// 1 more disk down. 7 disk down in total. Should return quorum error. // 1 more disk down. 7 disk down in total. Should return quorum error.
disks[10] = AppendDiskDown{disks[10].(*posix)} disks[10] = AppendDiskDown{disks[10].(*posix)}
_, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) _, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
if err != errXLWriteQuorum { if errorCause(err) != errXLWriteQuorum {
t.Errorf("erasureCreateFile returned expected errXLWriteQuorum error, got %s", err) t.Errorf("erasureCreateFile return value: expected errXLWriteQuorum, got %s", err)
} }
} }
@ -195,7 +195,7 @@ func TestErasureEncode(t *testing.T) {
} }
// Failed as expected, but does it fail for the expected reason. // Failed as expected, but does it fail for the expected reason.
if actualErr != nil && !testCase.shouldPass { if actualErr != nil && !testCase.shouldPass {
if testCase.expectedErr != actualErr { if errorCause(actualErr) != testCase.expectedErr {
t.Errorf("Test %d: Expected Error to be \"%v\", but instead found \"%v\" ", i+1, testCase.expectedErr, actualErr) t.Errorf("Test %d: Expected Error to be \"%v\", but instead found \"%v\" ", i+1, testCase.expectedErr, actualErr)
} }
} }

View File

@ -64,7 +64,7 @@ func erasureHealFile(latestDisks []StorageAPI, outDatedDisks []StorageAPI, volum
} }
err := disk.AppendFile(healBucket, healPath, enBlocks[index]) err := disk.AppendFile(healBucket, healPath, enBlocks[index])
if err != nil { if err != nil {
return nil, err return nil, traceError(err)
} }
hashWriters[index].Write(enBlocks[index]) hashWriters[index].Write(enBlocks[index])
} }

View File

@ -66,7 +66,11 @@ func TestErasureHealFile(t *testing.T) {
copy(latest, disks) copy(latest, disks)
latest[0] = nil latest[0] = nil
outDated[0] = disks[0] outDated[0] = disks[0]
healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err != nil {
t.Fatal(err)
}
// Checksum of the healed file should match. // Checksum of the healed file should match.
if checkSums[0] != healCheckSums[0] { if checkSums[0] != healCheckSums[0] {
t.Error("Healing failed, data does not match.") t.Error("Healing failed, data does not match.")
@ -116,7 +120,7 @@ func TestErasureHealFile(t *testing.T) {
latest[index] = nil latest[index] = nil
outDated[index] = disks[index] outDated[index] = disks[index]
} }
healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) _, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err == nil { if err == nil {
t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks") t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks")
} }

View File

@ -84,10 +84,10 @@ func getReadDisks(orderedDisks []StorageAPI, index int, dataBlocks int) (readDis
// Sanity checks - we should never have this situation. // Sanity checks - we should never have this situation.
if dataDisks == dataBlocks { if dataDisks == dataBlocks {
return nil, 0, errUnexpected return nil, 0, traceError(errUnexpected)
} }
if dataDisks+parityDisks >= dataBlocks { if dataDisks+parityDisks >= dataBlocks {
return nil, 0, errUnexpected return nil, 0, traceError(errUnexpected)
} }
// Find the disks from which next set of parallel reads should happen. // Find the disks from which next set of parallel reads should happen.
@ -107,7 +107,7 @@ func getReadDisks(orderedDisks []StorageAPI, index int, dataBlocks int) (readDis
return readDisks, i + 1, nil return readDisks, i + 1, nil
} }
} }
return nil, 0, errXLReadQuorum return nil, 0, traceError(errXLReadQuorum)
} }
// parallelRead - reads chunks in parallel from the disks specified in []readDisks. // parallelRead - reads chunks in parallel from the disks specified in []readDisks.
@ -161,12 +161,12 @@ func parallelRead(volume, path string, readDisks []StorageAPI, orderedDisks []St
func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path string, offset int64, length int64, totalLength int64, blockSize int64, dataBlocks int, parityBlocks int, checkSums []string, algo string, pool *bpool.BytePool) (int64, error) { func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path string, offset int64, length int64, totalLength int64, blockSize int64, dataBlocks int, parityBlocks int, checkSums []string, algo string, pool *bpool.BytePool) (int64, error) {
// Offset and length cannot be negative. // Offset and length cannot be negative.
if offset < 0 || length < 0 { if offset < 0 || length < 0 {
return 0, errUnexpected return 0, traceError(errUnexpected)
} }
// Can't request more data than what is available. // Can't request more data than what is available.
if offset+length > totalLength { if offset+length > totalLength {
return 0, errUnexpected return 0, traceError(errUnexpected)
} }
// chunkSize is the amount of data that needs to be read from each disk at a time. // chunkSize is the amount of data that needs to be read from each disk at a time.
@ -248,7 +248,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
} }
if nextIndex == len(disks) { if nextIndex == len(disks) {
// No more disks to read from. // No more disks to read from.
return bytesWritten, errXLReadQuorum return bytesWritten, traceError(errXLReadQuorum)
} }
// We do not have enough data blocks to reconstruct the data // We do not have enough data blocks to reconstruct the data
// hence continue the for-loop till we have enough data blocks. // hence continue the for-loop till we have enough data blocks.
@ -325,24 +325,24 @@ func decodeData(enBlocks [][]byte, dataBlocks, parityBlocks int) error {
// Initialized reedsolomon. // Initialized reedsolomon.
rs, err := reedsolomon.New(dataBlocks, parityBlocks) rs, err := reedsolomon.New(dataBlocks, parityBlocks)
if err != nil { if err != nil {
return err return traceError(err)
} }
// Reconstruct encoded blocks. // Reconstruct encoded blocks.
err = rs.Reconstruct(enBlocks) err = rs.Reconstruct(enBlocks)
if err != nil { if err != nil {
return err return traceError(err)
} }
// Verify reconstructed blocks (parity). // Verify reconstructed blocks (parity).
ok, err := rs.Verify(enBlocks) ok, err := rs.Verify(enBlocks)
if err != nil { if err != nil {
return err return traceError(err)
} }
if !ok { if !ok {
// Blocks cannot be reconstructed, corrupted data. // Blocks cannot be reconstructed, corrupted data.
err = errors.New("Verification failed after reconstruction, data likely corrupted.") err = errors.New("Verification failed after reconstruction, data likely corrupted.")
return err return traceError(err)
} }
// Success. // Success.

View File

@ -104,7 +104,7 @@ func testGetReadDisks(t *testing.T, xl xlObjects) {
for i, test := range testCases { for i, test := range testCases {
disks, nextIndex, err := getReadDisks(test.argDisks, test.index, xl.dataBlocks) disks, nextIndex, err := getReadDisks(test.argDisks, test.index, xl.dataBlocks)
if err != test.err { if errorCause(err) != test.err {
t.Errorf("test-case %d - expected error : %s, got : %s", i+1, test.err, err) t.Errorf("test-case %d - expected error : %s, got : %s", i+1, test.err, err)
continue continue
} }
@ -217,11 +217,16 @@ func TestIsSuccessBlocks(t *testing.T) {
// Wrapper function for testGetReadDisks, testGetOrderedDisks. // Wrapper function for testGetReadDisks, testGetOrderedDisks.
func TestErasureReadUtils(t *testing.T) { func TestErasureReadUtils(t *testing.T) {
objLayer, dirs, err := getXLObjectLayer() nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer removeRoots(dirs) objLayer, err := getXLObjectLayer(disks)
if err != nil {
t.Fatal(err)
}
defer removeRoots(disks)
xl := objLayer.(xlObjects) xl := objLayer.(xlObjects)
testGetReadDisks(t, xl) testGetReadDisks(t, xl)
testGetOrderedDisks(t, xl) testGetOrderedDisks(t, xl)
@ -314,7 +319,7 @@ func TestErasureReadFileDiskFail(t *testing.T) {
disks[13] = ReadDiskDown{disks[13].(*posix)} disks[13] = ReadDiskDown{disks[13].(*posix)}
buf.Reset() buf.Reset()
_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool) _, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
if err != errXLReadQuorum { if errorCause(err) != errXLReadQuorum {
t.Fatal("expected errXLReadQuorum error") t.Fatal("expected errXLReadQuorum error")
} }
} }

View File

@ -76,17 +76,17 @@ func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int {
func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) { func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
// Offset and out size cannot be negative. // Offset and out size cannot be negative.
if offset < 0 || length < 0 { if offset < 0 || length < 0 {
return 0, errUnexpected return 0, traceError(errUnexpected)
} }
// Do we have enough blocks? // Do we have enough blocks?
if len(enBlocks) < dataBlocks { if len(enBlocks) < dataBlocks {
return 0, reedsolomon.ErrTooFewShards return 0, traceError(reedsolomon.ErrTooFewShards)
} }
// Do we have enough data? // Do we have enough data?
if int64(getDataBlockLen(enBlocks, dataBlocks)) < length { if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
return 0, reedsolomon.ErrShortData return 0, traceError(reedsolomon.ErrShortData)
} }
// Counter to decrement total left to write. // Counter to decrement total left to write.
@ -114,7 +114,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
if write < int64(len(block)) { if write < int64(len(block)) {
n, err := io.Copy(dst, bytes.NewReader(block[:write])) n, err := io.Copy(dst, bytes.NewReader(block[:write]))
if err != nil { if err != nil {
return 0, err return 0, traceError(err)
} }
totalWritten += n totalWritten += n
break break
@ -122,7 +122,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
// Copy the block. // Copy the block.
n, err := io.Copy(dst, bytes.NewReader(block)) n, err := io.Copy(dst, bytes.NewReader(block))
if err != nil { if err != nil {
return 0, err return 0, traceError(err)
} }
// Decrement output size. // Decrement output size.

122
cmd/errors.go Normal file
View File

@ -0,0 +1,122 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
// Holds the current directory path. Used for trimming path in traceError()
var rootPath string
// Figure out the rootPath
func initError() {
// Root path is automatically determined from the calling function's source file location.
// Capture the calling function's source file path.
_, file, _, _ := runtime.Caller(1)
// Save the directory alone.
rootPath = filepath.Dir(file)
}
// Represents a stack frame in the stack trace.
type traceInfo struct {
file string // File where error occurred
line int // Line where error occurred
name string // Name of the function where error occurred
}
// Error - error type containing cause and the stack trace.
type Error struct {
e error // Holds the cause error
trace []traceInfo // stack trace
errs []error // Useful for XL to hold errors from all disks
}
// Implement error interface.
func (e Error) Error() string {
return e.e.Error()
}
// Trace - returns stack trace.
func (e Error) Trace() []string {
var traceArr []string
for _, info := range e.trace {
traceArr = append(traceArr, fmt.Sprintf("%s:%d:%s",
info.file, info.line, info.name))
}
return traceArr
}
// traceError - returns a new Error wrapping the cause with a stack trace.
func traceError(e error, errs ...error) error {
if e == nil {
return nil
}
err := &Error{}
err.e = e
err.errs = errs
stack := make([]uintptr, 40)
length := runtime.Callers(2, stack)
if length > len(stack) {
length = len(stack)
}
stack = stack[:length]
for _, pc := range stack {
pc = pc - 1
fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc)
name := fn.Name()
if strings.HasSuffix(name, "ServeHTTP") {
break
}
if strings.HasSuffix(name, "runtime.") {
break
}
file = strings.TrimPrefix(file, rootPath+string(os.PathSeparator))
name = strings.TrimPrefix(name, "github.com/minio/minio/cmd.")
err.trace = append(err.trace, traceInfo{file, line, name})
}
return err
}
// Returns the underlying cause error.
func errorCause(err error) error {
if e, ok := err.(*Error); ok {
err = e.e
}
return err
}
// Returns a slice of underlying cause errors.
func errorsCause(errs []error) []error {
Errs := make([]error, len(errs))
for i, err := range errs {
if err == nil {
continue
}
Errs[i] = errorCause(err)
}
return Errs
}
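The intended division of labor for the helpers above: wrap an error once with traceError at the point of failure, and unwrap with errorCause wherever callers compare against sentinel errors, exactly as the updated erasure tests in this diff do. A small sketch (quorumSketch and the isDiskQuorum signature are illustrative; errXLWriteQuorum is the sentinel used elsewhere in this diff):

// Sketch: wrap at the failure site, unwrap at the comparison site.
func quorumSketch(wErrs []error, writeQuorum int) error {
	if !isDiskQuorum(wErrs, writeQuorum) {
		// Stack trace is captured here, where the failure surfaced.
		return traceError(errXLWriteQuorum)
	}
	return nil
}

// Callers and tests compare against the sentinel via errorCause:
//   if errorCause(err) == errXLWriteQuorum { ... }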

View File

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"net"
"net/url" "net/url"
"path" "path"
"sync" "sync"
@ -226,6 +227,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
// Construct the notification config path. // Construct the notification config path.
notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) notificationConfigPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, notificationConfigPath) objInfo, err := objAPI.GetObjectInfo(minioMetaBucket, notificationConfigPath)
err = errorCause(err)
if err != nil { if err != nil {
// 'notification.xml' not found return 'errNoSuchNotifications'. // 'notification.xml' not found return 'errNoSuchNotifications'.
// This is default when no bucket notifications are found on the bucket. // This is default when no bucket notifications are found on the bucket.
@ -233,11 +235,13 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
case ObjectNotFound: case ObjectNotFound:
return nil, errNoSuchNotifications return nil, errNoSuchNotifications
} }
errorIf(err, "Unable to load bucket-notification for bucket %s", bucket)
// Returns error for other errors. // Returns error for other errors.
return nil, err return nil, err
} }
var buffer bytes.Buffer var buffer bytes.Buffer
err = objAPI.GetObject(minioMetaBucket, notificationConfigPath, 0, objInfo.Size, &buffer) err = objAPI.GetObject(minioMetaBucket, notificationConfigPath, 0, objInfo.Size, &buffer)
err = errorCause(err)
if err != nil { if err != nil {
// 'notification.xml' not found return 'errNoSuchNotifications'. // 'notification.xml' not found return 'errNoSuchNotifications'.
// This is default when no bucket notifications are found on the bucket. // This is default when no bucket notifications are found on the bucket.
@ -245,6 +249,7 @@ func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationCon
case ObjectNotFound: case ObjectNotFound:
return nil, errNoSuchNotifications return nil, errNoSuchNotifications
} }
errorIf(err, "Unable to load bucket-notification for bucket %s", bucket)
// Returns error for other errors. // Returns error for other errors.
return nil, err return nil, err
} }
@ -272,13 +277,12 @@ func loadAllBucketNotifications(objAPI ObjectLayer) (map[string]*notificationCon
// Loads all bucket notifications. // Loads all bucket notifications.
for _, bucket := range buckets { for _, bucket := range buckets {
var nCfg *notificationConfig nCfg, nErr := loadNotificationConfig(bucket.Name, objAPI)
nCfg, err = loadNotificationConfig(bucket.Name, objAPI) if nErr != nil {
if err != nil { if nErr == errNoSuchNotifications {
if err == errNoSuchNotifications {
continue continue
} }
return nil, err return nil, nErr
} }
configs[bucket.Name] = nCfg configs[bucket.Name] = nCfg
} }
@ -308,6 +312,14 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
// Using accountID we can now initialize a new AMQP logrus instance. // Using accountID we can now initialize a new AMQP logrus instance.
amqpLog, err := newAMQPNotify(accountID) amqpLog, err := newAMQPNotify(accountID)
if err != nil { if err != nil {
// Encapsulate network error to be more informative.
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err return nil, err
} }
queueTargets[queueARN] = amqpLog queueTargets[queueARN] = amqpLog
@ -327,6 +339,14 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
// Using accountID we can now initialize a new Redis logrus instance. // Using accountID we can now initialize a new Redis logrus instance.
redisLog, err := newRedisNotify(accountID) redisLog, err := newRedisNotify(accountID)
if err != nil { if err != nil {
// Encapsulate network error to be more informative.
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
Op: "Connecting to " + queueARN,
Net: "tcp",
Err: err,
}
}
return nil, err return nil, err
} }
queueTargets[queueARN] = redisLog queueTargets[queueARN] = redisLog
@ -345,6 +365,13 @@ func loadAllQueueTargets() (map[string]*logrus.Logger, error) {
// Using accountID we can now initialize a new ElasticSearch logrus instance. // Using accountID we can now initialize a new ElasticSearch logrus instance.
elasticLog, err := newElasticNotify(accountID) elasticLog, err := newElasticNotify(accountID)
if err != nil { if err != nil {
// Encapsulate network error to be more informative.
if _, ok := err.(net.Error); ok {
return nil, &net.OpError{
Op: "Connecting to " + queueARN, Net: "tcp",
Err: err,
}
}
return nil, err return nil, err
} }
queueTargets[queueARN] = elasticLog queueTargets[queueARN] = elasticLog
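The identical wrap-and-annotate block appears for the AMQP, Redis, and ElasticSearch targets above; factored out, the idea reduces to the following sketch (helper name hypothetical, assumes the standard "net" import):

	// wrapNetworkError annotates low-level network failures with the
	// queue ARN being dialed, so the log line names the failing target.
	// Non-network errors pass through unchanged.
	func wrapNetworkError(queueARN string, err error) error {
		if _, ok := err.(net.Error); ok {
			return &net.OpError{
				Op:  "Connecting to " + queueARN,
				Net: "tcp",
				Err: err,
			}
		}
		return err
	}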


@ -86,16 +86,25 @@ func testEventNotify(obj ObjectLayer, instanceType string, t TestErrHandler) {
// Tests various forms of initialization of event notifier. // Tests various forms of initialization of event notifier.
func TestInitEventNotifier(t *testing.T) { func TestInitEventNotifier(t *testing.T) {
fs, disk, err := getSingleNodeObjectLayer() disk, err := getRandomDisks(1)
if err != nil {
t.Fatal("Unable to create directories for FS backend. ", err)
}
fs, err := getSingleNodeObjectLayer(disk[0])
if err != nil { if err != nil {
t.Fatal("Unable to initialize FS backend.", err) t.Fatal("Unable to initialize FS backend.", err)
} }
xl, disks, err := getXLObjectLayer() nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal("Unable to create directories for XL backend. ", err)
}
xl, err := getXLObjectLayer(disks)
if err != nil { if err != nil {
t.Fatal("Unable to initialize XL backend.", err) t.Fatal("Unable to initialize XL backend.", err)
} }
disks = append(disks, disk) disks = append(disks, disk...)
for _, d := range disks { for _, d := range disks {
defer removeAll(d) defer removeAll(d)
} }
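A plausible shape for the new getRandomDisks helper used by these tests: allocate nDisks fresh temporary directories and return their paths. (Sketch under assumptions; the real helper lives in the repo's test utilities. Assumes "io/ioutil" and "os" imports.)

	func getRandomDisks(nDisks int) ([]string, error) {
		var paths []string
		for i := 0; i < nDisks; i++ {
			path, err := ioutil.TempDir(os.TempDir(), "minio-")
			if err != nil {
				// Roll back the directories created so far.
				for _, p := range paths {
					os.RemoveAll(p)
				}
				return nil, err
			}
			paths = append(paths, path)
		}
		return paths, nil
	}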


@ -189,7 +189,7 @@ func loadAllFormats(bootstrapDisks []StorageAPI) ([]*formatConfigV1, []error) {
} }
} }
// Return all formats and nil // Return all formats and nil
return formatConfigs, nil return formatConfigs, sErrs
} }
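Returning the collected per-disk errors instead of nil lets callers reason disk-by-disk, since formatConfigs and sErrs are index-aligned. A hypothetical caller illustrating the idiom:

	// countUsableFormats is illustrative only: a nil error at index i
	// means formatConfigs[i] holds a successfully read format.json.
	func countUsableFormats(bootstrapDisks []StorageAPI) int {
		formatConfigs, sErrs := loadAllFormats(bootstrapDisks)
		usable := 0
		for i := range sErrs {
			if sErrs[i] != nil {
				// Offline, unformatted, or corrupted disk; a healing
				// candidate rather than a fatal load error.
				continue
			}
			if formatConfigs[i] != nil {
				usable++
			}
		}
		return usable
	}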
// genericFormatCheck - validates and returns error. // genericFormatCheck - validates and returns error.
@ -524,6 +524,11 @@ func healFormatXLFreshDisks(storageDisks []StorageAPI) error {
} }
} }
// Initialize meta volume; if the volume already exists, it is ignored.
if err := initMetaVolume(orderedDisks); err != nil {
return fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
}
// Save new `format.json` across all disks, in JBOD order. // Save new `format.json` across all disks, in JBOD order.
return saveFormatXL(orderedDisks, newFormatConfigs) return saveFormatXL(orderedDisks, newFormatConfigs)
} }
@ -870,6 +875,11 @@ func initFormatXL(storageDisks []StorageAPI) (err error) {
formats[index].XL.JBOD = jbod formats[index].XL.JBOD = jbod
} }
// Initialize meta volume; if the volume already exists, it is ignored.
if err := initMetaVolume(storageDisks); err != nil {
return fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
}
// Save formats `format.json` across all disks. // Save formats `format.json` across all disks.
return saveFormatXL(storageDisks, formats) return saveFormatXL(storageDisks, formats)
} }
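Both healing and fresh initialization now create the meta volume before saving format.json. A minimal sketch of what that step amounts to, assuming repo identifiers (StorageAPI, minioMetaBucket, errVolumeExists); the real initMetaVolume may create additional prefixes:

	func initMetaVolumeSketch(storageDisks []StorageAPI) error {
		for _, disk := range storageDisks {
			if disk == nil {
				continue
			}
			// Create the hidden '.minio.sys' volume; a pre-existing
			// volume counts as success.
			if err := disk.MakeVol(minioMetaBucket); err != nil && err != errVolumeExists {
				return err
			}
		}
		return nil
	}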


@ -215,7 +215,6 @@ func genFormatXLInvalidDisksOrder() []*formatConfigV1 {
} }
func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) { func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) {
var err error var err error
xl := obj.(xlObjects) xl := obj.(xlObjects)
@ -263,8 +262,13 @@ func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) {
} }
func TestFormatXLHealFreshDisks(t *testing.T) { func TestFormatXLHealFreshDisks(t *testing.T) {
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := getXLObjectLayer() obj, err := getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -290,8 +294,13 @@ func TestFormatXLHealFreshDisks(t *testing.T) {
} }
func TestFormatXLHealFreshDisksErrorExpected(t *testing.T) { func TestFormatXLHealFreshDisksErrorExpected(t *testing.T) {
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := getXLObjectLayer() obj, err := getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -326,7 +335,7 @@ func TestFormatXLHealFreshDisksErrorExpected(t *testing.T) {
// a given disk to test healing a corrupted disk // a given disk to test healing a corrupted disk
func TestFormatXLHealCorruptedDisks(t *testing.T) { func TestFormatXLHealCorruptedDisks(t *testing.T) {
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := getXLObjectLayer() obj, fsDirs, err := prepareXL()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -398,7 +407,7 @@ func TestFormatXLHealCorruptedDisks(t *testing.T) {
// some of format.json // some of format.json
func TestFormatXLReorderByInspection(t *testing.T) { func TestFormatXLReorderByInspection(t *testing.T) {
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := getXLObjectLayer() obj, fsDirs, err := prepareXL()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -569,8 +578,13 @@ func TestSavedUUIDOrder(t *testing.T) {
// Test initFormatXL() when disks are expected to return errors // Test initFormatXL() when disks are expected to return errors
func TestInitFormatXLErrors(t *testing.T) { func TestInitFormatXLErrors(t *testing.T) {
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := getXLObjectLayer() obj, err := getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -659,8 +673,14 @@ func TestGenericFormatCheck(t *testing.T) {
} }
func TestLoadFormatXLErrs(t *testing.T) { func TestLoadFormatXLErrs(t *testing.T) {
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := getXLObjectLayer() obj, err := getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -680,7 +700,12 @@ func TestLoadFormatXLErrs(t *testing.T) {
removeRoots(fsDirs) removeRoots(fsDirs)
obj, fsDirs, err = getXLObjectLayer() fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -700,7 +725,12 @@ func TestLoadFormatXLErrs(t *testing.T) {
removeRoots(fsDirs) removeRoots(fsDirs)
obj, fsDirs, err = getXLObjectLayer() fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -718,7 +748,12 @@ func TestLoadFormatXLErrs(t *testing.T) {
removeRoots(fsDirs) removeRoots(fsDirs)
obj, fsDirs, err = getXLObjectLayer() fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -737,8 +772,14 @@ func TestLoadFormatXLErrs(t *testing.T) {
// Tests for healFormatXLCorruptedDisks() with cases which lead to errors // Tests for healFormatXLCorruptedDisks() with cases which lead to errors
func TestHealFormatXLCorruptedDisksErrs(t *testing.T) { func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Everything is fine, should return nil // Everything is fine, should return nil
obj, fsDirs, err := getXLObjectLayer() obj, err := getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -746,10 +787,16 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
if err = healFormatXLCorruptedDisks(xl.storageDisks); err != nil { if err = healFormatXLCorruptedDisks(xl.storageDisks); err != nil {
t.Fatal("Got an unexpected error: ", err) t.Fatal("Got an unexpected error: ", err)
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Disks 0..15 are nil // Disks 0..15 are nil
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -762,8 +809,13 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// One disk returns Faulty Disk // One disk returns Faulty Disk
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -778,8 +830,13 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// One disk is not found, heal corrupted disks should return nil // One disk is not found, heal corrupted disks should return nil
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -790,8 +847,13 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Remove format.json of all disks // Remove format.json of all disks
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -806,8 +868,13 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Corrupted format json in one disk // Corrupted format json in one disk
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -825,8 +892,14 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
// Tests for healFormatXLFreshDisks() with cases which lead to errors // Tests for healFormatXLFreshDisks() with cases which lead to errors
func TestHealFormatXLFreshDisksErrs(t *testing.T) { func TestHealFormatXLFreshDisksErrs(t *testing.T) {
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Everything is fine, should return nil // Everything is fine, should return nil
obj, fsDirs, err := getXLObjectLayer() obj, err := getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -836,8 +909,13 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Disks 0..15 are nil // Disks 0..15 are nil
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -850,8 +928,13 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// One disk returns Faulty Disk // One disk returns Faulty Disk
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -866,8 +949,13 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// One disk is not found, heal fresh disks should return nil // One disk is not found, heal fresh disks should return nil
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -878,8 +966,13 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Remove format.json of all disks // Remove format.json of all disks
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -894,8 +987,13 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
} }
removeRoots(fsDirs) removeRoots(fsDirs)
fsDirs, err = getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
// Remove format.json of all disks // Remove format.json of all disks
obj, fsDirs, err = getXLObjectLayer() obj, err = getXLObjectLayer(fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
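prepareXL, which the corrupted-disk tests above switch to, plausibly bundles the two steps the other tests keep repeating; a sketch under that assumption (getRandomDisks, getXLObjectLayer, and removeRoots are the repo's test helpers):

	func prepareXL() (ObjectLayer, []string, error) {
		nDisks := 16
		fsDirs, err := getRandomDisks(nDisks)
		if err != nil {
			return nil, nil, err
		}
		obj, err := getXLObjectLayer(fsDirs)
		if err != nil {
			removeRoots(fsDirs)
			return nil, nil, err
		}
		return obj, fsDirs, nil
	}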


@ -24,13 +24,13 @@ func fsCreateFile(disk StorageAPI, reader io.Reader, buf []byte, tmpBucket, temp
for { for {
n, rErr := reader.Read(buf) n, rErr := reader.Read(buf)
if rErr != nil && rErr != io.EOF { if rErr != nil && rErr != io.EOF {
return 0, rErr return 0, traceError(rErr)
} }
bytesWritten += int64(n) bytesWritten += int64(n)
if n > 0 { if n > 0 {
wErr := disk.AppendFile(tmpBucket, tempObj, buf[0:n]) wErr := disk.AppendFile(tmpBucket, tempObj, buf[0:n])
if wErr != nil { if wErr != nil {
return 0, wErr return 0, traceError(wErr)
} }
} }
if rErr == io.EOF { if rErr == io.EOF {


@ -81,12 +81,12 @@ func readFSMetadata(disk StorageAPI, bucket, filePath string) (fsMeta fsMetaV1,
// Read all `fs.json`. // Read all `fs.json`.
buf, err := disk.ReadAll(bucket, filePath) buf, err := disk.ReadAll(bucket, filePath)
if err != nil { if err != nil {
return fsMetaV1{}, err return fsMetaV1{}, traceError(err)
} }
// Decode `fs.json` into fsMeta structure. // Decode `fs.json` into fsMeta structure.
if err = json.Unmarshal(buf, &fsMeta); err != nil { if err = json.Unmarshal(buf, &fsMeta); err != nil {
return fsMetaV1{}, err return fsMetaV1{}, traceError(err)
} }
// Success. // Success.
@ -94,16 +94,23 @@ func readFSMetadata(disk StorageAPI, bucket, filePath string) (fsMeta fsMetaV1,
} }
// Write fsMeta to fs.json or fs-append.json. // Write fsMeta to fs.json or fs-append.json.
func writeFSMetadata(disk StorageAPI, bucket, filePath string, fsMeta fsMetaV1) (err error) { func writeFSMetadata(disk StorageAPI, bucket, filePath string, fsMeta fsMetaV1) error {
tmpPath := path.Join(tmpMetaPrefix, getUUID()) tmpPath := path.Join(tmpMetaPrefix, getUUID())
metadataBytes, err := json.Marshal(fsMeta) metadataBytes, err := json.Marshal(fsMeta)
if err != nil { if err != nil {
return err return traceError(err)
} }
if err = disk.AppendFile(minioMetaBucket, tmpPath, metadataBytes); err != nil { if err = disk.AppendFile(minioMetaBucket, tmpPath, metadataBytes); err != nil {
return err return traceError(err)
} }
return disk.RenameFile(minioMetaBucket, tmpPath, bucket, filePath) err = disk.RenameFile(minioMetaBucket, tmpPath, bucket, filePath)
if err != nil {
// Surface the rename failure; remove the staged temp file first.
if dErr := disk.DeleteFile(minioMetaBucket, tmpPath); dErr != nil {
return traceError(dErr)
}
return traceError(err)
}
return nil
} }
// newFSMetaV1 - initializes new fsMetaV1. // newFSMetaV1 - initializes new fsMetaV1.
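writeFSMetadata above is one instance of a recurring idiom in this change: stage the write under a unique temp name in '.minio.sys/tmp', then rename into place so a crash never leaves a half-written file at the final path. A condensed, hypothetical helper capturing the shape (repo identifiers assumed):

	func safeWriteFile(disk StorageAPI, bucket, filePath string, data []byte) error {
		tmpPath := path.Join(tmpMetaPrefix, getUUID())
		if err := disk.AppendFile(minioMetaBucket, tmpPath, data); err != nil {
			return traceError(err)
		}
		// Rename is atomic on POSIX; on failure, remove the staged file
		// and surface the rename error.
		if err := disk.RenameFile(minioMetaBucket, tmpPath, bucket, filePath); err != nil {
			if dErr := disk.DeleteFile(minioMetaBucket, tmpPath); dErr != nil {
				return traceError(dErr)
			}
			return traceError(err)
		}
		return nil
	}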


@ -64,8 +64,8 @@ func (fs fsObjects) writeUploadJSON(bucket, object, uploadID string, initiated t
var uploadsJSON uploadsV1 var uploadsJSON uploadsV1
uploadsJSON, err = readUploadsJSON(bucket, object, fs.storage) uploadsJSON, err = readUploadsJSON(bucket, object, fs.storage)
if err != nil { if err != nil {
// For any other errors. // uploads.json might not exist, hence ignore errFileNotFound.
if err != errFileNotFound { if errorCause(err) != errFileNotFound {
return err return err
} }
// Set uploads format to `fs`. // Set uploads format to `fs`.
@ -77,18 +77,18 @@ func (fs fsObjects) writeUploadJSON(bucket, object, uploadID string, initiated t
// Update `uploads.json` on all disks. // Update `uploads.json` on all disks.
uploadsJSONBytes, wErr := json.Marshal(&uploadsJSON) uploadsJSONBytes, wErr := json.Marshal(&uploadsJSON)
if wErr != nil { if wErr != nil {
return wErr return traceError(wErr)
} }
// Write `uploads.json` to disk. // Write `uploads.json` to disk.
if wErr = fs.storage.AppendFile(minioMetaBucket, tmpUploadsPath, uploadsJSONBytes); wErr != nil { if wErr = fs.storage.AppendFile(minioMetaBucket, tmpUploadsPath, uploadsJSONBytes); wErr != nil {
return wErr return traceError(wErr)
} }
wErr = fs.storage.RenameFile(minioMetaBucket, tmpUploadsPath, minioMetaBucket, uploadsPath) wErr = fs.storage.RenameFile(minioMetaBucket, tmpUploadsPath, minioMetaBucket, uploadsPath)
if wErr != nil { if wErr != nil {
if dErr := fs.storage.DeleteFile(minioMetaBucket, tmpUploadsPath); dErr != nil { if dErr := fs.storage.DeleteFile(minioMetaBucket, tmpUploadsPath); dErr != nil {
return dErr return traceError(dErr)
} }
return wErr return traceError(wErr)
} }
return nil return nil
} }
@ -100,13 +100,13 @@ func (fs fsObjects) updateUploadsJSON(bucket, object string, uploadsJSON uploads
tmpUploadsPath := path.Join(tmpMetaPrefix, uniqueID) tmpUploadsPath := path.Join(tmpMetaPrefix, uniqueID)
uploadsBytes, wErr := json.Marshal(uploadsJSON) uploadsBytes, wErr := json.Marshal(uploadsJSON)
if wErr != nil { if wErr != nil {
return wErr return traceError(wErr)
} }
if wErr = fs.storage.AppendFile(minioMetaBucket, tmpUploadsPath, uploadsBytes); wErr != nil { if wErr = fs.storage.AppendFile(minioMetaBucket, tmpUploadsPath, uploadsBytes); wErr != nil {
return wErr return traceError(wErr)
} }
if wErr = fs.storage.RenameFile(minioMetaBucket, tmpUploadsPath, minioMetaBucket, uploadsPath); wErr != nil { if wErr = fs.storage.RenameFile(minioMetaBucket, tmpUploadsPath, minioMetaBucket, uploadsPath); wErr != nil {
return wErr return traceError(wErr)
} }
return nil return nil
} }


@ -58,9 +58,12 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
var err error var err error
var eof bool var eof bool
if uploadIDMarker != "" { if uploadIDMarker != "" {
nsMutex.RLock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, keyMarker)) // generates random string on setting MINIO_DEBUG=lock, else returns empty string.
// used for instrumentation on locks.
opsID := getOpsID()
nsMutex.RLock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, keyMarker), opsID)
uploads, _, err = listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, fs.storage) uploads, _, err = listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, fs.storage)
nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, keyMarker)) nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, keyMarker), opsID)
if err != nil { if err != nil {
return ListMultipartsInfo{}, err return ListMultipartsInfo{}, err
} }
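Every nsMutex call in this change now carries an opsID. Per the recurring comment, getOpsID yields a random identifier only when MINIO_DEBUG=lock is set; a minimal sketch of such a helper (assumes the "os" import; the real implementation may differ):

	func getOpsID() string {
		// Lock instrumentation is opt-in: without MINIO_DEBUG=lock the
		// ID stays empty and lock bookkeeping remains cheap.
		if os.Getenv("MINIO_DEBUG") == "lock" {
			return getUUID() // repo helper returning a random UUID string
		}
		return ""
	}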
@ -91,7 +94,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
eof = true eof = true
break break
} }
return ListMultipartsInfo{}, err return ListMultipartsInfo{}, walkResult.err
} }
entry := strings.TrimPrefix(walkResult.entry, retainSlash(pathJoin(mpartMetaPrefix, bucket))) entry := strings.TrimPrefix(walkResult.entry, retainSlash(pathJoin(mpartMetaPrefix, bucket)))
if strings.HasSuffix(walkResult.entry, slashSeparator) { if strings.HasSuffix(walkResult.entry, slashSeparator) {
@ -110,9 +113,14 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
var tmpUploads []uploadMetadata var tmpUploads []uploadMetadata
var end bool var end bool
uploadIDMarker = "" uploadIDMarker = ""
nsMutex.RLock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry))
// generates random string on setting MINIO_DEBUG=lock, else returns empty string.
// used for instrumentation on locks.
opsID := getOpsID()
nsMutex.RLock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry), opsID)
tmpUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, fs.storage) tmpUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, fs.storage)
nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry)) nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry), opsID)
if err != nil { if err != nil {
return ListMultipartsInfo{}, err return ListMultipartsInfo{}, err
} }
@ -168,42 +176,42 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
// Validate input arguments. // Validate input arguments.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ListMultipartsInfo{}, BucketNameInvalid{Bucket: bucket} return ListMultipartsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
} }
if !fs.isBucketExist(bucket) { if !fs.isBucketExist(bucket) {
return ListMultipartsInfo{}, BucketNotFound{Bucket: bucket} return ListMultipartsInfo{}, traceError(BucketNotFound{Bucket: bucket})
} }
if !IsValidObjectPrefix(prefix) { if !IsValidObjectPrefix(prefix) {
return ListMultipartsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix} return ListMultipartsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
} }
// Verify if delimiter is anything other than '/', which we do not support. // Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator { if delimiter != "" && delimiter != slashSeparator {
return ListMultipartsInfo{}, UnsupportedDelimiter{ return ListMultipartsInfo{}, traceError(UnsupportedDelimiter{
Delimiter: delimiter, Delimiter: delimiter,
} })
} }
// Verify if marker has prefix. // Verify if marker has prefix.
if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) { if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
return ListMultipartsInfo{}, InvalidMarkerPrefixCombination{ return ListMultipartsInfo{}, traceError(InvalidMarkerPrefixCombination{
Marker: keyMarker, Marker: keyMarker,
Prefix: prefix, Prefix: prefix,
} })
} }
if uploadIDMarker != "" { if uploadIDMarker != "" {
if strings.HasSuffix(keyMarker, slashSeparator) { if strings.HasSuffix(keyMarker, slashSeparator) {
return ListMultipartsInfo{}, InvalidUploadIDKeyCombination{ return ListMultipartsInfo{}, traceError(InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker, UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker, KeyMarker: keyMarker,
} })
} }
id, err := uuid.Parse(uploadIDMarker) id, err := uuid.Parse(uploadIDMarker)
if err != nil { if err != nil {
return ListMultipartsInfo{}, err return ListMultipartsInfo{}, traceError(err)
} }
if id.IsZero() { if id.IsZero() {
return ListMultipartsInfo{}, MalformedUploadID{ return ListMultipartsInfo{}, traceError(MalformedUploadID{
UploadID: uploadIDMarker, UploadID: uploadIDMarker,
} })
} }
} }
return fs.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) return fs.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
@ -213,7 +221,7 @@ func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// request, returns back a unique upload id. // request, returns back a unique upload id.
// //
// Internally this function creates 'uploads.json' associated for the // Internally this function creates 'uploads.json' associated for the
// incoming object at '.minio/multipart/bucket/object/uploads.json' on // incoming object at '.minio.sys/multipart/bucket/object/uploads.json' on
// all the disks. `uploads.json` carries metadata regarding ongoing // all the disks. `uploads.json` carries metadata regarding ongoing
// multipart operation on the object. // multipart operation on the object.
func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[string]string) (uploadID string, err error) { func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[string]string) (uploadID string, err error) {
@ -225,9 +233,13 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
fsMeta.Meta = meta fsMeta.Meta = meta
} }
// This lock needs to be held for any changes to the directory contents of ".minio/multipart/object/" // generates random string on setting MINIO_DEBUG=lock, else returns empty string.
nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object)) // used for instrumentation on locks.
defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object)) opsID := getOpsID()
// This lock needs to be held for any changes to the directory contents of ".minio.sys/multipart/object/"
nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object), opsID)
defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object), opsID)
uploadID = getUUID() uploadID = getUUID()
initiated := time.Now().UTC() initiated := time.Now().UTC()
@ -235,9 +247,9 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
if err = fs.writeUploadJSON(bucket, object, uploadID, initiated); err != nil { if err = fs.writeUploadJSON(bucket, object, uploadID, initiated); err != nil {
return "", err return "", err
} }
fsMetaPath := path.Join(mpartMetaPrefix, bucket, object, uploadID, fsMetaJSONFile) uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil { if err = writeFSMetadata(fs.storage, minioMetaBucket, path.Join(uploadIDPath, fsMetaJSONFile), fsMeta); err != nil {
return "", toObjectErr(err, minioMetaBucket, fsMetaPath) return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
} }
// Return success. // Return success.
return uploadID, nil return uploadID, nil
@ -251,15 +263,15 @@ func (fs fsObjects) newMultipartUpload(bucket string, object string, meta map[st
func (fs fsObjects) NewMultipartUpload(bucket, object string, meta map[string]string) (string, error) { func (fs fsObjects) NewMultipartUpload(bucket, object string, meta map[string]string) (string, error) {
// Verify if bucket name is valid. // Verify if bucket name is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", BucketNameInvalid{Bucket: bucket} return "", traceError(BucketNameInvalid{Bucket: bucket})
} }
// Verify whether the bucket exists. // Verify whether the bucket exists.
if !fs.isBucketExist(bucket) { if !fs.isBucketExist(bucket) {
return "", BucketNotFound{Bucket: bucket} return "", traceError(BucketNotFound{Bucket: bucket})
} }
// Verify if object name is valid. // Verify if object name is valid.
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return "", ObjectNameInvalid{Bucket: bucket, Object: object} return "", traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
} }
return fs.newMultipartUpload(bucket, object, meta) return fs.newMultipartUpload(bucket, object, meta)
} }
@ -290,7 +302,14 @@ func getFSAppendDataPath(uploadID string) string {
} }
// Append parts to fsAppendDataFile. // Append parts to fsAppendDataFile.
func appendParts(disk StorageAPI, bucket, object, uploadID string) { func appendParts(disk StorageAPI, bucket, object, uploadID, opsID string) {
cleanupAppendPaths := func() {
// In case of any error, cleanup the append data and json files
// from the tmp so that we do not have any inconsistent append
// data/json files.
disk.DeleteFile(bucket, getFSAppendDataPath(uploadID))
disk.DeleteFile(bucket, getFSAppendMetaPath(uploadID))
}
uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID) uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
// fs-append.json path // fs-append.json path
fsAppendMetaPath := getFSAppendMetaPath(uploadID) fsAppendMetaPath := getFSAppendMetaPath(uploadID)
@ -298,20 +317,21 @@ func appendParts(disk StorageAPI, bucket, object, uploadID string) {
fsMetaPath := path.Join(mpartMetaPrefix, bucket, object, uploadID, fsMetaJSONFile) fsMetaPath := path.Join(mpartMetaPrefix, bucket, object, uploadID, fsMetaJSONFile)
// Lock the uploadID so that no one modifies fs.json // Lock the uploadID so that no one modifies fs.json
nsMutex.RLock(minioMetaBucket, uploadIDPath) nsMutex.RLock(minioMetaBucket, uploadIDPath, opsID)
fsMeta, err := readFSMetadata(disk, minioMetaBucket, fsMetaPath) fsMeta, err := readFSMetadata(disk, minioMetaBucket, fsMetaPath)
nsMutex.RUnlock(minioMetaBucket, uploadIDPath) nsMutex.RUnlock(minioMetaBucket, uploadIDPath, opsID)
if err != nil { if err != nil {
return return
} }
// Lock fs-append.json so that there is no parallel append to the file. // Lock fs-append.json so that there is no parallel append to the file.
nsMutex.Lock(minioMetaBucket, fsAppendMetaPath) nsMutex.Lock(minioMetaBucket, fsAppendMetaPath, opsID)
defer nsMutex.Unlock(minioMetaBucket, fsAppendMetaPath) defer nsMutex.Unlock(minioMetaBucket, fsAppendMetaPath, opsID)
fsAppendMeta, err := readFSMetadata(disk, minioMetaBucket, fsAppendMetaPath) fsAppendMeta, err := readFSMetadata(disk, minioMetaBucket, fsAppendMetaPath)
if err != nil { if err != nil {
if err != errFileNotFound { if errorCause(err) != errFileNotFound {
cleanupAppendPaths()
return return
} }
fsAppendMeta = fsMeta fsAppendMeta = fsMeta
@ -324,28 +344,14 @@ func appendParts(disk StorageAPI, bucket, object, uploadID string) {
return return
} }
// Hold write lock on the part so that there is no parallel upload on the part. // Hold write lock on the part so that there is no parallel upload on the part.
nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID, strconv.Itoa(part.Number))) partPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID, strconv.Itoa(part.Number))
defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID, strconv.Itoa(part.Number))) nsMutex.Lock(minioMetaBucket, partPath, opsID)
defer nsMutex.Unlock(minioMetaBucket, partPath, opsID)
// Proceed to append "part" // Proceed to append "part"
fsAppendDataPath := getFSAppendDataPath(uploadID) fsAppendDataPath := getFSAppendDataPath(uploadID)
tmpDataPath := path.Join(tmpMetaPrefix, getUUID())
if part.Number != 1 {
// Move it to tmp location before appending so that we don't leave inconsistent data
// if server crashes during append operation.
err = disk.RenameFile(minioMetaBucket, fsAppendDataPath, minioMetaBucket, tmpDataPath)
if err != nil {
return
}
// Delete fs-append.json so that we don't leave a stale file if server crashes
// when the part is being appended to the tmp file.
err = disk.DeleteFile(minioMetaBucket, fsAppendMetaPath)
if err != nil {
return
}
}
// Path to the part that needs to be appended. // Path to the part that needs to be appended.
partPath := path.Join(mpartMetaPrefix, bucket, object, uploadID, part.Name) partPath = path.Join(mpartMetaPrefix, bucket, object, uploadID, part.Name)
offset := int64(0) offset := int64(0)
totalLeft := part.Size totalLeft := part.Size
buf := make([]byte, readSizeV1) buf := make([]byte, readSizeV1)
@ -357,7 +363,8 @@ func appendParts(disk StorageAPI, bucket, object, uploadID string) {
var n int64 var n int64
n, err = disk.ReadFile(minioMetaBucket, partPath, offset, buf[:curLeft]) n, err = disk.ReadFile(minioMetaBucket, partPath, offset, buf[:curLeft])
if n > 0 { if n > 0 {
if err = disk.AppendFile(minioMetaBucket, tmpDataPath, buf[:n]); err != nil { if err = disk.AppendFile(minioMetaBucket, fsAppendDataPath, buf[:n]); err != nil {
cleanupAppendPaths()
return return
} }
} }
@ -365,51 +372,54 @@ func appendParts(disk StorageAPI, bucket, object, uploadID string) {
if err == io.EOF || err == io.ErrUnexpectedEOF { if err == io.EOF || err == io.ErrUnexpectedEOF {
break break
} }
cleanupAppendPaths()
return return
} }
offset += n offset += n
totalLeft -= n totalLeft -= n
} }
// All good, the part has been appended to the tmp file, rename it back.
if err = disk.RenameFile(minioMetaBucket, tmpDataPath, minioMetaBucket, fsAppendDataPath); err != nil {
return
}
fsAppendMeta.AddObjectPart(part.Number, part.Name, part.ETag, part.Size) fsAppendMeta.AddObjectPart(part.Number, part.Name, part.ETag, part.Size)
// Overwrite previous fs-append.json
if err = writeFSMetadata(disk, minioMetaBucket, fsAppendMetaPath, fsAppendMeta); err != nil { if err = writeFSMetadata(disk, minioMetaBucket, fsAppendMetaPath, fsAppendMeta); err != nil {
cleanupAppendPaths()
return return
} }
// If there are more parts that need to be appended to fsAppendDataFile // If there are more parts that need to be appended to fsAppendDataFile
_, appendNeeded = partToAppend(fsMeta, fsAppendMeta) _, appendNeeded = partToAppend(fsMeta, fsAppendMeta)
if appendNeeded { if appendNeeded {
go appendParts(disk, bucket, object, uploadID) go appendParts(disk, bucket, object, uploadID, opsID)
} }
} }
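appendParts keeps re-launching itself until partToAppend reports nothing left to do. A plausible sketch of that selector, assuming parts are appended strictly in part-number order (the return type name objectPartInfo is assumed):

	func partToAppend(fsMeta fsMetaV1, fsAppendMeta fsMetaV1) (part objectPartInfo, appendNeeded bool) {
		if len(fsMeta.Parts) == 0 {
			return
		}
		// The next part to append is the one immediately after the last
		// part already recorded in fs-append.json.
		nextPartNum := len(fsAppendMeta.Parts) + 1
		nextPartIndex := fsMeta.ObjectPartIndex(nextPartNum)
		if nextPartIndex == -1 {
			return
		}
		return fsMeta.Parts[nextPartIndex], true
	}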
// PutObjectPart - reads incoming data until EOF for the part file on // PutObjectPart - reads incoming data until EOF for the part file on
// an ongoing multipart transaction. Internally incoming data is // an ongoing multipart transaction. Internally incoming data is
// written to '.minio/tmp' location and safely renamed to // written to '.minio.sys/tmp' location and safely renamed to
// '.minio/multipart' for each part. // '.minio.sys/multipart' for each part.
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) { func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", BucketNameInvalid{Bucket: bucket} return "", traceError(BucketNameInvalid{Bucket: bucket})
} }
// Verify whether the bucket exists. // Verify whether the bucket exists.
if !fs.isBucketExist(bucket) { if !fs.isBucketExist(bucket) {
return "", BucketNotFound{Bucket: bucket} return "", traceError(BucketNotFound{Bucket: bucket})
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return "", ObjectNameInvalid{Bucket: bucket, Object: object} return "", traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
} }
uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID) uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
nsMutex.RLock(minioMetaBucket, uploadIDPath) // generates random string on setting MINIO_DEBUG=lock, else returns empty string.
// used for instrumentation on locks.
opsID := getOpsID()
nsMutex.RLock(minioMetaBucket, uploadIDPath, opsID)
// Just check if the uploadID exists to avoid copy if it doesn't. // Just check if the uploadID exists to avoid copy if it doesn't.
uploadIDExists := fs.isUploadIDExists(bucket, object, uploadID) uploadIDExists := fs.isUploadIDExists(bucket, object, uploadID)
nsMutex.RUnlock(minioMetaBucket, uploadIDPath) nsMutex.RUnlock(minioMetaBucket, uploadIDPath, opsID)
if !uploadIDExists { if !uploadIDExists {
return "", InvalidUploadID{UploadID: uploadID} return "", traceError(InvalidUploadID{UploadID: uploadID})
} }
partSuffix := fmt.Sprintf("object%d", partID) partSuffix := fmt.Sprintf("object%d", partID)
@ -443,7 +453,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// bytes than specified in request header. // bytes than specified in request header.
if bytesWritten < size { if bytesWritten < size {
fs.storage.DeleteFile(minioMetaBucket, tmpPartPath) fs.storage.DeleteFile(minioMetaBucket, tmpPartPath)
return "", IncompleteBody{} return "", traceError(IncompleteBody{})
} }
// Validate if payload is valid. // Validate if payload is valid.
@ -452,7 +462,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Incoming payload wrong, delete the temporary object. // Incoming payload wrong, delete the temporary object.
fs.storage.DeleteFile(minioMetaBucket, tmpPartPath) fs.storage.DeleteFile(minioMetaBucket, tmpPartPath)
// Error return. // Error return.
return "", toObjectErr(err, bucket, object) return "", toObjectErr(traceError(err), bucket, object)
} }
} }
@ -462,17 +472,21 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// MD5 mismatch, delete the temporary object. // MD5 mismatch, delete the temporary object.
fs.storage.DeleteFile(minioMetaBucket, tmpPartPath) fs.storage.DeleteFile(minioMetaBucket, tmpPartPath)
// Returns md5 mismatch. // Returns md5 mismatch.
return "", BadDigest{md5Hex, newMD5Hex} return "", traceError(BadDigest{md5Hex, newMD5Hex})
} }
} }
// generates random string on setting MINIO_DEBUG=lock, else returns empty string.
// used for instrumentation on locks.
opsID = getOpsID()
// Hold write lock as we are updating fs.json // Hold write lock as we are updating fs.json
nsMutex.Lock(minioMetaBucket, uploadIDPath) nsMutex.Lock(minioMetaBucket, uploadIDPath, opsID)
defer nsMutex.Unlock(minioMetaBucket, uploadIDPath) defer nsMutex.Unlock(minioMetaBucket, uploadIDPath, opsID)
// Just check if the uploadID exists to avoid copy if it doesn't. // Just check if the uploadID exists to avoid copy if it doesn't.
if !fs.isUploadIDExists(bucket, object, uploadID) { if !fs.isUploadIDExists(bucket, object, uploadID) {
return "", InvalidUploadID{UploadID: uploadID} return "", traceError(InvalidUploadID{UploadID: uploadID})
} }
fsMetaPath := pathJoin(uploadIDPath, fsMetaJSONFile) fsMetaPath := pathJoin(uploadIDPath, fsMetaJSONFile)
@ -486,21 +500,21 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
err = fs.storage.RenameFile(minioMetaBucket, tmpPartPath, minioMetaBucket, partPath) err = fs.storage.RenameFile(minioMetaBucket, tmpPartPath, minioMetaBucket, partPath)
if err != nil { if err != nil {
if dErr := fs.storage.DeleteFile(minioMetaBucket, tmpPartPath); dErr != nil { if dErr := fs.storage.DeleteFile(minioMetaBucket, tmpPartPath); dErr != nil {
return "", toObjectErr(dErr, minioMetaBucket, tmpPartPath) return "", toObjectErr(traceError(dErr), minioMetaBucket, tmpPartPath)
} }
return "", toObjectErr(err, minioMetaBucket, partPath) return "", toObjectErr(traceError(err), minioMetaBucket, partPath)
} }
uploadIDPath = path.Join(mpartMetaPrefix, bucket, object, uploadID)
if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil { if err = writeFSMetadata(fs.storage, minioMetaBucket, path.Join(uploadIDPath, fsMetaJSONFile), fsMeta); err != nil {
return "", toObjectErr(err, minioMetaBucket, fsMetaPath) return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
} }
go appendParts(fs.storage, bucket, object, uploadID) go appendParts(fs.storage, bucket, object, uploadID, opsID)
return newMD5Hex, nil return newMD5Hex, nil
} }
// listObjectParts - wrapper scanning through // listObjectParts - wrapper scanning through
// '.minio/multipart/bucket/object/UPLOADID'. Lists all the parts // '.minio.sys/multipart/bucket/object/UPLOADID'. Lists all the parts
// saved inside '.minio/multipart/bucket/object/UPLOADID'. // saved inside '.minio.sys/multipart/bucket/object/UPLOADID'.
func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) { func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
result := ListPartsInfo{} result := ListPartsInfo{}
@ -521,7 +535,7 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
partNamePath := path.Join(mpartMetaPrefix, bucket, object, uploadID, part.Name) partNamePath := path.Join(mpartMetaPrefix, bucket, object, uploadID, part.Name)
fi, err = fs.storage.StatFile(minioMetaBucket, partNamePath) fi, err = fs.storage.StatFile(minioMetaBucket, partNamePath)
if err != nil { if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, partNamePath) return ListPartsInfo{}, toObjectErr(traceError(err), minioMetaBucket, partNamePath)
} }
result.Parts = append(result.Parts, partInfo{ result.Parts = append(result.Parts, partInfo{
PartNumber: part.Number, PartNumber: part.Number,
@ -559,21 +573,26 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) { func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ListPartsInfo{}, BucketNameInvalid{Bucket: bucket} return ListPartsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
} }
// Verify whether the bucket exists. // Verify whether the bucket exists.
if !fs.isBucketExist(bucket) { if !fs.isBucketExist(bucket) {
return ListPartsInfo{}, BucketNotFound{Bucket: bucket} return ListPartsInfo{}, traceError(BucketNotFound{Bucket: bucket})
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return ListPartsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: object} return ListPartsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
} }
// generates random string on setting MINIO_DEBUG=lock, else returns empty string.
// used for instrumentation on locks.
opsID := getOpsID()
// Hold lock so that there is no competing abort-multipart-upload or complete-multipart-upload. // Hold lock so that there is no competing abort-multipart-upload or complete-multipart-upload.
nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID)) nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID), opsID)
defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID)) defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID), opsID)
if !fs.isUploadIDExists(bucket, object, uploadID) { if !fs.isUploadIDExists(bucket, object, uploadID) {
return ListPartsInfo{}, InvalidUploadID{UploadID: uploadID} return ListPartsInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
} }
return fs.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) return fs.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
} }
@ -587,55 +606,59 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) { func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", BucketNameInvalid{Bucket: bucket} return "", traceError(BucketNameInvalid{Bucket: bucket})
} }
// Verify whether the bucket exists. // Verify whether the bucket exists.
if !fs.isBucketExist(bucket) { if !fs.isBucketExist(bucket) {
return "", BucketNotFound{Bucket: bucket} return "", traceError(BucketNotFound{Bucket: bucket})
} }
if !IsValidObjectName(object) { if !IsValidObjectName(object) {
return "", ObjectNameInvalid{ return "", traceError(ObjectNameInvalid{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
} })
} }
uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID) uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
// generates random string on setting MINIO_DEBUG=lock, else returns empty string.
// used for instrumentation on locks.
opsID := getOpsID()
// Hold lock so that // Hold lock so that
// 1) no one aborts this multipart upload // 1) no one aborts this multipart upload
// 2) no one does a parallel complete-multipart-upload on this // 2) no one does a parallel complete-multipart-upload on this
// multipart upload // multipart upload
nsMutex.Lock(minioMetaBucket, uploadIDPath) nsMutex.Lock(minioMetaBucket, uploadIDPath, opsID)
defer nsMutex.Unlock(minioMetaBucket, uploadIDPath) defer nsMutex.Unlock(minioMetaBucket, uploadIDPath, opsID)
if !fs.isUploadIDExists(bucket, object, uploadID) { if !fs.isUploadIDExists(bucket, object, uploadID) {
return "", InvalidUploadID{UploadID: uploadID} return "", traceError(InvalidUploadID{UploadID: uploadID})
} }
// fs-append.json path // fs-append.json path
fsAppendMetaPath := getFSAppendMetaPath(uploadID) fsAppendMetaPath := getFSAppendMetaPath(uploadID)
// Lock fs-append.json so that no parallel appendParts() is being done. // Lock fs-append.json so that no parallel appendParts() is being done.
nsMutex.Lock(minioMetaBucket, fsAppendMetaPath) nsMutex.Lock(minioMetaBucket, fsAppendMetaPath, opsID)
defer nsMutex.Unlock(minioMetaBucket, fsAppendMetaPath) defer nsMutex.Unlock(minioMetaBucket, fsAppendMetaPath, opsID)
// Calculate s3 compatible md5sum for complete multipart. // Calculate s3 compatible md5sum for complete multipart.
s3MD5, err := completeMultipartMD5(parts...) s3MD5, err := completeMultipartMD5(parts...)
if err != nil { if err != nil {
return "", err return "", traceError(err)
} }
// Read saved fs metadata for ongoing multipart. // Read saved fs metadata for ongoing multipart.
fsMetaPath := pathJoin(uploadIDPath, fsMetaJSONFile) fsMetaPath := pathJoin(uploadIDPath, fsMetaJSONFile)
fsMeta, err := readFSMetadata(fs.storage, minioMetaBucket, fsMetaPath) fsMeta, err := readFSMetadata(fs.storage, minioMetaBucket, fsMetaPath)
if err != nil { if err != nil {
return "", toObjectErr(err, minioMetaBucket, fsMetaPath) return "", toObjectErr(traceError(err), minioMetaBucket, fsMetaPath)
} }
fsAppendMeta, err := readFSMetadata(fs.storage, minioMetaBucket, fsAppendMetaPath) fsAppendMeta, err := readFSMetadata(fs.storage, minioMetaBucket, fsAppendMetaPath)
if err == nil && isPartsSame(fsAppendMeta.Parts, parts) { if err == nil && isPartsSame(fsAppendMeta.Parts, parts) {
fsAppendDataPath := getFSAppendDataPath(uploadID) fsAppendDataPath := getFSAppendDataPath(uploadID)
if err = fs.storage.RenameFile(minioMetaBucket, fsAppendDataPath, bucket, object); err != nil { if err = fs.storage.RenameFile(minioMetaBucket, fsAppendDataPath, bucket, object); err != nil {
return "", toObjectErr(err, minioMetaBucket, fsAppendDataPath) return "", toObjectErr(traceError(err), minioMetaBucket, fsAppendDataPath)
} }
// Remove the append-file metadata file in tmp location as we no longer need it. // Remove the append-file metadata file in tmp location as we no longer need it.
fs.storage.DeleteFile(minioMetaBucket, fsAppendMetaPath) fs.storage.DeleteFile(minioMetaBucket, fsAppendMetaPath)
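completeMultipartMD5, called above, produces the S3-compatible multipart ETag. Conventionally that is the MD5 of the concatenated per-part MD5 digests, suffixed with '-<part count>'; a hedged sketch (assumes "crypto/md5", "encoding/hex", and "strconv" imports):

	func completeMultipartMD5Sketch(parts ...completePart) (string, error) {
		var allMD5Bytes []byte
		for _, part := range parts {
			// Each part's ETag is the hex-encoded MD5 of its data.
			md5Bytes, err := hex.DecodeString(part.ETag)
			if err != nil {
				return "", err
			}
			allMD5Bytes = append(allMD5Bytes, md5Bytes...)
		}
		sum := md5.Sum(allMD5Bytes)
		return hex.EncodeToString(sum[:]) + "-" + strconv.Itoa(len(parts)), nil
	}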
@ -649,18 +672,18 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
for i, part := range parts { for i, part := range parts {
partIdx := fsMeta.ObjectPartIndex(part.PartNumber) partIdx := fsMeta.ObjectPartIndex(part.PartNumber)
if partIdx == -1 { if partIdx == -1 {
return "", InvalidPart{} return "", traceError(InvalidPart{})
} }
if fsMeta.Parts[partIdx].ETag != part.ETag { if fsMeta.Parts[partIdx].ETag != part.ETag {
return "", BadDigest{} return "", traceError(BadDigest{})
} }
// All parts except the last part have to be at least 5MB. // All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) { if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) {
return "", PartTooSmall{ return "", traceError(PartTooSmall{
PartNumber: part.PartNumber, PartNumber: part.PartNumber,
PartSize: fsMeta.Parts[partIdx].Size, PartSize: fsMeta.Parts[partIdx].Size,
PartETag: part.ETag, PartETag: part.ETag,
} })
} }
// Construct part suffix. // Construct part suffix.
partSuffix := fmt.Sprintf("object%d", part.PartNumber) partSuffix := fmt.Sprintf("object%d", part.PartNumber)
@ -676,7 +699,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
n, err = fs.storage.ReadFile(minioMetaBucket, multipartPartFile, offset, buf[:curLeft]) n, err = fs.storage.ReadFile(minioMetaBucket, multipartPartFile, offset, buf[:curLeft])
if n > 0 { if n > 0 {
if err = fs.storage.AppendFile(minioMetaBucket, tempObj, buf[:n]); err != nil { if err = fs.storage.AppendFile(minioMetaBucket, tempObj, buf[:n]); err != nil {
return "", toObjectErr(err, minioMetaBucket, tempObj) return "", toObjectErr(traceError(err), minioMetaBucket, tempObj)
} }
} }
if err != nil { if err != nil {
@ -684,9 +707,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
break break
} }
if err == errFileNotFound { if err == errFileNotFound {
return "", InvalidPart{} return "", traceError(InvalidPart{})
} }
return "", toObjectErr(err, minioMetaBucket, multipartPartFile) return "", toObjectErr(traceError(err), minioMetaBucket, multipartPartFile)
} }
offset += n offset += n
totalLeft -= n totalLeft -= n
@ -697,9 +720,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
err = fs.storage.RenameFile(minioMetaBucket, tempObj, bucket, object) err = fs.storage.RenameFile(minioMetaBucket, tempObj, bucket, object)
if err != nil { if err != nil {
if dErr := fs.storage.DeleteFile(minioMetaBucket, tempObj); dErr != nil { if dErr := fs.storage.DeleteFile(minioMetaBucket, tempObj); dErr != nil {
return "", toObjectErr(dErr, minioMetaBucket, tempObj) return "", toObjectErr(traceError(dErr), minioMetaBucket, tempObj)
} }
return "", toObjectErr(err, bucket, object) return "", toObjectErr(traceError(err), bucket, object)
} }
} }
@ -713,7 +736,8 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
} }
fsMeta.Meta["md5Sum"] = s3MD5 fsMeta.Meta["md5Sum"] = s3MD5
fsMetaPath = path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile) fsMetaPath := path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile)
// Write the metadata to a temp file and rename it to the actual location.
if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil { if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil {
return "", toObjectErr(err, bucket, object) return "", toObjectErr(err, bucket, object)
} }
@ -721,19 +745,23 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// Cleanup all the parts if everything else has been safely committed. // Cleanup all the parts if everything else has been safely committed.
if err = cleanupUploadedParts(bucket, object, uploadID, fs.storage); err != nil { if err = cleanupUploadedParts(bucket, object, uploadID, fs.storage); err != nil {
return "", toObjectErr(err, bucket, object) return "", toObjectErr(traceError(err), bucket, object)
} }
// generates random string on setting MINIO_DEBUG=lock, else returns empty string.
// used for instrumentation on locks.
opsID = getOpsID()
// Hold the lock so that two parallel complete-multipart-uploads do not // Hold the lock so that two parallel complete-multipart-uploads do not
// leave a stale uploads.json behind. // leave a stale uploads.json behind.
nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object)) nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object), opsID)
defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object)) defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object), opsID)
// Validate if there are other incomplete upload-id's present for // Validate if there are other incomplete upload-id's present for
// the object, if yes do not attempt to delete 'uploads.json'. // the object, if yes do not attempt to delete 'uploads.json'.
uploadsJSON, err := readUploadsJSON(bucket, object, fs.storage) uploadsJSON, err := readUploadsJSON(bucket, object, fs.storage)
if err != nil { if err != nil {
return "", toObjectErr(err, minioMetaBucket, object) return "", toObjectErr(traceError(err), minioMetaBucket, object)
} }
// If we have successfully read `uploads.json`, then we proceed to // If we have successfully read `uploads.json`, then we proceed to
// purge or update `uploads.json`. // purge or update `uploads.json`.
@ -743,14 +771,14 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
} }
if len(uploadsJSON.Uploads) > 0 { if len(uploadsJSON.Uploads) > 0 {
if err = fs.updateUploadsJSON(bucket, object, uploadsJSON); err != nil { if err = fs.updateUploadsJSON(bucket, object, uploadsJSON); err != nil {
return "", toObjectErr(err, minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object)) return "", toObjectErr(traceError(err), minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
} }
// Return success. // Return success.
return s3MD5, nil return s3MD5, nil
} }
if err = fs.storage.DeleteFile(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)); err != nil { if err = fs.storage.DeleteFile(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)); err != nil {
return "", toObjectErr(err, minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object)) return "", toObjectErr(traceError(err), minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
} }
// Return md5sum. // Return md5sum.
@ -759,7 +787,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// abortMultipartUpload - wrapper for purging an ongoing multipart // abortMultipartUpload - wrapper for purging an ongoing multipart
// transaction, deletes uploadID entry from `uploads.json` and purges // transaction, deletes uploadID entry from `uploads.json` and purges
// the directory at '.minio/multipart/bucket/object/uploadID' holding // the directory at '.minio.sys/multipart/bucket/object/uploadID' holding
// all the upload parts. // all the upload parts.
func (fs fsObjects) abortMultipartUpload(bucket, object, uploadID string) error { func (fs fsObjects) abortMultipartUpload(bucket, object, uploadID string) error {
// Cleanup all uploaded parts. // Cleanup all uploaded parts.
@ -785,9 +813,9 @@ func (fs fsObjects) abortMultipartUpload(bucket, object, uploadID string) error
return nil return nil
} }
} // No more pending uploads for the object, we purge the entire } // No more pending uploads for the object, we purge the entire
// entry at '.minio/multipart/bucket/object'. // entry at '.minio.sys/multipart/bucket/object'.
if err = fs.storage.DeleteFile(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)); err != nil { if err = fs.storage.DeleteFile(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)); err != nil {
return toObjectErr(err, minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object)) return toObjectErr(traceError(err), minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
} }
return nil return nil
} }
@@ -807,27 +835,31 @@ func (fs fsObjects) abortMultipartUpload(bucket, object, uploadID string) error
 func (fs fsObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return BucketNameInvalid{Bucket: bucket}
+		return traceError(BucketNameInvalid{Bucket: bucket})
 	}
 	if !fs.isBucketExist(bucket) {
-		return BucketNotFound{Bucket: bucket}
+		return traceError(BucketNotFound{Bucket: bucket})
 	}
 	if !IsValidObjectName(object) {
-		return ObjectNameInvalid{Bucket: bucket, Object: object}
+		return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
+
+	// generates random string on setting MINIO_DEBUG=lock, else returns empty string.
+	// used for instrumentation on locks.
+	opsID := getOpsID()
+
 	// Hold lock so that there is no competing complete-multipart-upload or put-object-part.
-	nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID))
-	defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID))
+	nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID), opsID)
+	defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID), opsID)
 	if !fs.isUploadIDExists(bucket, object, uploadID) {
-		return InvalidUploadID{UploadID: uploadID}
+		return traceError(InvalidUploadID{UploadID: uploadID})
 	}
 	fsAppendMetaPath := getFSAppendMetaPath(uploadID)
 	// Lock fs-append.json so that no parallel appendParts() is being done.
-	nsMutex.Lock(minioMetaBucket, fsAppendMetaPath)
-	defer nsMutex.Unlock(minioMetaBucket, fsAppendMetaPath)
+	nsMutex.Lock(minioMetaBucket, fsAppendMetaPath, opsID)
+	defer nsMutex.Unlock(minioMetaBucket, fsAppendMetaPath, opsID)
 	err := fs.abortMultipartUpload(bucket, object, uploadID)
 	return err
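The `traceError`/`errorCause` pair threaded through these hunks wraps an error with the call stack captured at the failure site, while still letting callers compare against sentinel errors. A minimal standalone sketch of the idea (the `Error` type and stack-capture details below are illustrative assumptions, not the commit's actual implementation):

package main

import (
	"errors"
	"fmt"
	"runtime"
)

// Error wraps an underlying error together with the call stack captured at
// wrap time (illustrative stand-in for the real wrapper type).
type Error struct {
	cause error
	trace []string
}

func (e *Error) Error() string { return e.cause.Error() }

// traceError - wrap err with the current call stack.
func traceError(err error) error {
	if err == nil {
		return nil
	}
	pcs := make([]uintptr, 8)
	n := runtime.Callers(2, pcs)
	frames := runtime.CallersFrames(pcs[:n])
	var trace []string
	for {
		frame, more := frames.Next()
		trace = append(trace, fmt.Sprintf("%s:%d", frame.File, frame.Line))
		if !more {
			break
		}
	}
	return &Error{cause: err, trace: trace}
}

// errorCause - unwrap to the underlying error, so sentinel comparisons such
// as `errorCause(err) == errFileNotFound` keep working on wrapped errors.
func errorCause(err error) error {
	if e, ok := err.(*Error); ok {
		return e.cause
	}
	return err
}

func main() {
	errFileNotFound := errors.New("file not found")
	err := traceError(errFileNotFound)
	fmt.Println(err == errFileNotFound)             // false: wrapped.
	fmt.Println(errorCause(err) == errFileNotFound) // true: compare the cause.
}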
cmd/fs-v1.go
View File
@@ -26,7 +26,6 @@ import (
 	"sort"
 	"strings"
 
-	"github.com/minio/minio/pkg/disk"
 	"github.com/minio/minio/pkg/mimedb"
 )
@@ -68,7 +67,7 @@ func newFSObjects(disk string) (ObjectLayer, error) {
 		return nil, err
 	}
 
-	// Attempt to create `.minio`.
+	// Attempt to create `.minio.sys`.
 	err = storage.MakeVol(minioMetaBucket)
 	if err != nil {
 		switch err {
@@ -146,8 +145,8 @@ func (fs fsObjects) Shutdown() error {
 
 // StorageInfo - returns underlying storage statistics.
 func (fs fsObjects) StorageInfo() StorageInfo {
-	info, err := disk.GetInfo(fs.physicalDisk)
-	fatalIf(err, "Unable to get disk info "+fs.physicalDisk)
+	info, err := fs.storage.DiskInfo()
+	errorIf(err, "Unable to get disk info %#v", fs.storage)
 	return StorageInfo{
 		Total: info.Total,
 		Free:  info.Free,
@@ -160,10 +159,10 @@ func (fs fsObjects) StorageInfo() StorageInfo {
 func (fs fsObjects) MakeBucket(bucket string) error {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return BucketNameInvalid{Bucket: bucket}
+		return traceError(BucketNameInvalid{Bucket: bucket})
 	}
 	if err := fs.storage.MakeVol(bucket); err != nil {
-		return toObjectErr(err, bucket)
+		return toObjectErr(traceError(err), bucket)
 	}
 	return nil
 }
@@ -172,11 +171,11 @@ func (fs fsObjects) MakeBucket(bucket string) error {
 func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
+		return BucketInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
 	}
 	vi, err := fs.storage.StatVol(bucket)
 	if err != nil {
-		return BucketInfo{}, toObjectErr(err, bucket)
+		return BucketInfo{}, toObjectErr(traceError(err), bucket)
 	}
 	return BucketInfo{
 		Name: bucket,
@@ -189,7 +188,7 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
 	var bucketInfos []BucketInfo
 	vols, err := fs.storage.ListVols()
 	if err != nil {
-		return nil, toObjectErr(err)
+		return nil, toObjectErr(traceError(err))
 	}
 	for _, vol := range vols {
 		// StorageAPI can send volume names which are incompatible
@@ -214,11 +213,11 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
 func (fs fsObjects) DeleteBucket(bucket string) error {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return BucketNameInvalid{Bucket: bucket}
+		return traceError(BucketNameInvalid{Bucket: bucket})
 	}
 	// Attempt to delete regular bucket.
 	if err := fs.storage.DeleteVol(bucket); err != nil {
-		return toObjectErr(err, bucket)
+		return toObjectErr(traceError(err), bucket)
 	}
 	// Cleanup all the previously incomplete multiparts.
 	if err := cleanupDir(fs.storage, path.Join(minioMetaBucket, mpartMetaPrefix), bucket); err != nil && err != errVolumeNotFound {
@@ -233,34 +232,34 @@ func (fs fsObjects) DeleteBucket(bucket string) error {
 func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64, writer io.Writer) (err error) {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return BucketNameInvalid{Bucket: bucket}
+		return traceError(BucketNameInvalid{Bucket: bucket})
 	}
 	// Verify if object is valid.
 	if !IsValidObjectName(object) {
-		return ObjectNameInvalid{Bucket: bucket, Object: object}
+		return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
 	// Offset and length cannot be negative.
 	if offset < 0 || length < 0 {
-		return toObjectErr(errUnexpected, bucket, object)
+		return toObjectErr(traceError(errUnexpected), bucket, object)
 	}
 	// Writer cannot be nil.
 	if writer == nil {
-		return toObjectErr(errUnexpected, bucket, object)
+		return toObjectErr(traceError(errUnexpected), bucket, object)
 	}
 	// Stat the file to get file size.
 	fi, err := fs.storage.StatFile(bucket, object)
 	if err != nil {
-		return toObjectErr(err, bucket, object)
+		return toObjectErr(traceError(err), bucket, object)
 	}
 	// Reply back invalid range if the input offset and length fall out of range.
 	if offset > fi.Size || length > fi.Size {
-		return InvalidRange{offset, length, fi.Size}
+		return traceError(InvalidRange{offset, length, fi.Size})
 	}
 	// Reply if we have inputs with offset and length falling out of file size range.
 	if offset+length > fi.Size {
-		return InvalidRange{offset, length, fi.Size}
+		return traceError(InvalidRange{offset, length, fi.Size})
 	}
 	var totalLeft = length
@@ -289,11 +288,11 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
 			offset += int64(nw)
 		}
 		if ew != nil {
-			err = ew
+			err = traceError(ew)
 			break
 		}
 		if nr != int64(nw) {
-			err = io.ErrShortWrite
+			err = traceError(io.ErrShortWrite)
 			break
 		}
 	}
@@ -301,7 +300,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
 			break
 		}
 		if er != nil {
-			err = er
+			err = traceError(er)
 			break
 		}
 		if totalLeft == 0 {
@@ -312,22 +311,15 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
 	return toObjectErr(err, bucket, object)
 }
 
-// GetObjectInfo - get object info.
-func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
-	// Verify if bucket is valid.
-	if !IsValidBucketName(bucket) {
-		return ObjectInfo{}, (BucketNameInvalid{Bucket: bucket})
-	}
-	// Verify if object is valid.
-	if !IsValidObjectName(object) {
-		return ObjectInfo{}, (ObjectNameInvalid{Bucket: bucket, Object: object})
-	}
+// getObjectInfo - get object info.
+func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
 	fi, err := fs.storage.StatFile(bucket, object)
 	if err != nil {
-		return ObjectInfo{}, toObjectErr(err, bucket, object)
+		return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
 	}
 	fsMeta, err := readFSMetadata(fs.storage, minioMetaBucket, path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile))
-	if err != nil && err != errFileNotFound {
+	// Ignore error if the metadata file is not found, other errors must be returned.
+	if err != nil && errorCause(err) != errFileNotFound {
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
@@ -358,17 +350,30 @@ func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
 	}, nil
 }
 
-// PutObject - create an object.
-func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (string, error) {
+// GetObjectInfo - get object info.
+func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return "", BucketNameInvalid{Bucket: bucket}
+		return ObjectInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
+	}
+	// Verify if object is valid.
+	if !IsValidObjectName(object) {
+		return ObjectInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
+	}
+	return fs.getObjectInfo(bucket, object)
+}
+
+// PutObject - create an object.
+func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
+	// Verify if bucket is valid.
+	if !IsValidBucketName(bucket) {
+		return ObjectInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
 	}
 	if !IsValidObjectName(object) {
-		return "", ObjectNameInvalid{
+		return ObjectInfo{}, traceError(ObjectNameInvalid{
 			Bucket: bucket,
 			Object: object,
-		}
+		})
 	}
 	// No metadata is set, allocate a new one.
 	if metadata == nil {
@@ -397,9 +402,9 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 
 	if size == 0 {
 		// For size 0 we write a 0byte file.
-		err := fs.storage.AppendFile(minioMetaBucket, tempObj, []byte(""))
+		err = fs.storage.AppendFile(minioMetaBucket, tempObj, []byte(""))
 		if err != nil {
-			return "", toObjectErr(err, bucket, object)
+			return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
 		}
 	} else {
 		// Allocate a buffer to Read() from request body
@@ -409,17 +414,18 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 		}
 		buf := make([]byte, int(bufSize))
 		teeReader := io.TeeReader(limitDataReader, md5Writer)
-		bytesWritten, err := fsCreateFile(fs.storage, teeReader, buf, minioMetaBucket, tempObj)
+		var bytesWritten int64
+		bytesWritten, err = fsCreateFile(fs.storage, teeReader, buf, minioMetaBucket, tempObj)
 		if err != nil {
 			fs.storage.DeleteFile(minioMetaBucket, tempObj)
-			return "", toObjectErr(err, bucket, object)
+			return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
 		}
 		// Should return IncompleteBody{} error when reader has fewer
 		// bytes than specified in request header.
 		if bytesWritten < size {
 			fs.storage.DeleteFile(minioMetaBucket, tempObj)
-			return "", IncompleteBody{}
+			return ObjectInfo{}, traceError(IncompleteBody{})
 		}
 	}
@@ -435,7 +441,7 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 			// Incoming payload wrong, delete the temporary object.
 			fs.storage.DeleteFile(minioMetaBucket, tempObj)
 			// Error return.
-			return "", toObjectErr(vErr, bucket, object)
+			return ObjectInfo{}, toObjectErr(traceError(vErr), bucket, object)
 		}
 	}
@@ -446,14 +452,14 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 			// MD5 mismatch, delete the temporary object.
 			fs.storage.DeleteFile(minioMetaBucket, tempObj)
 			// Returns md5 mismatch.
-			return "", BadDigest{md5Hex, newMD5Hex}
+			return ObjectInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
 		}
 	}
 
 	// Entire object was written to the temp location, now it's safe to rename it to the actual location.
-	err := fs.storage.RenameFile(minioMetaBucket, tempObj, bucket, object)
+	err = fs.storage.RenameFile(minioMetaBucket, tempObj, bucket, object)
 	if err != nil {
-		return "", toObjectErr(err, bucket, object)
+		return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
 	}
 
 	// Save additional metadata only if extended headers such as "X-Amz-Meta-" are set.
@@ -464,12 +470,15 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 		fsMetaPath := path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile)
 		if err = writeFSMetadata(fs.storage, minioMetaBucket, fsMetaPath, fsMeta); err != nil {
-			return "", toObjectErr(err, bucket, object)
+			return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
 		}
 	}
 
-	// Return md5sum, successfully wrote object.
-	return newMD5Hex, nil
+	objInfo, err = fs.getObjectInfo(bucket, object)
+	if err == nil {
+		// If MINIO_ENABLE_FSMETA is not enabled objInfo.MD5Sum will be empty.
+		objInfo.MD5Sum = newMD5Hex
+	}
+	return objInfo, err
 }
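With this change `PutObject` returns the full `ObjectInfo` instead of just the md5 hex string, so callers can take the ETag and the size from one return value. A self-contained sketch of the new calling convention (the `putObject` stub and trimmed `ObjectInfo` below are toy stand-ins, not the commit's FS implementation):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// ObjectInfo - trimmed-down stand-in for the commit's ObjectInfo.
type ObjectInfo struct {
	Bucket string
	Name   string
	Size   int64
	MD5Sum string
}

// putObject - toy stand-in for the new PutObject signature, which returns
// ObjectInfo instead of the bare md5 hex string.
func putObject(bucket, object string, data io.Reader) (ObjectInfo, error) {
	h := md5.New()
	n, err := io.Copy(h, data) // pretend to persist the bytes while hashing.
	if err != nil {
		return ObjectInfo{}, err
	}
	return ObjectInfo{
		Bucket: bucket,
		Name:   object,
		Size:   n,
		MD5Sum: hex.EncodeToString(h.Sum(nil)),
	}, nil
}

func main() {
	objInfo, err := putObject("my-bucket", "my-object", strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	// Handlers can now set the ETag straight from the returned info.
	fmt.Printf("ETag: %q, size: %d\n", objInfo.MD5Sum, objInfo.Size)
}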
 // DeleteObject - deletes an object from a bucket, this operation is destructive
@@ -477,17 +486,17 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 func (fs fsObjects) DeleteObject(bucket, object string) error {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return BucketNameInvalid{Bucket: bucket}
+		return traceError(BucketNameInvalid{Bucket: bucket})
 	}
 	if !IsValidObjectName(object) {
-		return ObjectNameInvalid{Bucket: bucket, Object: object}
+		return traceError(ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
 	err := fs.storage.DeleteFile(minioMetaBucket, path.Join(bucketMetaPrefix, bucket, object, fsMetaJSONFile))
 	if err != nil && err != errFileNotFound {
-		return toObjectErr(err, bucket, object)
+		return toObjectErr(traceError(err), bucket, object)
 	}
 	if err = fs.storage.DeleteFile(bucket, object); err != nil {
-		return toObjectErr(err, bucket, object)
+		return toObjectErr(traceError(err), bucket, object)
 	}
 	return nil
 }
@@ -518,11 +527,11 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
 			return
 		}
 		if fileInfo, err = fs.storage.StatFile(bucket, entry); err != nil {
-			return
+			return FileInfo{}, traceError(err)
 		}
 		fsMeta, mErr := readFSMetadata(fs.storage, minioMetaBucket, path.Join(bucketMetaPrefix, bucket, entry, fsMetaJSONFile))
-		if mErr != nil && mErr != errFileNotFound {
-			return FileInfo{}, mErr
+		if mErr != nil && errorCause(mErr) != errFileNotFound {
+			return FileInfo{}, traceError(mErr)
 		}
 		if len(fsMeta.Meta) == 0 {
 			fsMeta.Meta = make(map[string]string)
@@ -535,28 +544,28 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return ListObjectsInfo{}, BucketNameInvalid{Bucket: bucket}
+		return ListObjectsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
 	}
 	// Verify if bucket exists.
 	if !isBucketExist(fs.storage, bucket) {
-		return ListObjectsInfo{}, BucketNotFound{Bucket: bucket}
+		return ListObjectsInfo{}, traceError(BucketNotFound{Bucket: bucket})
 	}
 	if !IsValidObjectPrefix(prefix) {
-		return ListObjectsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix}
+		return ListObjectsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
 	}
 	// Verify if delimiter is anything other than '/', which we do not support.
 	if delimiter != "" && delimiter != slashSeparator {
-		return ListObjectsInfo{}, UnsupportedDelimiter{
+		return ListObjectsInfo{}, traceError(UnsupportedDelimiter{
 			Delimiter: delimiter,
-		}
+		})
 	}
 	// Verify if marker has prefix.
 	if marker != "" {
 		if !strings.HasPrefix(marker, prefix) {
-			return ListObjectsInfo{}, InvalidMarkerPrefixCombination{
+			return ListObjectsInfo{}, traceError(InvalidMarkerPrefixCombination{
 				Marker: marker,
 				Prefix: prefix,
-			}
+			})
 		}
 	}
@@ -611,7 +620,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
 		// For any walk error return right away.
 		if walkResult.err != nil {
 			// File not found is a valid case.
-			if walkResult.err == errFileNotFound {
+			if errorCause(walkResult.err) == errFileNotFound {
 				return ListObjectsInfo{}, nil
 			}
 			return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix)
@@ -653,10 +662,15 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
 
 // HealObject - no-op for fs. Valid only for XL.
 func (fs fsObjects) HealObject(bucket, object string) error {
-	return NotImplemented{}
+	return traceError(NotImplemented{})
 }
 
 // HealListObjects - list objects for healing. Valid only for XL
 func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
-	return ListObjectsInfo{}, NotImplemented{}
+	return ListObjectsInfo{}, traceError(NotImplemented{})
+}
+
+// HealDiskMetadata -- heal disk metadata, not supported in FS
+func (fs fsObjects) HealDiskMetadata() error {
+	return NotImplemented{}
 }
cmd/fs-v1_test.go
View File
@@ -41,7 +41,11 @@ func TestNewFS(t *testing.T) {
 	}
 
 	// Initializes all disks with XL
-	_, err := newXLObjects(disks, nil)
+	err := formatDisks(disks, nil)
+	if err != nil {
+		t.Fatalf("Unable to format XL %s", err)
+	}
+	_, err = newXLObjects(disks, nil)
 	if err != nil {
 		t.Fatalf("Unable to initialize XL object, %s", err)
 	}
@@ -89,7 +93,7 @@ func TestFSShutdown(t *testing.T) {
 	for i := 1; i <= 5; i++ {
 		naughty := newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
 		fs.storage = naughty
-		if err := fs.Shutdown(); err != errFaultyDisk {
+		if err := fs.Shutdown(); errorCause(err) != errFaultyDisk {
 			t.Fatal(i, ", Got unexpected fs shutdown error: ", err)
 		}
 	}
cmd/globals.go
View File
@@ -21,6 +21,7 @@ import (
 	"github.com/fatih/color"
 	"github.com/minio/minio/pkg/objcache"
+	"os"
 )
 
 // Global constants for Minio.
@@ -42,6 +43,10 @@ const (
 var (
 	globalQuiet = false // Quiet flag set via command line
 	globalTrace = false // Trace flag set via environment setting.
+	globalDebug = false // Debug flag set to print debug info.
+	globalDebugLock = false // Lock debug info set via environment variable MINIO_DEBUG=lock.
+	globalDebugMemory = false // Memory debug info set via environment variable MINIO_DEBUG=mem.
 	// Add new global flags here.
 
 	// Maximum connections handled per
@@ -70,3 +75,15 @@ var (
 	colorBlue = color.New(color.FgBlue).SprintfFunc()
 	colorBold = color.New(color.Bold).SprintFunc()
 )
+
+// fetch from environment variables and set the global values related to debugging.
+func setGlobalsDebugFromEnv() {
+	debugEnv := os.Getenv("MINIO_DEBUG")
+	switch debugEnv {
+	case "lock":
+		globalDebugLock = true
+	case "mem":
+		globalDebugMemory = true
+	}
+	globalDebug = globalDebugLock || globalDebugMemory
+}
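A standalone sketch of how the MINIO_DEBUG switch behaves (the `main` wrapper and the prints are illustrative; in the server the value would normally be exported in the shell, e.g. `MINIO_DEBUG=lock minio server /data`):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Normally exported in the shell before starting the server; set here
	// only so the example is self-contained.
	os.Setenv("MINIO_DEBUG", "lock")

	var debugLock, debugMemory bool
	switch os.Getenv("MINIO_DEBUG") {
	case "lock":
		debugLock = true
	case "mem":
		debugMemory = true
	}
	debug := debugLock || debugMemory
	fmt.Println("lock:", debugLock, "mem:", debugMemory, "any:", debug)
}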
283
cmd/lock-instrument.go Normal file
View File
@@ -0,0 +1,283 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"time"
)
const (
debugRLockStr = "RLock"
debugWLockStr = "WLock"
)
// debugLockInfo - holds the status (ready/running/blocked) of an operation with the given operation ID.
type debugLockInfo struct {
lockType string // "RLock" or "WLock".
lockOrigin string // contains the trace of the function which invoked the lock, obtained from runtime.
status string // status can be running/ready/blocked.
since time.Time // time since when the current status holds true.
}
// debugLockInfoPerVolumePath - container for storing locking information for a unique (volume, path) pair.
// `ref` holds the reference count, i.e. the total number of locks held for the given <volume, path> pair.
// `running` counts the locks successfully held (not blocked) whose operations are under execution.
// `blocked` counts the operations blocked waiting on locks for the given <volume, path> pair.
type debugLockInfoPerVolumePath struct {
ref int64 // running + blocked operations.
running int64 // count of successful lock acquisitions with operations still running.
blocked int64 // count of operations blocked waiting on the lock.
lockInfo map[string]debugLockInfo // map of [operationID] debugLockInfo{operation, status, since}.
}
// newDebugLockInfoPerVolumePath - returns a new instance; one is created for every unique {volume, path} pair.
// It tracks total locks, the number of calls blocked on locks, and the number of locks held but not yet unlocked.
func newDebugLockInfoPerVolumePath() *debugLockInfoPerVolumePath {
return &debugLockInfoPerVolumePath{
lockInfo: make(map[string]debugLockInfo),
ref: 0,
blocked: 0,
running: 0,
}
}
// LockInfoNil - Returned if the lock info map is not initialized.
type LockInfoNil struct {
}
func (l LockInfoNil) Error() string {
return fmt.Sprintf("Debug Lock Map not initialized:\n1. Enable Lock Debugging using right ENV settings \n2. Make sure initNSLock() is called.")
}
// LockInfoOriginNotFound - While changing the state of the lock info it's important that the entry for
// the lock at the given origin exists; if not, `LockInfoOriginNotFound` is returned.
type LockInfoOriginNotFound struct {
volume string
path string
operationID string
lockOrigin string
}
func (l LockInfoOriginNotFound) Error() string {
return fmt.Sprintf("No lock state stored for the lock origined at \"%s\", for <volume> %s, <path> %s, <operationID> %s.",
l.lockOrigin, l.volume, l.path, l.operationID)
}
// LockInfoVolPathMssing - Error interface. Returned when the lock state entry for the given <volume, path> pair is missing.
type LockInfoVolPathMssing struct {
volume string
path string
}
func (l LockInfoVolPathMssing) Error() string {
return fmt.Sprintf("No entry in debug Lock Map for Volume: %s, path: %s.", l.volume, l.path)
}
// LockInfoOpsIDNotFound - Returned when the lock state info exists, but the entry for
// given operation ID doesn't exist.
type LockInfoOpsIDNotFound struct {
volume string
path string
operationID string
}
func (l LockInfoOpsIDNotFound) Error() string {
return fmt.Sprintf("No entry in lock info for <Operation ID> %s, <volume> %s, <path> %s.", l.operationID, l.volume, l.path)
}
// LockInfoStateNotBlocked - When an attempt to change the state of the lock from `blocked` to `running` is made,
// it's necessary that the state before the transition is "blocked"; otherwise LockInfoStateNotBlocked is returned.
type LockInfoStateNotBlocked struct {
volume string
path string
operationID string
}
func (l LockInfoStateNotBlocked) Error() string {
return fmt.Sprintf("Lock state should be \"Blocked\" for <volume> %s, <path> %s, <operationID> %s.", l.volume, l.path, l.operationID)
}
// statusBlockedToRunning - change the state of the lock from Blocked to Running.
func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockOrigin, operationID string, readLock bool) error {
// This function is not called with nsLockMap.lockMapMutex held, so the lock has to be taken explicitly here.
n.lockMapMutex.Lock()
defer n.lockMapMutex.Unlock()
if n.debugLockMap == nil {
return LockInfoNil{}
}
// new state info to be set for the lock.
newLockInfo := debugLockInfo{
lockOrigin: lockOrigin,
status: "Running",
since: time.Now().UTC(),
}
// set lock type.
if readLock {
newLockInfo.lockType = debugRLockStr
} else {
newLockInfo.lockType = debugWLockStr
}
// Check whether the lock info entry for the <volume, path> pair already exists and is not `nil`.
if debugLockMap, ok := n.debugLockMap[param]; ok {
// The `*debugLockInfoPerVolumePath` entry containing lock info for `param <volume, path>` could be `nil`.
if debugLockMap == nil {
return LockInfoNil{}
}
} else {
// The lock state info for the given <volume, path> pair should already exist.
// If not, return `LockInfoVolPathMssing`.
return LockInfoVolPathMssing{param.volume, param.path}
}
// Lock info for the given operation ID shouldn't be `nil`.
if n.debugLockMap[param].lockInfo == nil {
return LockInfoOpsIDNotFound{param.volume, param.path, operationID}
}
if lockInfo, ok := n.debugLockMap[param].lockInfo[operationID]; ok {
// The entry for the lock originated at `lockOrigin` should already exist.
// If not return `LockInfoOriginNotFound`.
if lockInfo.lockOrigin != lockOrigin {
return LockInfoOriginNotFound{param.volume, param.path, operationID, lockOrigin}
}
// Status of the lock should already be set to "Blocked".
// If not return `LockInfoStateNotBlocked`.
if lockInfo.status != "Blocked" {
return LockInfoStateNotBlocked{param.volume, param.path, operationID}
}
} else {
// The lock info entry for given `opsID` should already exist for given <volume, path> pair.
// If not return `LockInfoOpsIDNotFound`.
return LockInfoOpsIDNotFound{param.volume, param.path, operationID}
}
// All checks finished.
// changing the status of the operation from blocked to running and updating the time.
n.debugLockMap[param].lockInfo[operationID] = newLockInfo
// After the lock is unblocked, decrease the blocked counter.
n.blockedCounter--
// Increase the running counter.
n.runningLockCounter++
n.debugLockMap[param].blocked--
n.debugLockMap[param].running++
return nil
}
// statusNoneToBlocked - change the state of the lock from None to Blocked.
func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockOrigin, operationID string, readLock bool) error {
if n.debugLockMap == nil {
return LockInfoNil{}
}
newLockInfo := debugLockInfo{
lockOrigin: lockOrigin,
status: "Blocked",
since: time.Now().UTC(),
}
if readLock {
newLockInfo.lockType = debugRLockStr
} else {
newLockInfo.lockType = debugWLockStr
}
if lockInfo, ok := n.debugLockMap[param]; ok {
if lockInfo == nil {
// *debugLockInfoPerVolumePath entry is nil, initialize here to avoid any case of `nil` pointer access.
n.initLockInfoForVolumePath(param)
}
} else {
// State info entry for the given <volume, path> pair doesn't exist, initializing it.
n.initLockInfoForVolumePath(param)
}
// lockInfo is a map[string]debugLockInfo, which holds map[OperationID]{status,time, origin} of the lock.
if n.debugLockMap[param].lockInfo == nil {
n.debugLockMap[param].lockInfo = make(map[string]debugLockInfo)
}
// The status of the operation with the given operation ID is marked blocked till it gets unblocked from the lock.
n.debugLockMap[param].lockInfo[operationID] = newLockInfo
// Increment the Global lock counter.
n.globalLockCounter++
// Increment the counter for the number of blocked operations; decrement it after the lock unblocks.
n.blockedCounter++
// increment the reference of the lock for the given <volume,path> pair.
n.debugLockMap[param].ref++
// increment the blocked counter for the given <volume, path> pair.
n.debugLockMap[param].blocked++
return nil
}
// deleteLockInfoEntryForVolumePath - Deletes the lock state information for the given <volume, path> pair. Called when nsLk.ref count is 0.
func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error {
if n.debugLockMap == nil {
return LockInfoNil{}
}
// Delete the lock info for the given <volume, path> pair.
if _, found := n.debugLockMap[param]; found {
// Remove from the map if there are no more references for the given (volume,path) pair.
delete(n.debugLockMap, param)
} else {
return LockInfoVolPathMssing{param.volume, param.path}
}
return nil
}
// deleteLockInfoEntryForOps - Deletes the entry for the given opsID in the lock state information of the given <volume, path> pair.
// Called when the nsLk ref count for the given <volume, path> pair is not 0.
func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, operationID string) error {
if n.debugLockMap == nil {
return LockInfoNil{}
}
// delete the lock info for the given operation.
if infoMap, found := n.debugLockMap[param]; found {
// The operation finished holding the lock on the resource; remove the entry for the given operation ID.
if _, foundInfo := infoMap.lockInfo[operationID]; foundInfo {
// decrease the global running and lock reference counter.
n.runningLockCounter--
n.globalLockCounter--
// decrease the lock reference counter for the lock info for the given <volume,path> pair.
// decrease the running operation count. It's assumed that the operation is over once an attempt to release the lock is made.
infoMap.running--
// decrease the total reference count of locks held on the <volume,path> pair.
infoMap.ref--
delete(infoMap.lockInfo, operationID)
} else {
// Unlock request with an invalid operation ID is not accepted.
return LockInfoOpsIDNotFound{param.volume, param.path, operationID}
}
} else {
return LockInfoVolPathMssing{param.volume, param.path}
}
return nil
}
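Taken together, statusNoneToBlocked, statusBlockedToRunning, and deleteLockInfoEntryForOps form a small state machine whose counters maintain the invariant ref = running + blocked per <volume, path> pair. A stripped-down model of just the counter bookkeeping (volumes, paths, maps, and actual locking omitted; this is not the real nsLockMap):

package main

import "fmt"

// lockStats mirrors the ref/running/blocked counters kept per <volume, path>.
type lockStats struct {
	ref, running, blocked int64
}

// noneToBlocked - a lock was requested and is waiting.
func (s *lockStats) noneToBlocked() { s.ref++; s.blocked++ }

// blockedToRunning - the waiting lock was granted.
func (s *lockStats) blockedToRunning() { s.blocked--; s.running++ }

// release - the operation unlocked; drop it from the books.
func (s *lockStats) release() { s.running--; s.ref-- }

func main() {
	var s lockStats
	s.noneToBlocked()
	fmt.Printf("blocked:  %+v\n", s) // ref=1 running=0 blocked=1
	s.blockedToRunning()
	fmt.Printf("acquired: %+v\n", s) // ref=1 running=1 blocked=0
	s.release()
	fmt.Printf("released: %+v\n", s) // all counters back to 0
}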
// getOpsID - returns a randomly generated string ID if lock debugging is enabled,
// else returns an empty string.
func getOpsID() (opsID string) {
// check if lock debug is enabled.
if globalDebugLock {
// generate a random ID.
opsID = string(generateRequestID())
}
return opsID
}
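Callers are expected to obtain one opsID per operation and pass the same ID to the matching Lock/Unlock pair, as AbortMultipartUpload does earlier in this diff. A toy tagged mutex showing why the pairing matters (illustrative only; the real nsMutex also keys on <volume, path>):

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"sync"
)

// tracedMutex tags the current holder with an operation ID so instrumentation
// can attribute a held lock to the operation that took it.
type tracedMutex struct {
	mu     sync.Mutex
	holder string
}

func (t *tracedMutex) Lock(opsID string) {
	t.mu.Lock()
	t.holder = opsID
}

func (t *tracedMutex) Unlock(opsID string) {
	if t.holder != opsID {
		panic("Unlock called with a different opsID than Lock: " + opsID)
	}
	t.holder = ""
	t.mu.Unlock()
}

// newOpsID - random hex ID, standing in for getOpsID/generateRequestID.
func newOpsID() string {
	b := make([]byte, 4)
	rand.Read(b)
	return hex.EncodeToString(b)
}

func main() {
	var m tracedMutex
	opsID := newOpsID()
	m.Lock(opsID)
	fmt.Println("held by operation", opsID)
	m.Unlock(opsID)
}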
744
cmd/lock-instrument_test.go Normal file
View File
@@ -0,0 +1,744 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"testing"
"time"
)
type lockStateCase struct {
volume string
path string
lockOrigin string
opsID string
readLock bool // lock type.
setBlocked bool // initialize the initial state to blocked.
expectedErr error
// expected global lock stats.
expectedLockStatus string // Status of the lock: Blocked/Running.
expectedGlobalLockCount int // Total number of locks held across the system, includes blocked + held locks.
expectedBlockedLockCount int // Total blocked locks across the system.
expectedRunningLockCount int // Total successfully held locks (non-blocking).
// Expected lock status for the given <volume, path> pair.
expectedVolPathLockCount int // Total locks held for the given <volume, path> pair, includes blocked locks.
expectedVolPathRunningCount int // Total successfully held locks for the given <volume, path> pair.
expectedVolPathBlockCount int // Total locks blocked on the given <volume, path> pair.
}
// Used for validating the lock info obtained from the control RPC endpoint.
func verifyRPCLockInfoResponse(l lockStateCase, rpcLockInfoResponse SystemLockState, t TestErrHandler, testNum int) {
// Assert the total number of locks (blocked + acquired) in the system.
if rpcLockInfoResponse.TotalLocks != int64(l.expectedGlobalLockCount) {
t.Fatalf("Test %d: Expected the global lock counter to be %v, but got %v", testNum, int64(l.expectedGlobalLockCount),
rpcLockInfoResponse.TotalLocks)
}
// verify the count for total blocked locks.
if rpcLockInfoResponse.TotalBlockedLocks != int64(l.expectedBlockedLockCount) {
t.Fatalf("Test %d: Expected the total blocked lock counter to be %v, but got %v", testNum, int64(l.expectedBlockedLockCount),
rpcLockInfoResponse.TotalBlockedLocks)
}
// verify the count for total running locks.
if rpcLockInfoResponse.TotalAcquiredLocks != int64(l.expectedRunningLockCount) {
t.Fatalf("Test %d: Expected the total running lock counter to be %v, but got %v", testNum, int64(l.expectedRunningLockCount),
rpcLockInfoResponse.TotalAcquiredLocks)
}
for _, locksInfoPerObject := range rpcLockInfoResponse.LocksInfoPerObject {
// See whether the entry for the <bucket, object> exists in the RPC response.
if locksInfoPerObject.Bucket == l.volume && locksInfoPerObject.Object == l.path {
// Assert the total number of locks (blocked + acquired) for the given <bucket, object> pair.
if locksInfoPerObject.LocksOnObject != int64(l.expectedVolPathLockCount) {
t.Errorf("Test %d: Expected the total lock count for bucket: \"%s\", object: \"%s\" to be %v, but got %v", testNum,
l.volume, l.path, int64(l.expectedVolPathLockCount), locksInfoPerObject.LocksOnObject)
}
// Assert the total number of acquired locks for the given <bucket, object> pair.
if locksInfoPerObject.LocksAcquiredOnObject != int64(l.expectedVolPathRunningCount) {
t.Errorf("Test %d: Expected the acquired lock count for bucket: \"%s\", object: \"%s\" to be %v, but got %v", testNum,
l.volume, l.path, int64(l.expectedVolPathRunningCount), locksInfoPerObject.LocksAcquiredOnObject)
}
// Assert the total number of blocked locks for the given <bucket, object> pair.
if locksInfoPerObject.TotalBlockedLocks != int64(l.expectedVolPathBlockCount) {
t.Errorf("Test %d: Expected the blocked lock count for bucket: \"%s\", object: \"%s\" to be %v, but got %v", testNum,
l.volume, l.path, int64(l.expectedVolPathBlockCount), locksInfoPerObject.TotalBlockedLocks)
}
// Flag to mark whether there's an entry in the RPC lock info response for given opsID.
var opsIDfound bool
for _, opsLockState := range locksInfoPerObject.LockDetailsOnObject {
// first check whether the entry for the given operation ID exists.
if opsLockState.OperationID == l.opsID {
opsIDfound = true
// asserting the type of lock (RLock/WLock) from the RPC lock info response.
if l.readLock {
if opsLockState.LockType != debugRLockStr {
t.Errorf("Test case %d: Expected the lock type to be \"%s\"", testNum, debugRLockStr)
}
} else {
if opsLockState.LockType != debugWLockStr {
t.Errorf("Test case %d: Expected the lock type to be \"%s\"", testNum, debugWLockStr)
}
}
if opsLockState.Status != l.expectedLockStatus {
t.Errorf("Test case %d: Expected the status of the operation to be \"%s\", got \"%s\"", testNum, l.expectedLockStatus, opsLockState.Status)
}
// if opsLockState.LockOrigin != l.lockOrigin {
// t.Fatalf("Test case %d: Expected the origin of the lock to be \"%s\", got \"%s\"", testNum, opsLockState.LockOrigin, l.lockOrigin)
// }
// all check satisfied, return here.
// Any mismatch in the earlier checks would have ended the tests due to `Fatalf`,
// control reaching here implies that all checks are satisfied.
return
}
}
// opsID not found.
// No entry for an operation with given operation ID exists.
if !opsIDfound {
t.Fatalf("Test case %d: Entry for OpsId: \"%s\" not found in <bucket>: \"%s\", <path>: \"%s\" doesn't exist in the RPC response", testNum, l.opsID, l.volume, l.path)
}
}
}
// No entry exists for given <bucket, object> pair in the RPC response.
t.Errorf("Test case %d: Entry for <bucket>: \"%s\", <object>: \"%s\" doesn't exist in the RPC response", testNum, l.volume, l.path)
}
// Asserts the lock counters from the global nsMutex in-memory lock map against the expected ones.
func verifyGlobalLockStats(l lockStateCase, t *testing.T, testNum int) {
nsMutex.lockMapMutex.Lock()
// Verifying the lock stats.
if nsMutex.globalLockCounter != int64(l.expectedGlobalLockCount) {
t.Errorf("Test %d: Expected the global lock counter to be %v, but got %v", testNum, int64(l.expectedGlobalLockCount),
nsMutex.globalLockCounter)
}
// verify the count for total blocked locks.
if nsMutex.blockedCounter != int64(l.expectedBlockedLockCount) {
t.Errorf("Test %d: Expected the total blocked lock counter to be %v, but got %v", testNum, int64(l.expectedBlockedLockCount),
nsMutex.blockedCounter)
}
// verify the count for total running locks.
if nsMutex.runningLockCounter != int64(l.expectedRunningLockCount) {
t.Errorf("Test %d: Expected the total running lock counter to be %v, but got %v", testNum, int64(l.expectedRunningLockCount),
nsMutex.runningLockCounter)
}
nsMutex.lockMapMutex.Unlock()
// Verifying again with the JSON response of the lock info.
// Verifying the lock stats.
sysLockState, err := generateSystemLockResponse()
if err != nil {
t.Fatalf("Obtaining lock info failed with <ERROR> %s", err)
}
if sysLockState.TotalLocks != int64(l.expectedGlobalLockCount) {
t.Errorf("Test %d: Expected the global lock counter to be %v, but got %v", testNum, int64(l.expectedGlobalLockCount),
sysLockState.TotalLocks)
}
// verify the count for total blocked locks.
if sysLockState.TotalBlockedLocks != int64(l.expectedBlockedLockCount) {
t.Errorf("Test %d: Expected the total blocked lock counter to be %v, but got %v", testNum, int64(l.expectedBlockedLockCount),
sysLockState.TotalBlockedLocks)
}
// verify the count for total running locks.
if sysLockState.TotalAcquiredLocks != int64(l.expectedRunningLockCount) {
t.Errorf("Test %d: Expected the total running lock counter to be %v, but got %v", testNum, int64(l.expectedRunningLockCount),
sysLockState.TotalAcquiredLocks)
}
}
// Verify the lock counter for entries of given <volume, path> pair.
func verifyLockStats(l lockStateCase, t *testing.T, testNum int) {
nsMutex.lockMapMutex.Lock()
defer nsMutex.lockMapMutex.Unlock()
param := nsParam{l.volume, l.path}
// Verify the total locks (blocked+running) for given <vol,path> pair.
if nsMutex.debugLockMap[param].ref != int64(l.expectedVolPathLockCount) {
t.Errorf("Test %d: Expected the total lock count for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum,
param.volume, param.path, int64(l.expectedVolPathLockCount), nsMutex.debugLockMap[param].ref)
}
// Verify the total running locks for given <volume, path> pair.
if nsMutex.debugLockMap[param].running != int64(l.expectedVolPathRunningCount) {
t.Errorf("Test %d: Expected the total running locks for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, param.volume, param.path,
int64(l.expectedVolPathRunningCount), nsMutex.debugLockMap[param].running)
}
// Verify the total blocked locks for the given <volume, path> pair.
if nsMutex.debugLockMap[param].blocked != int64(l.expectedVolPathBlockCount) {
t.Errorf("Test %d: Expected the total blocked locks for volume: \"%s\", path: \"%s\" to be %v, but got %v", testNum, param.volume, param.path,
int64(l.expectedVolPathBlockCount), nsMutex.debugLockMap[param].blocked)
}
}
// verifyLockState - function which asserts the expected lock info in the system with the actual values in the nsMutex.
func verifyLockState(l lockStateCase, t *testing.T, testNum int) {
param := nsParam{l.volume, l.path}
verifyGlobalLockStats(l, t, testNum)
nsMutex.lockMapMutex.Lock()
// Verifying the lock status fields.
if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
if lockInfo, ok := debugLockMap.lockInfo[l.opsID]; ok {
// Validating the lock type field in the debug lock information.
if l.readLock {
if lockInfo.lockType != debugRLockStr {
t.Errorf("Test case %d: Expected the lock type in the lock debug info to be \"%s\"", testNum, debugRLockStr)
}
} else {
if lockInfo.lockType != debugWLockStr {
t.Errorf("Test case %d: Expected the lock type in the lock debug info to be \"%s\"", testNum, debugWLockStr)
}
}
// // validating the lock origin.
// if l.lockOrigin != lockInfo.lockOrigin {
// t.Fatalf("Test %d: Expected the lock origin info to be \"%s\", but got \"%s\"", testNum, l.lockOrigin, lockInfo.lockOrigin)
// }
// validating the status of the lock.
if lockInfo.status != l.expectedLockStatus {
t.Errorf("Test %d: Expected the status of the lock to be \"%s\", but got \"%s\"", testNum, l.expectedLockStatus, lockInfo.status)
}
} else {
// Stop the tests if lock debug entry for given <volume, path> pair is not found.
t.Errorf("Test case %d: Expected an debug lock entry for opsID \"%s\"", testNum, l.opsID)
}
} else {
// To change the status the entry for given <volume, path> should exist in the lock info struct.
t.Errorf("Test case %d: Debug lock entry for volume: %s, path: %s doesn't exist", testNum, param.volume, param.path)
}
// verifyLockStats holds its own lock.
nsMutex.lockMapMutex.Unlock()
// verify the lock count.
verifyLockStats(l, t, testNum)
}
// TestNewDebugLockInfoPerVolumePath - Validates the values initialized by newDebugLockInfoPerVolumePath().
func TestNewDebugLockInfoPerVolumePath(t *testing.T) {
lockInfo := newDebugLockInfoPerVolumePath()
if lockInfo.ref != 0 {
t.Errorf("Expected initial reference value of total locks to be 0, got %d", lockInfo.ref)
}
if lockInfo.blocked != 0 {
t.Errorf("Expected initial reference of blocked locks to be 0, got %d", lockInfo.blocked)
}
if lockInfo.running != 0 {
t.Errorf("Expected initial reference value of held locks to be 0, got %d", lockInfo.running)
}
}
// TestNsLockMapStatusBlockedToRunning - Validates the function for changing the lock state from blocked to running.
func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
testCases := []struct {
volume string
path string
lockOrigin string
opsID string
readLock bool // lock type.
setBlocked bool // initialize the initial state to blocked.
expectedErr error
}{
// Test case - 1.
{
volume: "my-bucket",
path: "my-object",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "abcd1234",
readLock: true,
setBlocked: true,
// expected metrics.
expectedErr: nil,
},
// Test case - 2.
// No entry for <volume, path> pair.
// So an attempt to change the state of the lock from `Blocked`->`Running` should fail.
{
volume: "my-bucket",
path: "my-object-2",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "abcd1234",
readLock: false,
setBlocked: false,
// expected metrics.
expectedErr: LockInfoVolPathMssing{"my-bucket", "my-object-2"},
},
// Test case - 3.
// Entry for the given operationID doesn't exist in the lock state info.
{
volume: "my-bucket",
path: "my-object",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "ops-Id-not-registered",
readLock: true,
setBlocked: false,
// expected metrics.
expectedErr: LockInfoOpsIDNotFound{"my-bucket", "my-object", "ops-Id-not-registered"},
},
// Test case - 4.
// Test case with non-existent lock origin.
{
volume: "my-bucket",
path: "my-object",
lockOrigin: "Bad Origin",
opsID: "abcd1234",
readLock: true,
setBlocked: false,
// expected metrics.
expectedErr: LockInfoOriginNotFound{"my-bucket", "my-object", "abcd1234", "Bad Origin"},
},
// Test case - 5.
// Test case with write lock.
{
volume: "my-bucket",
path: "my-object",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "abcd1234",
readLock: false,
setBlocked: true,
// expected metrics.
expectedErr: nil,
},
}
param := nsParam{testCases[0].volume, testCases[0].path}
// Testing before the initialization is done.
// Since the data structures are not initialized, `LockInfoNil` is expected.
actualErr := nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
testCases[0].opsID, testCases[0].readLock)
expectedNilErr := LockInfoNil{}
if actualErr != expectedNilErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
}
nsMutex = &nsLockMap{
// entries of <volume,path> -> stateInfo of locks, for instrumentation purpose.
debugLockMap: make(map[nsParam]*debugLockInfoPerVolumePath),
lockMap: make(map[nsParam]*nsLock),
}
// Entry for <volume, path> pair is set to nil.
// Should fail with `LockInfoNil{}`.
nsMutex.debugLockMap[param] = nil
actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
testCases[0].opsID, testCases[0].readLock)
expectedNilErr = LockInfoNil{}
if actualErr != expectedNilErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
}
// Setting the lock info to be `nil`.
nsMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{
lockInfo: nil, // setting the lockinfo to nil.
ref: 0,
blocked: 0,
running: 0,
}
actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
testCases[0].opsID, testCases[0].readLock)
expectedOpsErr := LockInfoOpsIDNotFound{testCases[0].volume, testCases[0].path, testCases[0].opsID}
if actualErr != expectedOpsErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsErr, actualErr)
}
// Next case: tests whether an attempt to change the state of the lock to "Running" fails
// when the initial state is already "Running". Such an attempt should fail.
nsMutex.debugLockMap[param] = &debugLockInfoPerVolumePath{
lockInfo: make(map[string]debugLockInfo),
ref: 0,
blocked: 0,
running: 0,
}
// Setting the status of the lock to be "Running".
// The initial state of the lock should set to "Blocked", otherwise its not possible to change the state from "Blocked" -> "Running".
nsMutex.debugLockMap[param].lockInfo[testCases[0].opsID] = debugLockInfo{
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
status: "Running", // State set to "Running". Should fail with `LockInfoStateNotBlocked`.
since: time.Now().UTC(),
}
actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
testCases[0].opsID, testCases[0].readLock)
expectedBlockErr := LockInfoStateNotBlocked{testCases[0].volume, testCases[0].path, testCases[0].opsID}
if actualErr != expectedBlockErr {
t.Fatalf("Errors mismatch: Expected: \"%s\", got: \"%s\"", expectedBlockErr, actualErr)
}
// enabling lock instrumentation.
globalDebugLock = true
// initializing the locks.
initNSLock(false)
// set debug lock info to `nil` so that the next tests have to initialize them again.
defer func() {
globalDebugLock = false
nsMutex.debugLockMap = nil
}()
// Iterate over the cases and assert the result.
for i, testCase := range testCases {
param := nsParam{testCase.volume, testCase.path}
// status of the lock to be set to "Blocked", before setting Blocked->Running.
if testCase.setBlocked {
nsMutex.lockMapMutex.Lock()
err := nsMutex.statusNoneToBlocked(param, testCase.lockOrigin, testCase.opsID, testCase.readLock)
if err != nil {
t.Fatalf("Test %d: Initializing the initial state to Blocked failed <ERROR> %s", i+1, err)
}
nsMutex.lockMapMutex.Unlock()
}
// invoking the method under test.
actualErr = nsMutex.statusBlockedToRunning(param, testCase.lockOrigin, testCase.opsID, testCase.readLock)
if actualErr != testCase.expectedErr {
t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr)
}
// In case of no error proceed with validating the lock state information.
if actualErr == nil {
// debug entry for given <volume, path> pair should exist.
if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
if lockInfo, ok := debugLockMap.lockInfo[testCase.opsID]; ok {
// Validating the lock type field in the debug lock information.
if testCase.readLock {
if lockInfo.lockType != debugRLockStr {
t.Errorf("Test case %d: Expected the lock type in the lock debug info to be \"%s\"", i+1, debugRLockStr)
}
} else {
if lockInfo.lockType != debugWLockStr {
t.Errorf("Test case %d: Expected the lock type in the lock debug info to be \"%s\"", i+1, debugWLockStr)
}
}
// validating the lock origin.
if testCase.lockOrigin != lockInfo.lockOrigin {
t.Errorf("Test %d: Expected the lock origin info to be \"%s\", but got \"%s\"", i+1, testCase.lockOrigin, lockInfo.lockOrigin)
}
// validating the status of the lock.
if lockInfo.status != "Running" {
t.Errorf("Test %d: Expected the status of the lock to be \"%s\", but got \"%s\"", i+1, "Running", lockInfo.status)
}
} else {
// Stop the tests if lock debug entry for given <volume, path> pair is not found.
t.Fatalf("Test case %d: Expected an debug lock entry for opsID \"%s\"", i+1, testCase.opsID)
}
} else {
// To change the status the entry for given <volume, path> should exist in the lock info struct.
t.Fatalf("Test case %d: Debug lock entry for volume: %s, path: %s doesn't exist", i+1, param.volume, param.path)
}
}
}
}
// TestNsLockMapStatusNoneToBlocked - Validates the function for changing the lock state to blocked
func TestNsLockMapStatusNoneToBlocked(t *testing.T) {
testCases := []lockStateCase{
// Test case - 1.
{
volume: "my-bucket",
path: "my-object",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "abcd1234",
readLock: true,
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 1,
expectedRunningLockCount: 0,
expectedBlockedLockCount: 1,
expectedVolPathLockCount: 1,
expectedVolPathRunningCount: 0,
expectedVolPathBlockCount: 1,
},
// Test case - 2.
// No entry for the <volume, path> pair yet.
// statusNoneToBlocked should create the entry and mark the lock as `Blocked`.
{
volume: "my-bucket",
path: "my-object-2",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "abcd1234",
readLock: false,
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 2,
expectedRunningLockCount: 0,
expectedBlockedLockCount: 2,
expectedVolPathLockCount: 1,
expectedVolPathRunningCount: 0,
expectedVolPathBlockCount: 1,
},
// Test case - 3.
// Entry for the given operationID doesn't exist in the lock state info.
// The entry should be created and relevant counters should be set.
{
volume: "my-bucket",
path: "my-object",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "ops-Id-not-registered",
readLock: true,
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 3,
expectedRunningLockCount: 0,
expectedBlockedLockCount: 3,
expectedVolPathLockCount: 2,
expectedVolPathRunningCount: 0,
expectedVolPathBlockCount: 2,
},
}
param := nsParam{testCases[0].volume, testCases[0].path}
// Testing before the initialization is done.
// Since the data structures are not initialized, `LockInfoNil` is expected.
actualErr := nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
testCases[0].opsID, testCases[0].readLock)
expectedNilErr := LockInfoNil{}
if actualErr != expectedNilErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
}
// enabling lock instrumentation.
globalDebugLock = true
// initializing the locks.
initNSLock(false)
// set debug lock info to `nil` so that the next tests have to initialize them again.
defer func() {
globalDebugLock = false
nsMutex.debugLockMap = nil
}()
// Iterate over the cases and assert the result.
for i, testCase := range testCases {
nsMutex.lockMapMutex.Lock()
param := nsParam{testCase.volume, testCase.path}
actualErr := nsMutex.statusNoneToBlocked(param, testCase.lockOrigin, testCase.opsID, testCase.readLock)
if actualErr != testCase.expectedErr {
t.Fatalf("Test %d: Errors mismatch: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, actualErr)
}
nsMutex.lockMapMutex.Unlock()
if actualErr == nil {
verifyLockState(testCase, t, i+1)
}
}
}
// TestNsLockMapDeleteLockInfoEntryForOps - Validates the removal of the entry for the given operation ID from the lock info.
func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
testCases := []lockStateCase{
// Test case - 1.
{
volume: "my-bucket",
path: "my-object",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "abcd1234",
readLock: true,
// expected metrics.
},
}
// case - 1.
// Testing the case where delete lock info is attempted even before the lock is initialized.
param := nsParam{testCases[0].volume, testCases[0].path}
// Testing before the initialization is done.
actualErr := nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
expectedNilErr := LockInfoNil{}
if actualErr != expectedNilErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
}
// enabling lock instrumentation.
globalDebugLock = true
// initializing the locks.
initNSLock(false)
// set debug lock info to `nil` so that the next tests have to initialize them again.
defer func() {
globalDebugLock = false
nsMutex.debugLockMap = nil
}()
// case - 2.
// Case where an attempt to delete the entry for a non-existent <volume, path> pair is made.
nonExistParam := nsParam{volume: "non-exist-volume", path: "non-exist-path"}
actualErr = nsMutex.deleteLockInfoEntryForOps(nonExistParam, testCases[0].opsID)
expectedVolPathErr := LockInfoVolPathMssing{nonExistParam.volume, nonExistParam.path}
if actualErr != expectedVolPathErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedVolPathErr, actualErr)
}
// Case - 3.
// Lock state is set to Running and then an attempt to delete the info for a non-existent opsID is made.
nsMutex.lockMapMutex.Lock()
err := nsMutex.statusNoneToBlocked(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock)
if err != nil {
t.Fatalf("Setting lock status to Blocked failed: <ERROR> %s", err)
}
nsMutex.lockMapMutex.Unlock()
err = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock)
if err != nil {
t.Fatalf("Setting lock status to Running failed: <ERROR> %s", err)
}
actualErr = nsMutex.deleteLockInfoEntryForOps(param, "non-existent-OpsID")
expectedOpsIDErr := LockInfoOpsIDNotFound{param.volume, param.path, "non-existent-OpsID"}
if actualErr != expectedOpsIDErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedOpsIDErr, actualErr)
}
// case - 4.
// An attempt to delete a registered entry is made.
// All metrics should be 0 after deleting the entry.
// Verify that the entry for the opsID exists.
if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
if _, ok := debugLockMap.lockInfo[testCases[0].opsID]; !ok {
t.Fatalf("Entry for OpsID \"%s\" in <volume> %s, <path> %s should have existed. ", testCases[0].opsID, param.volume, param.path)
}
} else {
t.Fatalf("Entry for <volume> %s, <path> %s should have existed. ", param.volume, param.path)
}
actualErr = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
if actualErr != nil {
t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr)
}
// Verify that the entry for the opsID doesn't exist.
if debugLockMap, ok := nsMutex.debugLockMap[param]; ok {
if _, ok := debugLockMap.lockInfo[testCases[0].opsID]; ok {
t.Fatalf("The entry for opsID \"%s\" should have been deleted", testCases[0].opsID)
}
} else {
t.Fatalf("Entry for <volume> %s, <path> %s should have existed. ", param.volume, param.path)
}
if nsMutex.runningLockCounter != int64(0) {
t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), nsMutex.runningLockCounter)
}
if nsMutex.blockedCounter != int64(0) {
t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), nsMutex.blockedCounter)
}
if nsMutex.globalLockCounter != int64(0) {
t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), nsMutex.globalLockCounter)
}
}
// TestNsLockMapDeleteLockInfoEntryForVolumePath - Tests validate the logic for removal
// of entry for given <volume, path> pair from lock info.
func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
testCases := []lockStateCase{
// Test case - 1.
{
volume: "my-bucket",
path: "my-object",
lockOrigin: "/home/vadmeste/work/go/src/github.com/minio/minio/xl-v1-object.go:683 +0x2a",
opsID: "abcd1234",
readLock: true,
// expected metrics.
},
}
// case - 1.
// Testing the case where delete lock info is attempted even before the lock is initialized.
param := nsParam{testCases[0].volume, testCases[0].path}
// Testing before the initialization is done.
actualErr := nsMutex.deleteLockInfoEntryForVolumePath(param)
expectedNilErr := LockInfoNil{}
if actualErr != expectedNilErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
}
// enabling lock instrumentation.
globalDebugLock = true
// initializing the locks.
initNSLock(false)
// set debug lock info to `nil` so that the next tests have to initialize them again.
defer func() {
globalDebugLock = false
nsMutex.debugLockMap = nil
}()
// case - 2.
// Case where an attempt to delete the entry for a non-existent <volume, path> pair is made.
nonExistParam := nsParam{volume: "non-exist-volume", path: "non-exist-path"}
actualErr = nsMutex.deleteLockInfoEntryForVolumePath(nonExistParam)
expectedVolPathErr := LockInfoVolPathMssing{nonExistParam.volume, nonExistParam.path}
if actualErr != expectedVolPathErr {
t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedVolPathErr, actualErr)
}
// case - 3.
// An attempt to delete a registered entry is made.
// All metrics should be 0 after deleting the entry.
// Registering the entry first.
nsMutex.lockMapMutex.Lock()
err := nsMutex.statusNoneToBlocked(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock)
if err != nil {
t.Fatalf("Setting lock status to Blocked failed: <ERROR> %s", err)
}
nsMutex.lockMapMutex.Unlock()
err = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock)
if err != nil {
t.Fatalf("Setting lock status to Running failed: <ERROR> %s", err)
}
// Verify that the entry for the given <volume, path> pair exists.
if _, ok := nsMutex.debugLockMap[param]; !ok {
t.Fatalf("Entry for <volume> %s, <path> %s should have existed.", param.volume, param.path)
}
// first delete the entry for the operation ID.
_ = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
actualErr = nsMutex.deleteLockInfoEntryForVolumePath(param)
if actualErr != nil {
t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr)
}
// Verify that the entry for the <volume, path> pair doesn't exist.
if _, ok := nsMutex.debugLockMap[param]; ok {
t.Fatalf("Entry for <volume> %s, <path> %s should have been deleted. ", param.volume, param.path)
}
// The lock count values should be 0.
if nsMutex.runningLockCounter != int64(0) {
t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), nsMutex.runningLockCounter)
}
if nsMutex.blockedCounter != int64(0) {
t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), nsMutex.blockedCounter)
}
if nsMutex.globalLockCounter != int64(0) {
t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), nsMutex.globalLockCounter)
}
}

227
cmd/lock-rpc-server.go Normal file
View File

@ -0,0 +1,227 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net/rpc"
"path"
"strings"
"sync"
"time"
router "github.com/gorilla/mux"
)
const lockRPCPath = "/minio/lock"
// LockArgs - besides the lock name, holds a Token and Timestamp for session
// authentication and validation across server restarts.
type LockArgs struct {
Name string
Token string
Timestamp time.Time
}
// SetToken - sets the token to the supplied value.
func (l *LockArgs) SetToken(token string) {
l.Token = token
}
// SetTimestamp - sets the timestamp to the supplied value.
func (l *LockArgs) SetTimestamp(tstamp time.Time) {
l.Timestamp = tstamp
}
type lockServer struct {
rpcPath string
mutex sync.Mutex
// e.g., when a Lock(name) is held: map[string][]bool{"name": []bool{true}}
// when one or more RLock()s are held: map[string][]bool{"name": []bool{false, false}}
lockMap map[string][]bool
timestamp time.Time // Timestamp set at the time of initialization. Resets naturally on minio server restart.
}
func (l *lockServer) verifyArgs(args *LockArgs) error {
if !l.timestamp.Equal(args.Timestamp) {
return errInvalidTimestamp
}
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return nil
}
/// Distributed lock handlers
// LoginHandler - handles LoginHandler RPC call.
func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultTokenExpiry)
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.Timestamp = l.timestamp
return nil
}
// Lock - rpc handler for (single) write lock operation.
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
_, ok := l.lockMap[args.Name]
// No locks held on the given name.
if !ok {
*reply = true
l.lockMap[args.Name] = []bool{true}
} else {
// Either a read or write lock is held on the given name.
*reply = false
}
return nil
}
// Unlock - rpc handler for (single) write unlock operation.
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
locksHeld, ok := l.lockMap[args.Name]
// No lock is held on the given name; there must be some issue on the lock client side.
if !ok {
*reply = false
return fmt.Errorf("Unlock attempted on an un-locked entity: %s", args.Name)
} else if len(locksHeld) == 1 && locksHeld[0] == true {
*reply = true
delete(l.lockMap, args.Name)
return nil
} else {
*reply = false
return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(locksHeld))
}
}
// RLock - rpc handler for read lock operation.
func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
locksHeld, ok := l.lockMap[args.Name]
// No locks held on the given name.
if !ok {
// First read-lock to be held on *name.
l.lockMap[args.Name] = []bool{false}
*reply = true
} else if len(locksHeld) == 1 && locksHeld[0] == true {
// A write-lock is held, read lock can't be granted.
*reply = false
} else {
// Add an entry for this read lock.
l.lockMap[args.Name] = append(locksHeld, false)
*reply = true
}
return nil
}
// RUnlock - rpc handler for read unlock operation.
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
locksHeld, ok := l.lockMap[args.Name]
if !ok {
*reply = false
return fmt.Errorf("RUnlock attempted on an un-locked entity: %s", args.Name)
} else if len(locksHeld) == 1 && locksHeld[0] == true {
// A write-lock is held, cannot release a read lock
*reply = false
return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name)
} else if len(locksHeld) > 1 {
// Remove one of the read locks held.
locksHeld = locksHeld[1:]
l.lockMap[args.Name] = locksHeld
*reply = true
} else {
// Delete the map entry since this is the last read lock held
// on *name.
delete(l.lockMap, args.Name)
*reply = true
}
return nil
}
// Initialize distributed lock.
func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) {
lockServers := newLockServers(serverConfig)
registerStorageLockers(mux, lockServers)
}
// Create one lock server for every local storage rpc server.
func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) {
// Initialize posix storage API.
exports := serverConfig.disks
ignoredExports := serverConfig.ignoredDisks
// Save ignored disks in a map
skipDisks := make(map[string]bool)
for _, ignoredExport := range ignoredExports {
skipDisks[ignoredExport] = true
}
for _, export := range exports {
if skipDisks[export] {
continue
}
if isLocalStorage(export) {
if idx := strings.LastIndex(export, ":"); idx != -1 {
export = export[idx+1:]
}
lockServers = append(lockServers, &lockServer{
rpcPath: export,
mutex: sync.Mutex{},
lockMap: make(map[string][]bool),
timestamp: time.Now().UTC(),
})
}
}
return lockServers
}
// registerStorageLockers - register locker rpc handlers for net/rpc library clients
func registerStorageLockers(mux *router.Router, lockServers []*lockServer) {
for _, lockServer := range lockServers {
lockRPCServer := rpc.NewServer()
lockRPCServer.RegisterName("Dsync", lockServer)
lockRouter := mux.PathPrefix(reservedBucket).Subrouter()
lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer)
}
}
}
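The lock handlers above are ordinary net/rpc methods registered under the "Dsync" service name, one server per local export. As a rough usage sketch (not part of this commit): the address and export path below are placeholders, the reserved URL prefix is assumed to be "/minio", and a real client would first call Dsync.LoginHandler to obtain a valid token plus the server's timestamp, since verifyArgs rejects anything else.

package main

import (
	"log"
	"net/rpc"
	"time"
)

// LockArgs mirrors the server-side structure so gob encoding matches.
type LockArgs struct {
	Name      string
	Token     string
	Timestamp time.Time
}

func main() {
	// Hypothetical endpoint: lock RPC for the export "/mnt/disk1" on one node.
	client, err := rpc.DialHTTPPath("tcp", "10.0.0.1:9000", "/minio/lock/mnt/disk1")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	args := LockArgs{Name: "bucket/object", Token: "<jwt-from-login>", Timestamp: time.Time{}}
	var granted bool
	// Lock answers granted == false (with a nil error) when the name is already locked.
	if err := client.Call("Dsync.Lock", &args, &granted); err != nil {
		log.Fatal(err)
	}
	log.Printf("write lock granted: %v", granted)
}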

View File

@ -67,9 +67,10 @@ func errorIf(err error, msg string, data ...interface{}) {
	fields := logrus.Fields{
		"cause": err.Error(),
	}
-	if globalTrace {
-		fields["stack"] = "\n" + stackInfo()
+	if e, ok := err.(*Error); ok {
+		fields["stack"] = strings.Join(e.Trace(), " ")
	}
	log.WithFields(fields).Errorf(msg, data...)
}
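The hunk above switches errorIf from the global trace flag to the new traced-error type: when the error is a *Error, its recorded call stack is logged. The precise definition of *Error lives elsewhere in this commit; the following is only a rough sketch, under the assumption that it pairs a causal error with a stack captured at wrap time, of the traceError/errorCause helpers the diffs below rely on.

package cmd

import (
	"fmt"
	"runtime"
)

// Error - sketch of a traced error: the cause plus the call stack recorded
// where the error occurred. Field names here are assumptions.
type Error struct {
	e     error    // Underlying causal error.
	trace []string // Call stack as "file:line" strings.
}

func (e *Error) Error() string { return e.e.Error() }

// Trace returns the recorded call stack.
func (e *Error) Trace() []string { return e.trace }

// traceError wraps err with the caller's stack.
func traceError(err error) error {
	if err == nil {
		return nil
	}
	var trace []string
	for i := 1; ; i++ {
		_, file, line, ok := runtime.Caller(i)
		if !ok {
			break
		}
		trace = append(trace, fmt.Sprintf("%s:%d", file, line))
	}
	return &Error{e: err, trace: trace}
}

// errorCause unwraps a traced error back to its cause.
func errorCause(err error) error {
	if e, ok := err.(*Error); ok {
		return e.e
	}
	return err
}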

View File

@ -20,6 +20,7 @@ import (
	"fmt"
	"os"
	"sort"
+	"strings"

	"github.com/minio/cli"
	"github.com/minio/mc/pkg/console"
@ -27,11 +28,20 @@ import (
var (
	// global flags for minio.
-	minioFlags = []cli.Flag{
+	globalFlags = []cli.Flag{
		cli.BoolFlag{
			Name:  "help, h",
			Usage: "Show help.",
		},
+		cli.StringFlag{
+			Name:  "config-dir, C",
+			Value: mustGetConfigPath(),
+			Usage: "Path to configuration folder.",
+		},
+		cli.BoolFlag{
+			Name:  "quiet",
+			Usage: "Suppress chatty output.",
+		},
	}
)
@ -62,6 +72,9 @@ func init() {
	// Set global trace flag.
	globalTrace = os.Getenv("MINIO_TRACE") == "1"

+	// Set all the debug flags from ENV if any.
+	setGlobalsDebugFromEnv()
}

func migrate() {
@ -112,7 +125,7 @@ func registerApp() *cli.App {
	app.Author = "Minio.io"
	app.Usage = "Cloud Storage Server."
	app.Description = `Minio is an Amazon S3 compatible object storage server. Use it to store photos, videos, VMs, containers, log files, or any blob of data as objects.`
-	app.Flags = append(minioFlags, globalFlags...)
+	app.Flags = globalFlags
	app.Commands = commands
	app.CustomAppHelpTemplate = minioHelpTemplate
	app.CommandNotFound = func(ctx *cli.Context, command string) {
@ -160,19 +173,24 @@ func Main() {
	// Enable all loggers by now.
	enableLoggers()

+	// Init the error tracing module.
+	initError()
+
	// Set global quiet flag.
	globalQuiet = c.Bool("quiet") || c.GlobalBool("quiet")

	// Do not print update messages, if quiet flag is set.
	if !globalQuiet {
-		// Do not print any errors in release update function.
-		noError := true
-		updateMsg := getReleaseUpdate(minioUpdateStableURL, noError)
-		if updateMsg.Update {
+		if strings.HasPrefix(Version, "RELEASE.") {
+			updateMsg, _, err := getReleaseUpdate(minioUpdateStableURL)
+			if err != nil {
+				// Ignore any errors during getReleaseUpdate() because
+				// the internet might not be available.
+				return nil
+			}
			console.Println(updateMsg)
		}
	}
	return nil
}

View File

@ -18,9 +18,66 @@
package cmd

import (
	"errors"
+	"fmt"
+	pathutil "path"
+	"runtime"
+	"strconv"
+	"strings"
	"sync"
+
+	"github.com/minio/dsync"
)

+// Global name space lock.
+var nsMutex *nsLockMap
+
+// Initialize distributed locking only in case of distributed setup.
+// Returns if the setup is distributed or not on success.
+func initDsyncNodes(disks []string, port int) error {
+	serverPort := strconv.Itoa(port)
+	cred := serverConfig.GetCredential()
+	// Initialize rpc lock client information only if this instance is a distributed setup.
+	var clnts []dsync.RPC
+	for _, disk := range disks {
+		if idx := strings.LastIndex(disk, ":"); idx != -1 {
+			clnts = append(clnts, newAuthClient(&authConfig{
+				accessKey: cred.AccessKeyID,
+				secretKey: cred.SecretAccessKey,
+				// Construct a new dsync server addr.
+				address: disk[:idx] + ":" + serverPort,
+				// Construct a new rpc path for the disk.
+				path:        pathutil.Join(lockRPCPath, disk[idx+1:]),
+				loginMethod: "Dsync.LoginHandler",
+			}))
+		}
+	}
+	return dsync.SetNodesWithClients(clnts)
+}
+
+// initNSLock - initialize name space lock map.
+func initNSLock(isDist bool) {
+	nsMutex = &nsLockMap{
+		isDist:  isDist,
+		lockMap: make(map[nsParam]*nsLock),
+	}
+	if globalDebugLock {
+		// Lock debugging enabled, initialize nsLockMap with entries for debugging information.
+		// Entries of <volume, path> -> stateInfo of locks, for instrumentation purposes.
+		nsMutex.debugLockMap = make(map[nsParam]*debugLockInfoPerVolumePath)
+	}
+}
+
+func (n *nsLockMap) initLockInfoForVolumePath(param nsParam) {
+	n.debugLockMap[param] = newDebugLockInfoPerVolumePath()
+}
+
+// RWLocker - interface that any read-write locking library should implement.
+type RWLocker interface {
+	sync.Locker
+	RLock()
+	RUnlock()
+}
+
// nsParam - carries name space resource.
type nsParam struct {
	volume string
@ -29,98 +86,189 @@ type nsParam struct {
// nsLock - provides primitives for locking critical namespace regions.
type nsLock struct {
-	sync.RWMutex
-	ref uint
+	writer      RWLocker
+	readerArray []RWLocker
+	ref         uint
}

// nsLockMap - namespace lock map, provides primitives to Lock,
// Unlock, RLock and RUnlock.
type nsLockMap struct {
-	lockMap map[nsParam]*nsLock
-	mutex   sync.Mutex
-}
-
-// Global name space lock.
-var nsMutex *nsLockMap
-
-// initNSLock - initialize name space lock map.
-func initNSLock() {
-	nsMutex = &nsLockMap{
-		lockMap: make(map[nsParam]*nsLock),
-	}
+	// Lock counters used for lock debugging.
+	globalLockCounter  int64 // Total locks held.
+	blockedCounter     int64 // Total operations blocked waiting for locks.
+	runningLockCounter int64 // Total locks held but not released yet.
+	debugLockMap       map[nsParam]*debugLockInfoPerVolumePath // Info for instrumentation on locks.
+
+	isDist       bool // Indicates whether the locking service is part of a distributed setup or not.
+	lockMap      map[nsParam]*nsLock
+	lockMapMutex sync.Mutex
}

// Lock the namespace resource.
-func (n *nsLockMap) lock(volume, path string, readLock bool) {
-	n.mutex.Lock()
+func (n *nsLockMap) lock(volume, path string, lockOrigin, opsID string, readLock bool) {
+	var nsLk *nsLock
+	n.lockMapMutex.Lock()

	param := nsParam{volume, path}
	nsLk, found := n.lockMap[param]
	if !found {
		nsLk = &nsLock{
+			writer: func() RWLocker {
+				if n.isDist {
+					return dsync.NewDRWMutex(pathutil.Join(volume, path))
+				}
+				return &sync.RWMutex{}
+			}(),
			ref: 0,
		}
		n.lockMap[param] = nsLk
	}
	nsLk.ref++ // Update ref count here to avoid multiple races.
+
+	rwlock := nsLk.writer
+	if readLock && n.isDist {
+		rwlock = dsync.NewDRWMutex(pathutil.Join(volume, path))
+	}
+
+	if globalDebugLock {
+		// Change the state of the lock to be blocked for the given pair of <volume, path> and <OperationID> till the lock unblocks.
+		// The lock for accessing `nsMutex` is held inside the function itself.
+		err := n.statusNoneToBlocked(param, lockOrigin, opsID, readLock)
+		if err != nil {
+			errorIf(err, "Failed to set lock state to blocked.")
+		}
+	}
+
	// Unlock map before Locking NS which might block.
-	n.mutex.Unlock()
+	n.lockMapMutex.Unlock()

	// Locking here can block.
	if readLock {
-		nsLk.RLock()
+		rwlock.RLock()
+
+		if n.isDist {
+			// Only add (for reader case) to array after RLock() succeeds
+			// (so that we know for sure that element in [0] can be RUnlocked())
+			n.lockMapMutex.Lock()
+			if len(nsLk.readerArray) == 0 {
+				nsLk.readerArray = []RWLocker{rwlock}
+			} else {
+				nsLk.readerArray = append(nsLk.readerArray, rwlock)
+			}
+			n.lockMapMutex.Unlock()
+		}
	} else {
-		nsLk.Lock()
+		rwlock.Lock()
	}
+
+	// Check if lock debugging is enabled.
+	if globalDebugLock {
+		// Changing the status of the operation from blocked to running.
+		// Change the state of the lock to be running (from blocked) for the given pair of <volume, path> and <OperationID>.
+		err := n.statusBlockedToRunning(param, lockOrigin, opsID, readLock)
+		if err != nil {
+			errorIf(err, "Failed to set the lock state to running.")
+		}
+	}
}

// Unlock the namespace resource.
-func (n *nsLockMap) unlock(volume, path string, readLock bool) {
+func (n *nsLockMap) unlock(volume, path, opsID string, readLock bool) {
	// nsLk.Unlock() will not block, hence locking the map for the entire function is fine.
-	n.mutex.Lock()
-	defer n.mutex.Unlock()
+	n.lockMapMutex.Lock()
+	defer n.lockMapMutex.Unlock()

	param := nsParam{volume, path}
	if nsLk, found := n.lockMap[param]; found {
		if readLock {
-			nsLk.RUnlock()
+			if n.isDist {
+				if len(nsLk.readerArray) == 0 {
+					errorIf(errors.New("Length of reader lock array cannot be 0."), "Invalid reader lock array length detected.")
+				}
+				// Release first lock first (FIFO)
+				nsLk.readerArray[0].RUnlock()
+				// And discard first element
+				nsLk.readerArray = nsLk.readerArray[1:]
+			} else {
+				nsLk.writer.RUnlock()
+			}
		} else {
-			nsLk.Unlock()
+			nsLk.writer.Unlock()
		}
		if nsLk.ref == 0 {
			errorIf(errors.New("Namespace reference count cannot be 0."), "Invalid reference count detected.")
		}
		if nsLk.ref != 0 {
			nsLk.ref--
+
+			// Lock debugging enabled, delete the lock state entry for the given operation ID.
+			if globalDebugLock {
+				err := n.deleteLockInfoEntryForOps(param, opsID)
+				if err != nil {
+					errorIf(err, "Failed to delete lock info entry.")
+				}
+			}
		}
		if nsLk.ref == 0 {
+			if len(nsLk.readerArray) != 0 && n.isDist {
+				errorIf(errors.New("Length of reader lock array should be 0 upon deleting map entry."), "Invalid reader lock array length detected.")
+			}
+
			// Remove from the map if there are no more references.
			delete(n.lockMap, param)
+
+			// Lock debugging enabled, delete the lock state entry for the given <volume, path> pair.
+			if globalDebugLock {
+				err := n.deleteLockInfoEntryForVolumePath(param)
+				if err != nil {
+					errorIf(err, "Failed to delete lock info entry.")
+				}
+			}
		}
	}
}

// Lock - locks the given resource for writes, using a previously
// allocated name space lock or initializing a new one.
-func (n *nsLockMap) Lock(volume, path string) {
+func (n *nsLockMap) Lock(volume, path, opsID string) {
+	var lockOrigin string
+
+	// Lock debugging enabled. The caller information of the lock held has to be obtained here, before calling any other function.
+	if globalDebugLock {
+		// Fetch the package, function name and the line number of the caller from the runtime.
+		// Here is an example: https://play.golang.org/p/perrmNRI9_ .
+		pc, fn, line, success := runtime.Caller(1)
+		if !success {
+			errorIf(errors.New("Couldn't get caller info."), "Fetching caller info from runtime failed.")
+		}
+		lockOrigin = fmt.Sprintf("[lock held] in %s[%s:%d]", runtime.FuncForPC(pc).Name(), fn, line)
+	}
+
	readLock := false
-	n.lock(volume, path, readLock)
+	n.lock(volume, path, lockOrigin, opsID, readLock)
}

// Unlock - unlocks any previously acquired write locks.
-func (n *nsLockMap) Unlock(volume, path string) {
+func (n *nsLockMap) Unlock(volume, path, opsID string) {
	readLock := false
-	n.unlock(volume, path, readLock)
+	n.unlock(volume, path, opsID, readLock)
}

// RLock - locks any previously acquired read locks.
-func (n *nsLockMap) RLock(volume, path string) {
+func (n *nsLockMap) RLock(volume, path, opsID string) {
+	var lockOrigin string
	readLock := true
-	n.lock(volume, path, readLock)
+
+	// Lock debugging enabled. The caller information of the lock held has to be obtained here, before calling any other function.
+	if globalDebugLock {
+		// Fetch the package, function name and the line number of the caller from the runtime.
+		// Here is an example: https://play.golang.org/p/perrmNRI9_ .
+		pc, fn, line, success := runtime.Caller(1)
+		if !success {
+			errorIf(errors.New("Couldn't get caller info."), "Fetching caller info from runtime failed.")
+		}
+		lockOrigin = fmt.Sprintf("[lock held] in %s[%s:%d]", runtime.FuncForPC(pc).Name(), fn, line)
+	}
+	n.lock(volume, path, lockOrigin, opsID, readLock)
}

// RUnlock - unlocks any previously acquired read locks.
-func (n *nsLockMap) RUnlock(volume, path string) {
+func (n *nsLockMap) RUnlock(volume, path, opsID string) {
	readLock := true
-	n.unlock(volume, path, readLock)
+	n.unlock(volume, path, opsID, readLock)
}
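Taken together, Lock/Unlock and RLock/RUnlock now carry an operation ID so that the instrumentation can pair each acquire with its release. A small usage sketch, assuming the package state above (nsMutex initialized via initNSLock) and a hypothetical opsID value:

// getObjectSketch shows the intended calling pattern inside package cmd.
func getObjectSketch(bucket, object string) {
	opsID := "3a4f9c21" // Hypothetical unique operation ID, e.g. random hex.

	nsMutex.RLock(bucket, object, opsID)
	defer nsMutex.RUnlock(bucket, object, opsID)

	// ... read object metadata and data under the shared lock ...
}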

View File

@ -16,16 +16,21 @@

package cmd

-import "testing"
+import (
+	"strconv"
+	"sync"
+	"testing"
+	"time"
+)

// Tests functionality provided by namespace lock.
func TestNamespaceLockTest(t *testing.T) {
	// List of test cases.
	testCases := []struct {
-		lk    func(s1, s2 string)
-		unlk  func(s1, s2 string)
-		rlk   func(s1, s2 string)
-		runlk func(s1, s2 string)
+		lk    func(s1, s2, s3 string)
+		unlk  func(s1, s2, s3 string)
+		rlk   func(s1, s2, s3 string)
+		runlk func(s1, s2, s3 string)
		lkCount          int
		lockedRefCount   uint
		unlockedRefCount uint
@ -58,7 +63,7 @@ func TestNamespaceLockTest(t *testing.T) {

	// Write lock tests.
	testCase := testCases[0]
-	testCase.lk("a", "b") // lock once.
+	testCase.lk("a", "b", "c") // lock once.
	nsLk, ok := nsMutex.lockMap[nsParam{"a", "b"}]
	if !ok && testCase.shouldPass {
		t.Errorf("Lock in map missing.")
@ -67,7 +72,7 @@ func TestNamespaceLockTest(t *testing.T) {
	if testCase.lockedRefCount != nsLk.ref && testCase.shouldPass {
		t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 1, testCase.lockedRefCount, nsLk.ref)
	}
-	testCase.unlk("a", "b") // unlock once.
+	testCase.unlk("a", "b", "c") // unlock once.
	if testCase.unlockedRefCount != nsLk.ref && testCase.shouldPass {
		t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 1, testCase.unlockedRefCount, nsLk.ref)
	}
@ -78,10 +83,10 @@ func TestNamespaceLockTest(t *testing.T) {

	// Read lock tests.
	testCase = testCases[1]
-	testCase.rlk("a", "b") // lock once.
-	testCase.rlk("a", "b") // lock second time.
-	testCase.rlk("a", "b") // lock third time.
-	testCase.rlk("a", "b") // lock fourth time.
+	testCase.rlk("a", "b", "c") // lock once.
+	testCase.rlk("a", "b", "c") // lock second time.
+	testCase.rlk("a", "b", "c") // lock third time.
+	testCase.rlk("a", "b", "c") // lock fourth time.
	nsLk, ok = nsMutex.lockMap[nsParam{"a", "b"}]
	if !ok && testCase.shouldPass {
		t.Errorf("Lock in map missing.")
@ -90,8 +95,9 @@ func TestNamespaceLockTest(t *testing.T) {
	if testCase.lockedRefCount != nsLk.ref && testCase.shouldPass {
		t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 1, testCase.lockedRefCount, nsLk.ref)
	}
-	testCase.runlk("a", "b") // unlock once.
-	testCase.runlk("a", "b") // unlock second time.
+
+	testCase.runlk("a", "b", "c") // unlock once.
+	testCase.runlk("a", "b", "c") // unlock second time.
	if testCase.unlockedRefCount != nsLk.ref && testCase.shouldPass {
		t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 2, testCase.unlockedRefCount, nsLk.ref)
	}
@ -102,7 +108,7 @@ func TestNamespaceLockTest(t *testing.T) {

	// Read lock 0 ref count.
	testCase = testCases[2]
-	testCase.rlk("a", "c") // lock once.
+	testCase.rlk("a", "c", "d") // lock once.

	nsLk, ok = nsMutex.lockMap[nsParam{"a", "c"}]
	if !ok && testCase.shouldPass {
@ -112,7 +118,7 @@ func TestNamespaceLockTest(t *testing.T) {
	if testCase.lockedRefCount != nsLk.ref && testCase.shouldPass {
		t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 3, testCase.lockedRefCount, nsLk.ref)
	}
-	testCase.runlk("a", "c") // unlock once.
+	testCase.runlk("a", "c", "d") // unlock once.
	if testCase.unlockedRefCount != nsLk.ref && testCase.shouldPass {
		t.Errorf("Test %d fails, expected to pass. Wanted ref count is %d, got %d", 3, testCase.unlockedRefCount, nsLk.ref)
	}
@ -121,3 +127,266 @@ func TestNamespaceLockTest(t *testing.T) {
		t.Errorf("Lock map not found.")
	}
}
func TestLockStats(t *testing.T) {
expectedResult := []lockStateCase{
// Test case - 1.
// Case where 10 read locks are held.
// Entry for any of the 10 read locks has to be found.
// Since they are held in a loop, the lock origin for the first 10 read locks (opsID 0-9) should be the same.
{
volume: "my-bucket",
path: "my-object",
opsID: "0",
readLock: true,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 10,
expectedRunningLockCount: 10,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 10,
expectedVolPathRunningCount: 10,
expectedVolPathBlockCount: 0,
},
// Test case - 2.
// Case where the first 5 read locks are released.
// Entry for any of the 6th-10th "Running" read locks has to be found.
{
volume: "my-bucket",
path: "my-object",
opsID: "6",
readLock: true,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 5,
expectedRunningLockCount: 5,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 5,
expectedVolPathRunningCount: 5,
expectedVolPathBlockCount: 0,
},
// Test case - 3.
{
volume: "my-bucket",
path: "my-object",
opsID: "10",
readLock: false,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 2,
expectedRunningLockCount: 1,
expectedBlockedLockCount: 1,
expectedVolPathLockCount: 2,
expectedVolPathRunningCount: 1,
expectedVolPathBlockCount: 1,
},
// Test case - 4.
{
volume: "my-bucket",
path: "my-object",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 1,
expectedRunningLockCount: 0,
expectedBlockedLockCount: 1,
expectedVolPathLockCount: 1,
expectedVolPathRunningCount: 0,
expectedVolPathBlockCount: 1,
},
// Test case - 5.
{
volume: "my-bucket",
path: "my-object",
opsID: "11",
readLock: false,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:298]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
expectedGlobalLockCount: 1,
expectedRunningLockCount: 1,
expectedBlockedLockCount: 0,
expectedVolPathLockCount: 1,
expectedVolPathRunningCount: 1,
expectedVolPathBlockCount: 0,
},
// Test case - 6.
// Case where the first 5 read locks are released, but 2 write locks are
// blocked waiting for the remaining 5 read locks to be released (10 read locks were held initially).
// We check the entry for the first blocked write call here.
{
volume: "my-bucket",
path: "my-object",
opsID: "10",
readLock: false,
// write lock is held at line 318.
// this confirms that we are looking the right write lock.
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats.func2[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:318]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
// count of held(running) + blocked locks.
expectedGlobalLockCount: 7,
// count of acquired locks.
expectedRunningLockCount: 5,
// 2 write calls are blocked, waiting for the remaining 5 read locks.
expectedBlockedLockCount: 2,
expectedVolPathLockCount: 7,
expectedVolPathRunningCount: 5,
expectedVolPathBlockCount: 2,
},
// Test case - 7.
// Case where 9 out of 10 read locks are released.
// Since there's one more pending read lock, the 2 write locks are still blocked.
// Testing the entry for the last read lock.
{
volume: "my-bucket",
path: "my-object",
opsID: "9",
readLock: true,
lockOrigin: "[lock held] in github.com/minio/minio/cmd.TestLockStats.func2[/Users/hackintoshrao/mycode/go/src/github.com/minio/minio/cmd/namespace-lock_test.go:318]",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Running",
// Total running + blocked locks.
// 2 blocked write lock.
expectedGlobalLockCount: 3,
expectedRunningLockCount: 1,
expectedBlockedLockCount: 2,
expectedVolPathLockCount: 3,
expectedVolPathRunningCount: 1,
expectedVolPathBlockCount: 2,
},
// Test case - 8.
{
volume: "my-bucket",
path: "my-object",
// expected metrics.
expectedErr: nil,
expectedLockStatus: "Blocked",
expectedGlobalLockCount: 0,
expectedRunningLockCount: 0,
expectedBlockedLockCount: 0,
},
}
var wg sync.WaitGroup
// enabling lock instrumentation.
globalDebugLock = true
// initializing the locks.
initNSLock(false)
// set debug lock info to `nil` so that the next tests have to initialize them again.
defer func() {
globalDebugLock = false
nsMutex.debugLockMap = nil
}()
// hold 10 read locks.
for i := 0; i < 10; i++ {
nsMutex.RLock("my-bucket", "my-object", strconv.Itoa(i))
}
// expected lock info.
expectedLockStats := expectedResult[0]
// verify the actual lock info with the expected one.
verifyLockState(expectedLockStats, t, 1)
// unlock 5 read locks.
for i := 0; i < 5; i++ {
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i))
}
expectedLockStats = expectedResult[1]
// verify the actual lock info with the expected one.
verifyLockState(expectedLockStats, t, 2)
syncChan := make(chan struct{}, 1)
wg.Add(1)
go func() {
defer wg.Done()
// blocks till all read locks are released.
nsMutex.Lock("my-bucket", "my-object", strconv.Itoa(10))
// Once the above attempt to lock is unblocked/acquired, we verify the stats and release the lock.
expectedWLockStats := expectedResult[2]
// Since the write lock acquired here, the number of blocked locks should reduce by 1 and
// count of running locks should increase by 1.
verifyLockState(expectedWLockStats, t, 3)
// release the write lock.
nsMutex.Unlock("my-bucket", "my-object", strconv.Itoa(10))
// The number of running locks should decrease by 1.
// expectedWLockStats = expectedResult[3]
// verifyLockState(expectedWLockStats, t, 4)
// Take the lock stats after the first write lock is unlocked.
// Only then unlock the second write lock.
syncChan <- struct{}{}
}()
// waiting so that the write lock in the goroutine above is attempted first.
// sleeping so that we can predict the order in which the write locks are held.
time.Sleep(100 * time.Millisecond)
// since there are 5 more read locks still held on <"my-bucket","my-object">,
// an attempt to hold write locks blocks. So it's run in a new goroutine.
wg.Add(1)
go func() {
defer wg.Done()
// blocks till all read locks are released.
nsMutex.Lock("my-bucket", "my-object", strconv.Itoa(11))
// Once the above attempt to lock is unblocked/acquired, we release the lock.
// Unlock the second write lock only after lock stats for first write lock release is taken.
<-syncChan
// The number of running locks should decrease by 1.
expectedWLockStats := expectedResult[4]
verifyLockState(expectedWLockStats, t, 5)
nsMutex.Unlock("my-bucket", "my-object", strconv.Itoa(11))
}()
expectedLockStats = expectedResult[5]
time.Sleep(1 * time.Second)
// verify the actual lock info with the expected one.
verifyLockState(expectedLockStats, t, 6)
// unlock 4 out of remaining 5 read locks.
for i := 0; i < 4; i++ {
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(i+5))
}
// verify the entry for one remaining read lock and count of blocked write locks.
expectedLockStats = expectedResult[6]
// verify the actual lock info with the expected one.
verifyLockState(expectedLockStats, t, 7)
// Releasing the last read lock.
nsMutex.RUnlock("my-bucket", "my-object", strconv.Itoa(9))
wg.Wait()
expectedLockStats = expectedResult[7]
// verify the actual lock info with the expected one.
verifyGlobalLockStats(expectedLockStats, t, 8)
}

View File

@ -16,6 +16,8 @@

package cmd

+import "github.com/minio/minio/pkg/disk"
+
// naughtyDisk wraps a POSIX disk and returns programmed errors
// specified by the developer. The purpose is to simulate errors
// that are hard to simulate in practise like DiskNotFound.
@ -46,6 +48,13 @@ func (d *naughtyDisk) calcError() (err error) {
	return nil
}

+func (d *naughtyDisk) DiskInfo() (info disk.Info, err error) {
+	if err := d.calcError(); err != nil {
+		return info, err
+	}
+	return d.disk.DiskInfo()
+}
+
func (d *naughtyDisk) MakeVol(volume string) (err error) {
	if err := d.calcError(); err != nil {
		return err
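The new DiskInfo method follows the same pattern as the other naughtyDisk wrappers: consult calcError for a programmed failure, otherwise delegate to the wrapped disk. The constructor and error-programming fields are defined outside this hunk, so the newNaughtyDisk signature below is an assumption made for illustration only:

// Hypothetical sketch: fail the 2nd DiskInfo call with errDiskNotFound.
func diskInfoFailureSketch(realDisk StorageAPI) {
	nDisk := newNaughtyDisk(realDisk, map[int]error{2: errDiskNotFound}, nil)
	if _, err := nDisk.DiskInfo(); err != nil {
		// 1st call: expected to pass through to the real disk.
	}
	if _, err := nDisk.DiskInfo(); err == errDiskNotFound {
		// 2nd call: the programmed error is returned.
	}
}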

125
cmd/net-rpc-client.go Normal file
View File

@ -0,0 +1,125 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"net/rpc"
"sync"
)
// RPCClient is a wrapper type for rpc.Client which provides reconnect on first failure.
type RPCClient struct {
mu sync.Mutex
rpcPrivate *rpc.Client
node string
rpcPath string
}
// newClient constructs an RPCClient object with node and rpcPath initialized.
// It _doesn't_ connect to the remote endpoint. See Call method to see when the
// connect happens.
func newClient(node, rpcPath string) *RPCClient {
return &RPCClient{
node: node,
rpcPath: rpcPath,
}
}
// clearRPCClient clears the pointer to the rpc.Client object in a safe manner
func (rpcClient *RPCClient) clearRPCClient() {
rpcClient.mu.Lock()
rpcClient.rpcPrivate = nil
rpcClient.mu.Unlock()
}
// getRPCClient gets the pointer to the rpc.Client object in a safe manner
func (rpcClient *RPCClient) getRPCClient() *rpc.Client {
rpcClient.mu.Lock()
rpcLocalStack := rpcClient.rpcPrivate
rpcClient.mu.Unlock()
return rpcLocalStack
}
// dialRPCClient tries to establish a connection to the server in a safe manner
func (rpcClient *RPCClient) dialRPCClient() (*rpc.Client, error) {
rpcClient.mu.Lock()
defer rpcClient.mu.Unlock()
// After acquiring the lock, check whether another goroutine may have already dialed and established a connection
if rpcClient.rpcPrivate != nil {
return rpcClient.rpcPrivate, nil
}
rpc, err := rpc.DialHTTPPath("tcp", rpcClient.node, rpcClient.rpcPath)
if err != nil {
return nil, err
} else if rpc == nil {
return nil, errors.New("No valid RPC Client created after dial")
}
rpcClient.rpcPrivate = rpc
return rpcClient.rpcPrivate, nil
}
// Call makes a RPC call to the remote endpoint using the default codec, namely encoding/gob.
func (rpcClient *RPCClient) Call(serviceMethod string, args interface{}, reply interface{}) error {
// Make a copy below so that we can safely (continue to) work with the rpc.Client.
// Even if two goroutines simultaneously find that the connection is not initialised,
// they would both attempt to dial, and only one of them would succeed in doing so.
rpcLocalStack := rpcClient.getRPCClient()
// If the rpc.Client is nil, we attempt to (re)connect with the remote endpoint.
if rpcLocalStack == nil {
var err error
rpcLocalStack, err = rpcClient.dialRPCClient()
if err != nil {
return err
}
}
// If the RPC fails due to a network-related error, then we reset
// rpc.Client for a subsequent reconnect.
err := rpcLocalStack.Call(serviceMethod, args, reply)
if err != nil {
if err.Error() == rpc.ErrShutdown.Error() {
// Reset rpcClient.rpc to nil to trigger a reconnect in future
// and close the underlying connection.
rpcClient.clearRPCClient()
// Close the underlying connection.
rpcLocalStack.Close()
// Set rpc error as rpc.ErrShutdown type.
err = rpc.ErrShutdown
}
}
return err
}
// Close closes the underlying socket file descriptor.
func (rpcClient *RPCClient) Close() error {
// See comment above for making a copy on local stack
rpcLocalStack := rpcClient.getRPCClient()
// If rpc client has not connected yet there is nothing to close.
if rpcLocalStack == nil {
return nil
}
// Reset rpcClient.rpc to allow for subsequent calls to use a new
// (socket) connection.
rpcClient.clearRPCClient()
return rpcLocalStack.Close()
}
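A brief usage sketch of the wrapper: the first Call dials lazily, and a call failing with rpc.ErrShutdown clears the cached connection so the next Call transparently redials. The node address, RPC path, and service method below are placeholders.

// rpcClientUsageSketch assumes a hypothetical "Storage" service exposing a Ping method.
func rpcClientUsageSketch() {
	client := newClient("10.0.0.2:9000", "/minio/storage/mnt/disk1")
	defer client.Close()

	var reply bool
	if err := client.Call("Storage.Ping", &struct{}{}, &reply); err != nil {
		// On rpc.ErrShutdown the cached *rpc.Client was discarded;
		// retrying here would reconnect.
	}
}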

View File

@ -57,19 +57,19 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
		{"Test", "", ObjectInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
		{"---", "", ObjectInfo{}, BucketNameInvalid{Bucket: "---"}, false},
		{"ad", "", ObjectInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
-		// Test cases with valid but non-existing bucket names (Test number 5-7).
+		// Test cases with valid but non-existing bucket names (Test number 5-6).
		{"abcdefgh", "abc", ObjectInfo{}, BucketNotFound{Bucket: "abcdefgh"}, false},
		{"ijklmnop", "efg", ObjectInfo{}, BucketNotFound{Bucket: "ijklmnop"}, false},
-		// Test cases with valid but non-existing bucket names and invalid object name (Test number 8-9).
+		// Test cases with valid but non-existing bucket names and invalid object name (Test number 7-8).
		{"test-getobjectinfo", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "test-getobjectinfo", Object: ""}, false},
		{"test-getobjectinfo", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "test-getobjectinfo", Object: ""}, false},
-		// Test cases with non-existing object name with existing bucket (Test number 10-12).
+		// Test cases with non-existing object name with existing bucket (Test number 9-11).
		{"test-getobjectinfo", "Africa", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Africa"}, false},
		{"test-getobjectinfo", "Antartica", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Antartica"}, false},
		{"test-getobjectinfo", "Asia/myfile", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia/myfile"}, false},
-		// Test case with existing bucket but object name set to a directory (Test number 13).
+		// Test case with existing bucket but object name set to a directory (Test number 12).
		{"test-getobjectinfo", "Asia", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia"}, false},
-		// Valid case with existing object (Test number 14).
+		// Valid case with existing object (Test number 13).
		{"test-getobjectinfo", "Asia/asiapics.jpg", resultCases[0], nil, true},
	}
	for i, testCase := range testCases {

View File

@ -38,7 +38,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
	errMsg := "Bucket not found: minio-bucket"
	// Operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
-	uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
+	_, err := obj.NewMultipartUpload(bucket, object, nil)
	if err == nil {
		t.Fatalf("%s: Expected to fail since the NewMultipartUpload is initialized on a non-existent bucket.", instanceType)
	}
@ -53,7 +53,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

-	uploadID, err = obj.NewMultipartUpload(bucket, object, nil)
+	uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
	if err != nil {
		t.Fatalf("%s : %s", instanceType, err.Error())
	}
@ -92,6 +92,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
	}

	err = obj.AbortMultipartUpload(bucket, object, "abc")
+	err = errorCause(err)
	switch err.(type) {
	case InvalidUploadID:
	default:

View File

@ -151,7 +151,8 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
	}

	for i, testCase := range testCases {
-		actualMd5Hex, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta)
+		objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta)
+		actualErr = errorCause(actualErr)
		if actualErr != nil && testCase.expectedError == nil {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
		}
@ -159,14 +160,14 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
			t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but passed instead.", i+1, instanceType, testCase.expectedError.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
-		if actualErr != nil && testCase.expectedError != actualErr {
+		if actualErr != nil && actualErr != testCase.expectedError {
			t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, instanceType, testCase.expectedError.Error(), actualErr.Error())
		}

		// Test passes as expected, but the output values are verified for correctness here.
		if actualErr == nil {
			// Asserting whether the md5 output is correct.
-			if expectedMD5, ok := testCase.inputMeta["md5Sum"]; ok && expectedMD5 != actualMd5Hex {
-				t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, actualMd5Hex)
+			if expectedMD5, ok := testCase.inputMeta["md5Sum"]; ok && expectedMD5 != objInfo.MD5Sum {
+				t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.MD5Sum)
			}
		}
	}
@ -223,7 +224,8 @@ func testObjectAPIPutObjectDiskNotFOund(obj ObjectLayer, instanceType string, di
	}

	for i, testCase := range testCases {
-		actualMd5Hex, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta)
+		objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta)
+		actualErr = errorCause(err)
		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
		}
@ -240,8 +242,8 @@ func testObjectAPIPutObjectDiskNotFOund(obj ObjectLayer, instanceType string, di
		// Test passes as expected, but the output values are verified for correctness here.
		if actualErr == nil && testCase.shouldPass {
			// Asserting whether the md5 output is correct.
-			if testCase.inputMeta["md5Sum"] != actualMd5Hex {
-				t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, actualMd5Hex)
+			if testCase.inputMeta["md5Sum"] != objInfo.MD5Sum {
+				t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, objInfo.MD5Sum)
			}
		}
	}
@ -272,6 +274,7 @@ func testObjectAPIPutObjectDiskNotFOund(obj ObjectLayer, instanceType string, di
			InsufficientWriteQuorum{},
		}
		_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta)
+		actualErr = errorCause(actualErr)
		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
		}

View File

@ -17,7 +17,7 @@

package cmd

import (
-	"path/filepath"
+	"net"
	"strings"
	"sync"
)
@ -35,6 +35,7 @@ const (

// isErrIgnored should we ignore this error?, takes a list of errors which can be ignored.
func isErrIgnored(err error, ignoredErrs []error) bool {
+	err = errorCause(err)
	for _, ignoredErr := range ignoredErrs {
		if ignoredErr == err {
			return true
@ -53,13 +54,62 @@ func fsHouseKeeping(storageDisk StorageAPI) error {
	return nil
}

+// Check if a network path is local to this node.
+func isLocalStorage(networkPath string) bool {
+	if idx := strings.LastIndex(networkPath, ":"); idx != -1 {
+		// e.g 10.0.0.1:/mnt/networkPath
+		netAddr, _, err := splitNetPath(networkPath)
+		if err != nil {
+			errorIf(err, "Splitting into ip and path failed")
+			return false
+		}
+		// netAddr will only be set if this is not a local path.
+		if netAddr == "" {
+			return true
+		}
+		// Resolve host to address to check if the IP is loopback.
+		// If address resolution fails, assume it's a non-local host.
+		addrs, err := net.LookupHost(netAddr)
+		if err != nil {
+			errorIf(err, "Failed to lookup host")
+			return false
+		}
+		for _, addr := range addrs {
+			if ip := net.ParseIP(addr); ip.IsLoopback() {
+				return true
+			}
+		}
+		iaddrs, err := net.InterfaceAddrs()
+		if err != nil {
+			errorIf(err, "Unable to list interface addresses")
+			return false
+		}
+		for _, addr := range addrs {
+			for _, iaddr := range iaddrs {
+				ip, _, err := net.ParseCIDR(iaddr.String())
+				if err != nil {
+					errorIf(err, "Unable to parse CIDR")
+					return false
+				}
+				if ip.String() == addr {
+					return true
+				}
+			}
+		}
+		return false
+	}
+	return true
+}
+
// Depending on the disk type network or local, initialize storage API.
func newStorageAPI(disk string) (storage StorageAPI, err error) {
-	if !strings.ContainsRune(disk, ':') || filepath.VolumeName(disk) != "" {
-		// Initialize filesystem storage API.
+	if isLocalStorage(disk) {
+		if idx := strings.LastIndex(disk, ":"); idx != -1 {
+			return newPosix(disk[idx+1:])
+		}
		return newPosix(disk)
	}
-	// Initialize rpc client storage API.
	return newRPCClient(disk)
}
@ -84,7 +134,7 @@ func initMetaVolume(storageDisks []StorageAPI) error {
		// Indicate this wait group is done.
		defer wg.Done()

-		// Attempt to create `.minio`.
+		// Attempt to create `.minio.sys`.
		err := disk.MakeVol(minioMetaBucket)
		if err != nil {
			switch err {
@ -135,7 +185,11 @@ func xlHouseKeeping(storageDisks []StorageAPI) error {
			// Cleanup all temp entries upon start.
			err := cleanupDir(disk, minioMetaBucket, tmpMetaPrefix)
			if err != nil {
-				errs[index] = err
+				switch errorCause(err) {
+				case errDiskNotFound, errVolumeNotFound:
+				default:
+					errs[index] = err
+				}
			}
		}(index, disk)
	}
@ -171,7 +225,7 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
		if err == errFileNotFound {
			return nil
		} else if err != nil { // For any other errors fail.
-			return err
+			return traceError(err)
		} // else on success..

		// Recurse and delete all other entries.
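Two behaviours of isLocalStorage are worth pinning down: a path without a "host:" prefix is always local, and a loopback host is local. A hedged test-style sketch (the non-loopback case depends on the machine's interfaces and DNS, so it is not asserted here):

func TestIsLocalStorageSketch(t *testing.T) {
	if !isLocalStorage("/mnt/disk1") {
		t.Error("paths without a host prefix should always be local")
	}
	if !isLocalStorage("localhost:/mnt/disk1") {
		t.Error("loopback hosts should be treated as local")
	}
}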

View File

@ -26,48 +26,57 @@ import (
// handle all cases where we have known types of errors returned by // handle all cases where we have known types of errors returned by
// underlying storage layer. // underlying storage layer.
func toObjectErr(err error, params ...string) error { func toObjectErr(err error, params ...string) error {
e, ok := err.(*Error)
if ok {
err = e.e
}
switch err { switch err {
case errVolumeNotFound: case errVolumeNotFound:
if len(params) >= 1 { if len(params) >= 1 {
return BucketNotFound{Bucket: params[0]} err = BucketNotFound{Bucket: params[0]}
} }
case errVolumeNotEmpty: case errVolumeNotEmpty:
if len(params) >= 1 { if len(params) >= 1 {
return BucketNotEmpty{Bucket: params[0]} err = BucketNotEmpty{Bucket: params[0]}
} }
case errVolumeExists: case errVolumeExists:
if len(params) >= 1 { if len(params) >= 1 {
return BucketExists{Bucket: params[0]} err = BucketExists{Bucket: params[0]}
} }
case errDiskFull: case errDiskFull:
return StorageFull{} err = StorageFull{}
case errIsNotRegular, errFileAccessDenied: case errIsNotRegular, errFileAccessDenied:
if len(params) >= 2 { if len(params) >= 2 {
return ObjectExistsAsDirectory{ err = ObjectExistsAsDirectory{
Bucket: params[0], Bucket: params[0],
Object: params[1], Object: params[1],
} }
} }
case errFileNotFound: case errFileNotFound:
if len(params) >= 2 { if len(params) >= 2 {
return ObjectNotFound{ err = ObjectNotFound{
Bucket: params[0], Bucket: params[0],
Object: params[1], Object: params[1],
} }
} }
case errFileNameTooLong: case errFileNameTooLong:
if len(params) >= 2 { if len(params) >= 2 {
return ObjectNameInvalid{ err = ObjectNameInvalid{
Bucket: params[0], Bucket: params[0],
Object: params[1], Object: params[1],
} }
} }
case errXLReadQuorum: case errXLReadQuorum:
return InsufficientReadQuorum{} err = InsufficientReadQuorum{}
case errXLWriteQuorum: case errXLWriteQuorum:
return InsufficientWriteQuorum{} err = InsufficientWriteQuorum{}
case io.ErrUnexpectedEOF, io.ErrShortWrite: case io.ErrUnexpectedEOF, io.ErrShortWrite:
return IncompleteBody{} err = IncompleteBody{}
}
if ok {
e.e = err
return e
} }
return err return err
} }
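toObjectErr now unwraps the cause, maps it to an API-facing error, and re-wraps it, so the context collected by traceError survives the translation. The `e.e` field access above implies a wrapper roughly like the following sketch (only the cause field is confirmed by the diff; the trace field is an assumption for illustration):

package cmd

// Error is a hedged sketch of the wrapper implied by errorCause/traceError.
type Error struct {
	e     error    // underlying cause (confirmed by the diff above)
	trace []string // captured call sites (assumed)
}

func (e *Error) Error() string { return e.e.Error() }

// traceError wraps err so call-site information can ride along with it.
func traceError(err error) error {
	if err == nil {
		return nil
	}
	return &Error{e: err}
}

// errorCause returns the underlying cause, or err itself if unwrapped.
func errorCause(err error) error {
	if e, ok := err.(*Error); ok {
		return e.e
	}
	return err
}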

View File

@ -27,10 +27,18 @@ import (
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"sync"
mux "github.com/gorilla/mux" mux "github.com/gorilla/mux"
) )
var objLayerMutex *sync.Mutex
var globalObjectAPI ObjectLayer
func init() {
objLayerMutex = &sync.Mutex{}
}
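Handlers now reach the object layer through api.ObjectAPI() instead of a struct field, so the server can accept requests (and answer ErrServerNotInitialized) before the backend finishes formatting. A minimal sketch of the accessor pair these globals suggest; the function names here are assumptions:

package cmd

// setObjectLayer / getObjectLayer sketch how the global is plausibly
// published and read under objLayerMutex.
func setObjectLayer(objAPI ObjectLayer) {
	objLayerMutex.Lock()
	globalObjectAPI = objAPI
	objLayerMutex.Unlock()
}

func getObjectLayer() ObjectLayer {
	objLayerMutex.Lock()
	defer objLayerMutex.Unlock()
	return globalObjectAPI
}

Wiring the getter into objectAPIHandlers would then let every handler observe the object layer atomically once initialization completes.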
// supportedGetReqParams - supported request parameters for GET presigned request. // supportedGetReqParams - supported request parameters for GET presigned request.
var supportedGetReqParams = map[string]string{ var supportedGetReqParams = map[string]string{
"response-expires": "Expires", "response-expires": "Expires",
@ -84,6 +92,13 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
bucket = vars["bucket"] bucket = vars["bucket"]
object = vars["object"] object = vars["object"]
// Fetch object stat info.
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -101,8 +116,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return return
} }
} }
// Fetch object stat info. objInfo, err := objectAPI.GetObjectInfo(bucket, object)
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err, "Unable to fetch object info.") errorIf(err, "Unable to fetch object info.")
apiErr := toAPIErrorCode(err) apiErr := toAPIErrorCode(err)
@ -161,7 +175,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
}) })
// Reads the object at startOffset and writes to mw. // Reads the object at startOffset and writes to mw.
if err := api.ObjectAPI.GetObject(bucket, object, startOffset, length, writer); err != nil { if err := objectAPI.GetObject(bucket, object, startOffset, length, writer); err != nil {
errorIf(err, "Unable to write to client.") errorIf(err, "Unable to write to client.")
if !dataWritten { if !dataWritten {
// Error response only if no data has been written to client yet. i.e. if // Error response only if no data has been written to client yet. i.e. if
@ -190,6 +204,12 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
bucket = vars["bucket"] bucket = vars["bucket"]
object = vars["object"] object = vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -208,7 +228,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
} }
} }
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err := objectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err, "Unable to fetch object info.") errorIf(err, "Unable to fetch object info.")
apiErr := toAPIErrorCode(err) apiErr := toAPIErrorCode(err)
@ -240,6 +260,12 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -289,7 +315,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return return
} }
objInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject) objInfo, err := objectAPI.GetObjectInfo(sourceBucket, sourceObject)
if err != nil { if err != nil {
errorIf(err, "Unable to fetch object info.") errorIf(err, "Unable to fetch object info.")
writeErrorResponse(w, r, toAPIErrorCode(err), objectSource) writeErrorResponse(w, r, toAPIErrorCode(err), objectSource)
@ -307,11 +333,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return return
} }
// Size of object.
size := objInfo.Size
pipeReader, pipeWriter := io.Pipe() pipeReader, pipeWriter := io.Pipe()
go func() { go func() {
startOffset := int64(0) // Read the whole file. startOffset := int64(0) // Read the whole file.
// Get the object. // Get the object.
gErr := api.ObjectAPI.GetObject(sourceBucket, sourceObject, startOffset, objInfo.Size, pipeWriter) gErr := objectAPI.GetObject(sourceBucket, sourceObject, startOffset, size, pipeWriter)
if gErr != nil { if gErr != nil {
errorIf(gErr, "Unable to read an object.") errorIf(gErr, "Unable to read an object.")
pipeWriter.CloseWithError(gErr) pipeWriter.CloseWithError(gErr)
@ -320,19 +349,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
pipeWriter.Close() // Close. pipeWriter.Close() // Close.
}() }()
// Size of object.
size := objInfo.Size
// Save metadata.
metadata := make(map[string]string)
// Save other metadata if available. // Save other metadata if available.
metadata = objInfo.UserDefined metadata := objInfo.UserDefined
// Do not set `md5sum` as CopyObject will not keep the // Do not set `md5sum` as CopyObject will not keep the
// same md5sum as the source. // same md5sum as the source.
// Create the object. // Create the object.
md5Sum, err := api.ObjectAPI.PutObject(bucket, object, size, pipeReader, metadata) objInfo, err = objectAPI.PutObject(bucket, object, size, pipeReader, metadata)
if err != nil { if err != nil {
// Close this end of the pipe upon error in PutObject. // Close this end of the pipe upon error in PutObject.
pipeReader.CloseWithError(err) pipeReader.CloseWithError(err)
@ -343,13 +367,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Explicitly close the reader, before fetching object info. // Explicitly close the reader, before fetching object info.
pipeReader.Close() pipeReader.Close()
objInfo, err = api.ObjectAPI.GetObjectInfo(bucket, object) md5Sum := objInfo.MD5Sum
if err != nil {
errorIf(err, "Unable to fetch object info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}
response := generateCopyObjectResponse(md5Sum, objInfo.ModTime) response := generateCopyObjectResponse(md5Sum, objInfo.ModTime)
encodedSuccessResponse := encodeResponse(response) encodedSuccessResponse := encodeResponse(response)
// write headers // write headers
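The copy path above streams the source through io.Pipe into PutObject, so the object is never buffered wholly in memory; producer-side failures are propagated with CloseWithError. A standalone sketch of the pattern (the payload is illustrative):

package main

import (
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	pr, pw := io.Pipe()
	// Producer: stream the source object into the pipe, closing with the
	// error so the consumer's Read reports it.
	go func() {
		_, err := io.Copy(pw, strings.NewReader("source object bytes"))
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
	}()
	// Consumer: read the stream as PutObject would read its io.Reader.
	ioutil.ReadAll(pr)
}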
@ -374,6 +392,12 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// ---------- // ----------
// This implementation of the PUT operation adds an object to a bucket. // This implementation of the PUT operation adds an object to a bucket.
func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
// If the matching failed, it means that the X-Amz-Copy-Source was // If the matching failed, it means that the X-Amz-Copy-Source was
// wrong, fail right here. // wrong, fail right here.
if _, ok := r.Header["X-Amz-Copy-Source"]; ok { if _, ok := r.Header["X-Amz-Copy-Source"]; ok {
@ -418,7 +442,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
// Make sure we hex encode md5sum here. // Make sure we hex encode md5sum here.
metadata["md5Sum"] = hex.EncodeToString(md5Bytes) metadata["md5Sum"] = hex.EncodeToString(md5Bytes)
var md5Sum string var objInfo ObjectInfo
switch rAuthType { switch rAuthType {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -431,7 +455,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return return
} }
// Create anonymous object. // Create anonymous object.
md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, metadata) objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata)
case authTypeStreamingSigned: case authTypeStreamingSigned:
// Initialize stream signature verifier. // Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r) reader, s3Error := newSignV4ChunkedReader(r)
@ -439,31 +463,22 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata) objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata)
case authTypePresigned, authTypeSigned: case authTypePresigned, authTypeSigned:
// Initialize signature verifier. // Initialize signature verifier.
reader := newSignVerify(r) reader := newSignVerify(r)
// Create object. // Create object.
md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata) objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata)
} }
if err != nil { if err != nil {
errorIf(err, "Unable to create an object.") errorIf(err, "Unable to create an object.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
if md5Sum != "" { w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
w.Header().Set("ETag", "\""+md5Sum+"\"")
}
writeSuccessResponse(w, nil) writeSuccessResponse(w, nil)
if globalEventNotifier.IsBucketNotificationSet(bucket) { if globalEventNotifier.IsBucketNotificationSet(bucket) {
// Fetch object info for notifications.
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil {
errorIf(err, "Unable to fetch object info for \"%s\"", path.Join(bucket, object))
return
}
// Notify object created event. // Notify object created event.
eventNotify(eventData{ eventNotify(eventData{
Type: ObjectCreatedPut, Type: ObjectCreatedPut,
@ -485,6 +500,12 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
bucket = vars["bucket"] bucket = vars["bucket"]
object = vars["object"] object = vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -506,7 +527,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
// Extract metadata that needs to be saved. // Extract metadata that needs to be saved.
metadata := extractMetadataFromHeader(r.Header) metadata := extractMetadataFromHeader(r.Header)
uploadID, err := api.ObjectAPI.NewMultipartUpload(bucket, object, metadata) uploadID, err := objectAPI.NewMultipartUpload(bucket, object, metadata)
if err != nil { if err != nil {
errorIf(err, "Unable to initiate new multipart upload id.") errorIf(err, "Unable to initiate new multipart upload id.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@ -527,6 +548,12 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
// get Content-Md5 sent by client and verify if valid // get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5")) md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil { if err != nil {
@ -588,7 +615,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return return
} }
// No need to verify signature, anonymous request access is already allowed. // No need to verify signature, anonymous request access is already allowed.
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5) partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5)
case authTypeStreamingSigned: case authTypeStreamingSigned:
// Initialize stream signature verifier. // Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r) reader, s3Error := newSignV4ChunkedReader(r)
@ -596,11 +623,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5) partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5)
case authTypePresigned, authTypeSigned: case authTypePresigned, authTypeSigned:
// Initialize signature verifier. // Initialize signature verifier.
reader := newSignVerify(r) reader := newSignVerify(r)
partMD5, err = api.ObjectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5) partMD5, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5)
} }
if err != nil { if err != nil {
errorIf(err, "Unable to create object part.") errorIf(err, "Unable to create object part.")
@ -620,6 +647,12 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -639,7 +672,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
} }
uploadID, _, _, _ := getObjectResources(r.URL.Query()) uploadID, _, _, _ := getObjectResources(r.URL.Query())
if err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil { if err := objectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil {
errorIf(err, "Unable to abort multipart upload.") errorIf(err, "Unable to abort multipart upload.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
@ -653,6 +686,12 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -680,7 +719,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
writeErrorResponse(w, r, ErrInvalidMaxParts, r.URL.Path) writeErrorResponse(w, r, ErrInvalidMaxParts, r.URL.Path)
return return
} }
listPartsInfo, err := api.ObjectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) listPartsInfo, err := objectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil { if err != nil {
errorIf(err, "Unable to list uploaded parts.") errorIf(err, "Unable to list uploaded parts.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
@ -700,6 +739,12 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
// Get upload id. // Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query()) uploadID, _, _, _ := getObjectResources(r.URL.Query())
@ -762,9 +807,10 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
} }
doneCh := make(chan struct{}) doneCh := make(chan struct{})
// Signal that completeMultipartUpload is over via doneCh // Signal that completeMultipartUpload is over via doneCh
go func(doneCh chan<- struct{}) { go func(doneCh chan<- struct{}) {
md5Sum, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts) md5Sum, err = objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
doneCh <- struct{}{} doneCh <- struct{}{}
}(doneCh) }(doneCh)
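Running CompleteMultipartUpload in its own goroutine lets the handler keep servicing the connection while the backend assembles the parts. A hedged sketch (an assumption, not the commit's exact code) of a wait loop that trickles whitespace so intermediaries don't time out the in-flight response:

package cmd

import (
	"net/http"
	"time"
)

// waitForCompletion waits on doneCh while periodically writing a space so
// proxies keep the connection alive; interval and names are illustrative.
func waitForCompletion(w http.ResponseWriter, doneCh <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-doneCh:
			// CompleteMultipartUpload finished in its goroutine.
			return
		case <-ticker.C:
			// Trickle a byte to keep the response alive.
			w.Write([]byte(" "))
		}
	}
}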
@ -799,7 +845,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
if globalEventNotifier.IsBucketNotificationSet(bucket) { if globalEventNotifier.IsBucketNotificationSet(bucket) {
// Fetch object info for notifications. // Fetch object info for notifications.
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err := objectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err, "Unable to fetch object info for \"%s\"", path.Join(bucket, object)) errorIf(err, "Unable to fetch object info for \"%s\"", path.Join(bucket, object))
return return
@ -825,6 +871,12 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path)
return
}
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -845,7 +897,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
/// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html /// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
/// Ignore delete object errors, since we are supposed to reply /// Ignore delete object errors, since we are supposed to reply
/// only 204. /// only 204.
if err := api.ObjectAPI.DeleteObject(bucket, object); err != nil { if err := objectAPI.DeleteObject(bucket, object); err != nil {
writeSuccessNoContent(w) writeSuccessNoContent(w)
return return
} }

View File

@ -27,34 +27,12 @@ import (
// Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup. // Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup.
func TestAPIGetOjectHandler(t *testing.T) { func TestAPIGetOjectHandler(t *testing.T) {
ExecObjectLayerTest(t, testAPIGetOjectHandler) ExecObjectLayerAPITest(t, testAPIGetOjectHandler, []string{"GetObject"})
} }
func testAPIGetOjectHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { func testAPIGetOjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials credential, t TestErrHandler) {
// get random bucket name.
bucketName := getRandomBucketName()
objectName := "test-object" objectName := "test-object"
// Create bucket.
err := obj.MakeBucket(bucketName)
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
}
// Register the API end points with XL/FS object layer.
// Registering only the GetObject handler.
apiRouter := initTestAPIEndPoints(obj, []string{"GetObject"})
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root folder after the test ends.
defer removeAll(rootPath)
credentials := serverConfig.GetCredential()
// set of byte data for PutObject. // set of byte data for PutObject.
// object has to be inserted before running tests for GetObject. // object has to be inserted before running tests for GetObject.
// this is required even to assert the GetObject data, // this is required even to assert the GetObject data,
@ -78,7 +56,7 @@ func testAPIGetOjectHandler(obj ObjectLayer, instanceType string, t TestErrHandl
// iterate through the above set of inputs and upload the object. // iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData) _, err := obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -174,40 +152,168 @@ func testAPIGetOjectHandler(obj ObjectLayer, instanceType string, t TestErrHandl
} }
} }
// Wrapper for calling Copy Object API handler tests for both XL multiple disks and single node setup. // Wrapper for calling PutObject API handler tests using streaming signature v4 for both XL multiple disks and FS single drive setup.
func TestAPICopyObjectHandler(t *testing.T) { func TestAPIPutObjectStreamSigV4Handler(t *testing.T) {
ExecObjectLayerTest(t, testAPICopyObjectHandler) ExecObjectLayerAPITest(t, testAPIPutObjectStreamSigV4Handler, []string{"PutObject"})
} }
func testAPICopyObjectHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
// get random bucket name. credentials credential, t TestErrHandler) {
bucketName := getRandomBucketName()
objectName := "test-object"
// Create bucket.
err := obj.MakeBucket(bucketName)
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
}
// Register the API end points with XL/FS object layer.
// Registering only the Copy Object handler.
apiRouter := initTestAPIEndPoints(obj, []string{"CopyObject"})
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root folder after the test ends.
defer removeAll(rootPath)
err = initEventNotifier(obj) objectName := "test-object"
bytesDataLen := 65 * 1024
bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen)
// byte data for PutObject.
// test cases with inputs and expected results for PutObject.
testCases := []struct {
bucketName string
objectName string
data []byte
dataLen int
// expected output.
expectedContent []byte // expected response body.
expectedRespStatus int // expected response status code.
}{
// Test case - 1.
// Uploading the object and validating the response status.
{
bucketName: bucketName,
objectName: objectName,
data: bytesData,
dataLen: len(bytesData),
expectedContent: []byte{},
expectedRespStatus: http.StatusOK,
},
}
// Iterating over the cases, uploading the object and validating the response.
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for Put Object end point.
req, err := newTestStreamingSignedRequest("PUT",
getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), 64*1024, bytes.NewReader(testCase.data),
credentials.AccessKeyID, credentials.SecretAccessKey)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for Put Object: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler, `func (api objectAPIHandlers) PutObjectHandler` handles the request.
apiRouter.ServeHTTP(rec, req)
// Assert the response code with the expected status.
if rec.Code != testCase.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code)
}
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
// Verify whether the response body matches the expected content.
if !bytes.Equal(testCase.expectedContent, actualContent) {
t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent))
}
buffer := new(bytes.Buffer)
err = obj.GetObject(testCase.bucketName, testCase.objectName, 0, int64(bytesDataLen), buffer)
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
}
if !bytes.Equal(bytesData, buffer.Bytes()) {
t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i+1, instanceType)
}
buffer.Reset()
}
}
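The streaming test above drives newSignV4ChunkedReader with 64 KiB chunks. For reference, streaming signature V4 bodies use the aws-chunked framing: each chunk is prefixed by its hex size and a per-chunk signature, and the stream ends with a zero-length chunk. A hedged framing sketch (the signature value is a placeholder, not a computed one):

package cmd

import (
	"bytes"
	"fmt"
)

// frameChunk encodes one aws-chunked frame:
// "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n".
func frameChunk(data []byte, signature string) []byte {
	var b bytes.Buffer
	fmt.Fprintf(&b, "%x;chunk-signature=%s\r\n", len(data), signature)
	b.Write(data)
	b.WriteString("\r\n")
	return b.Bytes()
}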
// Wrapper for calling PutObject API handler tests for both XL multiple disks and FS single drive setup.
func TestAPIPutObjectHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testAPIPutObjectHandler, []string{"PutObject"})
}
func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials credential, t TestErrHandler) {
objectName := "test-object"
// byte data for PutObject.
bytesData := generateBytesData(6 * 1024 * 1024)
// test cases with inputs and expected results for PutObject.
testCases := []struct {
bucketName string
objectName string
data []byte
dataLen int
// expected output.
expectedContent []byte // expected response body.
expectedRespStatus int // expected response status code.
}{
// Test case - 1.
// Uploading the object and validating the response status.
{
bucketName: bucketName,
objectName: objectName,
data: bytesData,
dataLen: len(bytesData),
expectedContent: []byte{},
expectedRespStatus: http.StatusOK,
},
}
// Iterating over the cases, uploading the object and validating the response.
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for Put Object end point.
req, err := newTestSignedRequest("PUT", getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), bytes.NewReader(testCase.data), credentials.AccessKeyID, credentials.SecretAccessKey)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for Put Object: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler, `func (api objectAPIHandlers) PutObjectHandler` handles the request.
apiRouter.ServeHTTP(rec, req)
// Assert the response code with the expected status.
if rec.Code != testCase.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code)
}
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
// Verify whether the response body matches the expected content.
if !bytes.Equal(testCase.expectedContent, actualContent) {
t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent))
}
buffer := new(bytes.Buffer)
err = obj.GetObject(testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer)
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
}
if !bytes.Equal(bytesData, buffer.Bytes()) {
t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i+1, instanceType)
}
buffer.Reset()
}
}
// Wrapper for calling Copy Object API handler tests for both XL multiple disks and single node setup.
func TestAPICopyObjectHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testAPICopyObjectHandler, []string{"CopyObject"})
}
func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials credential, t TestErrHandler) {
objectName := "test-object"
// register event notifier.
err := initEventNotifier(obj)
if err != nil { if err != nil {
t.Fatalf("Initializing event notifiers failed") t.Fatalf("Initializing event notifiers failed")
} }
credentials := serverConfig.GetCredential()
// set of byte data for PutObject. // set of byte data for PutObject.
// object has to be inserted before running tests for Copy Object. // object has to be inserted before running tests for Copy Object.
// this is required even to assert the copied object, // this is required even to assert the copied object,

View File

@ -22,6 +22,7 @@ import "io"
type ObjectLayer interface { type ObjectLayer interface {
// Storage operations. // Storage operations.
Shutdown() error Shutdown() error
HealDiskMetadata() error
StorageInfo() StorageInfo StorageInfo() StorageInfo
// Bucket operations. // Bucket operations.
@ -35,7 +36,7 @@ type ObjectLayer interface {
// Object operations. // Object operations.
GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string) (md5 string, err error) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
DeleteObject(bucket, object string) error DeleteObject(bucket, object string) error
HealObject(bucket, object string) error HealObject(bucket, object string) error

View File

@ -72,12 +72,12 @@ func readUploadsJSON(bucket, object string, disk StorageAPI) (uploadIDs uploadsV
// Reads entire `uploads.json`. // Reads entire `uploads.json`.
buf, err := disk.ReadAll(minioMetaBucket, uploadJSONPath) buf, err := disk.ReadAll(minioMetaBucket, uploadJSONPath)
if err != nil { if err != nil {
return uploadsV1{}, err return uploadsV1{}, traceError(err)
} }
// Decode `uploads.json`. // Decode `uploads.json`.
if err = json.Unmarshal(buf, &uploadIDs); err != nil { if err = json.Unmarshal(buf, &uploadIDs); err != nil {
return uploadsV1{}, err return uploadsV1{}, traceError(err)
} }
// Success. // Success.
@ -103,7 +103,7 @@ func cleanupUploadedParts(bucket, object, uploadID string, storageDisks ...Stora
// Cleanup uploadID for all disks. // Cleanup uploadID for all disks.
for index, disk := range storageDisks { for index, disk := range storageDisks {
if disk == nil { if disk == nil {
errs[index] = errDiskNotFound errs[index] = traceError(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)

View File

@ -148,7 +148,7 @@ func completeMultipartMD5(parts ...completePart) (string, error) {
for _, part := range parts { for _, part := range parts {
md5Bytes, err := hex.DecodeString(part.ETag) md5Bytes, err := hex.DecodeString(part.ETag)
if err != nil { if err != nil {
return "", err return "", traceError(err)
} }
finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
} }
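completeMultipartMD5 above decodes and concatenates the per-part MD5s; by the S3 convention, the final ETag is the MD5 of that concatenation suffixed with "-<part count>". A self-contained sketch of that convention (assuming this is how the function finishes):

package cmd

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// multipartETagSketch computes md5(md5(part1) .. md5(partN)) + "-N".
func multipartETagSketch(partMD5s [][]byte) string {
	var all []byte
	for _, m := range partMD5s {
		all = append(all, m...)
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partMD5s))
}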

View File

@ -200,12 +200,12 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, c TestErrH
objects[key] = []byte(randomString) objects[key] = []byte(randomString)
metadata := make(map[string]string) metadata := make(map[string]string)
metadata["md5Sum"] = expectedMD5Sumhex metadata["md5Sum"] = expectedMD5Sumhex
var md5Sum string var objInfo ObjectInfo
md5Sum, err = obj.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata) objInfo, err = obj.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata)
if err != nil { if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err) c.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
if md5Sum != expectedMD5Sumhex { if objInfo.MD5Sum != expectedMD5Sumhex {
c.Errorf("Md5 Mismatch") c.Errorf("Md5 Mismatch")
} }
} }
@ -625,6 +625,9 @@ func testListBuckets(obj ObjectLayer, instanceType string, c TestErrHandler) {
// add three and test exists + prefix. // add three and test exists + prefix.
err = obj.MakeBucket("bucket22") err = obj.MakeBucket("bucket22")
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
buckets, err = obj.ListBuckets() buckets, err = obj.ListBuckets()
if err != nil { if err != nil {
@ -707,6 +710,7 @@ func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, c TestE
if err == nil { if err == nil {
c.Fatalf("%s: Expected error but found nil", instanceType) c.Fatalf("%s: Expected error but found nil", instanceType)
} }
err = errorCause(err)
switch err := err.(type) { switch err := err.(type) {
case ObjectNotFound: case ObjectNotFound:
if err.Error() != "Object not found: bucket#dir1" { if err.Error() != "Object not found: bucket#dir1" {
@ -740,6 +744,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
} }
_, err = obj.GetObjectInfo("bucket", "dir1") _, err = obj.GetObjectInfo("bucket", "dir1")
err = errorCause(err)
switch err := err.(type) { switch err := err.(type) {
case ObjectNotFound: case ObjectNotFound:
if err.Bucket != "bucket" { if err.Bucket != "bucket" {
@ -755,6 +760,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
} }
_, err = obj.GetObjectInfo("bucket", "dir1/") _, err = obj.GetObjectInfo("bucket", "dir1/")
err = errorCause(err)
switch err := err.(type) { switch err := err.(type) {
case ObjectNameInvalid: case ObjectNameInvalid:
if err.Bucket != "bucket" { if err.Bucket != "bucket" {

89
cmd/posix-errors.go Normal file
View File

@ -0,0 +1,89 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
"runtime"
"syscall"
)
// Check if the given error corresponds to ENOTDIR (is not a directory)
func isSysErrNotDir(err error) bool {
if pathErr, ok := err.(*os.PathError); ok {
switch pathErr.Err {
case syscall.ENOTDIR:
return true
}
}
return false
}
// Check if the given error corresponds to EISDIR (is a directory)
func isSysErrIsDir(err error) bool {
if pathErr, ok := err.(*os.PathError); ok {
switch pathErr.Err {
case syscall.EISDIR:
return true
}
}
return false
}
// Check if the given error corresponds to ENOTEMPTY for unix
// and ERROR_DIR_NOT_EMPTY for windows (directory not empty)
func isSysErrNotEmpty(err error) bool {
if pathErr, ok := err.(*os.PathError); ok {
if runtime.GOOS == "windows" {
if errno, _ok := pathErr.Err.(syscall.Errno); _ok && errno == 0x91 {
// ERROR_DIR_NOT_EMPTY
return true
}
}
switch pathErr.Err {
case syscall.ENOTEMPTY:
return true
}
}
return false
}
// Check if the given error corresponds to the specific ERROR_PATH_NOT_FOUND for windows
func isSysErrPathNotFound(err error) bool {
if runtime.GOOS != "windows" {
return false
}
if pathErr, ok := err.(*os.PathError); ok {
if errno, _ok := pathErr.Err.(syscall.Errno); _ok && errno == 0x03 {
// ERROR_PATH_NOT_FOUND
return true
}
}
return false
}
// Check if the given error corresponds to the specific ERROR_INVALID_HANDLE for windows
func isSysErrHandleInvalid(err error) bool {
if runtime.GOOS != "windows" {
return false
}
// Check if err contains ERROR_INVALID_HANDLE errno
if errno, ok := err.(syscall.Errno); ok && errno == 0x6 {
return true
}
return false
}
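These predicates replace the brittle substring matches on error messages that the posix.go hunks below delete. A small hedged usage sketch, mapping an os-level failure to the storage sentinels used elsewhere in this commit:

package cmd

import "os"

// classifyRemoveError translates an os-level error into the storage-layer
// sentinel errors; the function name is illustrative.
func classifyRemoveError(err error) error {
	switch {
	case err == nil:
		return nil
	case os.IsNotExist(err):
		return errVolumeNotFound
	case isSysErrNotEmpty(err):
		return errVolumeNotEmpty
	default:
		return err
	}
}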

View File

@ -89,7 +89,6 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) {
// Could happen if it was deleted in the middle while // Could happen if it was deleted in the middle while
// this list was being performed. // this list was being performed.
if os.IsNotExist(err) { if os.IsNotExist(err) {
err = nil
continue continue
} }
return nil, err return nil, err

View File

@ -41,7 +41,9 @@ func TestUNCPaths(t *testing.T) {
// Instantiate posix object to manage a disk // Instantiate posix object to manage a disk
var err error var err error
err = os.Mkdir("c:\\testdisk", 0700) err = os.Mkdir("c:\\testdisk", 0700)
if err != nil {
t.Fatal(err)
}
// Cleanup on exit of test // Cleanup on exit of test
defer os.RemoveAll("c:\\testdisk") defer os.RemoveAll("c:\\testdisk")
@ -74,7 +76,9 @@ func TestUNCPathENOTDIR(t *testing.T) {
var err error var err error
// Instantiate posix object to manage a disk // Instantiate posix object to manage a disk
err = os.Mkdir("c:\\testdisk", 0700) err = os.Mkdir("c:\\testdisk", 0700)
if err != nil {
t.Fatal(err)
}
// Cleanup on exit of test // Cleanup on exit of test
defer os.RemoveAll("c:\\testdisk") defer os.RemoveAll("c:\\testdisk")
var fs StorageAPI var fs StorageAPI

View File

@ -160,6 +160,12 @@ func checkDiskFree(diskPath string, minFreeDisk int64) (err error) {
return nil return nil
} }
// DiskInfo provides current information about disk space usage,
// total free inodes and underlying filesystem.
func (s *posix) DiskInfo() (info disk.Info, err error) {
return getDiskInfo(s.diskPath)
}
// getVolDir - will convert incoming volume names to // getVolDir - will convert incoming volume names to
// corresponding valid volume names on the backend in a platform // corresponding valid volume names on the backend in a platform
// compatible way for all operating systems. If volume is not found // compatible way for all operating systems. If volume is not found
@ -333,12 +339,7 @@ func (s *posix) DeleteVol(volume string) (err error) {
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return errVolumeNotFound return errVolumeNotFound
} else if strings.Contains(err.Error(), "directory is not empty") { } else if isSysErrNotEmpty(err) {
// On windows the string is slightly different, handle it here.
return errVolumeNotEmpty
} else if strings.Contains(err.Error(), "directory not empty") {
// Hopefully for all other operating systems, this is
// assumed to be consistent.
return errVolumeNotEmpty return errVolumeNotEmpty
} }
return err return err
@ -433,7 +434,7 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) {
case syscall.ENOTDIR, syscall.EISDIR: case syscall.ENOTDIR, syscall.EISDIR:
return nil, errFileNotFound return nil, errFileNotFound
default: default:
if strings.Contains(pathErr.Err.Error(), "The handle is invalid") { if isSysErrHandleInvalid(pathErr.Err) {
// This case is special and needs to be handled for windows. // This case is special and needs to be handled for windows.
return nil, errFileNotFound return nil, errFileNotFound
} }
@ -492,7 +493,7 @@ func (s *posix) ReadFile(volume string, path string, offset int64, buf []byte) (
return 0, errFileNotFound return 0, errFileNotFound
} else if os.IsPermission(err) { } else if os.IsPermission(err) {
return 0, errFileAccessDenied return 0, errFileAccessDenied
} else if strings.Contains(err.Error(), "not a directory") { } else if isSysErrNotDir(err) {
return 0, errFileAccessDenied return 0, errFileAccessDenied
} }
return 0, err return 0, err
@ -569,9 +570,9 @@ func (s *posix) AppendFile(volume, path string, buf []byte) (err error) {
// with mode 0777 mkdir honors system umask. // with mode 0777 mkdir honors system umask.
if err = mkdirAll(filepath.Dir(filePath), 0777); err != nil { if err = mkdirAll(filepath.Dir(filePath), 0777); err != nil {
// File path cannot be verified since one of the parents is a file. // File path cannot be verified since one of the parents is a file.
if strings.Contains(err.Error(), "not a directory") { if isSysErrNotDir(err) {
return errFileAccessDenied return errFileAccessDenied
} else if runtime.GOOS == "windows" && strings.Contains(err.Error(), "system cannot find the path specified") { } else if isSysErrPathNotFound(err) {
// Add specific case for windows. // Add specific case for windows.
return errFileAccessDenied return errFileAccessDenied
} }
@ -583,7 +584,7 @@ func (s *posix) AppendFile(volume, path string, buf []byte) (err error) {
w, err := os.OpenFile(preparePath(filePath), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666) w, err := os.OpenFile(preparePath(filePath), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)
if err != nil { if err != nil {
// File path cannot be verified since one of the parents is a file. // File path cannot be verified since one of the parents is a file.
if strings.Contains(err.Error(), "not a directory") { if isSysErrNotDir(err) {
return errFileAccessDenied return errFileAccessDenied
} }
return err return err
@ -639,7 +640,7 @@ func (s *posix) StatFile(volume, path string) (file FileInfo, err error) {
} }
// File path cannot be verified since one of the parents is a file. // File path cannot be verified since one of the parents is a file.
if strings.Contains(err.Error(), "not a directory") { if isSysErrNotDir(err) {
return FileInfo{}, errFileNotFound return FileInfo{}, errFileNotFound
} }
@ -798,9 +799,9 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e
// Creates all the parent directories, with mode 0777 mkdir honors system umask. // Creates all the parent directories, with mode 0777 mkdir honors system umask.
if err = mkdirAll(preparePath(slashpath.Dir(dstFilePath)), 0777); err != nil { if err = mkdirAll(preparePath(slashpath.Dir(dstFilePath)), 0777); err != nil {
// File path cannot be verified since one of the parents is a file. // File path cannot be verified since one of the parents is a file.
if strings.Contains(err.Error(), "not a directory") { if isSysErrNotDir(err) {
return errFileAccessDenied return errFileAccessDenied
} else if strings.Contains(err.Error(), "The system cannot find the path specified.") && runtime.GOOS == "windows" { } else if isSysErrPathNotFound(err) {
// This is a special case should be handled only for // This is a special case should be handled only for
// windows, because windows API does not return "not a // windows, because windows API does not return "not a
// directory" error message. Handle this specifically here. // directory" error message. Handle this specifically here.

View File

@ -18,7 +18,6 @@ package cmd
import ( import (
"bytes" "bytes"
"errors"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -901,7 +900,7 @@ func TestReadFile(t *testing.T) {
return &os.PathError{ return &os.PathError{
Op: "seek", Op: "seek",
Path: preparePath(slashpath.Join(path, "success-vol", "myobject")), Path: preparePath(slashpath.Join(path, "success-vol", "myobject")),
Err: errors.New("An attempt was made to move the file pointer before the beginning of the file."), Err: syscall.Errno(0x83), // ERROR_NEGATIVE_SEEK
} }
} }
return &os.PathError{ return &os.PathError{
@ -953,7 +952,24 @@ func TestReadFile(t *testing.T) {
if err != nil && testCase.expectedErr != nil { if err != nil && testCase.expectedErr != nil {
// Validate if the type string of the errors are an exact match. // Validate if the type string of the errors are an exact match.
if err.Error() != testCase.expectedErr.Error() { if err.Error() != testCase.expectedErr.Error() {
t.Errorf("Case: %d %#v, expected: %s, got: %s", i+1, testCase, testCase.expectedErr, err) if runtime.GOOS != "windows" {
t.Errorf("Case: %d %#v, expected: %s, got: %s", i+1, testCase, testCase.expectedErr, err)
} else {
var resultErrno, expectErrno uintptr
if pathErr, ok := err.(*os.PathError); ok {
if errno, pok := pathErr.Err.(syscall.Errno); pok {
resultErrno = uintptr(errno)
}
}
if pathErr, ok := testCase.expectedErr.(*os.PathError); ok {
if errno, pok := pathErr.Err.(syscall.Errno); pok {
expectErrno = uintptr(errno)
}
}
if !(expectErrno != 0 && resultErrno != 0 && expectErrno == resultErrno) {
t.Errorf("Case: %d %#v, expected: %s, got: %s", i+1, testCase, testCase.expectedErr, err)
}
}
} }
// Err unexpected EOF special case, where we verify we have provided a larger // Err unexpected EOF special case, where we verify we have provided a larger
// buffer than the data itself, but the results are in-fact valid. So we validate // buffer than the data itself, but the results are in-fact valid. So we validate

201
cmd/post-policy_test.go Normal file
View File

@ -0,0 +1,201 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"encoding/base64"
"fmt"
"mime/multipart"
"net/http"
"net/http/httptest"
"testing"
"time"
)
const (
expirationDateFormat = "2006-01-02T15:04:05.999Z"
iso8601DateFormat = "20060102T150405Z"
)
// newPostPolicyBytes - creates a bare bones postpolicy string with key and bucket matches.
func newPostPolicyBytes(credential, bucketName, objectKey string, expiration time.Time) []byte {
t := time.Now().UTC()
// Add the expiration date.
expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(expirationDateFormat))
// Add the bucket condition, only accept buckets equal to the one passed.
bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
// Add the key condition, only accept keys equal to the one passed.
keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s"]`, objectKey)
// Add the algorithm condition, only accept AWS SignV4 Sha256.
algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]`
// Add the date condition, only accept the current date.
dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat))
// Add the credential string, only accept the credential passed.
credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential)
// Combine all conditions into one string.
conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr)
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr
retStr = retStr + "}"
return []byte(retStr)
}
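For illustration only, with bucket "mybucket", key "test-object", and a hypothetical credential scope string, the generated policy document is shaped like the following (timestamps abbreviated):

{"expiration": "2016-09-14T00:05:00.000Z",
 "conditions":[["eq", "$bucket", "mybucket"], ["eq", "$key", "test-object"],
  ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "20160914T000000Z"],
  ["eq", "$x-amz-credential", "ACCESSKEY/20160914/us-east-1/s3/aws4_request"]]}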
// Wrapper for calling PostPolicy handler tests for both XL multiple disks and single node setup.
func TestPostPolicyHandler(t *testing.T) {
ExecObjectLayerTest(t, testPostPolicyHandler)
}
// testPostPolicyHandler - Tests validate post policy handler uploading objects.
func testPostPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
// get random bucket name.
bucketName := getRandomBucketName()
// Register the API end points with XL/FS object layer.
apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"})
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
rootPath, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root folder after the test ends.
defer removeAll(rootPath)
// Create bucket before running the PostPolicy tests.
err = obj.MakeBucket(bucketName)
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Collection of non-exhaustive PostPolicy test cases, valid errors
// and success responses.
testCases := []struct {
objectName string
data []byte
expectedRespStatus int
shouldPass bool
}{
// Success case.
{
objectName: "test",
data: []byte("Hello, World"),
expectedRespStatus: http.StatusNoContent,
shouldPass: true,
},
// Bad case.
{
objectName: "test",
data: []byte("Hello, World"),
expectedRespStatus: http.StatusBadRequest,
shouldPass: false,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
req, perr := newPostRequest("", bucketName, testCase.objectName, testCase.data, testCase.shouldPass)
if perr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
}
}
// postPresignSignatureV4 - presigned signature for PostPolicy requests.
func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
// Get signing key.
signingkey := getSigningKey(secretAccessKey, t, location)
// Calculate signature.
signature := getSignature(signingkey, policyBase64)
return signature
}
func newPostRequest(endPoint, bucketName, objectName string, objData []byte, shouldPass bool) (*http.Request, error) {
// Keep time.
t := time.Now().UTC()
// Expire the request five minutes from now.
expirationTime := t.Add(time.Minute * 5)
// Get the user credential.
credentials := serverConfig.GetCredential()
credStr := getCredential(credentials.AccessKeyID, serverConfig.GetRegion(), t)
// Create a new post policy.
policy := newPostPolicyBytes(credStr, bucketName, objectName, expirationTime)
// Only need the encoding.
encodedPolicy := base64.StdEncoding.EncodeToString(policy)
formData := make(map[string]string)
if shouldPass {
// Presign with V4 signature based on the policy.
signature := postPresignSignatureV4(encodedPolicy, t, credentials.SecretAccessKey, serverConfig.GetRegion())
formData = map[string]string{
"bucket": bucketName,
"key": objectName,
"x-amz-credential": credStr,
"policy": encodedPolicy,
"x-amz-signature": signature,
"x-amz-date": t.Format(iso8601DateFormat),
"x-amz-algorithm": "AWS4-HMAC-SHA256",
}
}
// Create the multipart form.
var buf bytes.Buffer
w := multipart.NewWriter(&buf)
// Set the normal formData
for k, v := range formData {
w.WriteField(k, v)
}
// Set the File formData
writer, err := w.CreateFormFile("file", "s3verify/post/object")
if err != nil {
return nil, err
}
writer.Write(objData)
// Close before creating the new request.
w.Close()
// Set the body equal to the created policy.
reader := bytes.NewReader(buf.Bytes())
req, err := http.NewRequest("POST", makeTestTargetURL(endPoint, bucketName, objectName, nil), reader)
if err != nil {
return nil, err
}
// Set form content-type.
req.Header.Set("Content-Type", w.FormDataContentType())
return req, nil
}

227
cmd/prepare-storage.go Normal file
View File

@ -0,0 +1,227 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"time"
"github.com/minio/minio-go/pkg/set"
)
// Channel where minioctl heal handler would notify if it were successful. This
// would be used by waitForFormattingDisks routine to check if it's worth
// retrying loadAllFormats.
var globalWakeupCh chan struct{}
func init() {
globalWakeupCh = make(chan struct{}, 1)
}
/*
Following table lists different possible states the backend could be in.
* In a single-node, multi-disk setup, "Online" would refer to disks' status.
* In a multi-node setup, it could refer to disks' or network connectivity
between the nodes, or both.
+----------+--------------------------+-----------------------+
| Online | Format status | Course of action |
| | | |
+----------+--------------------------+-----------------------+
| All | All Formatted | |
+----------+--------------------------+ initObjectLayer |
| Quorum | Quorum Formatted | |
+----------+--------------------------+-----------------------+
| All | Quorum | Print message saying |
| | Formatted, | "Heal via minioctl" |
| | some unformatted | and initObjectLayer |
+----------+--------------------------+-----------------------+
| All | None Formatted | FormatDisks |
| | | and initObjectLayer |
| | | |
+----------+--------------------------+-----------------------+
| | | Wait for notify from |
| Quorum | | "Heal via minioctl" |
| | Quorum UnFormatted | |
+----------+--------------------------+-----------------------+
| No | | Wait till enough |
| Quorum | _ | nodes are online and |
| | | one of the above |
| | | sections apply |
+----------+--------------------------+-----------------------+
N.B. A disk can be in one of the following states.
- Unformatted
- Formatted
- Corrupted
- Offline
*/
// InitActions - a type synonym for enumerating initialization activities.
type InitActions int
const (
// FormatDisks - see above table for disk states where it is applicable.
FormatDisks InitActions = iota
// WaitForHeal - Wait for disks to heal.
WaitForHeal
// WaitForQuorum - Wait for quorum number of disks to be online.
WaitForQuorum
// WaitForAll - Wait for all disks to be online.
WaitForAll
// WaitForFormatting - Wait for formatting to be triggered from the '1st' server in the cluster.
WaitForFormatting
// InitObjectLayer - Initialize object layer.
InitObjectLayer
// Abort initialization of object layer since there aren't enough good
// copies of format.json to recover.
Abort
)
func prepForInit(disks []string, sErrs []error, diskCount int) InitActions {
// Count errors by error value.
errMap := make(map[error]int)
for _, err := range sErrs {
errMap[err]++
}
quorum := diskCount/2 + 1
disksOffline := errMap[errDiskNotFound]
disksFormatted := errMap[nil]
disksUnformatted := errMap[errUnformattedDisk]
disksCorrupted := errMap[errCorruptedFormat]
// All disks are unformatted, proceed to formatting disks.
if disksUnformatted == diskCount {
// Only the first server formats an uninitialized setup, others wait for notification.
if isLocalStorage(disks[0]) {
return FormatDisks
}
return WaitForFormatting
} else if disksUnformatted >= quorum {
if disksUnformatted+disksOffline == diskCount {
return WaitForAll
}
// Some disks possibly corrupted.
return WaitForHeal
}
// Already formatted, proceed to initialization of object layer.
if disksFormatted == diskCount {
return InitObjectLayer
} else if disksFormatted >= quorum {
if (disksFormatted+disksOffline == diskCount) ||
(disksFormatted+disksUnformatted == diskCount) {
return InitObjectLayer
}
// Some disks possibly corrupted.
return WaitForHeal
}
// No Quorum.
if disksOffline >= quorum {
return WaitForQuorum
}
// Quorum or more disks are corrupted, so there are not enough good
// disks left to reconstruct format.json.
if disksCorrupted >= quorum {
return Abort
}
// Some of the formatted disks are possibly offline.
return WaitForHeal
}
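A worked example of the arithmetic for an 8-disk setup (quorum = 8/2 + 1 = 5). Disk names are illustrative; the readable output relies on the String() helper defined in prepare-storage_test.go below.

package cmd

import "fmt"

// exampleInitActions walks prepForInit through two 8-disk scenarios.
func exampleInitActions() {
	disks := make([]string, 8)
	for i := range disks {
		disks[i] = fmt.Sprintf("localhost:/mnt/disk%d", i+1)
	}
	// 5 disks formatted, 3 offline: formatted >= quorum and
	// formatted+offline == diskCount, so the object layer initializes.
	sErrs := []error{
		nil, nil, nil, nil, nil,
		errDiskNotFound, errDiskNotFound, errDiskNotFound,
	}
	fmt.Println(prepForInit(disks, sErrs, len(disks))) // InitObjectLayer
	// 5 disks offline: no quorum of reachable disks, so wait.
	sErrs = []error{
		errDiskNotFound, errDiskNotFound, errDiskNotFound,
		errDiskNotFound, errDiskNotFound, nil, nil, nil,
	}
	fmt.Println(prepForInit(disks, sErrs, len(disks))) // WaitForQuorum
}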
func retryFormattingDisks(disks []string, storageDisks []StorageAPI) ([]StorageAPI, error) {
nextBackoff := time.Duration(0)
var err error
done := false
for !done {
select {
case <-time.After(nextBackoff * time.Second):
// Attempt to load all `format.json`.
_, sErrs := loadAllFormats(storageDisks)
switch prepForInit(disks, sErrs, len(storageDisks)) {
case Abort:
err = errCorruptedFormat
done = true
case FormatDisks:
err = initFormatXL(storageDisks)
done = true
case InitObjectLayer:
err = nil
done = true
}
// Grow the backoff so unsuccessful retries do not spin hot.
nextBackoff++
case <-globalWakeupCh:
// Reset nextBackoff to reduce the subsequent wait and re-read
// format.json from all disks again.
nextBackoff = 0
}
}
if err != nil {
return nil, err
}
return storageDisks, nil
}
func waitForFormattingDisks(disks, ignoredDisks []string) ([]StorageAPI, error) {
// FS Setup
if len(disks) == 1 {
storage, err := newStorageAPI(disks[0])
if err != nil && err != errDiskNotFound {
return nil, err
}
return []StorageAPI{storage}, nil
}
// XL Setup
if err := checkSufficientDisks(disks); err != nil {
return nil, err
}
disksSet := set.NewStringSet()
if len(ignoredDisks) > 0 {
disksSet = set.CreateStringSet(ignoredDisks...)
}
// Bootstrap disks.
storageDisks := make([]StorageAPI, len(disks))
for index, disk := range disks {
// Check if disk is ignored.
if disksSet.Contains(disk) {
storageDisks[index] = nil
continue
}
// Intentionally ignore disk not found errors. XL is designed
// to handle these errors internally.
storage, err := newStorageAPI(disk)
if err != nil && err != errDiskNotFound {
return nil, err
}
storageDisks[index] = storage
}
return retryFormattingDisks(disks, storageDisks)
}

153
cmd/prepare-storage_test.go Normal file
View File

@ -0,0 +1,153 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"runtime"
"testing"
)
func (action InitActions) String() string {
switch action {
case InitObjectLayer:
return "InitObjectLayer"
case FormatDisks:
return "FormatDisks"
case WaitForFormatting:
return "WaitForFormatting"
case WaitForHeal:
return "WaitForHeal"
case WaitForAll:
return "WaitForAll"
case WaitForQuorum:
return "WaitForQuorum"
case Abort:
return "Abort"
default:
return "Unknown"
}
}
func TestPrepForInit(t *testing.T) {
var disks []string
if runtime.GOOS == "windows" {
disks = []string{
`c:\mnt\disk1`,
`c:\mnt\disk2`,
`c:\mnt\disk3`,
`c:\mnt\disk4`,
`c:\mnt\disk5`,
`c:\mnt\disk6`,
`c:\mnt\disk7`,
`c:\mnt\disk8`,
}
} else {
disks = []string{
"/mnt/disk1",
"/mnt/disk2",
"/mnt/disk3",
"/mnt/disk4",
"/mnt/disk5",
"/mnt/disk6",
"/mnt/disk7",
"/mnt/disk8",
}
}
// Build disk lists that resolve as local and remote w.r.t. isLocalStorage().
var (
disksLocal []string
disksRemote []string
)
for i := range disks {
disksLocal = append(disksLocal, "localhost:"+disks[i])
}
// Using 4.4.4.4 as a known non-local address.
for i := range disks {
disksRemote = append(disksRemote, "4.4.4.4:"+disks[i])
}
// All disks are unformatted, a fresh setup.
allUnformatted := []error{
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk,
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk,
}
// All disks are formatted, possible restart of a node in a formatted setup.
allFormatted := []error{
nil, nil, nil, nil,
nil, nil, nil, nil,
}
// Quorum number of disks are formatted and rest are offline.
quorumFormatted := []error{
nil, nil, nil, nil,
nil, errDiskNotFound, errDiskNotFound, errDiskNotFound,
}
// Minority disks are corrupted, can be healed.
minorityCorrupted := []error{
errCorruptedFormat, errCorruptedFormat, errCorruptedFormat, nil,
nil, nil, nil, nil,
}
// Majority disks are corrupted, pretty bad setup.
majorityCorrupted := []error{
errCorruptedFormat, errCorruptedFormat, errCorruptedFormat, errCorruptedFormat,
errCorruptedFormat, nil, nil, nil,
}
// Quorum disks are unformatted, remaining yet to come online.
quorumUnformatted := []error{
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk,
errUnformattedDisk, errDiskNotFound, errDiskNotFound, errDiskNotFound,
}
quorumUnformattedSomeCorrupted := []error{
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk,
errUnformattedDisk, errCorruptedFormat, errCorruptedFormat, errDiskNotFound,
}
// Quorum number of disks not online yet.
noQuorum := []error{
errDiskNotFound, errDiskNotFound, errDiskNotFound, errDiskNotFound,
errDiskNotFound, nil, nil, nil,
}
testCases := []struct {
// Params for prepForInit().
disks []string
errs []error
diskCount int
action InitActions
}{
// Local disks.
{disksLocal, allFormatted, 8, InitObjectLayer},
{disksLocal, quorumFormatted, 8, InitObjectLayer},
{disksLocal, allUnformatted, 8, FormatDisks},
{disksLocal, quorumUnformatted, 8, WaitForAll},
{disksLocal, quorumUnformattedSomeCorrupted, 8, WaitForHeal},
{disksLocal, noQuorum, 8, WaitForQuorum},
{disksLocal, minorityCorrupted, 8, WaitForHeal},
{disksLocal, majorityCorrupted, 8, Abort},
// Remote disks.
{disksRemote, allFormatted, 8, InitObjectLayer},
{disksRemote, quorumFormatted, 8, InitObjectLayer},
{disksRemote, allUnformatted, 8, WaitForFormatting},
{disksRemote, quorumUnformatted, 8, WaitForAll},
{disksRemote, quorumUnformattedSomeCorrupted, 8, WaitForHeal},
{disksRemote, noQuorum, 8, WaitForQuorum},
{disksRemote, minorityCorrupted, 8, WaitForHeal},
{disksRemote, majorityCorrupted, 8, Abort},
}
for i, test := range testCases {
actual := prepForInit(test.disks, test.errs, test.diskCount)
if actual != test.action {
t.Errorf("Test %d expected %s but received %s\n", i+1, test.action, actual)
}
}
}

View File

@ -17,7 +17,6 @@
package cmd package cmd
import ( import (
"errors"
"net/http" "net/http"
"os" "os"
"strings" "strings"
@ -25,86 +24,102 @@ import (
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
) )
// newObjectLayer - initialize any object layer depending on the number of disks. func newObjectLayerFn() ObjectLayer {
func newObjectLayer(disks, ignoredDisks []string) (ObjectLayer, error) { objLayerMutex.Lock()
if len(disks) == 1 { defer objLayerMutex.Unlock()
exportPath := disks[0] return globalObjectAPI
// Initialize FS object layer.
return newFSObjects(exportPath)
}
// Initialize XL object layer.
objAPI, err := newXLObjects(disks, ignoredDisks)
if err == errXLWriteQuorum {
return objAPI, errors.New("Disks are different with last minio server run.")
}
return objAPI, err
} }
// configureServer handler returns final handler for the http server. // newObjectLayer - initialize any object layer depending on the number of disks.
func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler { func newObjectLayer(disks, ignoredDisks []string) (ObjectLayer, error) {
// Initialize name space lock. var objAPI ObjectLayer
initNSLock() var err error
if len(disks) == 1 {
objAPI, err := newObjectLayer(srvCmdConfig.disks, srvCmdConfig.ignoredDisks) // Initialize FS object layer.
fatalIf(err, "Unable to intialize object layer.") objAPI, err = newFSObjects(disks[0])
} else {
// Initialize XL object layer.
objAPI, err = newXLObjects(disks, ignoredDisks)
}
if err != nil {
return nil, err
}
// Migrate bucket policy from configDir to .minio.sys/buckets/ // Migrate bucket policy from configDir to .minio.sys/buckets/
err = migrateBucketPolicyConfig(objAPI) err = migrateBucketPolicyConfig(objAPI)
fatalIf(err, "Unable to migrate bucket policy from config directory") if err != nil {
errorIf(err, "Unable to migrate bucket policy from config directory")
return nil, err
}
err = cleanupOldBucketPolicyConfigs() err = cleanupOldBucketPolicyConfigs()
fatalIf(err, "Unable to clean up bucket policy from config directory.") if err != nil {
errorIf(err, "Unable to clean up bucket policy from config directory.")
// Initialize storage rpc server. return nil, err
storageRPC, err := newRPCServer(srvCmdConfig.disks[0]) // FIXME: should only have one path.
fatalIf(err, "Unable to initialize storage RPC server.")
// Initialize API.
apiHandlers := objectAPIHandlers{
ObjectAPI: objAPI,
} }
// Initialize Web.
webHandlers := &webAPIHandlers{
ObjectAPI: objAPI,
}
// Initialize Controller.
ctrlHandlers := &controllerAPIHandlers{
ObjectAPI: objAPI,
}
// Initialize and monitor shutdown signals.
err = initGracefulShutdown(os.Exit)
fatalIf(err, "Unable to initialize graceful shutdown operation")
// Register the callback that should be called when the process shuts down. // Register the callback that should be called when the process shuts down.
globalShutdownCBs.AddObjectLayerCB(func() errCode { globalShutdownCBs.AddObjectLayerCB(func() errCode {
if sErr := objAPI.Shutdown(); sErr != nil { if objAPI != nil {
return exitFailure if sErr := objAPI.Shutdown(); sErr != nil {
return exitFailure
}
} }
return exitSuccess return exitSuccess
}) })
// Initialize a new event notifier. // Initialize a new event notifier.
err = initEventNotifier(objAPI) err = initEventNotifier(objAPI)
fatalIf(err, "Unable to initialize event notification queue") if err != nil {
errorIf(err, "Unable to initialize event notification.")
}
// Initialize a new bucket policies. // Initialize and load bucket policies.
err = initBucketPolicies(objAPI) err = initBucketPolicies(objAPI)
fatalIf(err, "Unable to load all bucket policies") if err != nil {
errorIf(err, "Unable to load all bucket policies.")
}
// Success.
return objAPI, nil
}
// configureServer handler returns final handler for the http server.
func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
// Initialize storage rpc servers for every disk that is hosted on this node.
storageRPCs, err := newRPCServer(srvCmdConfig)
fatalIf(err, "Unable to initialize storage RPC server.")
// Initialize and monitor shutdown signals.
err = initGracefulShutdown(os.Exit)
fatalIf(err, "Unable to initialize graceful shutdown operation")
// Initialize API.
apiHandlers := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
}
// Initialize Web.
webHandlers := &webAPIHandlers{
ObjectAPI: newObjectLayerFn,
}
// Initialize Controller.
controllerHandlers := &controllerAPIHandlers{
ObjectAPI: newObjectLayerFn,
}
// Initialize router. // Initialize router.
mux := router.NewRouter() mux := router.NewRouter()
// Register all routers. // Register all routers.
registerStorageRPCRouter(mux, storageRPC) registerStorageRPCRouters(mux, storageRPCs)
// FIXME: till net/rpc auth is brought in "minio control" can be enabled only through // Initialize distributed NS lock.
// this env variable. initDistributedNSLock(mux, srvCmdConfig)
if os.Getenv("MINIO_CONTROL") != "" {
registerControlRPCRouter(mux, ctrlHandlers) // Register controller rpc router.
} registerControllerRPCRouter(mux, controllerHandlers)
// set environment variable MINIO_BROWSER=off to disable minio web browser. // set environment variable MINIO_BROWSER=off to disable minio web browser.
// By default minio web browser is enabled. // By default minio web browser is enabled.
@ -112,11 +127,10 @@ func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
registerWebRouter(mux, webHandlers) registerWebRouter(mux, webHandlers)
} }
registerAPIRouter(mux, apiHandlers)
// Add new routers here. // Add new routers here.
registerAPIRouter(mux, apiHandlers)
// List of some generic handlers which are applied for all // List of some generic handlers which are applied for all incoming requests.
// incoming requests.
var handlerFns = []HandlerFunc{ var handlerFns = []HandlerFunc{
// Limits the number of concurrent http requests. // Limits the number of concurrent http requests.
setRateLimitHandler, setRateLimitHandler,

View File

@ -1,124 +0,0 @@
package cmd
import (
"net/rpc"
router "github.com/gorilla/mux"
)
// Storage server implements rpc primitives to facilitate exporting a
// disk over a network.
type storageServer struct {
storage StorageAPI
}
/// Volume operations handlers
// MakeVolHandler - make vol handler is rpc wrapper for MakeVol operation.
func (s *storageServer) MakeVolHandler(arg *string, reply *GenericReply) error {
return s.storage.MakeVol(*arg)
}
// ListVolsHandler - list vols handler is rpc wrapper for ListVols operation.
func (s *storageServer) ListVolsHandler(arg *string, reply *ListVolsReply) error {
vols, err := s.storage.ListVols()
if err != nil {
return err
}
reply.Vols = vols
return nil
}
// StatVolHandler - stat vol handler is a rpc wrapper for StatVol operation.
func (s *storageServer) StatVolHandler(arg *string, reply *VolInfo) error {
volInfo, err := s.storage.StatVol(*arg)
if err != nil {
return err
}
*reply = volInfo
return nil
}
// DeleteVolHandler - delete vol handler is a rpc wrapper for
// DeleteVol operation.
func (s *storageServer) DeleteVolHandler(arg *string, reply *GenericReply) error {
return s.storage.DeleteVol(*arg)
}
/// File operations
// StatFileHandler - stat file handler is rpc wrapper to stat file.
func (s *storageServer) StatFileHandler(arg *StatFileArgs, reply *FileInfo) error {
fileInfo, err := s.storage.StatFile(arg.Vol, arg.Path)
if err != nil {
return err
}
*reply = fileInfo
return nil
}
// ListDirHandler - list directory handler is rpc wrapper to list dir.
func (s *storageServer) ListDirHandler(arg *ListDirArgs, reply *[]string) error {
entries, err := s.storage.ListDir(arg.Vol, arg.Path)
if err != nil {
return err
}
*reply = entries
return nil
}
// ReadAllHandler - read all handler is rpc wrapper to read all storage API.
func (s *storageServer) ReadAllHandler(arg *ReadFileArgs, reply *[]byte) error {
buf, err := s.storage.ReadAll(arg.Vol, arg.Path)
if err != nil {
return err
}
reply = &buf
return nil
}
// ReadFileHandler - read file handler is rpc wrapper to read file.
func (s *storageServer) ReadFileHandler(arg *ReadFileArgs, reply *int64) error {
n, err := s.storage.ReadFile(arg.Vol, arg.Path, arg.Offset, arg.Buffer)
if err != nil {
return err
}
reply = &n
return nil
}
// AppendFileHandler - append file handler is rpc wrapper to append file.
func (s *storageServer) AppendFileHandler(arg *AppendFileArgs, reply *GenericReply) error {
return s.storage.AppendFile(arg.Vol, arg.Path, arg.Buffer)
}
// DeleteFileHandler - delete file handler is rpc wrapper to delete file.
func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericReply) error {
return s.storage.DeleteFile(arg.Vol, arg.Path)
}
// RenameFileHandler - rename file handler is rpc wrapper to rename file.
func (s *storageServer) RenameFileHandler(arg *RenameFileArgs, reply *GenericReply) error {
return s.storage.RenameFile(arg.SrcVol, arg.SrcPath, arg.DstVol, arg.DstPath)
}
// Initialize new storage rpc.
func newRPCServer(exportPath string) (*storageServer, error) {
// Initialize posix storage API.
storage, err := newPosix(exportPath)
if err != nil && err != errDiskNotFound {
return nil, err
}
return &storageServer{
storage: storage,
}, nil
}
// registerStorageRPCRouter - register storage rpc router.
func registerStorageRPCRouter(mux *router.Router, stServer *storageServer) {
storageRPCServer := rpc.NewServer()
storageRPCServer.RegisterName("Storage", stServer)
storageRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter()
// Add minio storage routes.
storageRouter.Path("/storage").Handler(storageRPCServer)
}

View File

@ -28,28 +28,32 @@ import (
"github.com/minio/cli" "github.com/minio/cli"
) )
var serverCmd = cli.Command{ var srvConfig serverCmdConfig
Name: "server",
Usage: "Start object storage server.", var serverFlags = []cli.Flag{
Flags: []cli.Flag{ cli.StringFlag{
cli.StringFlag{ Name: "address",
Name: "address", Value: ":9000",
Value: ":9000", Usage: "Specify custom server \"ADDRESS:PORT\", defaults to \":9000\".",
Usage: "Specify custom server \"ADDRESS:PORT\", defaults to \":9000\".",
},
cli.StringFlag{
Name: "ignore-disks",
Usage: "Specify comma separated list of disks that are offline.",
},
}, },
cli.StringFlag{
Name: "ignore-disks",
Usage: "Specify comma separated list of disks that are offline.",
},
}
var serverCmd = cli.Command{
Name: "server",
Usage: "Start object storage server.",
Flags: append(serverFlags, globalFlags...),
Action: serverMain, Action: serverMain,
CustomHelpTemplate: `NAME: CustomHelpTemplate: `NAME:
minio {{.Name}} - {{.Usage}} minio {{.Name}} - {{.Usage}}
USAGE: USAGE:
minio {{.Name}} [OPTIONS] PATH [PATH...] minio {{.Name}} [FLAGS] PATH [PATH...]
OPTIONS: FLAGS:
{{range .Flags}}{{.}} {{range .Flags}}{{.}}
{{end}} {{end}}
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
@ -72,15 +76,21 @@ EXAMPLES:
$ minio {{.Name}} C:\MyShare $ minio {{.Name}} C:\MyShare
4. Start minio server on 12 disks to enable erasure coded layer with 6 data and 6 parity. 4. Start minio server on 12 disks to enable erasure coded layer with 6 data and 6 parity.
$ minio {{.Name}} /mnt/export1/backend /mnt/export2/backend /mnt/export3/backend /mnt/export4/backend \ $ minio {{.Name}} /mnt/export1/ /mnt/export2/ /mnt/export3/ /mnt/export4/ \
/mnt/export5/backend /mnt/export6/backend /mnt/export7/backend /mnt/export8/backend /mnt/export9/backend \ /mnt/export5/ /mnt/export6/ /mnt/export7/ /mnt/export8/ /mnt/export9/ \
/mnt/export10/backend /mnt/export11/backend /mnt/export12/backend /mnt/export10/ /mnt/export11/ /mnt/export12/
5. Start minio server on 12 disks while ignoring two disks for initialization. 5. Start minio server on 12 disks while ignoring two disks for initialization.
$ minio {{.Name}} --ignore-disks=/mnt/export1/backend,/mnt/export2/backend /mnt/export1/backend \ $ minio {{.Name}} --ignore-disks=/mnt/export1/ /mnt/export1/ /mnt/export2/ \
/mnt/export2/backend /mnt/export3/backend /mnt/export4/backend /mnt/export5/backend /mnt/export6/backend \ /mnt/export3/ /mnt/export4/ /mnt/export5/ /mnt/export6/ /mnt/export7/ \
/mnt/export7/backend /mnt/export8/backend /mnt/export9/backend /mnt/export10/backend /mnt/export11/backend \ /mnt/export8/ /mnt/export9/ /mnt/export10/ /mnt/export11/ /mnt/export12/
/mnt/export12/backend
6. Start minio server on a 4 node distributed setup. Type the following command on all the 4 nodes.
$ export MINIO_ACCESS_KEY=minio
$ export MINIO_SECRET_KEY=miniostorage
$ minio {{.Name}} 192.168.1.11:/mnt/export/ 192.168.1.12:/mnt/export/ \
192.168.1.13:/mnt/export/ 192.168.1.14:/mnt/export/
`, `,
} }
@ -194,16 +204,70 @@ func initServerConfig(c *cli.Context) {
// Do not fail if this is not allowed, lower limits are fine as well. // Do not fail if this is not allowed, lower limits are fine as well.
} }
// Validate if input disks are sufficient for initializing XL.
func checkSufficientDisks(disks []string) error {
// Verify total number of disks.
totalDisks := len(disks)
if totalDisks > maxErasureBlocks {
return errXLMaxDisks
}
if totalDisks < minErasureBlocks {
return errXLMinDisks
}
// isEven verifies whether a given number is even.
isEven := func(number int) bool {
return number%2 == 0
}
// Verify that we have an even number of disks.
// Only combinations of 4, 6, 8, 10, 12, 14 and 16 disks are supported.
if !isEven(totalDisks) {
return errXLNumDisks
}
// Success.
return nil
}
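A hedged sketch (not in the commit) of the accepted disk counts; the bounds are assumed to be 4 and 16 per the comment above, and the wrapper name is hypothetical:
func exampleDiskCounts() {
	for _, n := range []int{3, 4, 7, 16, 18} {
		err := checkSufficientDisks(make([]string, n))
		// n=3 -> errXLMinDisks, n=7 -> errXLNumDisks (odd count),
		// n=18 -> errXLMaxDisks, n=4 and n=16 -> nil.
		_ = err
	}
}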
// Validates that disks are named in a supported format; invalid arguments are rejected.
func checkNamingDisks(disks []string) error {
for _, disk := range disks {
_, _, err := splitNetPath(disk)
if err != nil {
return err
}
}
return nil
}
// Check server arguments. // Check server arguments.
func checkServerSyntax(c *cli.Context) { func checkServerSyntax(c *cli.Context) {
if !c.Args().Present() || c.Args().First() == "help" { if !c.Args().Present() || c.Args().First() == "help" {
cli.ShowCommandHelpAndExit(c, "server", 1) cli.ShowCommandHelpAndExit(c, "server", 1)
} }
disks := c.Args()
if len(disks) > 1 {
// Validate if input disks have duplicates in them.
err := checkDuplicates(disks)
fatalIf(err, "Invalid disk arguments for server.")
// Validate if input disks are sufficient for erasure coded setup.
err = checkSufficientDisks(disks)
fatalIf(err, "Invalid disk arguments for server.")
// Validate if input disks are properly named in accordance with either
// - /mnt/disk1
// - ip:/mnt/disk1
err = checkNamingDisks(disks)
fatalIf(err, "Invalid disk arguments for server.")
}
} }
// Extract port number from address; address should be of the form host:port. // Extract port number from address; address should be of the form host:port.
func getPort(address string) int { func getPort(address string) int {
_, portStr, _ := net.SplitHostPort(address) _, portStr, _ := net.SplitHostPort(address)
// If port empty, default to port '80' // If port empty, default to port '80'
if portStr == "" { if portStr == "" {
portStr = "80" portStr = "80"
@ -219,6 +283,51 @@ func getPort(address string) int {
return portInt return portInt
} }
// Returns whether the slice of disks forms a distributed setup.
func isDistributedSetup(disks []string) (isDist bool) {
// A single disk that is not local to this node makes the setup distributed.
for _, disk := range disks {
if !isLocalStorage(disk) {
// One or more disks supplied as arguments are not
// attached to the local node.
isDist = true
}
}
return isDist
}
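An illustrative sketch (not part of the commit): one remote disk argument is enough to flip the setup to distributed. The disk paths and wrapper name are hypothetical:
func exampleDistCheck() {
	local := isDistributedSetup([]string{"/mnt/disk1", "/mnt/disk2"})
	mixed := isDistributedSetup([]string{"192.168.1.11:/mnt/export/", "/mnt/disk2"})
	_, _ = local, mixed // local == false, mixed == true
}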
// Format disks before initializing the object layer.
func formatDisks(disks, ignoredDisks []string) error {
storageDisks, err := waitForFormattingDisks(disks, ignoredDisks)
for _, storage := range storageDisks {
if storage == nil {
continue
}
switch store := storage.(type) {
// Closing associated TCP connections since
// []StorageAPI is garbage collected eventually.
case networkStorage:
store.rpcClient.Close()
}
}
if err != nil {
return err
}
if isLocalStorage(disks[0]) {
// Notify everyone else that they can retry initialization.
for _, storage := range storageDisks {
switch store := storage.(type) {
// Closing associated TCP connections since
// []StorageAPI is garbage collected eventually.
case networkStorage:
var reply GenericReply
_ = store.rpcClient.Call("Storage.TryInitHandler", &GenericArgs{}, &reply)
}
}
}
return nil
}
// serverMain handler called for 'minio server' command. // serverMain handler called for 'minio server' command.
func serverMain(c *cli.Context) { func serverMain(c *cli.Context) {
// Check 'server' cli arguments. // Check 'server' cli arguments.
@ -244,12 +353,29 @@ func serverMain(c *cli.Context) {
// Disks to be used in server init. // Disks to be used in server init.
disks := c.Args() disks := c.Args()
isDist := isDistributedSetup(disks)
// Set nodes for dsync for distributed setup.
if isDist {
err = initDsyncNodes(disks, port)
fatalIf(err, "Unable to initialize distributed locking")
}
// Initialize name space lock.
initNSLock(isDist)
// Configure server. // Configure server.
handler := configureServerHandler(serverCmdConfig{ srvConfig = serverCmdConfig{
serverAddr: serverAddress, serverAddr: serverAddress,
disks: disks, disks: disks,
ignoredDisks: ignoredDisks, ignoredDisks: ignoredDisks,
}) }
// Initialize and monitor shutdown signals.
err = initGracefulShutdown(os.Exit)
fatalIf(err, "Unable to initialize graceful shutdown operation")
// Configure server.
handler := configureServerHandler(srvConfig)
apiServer := NewServerMux(serverAddress, handler) apiServer := NewServerMux(serverAddress, handler)
@ -267,12 +393,36 @@ func serverMain(c *cli.Context) {
// Start server. // Start server.
// Configure TLS if certs are available. // Configure TLS if certs are available.
if tls { wait := make(chan struct{}, 1)
err = apiServer.ListenAndServeTLS(mustGetCertFile(), mustGetKeyFile()) go func(tls bool, wait chan<- struct{}) {
} else { if tls {
// Fallback to http. err = apiServer.ListenAndServeTLS(mustGetCertFile(), mustGetKeyFile())
err = apiServer.ListenAndServe() } else {
// Fallback to http.
err = apiServer.ListenAndServe()
}
wait <- struct{}{}
}(tls, wait)
err = formatDisks(disks, ignoredDisks)
if err != nil {
// FIXME: call graceful exit
errorIf(err, "formatting storage disks failed")
return
} }
newObject, err := newObjectLayer(disks, ignoredDisks)
if err != nil {
// FIXME: call graceful exit
errorIf(err, "intializing object layer failed")
return
}
printEventNotifiers()
objLayerMutex.Lock()
globalObjectAPI = newObject
objLayerMutex.Unlock()
<-wait
fatalIf(err, "Failed to start minio server.") fatalIf(err, "Failed to start minio server.")
} }

View File

@ -60,14 +60,25 @@ func printServerCommonMsg(endPoints []string) {
console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKeyID))) console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKeyID)))
console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretAccessKey))) console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretAccessKey)))
console.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region))) console.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
arnMsg := colorBlue("SqsARNs: ")
console.Println(colorBlue("\nBrowser Access:"))
console.Println(fmt.Sprintf(getFormatStr(len(endPointStr), 3), endPointStr))
}
// Prints bucket notification configurations.
func printEventNotifiers() {
if globalEventNotifier == nil {
// In case initEventNotifier() was not done or failed.
return
}
arnMsg := colorBlue("\nSQS ARNs: ")
if len(globalEventNotifier.queueTargets) == 0 {
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len("<none>"), 2), "<none>"))
}
for queueArn := range globalEventNotifier.queueTargets { for queueArn := range globalEventNotifier.queueTargets {
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(queueArn), 2), queueArn)) arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(queueArn), 2), queueArn))
} }
console.Println(arnMsg) console.Println(arnMsg)
console.Println(colorBlue("\nBrowser Access:"))
console.Println(fmt.Sprintf(getFormatStr(len(endPointStr), 3), endPointStr))
} }
// Prints startup message for command line access. Prints link to our documentation // Prints startup message for command line access. Prints link to our documentation

View File

@ -309,6 +309,7 @@ func (s *TestSuiteCommon) TestDeleteBucketNotEmpty(c *C) {
} }
// Tests deleting multiple objects and verifies server response.
func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *C) { func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *C) {
// generate a random bucket name. // generate a random bucket name.
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
@ -347,18 +348,11 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *C) {
ObjectName: objName, ObjectName: objName,
}) })
} }
// Append a non-existent object for which the response should be marked
// as deleted.
delObjReq.Objects = append(delObjReq.Objects, ObjectIdentifier{
ObjectName: fmt.Sprintf("%d/%s", 10, objectName),
})
// Marshal delete request. // Marshal delete request.
deleteReqBytes, err := xml.Marshal(delObjReq) deleteReqBytes, err := xml.Marshal(delObjReq)
c.Assert(err, IsNil) c.Assert(err, IsNil)
// object name was "prefix/myobject", an attempt to delete "prefix" // Delete list of objects.
// Should not delete "prefix/myobject"
request, err = newTestSignedRequest("POST", getMultiDeleteObjectURL(s.endPoint, bucketName), request, err = newTestSignedRequest("POST", getMultiDeleteObjectURL(s.endPoint, bucketName),
int64(len(deleteReqBytes)), bytes.NewReader(deleteReqBytes), s.accessKey, s.secretKey) int64(len(deleteReqBytes)), bytes.NewReader(deleteReqBytes), s.accessKey, s.secretKey)
c.Assert(err, IsNil) c.Assert(err, IsNil)
@ -372,11 +366,31 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *C) {
c.Assert(err, IsNil) c.Assert(err, IsNil)
err = xml.Unmarshal(delRespBytes, &deleteResp) err = xml.Unmarshal(delRespBytes, &deleteResp)
c.Assert(err, IsNil) c.Assert(err, IsNil)
for i := 0; i <= 10; i++ { for i := 0; i < 10; i++ {
// All the objects should be under deleted list (including non-existent object) // All the objects should be under deleted list (including non-existent object)
c.Assert(deleteResp.DeletedObjects[i], DeepEquals, delObjReq.Objects[i]) c.Assert(deleteResp.DeletedObjects[i], DeepEquals, delObjReq.Objects[i])
} }
c.Assert(len(deleteResp.Errors), Equals, 0) c.Assert(len(deleteResp.Errors), Equals, 0)
// A second attempt should yield the same result; NoSuchKey should not
// be reported for objects that no longer exist.
request, err = newTestSignedRequest("POST", getMultiDeleteObjectURL(s.endPoint, bucketName),
int64(len(deleteReqBytes)), bytes.NewReader(deleteReqBytes), s.accessKey, s.secretKey)
c.Assert(err, IsNil)
client = http.Client{}
response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK)
deleteResp = DeleteObjectsResponse{}
delRespBytes, err = ioutil.ReadAll(response.Body)
c.Assert(err, IsNil)
err = xml.Unmarshal(delRespBytes, &deleteResp)
c.Assert(err, IsNil)
for i := 0; i < 10; i++ {
c.Assert(deleteResp.DeletedObjects[i], DeepEquals, delObjReq.Objects[i])
}
c.Assert(len(deleteResp.Errors), Equals, 0)
} }
// Tests delete object responses and success. // Tests delete object responses and success.
@ -1364,6 +1378,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *C) {
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
getContent, err := ioutil.ReadAll(response.Body) getContent, err := ioutil.ReadAll(response.Body)
c.Assert(err, IsNil)
c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true) c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true)
// create listObjectsV2 request with valid parameters // create listObjectsV2 request with valid parameters
@ -1377,6 +1392,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *C) {
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
getContent, err = ioutil.ReadAll(response.Body) getContent, err = ioutil.ReadAll(response.Body)
c.Assert(err, IsNil)
c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true) c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true)
c.Assert(strings.Contains(string(getContent), "<Owner><ID></ID><DisplayName></DisplayName></Owner>"), Equals, true) c.Assert(strings.Contains(string(getContent), "<Owner><ID></ID><DisplayName></DisplayName></Owner>"), Equals, true)
@ -1960,6 +1976,7 @@ func (s *TestSuiteCommon) TestObjectMultipartAbort(c *C) {
// execute the HTTP request initiating the new multipart upload. // execute the HTTP request initiating the new multipart upload.
response, err = client.Do(request) response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
// parse the response body and obtain the new upload ID. // parse the response body and obtain the new upload ID.
@ -1977,6 +1994,7 @@ func (s *TestSuiteCommon) TestObjectMultipartAbort(c *C) {
// execute the HTTP request initiating the new multipart upload. // execute the HTTP request initiating the new multipart upload.
response, err = client.Do(request) response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
// parse the response body and obtain the new upload ID. // parse the response body and obtain the new upload ID.
@ -2193,6 +2211,7 @@ func (s *TestSuiteCommon) TestObjectMultipartListError(c *C) {
c.Assert(err, IsNil) c.Assert(err, IsNil)
// execute the HTTP request initiating the new multipart upload. // execute the HTTP request initiating the new multipart upload.
response, err = client.Do(request) response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
// parse the response body and obtain the new upload ID. // parse the response body and obtain the new upload ID.
decoder := xml.NewDecoder(response.Body) decoder := xml.NewDecoder(response.Body)

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"encoding/xml" "encoding/xml"
"fmt"
"io/ioutil" "io/ioutil"
"net" "net"
"net/http" "net/http"
@ -65,6 +66,40 @@ var ignoredHeaders = map[string]bool{
"User-Agent": true, "User-Agent": true,
} }
// Headers to ignore in streaming v4
var ignoredStreamingHeaders = map[string]bool{
"Authorization": true,
"Content-Type": true,
"Content-Md5": true,
"User-Agent": true,
}
// calculateSignedChunkLength - calculates the length of chunk metadata
func calculateSignedChunkLength(chunkDataSize int64) int64 {
return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
17 + // ";chunk-signature="
64 + // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
2 + // CRLF
chunkDataSize +
2 // CRLF
}
// calculateSignedChunkLength - calculates the length of the overall stream (data + metadata)
func calculateStreamContentLength(dataLen, chunkSize int64) int64 {
if dataLen <= 0 {
return 0
}
chunksCount := int64(dataLen / chunkSize)
remainingBytes := int64(dataLen % chunkSize)
streamLen := int64(0)
streamLen += chunksCount * calculateSignedChunkLength(chunkSize)
if remainingBytes > 0 {
streamLen += calculateSignedChunkLength(remainingBytes)
}
streamLen += calculateSignedChunkLength(0)
return streamLen
}
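A worked example (illustrative, not from the commit) of the arithmetic above: a 66560-byte payload streamed with a 65536-byte chunk size produces one full chunk, one 1024-byte tail chunk and the terminating zero-length chunk. The wrapper name is hypothetical:
func exampleStreamLength() {
	full := calculateSignedChunkLength(65536) // 5 + 17 + 64 + 2 + 65536 + 2 = 65626
	tail := calculateSignedChunkLength(1024)  // 3 + 17 + 64 + 2 + 1024 + 2 = 1112
	zero := calculateSignedChunkLength(0)     // 1 + 17 + 64 + 2 + 0 + 2 = 86
	total := full + tail + zero               // 66824
	_ = total
	// calculateStreamContentLength(66560, 65536) yields the same 66824.
}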
// Ask the kernel for a free open port. // Ask the kernel for a free open port.
func getFreePort() int { func getFreePort() int {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0") addr, err := net.ResolveTCPAddr("tcp", "localhost:0")

View File

@ -17,7 +17,7 @@
package cmd package cmd
import ( import (
"fmt" "errors"
"strings" "strings"
"time" "time"
@ -32,24 +32,24 @@ type JWT struct {
credential credential
} }
// Default - each token expires in 10hrs. // Default each token expires in 100yrs.
const ( const (
tokenExpires time.Duration = 10 defaultTokenExpiry time.Duration = time.Hour * 876000 // 100yrs.
) )
// newJWT - returns new JWT object. // newJWT - returns new JWT object.
func newJWT() (*JWT, error) { func newJWT(expiry time.Duration) (*JWT, error) {
if serverConfig == nil { if serverConfig == nil {
return nil, fmt.Errorf("server not initialzed") return nil, errors.New("Server not initialzed")
} }
// Save access, secret keys. // Save access, secret keys.
cred := serverConfig.GetCredential() cred := serverConfig.GetCredential()
if !isValidAccessKey.MatchString(cred.AccessKeyID) { if !isValidAccessKey.MatchString(cred.AccessKeyID) {
return nil, fmt.Errorf("Invalid access key") return nil, errors.New("Invalid access key")
} }
if !isValidSecretKey.MatchString(cred.SecretAccessKey) { if !isValidSecretKey.MatchString(cred.SecretAccessKey) {
return nil, fmt.Errorf("Invalid secret key") return nil, errors.New("Invalid secret key")
} }
return &JWT{cred}, nil return &JWT{cred}, nil
@ -61,13 +61,13 @@ func (jwt *JWT) GenerateToken(accessKey string) (string, error) {
accessKey = strings.TrimSpace(accessKey) accessKey = strings.TrimSpace(accessKey)
if !isValidAccessKey.MatchString(accessKey) { if !isValidAccessKey.MatchString(accessKey) {
return "", fmt.Errorf("Invalid access key") return "", errors.New("Invalid access key")
} }
tUTCNow := time.Now().UTC() tUTCNow := time.Now().UTC()
token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims{ token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims{
// Token expires in 10hrs. // Token expiry as configured (default 100yrs).
"exp": tUTCNow.Add(time.Hour * tokenExpires).Unix(), "exp": tUTCNow.Add(defaultTokenExpiry).Unix(),
"iat": tUTCNow.Unix(), "iat": tUTCNow.Unix(),
"sub": accessKey, "sub": accessKey,
}) })
@ -80,20 +80,20 @@ func (jwt *JWT) Authenticate(accessKey, secretKey string) error {
accessKey = strings.TrimSpace(accessKey) accessKey = strings.TrimSpace(accessKey)
if !isValidAccessKey.MatchString(accessKey) { if !isValidAccessKey.MatchString(accessKey) {
return fmt.Errorf("Invalid access key") return errors.New("Invalid access key")
} }
if !isValidSecretKey.MatchString(secretKey) { if !isValidSecretKey.MatchString(secretKey) {
return fmt.Errorf("Invalid secret key") return errors.New("Invalid secret key")
} }
if accessKey != jwt.AccessKeyID { if accessKey != jwt.AccessKeyID {
return fmt.Errorf("Access key does not match") return errors.New("Access key does not match")
} }
hashedSecretKey, _ := bcrypt.GenerateFromPassword([]byte(jwt.SecretAccessKey), bcrypt.DefaultCost) hashedSecretKey, _ := bcrypt.GenerateFromPassword([]byte(jwt.SecretAccessKey), bcrypt.DefaultCost)
if bcrypt.CompareHashAndPassword(hashedSecretKey, []byte(secretKey)) != nil { if bcrypt.CompareHashAndPassword(hashedSecretKey, []byte(secretKey)) != nil {
return fmt.Errorf("Authentication failed") return errors.New("Authentication failed")
} }
// Success. // Success.

View File

@ -72,11 +72,11 @@ func TestNewJWT(t *testing.T) {
expectedErr error expectedErr error
}{ }{
// Test non-existent config directory. // Test non-existent config directory.
{path.Join(path1, "non-existent-dir"), false, nil, fmt.Errorf("server not initialized")}, {path.Join(path1, "non-existent-dir"), false, nil, fmt.Errorf("Server not initialized")},
// Test empty config directory. // Test empty config directory.
{path2, false, nil, fmt.Errorf("server not initialized")}, {path2, false, nil, fmt.Errorf("Server not initialized")},
// Test empty config file. // Test empty config file.
{path3, false, nil, fmt.Errorf("server not initialized")}, {path3, false, nil, fmt.Errorf("Server not initialized")},
// Test initialized config file. // Test initialized config file.
{path4, true, nil, nil}, {path4, true, nil, nil},
// Test to read already created config file. // Test to read already created config file.
@ -108,7 +108,7 @@ func TestNewJWT(t *testing.T) {
serverConfig.SetCredential(*testCase.cred) serverConfig.SetCredential(*testCase.cred)
} }
_, err := newJWT() _, err := newJWT(defaultWebTokenExpiry)
if testCase.expectedErr != nil { if testCase.expectedErr != nil {
if err == nil { if err == nil {
@ -132,7 +132,7 @@ func TestGenerateToken(t *testing.T) {
} }
defer removeAll(testPath) defer removeAll(testPath)
jwt, err := newJWT() jwt, err := newJWT(defaultWebTokenExpiry)
if err != nil { if err != nil {
t.Fatalf("unable get new JWT, %s", err) t.Fatalf("unable get new JWT, %s", err)
} }
@ -179,7 +179,7 @@ func TestAuthenticate(t *testing.T) {
} }
defer removeAll(testPath) defer removeAll(testPath)
jwt, err := newJWT() jwt, err := newJWT(defaultWebTokenExpiry)
if err != nil { if err != nil {
t.Fatalf("unable get new JWT, %s", err) t.Fatalf("unable get new JWT, %s", err)
} }

View File

@ -59,3 +59,6 @@ var errVolumeAccessDenied = errors.New("volume access denied")
// errFileAccessDenied - cannot access file, insufficient permissions. // errFileAccessDenied - cannot access file, insufficient permissions.
var errFileAccessDenied = errors.New("file access denied") var errFileAccessDenied = errors.New("file access denied")
// errVolumeBusy - volume is busy, e.g. the remote disk is not yet connected.
var errVolumeBusy = errors.New("volume is busy")

View File

@ -16,8 +16,13 @@
package cmd package cmd
import "github.com/minio/minio/pkg/disk"
// StorageAPI interface. // StorageAPI interface.
type StorageAPI interface { type StorageAPI interface {
// Storage operations.
DiskInfo() (info disk.Info, err error)
// Volume operations. // Volume operations.
MakeVol(volume string) (err error) MakeVol(volume string) (err error)
ListVols() (vols []VolInfo, err error) ListVols() (vols []VolInfo, err error)

View File

@ -17,37 +17,48 @@
package cmd package cmd
import ( import (
"net/http" "io"
"net"
"net/rpc" "net/rpc"
"path"
"strconv"
"strings" "strings"
"time"
"github.com/minio/minio/pkg/disk"
) )
type networkStorage struct { type networkStorage struct {
netScheme string netAddr string
netAddr string netPath string
netPath string rpcClient *AuthRPCClient
rpcClient *rpc.Client
httpClient *http.Client
} }
const ( const (
storageRPCPath = reservedBucket + "/storage" storageRPCPath = reservedBucket + "/storage"
) )
// splits network path into its components Address and Path.
func splitNetPath(networkPath string) (netAddr, netPath string) {
index := strings.LastIndex(networkPath, ":")
netAddr = networkPath[:index]
netPath = networkPath[index+1:]
return netAddr, netPath
}
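The two-value splitNetPath above is removed by this commit; its replacement (defined elsewhere in the changeset) returns an error instead of slicing blindly. A minimal sketch under the semantics implied by the new call sites, with a hypothetical name and ignoring Windows drive-letter paths, which the real version must handle:
func splitNetPathSketch(networkPath string) (netAddr, netPath string, err error) {
	index := strings.LastIndex(networkPath, ":")
	if index == -1 {
		// Purely local path, e.g. "/mnt/disk1".
		return "", networkPath, nil
	}
	return networkPath[:index], networkPath[index+1:], nil
}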
// Converts rpc.ServerError to underlying error. This function is // Converts rpc.ServerError to underlying error. This function is
// written so that the storageAPI errors are consistent across network // written so that the storageAPI errors are consistent across network
// disks as well. // disks as well.
func toStorageErr(err error) error { func toStorageErr(err error) error {
if err == nil {
return nil
}
switch err.(type) {
case *net.OpError:
return errDiskNotFound
}
switch err.Error() { switch err.Error() {
case io.EOF.Error():
return io.EOF
case io.ErrUnexpectedEOF.Error():
return io.ErrUnexpectedEOF
case rpc.ErrShutdown.Error():
return errDiskNotFound
case errUnexpected.Error():
return errUnexpected
case errDiskFull.Error(): case errDiskFull.Error():
return errDiskFull return errDiskFull
case errVolumeNotFound.Error(): case errVolumeNotFound.Error():
@ -56,14 +67,20 @@ func toStorageErr(err error) error {
return errVolumeExists return errVolumeExists
case errFileNotFound.Error(): case errFileNotFound.Error():
return errFileNotFound return errFileNotFound
case errFileNameTooLong.Error():
return errFileNameTooLong
case errFileAccessDenied.Error():
return errFileAccessDenied
case errIsNotRegular.Error(): case errIsNotRegular.Error():
return errIsNotRegular return errIsNotRegular
case errVolumeNotEmpty.Error(): case errVolumeNotEmpty.Error():
return errVolumeNotEmpty return errVolumeNotEmpty
case errFileAccessDenied.Error():
return errFileAccessDenied
case errVolumeAccessDenied.Error(): case errVolumeAccessDenied.Error():
return errVolumeAccessDenied return errVolumeAccessDenied
case errCorruptedFormat.Error():
return errCorruptedFormat
case errUnformattedDisk.Error():
return errUnformattedDisk
} }
return err return err
} }
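Illustrative only (not from the commit): net/rpc surfaces remote errors as rpc.ServerError, a bare string, so only the message crosses the wire. toStorageErr re-canonicalizes that message into the matching sentinel error; the wrapper name below is hypothetical:
func exampleToStorageErr() {
	var remoteErr error = rpc.ServerError(errFileNotFound.Error())
	if toStorageErr(remoteErr) == errFileNotFound {
		// Callers can compare against sentinels as if the disk were local.
	}
}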
@ -75,50 +92,59 @@ func newRPCClient(networkPath string) (StorageAPI, error) {
return nil, errInvalidArgument return nil, errInvalidArgument
} }
// TODO validate netAddr and netPath. // Split network path into its components.
netAddr, netPath := splitNetPath(networkPath) netAddr, netPath, err := splitNetPath(networkPath)
// Dial minio rpc storage http path.
rpcClient, err := rpc.DialHTTPPath("tcp", netAddr, storageRPCPath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Initialize http client. // Dial minio rpc storage http path.
httpClient := &http.Client{ rpcPath := path.Join(storageRPCPath, netPath)
// Setting a sensible time out of 6minutes to wait for port := getPort(srvConfig.serverAddr)
// response headers. Request is pro-actively cancelled rpcAddr := netAddr + ":" + strconv.Itoa(port)
// after 6minutes if no response was received from server. // Initialize rpc client with network address and rpc path.
Timeout: 6 * time.Minute, cred := serverConfig.GetCredential()
Transport: http.DefaultTransport, rpcClient := newAuthClient(&authConfig{
} accessKey: cred.AccessKeyID,
secretKey: cred.SecretAccessKey,
address: rpcAddr,
path: rpcPath,
loginMethod: "Storage.LoginHandler",
})
// Initialize network storage. // Initialize network storage.
ndisk := &networkStorage{ ndisk := &networkStorage{
netScheme: "http", // TODO: fix for ssl rpc support. netAddr: netAddr,
netAddr: netAddr, netPath: netPath,
netPath: netPath, rpcClient: rpcClient,
rpcClient: rpcClient,
httpClient: httpClient,
} }
// Returns successfully here. // Returns successfully here.
return ndisk, nil return ndisk, nil
} }
// MakeVol - make a volume. // DiskInfo - fetch disk information for a remote disk.
func (n networkStorage) DiskInfo() (info disk.Info, err error) {
args := GenericArgs{}
if err = n.rpcClient.Call("Storage.DiskInfoHandler", &args, &info); err != nil {
return disk.Info{}, err
}
return info, nil
}
// MakeVol - create a volume on a remote disk.
func (n networkStorage) MakeVol(volume string) error { func (n networkStorage) MakeVol(volume string) error {
reply := GenericReply{} reply := GenericReply{}
if err := n.rpcClient.Call("Storage.MakeVolHandler", volume, &reply); err != nil { args := GenericVolArgs{Vol: volume}
if err := n.rpcClient.Call("Storage.MakeVolHandler", &args, &reply); err != nil {
return toStorageErr(err) return toStorageErr(err)
} }
return nil return nil
} }
// ListVols - List all volumes. // ListVols - List all volumes on a remote disk.
func (n networkStorage) ListVols() (vols []VolInfo, err error) { func (n networkStorage) ListVols() (vols []VolInfo, err error) {
ListVols := ListVolsReply{} ListVols := ListVolsReply{}
err = n.rpcClient.Call("Storage.ListVolsHandler", "", &ListVols) err = n.rpcClient.Call("Storage.ListVolsHandler", &GenericArgs{}, &ListVols)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -127,7 +153,8 @@ func (n networkStorage) ListVols() (vols []VolInfo, err error) {
// StatVol - get current Stat volume info. // StatVol - get current Stat volume info.
func (n networkStorage) StatVol(volume string) (volInfo VolInfo, err error) { func (n networkStorage) StatVol(volume string) (volInfo VolInfo, err error) {
if err = n.rpcClient.Call("Storage.StatVolHandler", volume, &volInfo); err != nil { args := GenericVolArgs{Vol: volume}
if err = n.rpcClient.Call("Storage.StatVolHandler", &args, &volInfo); err != nil {
return VolInfo{}, toStorageErr(err) return VolInfo{}, toStorageErr(err)
} }
return volInfo, nil return volInfo, nil
@ -136,7 +163,8 @@ func (n networkStorage) StatVol(volume string) (volInfo VolInfo, err error) {
// DeleteVol - Delete a volume. // DeleteVol - Delete a volume.
func (n networkStorage) DeleteVol(volume string) error { func (n networkStorage) DeleteVol(volume string) error {
reply := GenericReply{} reply := GenericReply{}
if err := n.rpcClient.Call("Storage.DeleteVolHandler", volume, &reply); err != nil { args := GenericVolArgs{Vol: volume}
if err := n.rpcClient.Call("Storage.DeleteVolHandler", &args, &reply); err != nil {
return toStorageErr(err) return toStorageErr(err)
} }
return nil return nil
@ -147,7 +175,7 @@ func (n networkStorage) DeleteVol(volume string) error {
// CreateFile - create file. // CreateFile - create file.
func (n networkStorage) AppendFile(volume, path string, buffer []byte) (err error) { func (n networkStorage) AppendFile(volume, path string, buffer []byte) (err error) {
reply := GenericReply{} reply := GenericReply{}
if err = n.rpcClient.Call("Storage.AppendFileHandler", AppendFileArgs{ if err = n.rpcClient.Call("Storage.AppendFileHandler", &AppendFileArgs{
Vol: volume, Vol: volume,
Path: path, Path: path,
Buffer: buffer, Buffer: buffer,
@ -159,7 +187,7 @@ func (n networkStorage) AppendFile(volume, path string, buffer []byte) (err erro
// StatFile - get latest Stat information for a file at path. // StatFile - get latest Stat information for a file at path.
func (n networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err error) { func (n networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err error) {
if err = n.rpcClient.Call("Storage.StatFileHandler", StatFileArgs{ if err = n.rpcClient.Call("Storage.StatFileHandler", &StatFileArgs{
Vol: volume, Vol: volume,
Path: path, Path: path,
}, &fileInfo); err != nil { }, &fileInfo); err != nil {
@ -173,7 +201,7 @@ func (n networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err er
// This API is meant to be used on files which have small memory footprint, do // This API is meant to be used on files which have small memory footprint, do
// not use this on large files as it would cause server to crash. // not use this on large files as it would cause server to crash.
func (n networkStorage) ReadAll(volume, path string) (buf []byte, err error) { func (n networkStorage) ReadAll(volume, path string) (buf []byte, err error) {
if err = n.rpcClient.Call("Storage.ReadAllHandler", ReadAllArgs{ if err = n.rpcClient.Call("Storage.ReadAllHandler", &ReadAllArgs{
Vol: volume, Vol: volume,
Path: path, Path: path,
}, &buf); err != nil { }, &buf); err != nil {
@ -184,20 +212,22 @@ func (n networkStorage) ReadAll(volume, path string) (buf []byte, err error) {
// ReadFile - reads a file. // ReadFile - reads a file.
func (n networkStorage) ReadFile(volume string, path string, offset int64, buffer []byte) (m int64, err error) { func (n networkStorage) ReadFile(volume string, path string, offset int64, buffer []byte) (m int64, err error) {
if err = n.rpcClient.Call("Storage.ReadFileHandler", ReadFileArgs{ var result []byte
err = n.rpcClient.Call("Storage.ReadFileHandler", &ReadFileArgs{
Vol: volume, Vol: volume,
Path: path, Path: path,
Offset: offset, Offset: offset,
Buffer: buffer, Size: len(buffer),
}, &m); err != nil { }, &result)
return 0, toStorageErr(err) // Copy results to buffer.
} copy(buffer, result)
return m, nil // Return length of result, err if any.
return int64(len(result)), toStorageErr(err)
} }
// ListDir - list all entries at prefix. // ListDir - list all entries at prefix.
func (n networkStorage) ListDir(volume, path string) (entries []string, err error) { func (n networkStorage) ListDir(volume, path string) (entries []string, err error) {
if err = n.rpcClient.Call("Storage.ListDirHandler", ListDirArgs{ if err = n.rpcClient.Call("Storage.ListDirHandler", &ListDirArgs{
Vol: volume, Vol: volume,
Path: path, Path: path,
}, &entries); err != nil { }, &entries); err != nil {
@ -210,7 +240,7 @@ func (n networkStorage) ListDir(volume, path string) (entries []string, err erro
// DeleteFile - Delete a file at path. // DeleteFile - Delete a file at path.
func (n networkStorage) DeleteFile(volume, path string) (err error) { func (n networkStorage) DeleteFile(volume, path string) (err error) {
reply := GenericReply{} reply := GenericReply{}
if err = n.rpcClient.Call("Storage.DeleteFileHandler", DeleteFileArgs{ if err = n.rpcClient.Call("Storage.DeleteFileHandler", &DeleteFileArgs{
Vol: volume, Vol: volume,
Path: path, Path: path,
}, &reply); err != nil { }, &reply); err != nil {
@ -222,7 +252,7 @@ func (n networkStorage) DeleteFile(volume, path string) (err error) {
// RenameFile - Rename file. // RenameFile - Rename file.
func (n networkStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) { func (n networkStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) {
reply := GenericReply{} reply := GenericReply{}
if err = n.rpcClient.Call("Storage.RenameFileHandler", RenameFileArgs{ if err = n.rpcClient.Call("Storage.RenameFileHandler", &RenameFileArgs{
SrcVol: srcVolume, SrcVol: srcVolume,
SrcPath: srcPath, SrcPath: srcPath,
DstVol: dstVolume, DstVol: dstVolume,

View File

@ -16,11 +16,14 @@
package cmd package cmd
// GenericReply represents any generic RPC reply. // GenericVolArgs - generic volume args.
type GenericReply struct{} type GenericVolArgs struct {
// Authentication token generated by Login.
GenericArgs
// GenericArgs represents any generic RPC arguments. // Name of the volume.
type GenericArgs struct{} Vol string
}
// ListVolsReply represents list of vols RPC reply. // ListVolsReply represents list of vols RPC reply.
type ListVolsReply struct { type ListVolsReply struct {
@ -30,6 +33,9 @@ type ListVolsReply struct {
// ReadAllArgs represents read all RPC arguments. // ReadAllArgs represents read all RPC arguments.
type ReadAllArgs struct { type ReadAllArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of the volume. // Name of the volume.
Vol string Vol string
@ -39,6 +45,9 @@ type ReadAllArgs struct {
// ReadFileArgs represents read file RPC arguments. // ReadFileArgs represents read file RPC arguments.
type ReadFileArgs struct { type ReadFileArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of the volume. // Name of the volume.
Vol string Vol string
@ -48,12 +57,15 @@ type ReadFileArgs struct {
// Starting offset to start reading into Buffer. // Starting offset to start reading into Buffer.
Offset int64 Offset int64
// Data buffer read from the path at offset. // Data size read from the path at offset.
Buffer []byte Size int
} }
// AppendFileArgs represents append file RPC arguments. // AppendFileArgs represents append file RPC arguments.
type AppendFileArgs struct { type AppendFileArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of the volume. // Name of the volume.
Vol string Vol string
@ -66,6 +78,9 @@ type AppendFileArgs struct {
// StatFileArgs represents stat file RPC arguments. // StatFileArgs represents stat file RPC arguments.
type StatFileArgs struct { type StatFileArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of the volume. // Name of the volume.
Vol string Vol string
@ -75,6 +90,9 @@ type StatFileArgs struct {
// DeleteFileArgs represents delete file RPC arguments. // DeleteFileArgs represents delete file RPC arguments.
type DeleteFileArgs struct { type DeleteFileArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of the volume. // Name of the volume.
Vol string Vol string
@ -84,6 +102,9 @@ type DeleteFileArgs struct {
// ListDirArgs represents list contents RPC arguments. // ListDirArgs represents list contents RPC arguments.
type ListDirArgs struct { type ListDirArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of the volume. // Name of the volume.
Vol string Vol string
@ -93,6 +114,9 @@ type ListDirArgs struct {
// RenameFileArgs represents rename file RPC arguments. // RenameFileArgs represents rename file RPC arguments.
type RenameFileArgs struct { type RenameFileArgs struct {
// Authentication token generated by Login.
GenericArgs
// Name of source volume. // Name of source volume.
SrcVol string SrcVol string

252
cmd/storage-rpc-server.go Normal file
View File

@ -0,0 +1,252 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"io"
"net/rpc"
"path"
"strings"
router "github.com/gorilla/mux"
"github.com/minio/minio/pkg/disk"
)
// Storage server implements rpc primitives to facilitate exporting a
// disk over a network.
type storageServer struct {
storage StorageAPI
path string
}
/// Auth operations
// Login - login handler.
func (s *storageServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultTokenExpiry)
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.ServerVersion = Version
return nil
}
/// Storage operations handlers.
// DiskInfoHandler - disk info handler is rpc wrapper for DiskInfo operation.
func (s *storageServer) DiskInfoHandler(args *GenericArgs, reply *disk.Info) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
info, err := s.storage.DiskInfo()
*reply = info
return err
}
/// Volume operations handlers.
// MakeVolHandler - make vol handler is rpc wrapper for MakeVol operation.
func (s *storageServer) MakeVolHandler(args *GenericVolArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return s.storage.MakeVol(args.Vol)
}
// ListVolsHandler - list vols handler is rpc wrapper for ListVols operation.
func (s *storageServer) ListVolsHandler(args *GenericArgs, reply *ListVolsReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
vols, err := s.storage.ListVols()
if err != nil {
return err
}
reply.Vols = vols
return nil
}
// StatVolHandler - stat vol handler is a rpc wrapper for StatVol operation.
func (s *storageServer) StatVolHandler(args *GenericVolArgs, reply *VolInfo) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
volInfo, err := s.storage.StatVol(args.Vol)
if err != nil {
return err
}
*reply = volInfo
return nil
}
// DeleteVolHandler - delete vol handler is a rpc wrapper for
// DeleteVol operation.
func (s *storageServer) DeleteVolHandler(args *GenericVolArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return s.storage.DeleteVol(args.Vol)
}
/// File operations
// StatFileHandler - stat file handler is rpc wrapper to stat file.
func (s *storageServer) StatFileHandler(args *StatFileArgs, reply *FileInfo) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
fileInfo, err := s.storage.StatFile(args.Vol, args.Path)
if err != nil {
return err
}
*reply = fileInfo
return nil
}
// ListDirHandler - list directory handler is rpc wrapper to list dir.
func (s *storageServer) ListDirHandler(args *ListDirArgs, reply *[]string) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
entries, err := s.storage.ListDir(args.Vol, args.Path)
if err != nil {
return err
}
*reply = entries
return nil
}
// ReadAllHandler - read all handler is rpc wrapper to read all storage API.
func (s *storageServer) ReadAllHandler(args *ReadFileArgs, reply *[]byte) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
buf, err := s.storage.ReadAll(args.Vol, args.Path)
if err != nil {
return err
}
*reply = buf
return nil
}
// ReadFileHandler - read file handler is rpc wrapper to read file.
func (s *storageServer) ReadFileHandler(args *ReadFileArgs, reply *[]byte) (err error) {
defer func() {
if r := recover(); r != nil {
// Recover any panic and return bytes.ErrTooLarge.
err = bytes.ErrTooLarge
}
}() // Do not crash the server.
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
// Allocate the requested buffer from the client.
*reply = make([]byte, args.Size)
var n int64
n, err = s.storage.ReadFile(args.Vol, args.Path, args.Offset, *reply)
// Sending an error over the rpc layer would cause unmarshalling to fail. In situations
// where we have a short read, i.e. `io.ErrUnexpectedEOF`, treat it as a good condition
// and copy the buffer properly.
if err == io.ErrUnexpectedEOF {
// Reset to nil as good condition.
err = nil
}
*reply = (*reply)[0:n]
return err
}
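Because of the trim above, a caller of ReadFileHandler detects end of file by length rather than by error: a reply shorter than the requested size means the server hit EOF. A minimal client-side sketch, assuming an established *rpc.Client and a login token, and assuming ReadFileArgs embeds GenericArgs (following the pattern shown for ListDirArgs) and carries the Vol/Path/Offset/Size fields the handler reads:

// Sketch: calling Storage.ReadFileHandler and honoring the short-read convention.
args := ReadFileArgs{
	GenericArgs: GenericArgs{Token: token}, // token obtained via Storage.LoginHandler
	Vol:         "testvol",
	Path:        "object1",
	Offset:      0,
	Size:        1024,
}
var buf []byte
if err := client.Call("Storage.ReadFileHandler", &args, &buf); err != nil {
	return err // transport or storage failure
}
if int64(len(buf)) < args.Size {
	// Short read: the server trimmed the reply at EOF; no error is returned.
}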
// AppendFileHandler - append file handler is rpc wrapper to append file.
func (s *storageServer) AppendFileHandler(args *AppendFileArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return s.storage.AppendFile(args.Vol, args.Path, args.Buffer)
}
// DeleteFileHandler - delete file handler is rpc wrapper to delete file.
func (s *storageServer) DeleteFileHandler(args *DeleteFileArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return s.storage.DeleteFile(args.Vol, args.Path)
}
// RenameFileHandler - rename file handler is rpc wrapper to rename file.
func (s *storageServer) RenameFileHandler(args *RenameFileArgs, reply *GenericReply) error {
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return s.storage.RenameFile(args.SrcVol, args.SrcPath, args.DstVol, args.DstPath)
}
// Initialize new storage rpc.
func newRPCServer(serverConfig serverCmdConfig) (servers []*storageServer, err error) {
// Initialize posix storage API.
exports := serverConfig.disks
ignoredExports := serverConfig.ignoredDisks
// Save ignored disks in a map
skipDisks := make(map[string]bool)
for _, ignoredExport := range ignoredExports {
skipDisks[ignoredExport] = true
}
for _, export := range exports {
if skipDisks[export] {
continue
}
// e.g. server:/mnt/disk1
if isLocalStorage(export) {
if idx := strings.LastIndex(export, ":"); idx != -1 {
export = export[idx+1:]
}
var storage StorageAPI
storage, err = newPosix(export)
if err != nil && err != errDiskNotFound {
return nil, err
}
servers = append(servers, &storageServer{
storage: storage,
path: export,
})
}
}
return servers, err
}
// registerStorageRPCRouter - register storage rpc router.
func registerStorageRPCRouters(mux *router.Router, stServers []*storageServer) {
// Create a unique route for each disk exported from this node.
for _, stServer := range stServers {
storageRPCServer := rpc.NewServer()
storageRPCServer.RegisterName("Storage", stServer)
// Add minio storage routes.
storageRouter := mux.PathPrefix(reservedBucket).Subrouter()
storageRouter.Path(path.Join("/storage", stServer.path)).Handler(storageRPCServer)
}
}
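Putting the pieces together: a remote node dials the per-disk route registered above, logs in once to obtain a JWT, and passes that token with every storage call. A hedged sketch written as if inside the cmd package (address, export path and credentials are illustrative; "/minio" stands in for reservedBucket):

// Sketch: a client for the storage RPC endpoints registered above.
func exampleStorageClient() ([]VolInfo, error) {
	client, err := rpc.DialHTTPPath("tcp", "server:9000", "/minio/storage/mnt/disk1")
	if err != nil {
		return nil, err
	}
	defer client.Close()

	// Authenticate first; the returned JWT authorizes subsequent calls.
	loginArgs := RPCLoginArgs{Username: "minio", Password: "minio123"}
	loginReply := RPCLoginReply{}
	if err = client.Call("Storage.LoginHandler", &loginArgs, &loginReply); err != nil {
		return nil, err
	}

	// Every storage operation carries the token via GenericArgs.
	volsReply := ListVolsReply{}
	err = client.Call("Storage.ListVolsHandler", &GenericArgs{Token: loginReply.Token}, &volsReply)
	return volsReply.Vols, err
}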

View File

@ -91,7 +91,7 @@ func TestByteErrors(t *testing.T) {
t.Errorf("Expected error, got %v", got) t.Errorf("Expected error, got %v", got)
} }
// Empty string. // Empty string.
got, err = strconvBytes("") _, err = strconvBytes("")
if err == nil { if err == nil {
t.Errorf("Expected error parsing nothing") t.Errorf("Expected error parsing nothing")
} }

View File

@ -45,7 +45,35 @@ import (
// Tests should initNSLock only once. // Tests should initNSLock only once.
func init() { func init() {
// Initialize name space lock. // Initialize name space lock.
initNSLock() isDist := false
initNSLock(isDist)
}
func prepareFS() (ObjectLayer, string, error) {
fsDirs, err := getRandomDisks(1)
if err != nil {
return nil, "", err
}
obj, err := getSingleNodeObjectLayer(fsDirs[0])
if err != nil {
removeRoots(fsDirs)
return nil, "", err
}
return obj, fsDirs[0], nil
}
func prepareXL() (ObjectLayer, []string, error) {
nDisks := 16
fsDirs, err := getRandomDisks(nDisks)
if err != nil {
return nil, nil, err
}
obj, err := getXLObjectLayer(fsDirs)
if err != nil {
removeRoots(fsDirs)
return nil, nil, err
}
return obj, fsDirs, nil
} }
// TestErrHandler - Go's testing.T, testing.B, and gocheck.C satisfy this interface. // TestErrHandler - Go's testing.T, testing.B, and gocheck.C satisfy this interface.
@ -109,6 +137,7 @@ type TestServer struct {
AccessKey string AccessKey string
SecretKey string SecretKey string
Server *httptest.Server Server *httptest.Server
Obj ObjectLayer
} }
// Starts the test server and returns the TestServer instance. // Starts the test server and returns the TestServer instance.
@ -116,7 +145,67 @@ func StartTestServer(t TestErrHandler, instanceType string) TestServer {
// create an instance of TestServer. // create an instance of TestServer.
testServer := TestServer{} testServer := TestServer{}
// create temporary backend for the test server. // create temporary backend for the test server.
_, erasureDisks, err := makeTestBackend(instanceType) nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal("Failed to create disks for the backend")
}
root, err := newTestConfig("us-east-1")
if err != nil {
t.Fatalf("%s", err)
}
// Test Server needs to start before formatting of disks.
// Get credential.
credentials := serverConfig.GetCredential()
testServer.Root = root
testServer.Disks = disks
testServer.AccessKey = credentials.AccessKeyID
testServer.SecretKey = credentials.SecretAccessKey
// Run TestServer.
testServer.Server = httptest.NewServer(configureServerHandler(serverCmdConfig{disks: disks}))
objLayer, err := makeTestBackend(disks, instanceType)
if err != nil {
t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
testServer.Obj = objLayer
objLayerMutex.Lock()
globalObjectAPI = objLayer
objLayerMutex.Unlock()
return testServer
}
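A test consumes the returned TestServer by signing requests with its credentials against the live endpoint, then tearing it down. A minimal sketch (the bucket name is illustrative; a missing bucket simply yields a NoSuchBucket response, the point is the signed round trip):

// Sketch: end-to-end request against the test server (instance type "XL").
func TestBucketLocationEndToEnd(t *testing.T) {
	ts := StartTestServer(t, "XL")
	defer ts.Stop()

	req, err := newTestSignedRequest("GET", getBucketLocationURL(ts.Server.URL, "testbucket"),
		0, nil, ts.AccessKey, ts.SecretKey)
	if err != nil {
		t.Fatalf("failed to build signed request: %v", err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()
}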
// Initializes control RPC end points.
// The object layer will be a temp backend used for testing purposes.
func initTestControlRPCEndPoint(objectLayer ObjectLayer) http.Handler {
// Initialize Web.
controllerHandlers := &controllerAPIHandlers{
ObjectAPI: func() ObjectLayer { return objectLayer },
}
// Initialize router.
muxRouter := router.NewRouter()
registerControllerRPCRouter(muxRouter, controllerHandlers)
return muxRouter
}
// StartTestRPCServer - Creates a temp XL/FS backend and initializes control RPC end points,
// then starts a test server with those control RPC end points registered.
func StartTestRPCServer(t TestErrHandler, instanceType string) TestServer {
// create temporary backend for the test server.
nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal("Failed to create disks for the backend")
}
// create an instance of TestServer.
testRPCServer := TestServer{}
// create temporary backend for the test server.
objLayer, err := makeTestBackend(disks, instanceType)
if err != nil { if err != nil {
t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err) t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
@ -130,14 +219,15 @@ func StartTestServer(t TestErrHandler, instanceType string) TestServer {
// Get credential. // Get credential.
credentials := serverConfig.GetCredential() credentials := serverConfig.GetCredential()
testServer.Root = root testRPCServer.Root = root
testServer.Disks = erasureDisks testRPCServer.Disks = disks
testServer.AccessKey = credentials.AccessKeyID testRPCServer.AccessKey = credentials.AccessKeyID
testServer.SecretKey = credentials.SecretAccessKey testRPCServer.SecretKey = credentials.SecretAccessKey
testRPCServer.Obj = objLayer
// Run TestServer. // Run TestServer.
testServer.Server = httptest.NewServer(configureServerHandler(serverCmdConfig{disks: erasureDisks})) testRPCServer.Server = httptest.NewServer(initTestControlRPCEndPoint(objLayer))
return testServer return testRPCServer
} }
// Configure the server for the test run. // Configure the server for the test run.
@ -177,6 +267,208 @@ func (testServer TestServer) Stop() {
testServer.Server.Close() testServer.Server.Close()
} }
// Sign given streaming request using Signature V4; returns the seed signature.
func signStreamingRequest(req *http.Request, accessKey, secretKey string) (string, error) {
// Get hashed payload.
hashedPayload := req.Header.Get("x-amz-content-sha256")
if hashedPayload == "" {
return "", fmt.Errorf("Invalid hashed payload.")
}
currTime := time.Now().UTC()
// Set x-amz-date.
req.Header.Set("x-amz-date", currTime.Format(iso8601Format))
// Get header map.
headerMap := make(map[string][]string)
for k, vv := range req.Header {
// If request header key is not in ignored headers, then add it.
if _, ok := ignoredStreamingHeaders[http.CanonicalHeaderKey(k)]; !ok {
headerMap[strings.ToLower(k)] = vv
}
}
// Get header keys.
headers := []string{"host"}
for k := range headerMap {
headers = append(headers, k)
}
sort.Strings(headers)
// Get canonical headers.
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(req.URL.Host)
fallthrough
default:
for idx, v := range headerMap[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
canonicalHeaders := buf.String()
// Get signed headers.
signedHeaders := strings.Join(headers, ";")
// Get canonical query string.
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
// Get canonical URI.
canonicalURI := getURLEncodedName(req.URL.Path)
// Get canonical request.
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
canonicalRequest := strings.Join([]string{
req.Method,
canonicalURI,
req.URL.RawQuery,
canonicalHeaders,
signedHeaders,
hashedPayload,
}, "\n")
// Get scope.
scope := strings.Join([]string{
currTime.Format(yyyymmdd),
"us-east-1",
"s3",
"aws4_request",
}, "/")
stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
region := sumHMAC(date, []byte("us-east-1"))
service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
// final Authorization header
parts := []string{
"AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
"SignedHeaders=" + signedHeaders,
"Signature=" + signature,
}
auth := strings.Join(parts, ", ")
req.Header.Set("Authorization", auth)
return signature, nil
}
// Returns new HTTP request object.
func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64, body io.ReadSeeker) (*http.Request, error) {
if method == "" {
method = "POST"
}
req, err := http.NewRequest(method, urlStr, nil)
if err != nil {
return nil, err
}
if body == nil {
// this is added to avoid panic during ioutil.ReadAll(req.Body).
// the stack trace can be found here https://github.com/minio/minio/pull/2074.
// This is very similar to https://github.com/golang/go/issues/7527.
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
}
contentLength := calculateStreamContentLength(dataLength, chunkSize)
req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
req.Header.Set("content-encoding", "aws-chunked")
req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLength, 10))
req.Header.Set("content-length", strconv.FormatInt(contentLength, 10))
// Seek back to beginning.
body.Seek(0, 0)
// Add body
req.Body = ioutil.NopCloser(body)
req.ContentLength = contentLength
return req, nil
}
// Returns new HTTP request object signed with streaming signature v4.
func newTestStreamingSignedRequest(method, urlStr string, contentLength, chunkSize int64, body io.ReadSeeker, accessKey, secretKey string) (*http.Request, error) {
req, err := newTestStreamingRequest(method, urlStr, contentLength, chunkSize, body)
if err != nil {
return nil, err
}
signature, err := signStreamingRequest(req, accessKey, secretKey)
if err != nil {
return nil, err
}
var stream []byte
var buffer []byte
body.Seek(0, 0)
for {
buffer = make([]byte, chunkSize)
n, err := body.Read(buffer)
if err != nil && err != io.EOF {
return nil, err
}
currTime := time.Now().UTC()
// Get scope.
scope := strings.Join([]string{
currTime.Format(yyyymmdd),
"us-east-1",
"s3",
"aws4_request",
}, "/")
stringToSign := "AWS4-HMAC-SHA256-PAYLOAD" + "\n"
stringToSign = stringToSign + currTime.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + signature + "\n"
stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256(""))
stringToSign = stringToSign + hex.EncodeToString(sum256(buffer[:n]))
date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
region := sumHMAC(date, []byte("us-east-1"))
service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
signature = hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
stream = append(stream, []byte(fmt.Sprintf("%x", n)+";chunk-signature="+signature+"\r\n")...)
stream = append(stream, buffer[:n]...)
stream = append(stream, []byte("\r\n")...)
if n <= 0 {
break
}
}
req.Body = ioutil.NopCloser(bytes.NewReader(stream))
return req, nil
}
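Each pass through the loop above emits one chunk in the aws-chunked wire format, and each chunk's string-to-sign chains the previous chunk's signature, seeded by the header signature returned by signStreamingRequest. With 1024-byte chunks the body looks like this (signatures abbreviated; the zero-length chunk terminates the stream):

400;chunk-signature=ad80c730a21e...\r\n
<1024 bytes of payload>\r\n
0;chunk-signature=b6c6ea8a5354...\r\n
\r\n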
// Sign given request using Signature V4. // Sign given request using Signature V4.
func signRequest(req *http.Request, accessKey, secretKey string) error { func signRequest(req *http.Request, accessKey, secretKey string) error {
// Get hashed payload. // Get hashed payload.
@ -285,6 +577,11 @@ func signRequest(req *http.Request, accessKey, secretKey string) error {
return nil return nil
} }
// getCredential generate a credential string.
func getCredential(accessKeyID, location string, t time.Time) string {
return accessKeyID + "/" + getScope(t, location)
}
// Returns new HTTP request object. // Returns new HTTP request object.
func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) { func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
if method == "" { if method == "" {
@ -336,6 +633,11 @@ func newTestSignedRequest(method, urlStr string, contentLength int64, body io.Re
return nil, err return nil, err
} }
// Anonymous requests return quickly.
if accessKey == "" || secretKey == "" {
return req, nil
}
err = signRequest(req, accessKey, secretKey) err = signRequest(req, accessKey, secretKey)
if err != nil { if err != nil {
return nil, err return nil, err
@ -378,6 +680,9 @@ func newTestWebRPCRequest(rpcMethod string, authorization string, data interface
} }
encapsulatedData := genericJSON{JSONRPC: "2.0", ID: "1", Method: rpcMethod, Params: data} encapsulatedData := genericJSON{JSONRPC: "2.0", ID: "1", Method: rpcMethod, Params: data}
jsonData, err := json.Marshal(encapsulatedData) jsonData, err := json.Marshal(encapsulatedData)
if err != nil {
return nil, err
}
req, err := newWebRPCRequest(rpcMethod, authorization, bytes.NewReader(jsonData)) req, err := newWebRPCRequest(rpcMethod, authorization, bytes.NewReader(jsonData))
if err != nil { if err != nil {
return nil, err return nil, err
@ -416,24 +721,24 @@ func getTestWebRPCResponse(resp *httptest.ResponseRecorder, data interface{}) er
// if the option is // if the option is
// FS: Returns a single temp disk setup and initializes the FS backend. // FS: Returns a single temp disk setup and initializes the FS backend.
// XL: Returns a 16 temp disk setup and initializes the XL backend. // XL: Returns a 16 temp disk setup and initializes the XL backend.
func makeTestBackend(instanceType string) (ObjectLayer, []string, error) { func makeTestBackend(disks []string, instanceType string) (ObjectLayer, error) {
switch instanceType { switch instanceType {
case "FS": case "FS":
objLayer, fsroot, err := getSingleNodeObjectLayer() objLayer, err := getSingleNodeObjectLayer(disks[0])
if err != nil { if err != nil {
return nil, []string{}, err return nil, err
} }
return objLayer, []string{fsroot}, err return objLayer, err
case "XL": case "XL":
objectLayer, erasureDisks, err := getXLObjectLayer() objectLayer, err := getXLObjectLayer(disks)
if err != nil { if err != nil {
return nil, []string{}, err return nil, err
} }
return objectLayer, erasureDisks, err return objectLayer, err
default: default:
errMsg := "Invalid instance type, Only FS and XL are valid options" errMsg := "Invalid instance type, Only FS and XL are valid options"
return nil, []string{}, fmt.Errorf("Failed obtaining Temp XL layer: <ERROR> %s", errMsg) return nil, fmt.Errorf("Failed obtaining Temp XL layer: <ERROR> %s", errMsg)
} }
} }
@ -693,7 +998,13 @@ func getHEADBucketURL(endPoint, bucketName string) string {
// return URL for deleting the bucket. // return URL for deleting the bucket.
func getDeleteBucketURL(endPoint, bucketName string) string { func getDeleteBucketURL(endPoint, bucketName string) string {
return makeTestTargetURL(endPoint, bucketName, "", url.Values{}) return makeTestTargetURL(endPoint, bucketName, "", url.Values{})
}
// return URL For fetching location of the bucket.
func getBucketLocationURL(endPoint, bucketName string) string {
queryValue := url.Values{}
queryValue.Set("location", "")
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
} }
// return URL for listing objects in the bucket with V1 legacy API. // return URL for listing objects in the bucket with V1 legacy API.
@ -740,14 +1051,26 @@ func getAbortMultipartUploadURL(endPoint, bucketName, objectName, uploadID strin
return makeTestTargetURL(endPoint, bucketName, objectName, queryValue) return makeTestTargetURL(endPoint, bucketName, objectName, queryValue)
} }
// return URL for a new multipart upload. // return URL for listing pending multipart uploads.
func getListMultipartURL(endPoint, bucketName string) string { func getListMultipartURL(endPoint, bucketName string) string {
queryValue := url.Values{} queryValue := url.Values{}
queryValue.Set("uploads", "") queryValue.Set("uploads", "")
return makeTestTargetURL(endPoint, bucketName, "", queryValue) return makeTestTargetURL(endPoint, bucketName, "", queryValue)
} }
// return URL for a new multipart upload. // return URL for listing pending multipart uploads with parameters.
func getListMultipartUploadsURLWithParams(endPoint, bucketName, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads string) string {
queryValue := url.Values{}
queryValue.Set("uploads", "")
queryValue.Set("prefix", prefix)
queryValue.Set("delimiter", delimiter)
queryValue.Set("key-marker", keyMarker)
queryValue.Set("upload-id-marker", uploadIDMarker)
queryValue.Set("max-uploads", maxUploads)
return makeTestTargetURL(endPoint, bucketName, "", queryValue)
}
// return URL for listing parts on a given upload id.
func getListMultipartURLWithParams(endPoint, bucketName, objectName, uploadID, maxParts string) string { func getListMultipartURLWithParams(endPoint, bucketName, objectName, uploadID, maxParts string) string {
queryValues := url.Values{} queryValues := url.Values{}
queryValues.Set("uploadId", uploadID) queryValues.Set("uploadId", uploadID)
@ -768,21 +1091,30 @@ func getTestRoot() (string, error) {
return ioutil.TempDir(os.TempDir(), "api-") return ioutil.TempDir(os.TempDir(), "api-")
} }
// getXLObjectLayer - Instantiates XL object layer and returns it. // getRandomDisks - Creates a slice of N random disks, each of the form minio-XXX.
func getXLObjectLayer() (ObjectLayer, []string, error) { func getRandomDisks(N int) ([]string, error) {
var nDisks = 16 // Maximum disks.
var erasureDisks []string var erasureDisks []string
for i := 0; i < nDisks; i++ { for i := 0; i < N; i++ {
path, err := ioutil.TempDir(os.TempDir(), "minio-") path, err := ioutil.TempDir(os.TempDir(), "minio-")
if err != nil { if err != nil {
return nil, nil, err // Remove directories created so far.
removeRoots(erasureDisks)
return nil, err
} }
erasureDisks = append(erasureDisks, path) erasureDisks = append(erasureDisks, path)
} }
return erasureDisks, nil
}
// getXLObjectLayer - Instantiates XL object layer and returns it.
func getXLObjectLayer(erasureDisks []string) (ObjectLayer, error) {
err := formatDisks(erasureDisks, nil)
if err != nil {
return nil, err
}
objLayer, err := newXLObjects(erasureDisks, nil) objLayer, err := newXLObjects(erasureDisks, nil)
if err != nil { if err != nil {
return nil, nil, err return nil, err
} }
// Disabling the cache for integration tests. // Disabling the cache for integration tests.
// Should use the object layer tests for validating cache. // Should use the object layer tests for validating cache.
@ -790,23 +1122,17 @@ func getXLObjectLayer() (ObjectLayer, []string, error) {
xl.objCacheEnabled = false xl.objCacheEnabled = false
} }
return objLayer, erasureDisks, nil return objLayer, nil
} }
// getSingleNodeObjectLayer - Instantiates single node object layer and returns it. // getSingleNodeObjectLayer - Instantiates single node object layer and returns it.
func getSingleNodeObjectLayer() (ObjectLayer, string, error) { func getSingleNodeObjectLayer(disk string) (ObjectLayer, error) {
// Make a temporary directory to use as the obj. // Create the object layer.
fsDir, err := ioutil.TempDir("", "minio-") objLayer, err := newFSObjects(disk)
if err != nil { if err != nil {
return nil, "", err return nil, err
} }
return objLayer, nil
// Create the obj.
objLayer, err := newFSObjects(fsDir)
if err != nil {
return nil, "", err
}
return objLayer, fsDir, nil
} }
// removeRoots - Cleans up initialized directories during tests. // removeRoots - Cleans up initialized directories during tests.
@ -826,6 +1152,66 @@ func removeDiskN(disks []string, n int) {
} }
} }
// creates a bucket for the tests and returns the bucket name.
// initializes the specified API endpoints for the tests.
// initializes the root and returns its path.
// returns credentials.
func initAPIHandlerTest(obj ObjectLayer, endPoints []string) (bucketName, rootPath string, apiRouter http.Handler, err error) {
// get random bucket name.
bucketName = getRandomBucketName()
// Create bucket.
err = obj.MakeBucket(bucketName)
if err != nil {
// failed to create new bucket, return err.
return "", "", nil, err
}
// Register the API end points with XL/FS object layer.
// Registering only the GetObject handler.
apiRouter = initTestAPIEndPoints(obj, endPoints)
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
rootPath, err = newTestConfig("us-east-1")
if err != nil {
return "", "", nil, err
}
return bucketName, rootPath, apiRouter, nil
}
// ExecObjectLayerAPITest - executes object layer API tests.
// Creates single node and XL ObjectLayer instance, registers the specified API end points and runs test for both the layers.
func ExecObjectLayerAPITest(t TestErrHandler, objAPITest objAPITestType, endPoints []string) {
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
}
bucketFS, fsRoot, fsAPIRouter, err := initAPIHandlerTest(objLayer, endPoints)
if err != nil {
t.Fatalf("Initialzation of API handler tests failed: <ERROR> %s", err)
}
credentials := serverConfig.GetCredential()
// Executing the object layer tests for single node setup.
objAPITest(objLayer, singleNodeTestStr, bucketFS, fsAPIRouter, credentials, t)
objLayer, xlDisks, err := prepareXL()
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}
bucketXL, xlRoot, xlAPIRouter, err := initAPIHandlerTest(objLayer, endPoints)
if err != nil {
t.Fatalf("Initialzation of API handler tests failed: <ERROR> %s", err)
}
credentials = serverConfig.GetCredential()
// Executing the object layer tests for XL.
objAPITest(objLayer, xLTestStr, bucketXL, xlAPIRouter, credentials, t)
defer removeRoots(append(xlDisks, fsDir, fsRoot, xlRoot))
}
// function to be passed to ExecObjectLayerAPITest, for executing object layer API handler tests.
type objAPITestType func(obj ObjectLayer, instanceType string, bucketName string,
apiRouter http.Handler, credentials credential, t TestErrHandler)
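A test written against this type receives a fully wired router plus credentials to sign with; everything it needs is already in this file. A minimal sketch using only helpers defined above (register it by passing "GetBucketLocation" in the endPoints list):

// Sketch: a function satisfying objAPITestType.
func testGetBucketLocationSketch(obj ObjectLayer, instanceType string, bucketName string,
	apiRouter http.Handler, credentials credential, t TestErrHandler) {
	req, err := newTestSignedRequest("GET", getBucketLocationURL("", bucketName),
		0, nil, credentials.AccessKeyID, credentials.SecretAccessKey)
	if err != nil {
		t.Fatalf("%s: failed to create signed request: %v", instanceType, err)
	}
	rec := httptest.NewRecorder()
	apiRouter.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("%s: expected 200 OK, got %d", instanceType, rec.Code)
	}
}

// Wired up as: ExecObjectLayerAPITest(t, testGetBucketLocationSketch, []string{"GetBucketLocation"})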
// Regular object test type. // Regular object test type.
type objTestType func(obj ObjectLayer, instanceType string, t TestErrHandler) type objTestType func(obj ObjectLayer, instanceType string, t TestErrHandler)
@ -835,14 +1221,14 @@ type objTestDiskNotFoundType func(obj ObjectLayer, instanceType string, dirs []s
// ExecObjectLayerTest - executes object layer tests. // ExecObjectLayerTest - executes object layer tests.
// Creates single node and XL ObjectLayer instance and runs test for both the layers. // Creates single node and XL ObjectLayer instance and runs test for both the layers.
func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) { func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
objLayer, fsDir, err := getSingleNodeObjectLayer() objLayer, fsDir, err := prepareFS()
if err != nil { if err != nil {
t.Fatalf("Initialization of object layer failed for single node setup: %s", err) t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
} }
// Executing the object layer tests for single node setup. // Executing the object layer tests for single node setup.
objTest(objLayer, singleNodeTestStr, t) objTest(objLayer, singleNodeTestStr, t)
objLayer, fsDirs, err := getXLObjectLayer() objLayer, fsDirs, err := prepareXL()
if err != nil { if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err) t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
} }
@ -854,7 +1240,7 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
// ExecObjectLayerDiskNotFoundTest - executes object layer tests while deleting // ExecObjectLayerDiskNotFoundTest - executes object layer tests while deleting
// disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer. // disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer.
func ExecObjectLayerDiskNotFoundTest(t *testing.T, objTest objTestDiskNotFoundType) { func ExecObjectLayerDiskNotFoundTest(t *testing.T, objTest objTestDiskNotFoundType) {
objLayer, fsDirs, err := getXLObjectLayer() objLayer, fsDirs, err := prepareXL()
if err != nil { if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err) t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
} }
@ -869,13 +1255,18 @@ type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []str
// ExecObjectLayerStaleFilesTest - executes object layer tests that leave stale // ExecObjectLayerStaleFilesTest - executes object layer tests that leave stale
// files/directories under .minio/tmp. Creates XL ObjectLayer instance and runs test for XL layer. // files/directories under .minio/tmp. Creates XL ObjectLayer instance and runs test for XL layer.
func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) { func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) {
objLayer, fsDirs, err := getXLObjectLayer() nDisks := 16
erasureDisks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatalf("Initialization of disks for XL setup: %s", err)
}
objLayer, err := getXLObjectLayer(erasureDisks)
if err != nil { if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err) t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
} }
// Executing the object layer tests for XL. // Executing the object layer tests for XL.
objTest(objLayer, xLTestStr, fsDirs, t) objTest(objLayer, xLTestStr, erasureDisks, t)
defer removeRoots(fsDirs) defer removeRoots(erasureDisks)
} }
// Takes in XL/FS object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler. // Takes in XL/FS object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler.
@ -888,7 +1279,7 @@ func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Hand
// All object storage operations are registered as HTTP handlers on `objectAPIHandlers`. // All object storage operations are registered as HTTP handlers on `objectAPIHandlers`.
// When the handlers get an HTTP request they use the underlying ObjectLayer to perform operations. // When the handlers get an HTTP request they use the underlying ObjectLayer to perform operations.
api := objectAPIHandlers{ api := objectAPIHandlers{
ObjectAPI: objLayer, ObjectAPI: func() ObjectLayer { return objLayer },
} }
// API Router. // API Router.
apiRouter := muxRouter.NewRoute().PathPrefix("/").Subrouter() apiRouter := muxRouter.NewRoute().PathPrefix("/").Subrouter()
@ -903,24 +1294,25 @@ func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Hand
// Register CopyObject handler. // Register CopyObject handler.
case "CopyObject": case "CopyObject":
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectHandler) bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectHandler)
// Register PutBucket Policy handler. // Register PutBucket Policy handler.
case "PutBucketPolicy": case "PutBucketPolicy":
bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "") bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "")
// Register Delete bucket HTTP policy handler. // Register Delete bucket HTTP policy handler.
case "DeleteBucketPolicy": case "DeleteBucketPolicy":
bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "") bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "")
// Register Get Bucket policy HTTP Handler.
// Register Get Bucket policy HTTP Handler.
case "GetBucketPolicy": case "GetBucketPolicy":
bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "") bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
// Register GetBucketLocation handler.
// Register Post Bucket policy function. case "GetBucketLocation":
case "PostBucketPolicy": bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "")
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(api.PostPolicyBucketHandler) // Register HeadBucket handler.
case "HeadBucket":
// Register all api endpoints by default. bucket.Methods("HEAD").HandlerFunc(api.HeadBucketHandler)
// Register ListMultipartUploads handler.
case "ListMultipartUploads":
bucket.Methods("GET").HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "")
// Register all api endpoints by default.
default: default:
registerAPIRouter(muxRouter, api) registerAPIRouter(muxRouter, api)
// No need to register any more end points, all the end points are registered. // No need to register any more end points, all the end points are registered.
@ -930,10 +1322,11 @@ func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Hand
return muxRouter return muxRouter
} }
// Initialize Web RPC Handlers for testing
func initTestWebRPCEndPoint(objLayer ObjectLayer) http.Handler { func initTestWebRPCEndPoint(objLayer ObjectLayer) http.Handler {
// Initialize Web. // Initialize Web.
webHandlers := &webAPIHandlers{ webHandlers := &webAPIHandlers{
ObjectAPI: objLayer, ObjectAPI: func() ObjectLayer { return objLayer },
} }
// Initialize router. // Initialize router.

View File

@ -148,7 +148,7 @@ func listDirFactory(isLeaf isLeafFunc, disks ...StorageAPI) listDirFunc {
break break
} }
// Return error at the end. // Return error at the end.
return nil, false, err return nil, false, traceError(err)
} }
return listDir return listDir
} }
@ -173,7 +173,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
if err != nil { if err != nil {
select { select {
case <-endWalkCh: case <-endWalkCh:
return errWalkAbort return traceError(errWalkAbort)
case resultCh <- treeWalkResult{err: err}: case resultCh <- treeWalkResult{err: err}:
return err return err
} }
@ -235,7 +235,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
isEOF := ((i == len(entries)-1) && isEnd) isEOF := ((i == len(entries)-1) && isEnd)
select { select {
case <-endWalkCh: case <-endWalkCh:
return errWalkAbort return traceError(errWalkAbort)
case resultCh <- treeWalkResult{entry: pathJoin(prefixDir, entry), end: isEOF}: case resultCh <- treeWalkResult{entry: pathJoin(prefixDir, entry), end: isEOF}:
} }
} }

View File

@ -337,7 +337,7 @@ func TestListDir(t *testing.T) {
} }
// None of the disks are available, should get errDiskNotFound. // None of the disks are available, should get errDiskNotFound.
_, _, err = listDir(volume, "", "") _, _, err = listDir(volume, "", "")
if err != errDiskNotFound { if errorCause(err) != errDiskNotFound {
t.Error("expected errDiskNotFound error.") t.Error("expected errDiskNotFound error.")
} }
} }
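The pattern behind these hunks: traceError decorates a sentinel error with call-site context, so callers must compare against errorCause(err) rather than err itself, exactly as the updated assertion above does. A minimal sketch of the idiom (doRead is a hypothetical low-level helper):

func loadEntries() ([]string, error) {
	entries, err := doRead() // hypothetical read that may fail with errDiskNotFound
	if err != nil {
		return nil, traceError(err) // wrap at the failure site
	}
	return entries, nil
}

// Compare the cause, never the wrapper:
if _, err := loadEntries(); errorCause(err) == errDiskNotFound {
	// handle the missing disk
}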

View File

@ -38,6 +38,9 @@ var errSignatureMismatch = errors.New("Signature does not match")
// used when token used for authentication by the MinioBrowser has expired // used when token used for authentication by the MinioBrowser has expired
var errInvalidToken = errors.New("Invalid token") var errInvalidToken = errors.New("Invalid token")
// used when cached timestamps do not match with what the client remembers. // used when cached timestamps do not match with what the client remembers.
var errInvalidTimestamp = errors.New("Timestamps don't match, server may have restarted.")
// If x-amz-content-sha256 header value mismatches with what we calculate. // If x-amz-content-sha256 header value mismatches with what we calculate.
var errContentSHA256Mismatch = errors.New("sha256 mismatch") var errContentSHA256Mismatch = errors.New("sha256 mismatch")

View File

@ -33,10 +33,6 @@ import (
// command specific flags. // command specific flags.
var ( var (
updateFlags = []cli.Flag{ updateFlags = []cli.Flag{
cli.BoolFlag{
Name: "help, h",
Usage: "Help for update.",
},
cli.BoolFlag{ cli.BoolFlag{
Name: "experimental, E", Name: "experimental, E",
Usage: "Check experimental update.", Usage: "Check experimental update.",
@ -49,7 +45,7 @@ var updateCmd = cli.Command{
Name: "update", Name: "update",
Usage: "Check for a new software update.", Usage: "Check for a new software update.",
Action: mainUpdate, Action: mainUpdate,
Flags: updateFlags, Flags: append(updateFlags, globalFlags...),
CustomHelpTemplate: `Name: CustomHelpTemplate: `Name:
minio {{.Name}} - {{.Usage}} minio {{.Name}} - {{.Usage}}
@ -133,7 +129,7 @@ func parseReleaseData(data string) (time.Time, error) {
} }
// verify updates for releases. // verify updates for releases.
func getReleaseUpdate(updateURL string, noError bool) updateMessage { func getReleaseUpdate(updateURL string) (updateMsg updateMessage, errMsg string, err error) {
// Construct a new update url. // Construct a new update url.
newUpdateURLPrefix := updateURL + "/" + runtime.GOOS + "-" + runtime.GOARCH newUpdateURLPrefix := updateURL + "/" + runtime.GOOS + "-" + runtime.GOARCH
newUpdateURL := newUpdateURLPrefix + "/minio.shasum" newUpdateURL := newUpdateURLPrefix + "/minio.shasum"
@ -150,7 +146,7 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
} }
// Initialize update message. // Initialize update message.
updateMsg := updateMessage{ updateMsg = updateMessage{
Download: downloadURL, Download: downloadURL,
Version: Version, Version: Version,
} }
@ -160,61 +156,54 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
Timeout: 3 * time.Second, Timeout: 3 * time.Second,
} }
// Fetch new update.
data, err := client.Get(newUpdateURL)
if err != nil && noError {
return updateMsg
}
fatalIf((err), "Unable to read from update URL "+newUpdateURL+".")
// Error out if 'update' command is issued for development based builds.
if Version == "DEVELOPMENT.GOGET" && !noError {
fatalIf((errors.New("")),
"Update mechanism is not supported for go get based binary builds. Please download official releases from https://minio.io/#minio")
}
// Parse current minio version into RFC3339. // Parse current minio version into RFC3339.
current, err := time.Parse(time.RFC3339, Version) current, err := time.Parse(time.RFC3339, Version)
if err != nil && noError { if err != nil {
return updateMsg errMsg = "Unable to parse version string as time."
return
} }
fatalIf((err), "Unable to parse version string as time.")
// Verify if current minio version is zero. // Verify if current minio version is zero.
if current.IsZero() && !noError { if current.IsZero() {
fatalIf((errors.New("")), err = errors.New("date should not be zero")
"Updates mechanism is not supported for custom builds. Please download official releases from https://minio.io/#minio") errMsg = "Updates mechanism is not supported for custom builds. Please download official releases from https://minio.io/#minio"
return
}
// Fetch new update.
data, err := client.Get(newUpdateURL)
if err != nil {
return
} }
// Verify if we have a valid http response i.e http.StatusOK. // Verify if we have a valid http response i.e http.StatusOK.
if data != nil { if data != nil {
if data.StatusCode != http.StatusOK { if data.StatusCode != http.StatusOK {
// Return quickly if noError is set. errMsg = "Failed to retrieve update notice."
if noError { err = errors.New("http status : " + data.Status)
return updateMsg return
}
fatalIf((errors.New("")), "Failed to retrieve update notice. "+data.Status)
} }
} }
// Read the response body. // Read the response body.
updateBody, err := ioutil.ReadAll(data.Body) updateBody, err := ioutil.ReadAll(data.Body)
if err != nil && noError { if err != nil {
return updateMsg errMsg = "Failed to retrieve update notice. Please try again later."
return
} }
fatalIf((err), "Failed to retrieve update notice. Please try again later.")
errMsg = "Failed to retrieve update notice. Please try again later. Please report this issue at https://github.com/minio/minio/issues"
// Parse the date if its valid. // Parse the date if its valid.
latest, err := parseReleaseData(string(updateBody)) latest, err := parseReleaseData(string(updateBody))
if err != nil && noError { if err != nil {
return updateMsg return
} }
errMsg := "Failed to retrieve update notice. Please try again later. Please report this issue at https://github.com/minio/minio/issues"
fatalIf(err, errMsg)
// Verify if the date is not zero. // Verify if the date is not zero.
if latest.IsZero() && !noError { if latest.IsZero() {
fatalIf((errors.New("")), errMsg) err = errors.New("date should not be zero")
return
} }
// Is the update latest?. // Is the update latest?.
@ -223,18 +212,25 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
} }
// Return update message. // Return update message.
return updateMsg return updateMsg, "", nil
} }
// main entry point for update command. // main entry point for update command.
func mainUpdate(ctx *cli.Context) { func mainUpdate(ctx *cli.Context) {
// Print all errors as they occur. // Error out if 'update' command is issued for development based builds.
noError := false if Version == "DEVELOPMENT.GOGET" {
fatalIf(errors.New(""), "Update mechanism is not supported for go get based binary builds. Please download official releases from https://minio.io/#minio")
}
// Check for update. // Check for update.
var updateMsg updateMessage
var errMsg string
var err error
if ctx.Bool("experimental") { if ctx.Bool("experimental") {
console.Println(getReleaseUpdate(minioUpdateExperimentalURL, noError)) updateMsg, errMsg, err = getReleaseUpdate(minioUpdateExperimentalURL)
} else { } else {
console.Println(getReleaseUpdate(minioUpdateStableURL, noError)) updateMsg, errMsg, err = getReleaseUpdate(minioUpdateStableURL)
} }
fatalIf(err, errMsg)
console.Println(updateMsg)
} }
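The refactor leans on the fact that minio versions are RFC3339 timestamps, so "is an update available" reduces to a time comparison between the running build and the parsed release date. A hedged sketch of that final check (the Update field name is illustrative; the actual assignment sits in the elided `// Is the update latest?` hunk):

current, _ := time.Parse(time.RFC3339, Version)   // running build
latest, _ := parseReleaseData(string(updateBody)) // newest published release
if latest.After(current) {
	updateMsg.Update = true // assumed field: flags that a newer release exists
}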

View File

@ -20,10 +20,14 @@ import (
"encoding/base64" "encoding/base64"
"encoding/xml" "encoding/xml"
"errors" "errors"
"fmt"
"io" "io"
"net"
"net/http" "net/http"
"os" "os"
"os/exec" "os/exec"
"path/filepath"
"runtime"
"strings" "strings"
"sync" "sync"
"syscall" "syscall"
@ -43,6 +47,54 @@ func cloneHeader(h http.Header) http.Header {
return h2 return h2
} }
// checkDuplicates - function to validate if there are duplicates in a slice of strings.
func checkDuplicates(list []string) error {
// Empty lists are not allowed.
if len(list) == 0 {
return errInvalidArgument
}
// Empty keys are not allowed.
for _, key := range list {
if key == "" {
return errInvalidArgument
}
}
listMaps := make(map[string]int)
// Navigate through each config and count the entries.
for _, key := range list {
listMaps[key]++
}
// Validate if there are any duplicate counts.
for key, count := range listMaps {
if count != 1 {
return fmt.Errorf("Duplicate key: \"%s\" found of count: \"%d\"", key, count)
}
}
// No duplicates.
return nil
}
// splits network path into its components, address and path.
func splitNetPath(networkPath string) (netAddr, netPath string, err error) {
if runtime.GOOS == "windows" {
if volumeName := filepath.VolumeName(networkPath); volumeName != "" {
return "", networkPath, nil
}
}
networkParts := strings.SplitN(networkPath, ":", 2)
if len(networkParts) == 1 {
return "", networkPath, nil
}
if networkParts[1] == "" {
return "", "", &net.AddrError{Err: "Missing path in network path", Addr: networkPath}
} else if networkParts[0] == "" {
return "", "", &net.AddrError{Err: "Missing address in network path", Addr: networkPath}
} else if !filepath.IsAbs(networkParts[1]) {
return "", "", &net.AddrError{Err: "Network path should be absolute", Addr: networkPath}
}
return networkParts[0], networkParts[1], nil
}
// xmlDecoder provide decoded value in xml. // xmlDecoder provide decoded value in xml.
func xmlDecoder(body io.Reader, v interface{}, size int64) error { func xmlDecoder(body io.Reader, v interface{}, size int64) error {
var lbody io.Reader var lbody io.Reader

61
cmd/utils_nix_test.go Normal file
View File

@ -0,0 +1,61 @@
// +build !windows
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net"
"testing"
)
// Test for splitNetPath
func TestSplitNetPath(t *testing.T) {
testCases := []struct {
networkPath string
netAddr string
netPath string
err error
}{
// Invalid cases 1-5.
{"10.1.10.1:", "", "", &net.AddrError{Err: "Missing path in network path", Addr: "10.1.10.1:"}},
{"10.1.10.1:../1", "", "", &net.AddrError{Err: "Network path should be absolute", Addr: "10.1.10.1:../1"}},
{":/tmp/1", "", "", &net.AddrError{Err: "Missing address in network path", Addr: ":/tmp/1"}},
{"10.1.10.1:disk/1", "", "", &net.AddrError{Err: "Network path should be absolute", Addr: "10.1.10.1:disk/1"}},
{"10.1.10.1:\\path\\test", "", "", &net.AddrError{Err: "Network path should be absolute", Addr: "10.1.10.1:\\path\\test"}},
// Valid cases 6-8
{"10.1.10.1", "", "10.1.10.1", nil},
{"10.1.10.1://", "10.1.10.1", "//", nil},
{"10.1.10.1:/disk/1", "10.1.10.1", "/disk/1", nil},
}
for i, test := range testCases {
receivedAddr, receivedPath, receivedErr := splitNetPath(test.networkPath)
if receivedAddr != test.netAddr {
t.Errorf("Test case %d: Expected: %s, Received: %s", i+1, test.netAddr, receivedAddr)
}
if receivedPath != test.netPath {
t.Errorf("Test case %d: Expected: %s, Received: %s", i+1, test.netPath, receivedPath)
}
if test.err != nil {
if receivedErr == nil || receivedErr.Error() != test.err.Error() {
t.Errorf("Test case %d: Expected: %v, Received: %v", i+1, test.err, receivedErr)
}
}
}
}

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"fmt"
"net/http" "net/http"
"reflect" "reflect"
"testing" "testing"
@ -46,6 +47,57 @@ func TestCloneHeader(t *testing.T) {
} }
} }
// Tests check duplicates function.
func TestCheckDuplicates(t *testing.T) {
tests := []struct {
list []string
err error
shouldPass bool
}{
// Test 1 - for '/tmp/1' repeated twice.
{
list: []string{"/tmp/1", "/tmp/1", "/tmp/2", "/tmp/3"},
err: fmt.Errorf("Duplicate key: \"/tmp/1\" found of count: \"2\""),
shouldPass: false,
},
// Test 2 - for '/tmp/1' repeated thrice.
{
list: []string{"/tmp/1", "/tmp/1", "/tmp/1", "/tmp/3"},
err: fmt.Errorf("Duplicate key: \"/tmp/1\" found of count: \"3\""),
shouldPass: false,
},
// Test 3 - empty string.
{
list: []string{""},
err: errInvalidArgument,
shouldPass: false,
},
// Test 4 - nil list.
{
list: nil,
err: errInvalidArgument,
shouldPass: false,
},
// Test 5 - non repeated strings.
{
list: []string{"/tmp/1", "/tmp/2", "/tmp/3"},
err: nil,
shouldPass: true,
},
}
// Validate if function runs as expected.
for i, test := range tests {
err := checkDuplicates(test.list)
if test.shouldPass && err != test.err {
t.Errorf("Test: %d, Expected %s got %s", i+1, test.err, err)
}
if !test.shouldPass && err.Error() != test.err.Error() {
t.Errorf("Test: %d, Expected %s got %s", i+1, test.err, err)
}
}
}
// Tests maximum object size. // Tests maximum object size.
func TestMaxObjectSize(t *testing.T) { func TestMaxObjectSize(t *testing.T) {
sizes := []struct { sizes := []struct {

70
cmd/utils_windows_test.go Normal file
View File

@ -0,0 +1,70 @@
// +build windows
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net"
"testing"
)
// Test for splitNetPath
func TestSplitNetPath(t *testing.T) {
testCases := []struct {
networkPath string
netAddr string
netPath string
err error
}{
// Invalid cases 1-8.
{":C:", "", "", &net.AddrError{Err: "Missing address in network path", Addr: ":C:"}},
{"10.1.10.1:", "", "", &net.AddrError{Err: "Missing path in network path", Addr: "10.1.10.1:"}},
{"10.1.10.1:C", "", "", &net.AddrError{Err: "Network path should be absolute", Addr: "10.1.10.1:C"}},
{"10.1.10.1:C:", "", "", &net.AddrError{Err: "Network path should be absolute", Addr: "10.1.10.1:C:"}},
{"10.1.10.1:C:../path", "", "", &net.AddrError{Err: "Network path should be absolute", Addr: "10.1.10.1:C:../path"}},
{"10.1.10.1:C:tmp/1", "", "", &net.AddrError{Err: "Network path should be absolute", Addr: "10.1.10.1:C:tmp/1"}},
{"10.1.10.1::C:\\path\\test", "", "", &net.AddrError{
Err: "Network path should be absolute",
Addr: "10.1.10.1::C:\\path\\test",
}},
{"10.1.10.1:\\path\\test", "", "", &net.AddrError{
Err: "Network path should be absolute",
Addr: "10.1.10.1:\\path\\test",
}},
// Valid cases 9-11.
{"10.1.10.1:C:\\path\\test", "10.1.10.1", "C:\\path\\test", nil},
{"C:\\path\\test", "", "C:\\path\\test", nil},
{`10.1.10.1:\\?\UNC\path\test`, "10.1.10.1", `\\?\UNC\path\test`, nil},
}
for i, test := range testCases {
receivedAddr, receivedPath, receivedErr := splitNetPath(test.networkPath)
if receivedAddr != test.netAddr {
t.Errorf("Test case %d: Expected: %s, Received: %s", i+1, test.netAddr, receivedAddr)
}
if receivedPath != test.netPath {
t.Errorf("Test case %d: Expected: %s, Received: %s", i+1, test.netPath, receivedPath)
}
if test.err != nil {
if receivedErr == nil || receivedErr.Error() != test.err.Error() {
t.Errorf("Test case %d: Expected: %v, Received: %v", i+1, test.err, receivedErr)
}
}
}
}

View File

@ -25,15 +25,25 @@ var versionCmd = cli.Command{
Name: "version", Name: "version",
Usage: "Print version.", Usage: "Print version.",
Action: mainVersion, Action: mainVersion,
Flags: globalFlags,
CustomHelpTemplate: `NAME: CustomHelpTemplate: `NAME:
minio {{.Name}} - {{.Usage}} minio {{.Name}} - {{.Usage}}
USAGE: USAGE:
minio {{.Name}} {{if .Description}} minio {{.Name}}
FLAGS:
{{range .Flags}}{{.}}
{{end}}
`, `,
} }
func mainVersion(ctx *cli.Context) { func mainVersion(ctx *cli.Context) {
if len(ctx.Args()) != 0 {
cli.ShowCommandHelpAndExit(ctx, "version", 1)
}
console.Println("Version: " + Version) console.Println("Version: " + Version)
console.Println("Release-Tag: " + ReleaseTag) console.Println("Release-Tag: " + ReleaseTag)
console.Println("Commit-ID: " + CommitID) console.Println("Commit-ID: " + CommitID)

View File

@ -19,6 +19,7 @@ package cmd
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -40,7 +41,7 @@ import (
// isJWTReqAuthenticated validates if any incoming request to be a // isJWTReqAuthenticated validates if any incoming request to be a
// valid JWT authenticated request. // valid JWT authenticated request.
func isJWTReqAuthenticated(req *http.Request) bool { func isJWTReqAuthenticated(req *http.Request) bool {
jwt, err := newJWT() jwt, err := newJWT(defaultWebTokenExpiry)
if err != nil { if err != nil {
errorIf(err, "unable to initialize a new JWT") errorIf(err, "unable to initialize a new JWT")
return false return false
@ -124,7 +125,11 @@ func (web *webAPIHandlers) StorageInfo(r *http.Request, args *GenericArgs, reply
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
reply.UIVersion = miniobrowser.UIVersion reply.UIVersion = miniobrowser.UIVersion
reply.StorageInfo = web.ObjectAPI.StorageInfo() objectAPI := web.ObjectAPI()
if objectAPI == nil {
return &json2.Error{Message: "Volume not found"}
}
reply.StorageInfo = objectAPI.StorageInfo()
return nil return nil
} }
@ -139,7 +144,11 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
reply.UIVersion = miniobrowser.UIVersion reply.UIVersion = miniobrowser.UIVersion
if err := web.ObjectAPI.MakeBucket(args.BucketName); err != nil { objectAPI := web.ObjectAPI()
if objectAPI == nil {
return &json2.Error{Message: "Volume not found"}
}
if err := objectAPI.MakeBucket(args.BucketName); err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
return nil return nil
@ -164,7 +173,11 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
if !isJWTReqAuthenticated(r) { if !isJWTReqAuthenticated(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
buckets, err := web.ObjectAPI.ListBuckets() objectAPI := web.ObjectAPI()
if objectAPI == nil {
return &json2.Error{Message: "Volume not found"}
}
buckets, err := objectAPI.ListBuckets()
if err != nil { if err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
@ -212,7 +225,11 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
for { for {
lo, err := web.ObjectAPI.ListObjects(args.BucketName, args.Prefix, marker, "/", 1000) objectAPI := web.ObjectAPI()
if objectAPI == nil {
return &json2.Error{Message: "Volume not found"}
}
lo, err := objectAPI.ListObjects(args.BucketName, args.Prefix, marker, "/", 1000)
if err != nil { if err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
@ -250,7 +267,11 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs,
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
reply.UIVersion = miniobrowser.UIVersion reply.UIVersion = miniobrowser.UIVersion
if err := web.ObjectAPI.DeleteObject(args.BucketName, args.ObjectName); err != nil { objectAPI := web.ObjectAPI()
if objectAPI == nil {
return &json2.Error{Message: "Volume not found"}
}
if err := objectAPI.DeleteObject(args.BucketName, args.ObjectName); err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
return nil return nil
@ -268,9 +289,14 @@ type LoginRep struct {
UIVersion string `json:"uiVersion"` UIVersion string `json:"uiVersion"`
} }
// Default JWT for minio browser expires in 24hrs.
const (
defaultWebTokenExpiry time.Duration = time.Hour * 24 // 24Hrs.
)
// Login - user login handler. // Login - user login handler.
func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginRep) error { func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginRep) error {
jwt, err := newJWT() jwt, err := newJWT(defaultWebTokenExpiry)
if err != nil { if err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
@ -335,7 +361,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
jwt, err := newJWT() jwt, err := newJWT(defaultWebTokenExpiry) // JWT Expiry set to 24Hrs.
if err != nil { if err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
@ -384,13 +410,18 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
// Extract incoming metadata if any. // Extract incoming metadata if any.
metadata := extractMetadataFromHeader(r.Header) metadata := extractMetadataFromHeader(r.Header)
if _, err := web.ObjectAPI.PutObject(bucket, object, -1, r.Body, metadata); err != nil { objectAPI := web.ObjectAPI()
if objectAPI == nil {
writeWebErrorResponse(w, errors.New("Volume not found"))
return
}
if _, err := objectAPI.PutObject(bucket, object, -1, r.Body, metadata); err != nil {
writeWebErrorResponse(w, err) writeWebErrorResponse(w, err)
return return
} }
// Fetch object info for notifications. // Fetch object info for notifications.
objInfo, err := web.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err := objectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err, "Unable to fetch object info for \"%s\"", path.Join(bucket, object)) errorIf(err, "Unable to fetch object info for \"%s\"", path.Join(bucket, object))
return return
@ -416,7 +447,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
object := vars["object"] object := vars["object"]
tokenStr := r.URL.Query().Get("token") tokenStr := r.URL.Query().Get("token")
jwt, err := newJWT() jwt, err := newJWT(defaultWebTokenExpiry) // Expiry set to 24Hrs.
if err != nil { if err != nil {
errorIf(err, "error in getting new JWT") errorIf(err, "error in getting new JWT")
return return
@ -435,13 +466,18 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
// Add content disposition. // Add content disposition.
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object))) w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object)))
objInfo, err := web.ObjectAPI.GetObjectInfo(bucket, object) objectAPI := web.ObjectAPI()
if objectAPI == nil {
writeWebErrorResponse(w, errors.New("Volume not found"))
return
}
objInfo, err := objectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
writeWebErrorResponse(w, err) writeWebErrorResponse(w, err)
return return
} }
offset := int64(0) offset := int64(0)
err = web.ObjectAPI.GetObject(bucket, object, offset, objInfo.Size, w) err = objectAPI.GetObject(bucket, object, offset, objInfo.Size, w)
if err != nil { if err != nil {
/// No need to print error, response writer already written to. /// No need to print error, response writer already written to.
return return
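
The Download handler forces a browser download by deriving the suggested file name from the object key. A self-contained illustration of that one idiom (the httptest recorder is used only so the snippet runs standalone):

package main

import (
	"fmt"
	"net/http/httptest"
	"path"
)

func main() {
	object := "photos/2016/beach.jpg"
	w := httptest.NewRecorder()
	// Same idiom as the handler: attachment disposition with the
	// object's base name, so the browser saves "beach.jpg".
	w.Header().Set("Content-Disposition",
		fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object)))
	fmt.Println(w.Header().Get("Content-Disposition"))
}
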
@ -529,7 +565,11 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
policyInfo, err := readBucketAccessPolicy(web.ObjectAPI, args.BucketName) objectAPI := web.ObjectAPI()
if objectAPI == nil {
return &json2.Error{Message: "Server not initialized"}
}
policyInfo, err := readBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil { if err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
@ -560,7 +600,11 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
return &json2.Error{Message: "Invalid policy " + args.Policy} return &json2.Error{Message: "Invalid policy " + args.Policy}
} }
policyInfo, err := readBucketAccessPolicy(web.ObjectAPI, args.BucketName) objectAPI := web.ObjectAPI()
if objectAPI == nil {
return &json2.Error{Message: "Server not initialized"}
}
policyInfo, err := readBucketAccessPolicy(objectAPI, args.BucketName)
if err != nil { if err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
@ -573,7 +617,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
} }
// TODO: update policy statements according to bucket name, prefix and policy arguments. // TODO: update policy statements according to bucket name, prefix and policy arguments.
if err := writeBucketPolicy(args.BucketName, web.ObjectAPI, bytes.NewReader(data), int64(len(data))); err != nil { if err := writeBucketPolicy(args.BucketName, objectAPI, bytes.NewReader(data), int64(len(data))); err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
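
writeBucketPolicy takes a reader plus an explicit size, so the handler marshals the policy first and passes both. A minimal stand-alone sketch of that call-site shape; the policy struct below is a stand-in, not the vendored type:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// bucketAccessPolicy is a stand-in for the vendored policy type.
type bucketAccessPolicy struct {
	Version   string        `json:"Version"`
	Statement []interface{} `json:"Statement"`
}

func main() {
	policyInfo := bucketAccessPolicy{Version: "2012-10-17"}
	data, err := json.Marshal(&policyInfo)
	if err != nil {
		fmt.Println(err)
		return
	}
	// The object layer needs the size up front, hence the pair
	// bytes.NewReader(data), int64(len(data)) in the handler above.
	r, size := bytes.NewReader(data), int64(len(data))
	fmt.Println(size, r.Size())
}
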

View File

@ -30,7 +30,7 @@ import (
// webAPI container for Web API. // webAPI container for Web API.
type webAPIHandlers struct { type webAPIHandlers struct {
ObjectAPI ObjectLayer ObjectAPI func() ObjectLayer
} }
// indexHandler - Handler to serve index.html // indexHandler - Handler to serve index.html

View File

@ -28,15 +28,15 @@ import (
func (xl xlObjects) MakeBucket(bucket string) error { func (xl xlObjects) MakeBucket(bucket string) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return BucketNameInvalid{Bucket: bucket} return traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if bucket is found.
if xl.isBucketExist(bucket) {
return toObjectErr(errVolumeExists, bucket)
} }
nsMutex.Lock(bucket, "") // generates random string on setting MINIO_DEBUG=lock, else returns empty string.
defer nsMutex.Unlock(bucket, "") // used for instrumentation on locks.
opsID := getOpsID()
nsMutex.Lock(bucket, "", opsID)
defer nsMutex.Unlock(bucket, "", opsID)
// Initialize sync waitgroup. // Initialize sync waitgroup.
var wg = &sync.WaitGroup{} var wg = &sync.WaitGroup{}
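
Per the comments above, the opsID threaded through nsMutex is only generated when lock debugging is enabled. A sketch matching that documented contract; getOpsID's real body is not part of this diff:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"os"
)

// getOpsIDSketch mirrors the documented behaviour: random string when
// MINIO_DEBUG=lock is set, empty string otherwise.
func getOpsIDSketch() string {
	if os.Getenv("MINIO_DEBUG") != "lock" {
		return ""
	}
	var b [8]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "" // fall back to no instrumentation
	}
	return hex.EncodeToString(b[:])
}

func main() {
	os.Setenv("MINIO_DEBUG", "lock")
	fmt.Printf("opsID=%q\n", getOpsIDSketch())
}
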
@ -47,7 +47,7 @@ func (xl xlObjects) MakeBucket(bucket string) error {
// Make a volume entry on all underlying storage disks. // Make a volume entry on all underlying storage disks.
for index, disk := range xl.storageDisks { for index, disk := range xl.storageDisks {
if disk == nil { if disk == nil {
dErrs[index] = errDiskNotFound dErrs[index] = traceError(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -56,7 +56,7 @@ func (xl xlObjects) MakeBucket(bucket string) error {
defer wg.Done() defer wg.Done()
err := disk.MakeVol(bucket) err := disk.MakeVol(bucket)
if err != nil { if err != nil {
dErrs[index] = err dErrs[index] = traceError(err)
} }
}(index, disk) }(index, disk)
} }
@ -68,7 +68,7 @@ func (xl xlObjects) MakeBucket(bucket string) error {
if !isDiskQuorum(dErrs, xl.writeQuorum) { if !isDiskQuorum(dErrs, xl.writeQuorum) {
// Purge successfully created buckets if we don't have writeQuorum. // Purge successfully created buckets if we don't have writeQuorum.
xl.undoMakeBucket(bucket) xl.undoMakeBucket(bucket)
return toObjectErr(errXLWriteQuorum, bucket) return toObjectErr(traceError(errXLWriteQuorum), bucket)
} }
// Verify we have any other errors which should undo make bucket. // Verify we have any other errors which should undo make bucket.
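
isDiskQuorum is not expanded in this diff; one plausible shape counts nil entries in the per-disk error slice against the quorum:

package main

import (
	"errors"
	"fmt"
)

// isDiskQuorumSketch: nil entries are successes; quorum is met when
// enough disks succeeded. writeQuorum is typically disks/2+1.
func isDiskQuorumSketch(dErrs []error, quorum int) bool {
	ok := 0
	for _, err := range dErrs {
		if err == nil {
			ok++
		}
	}
	return ok >= quorum
}

func main() {
	errDiskNotFound := errors.New("disk not found")
	dErrs := []error{nil, nil, errDiskNotFound, nil}
	fmt.Println(isDiskQuorumSketch(dErrs, 3)) // true: 3 of 4 disks succeeded
}
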
@ -146,6 +146,7 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
} }
return bucketInfo, nil return bucketInfo, nil
} }
err = traceError(err)
// For any reason disk went offline continue and pick the next one. // For any reason disk went offline continue and pick the next one.
if isErrIgnored(err, bucketMetadataOpIgnoredErrs) { if isErrIgnored(err, bucketMetadataOpIgnoredErrs) {
continue continue
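
traceError and errorCause recur throughout these hunks: sentinel errors get wrapped with their call site on the way out and unwrapped again before comparison. A compilable approximation of the pair; the type and field names are assumptions:

package main

import (
	"errors"
	"fmt"
	"runtime"
)

var errVolumeNotFound = errors.New("volume not found")

// tracedError pairs the underlying sentinel with the call site that
// wrapped it.
type tracedError struct {
	cause error
	frame string
}

func (t *tracedError) Error() string { return t.frame + ": " + t.cause.Error() }

// traceErrorSketch wraps err with the file:line of the caller.
func traceErrorSketch(err error) error {
	if err == nil {
		return nil
	}
	_, file, line, _ := runtime.Caller(1)
	return &tracedError{cause: err, frame: fmt.Sprintf("%s:%d", file, line)}
}

// errorCauseSketch unwraps before sentinel comparison, as the hunks
// above do with errorCause(err) == errVolumeNotFound.
func errorCauseSketch(err error) error {
	if t, ok := err.(*tracedError); ok {
		return t.cause
	}
	return err
}

func main() {
	err := traceErrorSketch(errVolumeNotFound)
	fmt.Println(err)                                        // cause plus call site
	fmt.Println(errorCauseSketch(err) == errVolumeNotFound) // true
}
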
@ -157,16 +158,12 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
// Checks whether bucket exists. // Checks whether bucket exists.
func (xl xlObjects) isBucketExist(bucket string) bool { func (xl xlObjects) isBucketExist(bucket string) bool {
nsMutex.RLock(bucket, "")
defer nsMutex.RUnlock(bucket, "")
// Check whether bucket exists. // Check whether bucket exists.
_, err := xl.getBucketInfo(bucket) _, err := xl.getBucketInfo(bucket)
if err != nil { if err != nil {
if err == errVolumeNotFound { if err == errVolumeNotFound {
return false return false
} }
errorIf(err, "Stat failed on bucket "+bucket+".")
return false return false
} }
return true return true
@ -178,8 +175,12 @@ func (xl xlObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return BucketInfo{}, BucketNameInvalid{Bucket: bucket} return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
} }
nsMutex.RLock(bucket, "") // generates random string on setting MINIO_DEBUG=lock, else returns empty string.
defer nsMutex.RUnlock(bucket, "") // used for instrumentation on locks.
opsID := getOpsID()
nsMutex.RLock(bucket, "", opsID)
defer nsMutex.RUnlock(bucket, "", opsID)
bucketInfo, err := xl.getBucketInfo(bucket) bucketInfo, err := xl.getBucketInfo(bucket)
if err != nil { if err != nil {
return BucketInfo{}, toObjectErr(err, bucket) return BucketInfo{}, toObjectErr(err, bucket)
@ -249,13 +250,13 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return BucketNameInvalid{Bucket: bucket} return BucketNameInvalid{Bucket: bucket}
} }
// Verify if bucket is found.
if !xl.isBucketExist(bucket) {
return BucketNotFound{Bucket: bucket}
}
nsMutex.Lock(bucket, "") // generates random string on setting MINIO_DEBUG=lock, else returns empty string.
defer nsMutex.Unlock(bucket, "") // used for instrumentation on locks.
opsID := getOpsID()
nsMutex.Lock(bucket, "", opsID)
defer nsMutex.Unlock(bucket, "", opsID)
// Collect if all disks report volume not found. // Collect if all disks report volume not found.
var wg = &sync.WaitGroup{} var wg = &sync.WaitGroup{}
@ -264,7 +265,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
// Remove a volume entry on all underlying storage disks. // Remove a volume entry on all underlying storage disks.
for index, disk := range xl.storageDisks { for index, disk := range xl.storageDisks {
if disk == nil { if disk == nil {
dErrs[index] = errDiskNotFound dErrs[index] = traceError(errDiskNotFound)
continue continue
} }
wg.Add(1) wg.Add(1)
@ -274,12 +275,15 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
// Attempt to delete bucket. // Attempt to delete bucket.
err := disk.DeleteVol(bucket) err := disk.DeleteVol(bucket)
if err != nil { if err != nil {
dErrs[index] = err dErrs[index] = traceError(err)
return return
} }
// Cleanup all the previously incomplete multiparts. // Cleanup all the previously incomplete multiparts.
err = cleanupDir(disk, path.Join(minioMetaBucket, mpartMetaPrefix), bucket) err = cleanupDir(disk, path.Join(minioMetaBucket, mpartMetaPrefix), bucket)
if err != nil && err != errVolumeNotFound { if err != nil {
if errorCause(err) == errVolumeNotFound {
return
}
dErrs[index] = err dErrs[index] = err
} }
}(index, disk) }(index, disk)
@ -290,7 +294,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
if !isDiskQuorum(dErrs, xl.writeQuorum) { if !isDiskQuorum(dErrs, xl.writeQuorum) {
xl.undoDeleteBucket(bucket) xl.undoDeleteBucket(bucket)
return toObjectErr(errXLWriteQuorum, bucket) return toObjectErr(traceError(errXLWriteQuorum), bucket)
} }
if reducedErr := reduceErrs(dErrs, []error{ if reducedErr := reduceErrs(dErrs, []error{
@ -300,5 +304,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
}); reducedErr != nil { }); reducedErr != nil {
return toObjectErr(reducedErr, bucket) return toObjectErr(reducedErr, bucket)
} }
// Success.
return nil return nil
} }
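
reduceErrs collapses the per-disk error slice into one representative error while skipping an allow-list; its body is not part of this diff, so the following is a sketch of one reasonable reduction (most frequent non-ignored error wins):

package main

import (
	"errors"
	"fmt"
)

func containsErr(list []error, err error) bool {
	for _, e := range list {
		if e == err {
			return true
		}
	}
	return false
}

// reduceErrsSketch: drop nil and ignored entries, return the error
// that occurred on the most disks, or nil if none remain.
func reduceErrsSketch(errs []error, ignored []error) error {
	counts := make(map[error]int)
	for _, err := range errs {
		if err == nil || containsErr(ignored, err) {
			continue
		}
		counts[err]++
	}
	var top error
	for err, n := range counts {
		if top == nil || n > counts[top] {
			top = err
		}
	}
	return top
}

func main() {
	errDiskNotFound := errors.New("disk not found")
	errFaulty := errors.New("disk faulty")
	dErrs := []error{errFaulty, errFaulty, errDiskNotFound, nil}
	fmt.Println(reduceErrsSketch(dErrs, []error{errDiskNotFound})) // disk faulty
}
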

View File

@ -27,6 +27,9 @@ var errXLMinDisks = errors.New("Minimum '4' disks are required to enable erasure
// errXLNumDisks - returned for odd number of disks. // errXLNumDisks - returned for odd number of disks.
var errXLNumDisks = errors.New("Total number of disks should be multiples of '2'") var errXLNumDisks = errors.New("Total number of disks should be multiples of '2'")
// errXLDuplicateArguments - returned for duplicate disks.
var errXLDuplicateArguments = errors.New("Duplicate disks found.")
// errXLReadQuorum - did not meet read quorum. // errXLReadQuorum - did not meet read quorum.
var errXLReadQuorum = errors.New("Read failed. Insufficient number of disks online") var errXLReadQuorum = errors.New("Read failed. Insufficient number of disks online")
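
One way the new errXLDuplicateArguments above would be raised: scan the disk argument list for repeats before wiring up the erasure set. The function name here is hypothetical:

package main

import (
	"errors"
	"fmt"
)

var errXLDuplicateArguments = errors.New("Duplicate disks found.")

// checkDuplicateDisksSketch rejects an argument list with repeats.
func checkDuplicateDisksSketch(disks []string) error {
	seen := make(map[string]struct{}, len(disks))
	for _, d := range disks {
		if _, ok := seen[d]; ok {
			return errXLDuplicateArguments
		}
		seen[d] = struct{}{}
	}
	return nil
}

func main() {
	fmt.Println(checkDuplicateDisksSketch([]string{"/d1", "/d2", "/d1"}))
}
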

View File

@ -134,11 +134,11 @@ func xlLatestMetadata(partsMetadata []xlMetaV1, errs []error) (latestMeta xlMeta
func xlShouldHeal(partsMetadata []xlMetaV1, errs []error) bool { func xlShouldHeal(partsMetadata []xlMetaV1, errs []error) bool {
modTime := commonTime(listObjectModtimes(partsMetadata, errs)) modTime := commonTime(listObjectModtimes(partsMetadata, errs))
for index := range partsMetadata { for index := range partsMetadata {
if errs[index] == errFileNotFound { if errs[index] == errDiskNotFound {
return true continue
} }
if errs[index] != nil { if errs[index] != nil {
continue return true
} }
if modTime != partsMetadata[index].Stat.ModTime { if modTime != partsMetadata[index].Stat.ModTime {
return true return true

View File

@ -137,12 +137,23 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
result.Prefixes = append(result.Prefixes, objInfo.Name) result.Prefixes = append(result.Prefixes, objInfo.Name)
continue continue
} }
result.Objects = append(result.Objects, ObjectInfo{
Name: objInfo.Name, // generates random string on setting MINIO_DEBUG=lock, else returns empty string.
ModTime: objInfo.ModTime, // used for instrumentation on locks.
Size: objInfo.Size, opsID := getOpsID()
IsDir: false,
}) // Check if the current object needs healing
nsMutex.RLock(bucket, objInfo.Name, opsID)
partsMetadata, errs := readAllXLMetadata(xl.storageDisks, bucket, objInfo.Name)
if xlShouldHeal(partsMetadata, errs) {
result.Objects = append(result.Objects, ObjectInfo{
Name: objInfo.Name,
ModTime: objInfo.ModTime,
Size: objInfo.Size,
IsDir: false,
})
}
nsMutex.RUnlock(bucket, objInfo.Name, opsID)
} }
return result, nil return result, nil
} }
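
listObjectsHeal now takes a per-object read lock before inspecting metadata. nsMutex itself is not shown in this diff; a toy namespace locker with the same (volume, path) granularity, minus the opsID bookkeeping:

package main

import (
	"fmt"
	"sync"
)

// nsLockerSketch hands out one RWMutex per (volume, path) pair, the
// granularity nsMutex.RLock(bucket, object, opsID) works at.
type nsLockerSketch struct {
	mu    sync.Mutex
	locks map[string]*sync.RWMutex
}

func newNSLockerSketch() *nsLockerSketch {
	return &nsLockerSketch{locks: make(map[string]*sync.RWMutex)}
}

func (n *nsLockerSketch) get(volume, path string) *sync.RWMutex {
	n.mu.Lock()
	defer n.mu.Unlock()
	key := volume + "/" + path
	if n.locks[key] == nil {
		n.locks[key] = &sync.RWMutex{}
	}
	return n.locks[key]
}

func main() {
	ns := newNSLockerSketch()
	l := ns.get("mybucket", "myobject")
	l.RLock() // reader: inspect object metadata across disks
	fmt.Println("metadata read under per-object lock")
	l.RUnlock()
}
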
@ -151,28 +162,28 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ListObjectsInfo{}, BucketNameInvalid{Bucket: bucket} return ListObjectsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
} }
// Verify if bucket exists. // Verify if bucket exists.
if !xl.isBucketExist(bucket) { if !xl.isBucketExist(bucket) {
return ListObjectsInfo{}, BucketNotFound{Bucket: bucket} return ListObjectsInfo{}, traceError(BucketNotFound{Bucket: bucket})
} }
if !IsValidObjectPrefix(prefix) { if !IsValidObjectPrefix(prefix) {
return ListObjectsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix} return ListObjectsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
} }
// Verify if delimiter is anything other than '/', which we do not support. // Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator { if delimiter != "" && delimiter != slashSeparator {
return ListObjectsInfo{}, UnsupportedDelimiter{ return ListObjectsInfo{}, traceError(UnsupportedDelimiter{
Delimiter: delimiter, Delimiter: delimiter,
} })
} }
// Verify if marker has prefix. // Verify if marker has prefix.
if marker != "" { if marker != "" {
if !strings.HasPrefix(marker, prefix) { if !strings.HasPrefix(marker, prefix) {
return ListObjectsInfo{}, InvalidMarkerPrefixCombination{ return ListObjectsInfo{}, traceError(InvalidMarkerPrefixCombination{
Marker: marker, Marker: marker,
Prefix: prefix, Prefix: prefix,
} })
} }
} }

View File

@ -48,7 +48,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
// For any walk error return right away. // For any walk error return right away.
if walkResult.err != nil { if walkResult.err != nil {
// File not found is a valid case. // File not found is a valid case.
if walkResult.err == errFileNotFound { if errorCause(walkResult.err) == errFileNotFound {
return ListObjectsInfo{}, nil return ListObjectsInfo{}, nil
} }
return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix) return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix)
@ -66,8 +66,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
objInfo, err = xl.getObjectInfo(bucket, entry) objInfo, err = xl.getObjectInfo(bucket, entry)
if err != nil { if err != nil {
// Ignore errFileNotFound // Ignore errFileNotFound
if err == errFileNotFound { if errorCause(err) == errFileNotFound {
errorIf(err, "Unable to get object info", bucket, entry)
continue continue
} }
return ListObjectsInfo{}, toObjectErr(err, bucket, prefix) return ListObjectsInfo{}, toObjectErr(err, bucket, prefix)
@ -109,28 +108,28 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ListObjectsInfo{}, BucketNameInvalid{Bucket: bucket} return ListObjectsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
} }
// Verify if bucket exists. // Verify if bucket exists.
if !xl.isBucketExist(bucket) { if !xl.isBucketExist(bucket) {
return ListObjectsInfo{}, BucketNotFound{Bucket: bucket} return ListObjectsInfo{}, traceError(BucketNotFound{Bucket: bucket})
} }
if !IsValidObjectPrefix(prefix) { if !IsValidObjectPrefix(prefix) {
return ListObjectsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix} return ListObjectsInfo{}, traceError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
} }
// Verify if delimiter is anything other than '/', which we do not support. // Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != slashSeparator { if delimiter != "" && delimiter != slashSeparator {
return ListObjectsInfo{}, UnsupportedDelimiter{ return ListObjectsInfo{}, traceError(UnsupportedDelimiter{
Delimiter: delimiter, Delimiter: delimiter,
} })
} }
// Verify if marker has prefix. // Verify if marker has prefix.
if marker != "" { if marker != "" {
if !strings.HasPrefix(marker, prefix) { if !strings.HasPrefix(marker, prefix) {
return ListObjectsInfo{}, InvalidMarkerPrefixCombination{ return ListObjectsInfo{}, traceError(InvalidMarkerPrefixCombination{
Marker: marker, Marker: marker,
Prefix: prefix, Prefix: prefix,
} })
} }
} }
