mirror of https://github.com/minio/minio.git
server: Add more elaborate startup messages. (#2731)
These messages are based on our prep stage during XL startup and print a more informative message regarding drive information. This change also does a much-needed refactoring.
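The refactoring visible throughout this diff replaces handler structs that captured a concrete ObjectLayer at registration time with structs that hold a getter, so each request resolves the most recently initialized layer. A minimal sketch of the pattern, assuming newObjectLayerFn is the global accessor this commit wires in (the handler method name below is hypothetical):

    // The handlers keep a getter instead of a concrete layer, so the layer
    // can be (re)initialized after the routers are already registered.
    type objectAPIHandlers struct {
        ObjectAPI func() ObjectLayer
    }

    // exampleHandler is a hypothetical method showing the per-request guard.
    func (api objectAPIHandlers) exampleHandler() error {
        objAPI := api.ObjectAPI()
        if objAPI == nil {
            return errServerNotInitialized // layer not ready yet
        }
        // ... operate on objAPI ...
        return nil
    }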
This commit is contained in:
  parent 63a7ca1af0
  commit 6494b77d41
@@ -645,7 +645,3 @@ func getAPIErrorResponse(err APIError, resource string) APIErrorResponse {
 	return data
 }
 
-func getErrMalformedCredentialDate(malformedDateStr string) {
-
-}
-

@@ -24,7 +24,12 @@ type objectAPIHandlers struct {
 }
 
 // registerAPIRouter - registers S3 compatible APIs.
-func registerAPIRouter(mux *router.Router, api objectAPIHandlers) {
+func registerAPIRouter(mux *router.Router) {
+	// Initialize API.
+	api := objectAPIHandlers{
+		ObjectAPI: newObjectLayerFn,
+	}
+
 	// API Router
 	apiRouter := mux.NewRoute().PathPrefix("/").Subrouter()
 
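With the handler struct now built inside registerAPIRouter, callers only hand over the router. A hedged wiring sketch, assuming the router package is gorilla/mux (as the NewRoute().PathPrefix().Subrouter() call above suggests); the surrounding startup code is not part of this diff:

    mux := router.NewRouter()
    // The API handlers resolve the object layer lazily via newObjectLayerFn,
    // so registration can happen before the backend disks are formatted.
    registerAPIRouter(mux)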
@@ -35,7 +35,7 @@ func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error)
 	if err != nil {
 		return nil, nil, err
 	}
-	obj, err := makeTestBackend(disks, instanceType)
+	obj, _, err := initObjectLayer(disks, nil)
 	if err != nil {
 		return nil, nil, err
 	}
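This hunk is the first of many below that swap backend-specific test constructors (makeTestBackend, getXLObjectLayer, getSingleNodeObjectLayer, newFSObjects) for a single initObjectLayer helper. Its definition is not shown in this diff; judging only from the call sites, it takes the disk paths (plus a second argument that is nil everywhere here) and returns the object layer, the underlying storage disks, and an error:

    // Inferred usage; the second return value is frequently discarded.
    obj, storageDisks, err := initObjectLayer(disks, nil)
    if err != nil {
        t.Fatal(err)
    }
    _ = storageDisks // kept only by call sites that need raw disk access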
@@ -9,7 +9,6 @@ import (
 	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
-	"sync"
 	"testing"
 )
 

@@ -157,19 +156,6 @@ func TestSendBucketNotification(t *testing.T) {
 	}
 }
 
-func initMockEventNotifier(objAPI ObjectLayer) error {
-	if objAPI == nil {
-		return errInvalidArgument
-	}
-	globalEventNotifier = &eventNotifier{
-		rwMutex: &sync.RWMutex{},
-		queueTargets: nil,
-		notificationConfigs: make(map[string]*notificationConfig),
-		snsTargets: make(map[string][]chan []NotificationEvent),
-	}
-	return nil
-}
-
 func testGetBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	// get random bucket name.
 	randBucket := getRandomBucketName()
@@ -196,8 +182,8 @@ func testGetBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	// Register the API end points with XL/FS object layer.
 	apiRouter := initTestAPIEndPoints(obj, []string{
-		"GetBucketNotificationHandler",
-		"PutBucketNotificationHandler",
+		"GetBucketNotification",
+		"PutBucketNotification",
 	})
 
 	// initialize the server and obtain the credentials and root.

@@ -212,7 +198,7 @@ func testGetBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	credentials := serverConfig.GetCredential()
 
 	//Initialize global event notifier with mock queue targets.
-	err = initMockEventNotifier(obj)
+	err = initEventNotifier(obj)
 	if err != nil {
 		t.Fatalf("Test %s: Failed to initialize mock event notifier %v",
 			instanceType, err)

@@ -304,8 +290,8 @@ func testGetBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	// Nil Object layer
 	nilAPIRouter := initTestAPIEndPoints(nil, []string{
-		"GetBucketNotificationHandler",
-		"PutBucketNotificationHandler",
+		"GetBucketNotification",
+		"PutBucketNotification",
 	})
 	testRec := httptest.NewRecorder()
 	testReq, tErr := newTestSignedRequestV4("GET", getGetBucketNotificationURL("", randBucket),

@@ -344,8 +330,8 @@ func testPutBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	// Register the API end points with XL/FS object layer.
 	apiRouter := initTestAPIEndPoints(obj, []string{
-		"GetBucketNotificationHandler",
-		"PutBucketNotificationHandler",
+		"GetBucketNotification",
+		"PutBucketNotification",
 	})
 
 	// initialize the server and obtain the credentials and root.

@@ -360,7 +346,7 @@ func testPutBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	credentials := serverConfig.GetCredential()
 
 	//Initialize global event notifier with mock queue targets.
-	err = initMockEventNotifier(obj)
+	err = initEventNotifier(obj)
 	if err != nil {
 		t.Fatalf("Test %s: Failed to initialize mock event notifier %v",
 			instanceType, err)
@@ -460,8 +446,8 @@ func testPutBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	// Nil Object layer
 	nilAPIRouter := initTestAPIEndPoints(nil, []string{
-		"GetBucketNotificationHandler",
-		"PutBucketNotificationHandler",
+		"GetBucketNotification",
+		"PutBucketNotification",
 	})
 	testRec := httptest.NewRecorder()
 	testReq, tErr := newTestSignedRequestV4("PUT", getPutBucketNotificationURL("", randBucket),

@@ -503,8 +489,8 @@ func testListenBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	// Register the API end points with XL/FS object layer.
 	apiRouter := initTestAPIEndPoints(obj, []string{
-		"PutBucketNotificationHandler",
-		"ListenBucketNotificationHandler",
+		"PutBucketNotification",
+		"ListenBucketNotification",
 		"PutObject",
 	})
 

@@ -519,8 +505,8 @@ func testListenBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	credentials := serverConfig.GetCredential()
 
-	//Initialize global event notifier with mock queue targets.
-	err = initMockEventNotifier(obj)
+	// Initialize global event notifier with mock queue targets.
+	err = initEventNotifier(obj)
 	if err != nil {
 		t.Fatalf("Test %s: Failed to initialize mock event notifier %v",
 			instanceType, err)
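Across these notification tests the deleted initMockEventNotifier is replaced by the real initEventNotifier, run against the object layer the test harness already prepared:

    // Initialize the global event notifier against the test object layer;
    // the mock variant and the private globals it touched are gone.
    if err := initEventNotifier(obj); err != nil {
        t.Fatalf("Test %s: Failed to initialize event notifier %v", instanceType, err)
    }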
@@ -556,8 +542,8 @@ func testListenBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 		// FIXME: Need to find a way to run valid listen bucket notification test case without blocking the unit test.
 		{randBucket, "", "", invalidEvents, CheckStatus, signatureMismatchError.HTTPStatusCode, ""},
 		{randBucket, tooBigPrefix, "", validEvents, CheckStatus, http.StatusBadRequest, ""},
-		{invalidBucket, "", "", nil, CheckStatus, http.StatusBadRequest, ""},
-		{randBucket, "", "", nil, InvalidAuth, signatureMismatchError.HTTPStatusCode, signatureMismatchError.Code},
+		{invalidBucket, "", "", validEvents, CheckStatus, http.StatusBadRequest, ""},
+		{randBucket, "", "", validEvents, InvalidAuth, signatureMismatchError.HTTPStatusCode, signatureMismatchError.Code},
 	}
 
 	for i, test := range testCases {

@@ -607,8 +593,8 @@ func testListenBucketNotificationHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	// Nil Object layer
 	nilAPIRouter := initTestAPIEndPoints(nil, []string{
-		"PutBucketNotificationHandler",
-		"ListenBucketNotificationHandler",
+		"PutBucketNotification",
+		"ListenBucketNotification",
 	})
 	testRec = httptest.NewRecorder()
 	testReq, tErr = newTestSignedRequestV4("GET",

@@ -647,8 +633,8 @@ func testRemoveNotificationConfig(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	// Register the API end points with XL/FS object layer.
 	apiRouter := initTestAPIEndPoints(obj, []string{
-		"PutBucketNotificationHandler",
-		"ListenBucketNotificationHandler",
+		"PutBucketNotification",
+		"ListenBucketNotification",
 	})
 
 	// initialize the server and obtain the credentials and root.

@@ -663,7 +649,7 @@ func testRemoveNotificationConfig(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	credentials := serverConfig.GetCredential()
 
 	//Initialize global event notifier with mock queue targets.
-	err = initMockEventNotifier(obj)
+	err = initEventNotifier(obj)
 	if err != nil {
 		t.Fatalf("Test %s: Failed to initialize mock event notifier %v",
 			instanceType, err)
@@ -136,27 +136,6 @@ func isMinioSNS(topicARN arnTopic) bool {
 	return strings.HasSuffix(topicARN.Type, snsTypeMinio)
 }
 
-// isMinioSNSConfigured - verifies if one topic ARN is valid and is enabled.
-func isMinioSNSConfigured(topicARN string, topicConfigs []topicConfig) bool {
-	for _, topicConfig := range topicConfigs {
-		// Validate if topic ARN is already enabled.
-		if topicARN == topicConfig.TopicARN {
-			return true
-		}
-	}
-	return false
-}
-
-// Validate if we recognize the queue type.
-func isValidQueue(sqsARN arnSQS) bool {
-	amqpQ := isAMQPQueue(sqsARN) // Is amqp queue?
-	natsQ := isNATSQueue(sqsARN) // Is nats queue?
-	elasticQ := isElasticQueue(sqsARN) // Is elastic queue?
-	redisQ := isRedisQueue(sqsARN) // Is redis queue?
-	postgresQ := isPostgreSQLQueue(sqsARN) // Is postgres queue?
-	return amqpQ || natsQ || elasticQ || redisQ || postgresQ
-}
-
 // Validate if we recognize the topic type.
 func isValidTopic(topicARN arnTopic) bool {
 	return isMinioSNS(topicARN) // Is minio topic?.
@@ -83,15 +83,6 @@ func isConfigFileExists() bool {
 	return false
 }
 
-// mustGetConfigFile must get server config file.
-func mustGetConfigFile() string {
-	configFile, err := getConfigFile()
-	if err != nil {
-		return ""
-	}
-	return configFile
-}
-
 // getConfigFile get server config file.
 func getConfigFile() (string, error) {
 	configPath, err := getConfigPath()
@@ -59,14 +59,6 @@ func TestControlLockMain(t *testing.T) {
 	// schedule cleanup at the end
 	defer testServer.Stop()
 
-	// initializing the locks.
-	initNSLock(false)
-	// set debug lock info to `nil` so that other tests do not see
-	// such modified env settings.
-	defer func() {
-		nsMutex.debugLockMap = nil
-	}()
-
 	// fetch http server endpoint
 	url := testServer.Server.URL
 

@@ -95,29 +87,13 @@ func TestControlShutdownMain(t *testing.T) {
 	// fetch http server endpoint
 	url := testServer.Server.URL
 
-	// create a dummy exit function
-	testExitFn := func(exitCode int) {
-		if exitCode != int(exitSuccess) {
-			t.Errorf("Control-Shutdown-Main test failed - server exited with non-success error code - %d",
-				exitCode)
-		}
-	}
-
-	// initialize the shutdown signal listener
-	err := initGracefulShutdown(testExitFn)
-	if err != nil {
-		t.Fatalf("Control-Shutdown-Main test failed in initGracefulShutdown() - %s",
-			err.Error())
-	}
-
 	// create args to call
 	args := []string{"./minio", "control", "shutdown", url}
 
 	// run app
-	err = app.Run(args)
+	err := app.Run(args)
 	if err != nil {
-		t.Errorf("Control-Shutdown-Main test failed with - %s",
-			err.Error())
+		t.Errorf("Control-Shutdown-Main test failed with - %s", err)
 	}
 }
 

@@ -134,7 +110,6 @@ func TestControlMain(t *testing.T) {
 	// run app
 	err := app.Run(args)
 	if err != nil {
-		t.Errorf("Control-Main test failed with - %s",
-			err.Error())
+		t.Errorf("Control-Main test failed with - %s", err)
 	}
 }
 
@@ -113,14 +113,10 @@ func (c *controllerAPIHandlers) HealObjectHandler(args *HealObjectArgs, reply *GenericReply) error {
 
 // HealObject - heal the object.
 func (c *controllerAPIHandlers) HealDiskMetadataHandler(args *GenericArgs, reply *GenericReply) error {
-	objAPI := c.ObjectAPI()
-	if objAPI == nil {
-		return errServerNotInitialized
-	}
 	if !isRPCTokenValid(args.Token) {
 		return errInvalidToken
 	}
-	err := objAPI.HealDiskMetadata()
+	err := repairDiskMetadata(c.StorageDisks)
 	if err != nil {
 		return err
 	}

@@ -153,15 +149,6 @@ func (c *controllerAPIHandlers) ShutdownHandler(args *ShutdownArgs, reply *GenericReply) error {
 	return nil
 }
 
-func (c *controllerAPIHandlers) TryInitHandler(args *GenericArgs, reply *GenericReply) error {
-	go func() {
-		globalWakeupCh <- struct{}{}
-	}()
-	*reply = GenericReply{}
-	return nil
-
-}
-
 // LockInfo - RPC control handler for `minio control lock`.
 // Returns the info of the locks held in the system.
 func (c *controllerAPIHandlers) LockInfo(arg *GenericArgs, reply *SystemLockState) error {
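The HealDiskMetadataHandler change above is why the handler struct (next hunk) grows a StorageDisks field: healing format.json has to work even before an object layer exists, so the nil-layer guard could be dropped. A hedged before/after sketch (repairDiskMetadata's definition is not part of this diff):

    // Before: healing required a fully initialized object layer.
    objAPI := c.ObjectAPI()
    if objAPI == nil {
        return errServerNotInitialized
    }
    err := objAPI.HealDiskMetadata()

    // After: healing works directly on the raw disks, initialized layer or not.
    err = repairDiskMetadata(c.StorageDisks)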
@@ -28,7 +28,13 @@ const (
 )
 
 // Register controller RPC handlers.
-func registerControllerRPCRouter(mux *router.Router, ctrlHandlers *controllerAPIHandlers) {
+func registerControllerRPCRouter(mux *router.Router, srvCmdConfig serverCmdConfig) {
+	// Initialize Controller.
+	ctrlHandlers := &controllerAPIHandlers{
+		ObjectAPI: newObjectLayerFn,
+		StorageDisks: srvCmdConfig.storageDisks,
+	}
+
 	ctrlRPCServer := rpc.NewServer()
 	ctrlRPCServer.RegisterName("Controller", ctrlHandlers)
 

@@ -38,5 +44,6 @@ func registerControllerRPCRouter(mux *router.Router, ctrlHandlers *controllerAPI
 
 // Handler for object healing.
 type controllerAPIHandlers struct {
 	ObjectAPI func() ObjectLayer
+	StorageDisks []StorageAPI
 }
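With both routers now building their own handler structs, server startup only threads the command config through. A hypothetical wiring sketch (the startup function that owns the mux and the srvCmdConfig value are not shown in this diff):

    mux := router.NewRouter()
    // API handlers need only the lazy object-layer getter.
    registerAPIRouter(mux)
    // Controller handlers additionally receive the raw storage disks,
    // carried in srvCmdConfig, for pre-initialization healing.
    registerControllerRPCRouter(mux, srvCmdConfig)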
@@ -66,14 +66,6 @@ func TestRPCControlLock(t *testing.T) {
 
 // Tests to validate the correctness of lock instrumentation control RPC end point.
 func (s *TestRPCControllerSuite) testRPCControlLock(c *testing.T) {
-	// initializing the locks.
-	initNSLock(false)
-	// set debug lock info to `nil` so that the next tests have to
-	// initialize them again.
-	defer func() {
-		nsMutex.debugLockMap = nil
-	}()
-
 	expectedResult := []lockStateCase{
 		// Test case - 1.
 		// Case where 10 read locks are held.

@@ -297,10 +289,8 @@ func (s *TestRPCControllerSuite) testControllerHealDiskMetadataH(c *testing.T) {
 	args := &GenericArgs{}
 	reply := &GenericReply{}
 	err := client.Call("Controller.HealDiskMetadataHandler", args, reply)
-
 	if err != nil {
-		c.Errorf("Control.HealDiskMetadataH - test failed with <ERROR> %s",
-			err.Error())
+		c.Errorf("Control.HealDiskMetadataH - test failed with <ERROR> %s", err)
 	}
 }
 
@@ -320,20 +310,16 @@ func (s *TestRPCControllerSuite) testControllerHealObjectH(t *testing.T) {
 	client := newAuthClient(s.testAuthConf)
 	defer client.Close()
 
-	err := s.testServer.Obj.MakeBucket("testbucket")
+	err := newObjectLayerFn().MakeBucket("testbucket")
 	if err != nil {
 		t.Fatalf(
-			"Controller.HealObjectH - create bucket failed with <ERROR> %s",
-			err.Error(),
-		)
+			"Controller.HealObjectH - create bucket failed with <ERROR> %s", err)
 	}
 
 	datum := strings.NewReader("a")
-	_, err = s.testServer.Obj.PutObject("testbucket", "testobject", 1,
-		datum, nil, "")
+	_, err = newObjectLayerFn().PutObject("testbucket", "testobject", 1, datum, nil, "")
 	if err != nil {
-		t.Fatalf("Controller.HealObjectH - put object failed with <ERROR> %s",
-			err.Error())
+		t.Fatalf("Controller.HealObjectH - put object failed with <ERROR> %s", err)
 	}
 
 	args := &HealObjectArgs{GenericArgs{}, "testbucket", "testobject"}

@@ -341,8 +327,7 @@ func (s *TestRPCControllerSuite) testControllerHealObjectH(t *testing.T) {
 	err = client.Call("Controller.HealObjectHandler", args, reply)
 
 	if err != nil {
-		t.Errorf("Controller.HealObjectH - test failed with <ERROR> %s",
-			err.Error())
+		t.Errorf("Controller.HealObjectH - test failed with <ERROR> %s", err)
 	}
 }
 

@@ -363,20 +348,16 @@ func (s *TestRPCControllerSuite) testControllerListObjectsHealH(t *testing.T) {
 	defer client.Close()
 
 	// careate a bucket
-	err := s.testServer.Obj.MakeBucket("testbucket")
+	err := newObjectLayerFn().MakeBucket("testbucket")
 	if err != nil {
 		t.Fatalf(
-			"Controller.ListObjectsHealH - create bucket failed - %s",
-			err.Error(),
-		)
+			"Controller.ListObjectsHealH - create bucket failed - %s", err)
 	}
 
 	r := strings.NewReader("0")
-	_, err = s.testServer.Obj.PutObject(
-		"testbucket", "testObj-0", 1, r, nil, "")
+	_, err = newObjectLayerFn().PutObject("testbucket", "testObj-0", 1, r, nil, "")
 	if err != nil {
-		t.Fatalf("Controller.ListObjectsHealH - object creation failed - %s",
-			err.Error())
+		t.Fatalf("Controller.ListObjectsHealH - object creation failed - %s", err)
 	}
 
 	args := &HealListArgs{

@@ -387,7 +368,6 @@ func (s *TestRPCControllerSuite) testControllerListObjectsHealH(t *testing.T) {
 	err = client.Call("Controller.ListObjectsHealHandler", args, reply)
 
 	if err != nil {
-		t.Errorf("Controller.ListObjectsHealHandler - test failed - %s",
-			err.Error())
+		t.Errorf("Controller.ListObjectsHealHandler - test failed - %s", err)
 	}
 }
 
@@ -222,8 +222,9 @@ func TestErasureReadUtils(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	objLayer, err := getXLObjectLayer(disks, nil)
+	objLayer, _, err := initObjectLayer(disks, nil)
 	if err != nil {
+		removeRoots(disks)
 		t.Fatal(err)
 	}
 	defer removeRoots(disks)
@@ -90,29 +90,26 @@ func testEventNotify(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 // Tests various forms of inititalization of event notifier.
 func TestInitEventNotifier(t *testing.T) {
-	disk, err := getRandomDisks(1)
+	disks, err := getRandomDisks(1)
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	fs, err := getSingleNodeObjectLayer(disk[0])
+	defer removeRoots(disks)
+	fs, _, err := initObjectLayer(disks, nil)
 	if err != nil {
 		t.Fatal("Unable to initialize FS backend.", err)
 	}
 	nDisks := 16
-	disks, err := getRandomDisks(nDisks)
+	disks, err = getRandomDisks(nDisks)
 	if err != nil {
 		t.Fatal("Unable to create directories for XL backend. ", err)
 	}
-	xl, err := getXLObjectLayer(disks, nil)
+	defer removeRoots(disks)
+	xl, _, err := initObjectLayer(disks, nil)
 	if err != nil {
 		t.Fatal("Unable to initialize XL backend.", err)
 	}
 
-	disks = append(disks, disk...)
-	for _, d := range disks {
-		defer removeAll(d)
-	}
-
 	// Collection of test cases for inititalizing event notifier.
 	testCases := []struct {
 		objAPI ObjectLayer

@@ -156,11 +153,11 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
 	defer removeAll(rootPath)
 
 	disk, err := getRandomDisks(1)
-	defer removeAll(disk[0])
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	obj, err := getSingleNodeObjectLayer(disk[0])
+	defer removeAll(disk[0])
+	obj, _, err := initObjectLayer(disk, nil)
 	if err != nil {
 		t.Fatal("Unable to initialize FS backend.", err)
 	}
@@ -210,7 +207,7 @@ func TestInitEventNotifierWithAMQP(t *testing.T) {
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	fs, err := getSingleNodeObjectLayer(disk[0])
+	fs, _, err := initObjectLayer(disk, nil)
 	if err != nil {
 		t.Fatal("Unable to initialize FS backend.", err)
 	}

@@ -237,7 +234,7 @@ func TestInitEventNotifierWithElasticSearch(t *testing.T) {
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	fs, err := getSingleNodeObjectLayer(disk[0])
+	fs, _, err := initObjectLayer(disk, nil)
 	if err != nil {
 		t.Fatal("Unable to initialize FS backend.", err)
 	}

@@ -264,7 +261,7 @@ func TestInitEventNotifierWithRedis(t *testing.T) {
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	fs, err := getSingleNodeObjectLayer(disk[0])
+	fs, _, err := initObjectLayer(disk, nil)
 	if err != nil {
 		t.Fatal("Unable to initialize FS backend.", err)
 	}

@@ -295,7 +292,7 @@ func TestListenBucketNotification(t *testing.T) {
 	if err != nil {
 		t.Fatal("Unable to create directories for FS backend. ", err)
 	}
-	obj, err := getSingleNodeObjectLayer(disk[0])
+	obj, _, err := initObjectLayer(disk, nil)
 	if err != nil {
 		t.Fatal("Unable to initialize FS backend.", err)
 	}

@@ -325,11 +322,6 @@ func TestListenBucketNotification(t *testing.T) {
 		t.Fatal("Unexpected error:", err)
 	}
 
-	// Validate if minio SNS is configured for an empty topic configs.
-	if isMinioSNSConfigured(listenARN, nil) {
-		t.Fatal("SNS listen shouldn't be configured.")
-	}
-
 	// Check if the config is loaded
 	notificationCfg := globalEventNotifier.GetBucketNotificationConfig(bucketName)
 	if notificationCfg == nil {

@@ -339,8 +331,8 @@ func TestListenBucketNotification(t *testing.T) {
 		t.Fatal("Notification config is not correctly loaded. Exactly one topic and one queue config are expected")
 	}
 
-	// Check if listen notification config is enabled
-	if !isMinioSNSConfigured(listenARN, notificationCfg.TopicConfigs) {
+	// Check if topic ARN is enabled
+	if notificationCfg.TopicConfigs[0].TopicARN != listenARN {
 		t.Fatal("SNS listen is not configured.")
 	}
 
@@ -149,6 +149,20 @@ func reduceFormatErrs(errs []error, diskCount int) (err error) {
 	return nil
 }
 
+// creates format.json, the FS format info in minioMetaBucket.
+func initFormatFS(storageDisk StorageAPI) error {
+	// Initialize meta volume, if volume already exists ignores it.
+	if err := initMetaVolume([]StorageAPI{storageDisk}); err != nil {
+		return fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
+	}
+	return saveFSFormatData(storageDisk, newFSFormatV1())
+}
+
+// loads format.json from minioMetaBucket if it exists.
+func loadFormatFS(storageDisk StorageAPI) (format *formatConfigV1, err error) {
+	return loadFormat(storageDisk)
+}
+
 // loadAllFormats - load all format config from all input disks in parallel.
 func loadAllFormats(bootstrapDisks []StorageAPI) ([]*formatConfigV1, []error) {
 	// Initialize sync waitgroup.
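initFormatFS persists the structure built by newFSFormatV1, which this commit also switches to returning a pointer (see the newFSFormatV1 hunk further down). Roughly, the value saved as format.json for an FS-backend disk looks like this sketch:

    // Sketch of the persisted FS format value; the fsFormat sub-struct has
    // fields of its own that this diff's context lines elide.
    format := &formatConfigV1{
        Version: "1",
        Format:  "fs",
        FS:      &fsFormat{ /* elided in this diff */ },
    }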
@@ -198,6 +212,17 @@ func loadAllFormats(bootstrapDisks []StorageAPI) ([]*formatConfigV1, []error) {
 // if (jbod inconsistent) return error // phase2
 // if (disks not recognized) // Always error.
 func genericFormatCheck(formatConfigs []*formatConfigV1, sErrs []error) (err error) {
+	if len(formatConfigs) == 1 {
+		// Successfully read, validate further.
+		if sErrs[0] == nil {
+			if !isFSFormat(formatConfigs[0]) {
+				return errFSDiskFormat
+			}
+			return nil
+		} // Returns error here.
+		return sErrs[0]
+	}
+
 	// Calculate the errors.
 	var (
 		errCorruptFormatCount = 0
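The new single-disk branch gives the FS backend a first-class path through the format check: a clean read must carry the "fs" format, and a failed read surfaces its own error. The tests added later in this diff exercise exactly that:

    // Single FS disk, clean read: passes.
    if err := genericFormatCheck([]*formatConfigV1{genFormatFS()}, []error{nil}); err != nil {
        t.Fatal("Got unexpected err: ", err)
    }
    // Single disk whose read failed: the read error is returned unchanged.
    if err := genericFormatCheck([]*formatConfigV1{genFormatFS()}, []error{errFaultyDisk}); err == nil {
        t.Fatalf("Should fail here")
    }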
@@ -390,8 +415,7 @@ func loadFormat(disk StorageAPI) (format *formatConfigV1, err error) {
 	return format, nil
 }
 
-// isFormatNotFound - returns true if all `format.json` are not
-// found on all disks.
+// isFormatNotFound - returns true if all `format.json` are not found on all disks.
 func isFormatNotFound(formats []*formatConfigV1) bool {
 	for _, format := range formats {
 		// One of the `format.json` is found.

@@ -403,8 +427,7 @@ func isFormatNotFound(formats []*formatConfigV1) bool {
 	return true
 }
 
-// isFormatFound - returns true if all input formats are found on
-// all disks.
+// isFormatFound - returns true if all input formats are found on all disks.
 func isFormatFound(formats []*formatConfigV1) bool {
 	for _, format := range formats {
 		// One of `format.json` is not found.
@@ -114,6 +114,13 @@ func genFormatXLInvalidXLVersion() []*formatConfigV1 {
 	return formatConfigs
 }
 
+func genFormatFS() *formatConfigV1 {
+	return &formatConfigV1{
+		Version: "1",
+		Format: "fs",
+	}
+}
+
 // generates a invalid format.json version for XL backend.
 func genFormatXLInvalidJBODCount() []*formatConfigV1 {
 	jbod := make([]string, 7)
@@ -269,7 +276,7 @@ func TestFormatXLHealFreshDisks(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Create an instance of xl backend.
-	obj, err := getXLObjectLayer(fsDirs, nil)
+	obj, _, err := initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Error(err)
 	}

@@ -301,7 +308,7 @@ func TestFormatXLHealFreshDisksErrorExpected(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Create an instance of xl backend.
-	obj, err := getXLObjectLayer(fsDirs, nil)
+	obj, _, err := initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Error(err)
 	}

@@ -311,10 +318,8 @@ func TestFormatXLHealFreshDisksErrorExpected(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	for i := 0; i < 16; i++ {
-		d := storageDisks[i].(*posix)
-		storageDisks[i] = &naughtyDisk{disk: d, defaultErr: errDiskNotFound}
-	}
+	// Prepares all disks are offline.
+	prepareNOfflineDisks(storageDisks, 16, t)
 
 	// Load again XL format.json to validate it
 	_, err = loadFormatXL(storageDisks)
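prepareNOfflineDisks is called here but not defined in this diff. Judging from the loop it replaces, a plausible reconstruction (the exact signature and error handling are assumptions):

    // Assumed shape: wrap the first n disks in naughtyDisk so every call
    // fails with errDiskNotFound, simulating offline disks.
    func prepareNOfflineDisks(storageDisks []StorageAPI, n int, t *testing.T) {
        for i := 0; i < n; i++ {
            d, ok := storageDisks[i].(*posix)
            if !ok {
                t.Fatal("expected *posix storage disk")
            }
            storageDisks[i] = &naughtyDisk{disk: d, defaultErr: errDiskNotFound}
        }
    }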
@@ -586,8 +591,9 @@ func TestInitFormatXLErrors(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer removeRoots(fsDirs)
 	// Create an instance of xl backend.
-	obj, err := getXLObjectLayer(fsDirs, nil)
+	obj, _, err := initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -620,8 +626,6 @@ func TestInitFormatXLErrors(t *testing.T) {
 	if err := initFormatXL(testStorageDisks); err != errDiskNotFound {
 		t.Fatal("Got a different error: ", err)
 	}
-
-	removeRoots(fsDirs)
 }
 
 // Test for reduceFormatErrs()

@@ -672,7 +676,14 @@ func TestGenericFormatCheck(t *testing.T) {
 	if err := genericFormatCheck(formatConfigs, errs); err == nil {
 		t.Fatalf("Should fail here")
 	}
+	errs = []error{nil}
+	if err := genericFormatCheck([]*formatConfigV1{genFormatFS()}, errs); err != nil {
+		t.Fatal("Got unexpected err: ", err)
+	}
+	errs = []error{errFaultyDisk}
+	if err := genericFormatCheck([]*formatConfigV1{genFormatFS()}, errs); err == nil {
+		t.Fatalf("Should fail here")
+	}
 }
 
 func TestLoadFormatXLErrs(t *testing.T) {
@@ -681,9 +692,10 @@ func TestLoadFormatXLErrs(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer removeRoots(fsDirs)
 
 	// Create an instance of xl backend.
-	obj, err := getXLObjectLayer(fsDirs, nil)
+	obj, _, err := initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -707,8 +719,9 @@ func TestLoadFormatXLErrs(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer removeRoots(fsDirs)
 
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -726,14 +739,13 @@ func TestLoadFormatXLErrs(t *testing.T) {
 		t.Fatal("Got an unexpected error: ", err)
 	}
 
-	removeRoots(fsDirs)
-
 	fsDirs, err = getRandomDisks(nDisks)
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer removeRoots(fsDirs)
 
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -749,14 +761,13 @@ func TestLoadFormatXLErrs(t *testing.T) {
 		t.Fatal("Got an unexpected error: ", err)
 	}
 
-	removeRoots(fsDirs)
-
 	fsDirs, err = getRandomDisks(nDisks)
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer removeRoots(fsDirs)
 
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -769,8 +780,6 @@ func TestLoadFormatXLErrs(t *testing.T) {
 	if _, err := loadFormatXL(xl.storageDisks); err != errDiskNotFound {
 		t.Fatal("Got an unexpected error: ", err)
 	}
-
-	removeRoots(fsDirs)
 }
 
 // Tests for healFormatXLCorruptedDisks() with cases which lead to errors
@@ -782,7 +791,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 	}
 
 	// Everything is fine, should return nil
-	obj, err := getXLObjectLayer(fsDirs, nil)
+	obj, _, err := initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -799,7 +808,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 	}
 
 	// Disks 0..15 are nil
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -818,7 +827,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 	}
 
 	// One disk returns Faulty Disk
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -839,7 +848,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 	}
 
 	// One disk is not found, heal corrupted disks should return nil
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -856,7 +865,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 	}
 
 	// Remove format.json of all disks
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -877,7 +886,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
 	}
 
 	// Corrupted format json in one disk
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -902,7 +911,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 	}
 
 	// Everything is fine, should return nil
-	obj, err := getXLObjectLayer(fsDirs, nil)
+	obj, _, err := initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -918,7 +927,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 	}
 
 	// Disks 0..15 are nil
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -937,7 +946,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 	}
 
 	// One disk returns Faulty Disk
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -958,7 +967,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 	}
 
 	// One disk is not found, heal corrupted disks should return nil
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -975,7 +984,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 	}
 
 	// Remove format.json of all disks
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -996,7 +1005,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
 	}
 
 	// Remove format.json of all disks
-	obj, err = getXLObjectLayer(fsDirs, nil)
+	obj, _, err = initObjectLayer(fsDirs, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -123,8 +123,8 @@ func newFSMetaV1() (fsMeta fsMetaV1) {
 }
 
 // newFSFormatV1 - initializes new formatConfigV1 with FS format info.
-func newFSFormatV1() (format formatConfigV1) {
-	return formatConfigV1{
+func newFSFormatV1() (format *formatConfigV1) {
+	return &formatConfigV1{
 		Version: "1",
 		Format: "fs",
 		FS: &fsFormat{

@@ -134,12 +134,12 @@ func newFSFormatV1() (format formatConfigV1) {
 }
 
 // isFSFormat - returns whether given formatConfigV1 is FS type or not.
-func isFSFormat(format formatConfigV1) bool {
+func isFSFormat(format *formatConfigV1) bool {
 	return format.Format == "fs"
 }
 
 // writes FS format (format.json) into minioMetaBucket.
-func writeFSFormatData(storage StorageAPI, fsFormat formatConfigV1) error {
+func saveFSFormatData(storage StorageAPI, fsFormat *formatConfigV1) error {
 	metadataBytes, err := json.Marshal(fsFormat)
 	if err != nil {
 		return err
@@ -67,24 +67,31 @@ func TestHasExtendedHeader(t *testing.T) {
 	}
 }
 
+func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) {
+	obj, _, err := initObjectLayer([]string{disk}, nil)
+	if err != nil {
+		t.Fatal("Unexpected err: ", err)
+	}
+	return obj
+}
+
 // TestReadFsMetadata - readFSMetadata testing with a healthy and faulty disk
 func TestReadFSMetadata(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Unexpected err: ", err)
-	}
+
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 
 	bucketName := "bucket"
 	objectName := "object"
 
-	if err = obj.MakeBucket(bucketName); err != nil {
+	if err := obj.MakeBucket(bucketName); err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}
 	sha256sum := ""
-	if _, err = obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
+	if _, err := obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
 		map[string]string{"X-Amz-Meta-AppId": "a"}, sha256sum); err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}
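The new initFSObjects helper absorbs the newFSObjects-plus-error-check boilerplate that every FS test below used to repeat; a typical call site after this change:

    disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
    defer removeAll(disk)
    obj := initFSObjects(disk, t) // fails the test via t.Fatal on error
    fs := obj.(fsObjects)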
@@ -93,15 +100,15 @@ func TestReadFSMetadata(t *testing.T) {
 	fsPath := "buckets/" + bucketName + "/" + objectName + "/fs.json"
 
 	// Regular fs metadata reading, no errors expected
-	if _, err = readFSMetadata(fs.storage, ".minio.sys", fsPath); err != nil {
+	if _, err := readFSMetadata(fs.storage, ".minio.sys", fsPath); err != nil {
 		t.Fatal("Unexpected error ", err)
 	}
 
 	// Corrupted fs.json
-	if err = fs.storage.AppendFile(".minio.sys", fsPath, []byte{'a'}); err != nil {
+	if err := fs.storage.AppendFile(".minio.sys", fsPath, []byte{'a'}); err != nil {
 		t.Fatal("Unexpected error ", err)
 	}
-	if _, err = readFSMetadata(fs.storage, ".minio.sys", fsPath); err == nil {
+	if _, err := readFSMetadata(fs.storage, ".minio.sys", fsPath); err == nil {
 		t.Fatal("Should fail", err)
 	}
 

@@ -109,7 +116,7 @@ func TestReadFSMetadata(t *testing.T) {
 	fsStorage := fs.storage.(*posix)
 	naughty := newNaughtyDisk(fsStorage, nil, errFaultyDisk)
 	fs.storage = naughty
-	if _, err = readFSMetadata(fs.storage, ".minio.sys", fsPath); errorCause(err) != errFaultyDisk {
+	if _, err := readFSMetadata(fs.storage, ".minio.sys", fsPath); errorCause(err) != errFaultyDisk {
 		t.Fatal("Should fail", err)
 	}
 

@@ -119,20 +126,17 @@ func TestReadFSMetadata(t *testing.T) {
 func TestWriteFSMetadata(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Unexpected err: ", err)
-	}
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 
 	bucketName := "bucket"
 	objectName := "object"
 
-	if err = obj.MakeBucket(bucketName); err != nil {
+	if err := obj.MakeBucket(bucketName); err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}
 	sha256sum := ""
-	if _, err = obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
+	if _, err := obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
 		map[string]string{"X-Amz-Meta-AppId": "a"}, sha256sum); err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}
@@ -28,11 +28,8 @@ func TestFSIsBucketExist(t *testing.T) {
 	// Prepare for testing
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
 
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 	bucketName := "bucket"
 

@@ -64,22 +61,20 @@ func TestFSIsUploadExists(t *testing.T) {
 	// Prepare for testing
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
 
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 
-	var uploadID string
 	bucketName := "bucket"
 	objectName := "object"
 
-	obj.MakeBucket(bucketName)
-	uploadID, err = obj.NewMultipartUpload(bucketName, objectName, nil)
+	if err := obj.MakeBucket(bucketName); err != nil {
+		t.Fatal("Unexpected err: ", err)
+	}
+
+	uploadID, err := obj.NewMultipartUpload(bucketName, objectName, nil)
 	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
+		t.Fatal("Unexpected err: ", err)
 	}
 	// Test with valid upload id
 	if exists := fs.isUploadIDExists(bucketName, objectName, uploadID); !exists {
@@ -110,10 +105,8 @@ func TestFSWriteUploadJSON(t *testing.T) {
 	// Prepare for tests
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Unexpected err: ", err)
-	}
+
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 
 	bucketName := "bucket"

@@ -121,6 +114,9 @@ func TestFSWriteUploadJSON(t *testing.T) {
 
 	obj.MakeBucket(bucketName)
 	uploadID, err := obj.NewMultipartUpload(bucketName, objectName, nil)
+	if err != nil {
+		t.Fatal("Unexpected err: ", err)
+	}
 
 	if err != nil {
 		t.Fatal("Unexpected err: ", err)

@@ -146,10 +142,8 @@ func TestFSUpdateUploadsJSON(t *testing.T) {
 	// Prepare for tests
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Unexpected err: ", err)
-	}
+
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 
 	bucketName := "bucket"
@@ -462,22 +462,11 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 		return "", traceError(IncompleteBody{})
 	}
 
-	// Validate if payload is valid.
-	if isSignVerify(data) {
-		if err := data.(*signVerifyReader).Verify(); err != nil {
-			// Incoming payload wrong, delete the temporary object.
-			fs.storage.DeleteFile(minioMetaBucket, tmpPartPath)
-			// Error return.
-			return "", toObjectErr(traceError(err), bucket, object)
-		}
-	}
-
 	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
 	if md5Hex != "" {
 		if newMD5Hex != md5Hex {
 			// MD5 mismatch, delete the temporary object.
 			fs.storage.DeleteFile(minioMetaBucket, tmpPartPath)
-
 			return "", traceError(BadDigest{md5Hex, newMD5Hex})
 		}
 	}
@@ -31,10 +31,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
 	// Prepare for tests
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
+	obj := initFSObjects(disk, t)
 
 	fs := obj.(fsObjects)
 	bucketName := "bucket"

@@ -67,18 +64,14 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
 	// Prepare for tests
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
+	obj := initFSObjects(disk, t)
 
 	fs := obj.(fsObjects)
 	bucketName := "bucket"
 	objectName := "object"
 	data := []byte("12345")
 	dataLen := int64(len(data))
 
-	if err = obj.MakeBucket(bucketName); err != nil {
+	if err := obj.MakeBucket(bucketName); err != nil {
 		t.Fatal("Cannot create bucket, err: ", err)
 	}
 

@@ -119,17 +112,14 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
 	// Prepare for tests
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
+	obj := initFSObjects(disk, t)
 
 	fs := obj.(fsObjects)
 	bucketName := "bucket"
 	objectName := "object"
 	data := []byte("12345")
 
-	if err = obj.MakeBucket(bucketName); err != nil {
+	if err := obj.MakeBucket(bucketName); err != nil {
 		t.Fatal("Cannot create bucket, err: ", err)
 	}
 

@@ -175,17 +165,13 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
 	// Prepare for tests
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
+	obj := initFSObjects(disk, t)
 
 	fs := obj.(fsObjects)
 	bucketName := "bucket"
 	objectName := "object"
 	data := []byte("12345")
 
-	if err = obj.MakeBucket(bucketName); err != nil {
+	if err := obj.MakeBucket(bucketName); err != nil {
 		t.Fatal("Cannot create bucket, err: ", err)
 	}
 
88
cmd/fs-v1.go
88
cmd/fs-v1.go
|
@ -20,7 +20,7 @@ import (
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"fmt"
|
||||||
"hash"
|
"hash"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
@ -33,8 +33,7 @@ import (
|
||||||
|
|
||||||
// fsObjects - Implements fs object layer.
|
// fsObjects - Implements fs object layer.
|
||||||
type fsObjects struct {
|
type fsObjects struct {
|
||||||
storage StorageAPI
|
storage StorageAPI
|
||||||
physicalDisk string
|
|
||||||
|
|
||||||
// List pool management.
|
// List pool management.
|
||||||
listPool *treeWalkPool
|
listPool *treeWalkPool
|
||||||
|
@ -46,73 +45,27 @@ var fsTreeWalkIgnoredErrs = []error{
|
||||||
errVolumeNotFound,
|
errVolumeNotFound,
|
||||||
}
|
}
|
||||||
|
|
||||||
// creates format.json, the FS format info in minioMetaBucket.
|
|
||||||
func initFormatFS(storageDisk StorageAPI) error {
|
|
||||||
return writeFSFormatData(storageDisk, newFSFormatV1())
|
|
||||||
}
|
|
||||||
|
|
||||||
// loads format.json from minioMetaBucket if it exists.
|
|
||||||
func loadFormatFS(storageDisk StorageAPI) (format formatConfigV1, err error) {
|
|
||||||
// Reads entire `format.json`.
|
|
||||||
buf, err := storageDisk.ReadAll(minioMetaBucket, fsFormatJSONFile)
|
|
||||||
if err != nil {
|
|
||||||
return formatConfigV1{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal format config.
|
|
||||||
if err = json.Unmarshal(buf, &format); err != nil {
|
|
||||||
return formatConfigV1{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return structured `format.json`.
|
|
||||||
return format, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newFSObjects - initialize new fs object layer.
|
// newFSObjects - initialize new fs object layer.
|
||||||
func newFSObjects(disk string) (ObjectLayer, error) {
|
func newFSObjects(storage StorageAPI) (ObjectLayer, error) {
|
||||||
storage, err := newStorageAPI(disk)
|
if storage == nil {
|
||||||
if err != nil && err != errDiskNotFound {
|
return nil, errInvalidArgument
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to create `.minio.sys`.
|
|
||||||
err = storage.MakeVol(minioMetaBucket)
|
|
||||||
if err != nil {
|
|
||||||
switch err {
|
|
||||||
// Ignore the errors.
|
|
||||||
case errVolumeExists, errDiskNotFound, errFaultyDisk:
|
|
||||||
default:
|
|
||||||
return nil, toObjectErr(err, minioMetaBucket)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Runs house keeping code, like creating minioMetaBucket, cleaning up tmp files etc.
|
// Runs house keeping code, like creating minioMetaBucket, cleaning up tmp files etc.
|
||||||
if err = fsHouseKeeping(storage); err != nil {
|
if err := fsHouseKeeping(storage); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// loading format.json from minioMetaBucket.
|
// Load format and validate.
|
||||||
// Note: The format.json content is ignored, reserved for future use.
|
_, err := loadFormatFS(storage)
|
||||||
format, err := loadFormatFS(storage)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == errFileNotFound {
|
return nil, fmt.Errorf("Unable to recognize backend format, %s", err)
|
||||||
// format.json doesn't exist, create it inside minioMetaBucket.
|
|
||||||
err = initFormatFS(storage)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else if !isFSFormat(format) {
|
|
||||||
return nil, errFSDiskFormat
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize fs objects.
|
// Initialize fs objects.
|
||||||
fs := fsObjects{
|
fs := fsObjects{
|
||||||
storage: storage,
|
storage: storage,
|
||||||
physicalDisk: disk,
|
listPool: newTreeWalkPool(globalLookupTimeout),
|
||||||
listPool: newTreeWalkPool(globalLookupTimeout),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return successfully initialized object layer.
|
// Return successfully initialized object layer.
|
||||||
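After this refactor newFSObjects no longer dials its own storage; callers construct a StorageAPI first and hand it in. A sketch of the new call sequence, using the initStorageDisks helper this same diff uses in its tests (the disk path is illustrative):

	// Build the storage layer first, then the FS object layer on top of it.
	storageDisks, err := initStorageDisks([]string{"/tmp/minio-disk"}, nil)
	if err != nil {
		return err
	}
	objLayer, err := newFSObjects(storageDisks[0])
	if err != nil {
		// A nil disk yields errInvalidArgument; an unrecognized on-disk
		// format yields the fmt.Errorf message constructed above.
		return err
	}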

@@ -153,10 +106,12 @@ func (fs fsObjects) Shutdown() error {
 func (fs fsObjects) StorageInfo() StorageInfo {
 	info, err := fs.storage.DiskInfo()
 	errorIf(err, "Unable to get disk info %#v", fs.storage)
-	return StorageInfo{
+	storageInfo := StorageInfo{
 		Total: info.Total,
 		Free:  info.Free,
 	}
+	storageInfo.Backend.Type = FS
+	return storageInfo
 }

 /// Bucket operations

@@ -450,16 +405,6 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 		metadata["md5Sum"] = newMD5Hex
 	}

-	// Validate if payload is valid.
-	if isSignVerify(data) {
-		if vErr := data.(*signVerifyReader).Verify(); vErr != nil {
-			// Incoming payload wrong, delete the temporary object.
-			fs.storage.DeleteFile(minioMetaBucket, tempObj)
-			// Error return.
-			return ObjectInfo{}, toObjectErr(traceError(vErr), bucket, object)
-		}
-	}
-
 	// md5Hex representation.
 	md5Hex := metadata["md5Sum"]
 	if md5Hex != "" {

@@ -679,8 +624,3 @@ func (fs fsObjects) HealObject(bucket, object string) error {
 func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
 	return ListObjectsInfo{}, traceError(NotImplemented{})
 }
-
-// HealDiskMetadata -- heal disk metadata, not supported in FS
-func (fs fsObjects) HealDiskMetadata() error {
-	return NotImplemented{}
-}

@@ -40,27 +40,48 @@ func TestNewFS(t *testing.T) {
 		disks = append(disks, xlDisk)
 	}

+	fsStorageDisks, err := initStorageDisks([]string{disk}, nil)
+	if err != nil {
+		t.Fatal("Unexpected error: ", err)
+	}
+
+	xlStorageDisks, err := initStorageDisks(disks, nil)
+	if err != nil {
+		t.Fatal("Unexpected error: ", err)
+	}
+
 	// Initializes all disks with XL
-	err := formatDisks(disks, nil)
+	err = waitForFormatDisks(true, "", xlStorageDisks)
 	if err != nil {
 		t.Fatalf("Unable to format XL %s", err)
 	}
-	_, err = newXLObjects(disks, nil)
+	_, err = newXLObjects(xlStorageDisks)
 	if err != nil {
 		t.Fatalf("Unable to initialize XL object, %s", err)
 	}

 	testCases := []struct {
-		disk        string
+		disk        StorageAPI
 		expectedErr error
 	}{
-		{disk, nil},
-		{disks[0], errFSDiskFormat},
+		{fsStorageDisks[0], nil},
+		{xlStorageDisks[0], errFSDiskFormat},
 	}

 	for _, testCase := range testCases {
-		if _, err := newFSObjects(testCase.disk); err != testCase.expectedErr {
-			t.Fatalf("expected: %s, got: %s", testCase.expectedErr, err)
+		if err = waitForFormatDisks(true, "", []StorageAPI{testCase.disk}); err != testCase.expectedErr {
+			t.Errorf("expected: %s, got: %s", testCase.expectedErr, err)
 		}
 	}
+	_, err = newFSObjects(nil)
+	if err != errInvalidArgument {
+		t.Errorf("Expecting error invalid argument, got %s", err)
+	}
+	_, err = newFSObjects(xlStorageDisks[0])
+	if err != nil {
+		errMsg := "Unable to recognize backend format, Disk is not in FS format."
+		if err.Error() != errMsg {
+			t.Errorf("Expecting %s, got %s", errMsg, err)
+		}
+	}
 }

@@ -71,10 +92,7 @@ func TestFSShutdown(t *testing.T) {
 	// Prepare for tests
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
-
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 	fsStorage := fs.storage.(*posix)

@@ -108,15 +126,11 @@ func TestFSLoadFormatFS(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)

-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Should not fail here", err)
-	}
-
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)

 	// Regular format loading
-	_, err = loadFormatFS(fs.storage)
+	_, err := loadFormatFS(fs.storage)
 	if err != nil {
 		t.Fatal("Should not fail here", err)
 	}

@@ -141,11 +155,7 @@ func TestFSGetBucketInfo(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)

-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal(err)
-	}
-
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 	bucketName := "bucket"

@@ -182,7 +192,7 @@ func TestFSDeleteObject(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)

-	obj, _ := newFSObjects(disk)
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 	bucketName := "bucket"
 	objectName := "object"

@@ -223,7 +233,7 @@ func TestFSDeleteBucket(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)

-	obj, _ := newFSObjects(disk)
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)
 	bucketName := "bucket"

@@ -264,11 +274,10 @@ func TestFSListBuckets(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)

-	obj, _ := newFSObjects(disk)
+	obj := initFSObjects(disk, t)
 	fs := obj.(fsObjects)

 	bucketName := "bucket"
-
 	if err := obj.MakeBucket(bucketName); err != nil {
 		t.Fatal("Unexpected error: ", err)
 	}

@@ -303,11 +312,8 @@ func TestFSHealObject(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)

-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
-	err = obj.HealObject("bucket", "object")
+	obj := initFSObjects(disk, t)
+	err := obj.HealObject("bucket", "object")
 	if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
 		t.Fatalf("Heal Object should return NotImplemented error ")
 	}

@@ -318,26 +324,8 @@ func TestFSListObjectsHeal(t *testing.T) {
 	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
 	defer removeAll(disk)

-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
-	_, err = obj.ListObjectsHeal("bucket", "prefix", "marker", "delimiter", 1000)
-	if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
-		t.Fatalf("Heal Object should return NotImplemented error ")
-	}
-}
-
-// TestFSHealDiskMetadata - tests for fs HealDiskMetadata
-func TestFSHealDiskMetadata(t *testing.T) {
-	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
-	defer removeAll(disk)
-
-	obj, err := newFSObjects(disk)
-	if err != nil {
-		t.Fatal("Cannot create a new FS object: ", err)
-	}
-	err = obj.HealDiskMetadata()
+	obj := initFSObjects(disk, t)
+	_, err := obj.ListObjectsHeal("bucket", "prefix", "marker", "delimiter", 1000)
 	if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
 		t.Fatalf("Heal Object should return NotImplemented error ")
 	}

@@ -52,6 +52,8 @@ var (
 	globalMaxCacheSize = uint64(maxCacheSize)
 	// Cache expiry.
 	globalCacheExpiry = objcache.DefaultExpiry
+	// Minio default port, can be changed through command line.
+	globalMinioPort = 9000
 	// Add new variable global values here.
 )

@@ -22,7 +22,6 @@ import (
 	"mime/multipart"
 	"net/http"
 	"strings"
-	"time"
 )

 // Validates location constraint in PutBucket request body.

@@ -124,16 +123,3 @@ func extractPostPolicyFormValues(reader *multipart.Reader) (filePart io.Reader,
 	}
 	return filePart, fileName, formValues, nil
 }
-
-// Send whitespace character, once every 5secs, until CompleteMultipartUpload is done.
-// CompleteMultipartUpload method of the object layer indicates that it's done via doneCh
-func sendWhiteSpaceChars(w http.ResponseWriter, doneCh <-chan struct{}) {
-	for {
-		select {
-		case <-time.After(5 * time.Second):
-			w.Write([]byte(" "))
-		case <-doneCh:
-			return
-		}
-	}
-}

@@ -107,16 +107,13 @@ func (l LockInfoStateNotBlocked) Error() string {
 	return fmt.Sprintf("Lock state should be \"Blocked\" for <volume> %s, <path> %s, <operationID> %s.", l.volume, l.path, l.operationID)
 }

-var errLockNotInitialized = errors.New("Debug Lock Map not initialized:\n1. Enable Lock Debugging using right ENV settings \n2. Make sure initNSLock() is called.")
+var errLockNotInitialized = errors.New("Debug lockMap not initialized.")

 // change the state of the lock from Blocked to Running.
 func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockOrigin, operationID string, readLock bool) error {
 	// This operation is not executed under the scope nsLockMap.mutex.Lock(), lock has to be explicitly held here.
 	n.lockMapMutex.Lock()
 	defer n.lockMapMutex.Unlock()
-	if n.debugLockMap == nil {
-		return errLockNotInitialized
-	}
 	// new state info to be set for the lock.
 	newLockInfo := debugLockInfo{
 		lockOrigin: lockOrigin,

@@ -132,38 +129,32 @@ func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockOrigin, operationI
 	}

 	// check whether the lock info entry for <volume, path> pair already exists and its not `nil`.
-	if debugLockMap, ok := n.debugLockMap[param]; ok {
-		// `*debugLockInfoPerVolumePath` entry containing lock info for `param <volume, path>` is `nil`.
-		if debugLockMap == nil {
-			return errLockNotInitialized
-		}
-	} else {
-		// The lock state info foe given <volume, path> pair should already exist.
+	lockInfo, ok := n.debugLockMap[param]
+	if !ok {
+		// The lock state info for given <volume, path> pair should already exist.
 		// If not return `LockInfoVolPathMssing`.
 		return LockInfoVolPathMssing{param.volume, param.path}
 	}

 	// The lock info for the given operation ID shouldn't be `nil`.
-	if n.debugLockMap[param].lockInfo == nil {
-		return LockInfoOpsIDNotFound{param.volume, param.path, operationID}
+	if lockInfo == nil {
+		return errLockNotInitialized
 	}
-	if lockInfo, ok := n.debugLockMap[param].lockInfo[operationID]; ok {
-		// The entry for the lock originated at `lockOrigin` should already exist.
-		// If not return `LockInfoOriginNotFound`.
-		if lockInfo.lockOrigin != lockOrigin {
-			return LockInfoOriginNotFound{param.volume, param.path, operationID, lockOrigin}
-		}
-		// Status of the lock should already be set to "Blocked".
-		// If not return `LockInfoStateNotBlocked`.
-		if lockInfo.status != "Blocked" {
-			return LockInfoStateNotBlocked{param.volume, param.path, operationID}
-		}
-	} else {
+	lockInfoOpID, ok := n.debugLockMap[param].lockInfo[operationID]
+	if !ok {
 		// The lock info entry for given `opsID` should already exist for given <volume, path> pair.
 		// If not return `LockInfoOpsIDNotFound`.
 		return LockInfoOpsIDNotFound{param.volume, param.path, operationID}
 	}
+	// The entry for the lock originated at `lockOrigin` should already exist.
+	// If not return `LockInfoOriginNotFound`.
+	if lockInfoOpID.lockOrigin != lockOrigin {
+		return LockInfoOriginNotFound{param.volume, param.path, operationID, lockOrigin}
+	}
+	// Status of the lock should already be set to "Blocked".
+	// If not return `LockInfoStateNotBlocked`.
+	if lockInfoOpID.status != "Blocked" {
+		return LockInfoStateNotBlocked{param.volume, param.path, operationID}
+	}

 	// All checks finished.
 	// changing the status of the operation from blocked to running and updating the time.

@@ -178,12 +169,12 @@ func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockOrigin, operationI
 	return nil
 }

+func (n *nsLockMap) initLockInfoForVolumePath(param nsParam) {
+	n.debugLockMap[param] = newDebugLockInfoPerVolumePath()
+}
+
 // change the state of the lock from Ready to Blocked.
 func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockOrigin, operationID string, readLock bool) error {
-	if n.debugLockMap == nil {
-		return errLockNotInitialized
-	}
-
 	newLockInfo := debugLockInfo{
 		lockOrigin: lockOrigin,
 		status: "Blocked",

@@ -195,16 +186,15 @@ func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockOrigin, operationID s
 		newLockInfo.lockType = debugWLockStr
 	}

-	if lockInfo, ok := n.debugLockMap[param]; ok {
-		if lockInfo == nil {
-			// *debugLockInfoPerVolumePath entry is nil, initialize here to avoid any case of `nil` pointer access.
-			n.initLockInfoForVolumePath(param)
-		}
-	} else {
+	lockInfo, ok := n.debugLockMap[param]
+	if !ok {
 		// State info entry for the given <volume, pair> doesn't exist, initializing it.
 		n.initLockInfoForVolumePath(param)
 	}
+	if lockInfo == nil {
+		// *debugLockInfoPerVolumePath entry is nil, initialize here to avoid any case of `nil` pointer access.
+		n.initLockInfoForVolumePath(param)
+	}
 	// lockInfo is a map[string]debugLockInfo, which holds map[OperationID]{status,time, origin} of the lock.
 	if n.debugLockMap[param].lockInfo == nil {
 		n.debugLockMap[param].lockInfo = make(map[string]debugLockInfo)

@@ -224,45 +214,37 @@ func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockOrigin, operationID s

 // deleteLockInfoEntry - Deletes the lock state information for given <volume, path> pair. Called when nsLk.ref count is 0.
 func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error {
-	if n.debugLockMap == nil {
-		return errLockNotInitialized
-	}
 	// delete the lock info for the given operation.
-	if _, found := n.debugLockMap[param]; found {
-		// Remove from the map if there are no more references for the given (volume,path) pair.
-		delete(n.debugLockMap, param)
-	} else {
+	if _, found := n.debugLockMap[param]; !found {
 		return LockInfoVolPathMssing{param.volume, param.path}
 	}
+	// Remove from the map if there are no more references for the given (volume,path) pair.
+	delete(n.debugLockMap, param)
 	return nil
 }

 // deleteLockInfoEntry - Deletes the entry for given opsID in the lock state information of given <volume, path> pair.
 // called when the nsLk ref count for the given <volume, path> pair is not 0.
 func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, operationID string) error {
-	if n.debugLockMap == nil {
-		return errLockNotInitialized
-	}
 	// delete the lock info for the given operation.
-	if infoMap, found := n.debugLockMap[param]; found {
-		// the operation finished holding the lock on the resource, remove the entry for the given operation with the operation ID.
-		if _, foundInfo := infoMap.lockInfo[operationID]; foundInfo {
-			// decrease the global running and lock reference counter.
-			n.runningLockCounter--
-			n.globalLockCounter--
-			// decrease the lock reference counter for the lock info for given <volume,path> pair.
-			// decrease the running operation number. Its assumed that the operation is over once an attempt to release the lock is made.
-			infoMap.running--
-			// decrease the total reference count of locks held on <volume,path> pair.
-			infoMap.ref--
-			delete(infoMap.lockInfo, operationID)
-		} else {
-			// Unlock request with invalid operation ID not accepted.
-			return LockInfoOpsIDNotFound{param.volume, param.path, operationID}
-		}
-	} else {
+	infoMap, found := n.debugLockMap[param]
+	if !found {
 		return LockInfoVolPathMssing{param.volume, param.path}
 	}
+	// the operation finished holding the lock on the resource, remove the entry for the given operation with the operation ID.
+	if _, foundInfo := infoMap.lockInfo[operationID]; !foundInfo {
+		// Unlock request with invalid operation ID not accepted.
+		return LockInfoOpsIDNotFound{param.volume, param.path, operationID}
+	}
+	// decrease the global running and lock reference counter.
+	n.runningLockCounter--
+	n.globalLockCounter--
+	// decrease the lock reference counter for the lock info for given <volume,path> pair.
+	// decrease the running operation number. Its assumed that the operation is over once an attempt to release the lock is made.
+	infoMap.running--
+	// decrease the total reference count of locks held on <volume,path> pair.
+	infoMap.ref--
+	delete(infoMap.lockInfo, operationID)
 	return nil
 }
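The lock-instrument functions above all get the same treatment: a nested if/else ladder becomes a flat sequence of guard clauses that return the error early, so the success path reads top to bottom at one indentation level. A generic before/after sketch of that pattern, with placeholder names, not code from this commit:

	// Before: the success logic is buried inside nested branches.
	if entry, ok := m[key]; ok {
		if entry != nil {
			use(entry)
		} else {
			return errNilEntry
		}
	} else {
		return errMissing
	}

	// After: each failure exits immediately; the happy path stays flat.
	entry, ok := m[key]
	if !ok {
		return errMissing
	}
	if entry == nil {
		return errNilEntry
	}
	use(entry)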
@@ -246,7 +246,6 @@ func TestNewDebugLockInfoPerVolumePath(t *testing.T) {

 // TestNsLockMapStatusBlockedToRunning - Validates the function for changing the lock state from blocked to running.
 func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
-
 	testCases := []struct {
 		volume string
 		path   string

@@ -327,9 +326,9 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
 	actualErr := nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
 		testCases[0].opsID, testCases[0].readLock)

-	expectedNilErr := errLockNotInitialized
-	if actualErr != expectedNilErr {
-		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
+	expectedErr := LockInfoVolPathMssing{testCases[0].volume, testCases[0].path}
+	if actualErr != expectedErr {
+		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
 	}

 	nsMutex = &nsLockMap{

@@ -337,15 +336,13 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {
 		debugLockMap: make(map[nsParam]*debugLockInfoPerVolumePath),
 		lockMap:      make(map[nsParam]*nsLock),
 	}
-	// Entry for <volume, path> pair is set to nil.
-	// Should fail with `errLockNotInitialized`.
+	// Entry for <volume, path> pair is set to nil. Should fail with `errLockNotInitialized`.
 	nsMutex.debugLockMap[param] = nil
 	actualErr = nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
 		testCases[0].opsID, testCases[0].readLock)

-	expectedNilErr = errLockNotInitialized
-	if actualErr != expectedNilErr {
-		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
+	if actualErr != errLockNotInitialized {
+		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", errLockNotInitialized, actualErr)
 	}

 	// Setting the lock info to be `nil`.

@@ -391,10 +388,7 @@ func TestNsLockMapStatusBlockedToRunning(t *testing.T) {

 	// initializing the locks.
 	initNSLock(false)
-	// set debug lock info to `nil` so that the next tests have to initialize them again.
-	defer func() {
-		nsMutex.debugLockMap = nil
-	}()
+
 	// Iterate over the cases and assert the result.
 	for i, testCase := range testCases {
 		param := nsParam{testCase.volume, testCase.path}

@@ -518,22 +512,20 @@ func TestNsLockMapStatusNoneToBlocked(t *testing.T) {
 		},
 	}

+	// initializing the locks.
+	initNSLock(false)
+
 	param := nsParam{testCases[0].volume, testCases[0].path}
 	// Testing before the initialization done.
 	// Since the data structures for
 	actualErr := nsMutex.statusBlockedToRunning(param, testCases[0].lockOrigin,
 		testCases[0].opsID, testCases[0].readLock)

-	expectedNilErr := errLockNotInitialized
-	if actualErr != expectedNilErr {
-		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
+	expectedErr := LockInfoVolPathMssing{testCases[0].volume, testCases[0].path}
+	if actualErr != expectedErr {
+		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
 	}
-	// initializing the locks.
-	initNSLock(false)
-	// set debug lock info to `nil` so that the next tests have to initialize them again.
-	defer func() {
-		nsMutex.debugLockMap = nil
-	}()
+
 	// Iterate over the cases and assert the result.
 	for i, testCase := range testCases {
 		nsMutex.lockMapMutex.Lock()

@@ -562,6 +554,10 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
 			// expected metrics.
 		},
 	}

+	// initializing the locks.
+	initNSLock(false)
+
 	// case - 1.
 	// Testing the case where delete lock info is attempted even before the lock is initialized.
 	param := nsParam{testCases[0].volume, testCases[0].path}

@@ -569,29 +565,12 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {

 	actualErr := nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)

-	expectedNilErr := errLockNotInitialized
-	if actualErr != expectedNilErr {
-		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
+	expectedErr := LockInfoVolPathMssing{testCases[0].volume, testCases[0].path}
+	if actualErr != expectedErr {
+		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedErr, actualErr)
 	}

-	// initializing the locks.
-	initNSLock(false)
-	// set debug lock info to `nil` so that the next tests have to initialize them again.
-	defer func() {
-		nsMutex.debugLockMap = nil
-	}()
-	// case - 2.
-	// Case where an attempt to delete the entry for non-existent <volume, path> pair is done.
-	// Set the status of the lock to blocked and then to running.
-	nonExistParam := nsParam{volume: "non-exist-volume", path: "non-exist-path"}
-	actualErr = nsMutex.deleteLockInfoEntryForOps(nonExistParam, testCases[0].opsID)
-
-	expectedVolPathErr := LockInfoVolPathMssing{nonExistParam.volume, nonExistParam.path}
-	if actualErr != expectedVolPathErr {
-		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedVolPathErr, actualErr)
-	}
-
-	// Case - 3.
+	// Case - 2.
 	// Lock state is set to Running and then an attempt to delete the info for non-existent opsID done.
 	nsMutex.lockMapMutex.Lock()
 	err := nsMutex.statusNoneToBlocked(param, testCases[0].lockOrigin, testCases[0].opsID, testCases[0].readLock)

@@ -660,36 +639,21 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
 			// expected metrics.
 		},
 	}

+	// initializing the locks.
+	initNSLock(false)
+
 	// case - 1.
-	// Testing the case where delete lock info is attempted even before the lock is initialized.
+	// Case where an attempt to delete the entry for non-existent <volume, path> pair is done.
+	// Set the status of the lock to blocked and then to running.
 	param := nsParam{testCases[0].volume, testCases[0].path}
-	// Testing before the initialization done.

 	actualErr := nsMutex.deleteLockInfoEntryForVolumePath(param)

-	expectedNilErr := errLockNotInitialized
+	expectedNilErr := LockInfoVolPathMssing{param.volume, param.path}
 	if actualErr != expectedNilErr {
 		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedNilErr, actualErr)
 	}

-	// initializing the locks.
-	initNSLock(false)
-	// set debug lock info to `nil` so that the next tests have to initialize them again.
-	defer func() {
-		nsMutex.debugLockMap = nil
-	}()
 	// case - 2.
-	// Case where an attempt to delete the entry for non-existent <volume, path> pair is done.
-	// Set the status of the lock to blocked and then to running.
-	nonExistParam := nsParam{volume: "non-exist-volume", path: "non-exist-path"}
-	actualErr = nsMutex.deleteLockInfoEntryForVolumePath(nonExistParam)
-
-	expectedVolPathErr := LockInfoVolPathMssing{nonExistParam.volume, nonExistParam.path}
-	if actualErr != expectedVolPathErr {
-		t.Fatalf("Errors mismatch: Expected \"%s\", got \"%s\"", expectedVolPathErr, actualErr)
-	}
-
-	// case - 3.
 	// Attempt to delete a registered entry is done.
 	// All metrics should be 0 after deleting the entry.

@@ -77,8 +77,8 @@ type lockServer struct {
 	timestamp time.Time
 }

-// Initialize distributed name space lock.
-func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) {
+// Register distributed NS lock handlers.
+func registerDistNSLockRouter(mux *router.Router, serverConfig serverCmdConfig) {
 	lockServers := newLockServers(serverConfig)
 	registerStorageLockers(mux, lockServers)
 }

@@ -72,10 +72,6 @@ func initNSLock(isDist bool) {
 		nsMutex.debugLockMap = make(map[nsParam]*debugLockInfoPerVolumePath)
 	}

-func (n *nsLockMap) initLockInfoForVolumePath(param nsParam) {
-	n.debugLockMap[param] = newDebugLockInfoPerVolumePath()
-}
-
 // RWLocker - interface that any read-write locking library should implement.
 type RWLocker interface {
 	sync.Locker

@@ -301,11 +301,6 @@ func TestLockStats(t *testing.T) {
 	// initializing the locks.
 	initNSLock(false)

-	// set debug lock info to `nil` so that the next tests have to initialize them again.
-	defer func() {
-		nsMutex.debugLockMap = nil
-	}()
-
 	// hold 10 read locks.
 	for i := 0; i < 10; i++ {
 		nsMutex.RLock("my-bucket", "my-object", strconv.Itoa(i))

@@ -17,13 +17,14 @@
 package cmd

 import (
-	"github.com/minio/minio/pkg/disk"
 	"sync"
+
+	"github.com/minio/minio/pkg/disk"
 )

 // naughtyDisk wraps a POSIX disk and returns programmed errors
 // specified by the developer. The purpose is to simulate errors
-// that are hard to simulate in practise like DiskNotFound.
+// that are hard to simulate in practice like DiskNotFound.
 // Programmed errors are stored in errors field.
 type naughtyDisk struct {
 	// The real disk

@@ -42,6 +43,10 @@ func newNaughtyDisk(d *posix, errs map[int]error, defaultErr error) *naughtyDisk
 	return &naughtyDisk{disk: d, errors: errs, defaultErr: defaultErr}
 }

+func (d *naughtyDisk) String() string {
+	return d.disk.String()
+}
+
 func (d *naughtyDisk) calcError() (err error) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
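The new String method lets a naughtyDisk print like the posix disk it wraps, so the startup messages added later in this diff can list it alongside real disks. A sketch of the intended test usage, assuming a *posix value named posixDisk already exists; the call indices key which storage API call should fail:

	// Fail only the 2nd storage call with errFaultyDisk; others pass through.
	naughty := newNaughtyDisk(posixDisk, map[int]error{2: errFaultyDisk}, nil)

	// Or fail every call with one default error.
	alwaysFaulty := newNaughtyDisk(posixDisk, nil, errFaultyDisk)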

@@ -65,11 +65,12 @@ func (rpcClient *RPCClient) getRPCClient() *rpc.Client {
 // dialRPCClient tries to establish a connection to the server in a safe manner
 func (rpcClient *RPCClient) dialRPCClient() (*rpc.Client, error) {
 	rpcClient.mu.Lock()
-	defer rpcClient.mu.Unlock()
 	// After acquiring lock, check whether another thread may not have already dialed and established connection
 	if rpcClient.rpcPrivate != nil {
+		rpcClient.mu.Unlock()
 		return rpcClient.rpcPrivate, nil
 	}
+	rpcClient.mu.Unlock()

 	var err error
 	var conn net.Conn

@@ -92,7 +93,9 @@ func (rpcClient *RPCClient) dialRPCClient() (*rpc.Client, error) {
 	if rpc == nil {
 		return nil, errors.New("No valid RPC Client created after dial")
 	}
+	rpcClient.mu.Lock()
 	rpcClient.rpcPrivate = rpc
+	rpcClient.mu.Unlock()
 	return rpc, nil
 }
 	if err == nil {
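The change above narrows the mutex scope: the lock is dropped before the slow network dial and re-acquired only to publish the client, instead of being held via defer for the whole call. The trade-off is that two goroutines may now both dial, with the last writer to rpcPrivate winning. A generic sketch of the resulting pattern — dial here is a stand-in for the RPC dial logic, not an API from this codebase:

	func (c *RPCClient) getOrDial(dial func() (*rpc.Client, error)) (*rpc.Client, error) {
		c.mu.Lock()
		if c.rpcPrivate != nil {
			existing := c.rpcPrivate
			c.mu.Unlock()
			return existing, nil
		}
		c.mu.Unlock()

		// Slow network I/O happens outside the lock, so concurrent callers
		// are not serialized behind it.
		newClient, err := dial()
		if err != nil {
			return nil, err
		}

		c.mu.Lock()
		c.rpcPrivate = newClient // racing dialers: last writer wins
		c.mu.Unlock()
		return newClient, nil
	}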

@@ -564,6 +564,18 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}
 }

+func initFSObjectsB(disk string, t *testing.B) (obj ObjectLayer) {
+	storageDisks, err := initStorageDisks([]string{disk}, nil)
+	if err != nil {
+		t.Fatal("Unexpected err: ", err)
+	}
+	obj, err = newFSObjects(storageDisks[0])
+	if err != nil {
+		t.Fatal("Unexpected err: ", err)
+	}
+	return obj
+}
+
 func BenchmarkListObjects(b *testing.B) {
 	// Make a temporary directory to use as the obj.
 	directory, err := ioutil.TempDir("", "minio-list-benchmark")

@@ -573,10 +585,7 @@ func BenchmarkListObjects(b *testing.B) {
 	defer removeAll(directory)

 	// Create the obj.
-	obj, err := newFSObjects(directory)
-	if err != nil {
-		b.Fatal(err)
-	}
+	obj := initFSObjectsB(directory, b)

 	// Create a bucket.
 	err = obj.MakeBucket("ls-benchmark-bucket")

@@ -105,9 +105,6 @@ func isLocalStorage(networkPath string) bool {
 // Depending on the disk type network or local, initialize storage API.
 func newStorageAPI(disk string) (storage StorageAPI, err error) {
 	if isLocalStorage(disk) {
-		if idx := strings.LastIndex(disk, ":"); idx != -1 {
-			return newPosix(disk[idx+1:])
-		}
 		return newPosix(disk)
 	}
 	return newRPCClient(disk)

@@ -18,12 +18,35 @@ package cmd

 import "time"

+// BackendType - represents different backend types.
+type BackendType int
+
+// Enum for different backend types.
+const (
+	Unknown BackendType = iota
+	// Filesystem backend.
+	FS
+	// Multi disk single node XL backend.
+	XL
+	// Add your own backend.
+)
+
 // StorageInfo - represents total capacity of underlying storage.
 type StorageInfo struct {
 	// Total disk space.
 	Total int64
 	// Free available disk space.
 	Free int64
+	// Backend type.
+	Backend struct {
+		// Represents various backend types, currently on FS and XL.
+		Type BackendType
+
+		// Following fields are only meaningful if BackendType is XL.
+		OnlineDisks  int // Online disks during server startup.
+		OfflineDisks int // Offline disks during server startup.
+		Quorum       int // Minimum disks required for successful operations.
+	}
+}

 // BucketInfo - represents bucket metadata.
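The Backend sub-struct lets callers report backend-specific health without a type assertion on the ObjectLayer. The FS layer sets only Type (shown earlier); a hedged sketch of how an XL StorageInfo might be filled in — the disk counts and the quorum expression are illustrative, not taken from this commit:

	storageInfo := StorageInfo{Total: total, Free: free}
	storageInfo.Backend.Type = XL
	storageInfo.Backend.OnlineDisks = onlineDisks   // counted at startup
	storageInfo.Backend.OfflineDisks = offlineDisks // counted at startup
	storageInfo.Backend.Quorum = len(storageDisks)/2 + 1
	return storageInfo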

@@ -22,7 +22,6 @@ import "io"
 type ObjectLayer interface {
 	// Storage operations.
 	Shutdown() error
-	HealDiskMetadata() error
 	StorageInfo() StorageInfo

 	// Bucket operations.

cmd/posix.go

@@ -40,10 +40,11 @@ const (

 // posix - implements StorageAPI interface.
 type posix struct {
 	ioErrCount int32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	diskPath   string
-	minFreeSpace  int64
-	minFreeInodes int64
+	suppliedDiskPath string
+	minFreeSpace     int64
+	minFreeInodes    int64
 }

 var errFaultyDisk = errors.New("Faulty disk")

@@ -95,6 +96,10 @@ func newPosix(diskPath string) (StorageAPI, error) {
 	if diskPath == "" {
 		return nil, errInvalidArgument
 	}
+	suppliedDiskPath := diskPath
+	if idx := strings.LastIndex(diskPath, ":"); idx != -1 {
+		diskPath = diskPath[idx+1:]
+	}
 	var err error
 	// Disallow relative paths, figure out absolute paths.
 	diskPath, err = filepath.Abs(diskPath)

@@ -102,9 +107,10 @@ func newPosix(diskPath string) (StorageAPI, error) {
 		return nil, err
 	}
 	fs := &posix{
-		diskPath:      diskPath,
-		minFreeSpace:  fsMinFreeSpace,
-		minFreeInodes: fsMinFreeInodesPercent,
+		suppliedDiskPath: suppliedDiskPath,
+		diskPath:         diskPath,
+		minFreeSpace:     fsMinFreeSpace,
+		minFreeInodes:    fsMinFreeInodesPercent,
 	}
 	st, err := os.Stat(preparePath(diskPath))
 	if err != nil {

@@ -162,6 +168,11 @@ func (s posix) checkDiskFree() (err error) {
 	return nil
 }

+// Implements stringer compatible interface.
+func (s *posix) String() string {
+	return s.suppliedDiskPath
+}
+
 // DiskInfo provides current information about disk space usage,
 // total free inodes and underlying filesystem.
 func (s *posix) DiskInfo() (info disk.Info, err error) {
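suppliedDiskPath preserves the operator-facing form of a disk endpoint: a distributed disk may be supplied as "host:/export/path", and newPosix now strips the host prefix for local I/O while String() echoes the original spelling in startup messages. An illustration with a hypothetical endpoint:

	disk, err := newPosix("storage-node1:/export/disk1")
	if err != nil {
		return err
	}
	// File I/O goes to /export/disk1; display keeps the supplied form.
	fmt.Println(disk) // prints: storage-node1:/export/disk1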
|
@ -66,6 +66,18 @@ func TestPostPolicyHandler(t *testing.T) {
|
||||||
|
|
||||||
// testPostPolicyHandler - Tests validate post policy handler uploading objects.
|
// testPostPolicyHandler - Tests validate post policy handler uploading objects.
|
||||||
func testPostPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
func testPostPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||||
|
root, err := newTestConfig("us-east-1")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Initializing config.json failed")
|
||||||
|
}
|
||||||
|
defer removeAll(root)
|
||||||
|
|
||||||
|
// Register event notifier.
|
||||||
|
err = initEventNotifier(obj)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Initializing event notifiers failed")
|
||||||
|
}
|
||||||
|
|
||||||
// get random bucket name.
|
// get random bucket name.
|
||||||
bucketName := getRandomBucketName()
|
bucketName := getRandomBucketName()
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,136 @@
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"fmt"
+	"sync"
+
+	humanize "github.com/dustin/go-humanize"
+	"github.com/minio/mc/pkg/console"
+)
+
+// Helper to format integer sequences into a friendlier, user-consumable
+// "current/total" form, e.g. "01/16".
+func int2Str(i int, t int) string {
+	if i < 10 {
+		if t < 10 {
+			return fmt.Sprintf("0%d/0%d", i, t)
+		}
+		return fmt.Sprintf("0%d/%d", i, t)
+	}
+	return fmt.Sprintf("%d/%d", i, t)
+}
+
+// Prints a given message once.
+type printOnceFunc func(msg string)
+
+// printOnceFn is a constructor returning a function that prints a message
+// exactly once; internally it uses sync.Once to perform exactly one action.
+func printOnceFn() printOnceFunc {
+	var once sync.Once
+	return func(msg string) {
+		once.Do(func() { console.Println(msg) })
+	}
+}
+
+// Prints a custom message when healing is required for the XL and distributed XL backends.
+func printHealMsg(firstEndpoint string, storageDisks []StorageAPI, fn printOnceFunc) {
+	msg := getHealMsg(firstEndpoint, storageDisks)
+	fn(msg)
+}
+
+// Constructs a formatted heal message when the cluster is found to be in a state
+// where it requires healing. Healing is optional; the server continues to
+// initialize the object layer after printing this message. It is up to the end
+// user to perform a heal if needed.
+func getHealMsg(firstEndpoint string, storageDisks []StorageAPI) string {
+	msg := fmt.Sprintln("\nData volume requires HEALING. Please run the following command:")
+	msg += "MINIO_ACCESS_KEY=%s "
+	msg += "MINIO_SECRET_KEY=%s "
+	msg += "minio control heal %s"
+	creds := serverConfig.GetCredential()
+	msg = fmt.Sprintf(msg, creds.AccessKeyID, creds.SecretAccessKey, firstEndpoint)
+	disksInfo, _, _ := getDisksInfo(storageDisks)
+	for i, info := range disksInfo {
+		msg += fmt.Sprintf(
+			"\n[%s] %s - %s %s",
+			int2Str(i+1, len(storageDisks)),
+			storageDisks[i],
+			humanize.IBytes(uint64(info.Total)),
+			func() string {
+				if info.Total > 0 {
+					return "online"
+				}
+				return "offline"
+			}(),
+		)
+	}
+	return msg
+}
+
+// Prints a regular message when we have sufficient disks to start the cluster.
+func printRegularMsg(storageDisks []StorageAPI, fn printOnceFunc) {
+	msg := getRegularMsg(storageDisks)
+	fn(msg)
+}
+
+// Constructs a formatted regular message when we have sufficient disks to start the cluster.
+func getRegularMsg(storageDisks []StorageAPI) string {
+	msg := colorBlue("\nInitializing data volume.")
+	disksInfo, _, _ := getDisksInfo(storageDisks)
+	for i, info := range disksInfo {
+		msg += fmt.Sprintf(
+			"\n[%s] %s - %s %s",
+			int2Str(i+1, len(storageDisks)),
+			storageDisks[i],
+			humanize.IBytes(uint64(info.Total)),
+			func() string {
+				if info.Total > 0 {
+					return "online"
+				}
+				return "offline"
+			}(),
+		)
+	}
+	return msg
+}
+
+// Prints an initialization message when the cluster is being initialized for the first time.
+func printFormatMsg(storageDisks []StorageAPI, fn printOnceFunc) {
+	msg := getFormatMsg(storageDisks)
+	fn(msg)
+}
+
+// Generates a formatted message when the cluster is being initialized for the first time.
+func getFormatMsg(storageDisks []StorageAPI) string {
+	msg := colorBlue("\nInitializing data volume for the first time.")
+	disksInfo, _, _ := getDisksInfo(storageDisks)
+	for i, info := range disksInfo {
+		msg += fmt.Sprintf(
+			"\n[%s] %s - %s %s",
+			int2Str(i+1, len(storageDisks)),
+			storageDisks[i],
+			humanize.IBytes(uint64(info.Total)),
+			func() string {
+				if info.Total > 0 {
+					return "online"
+				}
+				return "offline"
+			}(),
+		)
+	}
+	return msg
+}
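Note: `printOnceFn` exists because the retry loop added later in this change may evaluate the same cluster state on every attempt; wrapping console output in a `sync.Once` keeps the startup banner from being repeated. A minimal, self-contained sketch of the pattern (standalone `main`, using `fmt` instead of the console package):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // printOnceFn returns a closure that prints its message exactly once,
    // no matter how many times it is invoked.
    func printOnceFn() func(msg string) {
    	var once sync.Once
    	return func(msg string) {
    		once.Do(func() { fmt.Println(msg) })
    	}
    }

    func main() {
    	printOnce := printOnceFn()
    	for i := 0; i < 3; i++ {
    		printOnce("Initializing data volume.") // printed only on the first iteration
    	}
    }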
@@ -0,0 +1,88 @@
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import "testing"
+
+// Tests that the heal message is correct and properly formatted.
+func TestHealMsg(t *testing.T) {
+	storageDisks, fsDirs := prepareXLStorageDisks(t)
+	defer removeRoots(fsDirs)
+	testCases := []struct {
+		endPoint     string
+		storageDisks []StorageAPI
+	}{
+		{
+			endPoint:     "http://10.1.10.1:9000",
+			storageDisks: storageDisks,
+		},
+	}
+	for i, testCase := range testCases {
+		msg := getHealMsg(testCase.endPoint, testCase.storageDisks)
+		if msg == "" {
+			t.Fatalf("Test: %d Unable to get heal message.", i+1)
+		}
+		msg = getRegularMsg(testCase.storageDisks)
+		if msg == "" {
+			t.Fatalf("Test: %d Unable to get regular message.", i+1)
+		}
+		msg = getFormatMsg(testCase.storageDisks)
+		if msg == "" {
+			t.Fatalf("Test: %d Unable to get format message.", i+1)
+		}
+	}
+}
+
+// Tests disk info; validates that we return a proper disk info structure
+// even when certain disks are not available.
+func TestDisksInfo(t *testing.T) {
+	storageDisks, fsDirs := prepareXLStorageDisks(t)
+	defer removeRoots(fsDirs)
+
+	testCases := []struct {
+		storageDisks []StorageAPI
+		onlineDisks  int
+		offlineDisks int
+	}{
+		{
+			storageDisks: storageDisks,
+			onlineDisks:  16,
+			offlineDisks: 0,
+		},
+		{
+			storageDisks: prepareNOfflineDisks(deepCopyStorageDisks(storageDisks), 4, t),
+			onlineDisks:  12,
+			offlineDisks: 4,
+		},
+		{
+			storageDisks: prepareNOfflineDisks(deepCopyStorageDisks(storageDisks), 16, t),
+			onlineDisks:  0,
+			offlineDisks: 16,
+		},
+	}
+
+	for i, testCase := range testCases {
+		_, onlineDisks, offlineDisks := getDisksInfo(testCase.storageDisks)
+		if testCase.onlineDisks != onlineDisks {
+			t.Errorf("Test %d: Expected online disks %d, got %d", i+1, testCase.onlineDisks, onlineDisks)
+		}
+		if testCase.offlineDisks != offlineDisks {
+			t.Errorf("Test %d: Expected offline disks %d, got %d", i+1, testCase.offlineDisks, offlineDisks)
+		}
+	}
+}
@@ -19,6 +19,7 @@ package cmd
 import (
 	"time"
 
+	"github.com/minio/mc/pkg/console"
 	"github.com/minio/minio-go/pkg/set"
 )
 
@@ -49,24 +50,23 @@ func init() {
 | Quorum   | Quorum Formatted         |                       |
 +----------+--------------------------+-----------------------+
 | All      | Quorum                   | Print message saying  |
-|          | Formatted,               | "Heal via minioctl"   |
+|          | Formatted,               | "Heal via control"    |
 |          | some unformatted         | and initObjectLayer   |
 +----------+--------------------------+-----------------------+
 | All      | None Formatted           | FormatDisks           |
 |          |                          | and initObjectLayer   |
 |          |                          |                       |
 +----------+--------------------------+-----------------------+
-|          |                          | Wait for notify from  |
-| Quorum   |                          | "Heal via minioctl"   |
-|          | Quorum UnFormatted       |                       |
-+----------+--------------------------+-----------------------+
 | No       |                          | Wait till enough      |
 | Quorum   | _                        | nodes are online and  |
 |          |                          | one of the above      |
 |          |                          | sections apply        |
 +----------+--------------------------+-----------------------+
+|          |                          |                       |
+| Quorum   | Quorum UnFormatted       | Abort                 |
++----------+--------------------------+-----------------------+
 
-N B A disk can be in one of the following states.
+A disk can be in one of the following states.
 - Unformatted
 - Formatted
 - Corrupted
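Note: every branch in the decision table above keys off a simple majority quorum. A small worked example of that arithmetic (the quorum expression mirrors the `len(storageDisks)/2+1` used later in this change; the helper name here is illustrative, not part of the patch):

    package main

    import "fmt"

    // haveQuorum reports whether count disks out of diskCount form a majority.
    func haveQuorum(count, diskCount int) bool {
    	quorum := diskCount/2 + 1 // simple majority
    	return count >= quorum
    }

    func main() {
    	fmt.Println(haveQuorum(9, 16)) // true: 9 >= 9
    	fmt.Println(haveQuorum(8, 16)) // false: 8 < 9
    }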
@@ -101,7 +101,7 @@ const (
 	Abort
 )
 
-func prepForInit(disks []string, sErrs []error, diskCount int) InitActions {
+func prepForInitXL(firstDisk bool, sErrs []error, diskCount int) InitActions {
 	// Count errors by error value.
 	errMap := make(map[error]int)
 	for _, err := range sErrs {
@@ -114,34 +114,7 @@ func prepForInit(disks []string, sErrs []error, diskCount int) InitActions {
 	disksUnformatted := errMap[errUnformattedDisk]
 	disksCorrupted := errMap[errCorruptedFormat]
 
-	// All disks are unformatted, proceed to formatting disks.
-	if disksUnformatted == diskCount {
-		// Only the first server formats an uninitialized setup, others wait for notification.
-		if isLocalStorage(disks[0]) {
-			return FormatDisks
-		}
-		return WaitForFormatting
-	} else if disksUnformatted >= quorum {
-		if disksUnformatted+disksOffline == diskCount {
-			return WaitForAll
-		}
-		// Some disks possibly corrupted.
-		return WaitForHeal
-	}
-
-	// Already formatted, proceed to initialization of object layer.
-	if disksFormatted == diskCount {
-		return InitObjectLayer
-	} else if disksFormatted >= quorum {
-		if (disksFormatted+disksOffline == diskCount) ||
-			(disksFormatted+disksUnformatted == diskCount) {
-			return InitObjectLayer
-		}
-		// Some disks possibly corrupted.
-		return WaitForHeal
-	}
-
-	// No Quorum.
+	// No quorum, lots of offline disks, wait for quorum.
 	if disksOffline >= quorum {
 		return WaitForQuorum
 	}
@@ -151,57 +124,113 @@ func prepForInit(disks []string, sErrs []error, diskCount int) InitActions {
 	if disksCorrupted >= quorum {
 		return Abort
 	}
-	// Some of the formatted disks are possibly offline.
-	return WaitForHeal
-}
-
-func retryFormattingDisks(disks []string, storageDisks []StorageAPI) ([]StorageAPI, error) {
-	nextBackoff := time.Duration(0)
-	var err error
-	done := false
-	for !done {
-		select {
-		case <-time.After(nextBackoff * time.Second):
-			// Attempt to load all `format.json`.
-			_, sErrs := loadAllFormats(storageDisks)
-			switch prepForInit(disks, sErrs, len(storageDisks)) {
-			case Abort:
-				err = errCorruptedFormat
-				done = true
-			case FormatDisks:
-				err = initFormatXL(storageDisks)
-				done = true
-			case InitObjectLayer:
-				err = nil
-				done = true
-			}
-		case <-globalWakeupCh:
-			// Reset nextBackoff to reduce the subsequent wait and re-read
-			// format.json from all disks again.
-			nextBackoff = 0
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	return storageDisks, nil
-}
-
-func waitForFormattingDisks(disks, ignoredDisks []string) ([]StorageAPI, error) {
-	// FS Setup
+
+	// All disks are unformatted, proceed to formatting disks.
+	if disksUnformatted == diskCount {
+		// Only the first server formats an uninitialized setup, others wait for notification.
+		if firstDisk { // First node always initializes.
+			return FormatDisks
+		}
+		return WaitForFormatting
+	}
+
+	// Total disks unformatted are in quorum; verify if we have some offline disks.
+	if disksUnformatted >= quorum {
+		// Some disks offline and some disks unformatted, wait for all of them to come online.
+		if disksUnformatted+disksOffline == diskCount {
+			return WaitForAll
+		}
+		// Some disks possibly corrupted and too many unformatted disks.
+		return Abort
+	}
+
+	// Already formatted and in quorum, proceed to initialization of object layer.
+	if disksFormatted >= quorum {
+		if disksFormatted+disksOffline == diskCount {
+			return InitObjectLayer
+		}
+		// Some of the formatted disks are possibly corrupted or unformatted, heal them.
+		return WaitForHeal
+	} // No quorum, wait for quorum number of disks.
+	return WaitForQuorum
+}
+
+// Implements a jitter backoff loop for formatting all disks during
+// initialization of the server.
+func retryFormattingDisks(firstDisk bool, firstEndpoint string, storageDisks []StorageAPI) error {
+	if storageDisks == nil {
+		return errInvalidArgument
+	}
+
+	// Create a done channel to control the retry go routine.
+	doneCh := make(chan struct{}, 1)
+
+	// Indicate to our routine to exit cleanly upon return.
+	defer close(doneCh)
+
+	// Wait on the jitter retry loop.
+	for range newRetryTimer(time.Second, time.Second*30, MaxJitter, doneCh) {
+		// Attempt to load all `format.json`.
+		formatConfigs, sErrs := loadAllFormats(storageDisks)
+		// Check if this is an XL or distributed XL setup; anything > 1 is considered an XL backend.
+		if len(formatConfigs) > 1 {
+			switch prepForInitXL(firstDisk, sErrs, len(storageDisks)) {
+			case Abort:
+				return errCorruptedFormat
+			case FormatDisks:
+				console.Eraseline()
+				printFormatMsg(storageDisks, printOnceFn())
+				return initFormatXL(storageDisks)
+			case InitObjectLayer:
+				console.Eraseline()
+				// Validate formats loaded before proceeding forward.
+				err := genericFormatCheck(formatConfigs, sErrs)
+				if err == nil {
+					printRegularMsg(storageDisks, printOnceFn())
+				}
+				return err
+			case WaitForHeal:
+				// Validate formats loaded before proceeding forward.
+				err := genericFormatCheck(formatConfigs, sErrs)
+				if err == nil {
+					printHealMsg(firstEndpoint, storageDisks, printOnceFn())
+				}
+				return err
+			case WaitForQuorum:
+				console.Printf(
+					"Initializing data volume. Waiting for minimum %d servers to come online.\n",
+					len(storageDisks)/2+1,
+				)
+			case WaitForAll:
+				console.Println("Initializing data volume for first time. Waiting for other servers to come online.")
+			case WaitForFormatting:
+				console.Println("Initializing data volume for first time. Waiting for first server to come online.")
+			}
+			continue
+		} // else we have an FS backend now; check the FS format as well.
+		if isFormatFound(formatConfigs) {
+			console.Eraseline()
+			// Validate formats loaded before proceeding forward.
+			return genericFormatCheck(formatConfigs, sErrs)
+		} // else initialize the format for FS.
+		return initFormatFS(storageDisks[0])
+	} // Return here.
+	return nil
+}
+
+// Initialize storage disks based on input arguments.
+func initStorageDisks(disks, ignoredDisks []string) ([]StorageAPI, error) {
+	// Single disk means we will use the FS backend.
 	if len(disks) == 1 {
 		storage, err := newStorageAPI(disks[0])
 		if err != nil && err != errDiskNotFound {
 			return nil, err
 		}
 		return []StorageAPI{storage}, nil
-	}
-
-	// XL Setup
+	} // Otherwise proceed with XL setup.
+
 	if err := checkSufficientDisks(disks); err != nil {
 		return nil, err
 	}
 
 	disksSet := set.NewStringSet()
 	if len(ignoredDisks) > 0 {
 		disksSet = set.CreateStringSet(ignoredDisks...)
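Note: `retryFormattingDisks` above consumes the attempt channel produced by `newRetryTimer` (added later in this change) and relies on `defer close(doneCh)` to stop the producer goroutine on every return path. A stripped-down sketch of that producer/consumer shape (names and timings are illustrative, not from the patch):

    package main

    import (
    	"fmt"
    	"time"
    )

    // attempts emits one tick per retry attempt until doneCh is closed.
    func attempts(doneCh <-chan struct{}) <-chan struct{} {
    	ch := make(chan struct{})
    	go func() {
    		defer close(ch)
    		for {
    			select {
    			case ch <- struct{}{}:
    				time.Sleep(10 * time.Millisecond) // stand-in for backoff
    			case <-doneCh:
    				return // consumer finished; stop producing
    			}
    		}
    	}()
    	return ch
    }

    func main() {
    	doneCh := make(chan struct{})
    	defer close(doneCh) // stops the goroutine when main returns
    	n := 0
    	for range attempts(doneCh) {
    		if n++; n == 3 {
    			fmt.Println("initialized after", n, "attempts")
    			break
    		}
    	}
    }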
@@ -223,6 +252,30 @@ func waitForFormattingDisks(disks, ignoredDisks []string) ([]StorageAPI, error)
 		}
 		storageDisks[index] = storage
 	}
-	// Start wait loop retrying formatting disks.
-	return retryFormattingDisks(disks, storageDisks)
+	return storageDisks, nil
+}
+
+// Format disks before initialization of the object layer.
+func waitForFormatDisks(firstDisk bool, firstEndpoint string, storageDisks []StorageAPI) (err error) {
+	if storageDisks == nil {
+		return errInvalidArgument
+	}
+	// Start a retry loop, retrying until the disks are formatted properly or we
+	// have reached a conditional quorum of formatted disks.
+	err = retryFormattingDisks(firstDisk, firstEndpoint, storageDisks)
+	if err != nil {
+		return err
+	}
+	if firstDisk {
+		// Notify everyone else that they can try init again.
+		for _, storage := range storageDisks {
+			switch store := storage.(type) {
+			// Wake up remote storage servers to initiate init again.
+			case networkStorage:
+				var reply GenericReply
+				_ = store.rpcClient.Call("Storage.TryInitHandler", &GenericArgs{}, &reply)
+			}
+		}
+	}
+	return nil
 }
@@ -16,10 +16,7 @@
 
 package cmd
 
-import (
-	"runtime"
-	"testing"
-)
+import "testing"
 
 func (action InitActions) String() string {
 	switch action {
@@ -41,43 +38,8 @@ func (action InitActions) String() string {
 		return "Unknown"
 	}
 }
-func TestPrepForInit(t *testing.T) {
-	var disks []string
-	if runtime.GOOS == "windows" {
-		disks = []string{
-			`c:\mnt\disk1`,
-			`c:\mnt\disk2`,
-			`c:\mnt\disk3`,
-			`c:\mnt\disk4`,
-			`c:\mnt\disk5`,
-			`c:\mnt\disk6`,
-			`c:\mnt\disk7`,
-			`c:\mnt\disk8`,
-		}
-	} else {
-		disks = []string{
-			"/mnt/disk1",
-			"/mnt/disk2",
-			"/mnt/disk3",
-			"/mnt/disk4",
-			"/mnt/disk5",
-			"/mnt/disk6",
-			"/mnt/disk7",
-			"/mnt/disk8",
-		}
-	}
-	// Building up disks that resolve to localhost and remote w.r.t isLocalStorage().
-	var (
-		disksLocal  []string
-		disksRemote []string
-	)
-	for i := range disks {
-		disksLocal = append(disksLocal, "localhost:"+disks[i])
-	}
-	// Using 4.4.4.4 as a known non-local address.
-	for i := range disks {
-		disksRemote = append(disksRemote, "4.4.4.4:"+disks[i])
-	}
+func TestPrepForInitXL(t *testing.T) {
 	// All disks are unformatted, a fresh setup.
 	allUnformatted := []error{
 		errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk,
@@ -120,32 +82,32 @@ func TestPrepForInit(t *testing.T) {
 
 	testCases := []struct {
 		// Params for prepForInit().
-		disks     []string
+		firstDisk bool
 		errs      []error
 		diskCount int
 		action    InitActions
 	}{
 		// Local disks.
-		{disksLocal, allFormatted, 8, InitObjectLayer},
-		{disksLocal, quorumFormatted, 8, InitObjectLayer},
-		{disksLocal, allUnformatted, 8, FormatDisks},
-		{disksLocal, quorumUnformatted, 8, WaitForAll},
-		{disksLocal, quorumUnformattedSomeCorrupted, 8, WaitForHeal},
-		{disksLocal, noQuourm, 8, WaitForQuorum},
-		{disksLocal, minorityCorrupted, 8, WaitForHeal},
-		{disksLocal, majorityCorrupted, 8, Abort},
+		{true, allFormatted, 8, InitObjectLayer},
+		{true, quorumFormatted, 8, InitObjectLayer},
+		{true, allUnformatted, 8, FormatDisks},
+		{true, quorumUnformatted, 8, WaitForAll},
+		{true, quorumUnformattedSomeCorrupted, 8, Abort},
+		{true, noQuourm, 8, WaitForQuorum},
+		{true, minorityCorrupted, 8, WaitForHeal},
+		{true, majorityCorrupted, 8, Abort},
 		// Remote disks.
-		{disksRemote, allFormatted, 8, InitObjectLayer},
-		{disksRemote, quorumFormatted, 8, InitObjectLayer},
-		{disksRemote, allUnformatted, 8, WaitForFormatting},
-		{disksRemote, quorumUnformatted, 8, WaitForAll},
-		{disksRemote, quorumUnformattedSomeCorrupted, 8, WaitForHeal},
-		{disksRemote, noQuourm, 8, WaitForQuorum},
-		{disksRemote, minorityCorrupted, 8, WaitForHeal},
-		{disksRemote, majorityCorrupted, 8, Abort},
+		{false, allFormatted, 8, InitObjectLayer},
+		{false, quorumFormatted, 8, InitObjectLayer},
+		{false, allUnformatted, 8, WaitForFormatting},
+		{false, quorumUnformatted, 8, WaitForAll},
+		{false, quorumUnformattedSomeCorrupted, 8, Abort},
+		{false, noQuourm, 8, WaitForQuorum},
+		{false, minorityCorrupted, 8, WaitForHeal},
+		{false, majorityCorrupted, 8, Abort},
 	}
 	for i, test := range testCases {
-		actual := prepForInit(test.disks, test.errs, test.diskCount)
+		actual := prepForInitXL(test.firstDisk, test.errs, test.diskCount)
 		if actual != test.action {
 			t.Errorf("Test %d expected %s but receieved %s\n", i+1, test.action, actual)
 		}
@@ -0,0 +1,109 @@
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// lockedRandSource provides a protected rand source; implements the rand.Source interface.
+type lockedRandSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an
+// int64.
+func (r *lockedRandSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 5
+
+// MaxJitter will randomize over the full exponential backoff time.
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time.
+const NoJitter = 0.0
+
+// Global random source for fetching random values.
+var globalRandomSource = rand.New(&lockedRandSource{
+	src: rand.NewSource(time.Now().UTC().UnixNano()),
+})
+
+// newRetryTimer creates a timer with exponentially increasing delays
+// until the maximum retry attempts are reached.
+func newRetryTimer(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan struct{} {
+	attemptCh := make(chan struct{})
+
+	// Computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// Normalize jitter to the range [0, 1.0].
+		if jitter < NoJitter {
+			jitter = NoJitter
+		}
+		if jitter > MaxJitter {
+			jitter = MaxJitter
+		}
+
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(globalRandomSource.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		var nextBackoff int
+		for {
+			select {
+			// Attempts start.
+			case attemptCh <- struct{}{}:
+				nextBackoff++
+			case <-globalWakeupCh:
+				// Reset nextBackoff to reduce the subsequent wait and re-read
+				// format.json from all disks again.
+				nextBackoff = 0
+			case <-doneCh:
+				// Stop the routine.
+				return
+			}
+			time.Sleep(exponentialBackoffWait(nextBackoff))
+		}
+	}()
+	return attemptCh
+}
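Note: with `unit` = 1s, `cap` = 30s and `MaxJitter` (the values used by `retryFormattingDisks` above), attempt n sleeps a random duration in (0, min(30s, 2^n seconds)]. A tiny standalone sketch of the formula with illustrative values:

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // backoff returns the jittered exponential delay for a given attempt.
    func backoff(unit, cap time.Duration, jitter float64, attempt int) time.Duration {
    	sleep := unit * time.Duration(1<<uint(attempt))
    	if sleep > cap {
    		sleep = cap
    	}
    	// Subtract a random fraction of the sleep, scaled by the jitter knob.
    	sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
    	return sleep
    }

    func main() {
    	for attempt := 0; attempt < 7; attempt++ {
    		// attempt 0 -> up to 1s, 1 -> up to 2s, ..., 5 and beyond capped at 30s.
    		fmt.Println(attempt, backoff(time.Second, 30*time.Second, 1.0, attempt))
    	}
    }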
@@ -31,15 +31,15 @@ func newObjectLayerFn() ObjectLayer {
 }
 
 // newObjectLayer - initialize any object layer depending on the number of disks.
-func newObjectLayer(disks, ignoredDisks []string) (ObjectLayer, error) {
+func newObjectLayer(storageDisks []StorageAPI) (ObjectLayer, error) {
 	var objAPI ObjectLayer
 	var err error
-	if len(disks) == 1 {
+	if len(storageDisks) == 1 {
 		// Initialize FS object layer.
-		objAPI, err = newFSObjects(disks[0])
+		objAPI, err = newFSObjects(storageDisks[0])
 	} else {
 		// Initialize XL object layer.
-		objAPI, err = newXLObjects(disks, ignoredDisks)
+		objAPI, err = newXLObjects(storageDisks)
 	}
 	if err != nil {
 		return nil, err
@@ -58,20 +58,18 @@ func newObjectLayer(disks, ignoredDisks []string) (ObjectLayer, error) {
 		return nil, err
 	}
 
-	// Register the callback that should be called when the process shuts down.
-	globalShutdownCBs.AddObjectLayerCB(func() errCode {
-		if objAPI != nil {
-			if sErr := objAPI.Shutdown(); sErr != nil {
-				errorIf(err, "Unable to shutdown object API.")
-				return exitFailure
-			}
-		}
-		return exitSuccess
-	})
-
-	// Initialize a new event notifier.
-	err = initEventNotifier(objAPI)
-	fatalIf(err, "Unable to initialize event notification.")
+	if globalShutdownCBs != nil {
+		// Register the callback that should be called when the process shuts down.
+		globalShutdownCBs.AddObjectLayerCB(func() errCode {
+			if objAPI != nil {
+				if sErr := objAPI.Shutdown(); sErr != nil {
+					errorIf(err, "Unable to shutdown object API.")
+					return exitFailure
+				}
+			}
+			return exitSuccess
+		})
+	}
 
 	// Initialize and load bucket policies.
 	err = initBucketPolicies(objAPI)
@@ -83,45 +81,28 @@ func newObjectLayer(disks, ignoredDisks []string) (ObjectLayer, error) {
 
 // configureServer handler returns final handler for the http server.
 func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
-	// Initialize storage rpc servers for every disk that is hosted on this node.
-	storageRPCs, err := newRPCServer(srvCmdConfig)
-	fatalIf(err, "Unable to initialize storage RPC server.")
-
-	// Initialize API.
-	apiHandlers := objectAPIHandlers{
-		ObjectAPI: newObjectLayerFn,
-	}
-
-	// Initialize Web.
-	webHandlers := &webAPIHandlers{
-		ObjectAPI: newObjectLayerFn,
-	}
-
-	// Initialize Controller.
-	controllerHandlers := &controllerAPIHandlers{
-		ObjectAPI: newObjectLayerFn,
-	}
-
 	// Initialize router.
 	mux := router.NewRouter()
 
-	// Register all routers.
-	registerStorageRPCRouters(mux, storageRPCs)
+	// Register storage rpc router.
+	registerStorageRPCRouters(mux, srvCmdConfig)
 
 	// Initialize distributed NS lock.
-	initDistributedNSLock(mux, srvCmdConfig)
+	if isDistributedSetup(srvCmdConfig.disks) {
+		registerDistNSLockRouter(mux, srvCmdConfig)
+	}
 
 	// Register controller rpc router.
-	registerControllerRPCRouter(mux, controllerHandlers)
+	registerControllerRPCRouter(mux, srvCmdConfig)
 
 	// set environmental variable MINIO_BROWSER=off to disable minio web browser.
 	// By default minio web browser is enabled.
 	if !strings.EqualFold(os.Getenv("MINIO_BROWSER"), "off") {
-		registerWebRouter(mux, webHandlers)
+		registerWebRouter(mux)
 	}
 
-	// Add new routers here.
-	registerAPIRouter(mux, apiHandlers)
+	// Add API router.
+	registerAPIRouter(mux)
 
 	// List of some generic handlers which are applied for all incoming requests.
 	var handlerFns = []HandlerFunc{
@@ -28,8 +28,6 @@ import (
 	"github.com/minio/cli"
 )
 
-var srvConfig serverCmdConfig
-
 var serverFlags = []cli.Flag{
 	cli.StringFlag{
 		Name: "address",
|
||||||
MINIO_CACHE_SIZE: Set total cache size in NN[GB|MB|KB]. Defaults to 8GB.
|
MINIO_CACHE_SIZE: Set total cache size in NN[GB|MB|KB]. Defaults to 8GB.
|
||||||
MINIO_CACHE_EXPIRY: Set cache expiration duration in NN[h|m|s]. Defaults to 72 hours.
|
MINIO_CACHE_EXPIRY: Set cache expiration duration in NN[h|m|s]. Defaults to 72 hours.
|
||||||
|
|
||||||
|
SECURITY:
|
||||||
|
MINIO_SECURE_CONSOLE: Set secure console to '0' to disable printing secret key. Defaults to '1'.
|
||||||
|
|
||||||
EXAMPLES:
|
EXAMPLES:
|
||||||
1. Start minio server.
|
1. Start minio server.
|
||||||
$ minio {{.Name}} /home/shared
|
$ minio {{.Name}} /home/shared
|
||||||
|
@ -98,6 +99,7 @@ type serverCmdConfig struct {
|
||||||
serverAddr string
|
serverAddr string
|
||||||
disks []string
|
disks []string
|
||||||
ignoredDisks []string
|
ignoredDisks []string
|
||||||
|
storageDisks []StorageAPI
|
||||||
}
|
}
|
||||||
|
|
||||||
// getListenIPs - gets all the ips to listen on.
|
// getListenIPs - gets all the ips to listen on.
|
||||||
|
@ -241,13 +243,21 @@ func checkNamingDisks(disks []string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check server arguments.
|
func validateRemoteDisks(disks []StorageAPI) error {
|
||||||
func checkServerSyntax(c *cli.Context) {
|
for _, disk := range disks {
|
||||||
if !c.Args().Present() || c.Args().First() == "help" {
|
_, err := disk.DiskInfo()
|
||||||
cli.ShowCommandHelpAndExit(c, "server", 1)
|
if _, ok := err.(*net.OpError); ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
disks := c.Args()
|
return nil
|
||||||
if len(disks) > 1 {
|
}
|
||||||
|
|
||||||
|
// Validate input disks.
|
||||||
|
func validateDisks(disks []string, ignoredDisks []string) []StorageAPI {
|
||||||
|
isXL := len(disks) > 1
|
||||||
|
if isXL {
|
||||||
// Validate if input disks have duplicates in them.
|
// Validate if input disks have duplicates in them.
|
||||||
err := checkDuplicates(disks)
|
err := checkDuplicates(disks)
|
||||||
fatalIf(err, "Invalid disk arguments for server.")
|
fatalIf(err, "Invalid disk arguments for server.")
|
||||||
|
@ -262,6 +272,13 @@ func checkServerSyntax(c *cli.Context) {
|
||||||
err = checkNamingDisks(disks)
|
err = checkNamingDisks(disks)
|
||||||
fatalIf(err, "Invalid disk arguments for server.")
|
fatalIf(err, "Invalid disk arguments for server.")
|
||||||
}
|
}
|
||||||
|
storageDisks, err := initStorageDisks(disks, ignoredDisks)
|
||||||
|
fatalIf(err, "Unable to initialize storage disks.")
|
||||||
|
if isXL {
|
||||||
|
err = validateRemoteDisks(storageDisks)
|
||||||
|
fatalIf(err, "Unable to validate remote disks.")
|
||||||
|
}
|
||||||
|
return storageDisks
|
||||||
}
|
}
|
||||||
|
|
||||||
// Extract port number from address address should be of the form host:port.
|
// Extract port number from address address should be of the form host:port.
|
||||||
|
@ -296,57 +313,21 @@ func isDistributedSetup(disks []string) (isDist bool) {
|
||||||
return isDist
|
return isDist
|
||||||
}
|
}
|
||||||
|
|
||||||
// Format disks before initialization object layer.
|
|
||||||
func formatDisks(disks, ignoredDisks []string) error {
|
|
||||||
storageDisks, err := waitForFormattingDisks(disks, ignoredDisks)
|
|
||||||
for _, storage := range storageDisks {
|
|
||||||
if storage == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch store := storage.(type) {
|
|
||||||
// Closing associated TCP connections since
|
|
||||||
// []StorageAPI is garbage collected eventually.
|
|
||||||
case networkStorage:
|
|
||||||
store.rpcClient.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if isLocalStorage(disks[0]) {
|
|
||||||
// notify every one else that they can try init again.
|
|
||||||
for _, storage := range storageDisks {
|
|
||||||
switch store := storage.(type) {
|
|
||||||
// Closing associated TCP connections since
|
|
||||||
// []StorageAPI is garbage collected
|
|
||||||
// eventually.
|
|
||||||
case networkStorage:
|
|
||||||
var reply GenericReply
|
|
||||||
_ = store.rpcClient.Call("Storage.TryInitHandler", &GenericArgs{}, &reply)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// serverMain handler called for 'minio server' command.
|
// serverMain handler called for 'minio server' command.
|
||||||
func serverMain(c *cli.Context) {
|
func serverMain(c *cli.Context) {
|
||||||
// Check 'server' cli arguments.
|
if !c.Args().Present() || c.Args().First() == "help" {
|
||||||
checkServerSyntax(c)
|
cli.ShowCommandHelpAndExit(c, "server", 1)
|
||||||
|
}
|
||||||
// Initialize server config.
|
|
||||||
initServerConfig(c)
|
|
||||||
|
|
||||||
// If https.
|
|
||||||
tls := isSSL()
|
|
||||||
|
|
||||||
// Server address.
|
// Server address.
|
||||||
serverAddress := c.String("address")
|
serverAddress := c.String("address")
|
||||||
|
|
||||||
// Check if requested port is available.
|
// Check if requested port is available.
|
||||||
port := getPort(serverAddress)
|
port := getPort(serverAddress)
|
||||||
err := checkPortAvailability(port)
|
fatalIf(checkPortAvailability(port), "Port unavailable %d", port)
|
||||||
fatalIf(err, "Port unavailable %d", port)
|
|
||||||
|
// Saves port in a globally accessible value.
|
||||||
|
globalMinioPort = port
|
||||||
|
|
||||||
// Disks to be ignored in server init, to skip format healing.
|
// Disks to be ignored in server init, to skip format healing.
|
||||||
ignoredDisks := strings.Split(c.String("ignore-disks"), ",")
|
ignoredDisks := strings.Split(c.String("ignore-disks"), ",")
|
||||||
|
@ -354,8 +335,35 @@ func serverMain(c *cli.Context) {
|
||||||
// Disks to be used in server init.
|
// Disks to be used in server init.
|
||||||
disks := c.Args()
|
disks := c.Args()
|
||||||
|
|
||||||
isDist := isDistributedSetup(disks)
|
// Check 'server' cli arguments.
|
||||||
|
storageDisks := validateDisks(disks, ignoredDisks)
|
||||||
|
|
||||||
|
// Initialize server config.
|
||||||
|
initServerConfig(c)
|
||||||
|
|
||||||
|
// If https.
|
||||||
|
tls := isSSL()
|
||||||
|
|
||||||
|
// First disk argument check if it is local.
|
||||||
|
firstDisk := isLocalStorage(disks[0])
|
||||||
|
|
||||||
|
// Initialize and monitor shutdown signals.
|
||||||
|
err := initGracefulShutdown(os.Exit)
|
||||||
|
fatalIf(err, "Unable to initialize graceful shutdown operation")
|
||||||
|
|
||||||
|
// Configure server.
|
||||||
|
srvConfig := serverCmdConfig{
|
||||||
|
serverAddr: serverAddress,
|
||||||
|
disks: disks,
|
||||||
|
ignoredDisks: ignoredDisks,
|
||||||
|
storageDisks: storageDisks,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Configure server.
|
||||||
|
handler := configureServerHandler(srvConfig)
|
||||||
|
|
||||||
// Set nodes for dsync for distributed setup.
|
// Set nodes for dsync for distributed setup.
|
||||||
|
isDist := isDistributedSetup(disks)
|
||||||
if isDist {
|
if isDist {
|
||||||
err = initDsyncNodes(disks, port)
|
err = initDsyncNodes(disks, port)
|
||||||
fatalIf(err, "Unable to initialize distributed locking")
|
fatalIf(err, "Unable to initialize distributed locking")
|
||||||
|
@ -364,20 +372,7 @@ func serverMain(c *cli.Context) {
|
||||||
// Initialize name space lock.
|
// Initialize name space lock.
|
||||||
initNSLock(isDist)
|
initNSLock(isDist)
|
||||||
|
|
||||||
// Configure server.
|
// Initialize a new HTTP server.
|
||||||
srvConfig = serverCmdConfig{
|
|
||||||
serverAddr: serverAddress,
|
|
||||||
disks: disks,
|
|
||||||
ignoredDisks: ignoredDisks,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize and monitor shutdown signals.
|
|
||||||
err = initGracefulShutdown(os.Exit)
|
|
||||||
fatalIf(err, "Unable to initialize graceful shutdown operation")
|
|
||||||
|
|
||||||
// Configure server.
|
|
||||||
handler := configureServerHandler(srvConfig)
|
|
||||||
|
|
||||||
apiServer := NewServerMux(serverAddress, handler)
|
apiServer := NewServerMux(serverAddress, handler)
|
||||||
|
|
||||||
// Fetch endpoints which we are going to serve from.
|
// Fetch endpoints which we are going to serve from.
|
||||||
|
@ -405,28 +400,24 @@ func serverMain(c *cli.Context) {
|
||||||
}(tls, wait)
|
}(tls, wait)
|
||||||
|
|
||||||
// Wait for formatting of disks.
|
// Wait for formatting of disks.
|
||||||
err = formatDisks(disks, ignoredDisks)
|
err = waitForFormatDisks(firstDisk, endPoints[0], storageDisks)
|
||||||
if err != nil {
|
fatalIf(err, "formatting storage disks failed")
|
||||||
// FIXME: call graceful exit
|
|
||||||
errorIf(err, "formatting storage disks failed")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Once formatted, initialize object layer.
|
// Once formatted, initialize object layer.
|
||||||
newObject, err := newObjectLayer(disks, ignoredDisks)
|
newObject, err := newObjectLayer(storageDisks)
|
||||||
if err != nil {
|
fatalIf(err, "intializing object layer failed")
|
||||||
// FIXME: call graceful exit
|
|
||||||
errorIf(err, "intializing object layer failed")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prints the formatted startup message.
|
|
||||||
printStartupMessage(endPoints)
|
|
||||||
|
|
||||||
objLayerMutex.Lock()
|
objLayerMutex.Lock()
|
||||||
globalObjectAPI = newObject
|
globalObjectAPI = newObject
|
||||||
objLayerMutex.Unlock()
|
objLayerMutex.Unlock()
|
||||||
|
|
||||||
|
// Initialize a new event notifier.
|
||||||
|
err = initEventNotifier(newObjectLayerFn())
|
||||||
|
fatalIf(err, "Unable to initialize event notification.")
|
||||||
|
|
||||||
|
// Prints the formatted startup message once object layer is initialized.
|
||||||
|
printStartupMessage(endPoints)
|
||||||
|
|
||||||
// Waits on the server.
|
// Waits on the server.
|
||||||
<-wait
|
<-wait
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,7 +23,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/minio/cli"
|
"github.com/minio/cli"
|
||||||
|
@ -167,11 +166,11 @@ func TestCheckServerSyntax(t *testing.T) {
|
||||||
app := cli.NewApp()
|
app := cli.NewApp()
|
||||||
app.Commands = []cli.Command{serverCmd}
|
app.Commands = []cli.Command{serverCmd}
|
||||||
serverFlagSet := flag.NewFlagSet("server", 0)
|
serverFlagSet := flag.NewFlagSet("server", 0)
|
||||||
ctx := cli.NewContext(app, serverFlagSet, nil)
|
cli.NewContext(app, serverFlagSet, nil)
|
||||||
disksGen := func(n int) []string {
|
disksGen := func(n int) []string {
|
||||||
var disks []string
|
disks, err := getRandomDisks(n)
|
||||||
for i := 0; i < n; i++ {
|
if err != nil {
|
||||||
disks = append(disks, "disk"+strconv.Itoa(i))
|
t.Fatalf("Unable to initialie disks %s", err)
|
||||||
}
|
}
|
||||||
return disks
|
return disks
|
||||||
}
|
}
|
||||||
|
@ -181,12 +180,13 @@ func TestCheckServerSyntax(t *testing.T) {
|
||||||
disksGen(8),
|
disksGen(8),
|
||||||
disksGen(16),
|
disksGen(16),
|
||||||
}
|
}
|
||||||
for i, test := range testCases {
|
for i, disks := range testCases {
|
||||||
err := serverFlagSet.Parse(test)
|
err := serverFlagSet.Parse(disks)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Test %d failed to parse arguments %s", i+1, test)
|
t.Errorf("Test %d failed to parse arguments %s", i+1, disks)
|
||||||
}
|
}
|
||||||
checkServerSyntax(ctx)
|
defer removeRoots(disks)
|
||||||
|
_ = validateDisks(disks, nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -18,9 +18,11 @@ package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
humanize "github.com/dustin/go-humanize"
|
||||||
"github.com/minio/mc/pkg/console"
|
"github.com/minio/mc/pkg/console"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -44,6 +46,7 @@ func printStartupMessage(endPoints []string) {
|
||||||
printServerCommonMsg(endPoints)
|
printServerCommonMsg(endPoints)
|
||||||
printCLIAccessMsg(endPoints[0])
|
printCLIAccessMsg(endPoints[0])
|
||||||
printObjectAPIMsg()
|
printObjectAPIMsg()
|
||||||
|
printStorageInfo()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prints common server startup message. Prints credential, region and browser access.
|
// Prints common server startup message. Prints credential, region and browser access.
|
||||||
|
@ -58,7 +61,11 @@ func printServerCommonMsg(endPoints []string) {
|
||||||
// Colorize the message and print.
|
// Colorize the message and print.
|
||||||
console.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(endPointStr), 1), endPointStr)))
|
console.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(endPointStr), 1), endPointStr)))
|
||||||
console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKeyID)))
|
console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKeyID)))
|
||||||
console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretAccessKey)))
|
secretKey := cred.SecretAccessKey
|
||||||
|
if os.Getenv("MINIO_SECURE_CONSOLE") == "0" {
|
||||||
|
secretKey = "*REDACTED*"
|
||||||
|
}
|
||||||
|
console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", secretKey)))
|
||||||
console.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
|
console.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
|
||||||
printEventNotifiers()
|
printEventNotifiers()
|
||||||
|
|
||||||
|
@ -90,11 +97,15 @@ func printCLIAccessMsg(endPoint string) {
|
||||||
|
|
||||||
// Configure 'mc', following block prints platform specific information for minio client.
|
// Configure 'mc', following block prints platform specific information for minio client.
|
||||||
console.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
|
console.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
|
||||||
|
secretKey := cred.SecretAccessKey
|
||||||
|
if os.Getenv("MINIO_SECURE_CONSOLE") == "0" {
|
||||||
|
secretKey = "*REDACTED*"
|
||||||
|
}
|
||||||
if runtime.GOOS == "windows" {
|
if runtime.GOOS == "windows" {
|
||||||
mcMessage := fmt.Sprintf("$ mc.exe config host add myminio %s %s %s", endPoint, cred.AccessKeyID, cred.SecretAccessKey)
|
mcMessage := fmt.Sprintf("$ mc.exe config host add myminio %s %s %s", endPoint, cred.AccessKeyID, secretKey)
|
||||||
console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
|
console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
|
||||||
} else {
|
} else {
|
||||||
mcMessage := fmt.Sprintf("$ mc config host add myminio %s %s %s", endPoint, cred.AccessKeyID, cred.SecretAccessKey)
|
mcMessage := fmt.Sprintf("$ mc config host add myminio %s %s %s", endPoint, cred.AccessKeyID, secretKey)
|
||||||
console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
|
console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -107,3 +118,24 @@ func printObjectAPIMsg() {
|
||||||
console.Println(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide))
|
console.Println(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide))
|
||||||
console.Println(colorBlue(" JavaScript: ") + jsQuickStartGuide)
|
console.Println(colorBlue(" JavaScript: ") + jsQuickStartGuide)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get formatted disk/storage info message.
|
||||||
|
func getStorageInfoMsg() string {
|
||||||
|
storageInfo := newObjectLayerFn().StorageInfo()
|
||||||
|
msg := fmt.Sprintf("%s %s Free", colorBlue("Drive Capacity:"), humanize.IBytes(uint64(storageInfo.Free)))
|
||||||
|
diskInfo := fmt.Sprintf(" %d Online, %d Offline. We can withstand [%d] more drive failure(s).",
|
||||||
|
storageInfo.Backend.OnlineDisks,
|
||||||
|
storageInfo.Backend.OfflineDisks,
|
||||||
|
storageInfo.Backend.Quorum,
|
||||||
|
)
|
||||||
|
if storageInfo.Backend.Type == XL {
|
||||||
|
msg += colorBlue("\nStatus:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
|
||||||
|
}
|
||||||
|
return msg
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prints startup message of storage capacity and erasure information.
|
||||||
|
func printStorageInfo() {
|
||||||
|
console.Println()
|
||||||
|
console.Println(getStorageInfoMsg())
|
||||||
|
}
|
||||||
|
|
|
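Note: a quick sanity check of the numbers `getStorageInfoMsg` prints, assuming the common 16-drive XL layout where drives are split evenly into data and parity. This reads the `Quorum` field as "additional failures we can tolerate"; the patch itself does not spell that out, so treat it as our interpretation:

    package main

    import "fmt"

    func main() {
    	online, offline := 16, 0
    	parity := 16 / 2 // XL splits drives evenly into data and parity
    	// Drives that may still fail before reads lose quorum.
    	tolerable := parity - offline
    	fmt.Printf("%d Online, %d Offline. We can withstand [%d] more drive failure(s).\n",
    		online, offline, tolerable)
    }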
@@ -0,0 +1,34 @@
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import "testing"
+
+// Tests if we generate storage info.
+func TestStorageInfoMsg(t *testing.T) {
+	obj, _, err := prepareXL()
+	if err != nil {
+		t.Fatal("Unable to initialize XL backend", err)
+	}
+	objLayerMutex.Lock()
+	globalObjectAPI = obj
+	objLayerMutex.Unlock()
+
+	if msg := getStorageInfoMsg(); msg == "" {
+		t.Fatal("Empty message string is not implemented")
+	}
+}
@@ -1,56 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package cmd
-
-import (
-	"bytes"
-	"net/url"
-	"sort"
-	"strings"
-)
-
-// Replaces any occurring '/' in string, into its encoded representation.
-func percentEncodeSlash(s string) string {
-	return strings.Replace(s, "/", "%2F", -1)
-}
-
-// queryEncode - encodes query values in their URL encoded form. In
-// addition to the percent encoding performed by getURLEncodedName() used
-// here, it also percent encodes '/' (forward slash)
-func queryEncode(v url.Values) string {
-	if v == nil {
-		return ""
-	}
-	var buf bytes.Buffer
-	keys := make([]string, 0, len(v))
-	for k := range v {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-	for _, k := range keys {
-		vs := v[k]
-		prefix := percentEncodeSlash(getURLEncodedName(k)) + "="
-		for _, v := range vs {
-			if buf.Len() > 0 {
-				buf.WriteByte('&')
-			}
-			buf.WriteString(prefix)
-			buf.WriteString(percentEncodeSlash(getURLEncodedName(v)))
-		}
-	}
-	return buf.String()
-}
@@ -22,59 +22,6 @@ func TestResourceListSorting(t *testing.T) {
 	}
 }
 
-// Tests validate the query encoding.
-func TestQueryEncode(t *testing.T) {
-	testCases := []struct {
-		// Input.
-		input url.Values
-		// Expected result.
-		result string
-	}{
-		// % should be encoded as %25
-		{url.Values{
-			"key": []string{"thisisthe%url"},
-		}, "key=thisisthe%25url"},
-		// UTF-8 encoding.
-		{url.Values{
-			"key": []string{"本語"},
-		}, "key=%E6%9C%AC%E8%AA%9E"},
-		// UTF-8 encoding with ASCII.
-		{url.Values{
-			"key": []string{"本語.1"},
-		}, "key=%E6%9C%AC%E8%AA%9E.1"},
-		// Unusual ASCII characters.
-		{url.Values{
-			"key": []string{">123"},
-		}, "key=%3E123"},
-		// Fragment path characters.
-		{url.Values{
-			"key": []string{"myurl#link"},
-		}, "key=myurl%23link"},
-		// Space should be set to %20 not '+'.
-		{url.Values{
-			"key": []string{"space in url"},
-		}, "key=space%20in%20url"},
-		// '+' shouldn't be treated as space.
-		{url.Values{
-			"key": []string{"url+path"},
-		}, "key=url%2Bpath"},
-		// '/' should be percent coded.
-		{url.Values{
-			"key": []string{"url/+path"},
-		}, "key=url%2F%2Bpath"},
-		// Values is empty and empty string.
-		{nil, ""},
-	}
-
-	// Tests generated values from url encoded name.
-	for i, testCase := range testCases {
-		result := queryEncode(testCase.input)
-		if testCase.result != result {
-			t.Errorf("Test %d: Expected queryEncoded result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
-		}
-	}
-}
-
 func TestDoesPresignedV2SignatureMatch(t *testing.T) {
 	root, err := newTestConfig("us-east-1")
 	if err != nil {
@@ -1,105 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package cmd
-
-import (
-	"encoding/hex"
-	"fmt"
-	"hash"
-	"io"
-	"net/http"
-
-	"github.com/minio/sha256-simd"
-)
-
-// signVerifyReader represents an io.Reader compatible interface which
-// transparently calculates sha256, caller should call `Verify()` to
-// validate the signature header.
-type signVerifyReader struct {
-	Request    *http.Request // HTTP request to be validated and read.
-	HashWriter hash.Hash     // sha256 hash writer.
-}
-
-// Initializes a new signature verify reader.
-func newSignVerify(req *http.Request) *signVerifyReader {
-	return &signVerifyReader{
-		Request:    req,          // Save the request.
-		HashWriter: sha256.New(), // Initialize sha256.
-	}
-}
-
-// isSignVerify - is given reader a `signVerifyReader`.
-func isSignVerify(reader io.Reader) bool {
-	_, ok := reader.(*signVerifyReader)
-	return ok
-}
-
-// Verify - verifies signature and returns error upon signature mismatch.
-func (v *signVerifyReader) Verify() error {
-	region := serverConfig.GetRegion()
-	shaPayloadHex := hex.EncodeToString(v.HashWriter.Sum(nil))
-	if skipContentSha256Cksum(v.Request) {
-		// Sets 'UNSIGNED-PAYLOAD' if client requested to not calculate sha256.
-		shaPayloadHex = unsignedPayload
-	}
-	// Signature verification block.
-	var s3Error APIErrorCode
-	if isRequestSignatureV4(v.Request) {
-		s3Error = doesSignatureMatch(shaPayloadHex, v.Request, region)
-	} else if isRequestPresignedSignatureV4(v.Request) {
-		s3Error = doesPresignedSignatureMatch(shaPayloadHex, v.Request, region)
-	} else {
-		// Couldn't figure out the request type, set the error as AccessDenied.
-		s3Error = ErrAccessDenied
-	}
-	// Set signature error as 'errSignatureMismatch' if possible.
-	var sErr error
-	// Validate if we have received signature mismatch or sha256 mismatch.
-	if s3Error != ErrNone {
-		switch s3Error {
-		case ErrContentSHA256Mismatch:
-			sErr = errContentSHA256Mismatch
-		case ErrSignatureDoesNotMatch:
-			sErr = errSignatureMismatch
-		default:
-			sErr = fmt.Errorf("%v", getAPIError(s3Error))
-		}
-		return sErr
-	}
-	return nil
-}
-
-// Reads from request body and writes to hash writer. All reads performed
-// through it are matched with corresponding writes to hash writer. There is
-// no internal buffering; the write must complete before the read completes.
-// Any error encountered while writing is reported as a read error. As a
-// special case `Read()` skips writing to hash writer if the client requested
-// for the payload to be skipped.
-func (v *signVerifyReader) Read(b []byte) (n int, err error) {
-	n, err = v.Request.Body.Read(b)
-	if n > 0 {
-		// Skip calculating the hash.
-		if skipContentSha256Cksum(v.Request) {
-			return
-		}
-		// Stagger all reads to its corresponding writes to hash writer.
-		if n, err = v.HashWriter.Write(b[:n]); err != nil {
-			return n, err
-		}
-	}
-	return
-}
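The deleted reader interleaved sha256 hashing with body reads so the signature could be verified once the payload was consumed. A minimal standalone sketch of the same pattern using only the Go standard library (the names here are illustrative, not from this codebase):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	body := strings.NewReader("example payload")
	h := sha256.New()
	// TeeReader mirrors what signVerifyReader.Read did by hand: every
	// byte read from body is also written into the running hash.
	r := io.TeeReader(body, h)
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Println("sha256:", hex.EncodeToString(h.Sum(nil)))
}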
@@ -59,6 +59,3 @@ var errVolumeAccessDenied = errors.New("volume access denied")
 
 // errVolumeAccessDenied - cannot access file, insufficient permissions.
 var errFileAccessDenied = errors.New("file access denied")
-
-// errVolumeBusy - remote disk is not connected to yet.
-var errVolumeBusy = errors.New("volume is busy")
@@ -20,6 +20,9 @@ import "github.com/minio/minio/pkg/disk"
 
 // StorageAPI interface.
 type StorageAPI interface {
+	// Stringified version of disk.
+	String() string
+
 	// Storage operations.
 	DiskInfo() (info disk.Info, err error)
@@ -100,8 +100,7 @@ func newRPCClient(networkPath string) (StorageAPI, error) {
 
 	// Dial minio rpc storage http path.
 	rpcPath := path.Join(storageRPCPath, netPath)
-	port := getPort(srvConfig.serverAddr)
-	rpcAddr := netAddr + ":" + strconv.Itoa(port)
+	rpcAddr := netAddr + ":" + strconv.Itoa(globalMinioPort)
 
 	// Initialize rpc client with network address and rpc path.
 	cred := serverConfig.GetCredential()
 	rpcClient := newAuthClient(&authConfig{

@@ -123,6 +122,11 @@ func newRPCClient(networkPath string) (StorageAPI, error) {
 	return ndisk, nil
 }
 
+// Stringer interface compatible representation of network device.
+func (n networkStorage) String() string {
+	return n.netAddr + ":" + n.netPath
+}
+
 // DiskInfo - fetch disk information for a remote disk.
 func (n networkStorage) DiskInfo() (info disk.Info, err error) {
 	args := GenericArgs{}
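The new String method makes each disk satisfy fmt.Stringer, which the more elaborate startup messages can use to print drive addresses. A hedged sketch of the idea with a made-up type (diskAddr is illustrative, not the real networkStorage):

package main

import "fmt"

// diskAddr is an illustrative stand-in for networkStorage; the field
// names here are made up.
type diskAddr struct {
	netAddr, netPath string
}

// String satisfies fmt.Stringer, so %s and Println print a readable
// address instead of a struct dump.
func (d diskAddr) String() string {
	return d.netAddr + ":" + d.netPath
}

func main() {
	d := diskAddr{netAddr: "10.0.0.2", netPath: "/export/disk1"}
	fmt.Println(d) // 10.0.0.2:/export/disk1
}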
@@ -0,0 +1,114 @@
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/rpc"
+	"testing"
+)
+
+// Tests storage error transformation.
+func TestStorageErr(t *testing.T) {
+	unknownErr := errors.New("Unknown error")
+	testCases := []struct {
+		expectedErr error
+		err         error
+	}{
+		{
+			expectedErr: nil,
+			err:         nil,
+		},
+		{
+			expectedErr: io.EOF,
+			err:         fmt.Errorf("%s", io.EOF.Error()),
+		},
+		{
+			expectedErr: io.ErrUnexpectedEOF,
+			err:         fmt.Errorf("%s", io.ErrUnexpectedEOF.Error()),
+		},
+		{
+			expectedErr: errDiskNotFound,
+			err:         &net.OpError{},
+		},
+		{
+			expectedErr: errDiskNotFound,
+			err:         rpc.ErrShutdown,
+		},
+		{
+			expectedErr: errUnexpected,
+			err:         fmt.Errorf("%s", errUnexpected.Error()),
+		},
+		{
+			expectedErr: errDiskFull,
+			err:         fmt.Errorf("%s", errDiskFull.Error()),
+		},
+		{
+			expectedErr: errVolumeNotFound,
+			err:         fmt.Errorf("%s", errVolumeNotFound.Error()),
+		},
+		{
+			expectedErr: errVolumeExists,
+			err:         fmt.Errorf("%s", errVolumeExists.Error()),
+		},
+		{
+			expectedErr: errFileNotFound,
+			err:         fmt.Errorf("%s", errFileNotFound.Error()),
+		},
+		{
+			expectedErr: errFileAccessDenied,
+			err:         fmt.Errorf("%s", errFileAccessDenied.Error()),
+		},
+		{
+			expectedErr: errIsNotRegular,
+			err:         fmt.Errorf("%s", errIsNotRegular.Error()),
+		},
+		{
+			expectedErr: errVolumeNotEmpty,
+			err:         fmt.Errorf("%s", errVolumeNotEmpty.Error()),
+		},
+		{
+			expectedErr: errVolumeAccessDenied,
+			err:         fmt.Errorf("%s", errVolumeAccessDenied.Error()),
+		},
+		{
+			expectedErr: errCorruptedFormat,
+			err:         fmt.Errorf("%s", errCorruptedFormat.Error()),
+		},
+		{
+			expectedErr: errUnformattedDisk,
+			err:         fmt.Errorf("%s", errUnformattedDisk.Error()),
+		},
+		{
+			expectedErr: errFileNameTooLong,
+			err:         fmt.Errorf("%s", errFileNameTooLong.Error()),
+		},
+		{
+			expectedErr: unknownErr,
+			err:         unknownErr,
+		},
+	}
+	for i, testCase := range testCases {
+		resultErr := toStorageErr(testCase.err)
+		if testCase.expectedErr != resultErr {
+			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedErr, resultErr)
+		}
+	}
+}
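The test above exercises toStorageErr, which rehydrates typed errors after a net/rpc round trip flattens them into plain strings. A simplified, self-contained sketch of that mapping under those assumptions (only a few sentinels shown; this stand-in is not the actual implementation):

package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/rpc"
)

// Two of the sentinel errors a storage client wants back after an RPC hop.
var errDiskNotFound = errors.New("disk not found")
var errDiskFull = errors.New("disk full")

// toStorageErr sketch: connection-level failures collapse to
// errDiskNotFound, and sentinels that the transport flattened into
// strings are matched back by message.
func toStorageErr(err error) error {
	if err == nil {
		return nil
	}
	if _, ok := err.(*net.OpError); ok || err == rpc.ErrShutdown {
		return errDiskNotFound
	}
	switch err.Error() {
	case io.EOF.Error():
		return io.EOF
	case io.ErrUnexpectedEOF.Error():
		return io.ErrUnexpectedEOF
	case errDiskFull.Error():
		return errDiskFull
	}
	return err
}

func main() {
	flattened := fmt.Errorf("%s", errDiskFull.Error())
	fmt.Println(toStorageErr(flattened) == errDiskFull)          // true
	fmt.Println(toStorageErr(&net.OpError{}) == errDiskNotFound) // true
}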
@@ -205,6 +205,18 @@ func (s *storageServer) RenameFileHandler(args *RenameFileArgs, reply *GenericRe
 	return s.storage.RenameFile(args.SrcVol, args.SrcPath, args.DstVol, args.DstPath)
 }
 
+// TryInitHandler - wake up storage server.
+func (s *storageServer) TryInitHandler(args *GenericArgs, reply *GenericReply) error {
+	if !isRPCTokenValid(args.Token) {
+		return errInvalidToken
+	}
+	go func() {
+		globalWakeupCh <- struct{}{}
+	}()
+	*reply = GenericReply{}
+	return nil
+}
+
 // Initialize new storage rpc.
 func newRPCServer(serverConfig serverCmdConfig) (servers []*storageServer, err error) {
 	// Initialize posix storage API.

@@ -245,9 +257,13 @@ func newRPCServer(serverConfig serverCmdConfig) (servers []*storageServer, err e
 	}
 
 // registerStorageRPCRouter - register storage rpc router.
-func registerStorageRPCRouters(mux *router.Router, stServers []*storageServer) {
+func registerStorageRPCRouters(mux *router.Router, srvCmdConfig serverCmdConfig) {
+	// Initialize storage rpc servers for every disk that is hosted on this node.
+	storageRPCs, err := newRPCServer(srvCmdConfig)
+	fatalIf(err, "Unable to initialize storage RPC server.")
+
 	// Create a unique route for each disk exported from this node.
-	for _, stServer := range stServers {
+	for _, stServer := range storageRPCs {
 		storageRPCServer := rpc.NewServer()
 		storageRPCServer.RegisterName("Storage", stServer)
 		// Add minio storage routes.
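registerStorageRPCRouters boils down to creating one rpc.Server per exported disk and registering it under the "Storage" name. The same RegisterName/Call handshake in miniature, with an illustrative Echo service standing in for the storage server:

package main

import (
	"fmt"
	"net"
	"net/rpc"
	"strings"
)

// Echo stands in for the storage server; net/rpc requires exported
// methods of the form Method(args, reply) error.
type Echo struct{}

func (Echo) Upper(args *string, reply *string) error {
	*reply = strings.ToUpper(*args)
	return nil
}

func main() {
	srv := rpc.NewServer()
	if err := srv.RegisterName("Echo", Echo{}); err != nil { // like RegisterName("Storage", stServer)
		panic(err)
	}

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go srv.Accept(ln)

	client, err := rpc.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var reply string
	args := "disk1"
	if err := client.Call("Echo.Upper", &args, &reply); err != nil {
		panic(err)
	}
	fmt.Println(reply) // DISK1
}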
@@ -40,6 +40,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/fatih/color"
 	router "github.com/gorilla/mux"
 )
 
@@ -48,6 +49,9 @@ func init() {
 	// Initialize name space lock.
 	isDist := false
 	initNSLock(isDist)
+
+	// Disable printing console messages during tests.
+	color.Output = ioutil.Discard
 }
 
 func prepareFS() (ObjectLayer, string, error) {
@@ -55,7 +59,7 @@ func prepareFS() (ObjectLayer, string, error) {
 	if err != nil {
 		return nil, "", err
 	}
-	obj, err := getSingleNodeObjectLayer(fsDirs[0])
+	obj, _, err := initObjectLayer(fsDirs, nil)
 	if err != nil {
 		removeRoots(fsDirs)
 		return nil, "", err
@@ -69,7 +73,7 @@ func prepareXL() (ObjectLayer, []string, error) {
 	if err != nil {
 		return nil, nil, err
 	}
-	obj, err := getXLObjectLayer(fsDirs, nil)
+	obj, _, err := initObjectLayer(fsDirs, nil)
 	if err != nil {
 		removeRoots(fsDirs)
 		return nil, nil, err
@@ -170,13 +174,20 @@ func StartTestServer(t TestErrHandler, instanceType string) TestServer {
 	testServer.Disks = disks
 	testServer.AccessKey = credentials.AccessKeyID
 	testServer.SecretKey = credentials.SecretAccessKey
-	// Run TestServer.
-	testServer.Server = httptest.NewServer(configureServerHandler(serverCmdConfig{disks: disks}))
 
-	objLayer, err := makeTestBackend(disks, instanceType)
+	objLayer, storageDisks, err := initObjectLayer(disks, nil)
 	if err != nil {
 		t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
 	}
 
+	// Run TestServer.
+	testServer.Server = httptest.NewServer(configureServerHandler(
+		serverCmdConfig{
+			disks:        disks,
+			storageDisks: storageDisks,
+		},
+	))
+
 	testServer.Obj = objLayer
 	objLayerMutex.Lock()
 	globalObjectAPI = objLayer
@@ -186,16 +197,10 @@ func StartTestServer(t TestErrHandler, instanceType string) TestServer {
 
 // Initializes control RPC end points.
 // The object Layer will be a temp back used for testing purpose.
-func initTestControlRPCEndPoint(objectLayer ObjectLayer) http.Handler {
-	// Initialize Web.
-	controllerHandlers := &controllerAPIHandlers{
-		ObjectAPI: func() ObjectLayer { return objectLayer },
-	}
-
+func initTestControlRPCEndPoint(srvCmdConfig serverCmdConfig) http.Handler {
 	// Initialize router.
 	muxRouter := router.NewRouter()
-	registerControllerRPCRouter(muxRouter, controllerHandlers)
+	registerControllerRPCRouter(muxRouter, srvCmdConfig)
 	return muxRouter
 }
 
@@ -208,20 +213,14 @@ func StartTestRPCServer(t TestErrHandler, instanceType string) TestServer {
 	if err != nil {
 		t.Fatal("Failed to create disks for the backend")
 	}
-	// create an instance of TestServer.
-	testRPCServer := TestServer{}
-	// create temporary backend for the test server.
-	objLayer, err := makeTestBackend(disks, instanceType)
-
-	if err != nil {
-		t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
-	}
-
 	root, err := newTestConfig("us-east-1")
 	if err != nil {
 		t.Fatalf("%s", err)
 	}
 
+	// create an instance of TestServer.
+	testRPCServer := TestServer{}
 	// Get credential.
 	credentials := serverConfig.GetCredential()
 
@@ -229,9 +228,21 @@ func StartTestRPCServer(t TestErrHandler, instanceType string) TestServer {
 	testRPCServer.Disks = disks
 	testRPCServer.AccessKey = credentials.AccessKeyID
 	testRPCServer.SecretKey = credentials.SecretAccessKey
-	testRPCServer.Obj = objLayer
+
+	// create temporary backend for the test server.
+	objLayer, storageDisks, err := initObjectLayer(disks, nil)
+	if err != nil {
+		t.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
+	}
+
+	objLayerMutex.Lock()
+	globalObjectAPI = objLayer
+	objLayerMutex.Unlock()
+
 	// Run TestServer.
-	testRPCServer.Server = httptest.NewServer(initTestControlRPCEndPoint(objLayer))
+	testRPCServer.Server = httptest.NewServer(initTestControlRPCEndPoint(serverCmdConfig{
+		storageDisks: storageDisks,
+	}))
 
 	return testRPCServer
 }
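StartTestServer leans on net/http/httptest, which binds a throwaway listener on a random port and hands back its URL. A minimal sketch of that workflow:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// httptest.NewServer starts a real HTTP server; Close tears it down.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	}))
	defer ts.Close()

	resp, err := http.Get(ts.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 ok
}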
@@ -476,6 +487,38 @@ func newTestStreamingSignedRequest(method, urlStr string, contentLength, chunkSi
 	return req, nil
 }
 
+// Replaces any occurring '/' in string, into its encoded representation.
+func percentEncodeSlash(s string) string {
+	return strings.Replace(s, "/", "%2F", -1)
+}
+
+// queryEncode - encodes query values in their URL encoded form. In
+// addition to the percent encoding performed by getURLEncodedName() used
+// here, it also percent encodes '/' (forward slash)
+func queryEncode(v url.Values) string {
+	if v == nil {
+		return ""
+	}
+	var buf bytes.Buffer
+	keys := make([]string, 0, len(v))
+	for k := range v {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		vs := v[k]
+		prefix := percentEncodeSlash(getURLEncodedName(k)) + "="
+		for _, v := range vs {
+			if buf.Len() > 0 {
+				buf.WriteByte('&')
+			}
+			buf.WriteString(prefix)
+			buf.WriteString(percentEncodeSlash(getURLEncodedName(v)))
+		}
+	}
+	return buf.String()
+}
+
 // preSignV2 - presign the request in following style.
 // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
 func preSignV2(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error {
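queryEncode exists because the path-style encoder it builds on (getURLEncodedName) leaves '/' unescaped, which is ambiguous inside a query string; the extra pass fixes that. A tiny standalone demonstration (the input string is made up):

package main

import (
	"fmt"
	"strings"
)

// Same helper as in the diff above, shown standalone.
func percentEncodeSlash(s string) string {
	return strings.Replace(s, "/", "%2F", -1)
}

func main() {
	// A '/'-preserving encoder would produce "url/%2Bpath"; the extra
	// pass forces the slash to %2F so the query value is unambiguous.
	fmt.Println(percentEncodeSlash("url/%2Bpath")) // url%2F%2Bpath
}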
@@ -860,31 +903,6 @@ func getTestWebRPCResponse(resp *httptest.ResponseRecorder, data interface{}) er
 	return nil
 }
 
-// creates the temp backend setup.
-// if the option is
-// FS: Returns a temp single disk setup initializes FS Backend.
-// XL: Returns a 16 temp single disk setup and initializes XL Backend.
-func makeTestBackend(disks []string, instanceType string) (ObjectLayer, error) {
-	switch instanceType {
-	case "FS":
-		objLayer, err := getSingleNodeObjectLayer(disks[0])
-		if err != nil {
-			return nil, err
-		}
-		return objLayer, err
-
-	case "XL":
-		objectLayer, err := getXLObjectLayer(disks, nil)
-		if err != nil {
-			return nil, err
-		}
-		return objectLayer, err
-	default:
-		errMsg := "Invalid instance type, Only FS and XL are valid options"
-		return nil, fmt.Errorf("Failed obtaining Temp XL layer: <ERROR> %s", errMsg)
-	}
-}
-
 var src = rand.NewSource(time.Now().UTC().UnixNano())
 
 // Function to generate random string for bucket/object names.
@@ -1216,33 +1234,31 @@ func getRandomDisks(N int) ([]string, error) {
 	return erasureDisks, nil
 }
 
-// getXLObjectLayer - Instantiates XL object layer and returns it.
-func getXLObjectLayer(erasureDisks []string, ignoredDisks []string) (ObjectLayer, error) {
-	err := formatDisks(erasureDisks, ignoredDisks)
+// initObjectLayer - Instantiates object layer and returns it.
+func initObjectLayer(disks []string, ignoredDisks []string) (ObjectLayer, []StorageAPI, error) {
+	storageDisks, err := initStorageDisks(disks, ignoredDisks)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
-	objLayer, err := newXLObjects(erasureDisks, ignoredDisks)
+
+	err = waitForFormatDisks(true, "", storageDisks)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
+
+	objLayer, err := newObjectLayer(storageDisks)
+	if err != nil {
+		return nil, nil, err
+	}
+
 	// Disabling the cache for integration tests.
 	// Should use the object layer tests for validating cache.
 	if xl, ok := objLayer.(xlObjects); ok {
 		xl.objCacheEnabled = false
 	}
 
-	return objLayer, nil
-}
-
-// getSingleNodeObjectLayer - Instantiates single node object layer and returns it.
-func getSingleNodeObjectLayer(disk string) (ObjectLayer, error) {
-	// Create the object layer.
-	objLayer, err := newFSObjects(disk)
-	if err != nil {
-		return nil, err
-	}
-	return objLayer, nil
+	// Success.
+	return objLayer, storageDisks, nil
 }
 
 // removeRoots - Cleans up initialized directories during tests.

@@ -1262,6 +1278,48 @@ func removeDiskN(disks []string, n int) {
 	}
 }
 
+// Makes a entire new copy of a StorageAPI slice.
+func deepCopyStorageDisks(storageDisks []StorageAPI) []StorageAPI {
+	newStorageDisks := make([]StorageAPI, len(storageDisks))
+	for i, disk := range storageDisks {
+		newStorageDisks[i] = disk
+	}
+	return newStorageDisks
+}
+
+// Initializes storage disks with 'N' errored disks, N disks return 'err' for each disk access.
+func prepareNErroredDisks(storageDisks []StorageAPI, offline int, err error, t *testing.T) []StorageAPI {
+	if offline > len(storageDisks) {
+		t.Fatal("Requested more offline disks than supplied storageDisks slice", offline, len(storageDisks))
+	}
+
+	for i := 0; i < offline; i++ {
+		d := storageDisks[i].(*posix)
+		storageDisks[i] = &naughtyDisk{disk: d, defaultErr: err}
+	}
+	return storageDisks
+}
+
+// Initializes storage disks with 'N' offline disks, N disks returns 'errDiskNotFound' for each disk access.
+func prepareNOfflineDisks(storageDisks []StorageAPI, offline int, t *testing.T) []StorageAPI {
+	return prepareNErroredDisks(storageDisks, offline, errDiskNotFound, t)
+}
+
+// Initializes backend storage disks.
+func prepareXLStorageDisks(t *testing.T) ([]StorageAPI, []string) {
+	nDisks := 16
+	fsDirs, err := getRandomDisks(nDisks)
+	if err != nil {
+		t.Fatal("Unexpected error: ", err)
+	}
+	_, storageDisks, err := initObjectLayer(fsDirs, nil)
+	if err != nil {
+		removeRoots(fsDirs)
+		t.Fatal("Unable to initialize storage disks", err)
+	}
+	return storageDisks, fsDirs
+}
+
 // creates a bucket for the tests and returns the bucket name.
 // initializes the specified API endpoints for the tests.
 // initializes the root and returns its path.
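prepareNErroredDisks works by wrapping healthy disks in a decorator that fails every call, letting tests simulate offline drives without touching the filesystem. A pared-down sketch of that decorator pattern (Storage, memDisk and faultyDisk below are illustrative stand-ins for StorageAPI, posix and naughtyDisk):

package main

import (
	"errors"
	"fmt"
)

// Storage is a one-method stand-in for StorageAPI.
type Storage interface {
	ReadAll(volume, path string) ([]byte, error)
}

// memDisk is a trivially healthy disk backed by a map.
type memDisk struct{ data map[string][]byte }

func (m memDisk) ReadAll(volume, path string) ([]byte, error) {
	return m.data[volume+"/"+path], nil
}

// faultyDisk decorates another Storage and fails every call with a
// fixed error - the same trick naughtyDisk plays in the tests.
type faultyDisk struct {
	disk       Storage
	defaultErr error
}

func (f faultyDisk) ReadAll(volume, path string) ([]byte, error) {
	return nil, f.defaultErr
}

func main() {
	var d Storage = memDisk{data: map[string][]byte{"v/obj": []byte("hello")}}
	d = faultyDisk{disk: d, defaultErr: errors.New("disk not found")}
	_, err := d.ReadAll("v", "obj")
	fmt.Println(err) // disk not found
}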
@@ -1370,7 +1428,7 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType)
 	if err != nil {
 		t.Fatalf("Initialization of disks for XL setup: %s", err)
 	}
-	objLayer, err := getXLObjectLayer(erasureDisks, nil)
+	objLayer, _, err := initObjectLayer(erasureDisks, nil)
 	if err != nil {
 		t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
 	}
@@ -1379,105 +1437,79 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType)
 	defer removeRoots(erasureDisks)
 }
 
-// addAPIFunc helper function to add API functions identified by name to the routers.
-func addAPIFunc(muxRouter *router.Router, apiRouter *router.Router, bucket *router.Router,
-	api objectAPIHandlers, apiFunction string) {
-	switch apiFunction {
-	// Register ListBuckets handler.
-	case "ListBuckets":
-		apiRouter.Methods("GET").HandlerFunc(api.ListBucketsHandler)
-	// Register GetObject handler.
-	case "GetObject":
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.GetObjectHandler)
-	// Register PutObject handler.
-	case "PutObject":
-		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectHandler)
-	// Register Delete Object handler.
-	case "DeleteObject":
-		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(api.DeleteObjectHandler)
-	// Register Copy Object handler.
-	case "CopyObject":
-		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectHandler)
-	// Register PutBucket Policy handler.
-	case "PutBucketPolicy":
-		bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "")
-	// Register Delete bucket HTTP policy handler.
-	case "DeleteBucketPolicy":
-		bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "")
-	// Register Get Bucket policy HTTP Handler.
-	case "GetBucketPolicy":
-		bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
-	// Register GetBucketLocation handler.
-	case "GetBucketLocation":
-		bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "")
-	// Register HeadBucket handler.
-	case "HeadBucket":
-		bucket.Methods("HEAD").HandlerFunc(api.HeadBucketHandler)
-	// Register New Multipart upload handler.
-	case "NewMultipart":
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.NewMultipartUploadHandler).Queries("uploads", "")
-	// Register PutObjectPart handler.
-	case "PutObjectPart":
-		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
-	// Register ListObjectParts handler.
-	case "ListObjectParts":
-		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
-	// Register ListMultipartUploads handler.
-	case "ListMultipartUploads":
-		bucket.Methods("GET").HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "")
-	// Register Complete Multipart Upload handler.
-	case "CompleteMultipart":
-		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
-	// Register GetBucketNotification Handler.
-	case "GetBucketNotification":
-		bucket.Methods("GET").HandlerFunc(api.GetBucketNotificationHandler).Queries("notification", "")
-	// Register PutBucketNotification Handler.
-	case "PutBucketNotification":
-		bucket.Methods("PUT").HandlerFunc(api.PutBucketNotificationHandler).Queries("notification", "")
-	// Register ListenBucketNotification Handler.
-	case "ListenBucketNotification":
-		bucket.Methods("GET").HandlerFunc(api.ListenBucketNotificationHandler).Queries("events", "{events:.*}")
-	// Register all api endpoints by default.
-	default:
-		registerAPIRouter(muxRouter, api)
-		// No need to register any more end points, all the end points are registered.
-		break
-	}
-}
+func registerBucketLevelFunc(bucket *router.Router, api objectAPIHandlers, apiFunctions ...string) {
+	for _, apiFunction := range apiFunctions {
+		switch apiFunction {
+		case "PostPolicy":
+			// Register PostPolicy handler.
+			bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(api.PostPolicyBucketHandler)
+		// Register GetObject handler.
+		case "GetObject":
+			bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.GetObjectHandler)
+		// Register PutObject handler.
+		case "PutObject":
+			bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectHandler)
+		// Register Delete Object handler.
+		case "DeleteObject":
+			bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(api.DeleteObjectHandler)
+		// Register Copy Object handler.
+		case "CopyObject":
+			bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectHandler)
+		// Register PutBucket Policy handler.
+		case "PutBucketPolicy":
+			bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "")
+		// Register Delete bucket HTTP policy handler.
+		case "DeleteBucketPolicy":
+			bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "")
+		// Register Get Bucket policy HTTP Handler.
+		case "GetBucketPolicy":
+			bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
+		// Register GetBucketLocation handler.
+		case "GetBucketLocation":
+			bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "")
+		// Register HeadBucket handler.
+		case "HeadBucket":
+			bucket.Methods("HEAD").HandlerFunc(api.HeadBucketHandler)
+		// Register New Multipart upload handler.
+		case "NewMultipart":
+			bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.NewMultipartUploadHandler).Queries("uploads", "")
+		// Register PutObjectPart handler.
+		case "PutObjectPart":
+			bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+		// Register ListObjectParts handler.
+		case "ListObjectParts":
+			bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
+		// Register ListMultipartUploads handler.
+		case "ListMultipartUploads":
+			bucket.Methods("GET").HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "")
+		// Register Complete Multipart Upload handler.
+		case "CompleteMultipart":
+			bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+		// Register GetBucketNotification Handler.
+		case "GetBucketNotification":
+			bucket.Methods("GET").HandlerFunc(api.GetBucketNotificationHandler).Queries("notification", "")
+		// Register PutBucketNotification Handler.
+		case "PutBucketNotification":
+			bucket.Methods("PUT").HandlerFunc(api.PutBucketNotificationHandler).Queries("notification", "")
+		// Register ListenBucketNotification Handler.
+		case "ListenBucketNotification":
+			bucket.Methods("GET").HandlerFunc(api.ListenBucketNotificationHandler).Queries("events", "{events:.*}")
+		}
+	}
+}
 
-// Returns a http.Handler capable of routing API requests to handlers corresponding to apiFunctions,
-// with ObjectAPI set to nil.
-func initTestNilObjAPIEndPoints(apiFunctions []string) http.Handler {
-	muxRouter := router.NewRouter()
-	// All object storage operations are registered as HTTP handlers on `objectAPIHandlers`.
-	// When the handlers get a HTTP request they use the underlyting ObjectLayer to perform operations.
-	nilAPI := objectAPIHandlers{
-		ObjectAPI: func() ObjectLayer {
-			objLayerMutex.Lock()
-			defer objLayerMutex.Unlock()
-			globalObjectAPI = nil
-			return globalObjectAPI
-		},
-	}
+// registerAPIFunctions helper function to add API functions identified by name to the routers.
+func registerAPIFunctions(muxRouter *router.Router, objLayer ObjectLayer, apiFunctions ...string) {
+	if len(apiFunctions) == 0 {
+		// Register all api endpoints by default.
+		registerAPIRouter(muxRouter)
+		return
+	}
 	// API Router.
 	apiRouter := muxRouter.NewRoute().PathPrefix("/").Subrouter()
 	// Bucket router.
-	bucket := apiRouter.PathPrefix("/{bucket}").Subrouter()
-	// Iterate the list of API functions requested for and register them in mux HTTP handler.
-	for _, apiFunction := range apiFunctions {
-		addAPIFunc(muxRouter, apiRouter, bucket, nilAPI, apiFunction)
-	}
-	return muxRouter
-}
-
-// Takes in XL/FS object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler.
-// Need isolated registration of API end points while writing unit tests for end points.
-// All the API end points are registered only for the default case.
-func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Handler {
-	// initialize a new mux router.
-	// goriilla/mux is the library used to register all the routes and handle them.
-	muxRouter := router.NewRouter()
+	bucketRouter := apiRouter.PathPrefix("/{bucket}").Subrouter()
+
 	// All object storage operations are registered as HTTP handlers on `objectAPIHandlers`.
 	// When the handlers get a HTTP request they use the underlyting ObjectLayer to perform operations.
 	objLayerMutex.Lock()

@@ -1488,26 +1520,49 @@ func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Hand
 		ObjectAPI: newObjectLayerFn,
 	}
-	// API Router.
-	apiRouter := muxRouter.NewRoute().PathPrefix("/").Subrouter()
-	// Bucket router.
-	bucket := apiRouter.PathPrefix("/{bucket}").Subrouter()
-	// Iterate the list of API functions requested for and register them in mux HTTP handler.
-	for _, apiFunction := range apiFunctions {
-		addAPIFunc(muxRouter, apiRouter, bucket, api, apiFunction)
+	// Register ListBuckets handler.
+	apiRouter.Methods("GET").HandlerFunc(api.ListBucketsHandler)
+	// Register all bucket level handlers.
+	registerBucketLevelFunc(bucketRouter, api, apiFunctions...)
+}
+
+// Returns a http.Handler capable of routing API requests to handlers corresponding to apiFunctions,
+// with ObjectAPI set to nil.
+func initTestNilObjAPIEndPoints(apiFunctions []string) http.Handler {
+	muxRouter := router.NewRouter()
+	if len(apiFunctions) > 0 {
+		// Iterate the list of API functions requested for and register them in mux HTTP handler.
+		registerAPIFunctions(muxRouter, nil, apiFunctions...)
+		return muxRouter
 	}
+	registerAPIRouter(muxRouter)
+	return muxRouter
+}
+
+// Takes in XL/FS object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler.
+// Need isolated registration of API end points while writing unit tests for end points.
+// All the API end points are registered only for the default case.
+func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Handler {
+	// initialize a new mux router.
+	// goriilla/mux is the library used to register all the routes and handle them.
+	muxRouter := router.NewRouter()
+	if len(apiFunctions) > 0 {
+		// Iterate the list of API functions requested for and register them in mux HTTP handler.
+		registerAPIFunctions(muxRouter, objLayer, apiFunctions...)
+		return muxRouter
+	}
+	registerAPIRouter(muxRouter)
 	return muxRouter
 }
 
 // Initialize Web RPC Handlers for testing
 func initTestWebRPCEndPoint(objLayer ObjectLayer) http.Handler {
-	// Initialize Web.
-	webHandlers := &webAPIHandlers{
-		ObjectAPI: func() ObjectLayer { return objLayer },
-	}
+	objLayerMutex.Lock()
+	globalObjectAPI = objLayer
+	objLayerMutex.Unlock()
 
 	// Initialize router.
 	muxRouter := router.NewRouter()
-	registerWebRouter(muxRouter, webHandlers)
+	registerWebRouter(muxRouter)
 	return muxRouter
 }
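The handler registrations above all follow the same gorilla/mux shape: a method matcher, optional path variables, and a Queries matcher for the sub-resource. A runnable sketch of one such route (the handler body is illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	router "github.com/gorilla/mux"
)

func main() {
	mux := router.NewRouter()
	bucket := mux.PathPrefix("/{bucket}").Subrouter()
	// Same matcher shape as the GetBucketPolicy route: HTTP method plus
	// a required "?policy=" query parameter on the bucket subrouter.
	bucket.Methods("GET").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "policy for %s", router.Vars(r)["bucket"])
	}).Queries("policy", "")

	rec := httptest.NewRecorder()
	req := httptest.NewRequest("GET", "/mybucket?policy=", nil)
	mux.ServeHTTP(rec, req)
	fmt.Println(rec.Body.String()) // policy for mybucket
}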
@@ -27,7 +27,6 @@ import (
 	"strings"
 	"testing"
 
-	router "github.com/gorilla/mux"
 	"github.com/minio/minio-go/pkg/policy"
 )
 
@@ -1016,9 +1015,9 @@ func testWebSetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE
 // TestWebCheckAuthorization - Test Authorization for all web handlers
 func TestWebCheckAuthorization(t *testing.T) {
 	// Prepare XL backend
-	obj, fsDirs, e := prepareXL()
-	if e != nil {
-		t.Fatalf("Initialization of object layer failed for XL setup: %s", e)
+	obj, fsDirs, err := prepareXL()
+	if err != nil {
+		t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
 	}
 	// Executing the object layer tests for XL.
 	defer removeRoots(fsDirs)
@@ -1027,9 +1026,9 @@ func TestWebCheckAuthorization(t *testing.T) {
 	apiRouter := initTestWebRPCEndPoint(obj)
 	// initialize the server and obtain the credentials and root.
 	// credentials are necessary to sign the HTTP request.
-	rootPath, e := newTestConfig("us-east-1")
-	if e != nil {
-		t.Fatalf("Init Test config failed")
+	rootPath, err := newTestConfig("us-east-1")
+	if err != nil {
+		t.Fatal("Init Test config failed", err)
 	}
 	// remove the root folder after the test ends.
 	defer removeAll(rootPath)
@@ -1042,9 +1041,9 @@ func TestWebCheckAuthorization(t *testing.T) {
 	for _, rpcCall := range webRPCs {
 		args := &GenericArgs{}
 		reply := &WebGenericRep{}
-		req, err := newTestWebRPCRequest("Web."+rpcCall, "Bearer fooauthorization", args)
-		if err != nil {
-			t.Fatalf("Test %s: Failed to create HTTP request: <ERROR> %v", rpcCall, err)
+		req, nerr := newTestWebRPCRequest("Web."+rpcCall, "Bearer fooauthorization", args)
+		if nerr != nil {
+			t.Fatalf("Test %s: Failed to create HTTP request: <ERROR> %v", rpcCall, nerr)
 		}
 		apiRouter.ServeHTTP(rec, req)
 		if rec.Code != http.StatusOK {
@@ -1097,18 +1096,14 @@ func TestWebCheckAuthorization(t *testing.T) {
 
 // TestWebObjectLayerNotReady - Test RPCs responses when disks are not ready
 func TestWebObjectLayerNotReady(t *testing.T) {
-	webHandlers := &webAPIHandlers{
-		ObjectAPI: func() ObjectLayer { return nil },
-	}
-	// Initialize router.
-	apiRouter := router.NewRouter()
-	registerWebRouter(apiRouter, webHandlers)
+	// Initialize web rpc endpoint.
+	apiRouter := initTestWebRPCEndPoint(nil)
 
 	// initialize the server and obtain the credentials and root.
 	// credentials are necessary to sign the HTTP request.
-	rootPath, e := newTestConfig("us-east-1")
-	if e != nil {
-		t.Fatalf("Init Test config failed")
+	rootPath, err := newTestConfig("us-east-1")
+	if err != nil {
+		t.Fatal("Init Test config failed", err)
 	}
 	// remove the root folder after the test ends.
 	defer removeAll(rootPath)
@@ -1116,9 +1111,9 @@ func TestWebObjectLayerNotReady(t *testing.T) {
 	rec := httptest.NewRecorder()
 
 	credentials := serverConfig.GetCredential()
-	authorization, e := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey)
-	if e != nil {
-		t.Fatal("Cannot authenticate")
+	authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey)
+	if err != nil {
+		t.Fatal("Cannot authenticate", err)
 	}
 
 	// Check if web rpc calls return Server not initialized. ServerInfo, GenerateAuth,
@@ -1128,9 +1123,9 @@ func TestWebObjectLayerNotReady(t *testing.T) {
 	for _, rpcCall := range webRPCs {
 		args := &GenericArgs{}
 		reply := &WebGenericRep{}
-		req, err := newTestWebRPCRequest("Web."+rpcCall, authorization, args)
-		if err != nil {
-			t.Fatalf("Test %s: Failed to create HTTP request: <ERROR> %v", rpcCall, err)
+		req, nerr := newTestWebRPCRequest("Web."+rpcCall, authorization, args)
+		if nerr != nil {
+			t.Fatalf("Test %s: Failed to create HTTP request: <ERROR> %v", rpcCall, nerr)
 		}
 		apiRouter.ServeHTTP(rec, req)
 		if rec.Code != http.StatusOK {
@@ -1184,9 +1179,9 @@ func TestWebObjectLayerNotReady(t *testing.T) {
 // TestWebObjectLayerFaultyDisks - Test Web RPC responses with faulty disks
 func TestWebObjectLayerFaultyDisks(t *testing.T) {
 	// Prepare XL backend
-	obj, fsDirs, e := prepareXL()
-	if e != nil {
-		t.Fatalf("Initialization of object layer failed for XL setup: %s", e)
+	obj, fsDirs, err := prepareXL()
+	if err != nil {
+		t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
 	}
 	// Executing the object layer tests for XL.
 	defer removeRoots(fsDirs)
@@ -1197,18 +1192,14 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) {
 		xl.storageDisks[i] = newNaughtyDisk(d.(*posix), nil, errFaultyDisk)
 	}
 
-	webHandlers := &webAPIHandlers{
-		ObjectAPI: func() ObjectLayer { return obj },
-	}
-	// Initialize router.
-	apiRouter := router.NewRouter()
-	registerWebRouter(apiRouter, webHandlers)
+	// Initialize web rpc endpoint.
+	apiRouter := initTestWebRPCEndPoint(obj)
 
 	// initialize the server and obtain the credentials and root.
 	// credentials are necessary to sign the HTTP request.
-	rootPath, e := newTestConfig("us-east-1")
-	if e != nil {
-		t.Fatalf("Init Test config failed")
+	rootPath, err := newTestConfig("us-east-1")
+	if err != nil {
+		t.Fatal("Init Test config failed", err)
 	}
 	// remove the root folder after the test ends.
 	defer removeAll(rootPath)
@@ -1216,9 +1207,9 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) {
 	rec := httptest.NewRecorder()
 
 	credentials := serverConfig.GetCredential()
-	authorization, e := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey)
-	if e != nil {
-		t.Fatal("Cannot authenticate")
+	authorization, err := getWebRPCToken(apiRouter, credentials.AccessKeyID, credentials.SecretAccessKey)
+	if err != nil {
+		t.Fatal("Cannot authenticate", err)
 	}
 
 	// Check if web rpc calls return errors with faulty disks. ServerInfo, GenerateAuth, SetAuth, GetAuth are not concerned
@@ -1228,9 +1219,9 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) {
 	for _, rpcCall := range webRPCs {
 		args := &GenericArgs{}
 		reply := &WebGenericRep{}
-		req, err := newTestWebRPCRequest("Web."+rpcCall, authorization, args)
-		if err != nil {
-			t.Fatalf("Test %s: Failed to create HTTP request: <ERROR> %v", rpcCall, err)
+		req, nerr := newTestWebRPCRequest("Web."+rpcCall, authorization, args)
+		if nerr != nil {
+			t.Fatalf("Test %s: Failed to create HTTP request: <ERROR> %v", rpcCall, nerr)
 		}
 		apiRouter.ServeHTTP(rec, req)
 		if rec.Code != http.StatusOK {
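These web-handler tests never open a socket: they call ServeHTTP directly with an httptest.ResponseRecorder and inspect the recorded status. The pattern in isolation (handler body is illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Build a request, hand it to ServeHTTP with a recorder, and assert
	// on rec.Code - exactly how the tests above drive the router.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusForbidden)
		fmt.Fprint(w, "authentication failed")
	})
	rec := httptest.NewRecorder()
	req := httptest.NewRequest("POST", "/minio/webrpc", nil)
	h.ServeHTTP(rec, req)
	fmt.Println(rec.Code, rec.Body.String()) // 403 authentication failed
}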
@@ -58,7 +58,12 @@ func assetFS() *assetfs.AssetFS {
 const specialAssets = "loader.css|logo.svg|firefox.png|safari.png|chrome.png|favicon.ico"
 
 // registerWebRouter - registers web router for serving minio browser.
-func registerWebRouter(mux *router.Router, web *webAPIHandlers) {
+func registerWebRouter(mux *router.Router) {
+	// Initialize Web.
+	web := &webAPIHandlers{
+		ObjectAPI: newObjectLayerFn,
+	}
+
 	// Initialize a new json2 codec.
 	codec := json2.NewCodec()
@@ -428,16 +428,6 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 		size = sizeWritten
 	}
 
-	// Validate if payload is valid.
-	if isSignVerify(data) {
-		if err = data.(*signVerifyReader).Verify(); err != nil {
-			// Incoming payload wrong, delete the temporary object.
-			xl.deleteObject(minioMetaBucket, tmpPartPath)
-			// Returns md5 mismatch.
-			return "", toObjectErr(err, bucket, object)
-		}
-	}
-
 	// Calculate new md5sum.
 	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
 	if md5Hex != "" {

@@ -609,16 +609,6 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 		}
 	}
 
-	// Validate if payload is valid.
-	if isSignVerify(data) {
-		if vErr := data.(*signVerifyReader).Verify(); vErr != nil {
-			// Incoming payload wrong, delete the temporary object.
-			xl.deleteObject(minioMetaTmpBucket, tempObj)
-			// Error return.
-			return ObjectInfo{}, toObjectErr(traceError(vErr), bucket, object)
-		}
-	}
-
 	// md5Hex representation.
 	md5Hex := metadata["md5Sum"]
 	if md5Hex != "" {
cmd/xl-v1.go (68 changed lines)

@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"sort"
 
-	"github.com/minio/minio-go/pkg/set"
 	"github.com/minio/minio/pkg/disk"
 	"github.com/minio/minio/pkg/objcache"
 )

@@ -107,37 +106,10 @@ func repairDiskMetadata(storageDisks []StorageAPI) error {
 }
 
 // newXLObjects - initialize new xl object layer.
-func newXLObjects(disks, ignoredDisks []string) (ObjectLayer, error) {
-	if disks == nil {
+func newXLObjects(storageDisks []StorageAPI) (ObjectLayer, error) {
+	if storageDisks == nil {
 		return nil, errInvalidArgument
 	}
-	disksSet := set.NewStringSet()
-	if len(ignoredDisks) > 0 {
-		disksSet = set.CreateStringSet(ignoredDisks...)
-	}
-	// Bootstrap disks.
-	storageDisks := make([]StorageAPI, len(disks))
-	for index, disk := range disks {
-		// Check if disk is ignored.
-		if disksSet.Contains(disk) {
-			storageDisks[index] = nil
-			continue
-		}
-		var err error
-		// Intentionally ignore disk not found errors. XL is designed
-		// to handle these errors internally.
-		storageDisks[index], err = newStorageAPI(disk)
-		if err != nil && err != errDiskNotFound {
-			switch diskType := storageDisks[index].(type) {
-			case networkStorage:
-				diskType.rpcClient.Close()
-			}
-			return nil, err
-		}
-	}
-
-	// Fix format files in case of fresh or corrupted disks
-	repairDiskMetadata(storageDisks)
 
 	// Runs house keeping code, like cleaning up tmp files etc.
 	if err := xlHouseKeeping(storageDisks); err != nil {

@@ -147,7 +119,6 @@ func newXLObjects(storageDisks []StorageAPI) (ObjectLayer, error) {
 	// Load saved XL format.json and validate.
 	newPosixDisks, err := loadFormatXL(storageDisks)
 	if err != nil {
-		// errCorruptedDisk - healing failed
 		return nil, fmt.Errorf("Unable to recognize backend format, %s", err)
 	}

@@ -204,25 +175,36 @@ func (d byDiskTotal) Less(i, j int) bool {
 	return d[i].Total < d[j].Total
 }
 
-// StorageInfo - returns underlying storage statistics.
-func (xl xlObjects) StorageInfo() StorageInfo {
-	var disksInfo []disk.Info
-	for _, storageDisk := range xl.storageDisks {
+// getDisksInfo - fetch disks info across all other storage API.
+func getDisksInfo(disks []StorageAPI) (disksInfo []disk.Info, onlineDisks int, offlineDisks int) {
+	for _, storageDisk := range disks {
 		if storageDisk == nil {
 			// Storage disk is empty, perhaps ignored disk or not available.
+			offlineDisks++
 			continue
 		}
 		info, err := storageDisk.DiskInfo()
 		if err != nil {
 			errorIf(err, "Unable to fetch disk info for %#v", storageDisk)
+			if err == errDiskNotFound {
+				offlineDisks++
+			}
 			continue
 		}
+		onlineDisks++
 		disksInfo = append(disksInfo, info)
 	}
 
 	// Sort so that the first element is the smallest.
 	sort.Sort(byDiskTotal(disksInfo))
+
+	// Success.
+	return disksInfo, onlineDisks, offlineDisks
+}
+
+// Get an aggregated storage info across all disks.
+func getStorageInfo(disks []StorageAPI) StorageInfo {
+	disksInfo, onlineDisks, offlineDisks := getDisksInfo(disks)
 	if len(disksInfo) == 0 {
 		return StorageInfo{
 			Total: -1,

@@ -232,8 +214,18 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
 	// Return calculated storage info, choose the lowest Total and
 	// Free as the total aggregated values. Total capacity is always
 	// the multiple of smallest disk among the disk list.
-	return StorageInfo{
-		Total: disksInfo[0].Total * int64(len(xl.storageDisks)),
-		Free:  disksInfo[0].Free * int64(len(xl.storageDisks)),
+	storageInfo := StorageInfo{
+		Total: disksInfo[0].Total * int64(onlineDisks),
+		Free:  disksInfo[0].Free * int64(onlineDisks),
 	}
+	storageInfo.Backend.Type = XL
+	storageInfo.Backend.OnlineDisks = onlineDisks
+	storageInfo.Backend.OfflineDisks = offlineDisks
+	storageInfo.Backend.Quorum = len(disks) / 2
+	return storageInfo
+}
+
+// StorageInfo - returns underlying storage statistics.
+func (xl xlObjects) StorageInfo() StorageInfo {
+	return getStorageInfo(xl.storageDisks)
 }
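getStorageInfo's arithmetic is worth spelling out: aggregated capacity is the smallest disk's Total multiplied by the number of online disks, so a single undersized drive caps the whole erasure set. A worked sketch with made-up numbers:

package main

import "fmt"

// diskInfo mirrors the two fields the aggregation uses.
type diskInfo struct{ Total, Free int64 }

// aggregate applies the same rule as getStorageInfo: find the smallest
// disk, then scale its Total and Free by the online disk count.
func aggregate(disks []diskInfo, online int) (total, free int64) {
	smallest := disks[0]
	for _, d := range disks {
		if d.Total < smallest.Total {
			smallest = d
		}
	}
	return smallest.Total * int64(online), smallest.Free * int64(online)
}

func main() {
	disks := []diskInfo{{Total: 100, Free: 40}, {Total: 80, Free: 10}, {Total: 100, Free: 90}}
	total, free := aggregate(disks, len(disks))
	fmt.Println(total, free) // 240 30 - bounded by the 80-unit disk
}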
@@ -48,7 +48,12 @@ func TestStorageInfo(t *testing.T) {
 		t.Fatalf("Diskinfo total values should be greater 0")
 	}
 
-	objLayer, err = newXLObjects(fsDirs, fsDirs[:4])
+	storageDisks, err := initStorageDisks(fsDirs, fsDirs[:4])
+	if err != nil {
+		t.Fatal("Unexpected error: ", err)
+	}
+
+	objLayer, err = newXLObjects(storageDisks)
 	if err != nil {
 		t.Fatalf("Unable to initialize 'XL' object layer with ignored disks %s.", fsDirs[:4])
 	}

@@ -83,23 +88,38 @@ func TestNewXL(t *testing.T) {
 	}
 
 	// No disks input.
-	_, err := newXLObjects(nil, nil)
+	_, err := newXLObjects(nil)
 	if err != errInvalidArgument {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}
 
+	storageDisks, err := initStorageDisks(erasureDisks, nil)
+	if err != nil {
+		t.Fatal("Unexpected error: ", err)
+	}
+
+	err = waitForFormatDisks(true, "", nil)
+	if err != errInvalidArgument {
+		t.Fatalf("Expecting error, got %s", err)
+	}
+
 	// Initializes all erasure disks
-	err = formatDisks(erasureDisks, nil)
+	err = waitForFormatDisks(true, "", storageDisks)
 	if err != nil {
 		t.Fatalf("Unable to format disks for erasure, %s", err)
 	}
-	_, err = newXLObjects(erasureDisks, nil)
+	_, err = newXLObjects(storageDisks)
 	if err != nil {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}
 
+	storageDisks, err = initStorageDisks(erasureDisks, erasureDisks[:2])
+	if err != nil {
+		t.Fatal("Unexpected error: ", err)
+	}
+
 	// Initializes all erasure disks, ignoring first two.
-	_, err = newXLObjects(erasureDisks, erasureDisks[:2])
+	_, err = newXLObjects(storageDisks)
 	if err != nil {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}