Controller rpc tests (#2709)

* Test code for controller-handler operations:
  * Heal operations
  * List operation
* Switch to "testing" lib, moving away from gocheck
* Minor refactors
* Remove extra call to initGracefulShutdown
* Remove dead code in mainControl:
  Dead code found by the TestControlMain() test function that always passes.
* Add tests for control-*-main.go
This commit is contained in:
parent 559ad38b8c
commit 32f097b4d6
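The switch away from gocheck keeps the suite types but drives them from plain "testing" entry points that sequence setup, test body, and teardown by hand. Distilled from the diffs below, a minimal sketch of that pattern (exampleSuite and its methods are illustrative names, not part of this commit):

package cmd

import "testing"

// exampleSuite stands in for TestRPCControllerSuite below.
type exampleSuite struct{ serverType string }

func (s *exampleSuite) SetUpSuite(t *testing.T)    {} // start test server
func (s *exampleSuite) TearDownSuite(t *testing.T) {} // stop test server
func (s *exampleSuite) testSomething(t *testing.T) {} // assertions go through t

// A plain "testing" function replaces gocheck's implicit runner.
func TestSomething(t *testing.T) {
	s := &exampleSuite{serverType: "XL"}
	s.SetUpSuite(t)
	s.testSomething(t)
	s.TearDownSuite(t)
}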
@@ -45,10 +45,5 @@ COMMANDS:
 }
 
 func mainControl(ctx *cli.Context) {
-	if ctx.Args().First() != "" { // command help.
-		cli.ShowCommandHelp(ctx, ctx.Args().First())
-	} else {
-		// command with Subcommands is an App.
-		cli.ShowAppHelp(ctx)
-	}
+	cli.ShowAppHelp(ctx)
 }
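The commit message attributes this removal to dead code surfaced by TestControlMain(). For context, a standalone sketch of subcommand dispatch in this CLI style (assuming the minio/cli fork follows codegangsta/cli v1 semantics here; the "heal" command is illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/minio/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{{
		Name:   "heal",
		Action: func(ctx *cli.Context) { fmt.Println("heal subcommand ran") },
	}}
	// "./app heal" runs the subcommand's Action directly; the parent
	// action is reached only when no subcommand matched, which is the
	// path TestControlMain (below) exercises.
	app.Run(os.Args)
}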
143  cmd/control-mains_test.go  Normal file
@@ -0,0 +1,143 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"testing"

	"github.com/minio/cli"
)

// Test to call healControl() in control-heal-main.go
func TestControlHealMain(t *testing.T) {
	// create cli app for testing
	app := cli.NewApp()
	app.Commands = []cli.Command{controlCmd}

	// start test server
	testServer := StartTestServer(t, "XL")

	// schedule cleanup at the end
	defer testServer.Stop()

	// fetch http server endpoint
	url := testServer.Server.URL

	// create args to call
	args := []string{"./minio", "control", "heal", url}

	// run app
	err := app.Run(args)
	if err != nil {
		t.Errorf("Control-Heal-Main test failed with - %s", err.Error())
	}
}

// Test to call lockControl() in control-lock-main.go
func TestControlLockMain(t *testing.T) {
	// create cli app for testing
	app := cli.NewApp()
	app.Commands = []cli.Command{controlCmd}

	// start test server
	testServer := StartTestServer(t, "XL")

	// schedule cleanup at the end
	defer testServer.Stop()

	// enabling lock instrumentation.
	globalDebugLock = true
	// initializing the locks.
	initNSLock(false)
	// set debug lock info to `nil` so that other tests do not see
	// such modified env settings.
	defer func() {
		globalDebugLock = false
		nsMutex.debugLockMap = nil
	}()

	// fetch http server endpoint
	url := testServer.Server.URL

	// create args to call
	args := []string{"./minio", "control", "lock", url}

	// run app
	err := app.Run(args)
	if err != nil {
		t.Errorf("Control-Lock-Main test failed with - %s", err.Error())
	}
}

// Test to call shutdownControl() in control-shutdown-main.go
func TestControlShutdownMain(t *testing.T) {
	// create cli app for testing
	app := cli.NewApp()
	app.Commands = []cli.Command{controlCmd}

	// start test server
	testServer := StartTestServer(t, "XL")

	// schedule cleanup at the end
	defer testServer.Stop()

	// fetch http server endpoint
	url := testServer.Server.URL

	// create a dummy exit function
	testExitFn := func(exitCode int) {
		if exitCode != int(exitSuccess) {
			t.Errorf("Control-Shutdown-Main test failed - server exited with non-success error code - %d",
				exitCode)
		}
	}

	// initialize the shutdown signal listener
	err := initGracefulShutdown(testExitFn)
	if err != nil {
		t.Fatalf("Control-Shutdown-Main test failed in initGracefulShutdown() - %s",
			err.Error())
	}

	// create args to call
	args := []string{"./minio", "control", "shutdown", url}

	// run app
	err = app.Run(args)
	if err != nil {
		t.Errorf("Control-Shutdown-Main test failed with - %s",
			err.Error())
	}
}

// NOTE: This test practically always passes, but it's the only way to
// execute mainControl in a test situation.
func TestControlMain(t *testing.T) {
	// create cli app for testing
	app := cli.NewApp()
	app.Commands = []cli.Command{controlCmd}

	// create args to call
	args := []string{"./minio", "control"}

	// run app
	err := app.Run(args)
	if err != nil {
		t.Errorf("Control-Main test failed with - %s",
			err.Error())
	}
}
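TestControlShutdownMain above is only testable because initGracefulShutdown accepts the exit function as a parameter instead of calling os.Exit directly. The seam in isolation, as a self-contained sketch (shutdown and exitFn are illustrative stand-ins for the minio internals):

package main

import "fmt"

// exitFn abstracts os.Exit so a test can intercept termination.
type exitFn func(code int)

// shutdown runs cleanup and reports the outcome through exit, the way
// initGracefulShutdown threads its onExitFn into the signal monitor.
func shutdown(exit exitFn) {
	// ... run shutdown callbacks here ...
	exit(0)
}

func main() {
	got := make(chan int, 1)
	shutdown(func(code int) { got <- code }) // test double, like testExitFn above
	fmt.Println("exit code:", <-got)
}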
@@ -127,8 +127,8 @@ type ShutdownArgs struct {
 	// Authentication token generated by Login.
 	GenericArgs
 
-	// Should the server be restarted, call active connections are served before server
-	// is restarted.
+	// Should the server be restarted, all active connections are
+	// served before server is restarted.
 	Restart bool
 }
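With the Restart flag set, a controller client can request an in-place restart over RPC. A hedged sketch of such a call, following the client pattern used in the tests below (newAuthClient, authConfig, ShutdownArgs and GenericReply come from this codebase; the method name "Controller.ShutdownHandler" is an assumption based on the Controller.* naming seen elsewhere in this diff):

// Sketch only, not a full test.
func restartServer(conf *authConfig) error {
	client := newAuthClient(conf)
	defer client.Close()

	// Embedded GenericArgs left zero; login is handled by the client.
	args := &ShutdownArgs{Restart: true}
	reply := &GenericReply{}
	return client.Call("Controller.ShutdownHandler", args, reply)
}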
@@ -19,45 +19,59 @@ package cmd
 import (
 	"path"
 	"strconv"
 	"strings"
 	"sync"
 	"testing"
 	"time"
-
-	. "gopkg.in/check.v1"
 )
 
 // API suite container common to both FS and XL.
 type TestRPCControllerSuite struct {
-	serverType string
-	testServer TestServer
-	endPoint   string
-	accessKey  string
-	secretKey  string
+	serverType   string
+	testServer   TestServer
+	testAuthConf *authConfig
 }
 
-// Init and run test on XL backend.
-var _ = Suite(&TestRPCControllerSuite{serverType: "XL"})
-
 // Setting up the test suite.
 // Starting the Test server with temporary FS backend.
-func (s *TestRPCControllerSuite) SetUpSuite(c *C) {
+func (s *TestRPCControllerSuite) SetUpSuite(c *testing.T) {
 	s.testServer = StartTestRPCServer(c, s.serverType)
-	s.endPoint = s.testServer.Server.Listener.Addr().String()
-	s.accessKey = s.testServer.AccessKey
-	s.secretKey = s.testServer.SecretKey
+	s.testAuthConf = &authConfig{
+		address:     s.testServer.Server.Listener.Addr().String(),
+		accessKey:   s.testServer.AccessKey,
+		secretKey:   s.testServer.SecretKey,
+		path:        path.Join(reservedBucket, controlPath),
+		loginMethod: "Controller.LoginHandler",
+	}
 }
 
-// Called implicitly by "gopkg.in/check.v1" after all tests are run.
-func (s *TestRPCControllerSuite) TearDownSuite(c *C) {
+// No longer used with gocheck, but used in explicit teardown code in
+// each test function. // Called implicitly by "gopkg.in/check.v1"
+// after all tests are run.
+func (s *TestRPCControllerSuite) TearDownSuite(c *testing.T) {
 	s.testServer.Stop()
 }
 
+func TestRPCControlLock(t *testing.T) {
+	//setup code
+	s := &TestRPCControllerSuite{serverType: "XL"}
+	s.SetUpSuite(t)
+
+	//run test
+	s.testRPCControlLock(t)
+
+	//teardown code
+	s.TearDownSuite(t)
+}
+
 // Tests to validate the correctness of lock instrumentation control RPC end point.
-func (s *TestRPCControllerSuite) TestRPCControlLock(c *C) {
+func (s *TestRPCControllerSuite) testRPCControlLock(c *testing.T) {
 	// enabling lock instrumentation.
 	globalDebugLock = true
 	// initializing the locks.
 	initNSLock(false)
-	// set debug lock info to `nil` so that the next tests have to initialize them again.
+	// set debug lock info to `nil` so that the next tests have to
+	// initialize them again.
 	defer func() {
 		globalDebugLock = false
 		nsMutex.debugLockMap = nil
@@ -181,16 +195,7 @@ func (s *TestRPCControllerSuite) TestRPCControlLock(c *C) {
 		nsMutex.RLock("my-bucket", "my-object", strconv.Itoa(i))
 	}
 
-	authCfg := &authConfig{
-		accessKey:   s.accessKey,
-		secretKey:   s.secretKey,
-		address:     s.endPoint,
-		path:        path.Join(reservedBucket, controlPath),
-		loginMethod: "Controller.LoginHandler",
-	}
-
-	client := newAuthClient(authCfg)
+	client := newAuthClient(s.testAuthConf)
 	defer client.Close()
 
 	args := &GenericArgs{}
@@ -273,19 +278,23 @@ func (s *TestRPCControllerSuite) TestRPCControlLock(c *C) {
 	}
 }
 
+func TestControllerHealDiskMetadataH(t *testing.T) {
+	//setup code
+	s := &TestRPCControllerSuite{serverType: "XL"}
+	s.SetUpSuite(t)
+
+	//run test
+	s.testControllerHealDiskMetadataH(t)
+
+	//teardown code
+	s.TearDownSuite(t)
+}
+
 // TestControllerHandlerHealDiskMetadata - Registers and call the `HealDiskMetadataHandler`,
 // asserts to validate the success.
-func (s *TestRPCControllerSuite) TestControllerHandlerHealDiskMetadata(c *C) {
+func (s *TestRPCControllerSuite) testControllerHealDiskMetadataH(c *testing.T) {
 	// The suite has already started the test RPC server, just send RPC calls.
-	authCfg := &authConfig{
-		accessKey:   s.accessKey,
-		secretKey:   s.secretKey,
-		address:     s.endPoint,
-		path:        path.Join(reservedBucket, controlPath),
-		loginMethod: "Controller.LoginHandler",
-	}
-
-	client := newAuthClient(authCfg)
+	client := newAuthClient(s.testAuthConf)
 	defer client.Close()
 
 	args := &GenericArgs{}
@@ -293,6 +302,96 @@ func (s *TestRPCControllerSuite) TestControllerHandlerHealDiskMetadata(c *C) {
 	err := client.Call("Controller.HealDiskMetadataHandler", args, reply)
 
 	if err != nil {
-		c.Errorf("Heal Meta Disk Handler test failed with <ERROR> %s", err.Error())
+		c.Errorf("Control.HealDiskMetadataH - test failed with <ERROR> %s",
+			err.Error())
 	}
 }
+
+func TestControllerHealObjectH(t *testing.T) {
+	//setup code
+	s := &TestRPCControllerSuite{serverType: "XL"}
+	s.SetUpSuite(t)
+
+	//run test
+	s.testControllerHealObjectH(t)
+
+	//teardown code
+	s.TearDownSuite(t)
+}
+
+func (s *TestRPCControllerSuite) testControllerHealObjectH(t *testing.T) {
+	client := newAuthClient(s.testAuthConf)
+	defer client.Close()
+
+	err := s.testServer.Obj.MakeBucket("testbucket")
+	if err != nil {
+		t.Fatalf(
+			"Controller.HealObjectH - create bucket failed with <ERROR> %s",
+			err.Error(),
+		)
+	}
+
+	datum := strings.NewReader("a")
+	_, err = s.testServer.Obj.PutObject("testbucket", "testobject", 1,
+		datum, nil)
+	if err != nil {
+		t.Fatalf("Controller.HealObjectH - put object failed with <ERROR> %s",
+			err.Error())
+	}
+
+	args := &HealObjectArgs{GenericArgs{}, "testbucket", "testobject"}
+	reply := &GenericReply{}
+	err = client.Call("Controller.HealObjectHandler", args, reply)
+
+	if err != nil {
+		t.Errorf("Controller.HealObjectH - test failed with <ERROR> %s",
+			err.Error())
+	}
+}
+
+func TestControllerListObjectsHealH(t *testing.T) {
+	//setup code
+	s := &TestRPCControllerSuite{serverType: "XL"}
+	s.SetUpSuite(t)
+
+	//run test
+	s.testControllerListObjectsHealH(t)
+
+	//teardown code
+	s.TearDownSuite(t)
+}
+
+func (s *TestRPCControllerSuite) testControllerListObjectsHealH(t *testing.T) {
+	client := newAuthClient(s.testAuthConf)
+	defer client.Close()
+
+	// create a bucket
+	err := s.testServer.Obj.MakeBucket("testbucket")
+	if err != nil {
+		t.Fatalf(
+			"Controller.ListObjectsHealH - create bucket failed - %s",
+			err.Error(),
+		)
+	}
+
+	r := strings.NewReader("0")
+	_, err = s.testServer.Obj.PutObject(
+		"testbucket", "testObj-0", 1, r, nil,
+	)
+	if err != nil {
+		t.Fatalf("Controller.ListObjectsHealH - object creation failed - %s",
+			err.Error())
+	}
+
+	args := &HealListArgs{
+		GenericArgs{}, "testbucket", "testObj-",
+		"", "", 100,
+	}
+	reply := &GenericReply{}
+	err = client.Call("Controller.ListObjectsHealHandler", args, reply)
+
+	if err != nil {
+		t.Errorf("Controller.ListObjectsHealHandler - test failed - %s",
+			err.Error())
+	}
+}
@@ -87,10 +87,6 @@ func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
 	storageRPCs, err := newRPCServer(srvCmdConfig)
 	fatalIf(err, "Unable to initialize storage RPC server.")
 
-	// Initialize and monitor shutdown signals.
-	err = initGracefulShutdown(os.Exit)
-	fatalIf(err, "Unable to initialize graceful shutdown operation")
-
 	// Initialize API.
 	apiHandlers := objectAPIHandlers{
 		ObjectAPI: newObjectLayerFn,
@@ -82,7 +82,7 @@ EXAMPLES:
 
 5. Start minio server on 12 disks while ignoring two disks for initialization.
       $ minio {{.Name}} --ignore-disks=/mnt/export1/ /mnt/export1/ /mnt/export2/ \
-          /mnt/export3/ /mnt/export4/ /mnt/export5/ /mnt/export6/ /mnt/export7/ \
+          /mnt/export3/ /mnt/export4/ /mnt/export5/ /mnt/export6/ /mnt/export7/ \
           /mnt/export8/ /mnt/export9/ /mnt/export10/ /mnt/export11/ /mnt/export12/
 
 6. Start minio server on a 4 node distributed setup. Type the following command on all the 4 nodes.
@@ -318,7 +318,8 @@ func formatDisks(disks, ignoredDisks []string) error {
 	for _, storage := range storageDisks {
 		switch store := storage.(type) {
 		// Closing associated TCP connections since
-		// []StorageAPI is garage collected eventually.
+		// []StorageAPI is garbage collected
+		// eventually.
 		case networkStorage:
 			var reply GenericReply
 			_ = store.rpcClient.Call("Storage.TryInitHandler", &GenericArgs{}, &reply)
12  cmd/utils.go
@@ -293,18 +293,22 @@ func startMonitorShutdownSignal(onExitFn onExitFunc) error {
 			// Initiate graceful shutdown.
 			globalShutdownSignalCh <- shutdownHalt
 		case signal := <-globalShutdownSignalCh:
-			// Call all object storage shutdown callbacks and exit for emergency
+			// Call all object storage shutdown
+			// callbacks and exit for emergency
 			exitCode := globalShutdownCBs.RunObjectLayerCBs()
 			if exitCode != exitSuccess {
 				runExitFn(exitCode)
 			}
 
 			exitCode = globalShutdownCBs.RunGenericCBs()
 			if exitCode != exitSuccess {
 				runExitFn(exitCode)
 			}
-			// All shutdown callbacks ensure that the server is safely terminated
-			// and any concurrent process could be started again
+
+			// All shutdown callbacks ensure that
+			// the server is safely terminated and
+			// any concurrent process could be
+			// started again
 			if signal == shutdownRestart {
 				path := os.Args[0]
 				cmdArgs := os.Args[1:]
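The control flow above is a two-phase callback drain: object layer callbacks run first, then generic ones, and the first non-success code aborts the sequence. A self-contained sketch of that shape (the registry type is illustrative; minio keeps these in globalShutdownCBs):

package main

import (
	"fmt"
	"os"
)

const exitSuccess = 0

// callback returns an exit code; non-success aborts the shutdown sequence.
type callback func() int

func runAll(cbs []callback) int {
	for _, cb := range cbs {
		if code := cb(); code != exitSuccess {
			return code
		}
	}
	return exitSuccess
}

func main() {
	objectLayerCBs := []callback{func() int { fmt.Println("flush object layer"); return exitSuccess }}
	genericCBs := []callback{func() int { fmt.Println("close listeners"); return exitSuccess }}

	// Object layer callbacks run first; any failure exits immediately,
	// mirroring RunObjectLayerCBs / RunGenericCBs above.
	if code := runAll(objectLayerCBs); code != exitSuccess {
		os.Exit(code)
	}
	if code := runAll(genericCBs); code != exitSuccess {
		os.Exit(code)
	}
}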