server: Implement --ignore-disks for ignoring disks from healing. (#2158)

By default the server heals/creates missing directories and re-populates
`format.json`. In some scenarios, when a disk is down for maintenance,
it is beneficial to let users ignore such disks rather than have the
heal mistakenly write to the `root` partition.

Fixes #2128
Harshavardhana 2016-07-10 14:38:15 -07:00 committed by Anand Babu (AB) Periasamy
parent 0793237d94
commit bdff0848ed
8 changed files with 100 additions and 54 deletions
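Before the per-file hunks, here is a minimal, self-contained sketch of the mechanism this commit introduces (the `StorageAPI` interface and `posixDisk` type below are hypothetical stand-ins, not the server's real types): disks listed via `--ignore-disks` keep their index in the storage slice but are left `nil`, so the healing and initialization loops that check `if disk == nil` simply skip them.

```go
package main

import "fmt"

// StorageAPI is a stand-in for the server's storage interface,
// reduced to the bare minimum for this sketch.
type StorageAPI interface {
	Path() string
}

// posixDisk is a hypothetical concrete disk type for the sketch.
type posixDisk struct{ path string }

func (p posixDisk) Path() string { return p.path }

// isIgnored reports whether disk appears in the ignored list,
// mirroring isDiskFound in the diff below.
func isIgnored(disk string, ignoredDisks []string) bool {
	for _, d := range ignoredDisks {
		if disk == d {
			return true
		}
	}
	return false
}

// initStorageDisks keeps ignored disks as nil entries so that later
// healing/format loops can simply skip them.
func initStorageDisks(disks, ignoredDisks []string) []StorageAPI {
	storageDisks := make([]StorageAPI, len(disks))
	for index, disk := range disks {
		if isIgnored(disk, ignoredDisks) {
			continue // slot stays nil: no healing, no format.json
		}
		storageDisks[index] = posixDisk{path: disk}
	}
	return storageDisks
}

func main() {
	disks := []string{"/mnt/export1", "/mnt/export2", "/mnt/export3", "/mnt/export4"}
	storageDisks := initStorageDisks(disks, []string{"/mnt/export2"})
	for i, d := range storageDisks {
		if d == nil {
			fmt.Printf("disk %d: ignored, skipped by healing\n", i)
			continue
		}
		fmt.Printf("disk %d: %s\n", i, d.Path())
	}
}
```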

@@ -115,27 +115,31 @@ var errSomeDiskOffline = errors.New("some disks are offline")
var errDiskOrderMismatch = errors.New("disk order mismatch")
// Returns error slice into understandable errors.
func reduceFormatErrs(errs []error, diskCount int) error {
func reduceFormatErrs(errs []error, diskCount int) (err error) {
var errUnformattedDiskCount = 0
var errDiskNotFoundCount = 0
for _, err := range errs {
if err == errUnformattedDisk {
for _, dErr := range errs {
if dErr == errUnformattedDisk {
errUnformattedDiskCount++
} else if err == errDiskNotFound {
} else if dErr == errDiskNotFound {
errDiskNotFoundCount++
}
}
// Returns errUnformattedDisk if all disks report unFormattedDisk.
if errUnformattedDiskCount == diskCount {
// Unformatted disks found, we need to figure out if any disks are offline.
if errUnformattedDiskCount > 0 {
// Returns errUnformattedDisk if all disks report unFormattedDisk.
if errUnformattedDiskCount < diskCount {
if errDiskNotFoundCount > 0 {
// Only some disks are fresh but some disks are offline as well.
return errSomeDiskOffline
}
// Some disks are fresh and unformatted, no disks are offline.
return errSomeDiskUnformatted
}
// All disks returned unformatted, all disks must be fresh.
return errUnformattedDisk
} else if errUnformattedDiskCount < diskCount && errDiskNotFoundCount == 0 {
// Only some disks return unFormattedDisk and all disks are online.
return errSomeDiskUnformatted
} else if errUnformattedDiskCount < diskCount && errDiskNotFoundCount > 0 {
// Only some disks return unFormattedDisk and some disks are
// offline as well.
return errSomeDiskOffline
}
// No unformatted disks found, no need to handle the disk not found case, return success here.
return nil
}
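To make the rewritten error reduction above easy to exercise, here is a standalone reproduction (the control flow is copied from the hunk; the sentinel error messages and the enclosing `main` package are illustrative only) with one call per outcome:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel errors (messages are illustrative; the real values live in the file above).
var (
	errUnformattedDisk     = errors.New("unformatted disk found")
	errDiskNotFound        = errors.New("disk not found")
	errSomeDiskUnformatted = errors.New("some disks are unformatted")
	errSomeDiskOffline     = errors.New("some disks are offline")
)

// reduceFormatErrs as rewritten by this commit.
func reduceFormatErrs(errs []error, diskCount int) error {
	errUnformattedDiskCount := 0
	errDiskNotFoundCount := 0
	for _, dErr := range errs {
		if dErr == errUnformattedDisk {
			errUnformattedDiskCount++
		} else if dErr == errDiskNotFound {
			errDiskNotFoundCount++
		}
	}
	if errUnformattedDiskCount > 0 {
		if errUnformattedDiskCount < diskCount {
			if errDiskNotFoundCount > 0 {
				return errSomeDiskOffline // some fresh, some offline
			}
			return errSomeDiskUnformatted // some fresh, all online
		}
		return errUnformattedDisk // all disks fresh
	}
	return nil // nothing to heal or format
}

func main() {
	fresh, missing := errUnformattedDisk, errDiskNotFound
	fmt.Println(reduceFormatErrs([]error{fresh, fresh, fresh, fresh}, 4)) // unformatted disk found
	fmt.Println(reduceFormatErrs([]error{fresh, nil, nil, nil}, 4))       // some disks are unformatted
	fmt.Println(reduceFormatErrs([]error{fresh, missing, nil, nil}, 4))   // some disks are offline
	fmt.Println(reduceFormatErrs([]error{nil, nil, nil, nil}, 4))         // <nil>
}
```
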
@@ -152,6 +156,10 @@ func loadAllFormats(bootstrapDisks []StorageAPI) ([]*formatConfigV1, []error) {
// Make a volume entry on all underlying storage disks.
for index, disk := range bootstrapDisks {
if disk == nil {
sErrs[index] = errDiskNotFound
continue
}
wg.Add(1)
// Make a volume inside a go-routine.
go func(index int, disk StorageAPI) {
@@ -409,6 +417,11 @@ func healFormatXL(storageDisks []StorageAPI) error {
var referenceConfig *formatConfigV1
// Loads `format.json` from all disks.
for index, disk := range storageDisks {
// Disk not found or ignored is a valid case.
if disk == nil {
// Proceed without healing.
return nil
}
formatXL, err := loadFormat(disk)
if err != nil {
if err == errUnformattedDisk {
@@ -429,11 +442,6 @@ func healFormatXL(storageDisks []StorageAPI) error {
return nil
}
// Init meta volume.
if err := initMetaVolume(storageDisks); err != nil {
return err
}
// All disks are fresh, format.json will be written by initFormatXL()
if isFormatNotFound(formatConfigs) {
return initFormatXL(storageDisks)
@@ -499,6 +507,10 @@ func loadFormatXL(bootstrapDisks []StorageAPI) (disks []StorageAPI, err error) {
// Try to load `format.json` bootstrap disks.
for index, disk := range bootstrapDisks {
if disk == nil {
diskNotFoundCount++
continue
}
var formatXL *formatConfigV1
formatXL, err = loadFormat(disk)
if err != nil {
@@ -515,16 +527,13 @@ func loadFormatXL(bootstrapDisks []StorageAPI) (disks []StorageAPI, err error) {
formatConfigs[index] = formatXL
}
// If all disks indicate that 'format.json' is not available
// return 'errUnformattedDisk'.
if unformattedDisksFoundCnt == len(bootstrapDisks) {
// If more disks than read quorum can tolerate indicate that 'format.json' is not available, return 'errUnformattedDisk'.
if unformattedDisksFoundCnt > len(bootstrapDisks)-(len(bootstrapDisks)/2+1) {
return nil, errUnformattedDisk
} else if diskNotFoundCount == len(bootstrapDisks) {
return nil, errDiskNotFound
} else if diskNotFoundCount > len(bootstrapDisks)-(len(bootstrapDisks)/2+1) {
return nil, errXLReadQuorum
} else if unformattedDisksFoundCnt > len(bootstrapDisks)-(len(bootstrapDisks)/2+1) {
return nil, errXLReadQuorum
}
// Validate the format configs read are correct.
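The checks above replace the old all-disks test with a read-quorum tolerance expressed as `len(bootstrapDisks) - (len(bootstrapDisks)/2 + 1)`. A small standalone sketch (not code from the commit) of what that tolerance works out to for common disk counts:

```go
package main

import "fmt"

// tolerableMissing mirrors the expression used in loadFormatXL above:
// with a read quorum of n/2+1 disks, up to n-(n/2+1) disks may be
// missing 'format.json' (or be offline) before init has to give up.
func tolerableMissing(diskCount int) int {
	return diskCount - (diskCount/2 + 1)
}

func main() {
	for _, n := range []int{4, 8, 12, 16} {
		fmt.Printf("%2d disks: read quorum %2d, tolerate up to %d missing/offline\n",
			n, n/2+1, tolerableMissing(n))
	}
}
```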

@@ -40,7 +40,7 @@ func TestNewFS(t *testing.T) {
}
// Initializes all disks with XL
_, err := newXLObjects(disks)
_, err := newXLObjects(disks, nil)
if err != nil {
t.Fatalf("Unable to initialize XL object, %s", err)
}

@@ -86,7 +86,7 @@ func initMetaVolume(storageDisks []StorageAPI) error {
// Initialize all disks in parallel.
for index, disk := range storageDisks {
if disk == nil {
errs[index] = errDiskNotFound
// Skip creating the meta volume on disks which are not found.
continue
}
wg.Add(1)
@@ -135,7 +135,6 @@ func xlHouseKeeping(storageDisks []StorageAPI) error {
// Initialize all disks in parallel.
for index, disk := range storageDisks {
if disk == nil {
errs[index] = errDiskNotFound
continue
}
wg.Add(1)

@@ -23,16 +23,15 @@ import (
router "github.com/gorilla/mux"
)
// newObjectLayer - initialize any object layer depending on the
// number of export paths.
func newObjectLayer(exportPaths []string) (ObjectLayer, error) {
if len(exportPaths) == 1 {
exportPath := exportPaths[0]
// newObjectLayer - initialize any object layer depending on the number of disks.
func newObjectLayer(disks, ignoredDisks []string) (ObjectLayer, error) {
if len(disks) == 1 {
exportPath := disks[0]
// Initialize FS object layer.
return newFSObjects(exportPath)
}
// Initialize XL object layer.
objAPI, err := newXLObjects(exportPaths)
objAPI, err := newXLObjects(disks, ignoredDisks)
if err == errXLWriteQuorum {
return objAPI, errors.New("Disks are different with last minio server run.")
}
@@ -41,11 +40,11 @@ func newObjectLayer(exportPaths []string) (ObjectLayer, error) {
// configureServer handler returns final handler for the http server.
func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
objAPI, err := newObjectLayer(srvCmdConfig.exportPaths)
objAPI, err := newObjectLayer(srvCmdConfig.disks, srvCmdConfig.ignoredDisks)
fatalIf(err, "Unable to initialize object layer.")
// Initialize storage rpc server.
storageRPC, err := newRPCServer(srvCmdConfig.exportPaths[0]) // FIXME: should only have one path.
storageRPC, err := newRPCServer(srvCmdConfig.disks[0]) // FIXME: should only have one path.
fatalIf(err, "Unable to initialize storage RPC server.")
// Initialize API.

@@ -39,6 +39,11 @@ var serverCmd = cli.Command{
cli.StringFlag{
Name: "address",
Value: ":9000",
Usage: "Specify custom server \"ADDRESS:PORT\", defaults to \":9000\".",
},
cli.StringFlag{
Name: "ignore-disks",
Usage: "Specify a comma-separated list of disks that are offline.",
},
},
Action: serverMain,
@@ -52,8 +57,13 @@ OPTIONS:
{{range .Flags}}{{.}}
{{end}}
ENVIRONMENT VARIABLES:
MINIO_ACCESS_KEY: Access key string of 5 to 20 characters in length.
MINIO_SECRET_KEY: Secret key string of 8 to 40 characters in length.
ACCESS:
MINIO_ACCESS_KEY: Access key string of 5 to 20 characters in length.
MINIO_SECRET_KEY: Secret key string of 8 to 40 characters in length.
CACHING:
MINIO_CACHE_SIZE: Set total cache size in NN[GB|MB|KB]. Defaults to 8GB.
MINIO_CACHE_EXPIRY: Set cache expiration duration in NN[h|m|s]. Defaults to 72 hours.
EXAMPLES:
1. Start minio server.
@@ -65,16 +75,23 @@ EXAMPLES:
3. Start minio server on Windows.
$ minio {{.Name}} C:\MyShare
4. Start minio server 12 disks to enable erasure coded layer with 6 data and 6 parity.
4. Start minio server on 12 disks to enable erasure coded layer with 6 data and 6 parity.
$ minio {{.Name}} /mnt/export1/backend /mnt/export2/backend /mnt/export3/backend /mnt/export4/backend \
/mnt/export5/backend /mnt/export6/backend /mnt/export7/backend /mnt/export8/backend /mnt/export9/backend \
/mnt/export10/backend /mnt/export11/backend /mnt/export12/backend
5. Start minio server on 12 disks while ignoring two disks for initialization.
$ minio {{.Name}} --ignore-disks=/mnt/export1/backend,/mnt/export2/backend /mnt/export1/backend \
/mnt/export2/backend /mnt/export3/backend /mnt/export4/backend /mnt/export5/backend /mnt/export6/backend \
/mnt/export7/backend /mnt/export8/backend /mnt/export9/backend /mnt/export10/backend /mnt/export11/backend \
/mnt/export12/backend
`,
}
type serverCmdConfig struct {
serverAddr string
exportPaths []string
serverAddr string
disks []string
ignoredDisks []string
}
// configureServer configures a new server instance
@@ -292,13 +309,17 @@ func serverMain(c *cli.Context) {
// Check if requested port is available.
checkPortAvailability(getPort(net.JoinHostPort(host, port)))
// Save all command line args as export paths.
exportPaths := c.Args()
// Disks to be ignored in server init, to skip format healing.
ignoredDisks := strings.Split(c.String("ignore-disks"), ",")
// Disks to be used in server init.
disks := c.Args()
// Configure server.
apiServer := configureServer(serverCmdConfig{
serverAddr: serverAddress,
exportPaths: exportPaths,
serverAddr: serverAddress,
disks: disks,
ignoredDisks: ignoredDisks,
})
// Credential.
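One side note on the flag parsing above (an observation, not part of the diff): `strings.Split` on an empty `--ignore-disks` value returns a one-element slice containing the empty string; since `isDiskFound` compares full disk paths, that empty entry never matches a real disk, so leaving the flag unset behaves as a no-op.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// An unset --ignore-disks flag splits into a single empty string,
	// not an empty slice; isDiskFound never matches "" against a real
	// path, so this is harmless in practice.
	fmt.Printf("%q\n", strings.Split("", ","))                          // [""]
	fmt.Printf("%q\n", strings.Split("/mnt/export1,/mnt/export2", ",")) // ["/mnt/export1" "/mnt/export2"]
}
```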

@@ -129,7 +129,7 @@ func StartTestServer(t TestErrHandler, instanceType string) TestServer {
testServer.AccessKey = credentials.AccessKeyID
testServer.SecretKey = credentials.SecretAccessKey
// Run TestServer.
testServer.Server = httptest.NewServer(configureServerHandler(serverCmdConfig{exportPaths: erasureDisks}))
testServer.Server = httptest.NewServer(configureServerHandler(serverCmdConfig{disks: erasureDisks}))
return testServer
}
@@ -632,7 +632,7 @@ func getXLObjectLayer() (ObjectLayer, []string, error) {
erasureDisks = append(erasureDisks, path)
}
objLayer, err := newXLObjects(erasureDisks)
objLayer, err := newXLObjects(erasureDisks, nil)
if err != nil {
return nil, nil, err
}

@@ -93,8 +93,19 @@ func checkSufficientDisks(disks []string) error {
return nil
}
// isDiskFound - validates if the disk is found in a list of input disks.
func isDiskFound(disk string, disks []string) bool {
for _, d := range disks {
// Disk found, return true.
if disk == d {
return true
}
}
return false
}
// newXLObjects - initialize new xl object layer.
func newXLObjects(disks []string) (ObjectLayer, error) {
func newXLObjects(disks, ignoredDisks []string) (ObjectLayer, error) {
// Validate if input disks are sufficient.
if err := checkSufficientDisks(disks); err != nil {
return nil, err
@@ -103,9 +114,14 @@ func newXLObjects(disks []string) (ObjectLayer, error) {
// Bootstrap disks.
storageDisks := make([]StorageAPI, len(disks))
for index, disk := range disks {
// Check if disk is ignored.
if isDiskFound(disk, ignoredDisks) {
storageDisks[index] = nil
continue
}
var err error
// Intentionally ignore disk not found errors. XL will
// manage such errors internally.
// Intentionally ignore disk not found errors. XL is designed
// to handle these errors internally.
storageDisks[index], err = newStorageAPI(disk)
if err != nil && err != errDiskNotFound {
return nil, err
@@ -122,12 +138,14 @@ func newXLObjects(disks []string) (ObjectLayer, error) {
return nil, err
}
// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolume(storageDisks); err != nil {
return nil, fmt.Errorf("Unable to initialize '.minio' meta volume, %s", err)
}
// Handles different cases properly.
switch reduceFormatErrs(sErrs, len(storageDisks)) {
case errUnformattedDisk:
if err := initMetaVolume(storageDisks); err != nil {
return nil, fmt.Errorf("Unable to initialize '.minio' meta volume, %s", err)
}
// All drives online but fresh, initialize format.
if err := initFormatXL(storageDisks); err != nil {
return nil, fmt.Errorf("Unable to initialize format, %s", err)
@@ -139,8 +157,8 @@ func newXLObjects(disks []string) (ObjectLayer, error) {
return nil, fmt.Errorf("Unable to heal backend %s", err)
}
case errSomeDiskOffline:
// Some disks offline but some report missing format.json.
// FIXME.
// FIXME: in future.
return nil, fmt.Errorf("Unable to initialize format %s and %s", errSomeDiskOffline, errSomeDiskUnformatted)
}
// Runs house keeping code, like cleaning up tmp files etc.

@@ -132,7 +132,7 @@ func TestNewXL(t *testing.T) {
defer removeAll(disk)
}
// Initializes all erasure disks
_, err := newXLObjects(erasureDisks)
_, err := newXLObjects(erasureDisks, nil)
if err != nil {
t.Fatalf("Unable to initialize erasure, %s", err)
}