XL: Implement ignore errors. (#2136)

Each metadata op has a list of errors that can be
ignored; this is essentially needed when

  - disks are not found
  - disks are found but cannot be accessed (permission denied)
  - disks are there but fresh disks were added

This is needed since we don't yet have healing code in place
that would have healed the freshly added disks.

Fixes #2072
This commit is contained in:
Harshavardhana 2016-07-07 22:10:27 -07:00 committed by Anand Babu (AB) Periasamy
parent 4c21d6d09d
commit ca1b1921c4
8 changed files with 122 additions and 76 deletions

View File

@ -85,7 +85,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// For any walk error return right away. // For any walk error return right away.
if walkResult.err != nil { if walkResult.err != nil {
// File not found or Disk not found is a valid case. // File not found or Disk not found is a valid case.
if walkResult.err == errFileNotFound || walkResult.err == errDiskNotFound || walkResult.err == errFaultyDisk { if isErrIgnored(walkResult.err, walkResultIgnoredErrs) {
eof = true eof = true
break break
} }

View File

@ -43,6 +43,16 @@ func registerShutdown(callback func()) {
}() }()
} }
// isErrIgnored should we ignore this error?, takes a list of errors which can be ignored.
func isErrIgnored(err error, ignoredErrs []error) bool {
for _, ignoredErr := range ignoredErrs {
if ignoredErr == err {
return true
}
}
return false
}
// House keeping code needed for FS. // House keeping code needed for FS.
func fsHouseKeeping(storageDisk StorageAPI) error { func fsHouseKeeping(storageDisk StorageAPI) error {
// Cleanup all temp entries upon start. // Cleanup all temp entries upon start.

View File

@ -21,6 +21,15 @@ import (
"strings" "strings"
) )
// walkResultIgnoredErrs lists the errors a tree-walk operation treats as
// benign: when one of these occurs for a disk, the walk skips that disk
// (or terminates the listing cleanly) instead of failing the whole
// operation. Membership is checked via isErrIgnored.
var walkResultIgnoredErrs = []error{
	errFileNotFound,     // entry absent on this disk
	errVolumeNotFound,   // bucket/volume absent on this disk
	errDiskNotFound,     // disk went offline or was removed
	errDiskAccessDenied, // disk present but not readable (permissions)
	errFaultyDisk,       // disk returned an I/O fault
}
// Tree walk result carries results of tree walking. // Tree walk result carries results of tree walking.
type treeWalkResult struct { type treeWalkResult struct {
entry string entry string
@ -48,32 +57,31 @@ func listDirFactory(isLeaf func(string, string) bool, disks ...StorageAPI) listD
continue continue
} }
entries, err = disk.ListDir(bucket, prefixDir) entries, err = disk.ListDir(bucket, prefixDir)
if err != nil { if err == nil {
// For any reason disk was deleted or goes offline, continue // Skip the entries which do not match the prefixEntry.
// and list from other disks if possible. for i, entry := range entries {
if err == errDiskNotFound || err == errFaultyDisk { if !strings.HasPrefix(entry, prefixEntry) {
continue entries[i] = ""
continue
}
if isLeaf(bucket, pathJoin(prefixDir, entry)) {
entries[i] = strings.TrimSuffix(entry, slashSeparator)
}
} }
break sort.Strings(entries)
} // Skip the empty strings
// Skip the entries which do not match the prefixEntry. for len(entries) > 0 && entries[0] == "" {
for i, entry := range entries { entries = entries[1:]
if !strings.HasPrefix(entry, prefixEntry) {
entries[i] = ""
continue
}
if isLeaf(bucket, pathJoin(prefixDir, entry)) {
entries[i] = strings.TrimSuffix(entry, slashSeparator)
} }
return entries, nil
} }
sort.Strings(entries) // For any reason disk was deleted or goes offline, continue
// Skip the empty strings // and list from other disks if possible.
for len(entries) > 0 && entries[0] == "" { if isErrIgnored(err, walkResultIgnoredErrs) {
entries = entries[1:] continue
} }
return entries, nil break
} }
// Return error at the end. // Return error at the end.
return nil, err return nil, err
} }

View File

@ -102,6 +102,13 @@ func (xl xlObjects) undoMakeBucket(bucket string) {
wg.Wait() wg.Wait()
} }
// bucketMetadataOpIgnoredErrs lists the per-disk errors that bucket
// metadata operations (getBucketInfo, listBuckets) tolerate by moving on
// to the next load-balanced disk rather than returning an error.
// Membership is checked via isErrIgnored.
var bucketMetadataOpIgnoredErrs = []error{
	errDiskNotFound,     // disk went offline or was removed
	errDiskAccessDenied, // disk present but not readable (permissions)
	errFaultyDisk,       // disk returned an I/O fault
}
// getBucketInfo - returns the BucketInfo from one of the load balanced disks. // getBucketInfo - returns the BucketInfo from one of the load balanced disks.
func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err error) { func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err error) {
for _, disk := range xl.getLoadBalancedQuorumDisks() { for _, disk := range xl.getLoadBalancedQuorumDisks() {
@ -110,20 +117,20 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
} }
var volInfo VolInfo var volInfo VolInfo
volInfo, err = disk.StatVol(bucketName) volInfo, err = disk.StatVol(bucketName)
if err != nil { if err == nil {
// For any reason disk went offline continue and pick the next one. bucketInfo = BucketInfo{
if err == errDiskNotFound || err == errFaultyDisk { Name: volInfo.Name,
continue Created: volInfo.Created,
} }
return BucketInfo{}, err return bucketInfo, nil
} }
bucketInfo = BucketInfo{ // For any reason disk went offline continue and pick the next one.
Name: volInfo.Name, if isErrIgnored(err, bucketMetadataOpIgnoredErrs) {
Created: volInfo.Created, continue
} }
break break
} }
return bucketInfo, nil return BucketInfo{}, err
} }
// Checks whether bucket exists. // Checks whether bucket exists.
@ -166,10 +173,6 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) {
} }
var volsInfo []VolInfo var volsInfo []VolInfo
volsInfo, err = disk.ListVols() volsInfo, err = disk.ListVols()
// Ignore any disks not found.
if err == errDiskNotFound || err == errFaultyDisk {
continue
}
if err == nil { if err == nil {
// NOTE: The assumption here is that volumes across all disks in // NOTE: The assumption here is that volumes across all disks in
// readQuorum have consistent view i.e they all have same number // readQuorum have consistent view i.e they all have same number
@ -189,6 +192,10 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) {
} }
return bucketsInfo, nil return bucketsInfo, nil
} }
// Ignore any disks not found.
if isErrIgnored(err, bucketMetadataOpIgnoredErrs) {
continue
}
break break
} }
return nil, err return nil, err
@ -245,8 +252,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
// an unknown error. // an unknown error.
for _, err := range dErrs { for _, err := range dErrs {
if err != nil { if err != nil {
// We ignore error if errVolumeNotFound, errDiskNotFound or errFaultyDisk if isErrIgnored(err, objMetadataOpIgnoredErrs) {
if err == errVolumeNotFound || err == errDiskNotFound || err == errFaultyDisk {
volumeNotFoundErrCnt++ volumeNotFoundErrCnt++
continue continue
} }

View File

@ -67,7 +67,7 @@ func (xl xlObjects) isObject(bucket, prefix string) (ok bool) {
return true return true
} }
// Ignore for file not found, disk not found or faulty disk. // Ignore for file not found, disk not found or faulty disk.
if err == errFileNotFound || err == errDiskNotFound || err == errFaultyDisk { if isErrIgnored(err, walkResultIgnoredErrs) {
continue continue
} }
errorIf(err, "Unable to stat a file %s/%s/%s", bucket, prefix, xlMetaJSONFile) errorIf(err, "Unable to stat a file %s/%s/%s", bucket, prefix, xlMetaJSONFile)

View File

@ -194,6 +194,14 @@ func pickValidXLMeta(xlMetas []xlMetaV1) xlMetaV1 {
panic("Unable to look for valid XL metadata content") panic("Unable to look for valid XL metadata content")
} }
// objMetadataOpIgnoredErrs lists the per-disk errors that object
// metadata operations (readXLMetadata, statPart, uploads.json reads,
// multipart listing) tolerate by continuing to the next disk rather
// than aborting. Membership is checked via isErrIgnored.
var objMetadataOpIgnoredErrs = []error{
	errDiskNotFound,     // disk went offline or was removed
	errDiskAccessDenied, // disk present but not readable (permissions)
	errFaultyDisk,       // disk returned an I/O fault
	errVolumeNotFound,   // bucket/volume absent on this disk
}
// readXLMetadata - returns the object metadata `xl.json` content from // readXLMetadata - returns the object metadata `xl.json` content from
// one of the disks picked at random. // one of the disks picked at random.
func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err error) { func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err error) {
@ -202,16 +210,18 @@ func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err
continue continue
} }
xlMeta, err = readXLMeta(disk, bucket, object) xlMeta, err = readXLMeta(disk, bucket, object)
if err != nil { if err == nil {
// For any reason disk is not available continue and read from other disks. return xlMeta, nil
if err == errDiskNotFound || err == errFaultyDisk { }
continue // For any reason disk or bucket is not available continue
} // and read from other disks.
return xlMetaV1{}, err if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue
} }
break break
} }
return xlMeta, nil // Return error here.
return xlMetaV1{}, err
} }
// Undo rename xl metadata, renames successfully renamed `xl.json` back to source location. // Undo rename xl metadata, renames successfully renamed `xl.json` back to source location.

View File

@ -178,16 +178,16 @@ func (xl xlObjects) isMultipartUpload(bucket, prefix string) bool {
continue continue
} }
_, err := disk.StatFile(bucket, pathJoin(prefix, uploadsJSONFile)) _, err := disk.StatFile(bucket, pathJoin(prefix, uploadsJSONFile))
if err != nil { if err == nil {
// For any reason disk was deleted or goes offline, continue return true
if err == errDiskNotFound || err == errFaultyDisk { }
continue // For any reason disk was deleted or goes offline, continue
} if isErrIgnored(err, objMetadataOpIgnoredErrs) {
return false continue
} }
break break
} }
return true return false
} }
// listUploadsInfo - list all uploads info. // listUploadsInfo - list all uploads info.
@ -199,20 +199,20 @@ func (xl xlObjects) listUploadsInfo(prefixPath string) (uploadsInfo []uploadInfo
splitPrefixes := strings.SplitN(prefixPath, "/", 3) splitPrefixes := strings.SplitN(prefixPath, "/", 3)
var uploadsJSON uploadsV1 var uploadsJSON uploadsV1
uploadsJSON, err = readUploadsJSON(splitPrefixes[1], splitPrefixes[2], disk) uploadsJSON, err = readUploadsJSON(splitPrefixes[1], splitPrefixes[2], disk)
if err != nil { if err == nil {
// For any reason disk was deleted or goes offline, continue uploadsInfo = uploadsJSON.Uploads
if err == errDiskNotFound || err == errFaultyDisk { return uploadsInfo, nil
continue }
} if err == errFileNotFound {
if err == errFileNotFound { return []uploadInfo{}, nil
return []uploadInfo{}, nil }
} // For any reason disk was deleted or goes offline, continue
return nil, err if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue
} }
uploadsInfo = uploadsJSON.Uploads
break break
} }
return uploadsInfo, nil return []uploadInfo{}, err
} }
// isUploadIDExists - verify if a given uploadID exists and is valid. // isUploadIDExists - verify if a given uploadID exists and is valid.
@ -249,16 +249,16 @@ func (xl xlObjects) statPart(bucket, object, uploadID, partName string) (fileInf
continue continue
} }
fileInfo, err = disk.StatFile(minioMetaBucket, partNamePath) fileInfo, err = disk.StatFile(minioMetaBucket, partNamePath)
if err != nil { if err == nil {
// For any reason disk was deleted or goes offline, continue return fileInfo, nil
if err == errDiskNotFound { }
continue // For any reason disk was deleted or goes offline, continue
} if isErrIgnored(err, objMetadataOpIgnoredErrs) {
return FileInfo{}, err continue
} }
break break
} }
return fileInfo, nil return FileInfo{}, err
} }
// commitXLMetadata - commit `xl.json` from source prefix to destination prefix. // commitXLMetadata - commit `xl.json` from source prefix to destination prefix.

View File

@ -68,7 +68,10 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
continue continue
} }
uploads, _, err = listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, disk) uploads, _, err = listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, disk)
if err == errDiskNotFound || err == errFaultyDisk { if err == nil {
break
}
if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue continue
} }
break break
@ -100,7 +103,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// For any walk error return right away. // For any walk error return right away.
if walkResult.err != nil { if walkResult.err != nil {
// File not found or Disk not found is a valid case. // File not found or Disk not found is a valid case.
if walkResult.err == errFileNotFound || walkResult.err == errDiskNotFound || walkResult.err == errFaultyDisk { if isErrIgnored(walkResult.err, walkResultIgnoredErrs) {
continue continue
} }
return ListMultipartsInfo{}, err return ListMultipartsInfo{}, err
@ -130,14 +133,17 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
continue continue
} }
newUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, disk) newUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, disk)
if err == errDiskNotFound || err == errFaultyDisk { if err == nil {
break
}
if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue continue
} }
break break
} }
nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry)) nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry))
if err != nil { if err != nil {
if err == errFileNotFound || walkResult.err == errDiskNotFound || walkResult.err == errFaultyDisk { if isErrIgnored(err, walkResultIgnoredErrs) {
continue continue
} }
return ListMultipartsInfo{}, err return ListMultipartsInfo{}, err
@ -723,7 +729,10 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
continue continue
} }
uploadsJSON, err = readUploadsJSON(bucket, object, disk) uploadsJSON, err = readUploadsJSON(bucket, object, disk)
if err == errDiskNotFound || err == errFaultyDisk { if err == nil {
break
}
if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue continue
} }
break break
@ -774,7 +783,10 @@ func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) (err e
continue continue
} }
uploadsJSON, err = readUploadsJSON(bucket, object, disk) uploadsJSON, err = readUploadsJSON(bucket, object, disk)
if err == errDiskNotFound || err == errFaultyDisk { if err == nil {
break
}
if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue continue
} }
break break