Mirror of https://github.com/minio/minio.git, synced 2025-04-15 00:35:31 -04:00
Merge pull request #1849 from harshavardhana/multipart
XL/PutObject: Handle all pending cases of DiskNotFound.
This commit is contained in: 70a1231f02
posix.go — 35 changed lines
@@ -249,6 +249,11 @@ func (s posix) StatVol(volume string) (volInfo VolInfo, err error) {

 // DeleteVol - delete a volume.
 func (s posix) DeleteVol(volume string) error {
+    // Validate if disk is free.
+    if err := checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
+        return err
+    }
+
     // Verify if volume is valid and it exists.
     volumeDir, err := s.getVolDir(volume)
     if err != nil {
@@ -274,6 +279,11 @@ func (s posix) DeleteVol(volume string) error {
 // ListDir - return all the entries at the given directory path.
 // If an entry is a directory it will be returned with a trailing "/".
 func (s posix) ListDir(volume, dirPath string) ([]string, error) {
+    // Validate if disk is free.
+    if err := checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
+        return nil, err
+    }
+
     // Verify if volume is valid and it exists.
     volumeDir, err := s.getVolDir(volume)
     if err != nil {
@@ -296,6 +306,11 @@ func (s posix) ListDir(volume, dirPath string) ([]string, error) {
 // for io.EOF. Additionally ReadFile also starts reading from an
 // offset.
 func (s posix) ReadFile(volume string, path string, offset int64, buf []byte) (n int64, err error) {
+    // Validate if disk is free.
+    if err = checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
+        return 0, err
+    }
+
     volumeDir, err := s.getVolDir(volume)
     if err != nil {
         return 0, err
@@ -356,6 +371,11 @@ func (s posix) ReadFile(volume string, path string, offset int64, buf []byte) (n
 // AppendFile - append a byte array at path, if file doesn't exist at
 // path this call explicitly creates it.
 func (s posix) AppendFile(volume, path string, buf []byte) (n int64, err error) {
+    // Validate if disk is free.
+    if err = checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
+        return 0, err
+    }
+
     volumeDir, err := s.getVolDir(volume)
     if err != nil {
         return 0, err
@@ -403,6 +423,11 @@ func (s posix) AppendFile(volume, path string, buf []byte) (n int64, err error)

 // StatFile - get file info.
 func (s posix) StatFile(volume, path string) (file FileInfo, err error) {
+    // Validate if disk is free.
+    if err = checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
+        return FileInfo{}, err
+    }
+
     volumeDir, err := s.getVolDir(volume)
     if err != nil {
         return FileInfo{}, err
@@ -480,6 +505,11 @@ func deleteFile(basePath, deletePath string) error {

 // DeleteFile - delete a file at path.
 func (s posix) DeleteFile(volume, path string) error {
+    // Validate if disk is free.
+    if err := checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
+        return err
+    }
+
     volumeDir, err := s.getVolDir(volume)
     if err != nil {
         return err
@@ -506,6 +536,11 @@ func (s posix) DeleteFile(volume, path string) error {

 // RenameFile - rename source path to destination path atomically.
 func (s posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
+    // Validate if disk is free.
+    if err := checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
+        return err
+    }
+
     srcVolumeDir, err := s.getVolDir(srcVolume)
     if err != nil {
         return err
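Every posix.go hunk above adds the same guard: refuse the storage primitive outright when the backing path is low on space. The body of checkDiskFree is not part of this diff; the following is only a minimal sketch of what such a guard could look like on Unix-like systems, where the percentage interpretation of minFreeDisk and the errDiskFull sentinel are assumptions rather than the repository's actual definitions.

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// errDiskFull is a stand-in sentinel; the actual error value used by the
// storage layer is not shown in this diff.
var errDiskFull = errors.New("disk path full")

// checkDiskFree is a hypothetical re-implementation of the guard used in
// the hunks above: refuse the operation when available space on diskPath
// drops below minFreeDisk percent of the total capacity (assumption).
func checkDiskFree(diskPath string, minFreeDisk int64) error {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(diskPath, &stat); err != nil {
		return err
	}
	// Bytes available to unprivileged callers versus total capacity.
	availableBytes := int64(stat.Bavail) * int64(stat.Bsize)
	totalBytes := int64(stat.Blocks) * int64(stat.Bsize)
	if totalBytes == 0 {
		return errDiskFull
	}
	if availableBytes*100 < minFreeDisk*totalBytes {
		return errDiskFull
	}
	return nil
}

func main() {
	if err := checkDiskFree("/tmp", 5); err != nil {
		fmt.Println("refusing to write:", err)
		return
	}
	fmt.Println("enough free space, proceeding")
}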
@@ -53,6 +53,11 @@ func (xl xlObjects) listDir(bucket, prefixDir string, filter func(entry string)
         }
         entries, err = disk.ListDir(bucket, prefixDir)
         if err != nil {
+            // For any reason disk was deleted or goes offline, continue
+            // and list from other disks if possible.
+            if err == errDiskNotFound {
+                continue
+            }
             break
         }
         // Skip the entries which do not match the filter.
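This listDir hunk establishes the pattern the rest of the change repeats: a disk reporting errDiskNotFound is skipped and the next disk is consulted, while any other outcome still ends the scan. A self-contained sketch of that loop shape, with lister and fakeDisk as hypothetical stand-ins for the repository's StorageAPI and real disks:

package main

import (
	"errors"
	"fmt"
)

// errDiskNotFound stands in for the sentinel error used by the storage layer.
var errDiskNotFound = errors.New("disk not found")

// lister is a cut-down stand-in for the repository's StorageAPI interface.
type lister interface {
	ListDir(volume, dirPath string) ([]string, error)
}

// listFromFirstAvailable walks the disks in order, skips any disk that has
// gone away (errDiskNotFound) and stops at the first other outcome, success
// or failure — the same shape as the patched listDir loop.
func listFromFirstAvailable(disks []lister, bucket, prefixDir string) ([]string, error) {
	var entries []string
	var err error
	for _, disk := range disks {
		if disk == nil {
			continue
		}
		entries, err = disk.ListDir(bucket, prefixDir)
		if err != nil {
			// Disk deleted or offline: try the next one.
			if err == errDiskNotFound {
				continue
			}
			break
		}
		break
	}
	return entries, err
}

// fakeDisk lets the sketch run without touching a real filesystem.
type fakeDisk struct {
	entries []string
	err     error
}

func (f fakeDisk) ListDir(volume, dirPath string) ([]string, error) {
	return f.entries, f.err
}

func main() {
	disks := []lister{
		fakeDisk{err: errDiskNotFound},             // offline disk is skipped
		fakeDisk{entries: []string{"a/", "b.txt"}}, // first healthy disk wins
	}
	entries, err := listFromFirstAvailable(disks, "bucket", "")
	fmt.Println(entries, err)
}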
@@ -56,9 +56,7 @@ func (xl xlObjects) MakeBucket(bucket string) error {
             err := disk.MakeVol(bucket)
             if err != nil {
                 dErrs[index] = err
-                return
             }
-            dErrs[index] = nil
         }(index, disk)
     }

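Dropping the early return and the explicit dErrs[index] = nil works because the per-disk error slice starts out zero-valued, so only failures need to be recorded before the goroutine exits; the DeleteBucket and deleteObject hunks further down make the same simplification. A rough sketch of the fan-out idiom, with runOnAllDisks and perDiskOp as hypothetical names standing in for the MakeVol/DeleteVol/cleanupDir goroutines:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// perDiskOp stands in for MakeVol, DeleteVol or cleanupDir in the hunks above.
type perDiskOp func(diskIndex int) error

// runOnAllDisks fans the operation out to every disk and collects one error
// slot per disk. Successful disks simply leave their slot at the zero value
// nil, which is why the patched code no longer assigns it explicitly.
func runOnAllDisks(diskCount int, op perDiskOp) []error {
	dErrs := make([]error, diskCount) // all nil to begin with
	var wg sync.WaitGroup
	for index := 0; index < diskCount; index++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			if err := op(index); err != nil {
				dErrs[index] = err
			}
		}(index)
	}
	wg.Wait()
	return dErrs
}

func main() {
	errs := runOnAllDisks(4, func(i int) error {
		if i == 2 {
			return errors.New("disk 2 unreachable") // simulated failure
		}
		return nil
	})
	fmt.Println(errs) // [<nil> <nil> disk 2 unreachable <nil>]
}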
@@ -99,7 +97,7 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
         var volInfo VolInfo
         volInfo, err = disk.StatVol(bucketName)
         if err != nil {
-            // For some reason disk went offline pick the next one.
+            // For any reason disk went offline continue and pick the next one.
             if err == errDiskNotFound {
                 continue
             }
@@ -154,6 +152,10 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) {
         }
         var volsInfo []VolInfo
         volsInfo, err = disk.ListVols()
+        // Ignore any disks not found.
+        if err == errDiskNotFound {
+            continue
+        }
         if err == nil {
             // NOTE: The assumption here is that volumes across all disks in
             // readQuorum have consistent view i.e they all have same number
@@ -218,9 +220,7 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
             err := disk.DeleteVol(bucket)
             if err != nil {
                 dErrs[index] = err
-                return
             }
-            dErrs[index] = nil
         }(index, disk)
     }

@@ -63,6 +63,9 @@ func (xl xlObjects) isObject(bucket, prefix string) bool {
         }
         _, err := disk.StatFile(bucket, path.Join(prefix, xlMetaJSONFile))
         if err != nil {
+            if err == errDiskNotFound {
+                continue
+            }
             return false
         }
         break
@@ -209,6 +209,10 @@ func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err
         var buf []byte
         buf, err = readAll(disk, bucket, path.Join(object, xlMetaJSONFile))
         if err != nil {
+            // For any reason disk is not available continue and read from other disks.
+            if err == errDiskNotFound {
+                continue
+            }
             return xlMetaV1{}, err
         }
         err = json.Unmarshal(buf, &xlMeta)
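readXLMetadata pulls the whole xl.json through a readAll helper whose implementation is not shown in this diff. A plausible sketch of such a helper, built only on the ReadFile signature visible in the posix.go hunks above; the 4 KiB buffer size and the in-memory memDisk are illustrative assumptions, not the repository's code:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readAller is a cut-down stand-in for the StorageAPI value readXLMetadata
// receives; the ReadFile signature matches the posix.go hunks above.
type readAller interface {
	ReadFile(volume string, path string, offset int64, buf []byte) (int64, error)
}

// readAll is a hypothetical sketch of the helper used in the hunk above:
// drain a file through repeated ReadFile calls at increasing offsets until
// the disk reports io.EOF.
func readAll(disk readAller, volume, path string) ([]byte, error) {
	var all []byte
	buf := make([]byte, 4096)
	var offset int64
	for {
		n, err := disk.ReadFile(volume, path, offset, buf)
		all = append(all, buf[:n]...)
		offset += n
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
	}
}

// memDisk serves a single in-memory file so the sketch can run standalone.
type memDisk struct{ data []byte }

func (m memDisk) ReadFile(volume, path string, offset int64, buf []byte) (int64, error) {
	if offset >= int64(len(m.data)) {
		return 0, io.EOF
	}
	n := copy(buf, m.data[offset:])
	var err error
	if offset+int64(n) == int64(len(m.data)) {
		err = io.EOF
	}
	return int64(n), err
}

func main() {
	disk := memDisk{data: bytes.Repeat([]byte(`{"version":"1"}`), 3)}
	buf, err := readAll(disk, "bucket", "object/xl.json")
	fmt.Println(len(buf), err)
}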
@@ -338,11 +342,10 @@ func (xl xlObjects) writeUniqueXLMetadata(bucket, prefix string, xlMetas []xlMet
             xlMetas[index].Erasure.Index = index + 1

             // Write unique `xl.json` for a disk at index.
-            if err := writeXLMetadata(disk, bucket, prefix, xlMetas[index]); err != nil {
+            err := writeXLMetadata(disk, bucket, prefix, xlMetas[index])
+            if err != nil {
                 mErrs[index] = err
-                return
             }
-            mErrs[index] = nil
         }(index, disk)
     }

@@ -404,11 +407,10 @@ func (xl xlObjects) writeSameXLMetadata(bucket, prefix string, xlMeta xlMetaV1)
             metadata.Erasure.Index = index + 1

             // Write xl metadata.
-            if err := writeXLMetadata(disk, bucket, prefix, metadata); err != nil {
+            err := writeXLMetadata(disk, bucket, prefix, metadata)
+            if err != nil {
                 mErrs[index] = err
-                return
             }
-            mErrs[index] = nil
         }(index, disk, xlMeta)
     }

@@ -363,6 +363,9 @@ func (xl xlObjects) isMultipartUpload(bucket, prefix string) bool {
         }
         _, err := disk.StatFile(bucket, pathJoin(prefix, uploadsJSONFile))
         if err != nil {
+            if err == errDiskNotFound {
+                continue
+            }
             return false
         }
         break
@@ -377,8 +380,12 @@ func (xl xlObjects) listUploadsInfo(prefixPath string) (uploadsInfo []uploadInfo
             continue
         }
         splitPrefixes := strings.SplitN(prefixPath, "/", 3)
-        uploadsJSON, err := readUploadsJSON(splitPrefixes[1], splitPrefixes[2], disk)
+        var uploadsJSON uploadsV1
+        uploadsJSON, err = readUploadsJSON(splitPrefixes[1], splitPrefixes[2], disk)
         if err != nil {
+            if err == errDiskNotFound {
+                continue
+            }
             if err == errFileNotFound {
                 return []uploadInfo{}, nil
             }
@@ -68,6 +68,9 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
             continue
         }
         uploads, _, err = listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, disk)
+        if err == errDiskNotFound {
+            continue
+        }
         break
     }
     nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, keyMarker))
@@ -124,9 +127,12 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
                 if disk == nil {
                     continue
                 }
+                newUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, disk)
+                if err == errDiskNotFound {
+                    continue
+                }
                 break
             }
-            newUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, disk)
             nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry))
             if err != nil {
                 if err == errFileNotFound || walkResult.err == errDiskNotFound {
@@ -661,13 +667,17 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
     // Validate if there are other incomplete upload-id's present for
     // the object, if yes do not attempt to delete 'uploads.json'.
     var disk StorageAPI
+    var uploadsJSON uploadsV1
     for _, disk = range xl.getLoadBalancedQuorumDisks() {
         if disk == nil {
             continue
         }
+        uploadsJSON, err = readUploadsJSON(bucket, object, disk)
+        if err == errDiskNotFound {
+            continue
+        }
         break
     }
-    uploadsJSON, err := readUploadsJSON(bucket, object, disk)
     if err != nil {
         return "", toObjectErr(err, minioMetaBucket, object)
     }
@@ -709,13 +719,17 @@ func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) (err e
     // Validate if there are other incomplete upload-id's present for
     // the object, if yes do not attempt to delete 'uploads.json'.
     var disk StorageAPI
+    var uploadsJSON uploadsV1
     for _, disk = range xl.getLoadBalancedQuorumDisks() {
         if disk == nil {
             continue
         }
+        uploadsJSON, err = readUploadsJSON(bucket, object, disk)
+        if err == errDiskNotFound {
+            continue
+        }
         break
     }
-    uploadsJSON, err := readUploadsJSON(bucket, object, disk)
     if err != nil {
         return toObjectErr(err, bucket, object)
     }
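In both CompleteMultipartUpload and abortMultipartUpload the uploads.json read moves inside the disk loop, and uploadsJSON is declared with var before the loop so that the parsed result and the final error survive past it; a disk reporting errDiskNotFound simply passes the baton to the next one. A standalone sketch of that control flow, with disk, uploadsV1 and readUploadsJSON reduced to hypothetical stand-ins:

package main

import (
	"errors"
	"fmt"
)

var errDiskNotFound = errors.New("disk not found")

// uploadsV1, disk and readUploadsJSON are stand-ins for the repository's
// types; only the control flow of the patched loop is reproduced here.
type uploadsV1 struct{ Uploads []string }

type disk struct {
	missing bool
	uploads uploadsV1
}

func readUploadsJSON(bucket, object string, d disk) (uploadsV1, error) {
	if d.missing {
		return uploadsV1{}, errDiskNotFound
	}
	return d.uploads, nil
}

func main() {
	disks := []disk{{missing: true}, {uploads: uploadsV1{Uploads: []string{"id-1"}}}}
	bucket, object := "bucket", "object"

	// Declared before the loop (as in the patch) so both the parsed
	// uploads.json and the final error survive the loop and can be
	// inspected afterwards.
	var uploadsJSON uploadsV1
	var err error
	for _, d := range disks {
		uploadsJSON, err = readUploadsJSON(bucket, object, d)
		// A missing disk is not fatal: fall through to the next one.
		if err == errDiskNotFound {
			continue
		}
		break
	}
	if err != nil {
		fmt.Println("could not read uploads.json from any disk:", err)
		return
	}
	fmt.Println("pending upload IDs:", uploadsJSON.Uploads)
}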
@@ -401,9 +401,7 @@ func (xl xlObjects) deleteObject(bucket, object string) error {
             err := cleanupDir(disk, bucket, object)
             if err != nil {
                 dErrs[index] = err
-                return
             }
-            dErrs[index] = nil
         }(index, disk)
     }
