XL/fs: initObjectLayer should cleanup tmpMetaPrefix in parallel. (#1752)
Fixes #1747
Commit: a9e778f460 (parent: ee6645f421)
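
The fix replaces the serial per-disk loop in initObjectLayer with one goroutine per disk: each goroutine creates the `.minio` meta volume, cleans up tmpMetaPrefix, and records its error by index; after wg.Wait() the first non-nil error is returned. Below is a minimal, self-contained sketch of that fan-out pattern; the Disk interface and Init method are illustrative stand-ins, not MinIO's actual StorageAPI.

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    // Disk is a stand-in for a storage backend; Init stands in for
    // the MakeVol + cleanupDir work done per disk in the real code.
    type Disk interface {
        Init() error
    }

    // initAll fans out one goroutine per disk, collects per-disk errors
    // by index, and returns the first error after all goroutines finish.
    func initAll(disks []Disk) error {
        var wg sync.WaitGroup
        errs := make([]error, len(disks))
        for index, disk := range disks {
            wg.Add(1)
            go func(index int, disk Disk) {
                defer wg.Done()
                errs[index] = disk.Init()
            }(index, disk)
        }
        // Wait for all initialization to finish.
        wg.Wait()
        // Return upon first error.
        for _, err := range errs {
            if err != nil {
                return err
            }
        }
        return nil
    }

    type okDisk struct{}

    func (okDisk) Init() error { return nil }

    type badDisk struct{}

    func (badDisk) Init() error { return errors.New("disk offline") }

    func main() {
        fmt.Println(initAll([]Disk{okDisk{}, okDisk{}}))  // <nil>
        fmt.Println(initAll([]Disk{okDisk{}, badDisk{}})) // disk offline
    }

Writing into a pre-sized error slice keeps the goroutines free of shared-state locking: each one touches only its own index, and the slice is read only after wg.Wait() establishes a happens-before edge.
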
@@ -16,25 +16,58 @@
 
 package main
 
-import "strings"
+import (
+	"strings"
+	"sync"
+)
 
 // Common initialization needed for both object layers.
 func initObjectLayer(storageDisks ...StorageAPI) error {
 	// This happens for the first time, but keep this here since this
 	// is the only place where it can be made expensive optimizing all
 	// other calls. Create minio meta volume, if it doesn't exist yet.
-	for _, storage := range storageDisks {
-		if err := storage.MakeVol(minioMetaBucket); err != nil {
-			if err != errVolumeExists && err != errDiskNotFound {
-				return toObjectErr(err, minioMetaBucket)
-			}
-		}
-		// Cleanup all temp entries upon start.
-		err := cleanupDir(storage, minioMetaBucket, tmpMetaPrefix)
-		if err != nil {
-			return toObjectErr(err, minioMetaBucket, tmpMetaPrefix)
-		}
-	}
+	var wg = &sync.WaitGroup{}
+
+	// Initialize errs to collect errors inside go-routine.
+	var errs = make([]error, len(storageDisks))
+
+	// Initialize all disks in parallel.
+	for index, disk := range storageDisks {
+		wg.Add(1)
+		go func(index int, disk StorageAPI) {
+			// Indicate this wait group is done.
+			defer wg.Done()
+
+			// Attempt to create `.minio`.
+			err := disk.MakeVol(minioMetaBucket)
+			if err != nil {
+				if err != errVolumeExists && err != errDiskNotFound {
+					errs[index] = err
+					return
+				}
+			}
+			// Cleanup all temp entries upon start.
+			err = cleanupDir(disk, minioMetaBucket, tmpMetaPrefix)
+			if err != nil {
+				errs[index] = err
+				return
+			}
+			errs[index] = nil
+		}(index, disk)
+	}
+
+	// Wait for all cleanup to finish.
+	wg.Wait()
+
+	// Return upon first error.
+	for _, err := range errs {
+		if err == nil {
+			continue
+		}
+		return toObjectErr(err, minioMetaBucket, tmpMetaPrefix)
+	}
+
+	// Return success here.
 	return nil
 }
@@ -208,28 +208,45 @@ func (xl xlObjects) isObject(bucket, prefix string) bool {
 	return true
 }
 
+// statPart - stat a part file.
+func (xl xlObjects) statPart(bucket, objectPart string) (fileInfo FileInfo, err error) {
+	// Count for errors encountered.
+	var xlJSONErrCount = 0
+
+	// Return the first success entry based on the selected random disk.
+	for xlJSONErrCount < len(xl.storageDisks) {
+		// Choose a random disk on each attempt, do not hit the same disk all the time.
+		disk := xl.getRandomDisk() // Pick a random disk.
+		fileInfo, err = disk.StatFile(bucket, objectPart)
+		if err == nil {
+			return fileInfo, nil
+		}
+		xlJSONErrCount++ // Update error count.
+	}
+	return FileInfo{}, err
+}
+
 // readXLMetadata - read xl metadata.
-func readXLMetadata(disk StorageAPI, bucket, object string) (xlMeta xlMetaV1, err error) {
-	r, err := disk.ReadFile(bucket, path.Join(object, xlMetaJSONFile), int64(0))
-	if err != nil {
-		return xlMetaV1{}, err
-	}
-	defer r.Close()
-	_, err = xlMeta.ReadFrom(r)
-	if err != nil {
-		return xlMetaV1{}, err
-	}
-	return xlMeta, nil
-}
-
-// deleteXLJson - delete `xl.json` on all disks.
-func (xl xlObjects) deleteXLMetadata(bucket, object string) error {
-	return xl.deleteObject(bucket, path.Join(object, xlMetaJSONFile))
-}
-
-// renameXLJson - rename `xl.json` on all disks.
-func (xl xlObjects) renameXLMetadata(srcBucket, srcPrefix, dstBucket, dstPrefix string) error {
-	return xl.renameObject(srcBucket, path.Join(srcPrefix, xlMetaJSONFile), dstBucket, path.Join(dstPrefix, xlMetaJSONFile))
-}
+func (xl xlObjects) readXLMetadata(bucket, object string) (xlMeta xlMetaV1, err error) {
+	// Count for errors encountered.
+	var xlJSONErrCount = 0
+
+	// Return the first success entry based on the selected random disk.
+	for xlJSONErrCount < len(xl.storageDisks) {
+		var r io.ReadCloser
+		// Choose a random disk on each attempt, do not hit the same disk all the time.
+		disk := xl.getRandomDisk() // Pick a random disk.
+		r, err = disk.ReadFile(bucket, path.Join(object, xlMetaJSONFile), int64(0))
+		if err == nil {
+			defer r.Close()
+			_, err = xlMeta.ReadFrom(r)
+			if err == nil {
+				return xlMeta, nil
+			}
+		}
+		xlJSONErrCount++ // Update error count.
+	}
+	return xlMetaV1{}, err
+}
 
 // getDiskDistribution - get disk distribution.
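
Both new helpers, statPart and readXLMetadata, follow the same load-spreading pattern: pick a random disk, retry up to len(xl.storageDisks) times, so a single dead disk neither fails the read nor absorbs every request. A minimal sketch of that pattern with stand-in types follows; note that, like getRandomDisk in the diff, the random pick may revisit a disk that already failed, so the bound is on attempts, not on distinct disks.

    package main

    import (
        "errors"
        "fmt"
        "math/rand"
    )

    // readFromAny tries up to len(replicas) times, picking a random
    // replica on each attempt so no single node serves every read.
    // It returns the first successful result, or the last error seen.
    func readFromAny(replicas []func() (string, error)) (string, error) {
        var err error
        for attempt := 0; attempt < len(replicas); attempt++ {
            read := replicas[rand.Intn(len(replicas))] // random pick
            var v string
            if v, err = read(); err == nil {
                return v, nil
            }
        }
        return "", err
    }

    func main() {
        ok := func() (string, error) { return "xl.json contents", nil }
        bad := func() (string, error) { return "", errors.New("disk offline") }
        fmt.Println(readFromAny([]func() (string, error){bad, ok}))
    }
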
@@ -86,14 +86,15 @@ func (xl xlObjects) newMultipartUploadCommon(bucket string, object string, meta
 	if err = xl.writeXLMetadata(minioMetaBucket, tempUploadIDPath, xlMeta); err != nil {
 		return "", toObjectErr(err, minioMetaBucket, tempUploadIDPath)
 	}
-	if err = xl.renameXLMetadata(minioMetaBucket, tempUploadIDPath, minioMetaBucket, uploadIDPath); err != nil {
-		if dErr := xl.deleteXLMetadata(minioMetaBucket, tempUploadIDPath); dErr != nil {
+	rErr := xl.renameObject(minioMetaBucket, tempUploadIDPath, minioMetaBucket, uploadIDPath)
+	if rErr == nil {
+		if dErr := xl.deleteObject(minioMetaBucket, tempUploadIDPath); dErr != nil {
 			return "", toObjectErr(dErr, minioMetaBucket, tempUploadIDPath)
 		}
-		return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
+		// Return success.
+		return uploadID, nil
 	}
-	// Return success.
-	return uploadID, nil
+	return "", toObjectErr(rErr, minioMetaBucket, uploadIDPath)
 }
 
 // NewMultipartUpload - initialize a new multipart upload, returns a unique id.
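
The multipart change above keeps the write-to-temp-then-rename discipline: xl.json is written under tempUploadIDPath, renamed to uploadIDPath, and only on a successful rename is the leftover temp entry deleted and the upload ID returned. The same commit protocol on a single local filesystem, sketched with standard-library calls (paths and names here are illustrative, not MinIO's):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // commitFile writes data to a temp file and renames it into place,
    // so readers never observe a partially written file.
    func commitFile(dir, name string, data []byte) error {
        tmp := filepath.Join(dir, name+".tmp")
        if err := os.WriteFile(tmp, data, 0o644); err != nil {
            return err
        }
        // Rename is atomic on POSIX filesystems within one volume.
        if err := os.Rename(tmp, filepath.Join(dir, name)); err != nil {
            os.Remove(tmp) // best-effort cleanup of the temp file
            return err
        }
        return nil
    }

    func main() {
        dir, err := os.MkdirTemp("", "demo")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)
        fmt.Println(commitFile(dir, "uploads.json", []byte(`{"uploadId":"abc"}`)))
    }
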
@@ -129,7 +130,7 @@ func (xl xlObjects) putObjectPartCommon(bucket string, object string, uploadID s
 	tmpPartPath := path.Join(tmpMetaPrefix, bucket, object, uploadID, partSuffix)
 	fileWriter, err := xl.erasureDisk.CreateFile(minioMetaBucket, tmpPartPath)
 	if err != nil {
-		return "", toObjectErr(err, bucket, object)
+		return "", toObjectErr(err, minioMetaBucket, tmpPartPath)
 	}
 
 	// Initialize md5 writer.
@@ -184,7 +185,7 @@ func (xl xlObjects) putObjectPartCommon(bucket string, object string, uploadID s
 	}
 
 	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
-	xlMeta, err := readXLMetadata(xl.getRandomDisk(), minioMetaBucket, uploadIDPath)
+	xlMeta, err := xl.readXLMetadata(minioMetaBucket, uploadIDPath)
 	if err != nil {
 		return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
 	}
@@ -230,9 +231,8 @@ func (xl xlObjects) listObjectPartsCommon(bucket, object, uploadID string, partN
 	defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID))
 	result := ListPartsInfo{}
 
-	disk := xl.getRandomDisk() // Pick a random disk and read `xl.json` from there.
 	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
-	xlMeta, err := readXLMetadata(disk, minioMetaBucket, uploadIDPath)
+	xlMeta, err := xl.readXLMetadata(minioMetaBucket, uploadIDPath)
 	if err != nil {
 		return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, uploadIDPath)
 	}
@@ -261,9 +261,9 @@ func (xl xlObjects) listObjectPartsCommon(bucket, object, uploadID string, partN
 	}
 	count := maxParts
 	for _, part := range parts {
-		var fi FileInfo
 		partNamePath := path.Join(mpartMetaPrefix, bucket, object, uploadID, part.Name)
-		fi, err = disk.StatFile(minioMetaBucket, partNamePath)
+		var fi FileInfo
+		fi, err = xl.statPart(minioMetaBucket, partNamePath)
 		if err != nil {
 			return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, partNamePath)
 		}
|
|||||||
uploadIDPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID)
|
uploadIDPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID)
|
||||||
|
|
||||||
// Read the current `xl.json`.
|
// Read the current `xl.json`.
|
||||||
xlMeta, err := readXLMetadata(xl.getRandomDisk(), minioMetaBucket, uploadIDPath)
|
xlMeta, err := xl.readXLMetadata(minioMetaBucket, uploadIDPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
|
return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
|
||||||
}
|
}
|
||||||
|
@@ -25,13 +25,19 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64) (io.Read
 	if !IsValidObjectName(object) {
 		return nil, ObjectNameInvalid{Bucket: bucket, Object: object}
 	}
+
+	// Lock the object before reading.
 	nsMutex.RLock(bucket, object)
 	defer nsMutex.RUnlock(bucket, object)
 	fileReader, fileWriter := io.Pipe()
-	xlMeta, err := readXLMetadata(xl.getRandomDisk(), bucket, object)
+
+	// Read metadata associated with the object.
+	xlMeta, err := xl.readXLMetadata(bucket, object)
 	if err != nil {
 		return nil, toObjectErr(err, bucket, object)
 	}
+
+	// Get part index offset.
 	partIndex, offset, err := xlMeta.getPartIndexOffset(startOffset)
 	if err != nil {
 		return nil, toObjectErr(err, bucket, object)
@@ -90,33 +96,24 @@ func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
 	return info, nil
 }
 
+// getObjectInfo - get object info.
 func (xl xlObjects) getObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
-	// Count for errors encountered.
-	var xlJSONErrCount = 0
-
-	// Return the first success entry based on the selected random disk.
-	for xlJSONErrCount < len(xl.storageDisks) {
-		// Choose a random disk on each attempt, do not hit the same disk all the time.
-		disk := xl.getRandomDisk() // Pick a random disk.
-		var xlMeta xlMetaV1
-		xlMeta, err = readXLMetadata(disk, bucket, object)
-		if err == nil {
-			objInfo = ObjectInfo{}
-			objInfo.IsDir = false
-			objInfo.Bucket = bucket
-			objInfo.Name = object
-			objInfo.Size = xlMeta.Stat.Size
-			objInfo.ModTime = xlMeta.Stat.ModTime
-			objInfo.MD5Sum = xlMeta.Meta["md5Sum"]
-			objInfo.ContentType = xlMeta.Meta["content-type"]
-			objInfo.ContentEncoding = xlMeta.Meta["content-encoding"]
-			return objInfo, nil
-		}
-		xlJSONErrCount++ // Update error count.
-	}
-
-	// Return error at the end.
-	return ObjectInfo{}, err
+	var xlMeta xlMetaV1
+	xlMeta, err = xl.readXLMetadata(bucket, object)
+	if err != nil {
+		// Return error.
+		return ObjectInfo{}, err
+	}
+	objInfo = ObjectInfo{}
+	objInfo.IsDir = false
+	objInfo.Bucket = bucket
+	objInfo.Name = object
+	objInfo.Size = xlMeta.Stat.Size
+	objInfo.ModTime = xlMeta.Stat.ModTime
+	objInfo.MD5Sum = xlMeta.Meta["md5Sum"]
+	objInfo.ContentType = xlMeta.Meta["content-type"]
+	objInfo.ContentEncoding = xlMeta.Meta["content-encoding"]
+	return objInfo, nil
 }
 
 // renameObject - renaming all source objects to destination object across all disks.