Simplify and cleanup metadata r/w functions (#8146)
parent a7be313230
commit 53e4887e02
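Nearly every hunk below makes the same substitution: a heap-allocated `var wg = &sync.WaitGroup{}` (or `wg := sync.WaitGroup{}`) becomes the zero-value declaration `var wg sync.WaitGroup`. The zero value of a `sync.WaitGroup` is ready to use, and `Add`/`Done`/`Wait` take a pointer receiver on the addressable local anyway, so the explicit pointer buys nothing. A minimal standalone sketch of the pattern (not code from this repository):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        // Before: var wg = &sync.WaitGroup{} allocates and adds pointer
        // indirection for no benefit.
        // After: the zero value of sync.WaitGroup is ready to use.
        var wg sync.WaitGroup

        errs := make([]error, 4)
        for i := range errs {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                errs[i] = nil // each goroutine writes only its own slot
            }(i)
        }
        wg.Wait()
        fmt.Println(errs)
    }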
@@ -449,7 +449,7 @@ func checkAtimeSupport(dir string) (err error) {
 func (c *cacheObjects) migrateCacheFromV1toV2(ctx context.Context) {
 	logger.StartupMessage(colorBlue("Cache migration initiated ...."))
 
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 	errs := make([]error, len(c.cache))
 	for i, dc := range c.cache {
 		if dc == nil {
@@ -325,7 +325,7 @@ func quorumUnformattedDisks(errs []error) bool {
 // loadFormatXLAll - load all format config from all input disks in parallel.
 func loadFormatXLAll(storageDisks []StorageAPI) ([]*formatXLV3, []error) {
 	// Initialize sync waitgroup.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	// Initialize list of errors.
 	var sErrs = make([]error, len(storageDisks))
@@ -652,7 +652,7 @@ func formatXLV3Check(reference *formatXLV3, format *formatXLV3) error {
 func saveFormatXLAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatXLV3) error {
 	var errs = make([]error, len(storageDisks))
 
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	// Write `format.json` to all disks.
 	for index, disk := range storageDisks {
@@ -812,7 +812,7 @@ func initFormatXLMetaVolume(storageDisks []StorageAPI, formats []*formatXLV3) er
 	// This happens for the first time, but keep this here since this
 	// is the only place where it can be made expensive optimizing all
 	// other calls. Create minio meta volume, if it doesn't exist yet.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	// Initialize errs to collect errors inside go-routine.
 	var errs = make([]error, len(storageDisks))
@@ -43,7 +43,7 @@ func (xl xlObjects) MakeBucketWithLocation(ctx context.Context, bucket, location
 	}
 
 	// Initialize sync waitgroup.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	// Initialize list of errors.
 	var dErrs = make([]error, len(xl.getDisks()))
@@ -82,7 +82,7 @@ func (xl xlObjects) MakeBucketWithLocation(ctx context.Context, bucket, location
 
 func (xl xlObjects) undoDeleteBucket(bucket string) {
 	// Initialize sync waitgroup.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 	// Undo previous make bucket entry on all underlying storage disks.
 	for index, disk := range xl.getDisks() {
 		if disk == nil {
@@ -103,7 +103,7 @@ func (xl xlObjects) undoDeleteBucket(bucket string) {
 // undo make bucket operation upon quorum failure.
 func undoMakeBucket(storageDisks []StorageAPI, bucket string) {
 	// Initialize sync waitgroup.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 	// Undo previous make bucket entry on all underlying storage disks.
 	for index, disk := range storageDisks {
 		if disk == nil {
@@ -245,7 +245,7 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string) error {
 	defer bucketLock.Unlock()
 
 	// Collect if all disks report volume not found.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 	var dErrs = make([]error, len(xl.getDisks()))
 
 	// Remove a volume entry on all underlying storage disks.
@@ -57,7 +57,7 @@ func healBucket(ctx context.Context, storageDisks []StorageAPI, bucket string, w
 	dryRun bool) (res madmin.HealResultItem, err error) {
 
 	// Initialize sync waitgroup.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	// Initialize list of errors.
 	var dErrs = make([]error, len(storageDisks))
@@ -451,7 +451,7 @@ func renameXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcEnt
 
 // writeUniqueXLMetadata - writes unique `xl.json` content for each disk in order.
 func writeUniqueXLMetadata(ctx context.Context, disks []StorageAPI, bucket, prefix string, xlMetas []xlMetaV1, quorum int) ([]StorageAPI, error) {
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 	var mErrs = make([]error, len(disks))
 
 	// Start writing `xl.json` to all disks in parallel.
@@ -461,19 +461,17 @@ func writeUniqueXLMetadata(ctx context.Context, disks []StorageAPI, bucket, pref
 			continue
 		}
 		wg.Add(1)
+
+		// Pick one xlMeta for a disk at index.
+		xlMetas[index].Erasure.Index = index + 1
+
 		// Write `xl.json` in a routine.
-		go func(index int, disk StorageAPI) {
+		go func(index int, disk StorageAPI, xlMeta xlMetaV1) {
 			defer wg.Done()
 
-			// Pick one xlMeta for a disk at index.
-			xlMetas[index].Erasure.Index = index + 1
-
 			// Write unique `xl.json` for a disk at index.
-			err := writeXLMetadata(ctx, disk, bucket, prefix, xlMetas[index])
-			if err != nil {
-				mErrs[index] = err
-			}
-		}(index, disk)
+			mErrs[index] = writeXLMetadata(ctx, disk, bucket, prefix, xlMeta)
+		}(index, disk, xlMetas[index])
 	}
 
 	// Wait for all the routines.
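Beyond the WaitGroup change, this hunk moves the `Erasure.Index` assignment out of the goroutine and passes `xlMetas[index]` as a goroutine argument, so each goroutine gets its own copy at spawn time instead of reading the shared slice later. Dropping the `if err != nil` branch is also equivalent: assigning the returned error directly leaves the slot nil on success either way. A standalone sketch of the same shape, where `process` is a hypothetical stand-in for writeXLMetadata:

    package main

    import (
        "fmt"
        "sync"
    )

    // process is a hypothetical stand-in for writeXLMetadata.
    func process(v int) error {
        if v%2 != 0 {
            return fmt.Errorf("odd value %d", v)
        }
        return nil
    }

    func main() {
        items := []int{0, 1, 2, 3}
        mErrs := make([]error, len(items))

        var wg sync.WaitGroup
        for index, item := range items {
            wg.Add(1)
            // index and item are copied into the goroutine's parameters
            // at spawn time, so the closure never reads the loop variables.
            go func(index, item int) {
                defer wg.Done()
                // Direct assignment replaces the removed if-err branch:
                // the slot is nil on success either way.
                mErrs[index] = process(item)
            }(index, item)
        }
        wg.Wait()
        fmt.Println(mErrs)
    }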
@@ -482,37 +480,3 @@ func writeUniqueXLMetadata(ctx context.Context, disks []StorageAPI, bucket, pref
 	err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
 	return evalDisks(disks, mErrs), err
 }
-
-// writeSameXLMetadata - write `xl.json` on all disks in order.
-func writeSameXLMetadata(ctx context.Context, disks []StorageAPI, bucket, prefix string, xlMeta xlMetaV1, writeQuorum int) ([]StorageAPI, error) {
-	var wg = &sync.WaitGroup{}
-	var mErrs = make([]error, len(disks))
-
-	// Start writing `xl.json` to all disks in parallel.
-	for index, disk := range disks {
-		if disk == nil {
-			mErrs[index] = errDiskNotFound
-			continue
-		}
-		wg.Add(1)
-		// Write `xl.json` in a routine.
-		go func(index int, disk StorageAPI, metadata xlMetaV1) {
-			defer wg.Done()
-
-			// Save the disk order index.
-			metadata.Erasure.Index = index + 1
-
-			// Write xl metadata.
-			err := writeXLMetadata(ctx, disk, bucket, prefix, metadata)
-			if err != nil {
-				mErrs[index] = err
-			}
-		}(index, disk, xlMeta)
-	}
-
-	// Wait for all the routines.
-	wg.Wait()
-
-	err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, writeQuorum)
-	return evalDisks(disks, mErrs), err
-}
@@ -56,7 +56,7 @@ func (xl xlObjects) checkUploadIDExists(ctx context.Context, bucket, object, upl
 // Removes part given by partName belonging to a mulitpart upload from minioMetaBucket
 func (xl xlObjects) removeObjectPart(bucket, object, uploadID, partName string) {
 	curpartPath := path.Join(bucket, object, uploadID, partName)
-	wg := sync.WaitGroup{}
+	var wg sync.WaitGroup
 	for i, disk := range xl.getDisks() {
 		if disk == nil {
 			continue
@@ -103,7 +103,7 @@ func (xl xlObjects) statPart(ctx context.Context, bucket, object, uploadID, part
 
 // commitXLMetadata - commit `xl.json` from source prefix to destination prefix in the given slice of disks.
 func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) ([]StorageAPI, error) {
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 	var mErrs = make([]error, len(disks))
 
 	srcJSONFile := path.Join(srcPrefix, xlMetaJSONFile)
@@ -123,13 +123,7 @@ func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPre
 			defer disk.DeleteFile(srcBucket, srcPrefix)
 
 			// Renames `xl.json` from source prefix to destination prefix.
-			rErr := disk.RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile)
-			if rErr != nil {
-				logger.LogIf(ctx, rErr)
-				mErrs[index] = rErr
-				return
-			}
-			mErrs[index] = nil
+			mErrs[index] = disk.RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile)
 		}(index, disk)
 	}
 	// Wait for all the routines.
@@ -218,16 +212,23 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec
 	// success.
 	defer xl.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum, false)
 
+	onlineDisks := xl.getDisks()
+	var partsMetadata = make([]xlMetaV1, len(onlineDisks))
+	for i := range onlineDisks {
+		partsMetadata[i] = xlMeta
+	}
+
+	var err error
 	// Write updated `xl.json` to all disks.
-	disks, err := writeSameXLMetadata(ctx, xl.getDisks(), minioMetaTmpBucket, tempUploadIDPath, xlMeta, writeQuorum)
+	onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum)
 	if err != nil {
 		return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
 	}
 
 	// Attempt to rename temp upload object to actual upload path object
-	_, rErr := rename(ctx, disks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, true, writeQuorum, nil)
-	if rErr != nil {
-		return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
+	_, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, true, writeQuorum, nil)
+	if err != nil {
+		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
 	}
 
 	// Return success.
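This hunk is what lets writeSameXLMetadata be deleted above: the caller fans the single `xlMeta` out into a per-disk slice and reuses writeUniqueXLMetadata, so one writer covers both the identical-content and per-disk-content cases. A standalone sketch of the fan-out, where `objectMeta` is a hypothetical stand-in for xlMetaV1:

    package main

    import "fmt"

    // objectMeta is a hypothetical stand-in for xlMetaV1.
    type objectMeta struct {
        Index int
    }

    func main() {
        diskCount := 4
        seed := objectMeta{}

        // Fan one value out into a per-disk slice so a single "write
        // unique metadata per disk" helper also serves the "write the
        // same metadata everywhere" case.
        parts := make([]objectMeta, diskCount)
        for i := range parts {
            parts[i] = seed // struct copy: each disk gets its own slot
        }

        parts[0].Index = 1 // mutating one slot leaves the others untouched
        fmt.Println(parts)
    }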
@@ -456,7 +457,8 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
 	defer xl.deleteObject(ctx, minioMetaTmpBucket, tempXLMetaPath, writeQuorum, false)
 
 	// Writes a unique `xl.json` each disk carrying new checksum related information.
-	if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum); err != nil {
+	onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum)
+	if err != nil {
 		return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
 	}
 
@@ -33,7 +33,7 @@ var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied)
 
 // putObjectDir hints the bottom layer to create a new directory.
 func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, writeQuorum int) error {
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	errs := make([]error, len(xl.getDisks()))
 	// Prepare object creation in all disks
@@ -335,7 +335,7 @@ func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startO
 
 // getObjectInfoDir - This getObjectInfo is specific to object directory lookup.
 func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) (oi ObjectInfo, err error) {
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	errs := make([]error, len(xl.getDisks()))
 	// Prepare object creation in a all disks
@@ -423,7 +423,7 @@ func (xl xlObjects) getObjectInfo(ctx context.Context, bucket, object string) (o
 }
 
 func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) {
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 	// Undo rename object on disks where RenameFile succeeded.
 
 	// If srcEntry/dstEntry are objects then add a trailing slash to copy
@@ -453,7 +453,7 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str
 // the respective underlying storage layer representations.
 func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) {
 	// Initialize sync waitgroup.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	// Initialize list of errors.
 	var errs = make([]error, len(disks))
@@ -737,7 +737,7 @@ func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, wri
 	}
 
 	// Initialize sync waitgroup.
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 
 	// Initialize list of errors.
 	var dErrs = make([]error, len(disks))
@@ -174,14 +174,8 @@ func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object stri
 	if len(xlMetaBuf) == 0 {
 		return xlMetaV1{}, errFileNotFound
 	}
-	xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
-	if err != nil {
-		logger.GetReqInfo(ctx).AppendTags("disk", disk.String())
-		logger.LogIf(ctx, err)
-		return xlMetaV1{}, err
-	}
-	// Return structured `xl.json`.
-	return xlMeta, nil
+	logger.GetReqInfo(ctx).AppendTags("disk", disk.String())
+	return xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
 }
 
 // Reads all `xl.json` metadata as a xlMetaV1 slice.
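readXLMeta now returns the unmarshal call directly: once the disk tag is appended up front, the temporary `xlMeta` variable and its error check add nothing, because the callee already returns the zero value alongside a non-nil error. A standalone sketch of the collapse, where `decode` is a hypothetical stand-in for xlMetaV1UnmarshalJSON:

    package main

    import (
        "encoding/json"
        "errors"
        "fmt"
    )

    type meta struct {
        Version string `json:"version"`
    }

    // decode is a hypothetical stand-in for xlMetaV1UnmarshalJSON: it
    // returns the zero value of meta together with any unmarshal error.
    func decode(buf []byte) (meta, error) {
        var m meta
        if err := json.Unmarshal(buf, &m); err != nil {
            return meta{}, err
        }
        return m, nil
    }

    func readMeta(buf []byte) (meta, error) {
        if len(buf) == 0 {
            return meta{}, errors.New("file not found")
        }
        // The signatures line up, so return the call directly instead
        // of unpacking into a temporary and re-checking the error.
        return decode(buf)
    }

    func main() {
        fmt.Println(readMeta([]byte(`{"version":"1.0.0"}`)))
        fmt.Println(readMeta(nil))
    }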
@@ -189,7 +183,7 @@ func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object stri
 func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) {
 	errs := make([]error, len(disks))
 	metadataArray := make([]xlMetaV1, len(disks))
-	var wg = &sync.WaitGroup{}
+	var wg sync.WaitGroup
 	// Read `xl.json` parallelly across disks.
 	for index, disk := range disks {
 		if disk == nil {
@@ -200,12 +194,7 @@ func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object s
 		// Read `xl.json` in routine.
 		go func(index int, disk StorageAPI) {
 			defer wg.Done()
-			var err error
-			metadataArray[index], err = readXLMeta(ctx, disk, bucket, object)
-			if err != nil {
-				errs[index] = err
-				return
-			}
+			metadataArray[index], errs[index] = readXLMeta(ctx, disk, bucket, object)
 		}(index, disk)
 	}
 
@@ -100,7 +100,7 @@ func GetHistoricLoad() Load {
 // for the process currently
 func GetLoad() Load {
 	vals := make(chan time.Duration, 3)
-	wg := sync.WaitGroup{}
+	var wg sync.WaitGroup
 	for i := 0; i < cpuLoadSampleSize; i++ {
 		cpuCounter, err := newCounter()
 		if err != nil {