diff --git a/cmd/erasure-metadata-utils.go b/cmd/erasure-metadata-utils.go index 2882b9baa..cc9f2ca93 100644 --- a/cmd/erasure-metadata-utils.go +++ b/cmd/erasure-metadata-utils.go @@ -67,6 +67,9 @@ func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) // values of maximally occurring errors validated against a generic // quorum number that can be read or write quorum depending on usage. func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, quorum int, quorumErr error) error { + if contextCanceled(ctx) { + return context.Canceled + } maxCount, maxErr := reduceErrs(errs, ignoredErrs) if maxCount >= quorum { return maxErr diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index 2de128654..522ed5660 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -574,7 +574,7 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str // Wait for all renames to finish. errs := g.Wait() - // We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum + // We can safely allow RenameData errors up to len(er.getDisks()) - writeQuorum // otherwise return failure. Cleanup successful renames. err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) return evalDisks(disks, errs), err @@ -1001,14 +1001,18 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st } } - // Whether a disk was initially or becomes offline - // during this upload, send it to the MRF list. - for i := 0; i < len(onlineDisks); i++ { - if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { - continue + // For speedtest objects do not attempt to heal them. + if !opts.Speedtest { + // Whether a disk was initially or becomes offline + // during this upload, send it to the MRF list. 
+ for i := 0; i < len(onlineDisks); i++ { + if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { + continue + } + + er.addPartial(bucket, object, fi.VersionID, fi.Size) + break } - er.addPartial(bucket, object, fi.VersionID, fi.Size) - break } fi.ReplicationState = opts.PutReplicationState() diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index 11558fc59..58ad3dbca 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -31,6 +31,9 @@ func toObjectErr(err error, params ...string) error { if err == nil { return nil } + if errors.Is(err, context.Canceled) { + return context.Canceled + } switch err.Error() { case errVolumeNotFound.Error(): apiErr := BucketNotFound{} diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 2fffc6c06..410883c82 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -66,7 +66,9 @@ type ObjectOptions struct { ReplicationSourceTaggingTimestamp time.Time // set if MinIOSourceTaggingTimestamp received ReplicationSourceLegalholdTimestamp time.Time // set if MinIOSourceObjectLegalholdTimestamp received ReplicationSourceRetentionTimestamp time.Time // set if MinIOSourceObjectRetentionTimestamp received - DeletePrefix bool // set true to enforce a prefix deletion, only application for DeleteObject API, + DeletePrefix bool // set true to enforce a prefix deletion, only applicable for DeleteObject API, + + Speedtest bool // object call specifically meant for SpeedTest code, set to 'true' when invoked by SpeedtestHandler. 
// Use the maximum parity (N/2), used when saving server configuration files MaxParity bool diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index d87e63f61..826c35f34 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -1169,6 +1169,7 @@ func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Dura var totalBytesRead uint64 objCountPerThread := make([]uint64, concurrent) + uploadsCtx, uploadsCancel := context.WithCancel(context.Background()) defer uploadsCancel() @@ -1187,10 +1188,9 @@ func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Dura hashReader, err := hash.NewReader(newRandomReader(size), int64(size), "", "", int64(size)) if err != nil { - if !contextCanceled(uploadsCtx) { + if !contextCanceled(uploadsCtx) && !errors.Is(err, context.Canceled) { errOnce.Do(func() { retError = err.Error() - logger.LogIf(ctx, err) }) } uploadsCancel() @@ -1202,12 +1202,13 @@ func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Dura UserDefined: map[string]string{ xhttp.AmzStorageClass: storageClass, }, + Speedtest: true, }) if err != nil { - if !contextCanceled(uploadsCtx) { + objCountPerThread[i]-- + if !contextCanceled(uploadsCtx) && !errors.Is(err, context.Canceled) { errOnce.Do(func() { retError = err.Error() - logger.LogIf(ctx, err) }) } uploadsCancel() @@ -1247,10 +1248,12 @@ func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Dura r, err := objAPI.GetObjectNInfo(downloadsCtx, minioMetaBucket, fmt.Sprintf("%s.%d.%d", objNamePrefix, i, j), nil, nil, noLock, ObjectOptions{}) if err != nil { - if !contextCanceled(downloadsCtx) { + if isErrObjectNotFound(err) { + continue + } + if !contextCanceled(downloadsCtx) && !errors.Is(err, context.Canceled) { errOnce.Do(func() { retError = err.Error() - logger.LogIf(ctx, err) }) } downloadsCancel() @@ -1265,10 +1268,9 @@ func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Dura 
atomic.AddUint64(&totalBytesRead, uint64(n)) } if err != nil { - if !contextCanceled(downloadsCtx) { + if !contextCanceled(downloadsCtx) && !errors.Is(err, context.Canceled) { errOnce.Do(func() { retError = err.Error() - logger.LogIf(ctx, err) }) } downloadsCancel() diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index c277677a4..7e8632455 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -2281,7 +2281,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f // Any failed rename calls un-roll previous transaction. s.deleteFile(dstVolumeDir, legacyDataPath, true) } - return err + return osErrToFileErr(err) } // renameAll only for objects that have xl.meta not saved inline.