reduce 250ms to 50ms retry looking for metacache block (#16795)

This commit is contained in:
Harshavardhana 2023-03-17 14:44:01 -07:00 committed by GitHub
parent 850a945a18
commit 280442e533
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@@ -397,6 +397,11 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
retries := 0 retries := 0
rpc := globalNotificationSys.restClientFromHash(pathJoin(o.Bucket, o.Prefix)) rpc := globalNotificationSys.restClientFromHash(pathJoin(o.Bucket, o.Prefix))
const (
retryDelay = 50 * time.Millisecond
retryDelay250 = 250 * time.Millisecond
)
for { for {
if contextCanceled(ctx) { if contextCanceled(ctx) {
return entries, ctx.Err() return entries, ctx.Err()
@@ -411,7 +416,6 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
retries = 1 retries = 1
} }
const retryDelay = 250 * time.Millisecond
// All operations are performed without locks, so we must be careful and allow for failures. // All operations are performed without locks, so we must be careful and allow for failures.
// Read metadata associated with the object from a disk. // Read metadata associated with the object from a disk.
if retries > 0 { if retries > 0 {
@@ -425,7 +429,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
_, err := disk.ReadVersion(ctx, minioMetaBucket, _, err := disk.ReadVersion(ctx, minioMetaBucket,
o.objectPath(0), "", false) o.objectPath(0), "", false)
if err != nil { if err != nil {
time.Sleep(retryDelay) time.Sleep(retryDelay250)
retries++ retries++
continue continue
} }
@@ -440,11 +444,19 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
switch toObjectErr(err, minioMetaBucket, o.objectPath(0)).(type) { switch toObjectErr(err, minioMetaBucket, o.objectPath(0)).(type) {
case ObjectNotFound: case ObjectNotFound:
retries++ retries++
if retries == 1 {
time.Sleep(retryDelay) time.Sleep(retryDelay)
} else {
time.Sleep(retryDelay250)
}
continue continue
case InsufficientReadQuorum: case InsufficientReadQuorum:
retries++ retries++
if retries == 1 {
time.Sleep(retryDelay) time.Sleep(retryDelay)
} else {
time.Sleep(retryDelay250)
}
continue continue
default: default:
return entries, fmt.Errorf("reading first part metadata: %w", err) return entries, fmt.Errorf("reading first part metadata: %w", err)
@@ -463,7 +475,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
retries = -1 retries = -1
} }
retries++ retries++
time.Sleep(retryDelay) time.Sleep(retryDelay250)
continue continue
case errors.Is(err, io.EOF): case errors.Is(err, io.EOF):
return entries, io.EOF return entries, io.EOF
@@ -497,7 +509,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
_, err := disk.ReadVersion(ctx, minioMetaBucket, _, err := disk.ReadVersion(ctx, minioMetaBucket,
o.objectPath(partN), "", false) o.objectPath(partN), "", false)
if err != nil { if err != nil {
time.Sleep(retryDelay) time.Sleep(retryDelay250)
retries++ retries++
continue continue
} }
@@ -508,7 +520,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
// Load partN metadata... // Load partN metadata...
fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(partN), ObjectOptions{}, true) fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(partN), ObjectOptions{}, true)
if err != nil { if err != nil {
time.Sleep(retryDelay) time.Sleep(retryDelay250)
retries++ retries++
continue continue
} }
@@ -530,9 +542,9 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
}() }()
tmp := newMetacacheReader(pr) tmp := newMetacacheReader(pr)
defer tmp.Close()
e, err := tmp.filter(o) e, err := tmp.filter(o)
pr.CloseWithError(err) pr.CloseWithError(err)
tmp.Close()
entries.o = append(entries.o, e.o...) entries.o = append(entries.o, e.o...)
if o.Limit > 0 && entries.len() > o.Limit { if o.Limit > 0 && entries.len() > o.Limit {
entries.truncate(o.Limit) entries.truncate(o.Limit)
@@ -546,11 +558,11 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
switch toObjectErr(err, minioMetaBucket, o.objectPath(partN)).(type) { switch toObjectErr(err, minioMetaBucket, o.objectPath(partN)).(type) {
case ObjectNotFound: case ObjectNotFound:
retries++ retries++
time.Sleep(retryDelay) time.Sleep(retryDelay250)
continue continue
case InsufficientReadQuorum: case InsufficientReadQuorum:
retries++ retries++
time.Sleep(retryDelay) time.Sleep(retryDelay250)
continue continue
default: default:
logger.LogIf(ctx, err) logger.LogIf(ctx, err)