mirror of
https://github.com/minio/minio.git
synced 2025-02-03 18:06:00 -05:00
Fix TestDataUpdateTracker hanging (#10302)
Keep dataUpdateTracker while goroutine is starting. This will ensure the object is updated once `start` returns. Tested with ``` λ go test -cpu=1,2,4,8 -test.run TestDataUpdateTracker -count=1000 PASS ok github.com/minio/minio/cmd 8.913s ``` Fixes #10295
This commit is contained in:
parent
59352d0ac2
commit
8e6787a302
@ -181,6 +181,8 @@ func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
|
|||||||
}
|
}
|
||||||
d.load(ctx, drives...)
|
d.load(ctx, drives...)
|
||||||
go d.startCollector(ctx)
|
go d.startCollector(ctx)
|
||||||
|
// startSaver will unlock.
|
||||||
|
d.mu.Lock()
|
||||||
go d.startSaver(ctx, dataUpdateTrackerSaveInterval, drives)
|
go d.startSaver(ctx, dataUpdateTrackerSaveInterval, drives)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -214,17 +216,17 @@ func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// startSaver will start a saver that will write d to all supplied drives at specific intervals.
|
// startSaver will start a saver that will write d to all supplied drives at specific intervals.
|
||||||
|
// 'd' must be write locked when started and will be unlocked.
|
||||||
// The saver will save and exit when supplied context is closed.
|
// The saver will save and exit when supplied context is closed.
|
||||||
func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Duration, drives []string) {
|
func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Duration, drives []string) {
|
||||||
t := time.NewTicker(interval)
|
|
||||||
defer t.Stop()
|
|
||||||
var buf bytes.Buffer
|
|
||||||
d.mu.Lock()
|
|
||||||
saveNow := d.save
|
saveNow := d.save
|
||||||
exited := make(chan struct{})
|
exited := make(chan struct{})
|
||||||
d.saveExited = exited
|
d.saveExited = exited
|
||||||
d.mu.Unlock()
|
d.mu.Unlock()
|
||||||
|
t := time.NewTicker(interval)
|
||||||
|
defer t.Stop()
|
||||||
defer close(exited)
|
defer close(exited)
|
||||||
|
var buf bytes.Buffer
|
||||||
for {
|
for {
|
||||||
var exit bool
|
var exit bool
|
||||||
select {
|
select {
|
||||||
|
@ -80,7 +80,7 @@ func newErasureZones(ctx context.Context, endpointZones EndpointZones) (ObjectLa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
go intDataUpdateTracker.start(GlobalContext, localDrives...)
|
go intDataUpdateTracker.start(ctx, localDrives...)
|
||||||
return z, nil
|
return z, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -179,7 +179,7 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
|
|||||||
fs.fsFormatRlk = rlk
|
fs.fsFormatRlk = rlk
|
||||||
|
|
||||||
go fs.cleanupStaleMultipartUploads(ctx, GlobalMultipartCleanupInterval, GlobalMultipartExpiry)
|
go fs.cleanupStaleMultipartUploads(ctx, GlobalMultipartCleanupInterval, GlobalMultipartExpiry)
|
||||||
go intDataUpdateTracker.start(GlobalContext, fsPath)
|
go intDataUpdateTracker.start(ctx, fsPath)
|
||||||
|
|
||||||
// Return successfully initialized object layer.
|
// Return successfully initialized object layer.
|
||||||
return fs, nil
|
return fs, nil
|
||||||
|
Loading…
x
Reference in New Issue
Block a user