Mirror of https://github.com/minio/minio.git, synced 2024-12-24 06:05:55 -05:00
fix: timer usage across codebase (#14935)
It seems that in some places we have been using the timer.Reset() function incorrectly, as nicely exposed by an example shared by @donatello: https://go.dev/play/p/qoF71_D1oXD. This PR fixes all such usage comprehensively.
parent 2dc8ac1e62
commit 6cfb1cb6fd
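For context: per the time.Timer documentation of this era, Reset should only be called on a timer that has been stopped or has already fired and whose channel has been drained. In a for/select loop that means resetting from inside the case that received from the timer's channel. Below is a minimal, self-contained sketch of that loop shape; the names runEvery and work are illustrative, not MinIO identifiers.

// Sketch (illustrative names, not MinIO code) of the loop shape this commit
// converges on: reset the timer only from the case that drained its channel.
package main

import (
    "context"
    "fmt"
    "time"
)

func runEvery(ctx context.Context, interval time.Duration, work func()) {
    t := time.NewTimer(interval)
    defer t.Stop()

    for {
        // Wrong (old pattern): calling t.Reset(interval) here, before the
        // select, resets a timer that may still be running and undrained.
        select {
        case <-ctx.Done():
            return
        case <-t.C:
            work()
            // Safe: the timer has fired and <-t.C drained its channel, so
            // Reset starts a clean countdown for the next cycle.
            t.Reset(interval)
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
    defer cancel()
    runEvery(ctx, 100*time.Millisecond, func() { fmt.Println("tick") })
}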
@@ -654,12 +654,10 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
globalProfilerMu.Unlock()

timer := time.NewTimer(duration)
defer timer.Stop()
for {
select {
case <-ctx.Done():
if !timer.Stop() {
<-timer.C
}
for k, v := range globalProfiler {
v.Stop()
delete(globalProfiler, k)
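This first hunk replaces the manual Stop-and-drain on the ctx.Done() path with a single defer timer.Stop(). A hedged sketch of why that is enough when the timer is only being abandoned on exit (illustrative names, not the handler's actual code):

// Sketch: when nothing will Reset or reuse the timer after the loop exits,
// a deferred Stop is sufficient cleanup; draining the channel is only
// needed before a Reset of a timer that may not have fired yet.
package timerpatterns

import (
    "context"
    "time"
)

func profileFor(ctx context.Context, duration time.Duration, collect, abort func()) {
    timer := time.NewTimer(duration)
    defer timer.Stop() // fine whether or not the timer ever fired

    select {
    case <-ctx.Done():
        abort() // no Stop-and-drain dance required here
    case <-timer.C:
        collect()
    }
}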
@@ -194,7 +194,6 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
for {
select {
case <-periodicTimer.C:
periodicTimer.Reset(time.Minute * 5)
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
@@ -203,6 +202,8 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
}
}
ahs.Unlock()

periodicTimer.Reset(time.Minute * 5)
case <-ctx.Done():
// server could be restarting - need
// to exit immediately
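Note the shape of this change, repeated through most of the commit: the Reset moves from the top of the timer case to after the work in that case, so the next countdown begins only once the cleanup pass has finished. A hedged, self-contained sketch of that shape (illustrative names, not the allHealState code):

// Sketch of the "reset after the work" shape used throughout this commit.
package timerpatterns

import (
    "context"
    "time"
)

func cleanEvery(ctx context.Context, interval time.Duration, clean func()) {
    t := time.NewTimer(interval)
    defer t.Stop()

    for {
        select {
        case <-t.C:
            clean() // may take a while

            // Resetting only after clean() returns schedules the next run
            // interval after this one finished, instead of firing
            // immediately whenever clean() outlives the interval.
            t.Reset(interval)
        case <-ctx.Done():
            return
        }
    }
}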
@@ -581,12 +582,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// heal-results in memory and the client has not consumed it
// for too long.
unconsumedTimer := time.NewTimer(healUnconsumedTimeout)
defer func() {
// stop the timeout timer so it is garbage collected.
if !unconsumedTimer.Stop() {
<-unconsumedTimer.C
}
}()
defer unconsumedTimer.Stop()

var itemsLen int
for {
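One caveat worth keeping in mind (general Go guidance, not a claim about this function): the Stop-and-drain sequence is still the right tool when a timer that probably has not fired yet is about to be Reset, e.g. an inactivity timeout pushed back on every event. A hedged sketch:

// Sketch: an inactivity timeout extended on every event. Because the timer
// usually has NOT fired when we reset it, Stop-and-drain before Reset is
// required (per the time.Timer documentation of this era).
package timerpatterns

import "time"

func watchInactivity(events <-chan struct{}, idle time.Duration, onIdle func()) {
    t := time.NewTimer(idle)
    defer t.Stop()

    for {
        select {
        case _, ok := <-events:
            if !ok {
                return
            }
            // The timer is likely still running: stop it, drain the channel
            // if it fired in the meantime, then start a fresh countdown.
            if !t.Stop() {
                <-t.C
            }
            t.Reset(idle)
        case <-t.C:
            onIdle()
            t.Reset(idle) // timer fired and was drained; safe to reset
        }
    }
}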
@@ -312,9 +312,6 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
case <-ctx.Done():
return
case <-diskCheckTimer.C:
// Reset to next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)

var erasureSetInPoolDisksToHeal []map[int][]StorageAPI

healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
@@ -448,6 +445,9 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
}
}
wg.Wait()

// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
}
}
}
@@ -161,7 +161,7 @@ func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *Replicatio

// load replication metrics at cluster start from initial data usage
func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
rTimer := time.NewTimer(time.Minute * 1)
rTimer := time.NewTimer(time.Minute)
defer rTimer.Stop()
var (
dui DataUsageInfo
@@ -169,6 +169,7 @@ func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
)
outer:
for {
rTimer.Reset(time.Minute)
select {
case <-ctx.Done():
return
@@ -1926,7 +1926,6 @@ func (p *ReplicationPool) periodicResyncMetaSave(ctx context.Context, objectAPI
for {
select {
case <-resyncTimer.C:
resyncTimer.Reset(resyncTimeInterval)
now := UTCNow()
p.resyncState.RLock()
for bucket, brs := range p.resyncState.statusMap {
@@ -1947,6 +1946,8 @@ func (p *ReplicationPool) periodicResyncMetaSave(ctx context.Context, objectAPI
}
}
p.resyncState.RUnlock()

resyncTimer.Reset(resyncTimeInterval)
case <-ctx.Done():
// server could be restarting - need
// to exit immediately
@@ -197,9 +197,6 @@ func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
case <-ctx.Done():
return
case <-scannerTimer.C:
// Reset the timer for next cycle.
scannerTimer.Reset(scannerCycle.Get())

if intDataUpdateTracker.debug {
console.Debugln("starting scanner cycle")
}
@@ -232,6 +229,9 @@ func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
logger.LogIf(ctx, err)
}
}

// Reset the timer for next cycle.
scannerTimer.Reset(scannerCycle.Get())
}
}
}
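runDataScanner re-reads its interval via scannerCycle.Get() each time it resets, so the cycle length can change at runtime. A hedged sketch of that idea; the dynamicInterval holder below is an assumption for illustration, not MinIO's scannerCycle type:

// Sketch: re-reading the interval on every cycle lets it be tuned while
// the loop is running. Illustrative types and names only.
package timerpatterns

import (
    "context"
    "sync/atomic"
    "time"
)

type dynamicInterval struct{ v atomic.Int64 }

func (d *dynamicInterval) Set(dur time.Duration) { d.v.Store(int64(dur)) }
func (d *dynamicInterval) Get() time.Duration    { return time.Duration(d.v.Load()) }

func scanLoop(ctx context.Context, cycle *dynamicInterval, scan func()) {
    t := time.NewTimer(cycle.Get())
    defer t.Stop()

    for {
        select {
        case <-ctx.Done():
            return
        case <-t.C:
            scan()
            // Pick up whatever interval is configured right now.
            t.Reset(cycle.Get())
        }
    }
}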
@@ -1630,8 +1630,6 @@ func (c *diskCache) cleanupStaleUploads(ctx context.Context) {
case <-ctx.Done():
return
case <-timer.C:
// Reset for the next interval
timer.Reset(cacheStaleUploadCleanupInterval)
now := time.Now()
readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir), func(shaDir string, typ os.FileMode) error {
return readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir, shaDir), func(uploadIDDir string, typ os.FileMode) error {
@@ -1662,6 +1660,9 @@ func (c *diskCache) cleanupStaleUploads(ctx context.Context) {
}
return nil
})

// Reset for the next interval
timer.Reset(cacheStaleUploadCleanupInterval)
}
}
}
@@ -301,14 +301,14 @@ func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInt
case <-ctx.Done():
return
case <-monitor.C:
// Reset the timer once fired for required interval.
monitor.Reset(monitorInterval)

if serverDebugLog {
console.Debugln("running disk monitoring")
}

s.connectDisks()

// Reset the timer for next interval
monitor.Reset(monitorInterval)
}
}
}
@@ -504,9 +504,6 @@ func (s *erasureSets) cleanupDeletedObjects(ctx context.Context) {
case <-ctx.Done():
return
case <-timer.C:
// Reset for the next interval
timer.Reset(globalAPIConfig.getDeleteCleanupInterval())

var wg sync.WaitGroup
for _, set := range s.sets {
wg.Add(1)
@@ -519,6 +516,9 @@ func (s *erasureSets) cleanupDeletedObjects(ctx context.Context) {
}(set)
}
wg.Wait()

// Reset for the next interval
timer.Reset(globalAPIConfig.getDeleteCleanupInterval())
}
}
}
@@ -532,9 +532,6 @@ func (s *erasureSets) cleanupStaleUploads(ctx context.Context) {
case <-ctx.Done():
return
case <-timer.C:
// Reset for the next interval
timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())

var wg sync.WaitGroup
for _, set := range s.sets {
wg.Add(1)
@@ -547,6 +544,9 @@ func (s *erasureSets) cleanupStaleUploads(ctx context.Context) {
}(set)
}
wg.Wait()

// Reset for the next interval
timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
}
}
}
@@ -887,8 +887,6 @@ func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) {
case <-ctx.Done():
return
case <-bgAppendTmpCleaner.C:
bgAppendTmpCleaner.Reset(bgAppendsCleanupInterval)

foundUploadIDs := fs.getAllUploadIDs(ctx)

// Remove background append map from the memory
@@ -915,10 +913,8 @@ func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) {
}
}

bgAppendTmpCleaner.Reset(bgAppendsCleanupInterval)
case <-expiryUploadsTimer.C:
// Reset for the next interval
expiryUploadsTimer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())

expiry := globalAPIConfig.getStaleUploadsExpiry()
now := time.Now()
@@ -944,6 +940,9 @@ func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) {
fs.appendFileMapMu.Unlock()
}
}

// Reset for the next interval
expiryUploadsTimer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
}
}
}
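The FSObjects loop above multiplexes two independent timers in one select, each reset only in its own case after that case's work. A hedged sketch of the structure (illustrative names, not the FS code):

// Sketch: two cleanup tasks on independent schedules sharing one loop.
// Each timer is reset only by the case that drained its channel.
package timerpatterns

import (
    "context"
    "time"
)

func cleanupLoop(ctx context.Context, fastEvery, slowEvery time.Duration, fast, slow func()) {
    fastTimer := time.NewTimer(fastEvery)
    defer fastTimer.Stop()
    slowTimer := time.NewTimer(slowEvery)
    defer slowTimer.Stop()

    for {
        select {
        case <-ctx.Done():
            return
        case <-fastTimer.C:
            fast()
            fastTimer.Reset(fastEvery) // reschedule only this task
        case <-slowTimer.C:
            slow()
            slowTimer.Reset(slowEvery)
        }
    }
}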
@@ -239,10 +239,10 @@ func lockMaintenance(ctx context.Context) {
case <-ctx.Done():
return
case <-lkTimer.C:
globalLockServer.expireOldLocks(lockValidityDuration)

// Reset the timer for next cycle.
lkTimer.Reset(lockMaintenanceInterval)

globalLockServer.expireOldLocks(lockValidityDuration)
}
}
}
@@ -3330,10 +3330,10 @@ func (c *SiteReplicationSys) startHealRoutine(ctx context.Context, objAPI Object
defer healTimer.Stop()

for {
healTimer.Reset(siteHealTimeInterval)

select {
case <-healTimer.C:
healTimer.Reset(siteHealTimeInterval)

c.RLock()
enabled := c.enabled
c.RUnlock()
@@ -255,12 +255,12 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
defer refreshTimer.Stop()

for {
refreshTimer.Reset(dm.refreshInterval)

select {
case <-ctx.Done():
return
case <-refreshTimer.C:
refreshTimer.Reset(dm.refreshInterval)

noQuorum, err := refreshLock(ctx, dm.clnt, id, source, quorum)
if err == nil && noQuorum {
// Clean the lock locally and in remote nodes
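Since every hunk applies the same loop shape by hand, a natural follow-up (not part of this PR, just a sketch) is to centralize it in a small helper so new call sites cannot reintroduce the misuse:

// Sketch only; this helper is not part of the PR. It captures the pattern
// the commit applies at each call site: wait, run fn, then reset.
package timerpatterns

import (
    "context"
    "time"
)

// runAtInterval calls fn roughly every interval until ctx is cancelled.
// The timer is reset only after fn returns, from the case that drained it.
func runAtInterval(ctx context.Context, interval time.Duration, fn func(context.Context)) {
    t := time.NewTimer(interval)
    defer t.Stop()

    for {
        select {
        case <-ctx.Done():
            return
        case <-t.C:
            fn(ctx)
            t.Reset(interval)
        }
    }
}

A call site such as lockMaintenance could then, in principle, be written as runAtInterval(ctx, lockMaintenanceInterval, func(ctx context.Context) { globalLockServer.expireOldLocks(lockValidityDuration) }), though this PR keeps the explicit loops.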