diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go
index a65a2a117..32b1cc741 100644
--- a/cmd/admin-handlers.go
+++ b/cmd/admin-handlers.go
@@ -654,12 +654,10 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
 	globalProfilerMu.Unlock()
 
 	timer := time.NewTimer(duration)
+	defer timer.Stop()
 	for {
 		select {
 		case <-ctx.Done():
-			if !timer.Stop() {
-				<-timer.C
-			}
 			for k, v := range globalProfiler {
 				v.Stop()
 				delete(globalProfiler, k)
diff --git a/cmd/admin-heal-ops.go b/cmd/admin-heal-ops.go
index 94ebb8e2f..eef0ee8f5 100644
--- a/cmd/admin-heal-ops.go
+++ b/cmd/admin-heal-ops.go
@@ -194,7 +194,6 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
 	for {
 		select {
 		case <-periodicTimer.C:
-			periodicTimer.Reset(time.Minute * 5)
 			now := UTCNow()
 			ahs.Lock()
 			for path, h := range ahs.healSeqMap {
@@ -203,6 +202,8 @@ func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
 				}
 			}
 			ahs.Unlock()
+
+			periodicTimer.Reset(time.Minute * 5)
 		case <-ctx.Done():
 			// server could be restarting - need
 			// to exit immediately
@@ -581,12 +582,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
 	// heal-results in memory and the client has not consumed it
 	// for too long.
 	unconsumedTimer := time.NewTimer(healUnconsumedTimeout)
-	defer func() {
-		// stop the timeout timer so it is garbage collected.
-		if !unconsumedTimer.Stop() {
-			<-unconsumedTimer.C
-		}
-	}()
+	defer unconsumedTimer.Stop()
 
 	var itemsLen int
 	for {
diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go
index 261eed1b5..590bf3d75 100644
--- a/cmd/background-newdisks-heal-ops.go
+++ b/cmd/background-newdisks-heal-ops.go
@@ -312,9 +312,6 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
 		case <-ctx.Done():
 			return
 		case <-diskCheckTimer.C:
-			// Reset to next interval.
-			diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
-
 			var erasureSetInPoolDisksToHeal []map[int][]StorageAPI
 
 			healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
@@ -448,6 +445,9 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
 				}
 			}
 			wg.Wait()
+
+			// Reset for next interval.
+			diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
 		}
 	}
 }
diff --git a/cmd/bucket-replication-stats.go b/cmd/bucket-replication-stats.go
index 3f0c26721..70ebb642f 100644
--- a/cmd/bucket-replication-stats.go
+++ b/cmd/bucket-replication-stats.go
@@ -161,7 +161,7 @@ func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *Replicatio
 
 // load replication metrics at cluster start from initial data usage
 func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
-	rTimer := time.NewTimer(time.Minute * 1)
+	rTimer := time.NewTimer(time.Minute)
 	defer rTimer.Stop()
 	var (
 		dui DataUsageInfo
@@ -169,6 +169,7 @@ func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
 	)
 outer:
 	for {
+		rTimer.Reset(time.Minute)
 		select {
 		case <-ctx.Done():
 			return
diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go
index 1674f5981..7e4ab547a 100644
--- a/cmd/bucket-replication.go
+++ b/cmd/bucket-replication.go
@@ -1926,7 +1926,6 @@ func (p *ReplicationPool) periodicResyncMetaSave(ctx context.Context, objectAPI
 	for {
 		select {
 		case <-resyncTimer.C:
-			resyncTimer.Reset(resyncTimeInterval)
 			now := UTCNow()
 			p.resyncState.RLock()
 			for bucket, brs := range p.resyncState.statusMap {
@@ -1947,6 +1946,8 @@ func (p *ReplicationPool) periodicResyncMetaSave(ctx context.Context, objectAPI
 				}
 			}
 			p.resyncState.RUnlock()
+
+			resyncTimer.Reset(resyncTimeInterval)
 		case <-ctx.Done():
 			// server could be restarting - need
 			// to exit immediately
diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go
index 929b5ae62..45ad303ba 100644
--- a/cmd/data-scanner.go
+++ b/cmd/data-scanner.go
@@ -197,9 +197,6 @@ func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
 		case <-ctx.Done():
 			return
 		case <-scannerTimer.C:
-			// Reset the timer for next cycle.
-			scannerTimer.Reset(scannerCycle.Get())
-
 			if intDataUpdateTracker.debug {
 				console.Debugln("starting scanner cycle")
 			}
@@ -232,6 +229,9 @@ func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
					logger.LogIf(ctx, err)
 				}
 			}
+
+			// Reset the timer for next cycle.
+			scannerTimer.Reset(scannerCycle.Get())
 		}
 	}
 }
diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go
index f93f77c40..b3a09e0e1 100644
--- a/cmd/disk-cache-backend.go
+++ b/cmd/disk-cache-backend.go
@@ -1630,8 +1630,6 @@ func (c *diskCache) cleanupStaleUploads(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case <-timer.C:
-			// Reset for the next interval
-			timer.Reset(cacheStaleUploadCleanupInterval)
 			now := time.Now()
 			readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir), func(shaDir string, typ os.FileMode) error {
 				return readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir, shaDir), func(uploadIDDir string, typ os.FileMode) error {
@@ -1662,6 +1660,9 @@ func (c *diskCache) cleanupStaleUploads(ctx context.Context) {
 				}
 				return nil
 			})
+
+			// Reset for the next interval
+			timer.Reset(cacheStaleUploadCleanupInterval)
 		}
 	}
 }
diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go
index e3f85e2d3..bd0d013fd 100644
--- a/cmd/erasure-sets.go
+++ b/cmd/erasure-sets.go
@@ -301,14 +301,14 @@ func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInt
 		case <-ctx.Done():
 			return
 		case <-monitor.C:
-			// Reset the timer once fired for required interval.
-			monitor.Reset(monitorInterval)
-
 			if serverDebugLog {
 				console.Debugln("running disk monitoring")
 			}
 
 			s.connectDisks()
+
+			// Reset the timer for next interval
+			monitor.Reset(monitorInterval)
 		}
 	}
 }
@@ -504,9 +504,6 @@ func (s *erasureSets) cleanupDeletedObjects(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case <-timer.C:
-			// Reset for the next interval
-			timer.Reset(globalAPIConfig.getDeleteCleanupInterval())
-
 			var wg sync.WaitGroup
 			for _, set := range s.sets {
 				wg.Add(1)
@@ -519,6 +516,9 @@ func (s *erasureSets) cleanupDeletedObjects(ctx context.Context) {
 				}(set)
 			}
 			wg.Wait()
+
+			// Reset for the next interval
+			timer.Reset(globalAPIConfig.getDeleteCleanupInterval())
 		}
 	}
 }
@@ -532,9 +532,6 @@ func (s *erasureSets) cleanupStaleUploads(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case <-timer.C:
-			// Reset for the next interval
-			timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
-
 			var wg sync.WaitGroup
 			for _, set := range s.sets {
 				wg.Add(1)
@@ -547,6 +544,9 @@ func (s *erasureSets) cleanupStaleUploads(ctx context.Context) {
 				}(set)
 			}
 			wg.Wait()
+
+			// Reset for the next interval
+			timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
 		}
 	}
 }
diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go
index 8d59338a7..ed1c022a3 100644
--- a/cmd/fs-v1-multipart.go
+++ b/cmd/fs-v1-multipart.go
@@ -887,8 +887,6 @@ func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case <-bgAppendTmpCleaner.C:
-			bgAppendTmpCleaner.Reset(bgAppendsCleanupInterval)
-
 			foundUploadIDs := fs.getAllUploadIDs(ctx)
 
 			// Remove background append map from the memory
@@ -915,10 +913,8 @@ func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) {
 				}
 			}
 
+			bgAppendTmpCleaner.Reset(bgAppendsCleanupInterval)
 		case <-expiryUploadsTimer.C:
-			// Reset for the next interval
-			expiryUploadsTimer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
-
 			expiry := globalAPIConfig.getStaleUploadsExpiry()
 			now := time.Now()
 
@@ -944,6 +940,9 @@ func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) {
 					fs.appendFileMapMu.Unlock()
 				}
 			}
+
+			// Reset for the next interval
+			expiryUploadsTimer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
 		}
 	}
 }
diff --git a/cmd/lock-rest-server.go b/cmd/lock-rest-server.go
index 353f86cc0..7d07004a3 100644
--- a/cmd/lock-rest-server.go
+++ b/cmd/lock-rest-server.go
@@ -239,10 +239,10 @@ func lockMaintenance(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case <-lkTimer.C:
+			globalLockServer.expireOldLocks(lockValidityDuration)
+
 			// Reset the timer for next cycle.
 			lkTimer.Reset(lockMaintenanceInterval)
-
-			globalLockServer.expireOldLocks(lockValidityDuration)
 		}
 	}
 }
diff --git a/cmd/site-replication.go b/cmd/site-replication.go
index 163a8161e..e6a86362f 100644
--- a/cmd/site-replication.go
+++ b/cmd/site-replication.go
@@ -3330,10 +3330,10 @@ func (c *SiteReplicationSys) startHealRoutine(ctx context.Context, objAPI Object
 	defer healTimer.Stop()
 
 	for {
+		healTimer.Reset(siteHealTimeInterval)
+
 		select {
 		case <-healTimer.C:
-			healTimer.Reset(siteHealTimeInterval)
-
 			c.RLock()
 			enabled := c.enabled
 			c.RUnlock()
diff --git a/internal/dsync/drwmutex.go b/internal/dsync/drwmutex.go
index f64974f98..9cd077fd8 100644
--- a/internal/dsync/drwmutex.go
+++ b/internal/dsync/drwmutex.go
@@ -255,12 +255,12 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
 		defer refreshTimer.Stop()
 
 		for {
+			refreshTimer.Reset(dm.refreshInterval)
+
 			select {
 			case <-ctx.Done():
 				return
 			case <-refreshTimer.C:
-				refreshTimer.Reset(dm.refreshInterval)
-
 				noQuorum, err := refreshLock(ctx, dm.clnt, id, source, quorum)
 				if err == nil && noQuorum {
 					// Clean the lock locally and in remote nodes
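
All of the hunks above apply the same timer discipline: clean up with a plain deferred Stop, and re-arm the timer only after the current tick's work has completed, so a slow iteration cannot leave an already-expired timer behind and trigger the next cycle immediately. Below is a minimal sketch of that pattern, not code from this patch; the interval constant, doWork, and runLoop names are illustrative placeholders.

// Sketch (placeholder names, not MinIO code) of the loop shape used above:
// Stop is deferred once for cleanup, and Reset runs only after the tick's
// work has finished, so the next interval starts when the work ends.
package main

import (
	"context"
	"fmt"
	"time"
)

const interval = time.Second // placeholder; each MinIO loop has its own interval

func doWork() {
	fmt.Println("tick:", time.Now().Format(time.RFC3339)) // placeholder work
}

func runLoop(ctx context.Context) {
	t := time.NewTimer(interval)
	// Stopping without draining is fine here: the timer is never reused
	// after return, so a plain deferred Stop is all the cleanup needed.
	defer t.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			doWork()

			// The receive above already drained t.C, so Reset is safe
			// without a Stop-and-drain dance; the next tick fires a full
			// interval after the work completed.
			t.Reset(interval)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3500*time.Millisecond)
	defer cancel()
	runLoop(ctx)
}

The site-replication.go and drwmutex.go loops use the variant where Reset sits at the top of the for loop, before the select; the effect is the same in that the timer is re-armed exactly once per iteration, after the previous tick's work has finished rather than while it is still running.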