fix: for DiskInfo call cache disk metrics (#15229)

Small uploads spend a significant amount of time (~5%) fetching disk info metrics. Also, maps are allocated on each call.

Add a 100ms cache to disk metrics.
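For context, a minimal sketch of what a TTL-backed cache along these lines could look like, written against the timedValue fields the diff below initializes (Once, TTL, Update) and its Get method; the internals and the small demo in main are assumptions for illustration, not MinIO's actual timedValue implementation.

package main

import (
	"fmt"
	"sync"
	"time"
)

// timedValue caches one value for a bounded amount of time (sketch).
type timedValue struct {
	// Once lets the caller set up TTL and Update exactly once.
	Once sync.Once
	// TTL is how long a computed value stays fresh; the commit uses 100ms.
	TTL time.Duration
	// Update recomputes the value once the cached copy has expired.
	Update func() (interface{}, error)

	mu         sync.Mutex
	value      interface{}
	lastUpdate time.Time
}

// Get returns the cached value, calling Update only when the cached copy
// is missing or older than TTL.
func (t *timedValue) Get() (interface{}, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.value != nil && time.Since(t.lastUpdate) < t.TTL {
		return t.value, nil
	}
	v, err := t.Update()
	if err != nil {
		return nil, err
	}
	t.value = v
	t.lastUpdate = time.Now()
	return v, nil
}

func main() {
	var tv timedValue
	tv.Once.Do(func() {
		tv.TTL = 100 * time.Millisecond
		tv.Update = func() (interface{}, error) {
			return time.Now(), nil // expensive metrics collection goes here
		}
	})
	v, _ := tv.Get()
	fmt.Println(v)
}

With a 100ms TTL, concurrent callers within the window share one computed DiskMetrics value instead of rebuilding both maps on every DiskInfo call.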
Klaus Post 2022-07-05 11:02:30 -07:00 committed by GitHub
parent c7e01b139d
commit 2471bdda00
1 changed file with 19 additions and 11 deletions


@@ -78,20 +78,28 @@ type xlStorageDiskIDCheck struct {
 	diskID  string
 	storage *xlStorage
 	health  *diskHealthTracker
+	metricsCache timedValue
 }
 
 func (p *xlStorageDiskIDCheck) getMetrics() DiskMetrics {
-	diskMetric := DiskMetrics{
-		APILatencies: make(map[string]uint64),
-		APICalls:     make(map[string]uint64),
-	}
-	for i, v := range p.apiLatencies {
-		diskMetric.APILatencies[storageMetric(i).String()] = v.value()
-	}
-	for i := range p.apiCalls {
-		diskMetric.APICalls[storageMetric(i).String()] = atomic.LoadUint64(&p.apiCalls[i])
-	}
-	return diskMetric
+	p.metricsCache.Once.Do(func() {
+		p.metricsCache.TTL = 100 * time.Millisecond
+		p.metricsCache.Update = func() (interface{}, error) {
+			diskMetric := DiskMetrics{
+				APILatencies: make(map[string]uint64, len(p.apiLatencies)),
+				APICalls:     make(map[string]uint64, len(p.apiCalls)),
+			}
+			for i, v := range p.apiLatencies {
+				diskMetric.APILatencies[storageMetric(i).String()] = v.value()
+			}
+			for i := range p.apiCalls {
+				diskMetric.APICalls[storageMetric(i).String()] = atomic.LoadUint64(&p.apiCalls[i])
+			}
+			return diskMetric, nil
+		}
+	})
+	m, _ := p.metricsCache.Get()
+	return m.(DiskMetrics)
 }
 
 type lockedLastMinuteLatency struct {