Add replication capacity metrics support in crawler (#10786)

Author: Ritesh H Shukla
Date: 2020-12-07 13:47:48 -08:00
Committed by: GitHub
parent 6d70f6a4ac
commit 038bcd9079
10 changed files with 235 additions and 69 deletions
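In short: inside xlStorage.CrawlAndGetDataUsage the crawl callback now returns a sizeSummary instead of a bare int64, a sizeSummary accumulator (sizeS) is threaded through the per-version loop, and item.healReplication receives &sizeS so replication-related sizes can be recorded alongside the existing totalSize. A hedged sketch of that accumulation pattern follows the diff.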

@@ -341,17 +341,17 @@ func (s *xlStorage) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCac
 	healOpts := globalHealConfig
 	globalHealConfigMu.Unlock()
-	dataUsageInfo, err := crawlDataFolder(ctx, s.diskPath, cache, func(item crawlItem) (int64, error) {
+	dataUsageInfo, err := crawlDataFolder(ctx, s.diskPath, cache, func(item crawlItem) (sizeSummary, error) {
 		// Look for `xl.meta/xl.json' at the leaf.
 		if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) &&
 			!strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) {
 			// if no xl.meta/xl.json found, skip the file.
-			return 0, errSkipFile
+			return sizeSummary{}, errSkipFile
 		}
 		buf, err := ioutil.ReadFile(item.Path)
 		if err != nil {
-			return 0, errSkipFile
+			return sizeSummary{}, errSkipFile
 		}
 		// Remove filename which is the meta file.
@@ -359,12 +359,13 @@ func (s *xlStorage) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCac
 		fivs, err := getFileInfoVersions(buf, item.bucket, item.objectPath())
 		if err != nil {
-			return 0, errSkipFile
+			return sizeSummary{}, errSkipFile
 		}
 		var totalSize int64
 		var numVersions = len(fivs.Versions)
+		sizeS := sizeSummary{}
 		for i, version := range fivs.Versions {
 			var successorModTime time.Time
 			if i > 0 {
@@ -395,9 +396,10 @@ func (s *xlStorage) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCac
 				}
 				totalSize += size
 			}
-			item.healReplication(ctx, objAPI, actionMeta{oi: version.ToObjectInfo(item.bucket, item.objectPath())})
+			item.healReplication(ctx, objAPI, actionMeta{oi: version.ToObjectInfo(item.bucket, item.objectPath())}, &sizeS)
 		}
-		return totalSize, nil
+		sizeS.totalSize = totalSize
+		return sizeS, nil
 	})
 	if err != nil {
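The diff shows only the plumbing: the callback's return type widens from int64 to sizeSummary, and the summary is passed by pointer into item.healReplication. Below is a minimal, self-contained sketch of that accumulation pattern, not MinIO's implementation: only the sizeSummary name, its totalSize field, and the pointer-passing are confirmed by the diff; the replication counters (replicatedSize, pendingSize, failedSize), the replicationStatus type, and accumulateReplication are hypothetical stand-ins.

package main

import "fmt"

// sizeSummary mirrors the shape suggested by the diff: the crawler callback
// now returns one of these per object instead of a bare int64. Only totalSize
// appears in the diff; the replication counters are assumed fields standing in
// for the "replication capacity metrics" this commit introduces.
type sizeSummary struct {
	totalSize      int64
	replicatedSize int64 // assumed: bytes already replicated
	pendingSize    int64 // assumed: bytes still queued for replication
	failedSize     int64 // assumed: bytes whose replication failed
}

// replicationStatus is a stand-in for whatever per-version state the real
// healReplication inspects; it is purely illustrative.
type replicationStatus int

const (
	replicationComplete replicationStatus = iota
	replicationPending
	replicationFailed
)

// accumulateReplication sketches the pattern implied by passing &sizeS into
// item.healReplication: each version adds its size to one replication bucket.
func accumulateReplication(status replicationStatus, size int64, sizeS *sizeSummary) {
	switch status {
	case replicationComplete:
		sizeS.replicatedSize += size
	case replicationPending:
		sizeS.pendingSize += size
	case replicationFailed:
		sizeS.failedSize += size
	}
}

func main() {
	// Mimic the crawler loop: walk the versions of one object, accumulate
	// replication metrics per version, then record the total at the end.
	versions := []struct {
		size   int64
		status replicationStatus
	}{
		{size: 1 << 20, status: replicationComplete},
		{size: 2 << 20, status: replicationPending},
		{size: 512 << 10, status: replicationFailed},
	}

	sizeS := sizeSummary{}
	var totalSize int64
	for _, v := range versions {
		totalSize += v.size
		accumulateReplication(v.status, v.size, &sizeS)
	}
	sizeS.totalSize = totalSize // as in the diff: sizeS.totalSize = totalSize; return sizeS, nil

	fmt.Printf("%+v\n", sizeS)
}

Passing the summary by pointer keeps the per-version accumulation in one place and lets the callback return a single struct, which the crawler can then aggregate the same way it previously aggregated the bare size.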