Mirror of https://github.com/minio/minio.git (synced 2025-01-11 15:03:22 -05:00)
feat: add dynamic usage cache (#12229)
A cache structure will be kept with a tree of usages. The cache is a tree structure where each branch keeps track of its children.

An uncompacted branch contains a count of the files only directly at the branch level, and contains links to children branches or leaves.

The leaves are "compacted" based on a number of properties. A compacted leaf contains the totals of all files beneath it.

A leaf is only scanned once every dataUsageUpdateDirCycles, rarer if the bloom filter for the path is clean and no lifecycles are applied. Skipped leaves have their totals transferred from the previous cycle.

A clean leaf will be included once every healFolderIncludeProb for partial heal scans. When selected there is a one in healObjectSelectProb chance that any object will be chosen for heal scan.

Compaction happens when either:

- The folder (and subfolders) contains less than dataScannerCompactLeastObject objects.
- The folder itself contains more than dataScannerCompactAtFolders folders.
- The folder only contains objects and no subfolders.

A bucket root will never be compacted.

Furthermore, if a folder has more than dataScannerCompactAtChildren recursive children (uncompacted folders), the tree will be recursively scanned and the branches with the least number of objects will be compacted until the limit is reached. This ensures that any branch never contains an unreasonable number of other branches, and that small branches with few objects don't take up unreasonable amounts of space.

Whenever a branch is scanned, it is assumed that it will be un-compacted before it hits any of the above limits. This makes the branch rebalance itself when scanned if the distribution of objects has changed.

TLDR; with current values: no bucket will ever have more than 10000 child nodes recursively; no single folder will have more than 2500 child nodes by itself; all subfolders are compacted if they have less than 500 objects in them recursively.

We accumulate the (non-delete-marker) version count for paths as well, since we are changing the structure anyway.
This commit is contained in:
parent: f63eedb2b4 · commit: 229d83bb75
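The compaction rules in the message above are easiest to see in code. The following is a minimal, self-contained sketch of the decision, using hypothetical simplified types rather than the scanner's real cache entries; only the constant values are taken from the commit.

package main

import "fmt"

type branch struct {
	objects    int      // objects directly in this folder
	subfolders []branch // child folders
}

// totalObjects counts objects in the branch and all subfolders.
func totalObjects(b branch) int {
	n := b.objects
	for _, c := range b.subfolders {
		n += totalObjects(c)
	}
	return n
}

// shouldCompact applies the three rules from the commit message.
// isBucketRoot guards the "bucket root is never compacted" exception.
func shouldCompact(b branch, isBucketRoot bool) bool {
	const (
		compactLeastObject = 500  // dataScannerCompactLeastObject
		compactAtFolders   = 2500 // dataScannerCompactAtFolders
	)
	if isBucketRoot {
		return false
	}
	switch {
	case totalObjects(b) < compactLeastObject: // rule 1
		return true
	case len(b.subfolders) > compactAtFolders: // rule 2
		return true
	case b.objects > 0 && len(b.subfolders) == 0: // rule 3
		return true
	}
	return false
}

func main() {
	small := branch{objects: 10}
	fmt.Println(shouldCompact(small, false)) // true: fewer than 500 objects
}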
@@ -45,9 +45,12 @@ import (
 )

 const (
-	dataScannerSleepPerFolder = 20 * time.Millisecond // Time to wait between folders.
-	dataScannerStartDelay     = 1 * time.Minute       // Time to wait on startup and between cycles.
-	dataUsageUpdateDirCycles  = 16                    // Visit all folders every n cycles.
+	dataScannerSleepPerFolder     = time.Millisecond                 // Time to wait between folders.
+	dataUsageUpdateDirCycles      = 16                               // Visit all folders every n cycles.
+	dataScannerCompactLeastObject = 500                              // Compact when there is less than this many objects in a branch.
+	dataScannerCompactAtChildren  = 10000                            // Compact when there are this many children in a branch.
+	dataScannerCompactAtFolders   = dataScannerCompactAtChildren / 4 // Compact when this many subfolders in a single folder.
+	dataScannerStartDelay         = 1 * time.Minute                  // Time to wait on startup and between cycles.

 	healDeleteDangling    = true
 	healFolderIncludeProb = 32 // Include a clean folder one in n cycles.
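The commit message's TLDR numbers fall straight out of these constants; a quick worked check (illustrative, not code from the repository):

package main

import "fmt"

func main() {
	const (
		dataScannerCompactLeastObject = 500   // branches below this object count compact
		dataScannerCompactAtChildren  = 10000 // max recursive children per bucket
		dataScannerCompactAtFolders   = dataScannerCompactAtChildren / 4
	)
	fmt.Println(dataScannerCompactAtChildren)  // 10000 recursive child nodes per bucket
	fmt.Println(dataScannerCompactAtFolders)   // 2500 subfolders in a single folder
	fmt.Println(dataScannerCompactLeastObject) // 500 objects
}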
@@ -185,11 +188,47 @@ type folderScanner struct {
 	healFolderInclude uint32 // Include a clean folder one in n cycles.
 	healObjectSelect  uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude

-	newFolders      []cachedFolder
-	existingFolders []cachedFolder
-	disks           []StorageAPI
+	disks []StorageAPI
 }

+// Cache structure and compaction:
+//
+// A cache structure will be kept with a tree of usages.
+// The cache is a tree structure where each branch keeps track of its children.
+//
+// An uncompacted branch contains a count of the files only directly at the
+// branch level, and contains links to children branches or leaves.
+//
+// The leaves are "compacted" based on a number of properties.
+// A compacted leaf contains the totals of all files beneath it.
+//
+// A leaf is only scanned once every dataUsageUpdateDirCycles,
+// rarer if the bloom filter for the path is clean and no lifecycles are applied.
+// Skipped leaves have their totals transferred from the previous cycle.
+//
+// A clean leaf will be included once every healFolderIncludeProb for partial heal scans.
+// When selected there is a one in healObjectSelectProb chance that any object will be chosen for heal scan.
+//
+// Compaction happens when either:
+//
+// 1) The folder (and subfolders) contains less than dataScannerCompactLeastObject objects.
+// 2) The folder itself contains more than dataScannerCompactAtFolders folders.
+// 3) The folder only contains objects and no subfolders.
+//
+// A bucket root will never be compacted.
+//
+// Furthermore, if a folder has more than dataScannerCompactAtChildren recursive children (uncompacted folders)
+// the tree will be recursively scanned and the branches with the least number of objects will be
+// compacted until the limit is reached.
+//
+// This ensures that any branch will never contain an unreasonable amount of other branches,
+// and also that small branches with few objects don't take up unreasonable amounts of space.
+// This keeps the cache size at a reasonable size for all buckets.
+//
+// Whenever a branch is scanned, it is assumed that it will be un-compacted
+// before it hits any of the above limits.
+// This will make the branch rebalance itself when scanned if the distribution of objects has changed.

 // scanDataFolder will scan the basepath+cache.Info.Name and return an updated cache.
 // The returned cache will always be valid, but may not be updated from the existing.
 // Before each operation sleepDuration is called which can be used to temporarily halt the scanner.
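For intuition, here is a small stand-alone sketch of the cycle-based selection the comment describes: a stable per-path hash offsets which cycle each folder is due on, so roughly 1/n of folders are rescanned per cycle. pathHash and selectedThisCycle below are illustrative stand-ins, not MinIO's actual hashPath/mod helpers.

package main

import (
	"fmt"
	"hash/fnv"
)

// pathHash gives a stable per-path value so each folder lands on a
// different cycle, spreading rescans evenly across cycles.
func pathHash(p string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(p))
	return h.Sum32()
}

// selectedThisCycle reports whether a path is due on the given cycle,
// i.e. one cycle in every 'every' cycles, offset by the path hash.
func selectedThisCycle(path string, cycle, every uint32) bool {
	return (pathHash(path)+cycle)%every == 0
}

func main() {
	const dataUsageUpdateDirCycles = 16 // full rescan: one in 16 cycles
	const healFolderIncludeProb = 32    // clean-folder heal scan: one in 32
	for cycle := uint32(0); cycle < 4; cycle++ {
		fmt.Println(cycle,
			selectedThisCycle("bucket/prefix", cycle, dataUsageUpdateDirCycles),
			selectedThisCycle("bucket/prefix", cycle, healFolderIncludeProb))
	}
}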
@@ -211,15 +250,11 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 		return cache, errors.New("internal error: root scan attempted")
 	}

-	skipHeal := cache.Info.SkipHealing
-
 	s := folderScanner{
 		root:                  basePath,
 		getSize:               getSize,
 		oldCache:              cache,
 		newCache:              dataUsageCache{Info: cache.Info},
-		newFolders:            nil,
-		existingFolders:       nil,
 		dataUsageScannerDebug: intDataUpdateTracker.debug,
 		healFolderInclude:     0,
 		healObjectSelect:      0,
@@ -238,7 +273,7 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 	}

 	// Enable healing in XL mode.
-	if globalIsErasure {
+	if globalIsErasure && !cache.Info.SkipHealing {
 		// Include a clean folder one in n cycles.
 		s.healFolderInclude = healFolderIncludeProb
 		// Do a heal check on an object once every n cycles. Must divide into healFolderInclude
@@ -257,139 +292,53 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
 	}

 	done := ctx.Done()
-	var flattenLevels = 2

 	if s.dataUsageScannerDebug {
 		console.Debugf(logPrefix+"Cycle: %v, Entries: %v %s\n", cache.Info.NextCycle, len(cache.Cache), logSuffix)
 	}

-	// Always scan flattenLevels deep. Cache root is level 0.
-	todo := []cachedFolder{{name: cache.Info.Name, objectHealProbDiv: 1}}
-	for i := 0; i < flattenLevels; i++ {
-		if s.dataUsageScannerDebug {
-			console.Debugf(logPrefix+"Level %v, scanning %v directories. %s\n", i, len(todo), logSuffix)
-		}
-		select {
-		case <-done:
-			return cache, ctx.Err()
-		default:
-		}
-		var err error
-		todo, err = s.scanQueuedLevels(ctx, todo, i == flattenLevels-1, skipHeal)
-		if err != nil {
-			// No useful information...
-			return cache, err
-		}
-	}
+	// Read top level in bucket.
+	select {
+	case <-done:
+		return cache, ctx.Err()
+	default:
+	}
+	root := dataUsageEntry{}
+	folder := cachedFolder{name: cache.Info.Name, objectHealProbDiv: 1}
+	err := s.scanFolder(ctx, folder, &root)
+	if err != nil {
+		// No useful information...
+		return cache, err
+	}

-	if s.dataUsageScannerDebug {
-		console.Debugf(logPrefix+"New folders: %v %s\n", s.newFolders, logSuffix)
-	}
-
-	// Add new folders first
-	for _, folder := range s.newFolders {
-		select {
-		case <-done:
-			return s.newCache, ctx.Err()
-		default:
-		}
-		du, err := s.deepScanFolder(ctx, folder, skipHeal)
-		if err != nil {
-			logger.LogIf(ctx, err)
-			continue
-		}
-		if du == nil {
-			console.Debugln(logPrefix + "no disk usage provided" + logSuffix)
-			continue
-		}
-
-		s.newCache.replace(folder.name, "", *du)
-		// Add to parent manually
-		if folder.parent != nil {
-			parent := s.newCache.Cache[folder.parent.Key()]
-			parent.addChildString(folder.name)
-		}
-	}
-
-	if s.dataUsageScannerDebug {
-		console.Debugf(logPrefix+"Existing folders: %v %s\n", len(s.existingFolders), logSuffix)
-	}
-
-	// Do selective scanning of existing folders.
-	for _, folder := range s.existingFolders {
-		select {
-		case <-done:
-			return s.newCache, ctx.Err()
-		default:
-		}
-		h := hashPath(folder.name)
-		if !h.mod(s.oldCache.Info.NextCycle, dataUsageUpdateDirCycles) {
-			if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
-				s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
-				continue
-			} else {
-				folder.objectHealProbDiv = s.healFolderInclude
-			}
-			folder.objectHealProbDiv = dataUsageUpdateDirCycles
-		}
-		if s.withFilter != nil {
-			_, prefix := path2BucketObjectWithBasePath(basePath, folder.name)
-			if s.oldCache.Info.lifeCycle == nil || !s.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
-				// If folder isn't in filter, skip it completely.
-				if !s.withFilter.containsDir(folder.name) {
-					if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
-						if s.dataUsageScannerDebug {
-							console.Debugf(logPrefix+"Skipping non-updated folder: %v %s\n", folder, logSuffix)
-						}
-						s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
-						continue
-					} else {
-						if s.dataUsageScannerDebug {
-							console.Debugf(logPrefix+"Adding non-updated folder to heal check: %v %s\n", folder.name, logSuffix)
-						}
-						// Update probability of including objects
-						folder.objectHealProbDiv = s.healFolderInclude
-					}
-				}
-			}
-		}
-
-		// Update on this cycle...
-		du, err := s.deepScanFolder(ctx, folder, skipHeal)
-		if err != nil {
-			logger.LogIf(ctx, err)
-			continue
-		}
-		if du == nil {
-			logger.LogIf(ctx, errors.New("data-usage: no disk usage provided"))
-			continue
-		}
-		s.newCache.replaceHashed(h, folder.parent, *du)
-	}
 	if s.dataUsageScannerDebug {
-		console.Debugf(logPrefix+"Finished scanner, %v entries %s\n", len(s.newCache.Cache), logSuffix)
+		console.Debugf(logPrefix+"Finished scanner, %v entries (%+v) %s \n", len(s.newCache.Cache), *s.newCache.sizeRecursive(s.newCache.Info.Name), logSuffix)
 	}
 	s.newCache.Info.LastUpdate = UTCNow()
 	s.newCache.Info.NextCycle++
 	return s.newCache, nil
 }

-// scanQueuedLevels will scan the provided folders.
+// scanFolder will scan the provided folder.
 // Files found in the folders will be added to f.newCache.
 // If final is provided folders will be put into f.newFolders or f.existingFolders.
 // If final is not provided the folders found are returned from the function.
-func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFolder, final bool, skipHeal bool) ([]cachedFolder, error) {
-	var nextFolders []cachedFolder
+func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, into *dataUsageEntry) error {
 	done := ctx.Done()
 	scannerLogPrefix := color.Green("folder-scanner:")
-	for _, folder := range folders {
-		thisHash := hashPath(folder.name)
+	thisHash := hashPath(folder.name)
+	// Store initial compaction state.
+	wasCompacted := into.Compacted
+
+	for {
 		select {
 		case <-done:
-			return nil, ctx.Err()
+			return ctx.Err()
 		default:
 		}
-		thisHash := hashPath(folder.name)
-		existing := f.oldCache.findChildrenCopy(thisHash)
+		existing, ok := f.oldCache.Cache[thisHash.Key()]
+		var abandonedChildren dataUsageHashMap
+		if !into.Compacted {
+			abandonedChildren = f.oldCache.findChildrenCopy(thisHash)
+		}

 		// If there are lifecycle rules for the prefix, remove the filter.
 		filter := f.withFilter
@@ -402,28 +351,29 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			activeLifeCycle = f.oldCache.Info.lifeCycle
 			filter = nil
 		}
-		if _, ok := f.oldCache.Cache[thisHash.Key()]; filter != nil && ok {
+
+		// Check if we can skip it due to bloom filter...
+		if filter != nil && ok && existing.Compacted {
 			// If folder isn't in filter and we have data, skip it completely.
 			if folder.name != dataUsageRoot && !filter.containsDir(folder.name) {
-				if !thisHash.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) {
+				if f.healObjectSelect == 0 || !thisHash.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) {
 					f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
 					if f.dataUsageScannerDebug {
 						console.Debugf(scannerLogPrefix+" Skipping non-updated folder: %v\n", folder.name)
 					}
-					continue
-				} else {
-					if f.dataUsageScannerDebug {
-						console.Debugf(scannerLogPrefix+" Adding non-updated folder to heal check: %v\n", folder.name)
-					}
-					// If probability was already scannerHealFolderInclude, keep it.
-					folder.objectHealProbDiv = f.healFolderInclude
+					return nil
 				}
+				if f.dataUsageScannerDebug {
+					console.Debugf(scannerLogPrefix+" Adding non-updated folder to heal check: %v\n", folder.name)
+				}
+				// If probability was already scannerHealFolderInclude, keep it.
+				folder.objectHealProbDiv = f.healFolderInclude
 			}
 		}
 		scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)

-		cache := dataUsageEntry{}
-
+		var existingFolders, newFolders []cachedFolder
+		var foundObjects bool
 		err := readDirFn(path.Join(f.root, folder.name), func(entName string, typ os.FileMode) error {
 			// Parse
 			entName = pathClean(path.Join(folder.name, entName))
@@ -455,26 +405,15 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			}

 			if typ&os.ModeDir != 0 {
-				scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)
-
 				h := hashPath(entName)
 				_, exists := f.oldCache.Cache[h.Key()]
-				cache.addChildString(entName)
-
 				this := cachedFolder{name: entName, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}
-
-				delete(existing, h.Key()) // h.Key() already accounted for.
-
-				cache.addChild(h)
-
-				if final {
-					if exists {
-						f.existingFolders = append(f.existingFolders, this)
-					} else {
-						f.newFolders = append(f.newFolders, this)
-					}
+				delete(abandonedChildren, h.Key()) // h.Key() already accounted for.
+				if exists {
+					existingFolders = append(existingFolders, this)
 				} else {
-					nextFolders = append(nextFolders, this)
+					newFolders = append(newFolders, this)
 				}
 				return nil
 			}
@@ -497,54 +436,111 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			// if the drive belongs to an erasure set
 			// that is already being healed, skip the
 			// healing attempt on this drive.
-			item.heal = item.heal && !skipHeal
+			item.heal = item.heal && f.healObjectSelect > 0

-			sizeSummary, err := f.getSize(item)
+			sz, err := f.getSize(item)
 			if err == errSkipFile {
 				wait() // wait to proceed to next entry.

 				return nil
 			}

+			// successfully read means we have a valid object.
+			foundObjects = true
 			// Remove filename i.e is the meta file to construct object name
 			item.transformMetaDir()

 			// Object already accounted for, remove from heal map,
 			// simply because getSize() function already heals the
 			// object.
-			delete(existing, path.Join(item.bucket, item.objectPath()))
+			delete(abandonedChildren, path.Join(item.bucket, item.objectPath()))

-			cache.addSizes(sizeSummary)
-			cache.Objects++
-			cache.ObjSizes.add(sizeSummary.totalSize)
+			into.addSizes(sz)
+			into.Objects++

 			wait() // wait to proceed to next entry.

 			return nil
 		})
 		if err != nil {
-			return nil, err
+			return err
 		}

-		if f.healObjectSelect == 0 {
-			// If we are not scanning, return now.
-			f.newCache.replaceHashed(thisHash, folder.parent, cache)
-			continue
+		if foundObjects && globalIsErasure {
+			// If we found an object in erasure mode, we skip subdirs (only datadirs)...
+			break
 		}

+		// If we have many subfolders, compact ourself.
+		if !into.Compacted &&
+			f.newCache.Info.Name != folder.name &&
+			len(existingFolders)+len(newFolders) >= dataScannerCompactAtFolders {
+			into.Compacted = true
+			newFolders = append(newFolders, existingFolders...)
+			existingFolders = nil
+			if f.dataUsageScannerDebug {
+				console.Debugf(scannerLogPrefix+" Preemptively compacting: %v, entries: %v\n", folder.name, len(existingFolders)+len(newFolders))
+			}
+		}
+
+		scanFolder := func(folder cachedFolder) {
+			if contextCanceled(ctx) {
+				return
+			}
+			dst := into
+			if !into.Compacted {
+				dst = &dataUsageEntry{Compacted: false}
+			}
+			if err := f.scanFolder(ctx, folder, dst); err != nil {
+				logger.LogIf(ctx, err)
+				return
+			}
+			if !into.Compacted {
+				into.addChild(dataUsageHash(folder.name))
+			}
+		}
+
+		// Scan new...
+		for _, folder := range newFolders {
+			scanFolder(folder)
+		}
+
+		// Scan existing...
+		for _, folder := range existingFolders {
+			h := hashPath(folder.name)
+			// Check if we should skip scanning folder...
+			// We can only skip if we are not indexing into a compacted destination
+			// and the entry itself is compacted.
+			if !into.Compacted && f.oldCache.isCompacted(h) {
+				if !h.mod(f.oldCache.Info.NextCycle, dataUsageUpdateDirCycles) {
+					if !h.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) {
+						// Transfer and add as child...
+						f.newCache.copyWithChildren(&f.oldCache, h, folder.parent)
+						into.addChild(h)
+						continue
+					}
+					folder.objectHealProbDiv = dataUsageUpdateDirCycles
+				}
+			}
+			scanFolder(folder)
+		}
+
+		// Scan for healing
+		if f.healObjectSelect == 0 || len(abandonedChildren) == 0 {
+			// If we are not heal scanning, return now.
+			break
+		}
+
 		objAPI, ok := newObjectLayerFn().(*erasureServerPools)
 		if !ok || len(f.disks) == 0 {
-			continue
+			break
 		}

 		bgSeq, found := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
 		if !found {
-			continue
+			break
 		}

-		// Whatever remains in 'existing' are folders at this level
+		// Whatever remains in 'abandonedChildren' are folders at this level
 		// that existed in the previous run but wasn't found now.
 		//
 		// This may be because of 2 reasons:
@@ -563,7 +559,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 		}

 		healObjectsPrefix := color.Green("healObjects:")
-		for k := range existing {
+		for k := range abandonedChildren {
 			bucket, prefix := path2BucketObject(k)
 			if f.dataUsageScannerDebug {
 				console.Debugf(scannerLogPrefix+" checking disappeared folder: %v/%v\n", bucket, prefix)
@@ -677,115 +673,68 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
 			objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{
 				Recursive: true,
 				Remove:    healDeleteDangling,
-			},
-				func(bucket, object, versionID string) error {
-					// Wait for each heal as per scanner frequency.
-					wait()
-					return bgSeq.queueHealTask(healSource{
-						bucket:    bucket,
-						object:    object,
-						versionID: versionID,
-					}, madmin.HealItemObject)
-				})
+			}, func(bucket, object, versionID string) error {
+				// Wait for each heal as per scanner frequency.
+				wait()
+				wait = scannerSleeper.Timer(ctx)
+				return bgSeq.queueHealTask(healSource{
+					bucket:    bucket,
+					object:    object,
+					versionID: versionID,
+				}, madmin.HealItemObject)
+			})
 		}

 		// Add unless healing returned an error.
 		if foundObjs {
-			this := cachedFolder{name: k, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}
-			cache.addChild(hashPath(k))
-			if final {
-				f.existingFolders = append(f.existingFolders, this)
-			} else {
-				nextFolders = append(nextFolders, this)
-			}
+			this := cachedFolder{name: k, parent: &thisHash, objectHealProbDiv: 1}
+			scanFolder(this)
 		}
 	}
+		break
 	}
+	if !wasCompacted {
+		f.newCache.replaceHashed(thisHash, folder.parent, *into)
+	}

+	if !into.Compacted && f.newCache.Info.Name != folder.name {
+		flat := f.newCache.sizeRecursive(thisHash.Key())
+		flat.Compacted = true
+		var compact bool
+		if flat.Objects < dataScannerCompactLeastObject {
+			if f.dataUsageScannerDebug && flat.Objects > 1 {
+				// Disabled, rather chatty:
+				//console.Debugf(scannerLogPrefix+" Only %d objects, compacting %s -> %+v\n", flat.Objects, folder.name, flat)
+			}
+			compact = true
+		} else {
+			// Compact if we only have objects as children...
+			compact = true
+			for k := range into.Children {
+				if v, ok := f.newCache.Cache[k]; ok {
+					if len(v.Children) > 0 || v.Objects > 1 {
+						compact = false
+						break
+					}
+				}
+			}
+		}
-		f.newCache.replaceHashed(thisHash, folder.parent, cache)
-	}
-	return nextFolders, nil
-}
-
-// deepScanFolder will deep scan a folder and return the size if no error occurs.
-func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder, skipHeal bool) (*dataUsageEntry, error) {
-	var cache dataUsageEntry
-
-	done := ctx.Done()
-
-	var addDir func(entName string, typ os.FileMode) error
-	var dirStack = []string{f.root, folder.name}
-
-	deepScannerLogPrefix := color.Green("deep-scanner:")
-	addDir = func(entName string, typ os.FileMode) error {
-		select {
-		case <-done:
-			return errDoneForNow
-		default:
-		}
-
-		if typ&os.ModeDir != 0 {
-			dirStack = append(dirStack, entName)
-			err := readDirFn(path.Join(dirStack...), addDir)
-			dirStack = dirStack[:len(dirStack)-1]
-			scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)
-			return err
-		}
-
-		// Dynamic time delay.
-		wait := scannerSleeper.Timer(ctx)
-
-		// Get file size, ignore errors.
-		dirStack = append(dirStack, entName)
-		fileName := path.Join(dirStack...)
-		dirStack = dirStack[:len(dirStack)-1]
-
-		bucket, prefix := path2BucketObjectWithBasePath(f.root, fileName)
-		var activeLifeCycle *lifecycle.Lifecycle
-		if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, false) {
-			if f.dataUsageScannerDebug {
-				console.Debugf(deepScannerLogPrefix+" Prefix %q has active rules\n", prefix)
-			}
-			activeLifeCycle = f.oldCache.Info.lifeCycle
-		}
+		if f.dataUsageScannerDebug && compact {
+			// Disabled, rather chatty:
+			//console.Debugf(scannerLogPrefix+" Only objects (%d), compacting %s -> %+v\n", flat.Objects, folder.name, flat)
+		}
+		if compact {
+			f.newCache.deleteRecursive(thisHash)
+			f.newCache.replaceHashed(thisHash, folder.parent, *flat)
+		}
+	}
-
-		item := scannerItem{
-			Path:       fileName,
-			Typ:        typ,
-			bucket:     bucket,
-			prefix:     path.Dir(prefix),
-			objectName: path.Base(entName),
-			debug:      f.dataUsageScannerDebug,
-			lifeCycle:  activeLifeCycle,
-			heal:       hashPath(path.Join(prefix, entName)).mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
-		}
-
-		// if the drive belongs to an erasure set
-		// that is already being healed, skip the
-		// healing attempt on this drive.
-		item.heal = item.heal && !skipHeal
-
-		sizeSummary, err := f.getSize(item)
-		if err == errSkipFile {
-			// Wait to throttle IO
-			wait()
-
-			return nil
-		}
-
-		logger.LogIf(ctx, err)
-		cache.addSizes(sizeSummary)
-		cache.Objects++
-		cache.ObjSizes.add(sizeSummary.totalSize)
-
-		// Wait to throttle IO
-		wait()
-
-		return nil
-	}
-	err := readDirFn(path.Join(dirStack...), addDir)
-	if err != nil {
-		return nil, err
+	// Compact if too many children...
+	if !into.Compacted {
+		f.newCache.reduceChildrenOf(thisHash, dataScannerCompactAtChildren, f.newCache.Info.Name != folder.name)
 	}
-	return &cache, nil
+	return nil
 }

 // scannerItem represents each file while walking.
@@ -803,6 +752,7 @@ type scannerItem struct {

 type sizeSummary struct {
 	totalSize      int64
+	versions       uint64
 	replicatedSize int64
 	pendingSize    int64
 	failedSize     int64
@@ -863,7 +813,8 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta ac
 	}
 	if i.lifeCycle == nil {
 		if i.debug {
-			console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
+			// disabled, very chatty:
+			// console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
 		}
 		return false, size
 	}
@@ -498,9 +498,6 @@ func (d *dataUpdateTracker) startCollector(ctx context.Context) {
 			// Add all paths until done.
 			d.mu.Lock()
 			for i := range split {
-				if d.debug {
-					console.Debugln(color.Green("dataUpdateTracker:") + " Marking path dirty: " + color.Blue(path.Join(split[:i+1]...)))
-				}
 				d.Current.bf.AddString(hashPath(path.Join(split[:i+1]...)).String())
 			}
 			d.dirty = d.dirty || len(split) > 0
@@ -530,9 +527,6 @@ func (d *dataUpdateTracker) markDirty(in string) {
 	// Add all paths until done.
 	d.mu.Lock()
 	for i := range split {
-		if d.debug {
-			console.Debugln(dateUpdateTrackerLogPrefix + " Marking path dirty: " + color.Blue(path.Join(split[:i+1]...)))
-		}
 		d.Current.bf.AddString(hashPath(path.Join(split[:i+1]...)).String())
 	}
 	d.dirty = d.dirty || len(split) > 0
@@ -25,6 +25,7 @@ import (
 	"net/http"
 	"path"
 	"path/filepath"
+	"sort"
 	"strings"
 	"time"
@@ -51,8 +52,10 @@ type dataUsageEntry struct {
 	// These fields do not include any children.
 	Size     int64
 	Objects  uint64
+	Versions uint64 // Versions that are not delete markers.
 	ObjSizes sizeHistogram
-	ReplicationStats replicationStats
+	ReplicationStats *replicationStats
+	Compacted        bool
 }

 //msgp:tuple replicationStats
@@ -69,6 +72,9 @@ type replicationStats struct {
 	AfterThresholdCount uint64
 }

+//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4
+//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4
+
 //msgp:tuple dataUsageEntryV2
 type dataUsageEntryV2 struct {
 	// These fields do not include any children.
@@ -91,13 +97,26 @@ type dataUsageEntryV3 struct {
 	Children dataUsageHashMap
 }

-// dataUsageCache contains a cache of data usage entries latest version 4.
+//msgp:tuple dataUsageEntryV4
+type dataUsageEntryV4 struct {
+	Children dataUsageHashMap
+	// These fields do not include any children.
+	Size             int64
+	Objects          uint64
+	ObjSizes         sizeHistogram
+	ReplicationStats replicationStats
+}
+
+// dataUsageCache contains a cache of data usage entries latest version.
 type dataUsageCache struct {
 	Info  dataUsageCacheInfo
 	Cache map[string]dataUsageEntry
 	Disks []string
 }

+//msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4
+//msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4
+
 // dataUsageCacheV2 contains a cache of data usage entries version 2.
 type dataUsageCacheV2 struct {
 	Info dataUsageCacheInfo
@@ -112,6 +131,13 @@ type dataUsageCacheV3 struct {
 	Cache map[string]dataUsageEntryV3
 }

+// dataUsageCacheV4 contains a cache of data usage entries version 4.
+type dataUsageCacheV4 struct {
+	Info  dataUsageCacheInfo
+	Disks []string
+	Cache map[string]dataUsageEntryV4
+}
+
 //msgp:ignore dataUsageEntryInfo
 type dataUsageEntryInfo struct {
 	Name string
@@ -133,25 +159,42 @@ type dataUsageCacheInfo struct {

 func (e *dataUsageEntry) addSizes(summary sizeSummary) {
 	e.Size += summary.totalSize
-	e.ReplicationStats.ReplicatedSize += uint64(summary.replicatedSize)
-	e.ReplicationStats.FailedSize += uint64(summary.failedSize)
-	e.ReplicationStats.PendingSize += uint64(summary.pendingSize)
-	e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
-	e.ReplicationStats.PendingCount += uint64(summary.pendingCount)
-	e.ReplicationStats.FailedCount += uint64(summary.failedCount)
+	e.Versions += summary.versions
 	e.ObjSizes.add(summary.totalSize)
+
+	if summary.replicaSize > 0 || summary.pendingSize > 0 || summary.replicatedSize > 0 ||
+		summary.failedCount > 0 || summary.pendingCount > 0 || summary.failedSize > 0 {
+		if e.ReplicationStats == nil {
+			e.ReplicationStats = &replicationStats{}
+		}
+		e.ReplicationStats.ReplicatedSize += uint64(summary.replicatedSize)
+		e.ReplicationStats.FailedSize += uint64(summary.failedSize)
+		e.ReplicationStats.PendingSize += uint64(summary.pendingSize)
+		e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
+		e.ReplicationStats.PendingCount += summary.pendingCount
+		e.ReplicationStats.FailedCount += summary.failedCount
+	}
 }

 // merge other data usage entry into this, excluding children.
 func (e *dataUsageEntry) merge(other dataUsageEntry) {
 	e.Objects += other.Objects
+	e.Versions += other.Versions
 	e.Size += other.Size
-	e.ReplicationStats.PendingSize += other.ReplicationStats.PendingSize
-	e.ReplicationStats.FailedSize += other.ReplicationStats.FailedSize
-	e.ReplicationStats.ReplicatedSize += other.ReplicationStats.ReplicatedSize
-	e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
-	e.ReplicationStats.PendingCount += other.ReplicationStats.PendingCount
-	e.ReplicationStats.FailedCount += other.ReplicationStats.FailedCount
+
+	ors := other.ReplicationStats
+	empty := replicationStats{}
+	if ors != nil && *ors != empty {
+		if e.ReplicationStats == nil {
+			e.ReplicationStats = &replicationStats{}
+		}
+		e.ReplicationStats.PendingSize += other.ReplicationStats.PendingSize
+		e.ReplicationStats.FailedSize += other.ReplicationStats.FailedSize
+		e.ReplicationStats.ReplicatedSize += other.ReplicationStats.ReplicatedSize
+		e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
+		e.ReplicationStats.PendingCount += other.ReplicationStats.PendingCount
+		e.ReplicationStats.FailedCount += other.ReplicationStats.FailedCount
+	}

 	for i, v := range other.ObjSizes[:] {
 		e.ObjSizes[i] += v
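ReplicationStats is now a pointer that is only allocated when a summary actually carries replication numbers, so the many entries without replication activity cost a nil pointer instead of a full struct. A sketch of that allocate-on-first-use pattern with simplified, hypothetical types:

package main

import "fmt"

type stats struct{ replicatedSize, failedSize uint64 }

type entry struct {
	size  int64
	stats *stats // nil until replication data is seen
}

func (e *entry) add(replicated, failed uint64) {
	if replicated == 0 && failed == 0 {
		return // keep the pointer nil for the common case
	}
	if e.stats == nil {
		e.stats = &stats{} // allocate lazily on first real data
	}
	e.stats.replicatedSize += replicated
	e.stats.failedSize += failed
}

func main() {
	var e entry
	e.add(0, 0)
	fmt.Println(e.stats == nil) // true: nothing allocated
	e.add(1024, 0)
	fmt.Println(e.stats.replicatedSize) // 1024
}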
@@ -196,6 +239,16 @@ func (d *dataUsageCache) find(path string) *dataUsageEntry {
 	return &due
 }

+// isCompacted returns whether an entry is compacted.
+// Returns false if not found.
+func (d *dataUsageCache) isCompacted(h dataUsageHash) bool {
+	due, ok := d.Cache[h.Key()]
+	if !ok {
+		return false
+	}
+	return due.Compacted
+}
+
 // findChildrenCopy returns a copy of the children of the supplied hash.
 func (d *dataUsageCache) findChildrenCopy(h dataUsageHash) dataUsageHashMap {
 	ch := d.Cache[h.String()].Children
@@ -217,6 +270,7 @@ func (d *dataUsageCache) subCache(path string) dataUsageCache {
 	return dst
 }

+// deleteRecursive will delete an entry recursively, but not change its parent.
 func (d *dataUsageCache) deleteRecursive(h dataUsageHash) {
 	if existing, ok := d.Cache[h.String()]; ok {
 		// Delete first if there should be a loop.
@@ -285,19 +339,22 @@ func (d *dataUsageCache) dui(path string, buckets []BucketInfo) madmin.DataUsage
 		return madmin.DataUsageInfo{}
 	}
 	flat := d.flatten(*e)
-	return madmin.DataUsageInfo{
-		LastUpdate:              d.Info.LastUpdate,
-		ObjectsTotalCount:       flat.Objects,
-		ObjectsTotalSize:        uint64(flat.Size),
-		ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
-		ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
-		ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
-		ReplicaSize:             flat.ReplicationStats.ReplicaSize,
-		ReplicationPendingCount: flat.ReplicationStats.PendingCount,
-		ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
-		BucketsCount:            uint64(len(e.Children)),
-		BucketsUsage:            d.bucketsUsageInfo(buckets),
+	dui := madmin.DataUsageInfo{
+		LastUpdate:        d.Info.LastUpdate,
+		ObjectsTotalCount: flat.Objects,
+		ObjectsTotalSize:  uint64(flat.Size),
+		BucketsCount:      uint64(len(e.Children)),
+		BucketsUsage:      d.bucketsUsageInfo(buckets),
 	}
+	if flat.ReplicationStats != nil {
+		dui.ReplicationPendingSize = flat.ReplicationStats.PendingSize
+		dui.ReplicatedSize = flat.ReplicationStats.ReplicatedSize
+		dui.ReplicationFailedSize = flat.ReplicationStats.FailedSize
+		dui.ReplicationPendingCount = flat.ReplicationStats.PendingCount
+		dui.ReplicationFailedCount = flat.ReplicationStats.FailedCount
+		dui.ReplicaSize = flat.ReplicationStats.ReplicaSize
+	}
+	return dui
 }

 // replace will add or replace an entry in the cache.
@@ -317,6 +374,16 @@ func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) {
 	}
 }

+// listCache will return all cache paths.
+func (d *dataUsageCache) listCache() []string {
+	dst := make([]string, 0, len(d.Cache))
+	for k := range d.Cache {
+		dst = append(dst, k)
+	}
+	sort.Strings(dst)
+	return dst
+}
+
 // replaceHashed add or replaces an entry to the cache based on its hash.
 // If a parent is specified it will be added to that if not already there.
 // If the parent does not exist, it will be added.
@@ -358,6 +425,91 @@ func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHas
 	}
 }

+// reduceChildrenOf will reduce the recursive number of children to the limit
+// by compacting the children with the least number of objects.
+func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compactSelf bool) {
+	e, ok := d.Cache[path.Key()]
+	if !ok {
+		return
+	}
+	if e.Compacted {
+		return
+	}
+	// If direct children have more, compact all.
+	if len(e.Children) > limit && compactSelf {
+		flat := d.sizeRecursive(path.Key())
+		flat.Compacted = true
+		d.deleteRecursive(path)
+		d.replaceHashed(path, nil, *flat)
+		return
+	}
+	total := d.totalChildrenRec(path.Key())
+	if total < limit {
+		return
+	}
+
+	// Appears to be printed with _MINIO_SERVER_DEBUG=off
+	// console.Debugf(" %d children found, compacting %v\n", total, path)
+
+	var leaves = make([]struct {
+		objects uint64
+		path    dataUsageHash
+	}, total)
+	// Collect current leaves that have children.
+	leaves = leaves[:0]
+	remove := total - limit
+	var add func(path dataUsageHash)
+	add = func(path dataUsageHash) {
+		e, ok := d.Cache[path.Key()]
+		if !ok {
+			return
+		}
+		if len(e.Children) == 0 {
+			return
+		}
+		sz := d.sizeRecursive(path.Key())
+		leaves = append(leaves, struct {
+			objects uint64
+			path    dataUsageHash
+		}{objects: sz.Objects, path: path})
+		for ch := range e.Children {
+			add(dataUsageHash(ch))
+		}
+	}
+
+	// Add path recursively.
+	add(path)
+	sort.Slice(leaves, func(i, j int) bool {
+		return leaves[i].objects < leaves[j].objects
+	})
+	for remove > 0 && len(leaves) > 0 {
+		// Remove top entry.
+		e := leaves[0]
+		candidate := e.path
+		if candidate == path && !compactSelf {
+			// We should be the biggest,
+			// if we cannot compact ourself, we are done.
+			break
+		}
+		removing := d.totalChildrenRec(candidate.Key())
+		flat := d.sizeRecursive(candidate.Key())
+		if flat == nil {
+			leaves = leaves[1:]
+			continue
+		}
+		// Appears to be printed with _MINIO_SERVER_DEBUG=off
+		// console.Debugf("compacting %v, removing %d children\n", candidate, removing)
+
+		flat.Compacted = true
+		d.deleteRecursive(candidate)
+		d.replaceHashed(candidate, nil, *flat)
+
+		// Remove top entry and subtract removed children.
+		remove -= removing
+		leaves = leaves[1:]
+	}
+}
+
 // StringAll returns a detailed string representation of all entries in the cache.
 func (d *dataUsageCache) StringAll() string {
 	s := fmt.Sprintf("info:%+v\n", d.Info)
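reduceChildrenOf collects every branch that still has children, sorts ascending by object count, and compacts the smallest until the recursive child count fits the limit. The same shape in miniature, on a plain tree instead of the hashed cache; types and helpers here are illustrative only (and in the real code a compacted subtree's totals are rolled into the surviving entry first):

package main

import (
	"fmt"
	"sort"
)

type node struct {
	name     string
	objects  int
	children []*node
}

// countChildren returns the recursive number of child nodes.
func countChildren(n *node) int {
	total := len(n.children)
	for _, c := range n.children {
		total += countChildren(c)
	}
	return total
}

// compactSmallest folds whole subtrees away, cheapest first, until the
// tree holds at most limit recursive children.
func compactSmallest(root *node, limit int) {
	for countChildren(root) > limit {
		// Gather candidates: direct children that still have subtrees.
		var cands []*node
		for _, c := range root.children {
			if len(c.children) > 0 {
				cands = append(cands, c)
			}
		}
		if len(cands) == 0 {
			return
		}
		sort.Slice(cands, func(i, j int) bool { return cands[i].objects < cands[j].objects })
		cands[0].children = nil // compact the branch with the fewest objects
	}
}

func main() {
	root := &node{name: "bucket"}
	for i := 0; i < 5; i++ {
		c := &node{name: fmt.Sprint("dir", i), objects: i * 100}
		c.children = []*node{{name: "sub"}}
		root.children = append(root.children, c)
	}
	compactSmallest(root, 6)
	fmt.Println(countChildren(root)) // <= 6
}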
@@ -372,7 +524,7 @@ func (h dataUsageHash) String() string {
 	return string(h)
 }

-// String returns a human readable representation of the string.
+// Key returns the key.
 func (h dataUsageHash) Key() string {
 	return string(h)
 }
@@ -421,17 +573,20 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]madmi
 			continue
 		}
 		flat := d.flatten(*e)
-		dst[bucket.Name] = madmin.BucketUsageInfo{
-			Size:                    uint64(flat.Size),
-			ObjectsCount:            flat.Objects,
-			ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
-			ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
-			ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
-			ReplicationPendingCount: flat.ReplicationStats.PendingCount,
-			ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
-			ReplicaSize:             flat.ReplicationStats.ReplicaSize,
-			ObjectSizesHistogram:    flat.ObjSizes.toMap(),
+		bui := madmin.BucketUsageInfo{
+			Size:                 uint64(flat.Size),
+			ObjectsCount:         flat.Objects,
+			ObjectSizesHistogram: flat.ObjSizes.toMap(),
 		}
+		if flat.ReplicationStats != nil {
+			bui.ReplicationPendingSize = flat.ReplicationStats.PendingSize
+			bui.ReplicatedSize = flat.ReplicationStats.ReplicatedSize
+			bui.ReplicationFailedSize = flat.ReplicationStats.FailedSize
+			bui.ReplicationPendingCount = flat.ReplicationStats.PendingCount
+			bui.ReplicationFailedCount = flat.ReplicationStats.FailedCount
+			bui.ReplicaSize = flat.ReplicationStats.ReplicaSize
+		}
+		dst[bucket.Name] = bui
 	}
 	return dst
 }
@@ -444,17 +599,20 @@ func (d *dataUsageCache) bucketUsageInfo(bucket string) madmin.BucketUsageInfo {
 		return madmin.BucketUsageInfo{}
 	}
 	flat := d.flatten(*e)
-	return madmin.BucketUsageInfo{
-		Size:                    uint64(flat.Size),
-		ObjectsCount:            flat.Objects,
-		ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
-		ReplicationPendingCount: flat.ReplicationStats.PendingCount,
-		ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
-		ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
-		ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
-		ReplicaSize:             flat.ReplicationStats.ReplicaSize,
-		ObjectSizesHistogram:    flat.ObjSizes.toMap(),
+	bui := madmin.BucketUsageInfo{
+		Size:                 uint64(flat.Size),
+		ObjectsCount:         flat.Objects,
+		ObjectSizesHistogram: flat.ObjSizes.toMap(),
 	}
+	if flat.ReplicationStats != nil {
+		bui.ReplicationPendingSize = flat.ReplicationStats.PendingSize
+		bui.ReplicatedSize = flat.ReplicationStats.ReplicatedSize
+		bui.ReplicationFailedSize = flat.ReplicationStats.FailedSize
+		bui.ReplicationPendingCount = flat.ReplicationStats.PendingCount
+		bui.ReplicationFailedCount = flat.ReplicationStats.FailedCount
+		bui.ReplicaSize = flat.ReplicationStats.ReplicaSize
+	}
+	return bui
 }

 // sizeRecursive returns the path as a flattened entry.
@@ -467,6 +625,19 @@ func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
 	return &flat
 }

+// totalChildrenRec returns the total number of children recorded.
+func (d *dataUsageCache) totalChildrenRec(path string) int {
+	root := d.find(path)
+	if root == nil || len(root.Children) == 0 {
+		return 0
+	}
+	n := len(root.Children)
+	for ch := range root.Children {
+		n += d.totalChildrenRec(ch)
+	}
+	return n
+}
+
 // root returns the root of the cache.
 func (d *dataUsageCache) root() *dataUsageEntry {
 	return d.find(d.Info.Name)
@@ -529,6 +700,9 @@ type objectIO interface {
 // Only backend errors are returned as errors.
 // If the object is not found or unable to deserialize d is cleared and nil error is returned.
 func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
+	// Abandon if more than 5 minutes, so we don't hold up scanner.
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
 	r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
 	if err != nil {
 		switch err.(type) {
@@ -562,6 +736,9 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
 		return err
 	}

+	// Abandon if more than 5 minutes, so we don't hold up scanner.
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
 	_, err = store.PutObject(ctx,
 		dataUsageBucket,
 		name,
@@ -577,16 +754,17 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
 // Bumping the cache version will drop data from previous versions
 // and write new data with the new version.
 const (
-	dataUsageCacheVerV4 = 4
-	dataUsageCacheVerV3 = 3
-	dataUsageCacheVerV2 = 2
-	dataUsageCacheVerV1 = 1
+	dataUsageCacheVerCurrent = 5
+	dataUsageCacheVerV4      = 4
+	dataUsageCacheVerV3      = 3
+	dataUsageCacheVerV2      = 2
+	dataUsageCacheVerV1      = 1
 )

 // serialize the contents of the cache.
 func (d *dataUsageCache) serializeTo(dst io.Writer) error {
 	// Add version and compress.
-	_, err := dst.Write([]byte{dataUsageCacheVerV4})
+	_, err := dst.Write([]byte{dataUsageCacheVerCurrent})
 	if err != nil {
 		return err
 	}
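serializeTo writes a single version byte ahead of the compressed msgp payload, which is what lets deserialize dispatch on old formats below. The framing idea in a runnable sketch — gob with no compression stands in for msgp+zstd here, so this needs no MinIO dependencies and is not the actual wire format:

package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
)

const verCurrent = 5

type payload struct{ Objects uint64 }

func serialize(p payload) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte(verCurrent) // leading version byte, as in serializeTo
	if err := gob.NewEncoder(&buf).Encode(p); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func deserialize(b []byte) (payload, error) {
	var p payload
	if len(b) == 0 {
		return p, errors.New("short input")
	}
	switch b[0] {
	case verCurrent:
		err := gob.NewDecoder(bytes.NewReader(b[1:])).Decode(&p)
		return p, err
	default:
		// Older versions would be migrated here, as deserialize does
		// for V2..V4 in the diff below.
		return p, fmt.Errorf("unknown version: %d", b[0])
	}
}

func main() {
	b, _ := serialize(payload{Objects: 42})
	p, err := deserialize(b)
	fmt.Println(p.Objects, err) // 42 <nil>
}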
@@ -620,7 +798,8 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 	if n != 1 {
 		return io.ErrUnexpectedEOF
 	}
-	switch b[0] {
+	ver := b[0]
+	switch ver {
 	case dataUsageCacheVerV1:
 		return errors.New("cache version deprecated (will autoupdate)")
 	case dataUsageCacheVerV2:
@@ -640,10 +819,11 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
 			d.Cache[k] = dataUsageEntry{
-				Size:     v.Size,
-				Objects:  v.Objects,
-				ObjSizes: v.ObjSizes,
-				Children: v.Children,
+				Size:      v.Size,
+				Objects:   v.Objects,
+				ObjSizes:  v.ObjSizes,
+				Children:  v.Children,
+				Compacted: len(v.Children) == 0 && k != d.Info.Name,
 			}
 		}
 		return nil
@@ -662,18 +842,23 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 		d.Disks = dold.Disks
 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
 		for k, v := range dold.Cache {
-			d.Cache[k] = dataUsageEntry{
+			due := dataUsageEntry{
 				Size:     v.Size,
 				Objects:  v.Objects,
 				ObjSizes: v.ObjSizes,
 				Children: v.Children,
-				ReplicationStats: replicationStats{
+			}
+			if v.ReplicatedSize > 0 || v.ReplicaSize > 0 || v.ReplicationFailedSize > 0 || v.ReplicationPendingSize > 0 {
+				due.ReplicationStats = &replicationStats{
 					ReplicatedSize: v.ReplicatedSize,
 					ReplicaSize:    v.ReplicaSize,
 					FailedSize:     v.ReplicationFailedSize,
 					PendingSize:    v.ReplicationPendingSize,
-				},
+				}
 			}
+			due.Compacted = len(due.Children) == 0 && k != d.Info.Name
+
+			d.Cache[k] = due
 		}
 		return nil
 	case dataUsageCacheVerV4:
@@ -683,10 +868,48 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
 			return err
 		}
 		defer dec.Close()
+		dold := &dataUsageCacheV4{}
+		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
+			return err
+		}
+		d.Info = dold.Info
+		d.Disks = dold.Disks
+		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
+		for k, v := range dold.Cache {
+			due := dataUsageEntry{
+				Size:     v.Size,
+				Objects:  v.Objects,
+				ObjSizes: v.ObjSizes,
+				Children: v.Children,
+			}
+			empty := replicationStats{}
+			if v.ReplicationStats != empty {
+				due.ReplicationStats = &v.ReplicationStats
+			}
+			due.Compacted = len(due.Children) == 0 && k != d.Info.Name
+
+			d.Cache[k] = due
+		}
+
+		// Populate compacted value and remove unneeded replica stats.
+		empty := replicationStats{}
+		for k, e := range d.Cache {
+			if e.ReplicationStats != nil && *e.ReplicationStats == empty {
+				e.ReplicationStats = nil
+			}
+
+			d.Cache[k] = e
+		}
+	case dataUsageCacheVerCurrent:
+		// Zstd compressed.
+		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
+		if err != nil {
+			return err
+		}
+		defer dec.Close()
 		return d.DecodeMsg(msgp.NewReader(dec))
 	}
-	return fmt.Errorf("dataUsageCache: unknown version: %d", int(b[0]))
+	return fmt.Errorf("dataUsageCache: unknown version: %d", int(ver))
 }

 // Trim this from start+end of hashes.
@@ -717,6 +940,10 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
 		err = msgp.WrapError(err)
 		return
 	}
+	if zb0002 == 0 {
+		*z = nil
+		return
+	}
 	*z = make(dataUsageHashMap, zb0002)
 	for i := uint32(0); i < zb0002; i++ {
 		{
@@ -767,6 +994,10 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
 		err = msgp.WrapError(err)
 		return
 	}
+	if zb0002 == 0 {
+		*z = nil
+		return bts, nil
+	}
 	*z = make(dataUsageHashMap, zb0002)
 	for i := uint32(0); i < zb0002; i++ {
 		{
@@ -593,92 +593,6 @@ func (z *dataUsageCacheV2) DecodeMsg(dc *msgp.Reader) (err error) {
 	return
 }

-// EncodeMsg implements msgp.Encodable
-func (z *dataUsageCacheV2) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 3
-	// write "Info"
-	err = en.Append(0x83, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
-	if err != nil {
-		return
-	}
-	err = z.Info.EncodeMsg(en)
-	if err != nil {
-		err = msgp.WrapError(err, "Info")
-		return
-	}
-	// write "Disks"
-	err = en.Append(0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
-	if err != nil {
-		return
-	}
-	err = en.WriteArrayHeader(uint32(len(z.Disks)))
-	if err != nil {
-		err = msgp.WrapError(err, "Disks")
-		return
-	}
-	for za0001 := range z.Disks {
-		err = en.WriteString(z.Disks[za0001])
-		if err != nil {
-			err = msgp.WrapError(err, "Disks", za0001)
-			return
-		}
-	}
-	// write "Cache"
-	err = en.Append(0xa5, 0x43, 0x61, 0x63, 0x68, 0x65)
-	if err != nil {
-		return
-	}
-	err = en.WriteMapHeader(uint32(len(z.Cache)))
-	if err != nil {
-		err = msgp.WrapError(err, "Cache")
-		return
-	}
-	for za0002, za0003 := range z.Cache {
-		err = en.WriteString(za0002)
-		if err != nil {
-			err = msgp.WrapError(err, "Cache")
-			return
-		}
-		err = za0003.EncodeMsg(en)
-		if err != nil {
-			err = msgp.WrapError(err, "Cache", za0002)
-			return
-		}
-	}
-	return
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *dataUsageCacheV2) MarshalMsg(b []byte) (o []byte, err error) {
-	o = msgp.Require(b, z.Msgsize())
-	// map header, size 3
-	// string "Info"
-	o = append(o, 0x83, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
-	o, err = z.Info.MarshalMsg(o)
-	if err != nil {
-		err = msgp.WrapError(err, "Info")
-		return
-	}
-	// string "Disks"
-	o = append(o, 0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
-	o = msgp.AppendArrayHeader(o, uint32(len(z.Disks)))
-	for za0001 := range z.Disks {
-		o = msgp.AppendString(o, z.Disks[za0001])
-	}
-	// string "Cache"
-	o = append(o, 0xa5, 0x43, 0x61, 0x63, 0x68, 0x65)
-	o = msgp.AppendMapHeader(o, uint32(len(z.Cache)))
-	for za0002, za0003 := range z.Cache {
-		o = msgp.AppendString(o, za0002)
-		o, err = za0003.MarshalMsg(o)
-		if err != nil {
-			err = msgp.WrapError(err, "Cache", za0002)
-			return
-		}
-	}
-	return
-}
-
 // UnmarshalMsg implements msgp.Unmarshaler
 func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
 	var field []byte
@@ -864,92 +778,6 @@ func (z *dataUsageCacheV3) DecodeMsg(dc *msgp.Reader) (err error) {
 	return
 }

-// EncodeMsg implements msgp.Encodable
-func (z *dataUsageCacheV3) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 3
-	// write "Info"
-	err = en.Append(0x83, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
-	if err != nil {
-		return
-	}
-	err = z.Info.EncodeMsg(en)
-	if err != nil {
-		err = msgp.WrapError(err, "Info")
-		return
-	}
-	// write "Disks"
-	err = en.Append(0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
-	if err != nil {
-		return
-	}
-	err = en.WriteArrayHeader(uint32(len(z.Disks)))
-	if err != nil {
-		err = msgp.WrapError(err, "Disks")
-		return
-	}
-	for za0001 := range z.Disks {
-		err = en.WriteString(z.Disks[za0001])
-		if err != nil {
-			err = msgp.WrapError(err, "Disks", za0001)
-			return
-		}
-	}
-	// write "Cache"
-	err = en.Append(0xa5, 0x43, 0x61, 0x63, 0x68, 0x65)
-	if err != nil {
-		return
-	}
-	err = en.WriteMapHeader(uint32(len(z.Cache)))
-	if err != nil {
-		err = msgp.WrapError(err, "Cache")
-		return
-	}
-	for za0002, za0003 := range z.Cache {
-		err = en.WriteString(za0002)
-		if err != nil {
-			err = msgp.WrapError(err, "Cache")
-			return
-		}
-		err = za0003.EncodeMsg(en)
-		if err != nil {
-			err = msgp.WrapError(err, "Cache", za0002)
-			return
-		}
-	}
-	return
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (z *dataUsageCacheV3) MarshalMsg(b []byte) (o []byte, err error) {
-	o = msgp.Require(b, z.Msgsize())
-	// map header, size 3
-	// string "Info"
-	o = append(o, 0x83, 0xa4, 0x49, 0x6e, 0x66, 0x6f)
-	o, err = z.Info.MarshalMsg(o)
-	if err != nil {
-		err = msgp.WrapError(err, "Info")
-		return
-	}
-	// string "Disks"
-	o = append(o, 0xa5, 0x44, 0x69, 0x73, 0x6b, 0x73)
-	o = msgp.AppendArrayHeader(o, uint32(len(z.Disks)))
-	for za0001 := range z.Disks {
-		o = msgp.AppendString(o, z.Disks[za0001])
-	}
-	// string "Cache"
-	o = append(o, 0xa5, 0x43, 0x61, 0x63, 0x68, 0x65)
-	o = msgp.AppendMapHeader(o, uint32(len(z.Cache)))
-	for za0002, za0003 := range z.Cache {
-		o = msgp.AppendString(o, za0002)
-		o, err = za0003.MarshalMsg(o)
-		if err != nil {
-			err = msgp.WrapError(err, "Cache", za0002)
-			return
-		}
-	}
-	return
-}
-
 // UnmarshalMsg implements msgp.Unmarshaler
 func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) {
 	var field []byte
@@ -1051,6 +879,191 @@ func (z *dataUsageCacheV3) Msgsize() (s int) {
 	return
 }

+// DecodeMsg implements msgp.Decodable
+func (z *dataUsageCacheV4) DecodeMsg(dc *msgp.Reader) (err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, err = dc.ReadMapHeader()
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, err = dc.ReadMapKeyPtr()
+		if err != nil {
+			err = msgp.WrapError(err)
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "Info":
+			err = z.Info.DecodeMsg(dc)
+			if err != nil {
+				err = msgp.WrapError(err, "Info")
+				return
+			}
+		case "Disks":
+			var zb0002 uint32
+			zb0002, err = dc.ReadArrayHeader()
+			if err != nil {
+				err = msgp.WrapError(err, "Disks")
+				return
+			}
+			if cap(z.Disks) >= int(zb0002) {
+				z.Disks = (z.Disks)[:zb0002]
+			} else {
+				z.Disks = make([]string, zb0002)
+			}
+			for za0001 := range z.Disks {
+				z.Disks[za0001], err = dc.ReadString()
+				if err != nil {
+					err = msgp.WrapError(err, "Disks", za0001)
+					return
+				}
+			}
+		case "Cache":
+			var zb0003 uint32
+			zb0003, err = dc.ReadMapHeader()
+			if err != nil {
+				err = msgp.WrapError(err, "Cache")
+				return
+			}
+			if z.Cache == nil {
+				z.Cache = make(map[string]dataUsageEntryV4, zb0003)
+			} else if len(z.Cache) > 0 {
+				for key := range z.Cache {
+					delete(z.Cache, key)
+				}
+			}
+			for zb0003 > 0 {
+				zb0003--
+				var za0002 string
+				var za0003 dataUsageEntryV4
+				za0002, err = dc.ReadString()
+				if err != nil {
+					err = msgp.WrapError(err, "Cache")
+					return
+				}
+				err = za0003.DecodeMsg(dc)
+				if err != nil {
+					err = msgp.WrapError(err, "Cache", za0002)
+					return
+				}
+				z.Cache[za0002] = za0003
+			}
+		default:
+			err = dc.Skip()
+			if err != nil {
+				err = msgp.WrapError(err)
+				return
+			}
+		}
+	}
+	return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) {
+	var field []byte
+	_ = field
+	var zb0001 uint32
+	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	for zb0001 > 0 {
+		zb0001--
+		field, bts, err = msgp.ReadMapKeyZC(bts)
+		if err != nil {
+			err = msgp.WrapError(err)
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "Info":
+			bts, err = z.Info.UnmarshalMsg(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Info")
+				return
+			}
+		case "Disks":
+			var zb0002 uint32
+			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Disks")
+				return
+			}
+			if cap(z.Disks) >= int(zb0002) {
+				z.Disks = (z.Disks)[:zb0002]
+			} else {
+				z.Disks = make([]string, zb0002)
+			}
+			for za0001 := range z.Disks {
+				z.Disks[za0001], bts, err = msgp.ReadStringBytes(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "Disks", za0001)
+					return
+				}
+			}
+		case "Cache":
+			var zb0003 uint32
+			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Cache")
+				return
+			}
+			if z.Cache == nil {
+				z.Cache = make(map[string]dataUsageEntryV4, zb0003)
+			} else if len(z.Cache) > 0 {
+				for key := range z.Cache {
+					delete(z.Cache, key)
+				}
+			}
+			for zb0003 > 0 {
+				var za0002 string
+				var za0003 dataUsageEntryV4
+				zb0003--
+				za0002, bts, err = msgp.ReadStringBytes(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "Cache")
+					return
+				}
+				bts, err = za0003.UnmarshalMsg(bts)
+				if err != nil {
+					err = msgp.WrapError(err, "Cache", za0002)
+					return
+				}
+				z.Cache[za0002] = za0003
+			}
+		default:
+			bts, err = msgp.Skip(bts)
+			if err != nil {
+				err = msgp.WrapError(err)
+				return
+			}
+		}
+	}
+	o = bts
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *dataUsageCacheV4) Msgsize() (s int) {
+	s = 1 + 5 + z.Info.Msgsize() + 6 + msgp.ArrayHeaderSize
+	for za0001 := range z.Disks {
+		s += msgp.StringPrefixSize + len(z.Disks[za0001])
+	}
+	s += 6 + msgp.MapHeaderSize
+	if z.Cache != nil {
+		for za0002, za0003 := range z.Cache {
+			_ = za0003
+			s += msgp.StringPrefixSize + len(za0002) + za0003.Msgsize()
+		}
+	}
+	return
+}
+
 // DecodeMsg implements msgp.Decodable
 func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
 	var zb0001 uint32
@ -1059,8 +1072,8 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 5 {
|
||||
err = msgp.ArrayError{Wanted: 5, Got: zb0001}
|
||||
if zb0001 != 7 {
|
||||
err = msgp.ArrayError{Wanted: 7, Got: zb0001}
|
||||
return
|
||||
}
|
||||
err = z.Children.DecodeMsg(dc)
|
||||
@ -1078,6 +1091,11 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err, "Objects")
|
||||
return
|
||||
}
|
||||
z.Versions, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Versions")
|
||||
return
|
||||
}
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
@ -1095,9 +1113,26 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
err = z.ReplicationStats.DecodeMsg(dc)
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
return
|
||||
}
|
||||
z.ReplicationStats = nil
|
||||
} else {
|
||||
if z.ReplicationStats == nil {
|
||||
z.ReplicationStats = new(replicationStats)
|
||||
}
|
||||
err = z.ReplicationStats.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
return
|
||||
}
|
||||
}
|
||||
z.Compacted, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
err = msgp.WrapError(err, "Compacted")
|
||||
return
|
||||
}
|
||||
return
|
||||
@ -1105,8 +1140,8 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *dataUsageEntry) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// array header, size 5
|
||||
err = en.Append(0x95)
|
||||
// array header, size 7
|
||||
err = en.Append(0x97)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -1125,6 +1160,11 @@ func (z *dataUsageEntry) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = msgp.WrapError(err, "Objects")
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.Versions)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Versions")
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(dataUsageBucketLen))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes")
|
||||
@ -1137,9 +1177,21 @@ func (z *dataUsageEntry) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
err = z.ReplicationStats.EncodeMsg(en)
|
||||
if z.ReplicationStats == nil {
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = z.ReplicationStats.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
return
|
||||
}
|
||||
}
|
||||
err = en.WriteBool(z.Compacted)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
err = msgp.WrapError(err, "Compacted")
|
||||
return
|
||||
}
|
||||
return
|
||||
@ -1148,8 +1200,8 @@ func (z *dataUsageEntry) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *dataUsageEntry) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// array header, size 5
|
||||
o = append(o, 0x95)
|
||||
// array header, size 7
|
||||
o = append(o, 0x97)
|
||||
o, err = z.Children.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Children")
|
||||
@ -1157,15 +1209,21 @@ func (z *dataUsageEntry) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
}
|
||||
o = msgp.AppendInt64(o, z.Size)
|
||||
o = msgp.AppendUint64(o, z.Objects)
|
||||
o = msgp.AppendUint64(o, z.Versions)
|
||||
o = msgp.AppendArrayHeader(o, uint32(dataUsageBucketLen))
|
||||
for za0001 := range z.ObjSizes {
|
||||
o = msgp.AppendUint64(o, z.ObjSizes[za0001])
|
||||
}
|
||||
o, err = z.ReplicationStats.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
return
|
||||
if z.ReplicationStats == nil {
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o, err = z.ReplicationStats.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
return
|
||||
}
|
||||
}
|
||||
o = msgp.AppendBool(o, z.Compacted)
|
||||
return
|
||||
}
|
||||
|
||||
@ -1177,8 +1235,8 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 5 {
|
||||
err = msgp.ArrayError{Wanted: 5, Got: zb0001}
|
||||
if zb0001 != 7 {
|
||||
err = msgp.ArrayError{Wanted: 7, Got: zb0001}
|
||||
return
|
||||
}
|
||||
bts, err = z.Children.UnmarshalMsg(bts)
|
||||
@ -1196,6 +1254,11 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err, "Objects")
|
||||
return
|
||||
}
|
||||
z.Versions, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Versions")
|
||||
return
|
||||
}
|
||||
var zb0002 uint32
|
||||
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
@ -1213,9 +1276,25 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
bts, err = z.ReplicationStats.UnmarshalMsg(bts)
|
||||
if msgp.IsNil(bts) {
|
||||
bts, err = msgp.ReadNilBytes(bts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
z.ReplicationStats = nil
|
||||
} else {
|
||||
if z.ReplicationStats == nil {
|
||||
z.ReplicationStats = new(replicationStats)
|
||||
}
|
||||
bts, err = z.ReplicationStats.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
return
|
||||
}
|
||||
}
|
||||
z.Compacted, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
err = msgp.WrapError(err, "Compacted")
|
||||
return
|
||||
}
|
||||
o = bts
|
||||
@ -1224,7 +1303,13 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *dataUsageEntry) Msgsize() (s int) {
|
||||
s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + z.ReplicationStats.Msgsize()
|
||||
s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size))
|
||||
if z.ReplicationStats == nil {
|
||||
s += msgp.NilSize
|
||||
} else {
|
||||
s += z.ReplicationStats.Msgsize()
|
||||
}
|
||||
s += msgp.BoolSize
|
||||
return
|
||||
}
|
||||
|
||||
@ -1275,62 +1360,6 @@ func (z *dataUsageEntryV2) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *dataUsageEntryV2) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// array header, size 4
|
||||
err = en.Append(0x94)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt64(z.Size)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.Objects)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Objects")
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(dataUsageBucketLen))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes")
|
||||
return
|
||||
}
|
||||
for za0001 := range z.ObjSizes {
|
||||
err = en.WriteUint64(z.ObjSizes[za0001])
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
err = z.Children.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Children")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *dataUsageEntryV2) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// array header, size 4
|
||||
o = append(o, 0x94)
|
||||
o = msgp.AppendInt64(o, z.Size)
|
||||
o = msgp.AppendUint64(o, z.Objects)
|
||||
o = msgp.AppendArrayHeader(o, uint32(dataUsageBucketLen))
|
||||
for za0001 := range z.ObjSizes {
|
||||
o = msgp.AppendUint64(o, z.ObjSizes[za0001])
|
||||
}
|
||||
o, err = z.Children.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Children")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *dataUsageEntryV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var zb0001 uint32
|
||||
@ -1452,86 +1481,6 @@ func (z *dataUsageEntryV3) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *dataUsageEntryV3) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// array header, size 8
|
||||
err = en.Append(0x98)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt64(z.Size)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ReplicatedSize)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicatedSize")
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ReplicationPendingSize)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationPendingSize")
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ReplicationFailedSize)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationFailedSize")
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.ReplicaSize)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicaSize")
|
||||
return
|
||||
}
|
||||
err = en.WriteUint64(z.Objects)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Objects")
|
||||
return
|
||||
}
|
||||
err = en.WriteArrayHeader(uint32(dataUsageBucketLen))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes")
|
||||
return
|
||||
}
|
||||
for za0001 := range z.ObjSizes {
|
||||
err = en.WriteUint64(z.ObjSizes[za0001])
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
err = z.Children.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Children")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *dataUsageEntryV3) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// array header, size 8
|
||||
o = append(o, 0x98)
|
||||
o = msgp.AppendInt64(o, z.Size)
|
||||
o = msgp.AppendUint64(o, z.ReplicatedSize)
|
||||
o = msgp.AppendUint64(o, z.ReplicationPendingSize)
|
||||
o = msgp.AppendUint64(o, z.ReplicationFailedSize)
|
||||
o = msgp.AppendUint64(o, z.ReplicaSize)
|
||||
o = msgp.AppendUint64(o, z.Objects)
|
||||
o = msgp.AppendArrayHeader(o, uint32(dataUsageBucketLen))
|
||||
for za0001 := range z.ObjSizes {
|
||||
o = msgp.AppendUint64(o, z.ObjSizes[za0001])
|
||||
}
|
||||
o, err = z.Children.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Children")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *dataUsageEntryV3) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var zb0001 uint32
|
||||
@ -1606,6 +1555,117 @@ func (z *dataUsageEntryV3) Msgsize() (s int) {
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *dataUsageEntryV4) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 5 {
|
||||
err = msgp.ArrayError{Wanted: 5, Got: zb0001}
|
||||
return
|
||||
}
|
||||
err = z.Children.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Children")
|
||||
return
|
||||
}
|
||||
z.Size, err = dc.ReadInt64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
z.Objects, err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Objects")
|
||||
return
|
||||
}
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadArrayHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes")
|
||||
return
|
||||
}
|
||||
if zb0002 != uint32(dataUsageBucketLen) {
|
||||
err = msgp.ArrayError{Wanted: uint32(dataUsageBucketLen), Got: zb0002}
|
||||
return
|
||||
}
|
||||
for za0001 := range z.ObjSizes {
|
||||
z.ObjSizes[za0001], err = dc.ReadUint64()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
err = z.ReplicationStats.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *dataUsageEntryV4) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 5 {
|
||||
err = msgp.ArrayError{Wanted: 5, Got: zb0001}
|
||||
return
|
||||
}
|
||||
bts, err = z.Children.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Children")
|
||||
return
|
||||
}
|
||||
z.Size, bts, err = msgp.ReadInt64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Size")
|
||||
return
|
||||
}
|
||||
z.Objects, bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Objects")
|
||||
return
|
||||
}
|
||||
var zb0002 uint32
|
||||
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes")
|
||||
return
|
||||
}
|
||||
if zb0002 != uint32(dataUsageBucketLen) {
|
||||
err = msgp.ArrayError{Wanted: uint32(dataUsageBucketLen), Got: zb0002}
|
||||
return
|
||||
}
|
||||
for za0001 := range z.ObjSizes {
|
||||
z.ObjSizes[za0001], bts, err = msgp.ReadUint64Bytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjSizes", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
bts, err = z.ReplicationStats.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ReplicationStats")
|
||||
return
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *dataUsageEntryV4) Msgsize() (s int) {
|
||||
s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + z.ReplicationStats.Msgsize()
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *dataUsageHash) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
{
|
||||
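A recurring shape in the regenerated dataUsageEntry methods above is msgp's optional-pointer handling for the new *replicationStats field: the encoder writes a MessagePack nil when the pointer is unset, and the decoder allocates the struct only when the stream actually carries a value. A minimal sketch of that pattern with a hypothetical Stats/Entry pair (not the generated code; assumes the github.com/tinylib/msgp/msgp import):

// Sketch only: hypothetical Entry with an optional *Stats field,
// mirroring the nil handling added for dataUsageEntry.ReplicationStats.
type Stats struct{ Size int64 }

type Entry struct{ Stats *Stats }

func (e *Entry) EncodeMsg(en *msgp.Writer) error {
	if e.Stats == nil {
		// An absent pointer is written as a MessagePack nil.
		return en.WriteNil()
	}
	return en.WriteInt64(e.Stats.Size)
}

func (e *Entry) DecodeMsg(dc *msgp.Reader) (err error) {
	if dc.IsNil() {
		// Consume the nil and leave the pointer unset.
		if err = dc.ReadNil(); err != nil {
			return err
		}
		e.Stats = nil
		return nil
	}
	if e.Stats == nil {
		e.Stats = new(Stats)
	}
	e.Stats.Size, err = dc.ReadInt64()
	return err
}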
@@ -235,232 +235,6 @@ func BenchmarkDecodedataUsageCacheInfo(b *testing.B) {
	}
}

func TestMarshalUnmarshaldataUsageCacheV2(t *testing.T) {
	v := dataUsageCacheV2{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgdataUsageCacheV2(b *testing.B) {
	v := dataUsageCacheV2{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgdataUsageCacheV2(b *testing.B) {
	v := dataUsageCacheV2{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshaldataUsageCacheV2(b *testing.B) {
	v := dataUsageCacheV2{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodedataUsageCacheV2(t *testing.T) {
	v := dataUsageCacheV2{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodedataUsageCacheV2 Msgsize() is inaccurate")
	}

	vn := dataUsageCacheV2{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodedataUsageCacheV2(b *testing.B) {
	v := dataUsageCacheV2{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodedataUsageCacheV2(b *testing.B) {
	v := dataUsageCacheV2{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshaldataUsageCacheV3(t *testing.T) {
	v := dataUsageCacheV3{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgdataUsageCacheV3(b *testing.B) {
	v := dataUsageCacheV3{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgdataUsageCacheV3(b *testing.B) {
	v := dataUsageCacheV3{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshaldataUsageCacheV3(b *testing.B) {
	v := dataUsageCacheV3{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodedataUsageCacheV3(t *testing.T) {
	v := dataUsageCacheV3{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodedataUsageCacheV3 Msgsize() is inaccurate")
	}

	vn := dataUsageCacheV3{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodedataUsageCacheV3(b *testing.B) {
	v := dataUsageCacheV3{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodedataUsageCacheV3(b *testing.B) {
	v := dataUsageCacheV3{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshaldataUsageEntry(t *testing.T) {
	v := dataUsageEntry{}
	bts, err := v.MarshalMsg(nil)
@@ -574,232 +348,6 @@ func BenchmarkDecodedataUsageEntry(b *testing.B) {
	}
}

func TestMarshalUnmarshaldataUsageEntryV2(t *testing.T) {
	v := dataUsageEntryV2{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgdataUsageEntryV2(b *testing.B) {
	v := dataUsageEntryV2{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgdataUsageEntryV2(b *testing.B) {
	v := dataUsageEntryV2{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshaldataUsageEntryV2(b *testing.B) {
	v := dataUsageEntryV2{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodedataUsageEntryV2(t *testing.T) {
	v := dataUsageEntryV2{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodedataUsageEntryV2 Msgsize() is inaccurate")
	}

	vn := dataUsageEntryV2{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodedataUsageEntryV2(b *testing.B) {
	v := dataUsageEntryV2{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodedataUsageEntryV2(b *testing.B) {
	v := dataUsageEntryV2{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshaldataUsageEntryV3(t *testing.T) {
	v := dataUsageEntryV3{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgdataUsageEntryV3(b *testing.B) {
	v := dataUsageEntryV3{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgdataUsageEntryV3(b *testing.B) {
	v := dataUsageEntryV3{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshaldataUsageEntryV3(b *testing.B) {
	v := dataUsageEntryV3{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodedataUsageEntryV3(t *testing.T) {
	v := dataUsageEntryV3{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodedataUsageEntryV3 Msgsize() is inaccurate")
	}

	vn := dataUsageEntryV3{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodedataUsageEntryV3(b *testing.B) {
	v := dataUsageEntryV3{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodedataUsageEntryV3(b *testing.B) {
	v := dataUsageEntryV3{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalreplicationStats(t *testing.T) {
	v := replicationStats{}
	bts, err := v.MarshalMsg(nil)
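Every generated test above checks the same roundtrip property: MarshalMsg followed by UnmarshalMsg must succeed and leave no bytes over. A generic helper sketch of that property (our own illustration, not part of the generated file; assumes the testing and github.com/tinylib/msgp/msgp imports):

// roundtrip is a hypothetical helper expressing the property the
// generated tests verify for each versioned type.
func roundtrip(t *testing.T, v interface {
	msgp.Marshaler
	msgp.Unmarshaler
}) {
	t.Helper()
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}
}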
@@ -20,6 +20,7 @@ package cmd
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
@@ -60,6 +61,7 @@ func TestDataUsageUpdate(t *testing.T) {
				return
			}
			sizeS.totalSize = s.Size()
			sizeS.versions++
			return sizeS, nil
		}
		return
@@ -93,36 +95,13 @@ func TestDataUsageUpdate(t *testing.T) {
		},
		{
			path: "/dir1",
			size: 2000,
			objs: 1,
			oSizes: sizeHistogram{1: 1},
			size: 1302010,
			objs: 5,
			oSizes: sizeHistogram{0: 1, 1: 4},
		},
		{
			path: "/dir1/dira",
			flatten: true,
			size: 1300010,
			objs: 4,
			oSizes: sizeHistogram{0: 1, 1: 3},
		},
		{
			path: "/dir1/dira/",
			flatten: true,
			size: 1300010,
			objs: 4,
			oSizes: sizeHistogram{0: 1, 1: 3},
		},
		{
			path: "/dir1",
			size: 2000,
			objs: 1,
			oSizes: sizeHistogram{0: 0, 1: 1},
		},
		{
			// Children are flattened
			path: "/dir1/dira/",
			size: 1300010,
			objs: 4,
			oSizes: sizeHistogram{0: 1, 1: 3},
			path: "/dir1/dira",
			isNil: true,
		},
		{
			path: "/nonexistying",
@@ -143,7 +122,6 @@ func TestDataUsageUpdate(t *testing.T) {
			if e == nil {
				t.Fatal("got nil result")
			}
			t.Log(e.Children)
			if w.flatten {
				*e = got.flatten(*e)
			}
@@ -153,6 +131,9 @@ func TestDataUsageUpdate(t *testing.T) {
			if e.Objects != uint64(w.objs) {
				t.Error("got objects", e.Objects, "want", w.objs)
			}
			if e.Versions != uint64(w.objs) {
				t.Error("got versions", e.Versions, "want", w.objs)
			}
			if e.ObjSizes != w.oSizes {
				t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
			}
@@ -184,80 +165,6 @@ func TestDataUsageUpdate(t *testing.T) {
			name: "rootfile3",
			size: 1000,
		},
	}
	createUsageTestFiles(t, base, bucket, files)
	got, err = scanDataFolder(context.Background(), base, got, getSize)
	if err != nil {
		t.Fatal(err)
	}

	want = []struct {
		path string
		isNil bool
		size, objs int
		flatten bool
		oSizes sizeHistogram
	}{
		{
			path: "/",
			size: 1363315,
			flatten: true,
			objs: 14,
			oSizes: sizeHistogram{0: 6, 1: 8},
		},
		{
			path: "/",
			size: 21000,
			objs: 3,
			oSizes: sizeHistogram{0: 1, 1: 2},
		},
		{
			path: "/newfolder",
			size: 5,
			objs: 3,
			oSizes: sizeHistogram{0: 3},
		},
		{
			path: "/dir1/dira",
			size: 1300010,
			flatten: true,
			objs: 4,
			oSizes: sizeHistogram{0: 1, 1: 3},
		},
		{
			path: "/nonexistying",
			isNil: true,
		},
	}

	for _, w := range want {
		t.Run(w.path, func(t *testing.T) {
			e := got.find(path.Join(bucket, w.path))
			if w.isNil {
				if e != nil {
					t.Error("want nil, got", e)
				}
				return
			}
			if e == nil {
				t.Fatal("got nil result")
			}
			if w.flatten {
				*e = got.flatten(*e)
			}
			if e.Size != int64(w.size) {
				t.Error("got size", e.Size, "want", w.size)
			}
			if e.Objects != uint64(w.objs) {
				t.Error("got objects", e.Objects, "want", w.objs)
			}
			if e.ObjSizes != w.oSizes {
				t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
			}
		})
	}

	files = []usageTestFile{
		{
			name: "dir1/dira/dirasub/fileindira2",
			size: 200,
@@ -292,11 +199,21 @@ func TestDataUsageUpdate(t *testing.T) {
			oSizes: sizeHistogram{0: 7, 1: 7},
		},
		{
			path: "/dir1/dira",
			size: 300210,
			objs: 4,
			flatten: true,
			oSizes: sizeHistogram{0: 2, 1: 2},
			path: "/dir1",
			size: 342210,
			objs: 7,
			flatten: false,
			oSizes: sizeHistogram{0: 2, 1: 5},
		},
		{
			path: "/newfolder",
			size: 5,
			objs: 3,
			oSizes: sizeHistogram{0: 3},
		},
		{
			path: "/nonexistying",
			isNil: true,
		},
	}

@@ -322,6 +239,9 @@ func TestDataUsageUpdate(t *testing.T) {
			if e.Objects != uint64(w.objs) {
				t.Error("got objects", e.Objects, "want", w.objs)
			}
			if e.Versions != uint64(w.objs) {
				t.Error("got versions", e.Versions, "want", w.objs)
			}
			if e.ObjSizes != w.oSizes {
				t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
			}
@@ -334,7 +254,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
	if err != nil {
		t.Skip(err)
	}
	base = filepath.Join(base, "bucket")
	scannerSleeper.Update(0, 0)
	defer os.RemoveAll(base)
	var files = []usageTestFile{
		{name: "bucket/rootfile", size: 10000},
@@ -347,6 +267,13 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
		{name: "bucket/dir1/dira/dirasub/sublevel3/dccccfile", size: 10},
	}
	createUsageTestFiles(t, base, "", files)
	const foldersBelow = 3
	const filesBelowT = dataScannerCompactLeastObject / 2
	const filesAboveT = dataScannerCompactAtFolders + 1
	const expectSize = foldersBelow*filesBelowT + filesAboveT

	generateUsageTestFiles(t, base, "bucket/dirwithalot", foldersBelow, filesBelowT, 1)
	generateUsageTestFiles(t, base, "bucket/dirwithevenmore", filesAboveT, 1, 1)

	getSize := func(item scannerItem) (sizeS sizeSummary, err error) {
		if item.Typ&os.ModeDir == 0 {
@@ -356,6 +283,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
				return
			}
			sizeS.totalSize = s.Size()
			sizeS.versions++
			return
		}
		return
@@ -381,9 +309,9 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
	}{
		{
			path: "flat",
			size: 1322310,
			objs: 8,
			oSizes: sizeHistogram{0: 2, 1: 6},
			size: 1322310 + expectSize,
			objs: 8 + expectSize,
			oSizes: sizeHistogram{0: 2 + expectSize, 1: 6},
		},
		{
			path: "bucket/",
@@ -392,22 +320,32 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
			oSizes: sizeHistogram{1: 2},
		},
		{
			// Gets compacted...
			path: "bucket/dir1",
			size: 2000,
			objs: 1,
			oSizes: sizeHistogram{1: 1},
			size: 1302010,
			objs: 5,
			oSizes: sizeHistogram{0: 1, 1: 4},
		},
		{
			path: "bucket/dir1/dira",
			size: 1300010,
			objs: 4,
			oSizes: sizeHistogram{0: 1, 1: 3},
			// Gets compacted at this level...
			path: "bucket/dirwithalot/0",
			size: filesBelowT,
			objs: filesBelowT,
			oSizes: sizeHistogram{0: filesBelowT},
		},
		{
			path: "bucket/dir1/dira/",
			size: 1300010,
			objs: 4,
			oSizes: sizeHistogram{0: 1, 1: 3},
			// Gets compacted at this level (below obj threshold)...
			path: "bucket/dirwithalot/0",
			size: filesBelowT,
			objs: filesBelowT,
			oSizes: sizeHistogram{0: filesBelowT},
		},
		{
			// Gets compacted at this level...
			path: "bucket/dirwithevenmore",
			size: filesAboveT,
			objs: filesAboveT,
			oSizes: sizeHistogram{0: filesAboveT},
		},
		{
			path: "bucket/nonexistying",
@@ -437,6 +375,9 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
			if e.Objects != uint64(w.objs) {
				t.Error("got objects", e.Objects, "want", w.objs)
			}
			if e.Versions != uint64(w.objs) {
				t.Error("got versions", e.Versions, "want", w.objs)
			}
			if e.ObjSizes != w.oSizes {
				t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
			}
@@ -468,78 +409,6 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
			name: "bucket/rootfile3",
			size: 1000,
		},
	}
	createUsageTestFiles(t, base, "", files)
	got, err = scanDataFolder(context.Background(), base, got, getSize)
	if err != nil {
		t.Fatal(err)
	}

	want = []struct {
		path string
		isNil bool
		size, objs int
		oSizes sizeHistogram
	}{
		{
			path: "flat",
			size: 1363315,
			objs: 14,
			oSizes: sizeHistogram{0: 6, 1: 8},
		},
		{
			path: "bucket/",
			size: 21000,
			objs: 3,
			oSizes: sizeHistogram{0: 1, 1: 2},
		},
		{
			path: "bucket/newfolder",
			size: 5,
			objs: 3,
			oSizes: sizeHistogram{0: 3},
		},
		{
			path: "bucket/dir1/dira",
			size: 1300010,
			objs: 4,
			oSizes: sizeHistogram{0: 1, 1: 3},
		},
		{
			path: "bucket/nonexistying",
			isNil: true,
		},
	}

	for _, w := range want {
		t.Run(w.path, func(t *testing.T) {
			e := got.find(w.path)
			if w.path == "flat" {
				f := got.flatten(*got.root())
				e = &f
			}
			if w.isNil {
				if e != nil {
					t.Error("want nil, got", e)
				}
				return
			}
			if e == nil {
				t.Fatal("got nil result")
			}
			if e.Size != int64(w.size) {
				t.Error("got size", e.Size, "want", w.size)
			}
			if e.Objects != uint64(w.objs) {
				t.Error("got objects", e.Objects, "want", w.objs)
			}
			if e.ObjSizes != w.oSizes {
				t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
			}
		})
	}

	files = []usageTestFile{
		{
			name: "bucket/dir1/dira/dirasub/fileindira2",
			size: 200,
@@ -567,15 +436,36 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
	}{
		{
			path: "flat",
			size: 363515,
			objs: 14,
			oSizes: sizeHistogram{0: 7, 1: 7},
			size: 363515 + expectSize,
			objs: 14 + expectSize,
			oSizes: sizeHistogram{0: 7 + expectSize, 1: 7},
		},
		{
			path: "bucket/dir1/dira",
			size: 300210,
			objs: 4,
			oSizes: sizeHistogram{0: 2, 1: 2},
			path: "bucket/dir1",
			size: 342210,
			objs: 7,
			oSizes: sizeHistogram{0: 2, 1: 5},
		},
		{
			path: "bucket/",
			size: 21000,
			objs: 3,
			oSizes: sizeHistogram{0: 1, 1: 2},
		},
		{
			path: "bucket/newfolder",
			size: 5,
			objs: 3,
			oSizes: sizeHistogram{0: 3},
		},
		{
			// Compacted into bucket/dir1
			path: "bucket/dir1/dira",
			isNil: true,
		},
		{
			path: "bucket/nonexistying",
			isNil: true,
		},
	}

@@ -593,7 +483,8 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
				return
			}
			if e == nil {
				t.Fatal("got nil result")
				t.Error("got nil result")
				return
			}
			if e.Size != int64(w.size) {
				t.Error("got size", e.Size, "want", w.size)
@@ -601,6 +492,9 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
			if e.Objects != uint64(w.objs) {
				t.Error("got objects", e.Objects, "want", w.objs)
			}
			if e.Versions != uint64(w.objs) {
				t.Error("got versions", e.Versions, "want", w.objs)
			}
			if e.ObjSizes != w.oSizes {
				t.Error("got histogram", e.ObjSizes, "want", w.oSizes)
			}
@@ -621,6 +515,25 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
	}
}

// generateUsageTestFiles creates nFolders * nFiles files of size bytes each.
func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles, size int) {
	pl := make([]byte, size)
	for i := 0; i < nFolders; i++ {
		name := filepath.Join(base, bucket, fmt.Sprint(i), "0.txt")
		err := os.MkdirAll(filepath.Dir(name), os.ModePerm)
		if err != nil {
			t.Fatal(err)
		}
		for j := 0; j < nFiles; j++ {
			name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
			err = ioutil.WriteFile(name, pl, os.ModePerm)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}

func TestDataUsageCacheSerialize(t *testing.T) {
	base, err := ioutil.TempDir("", "TestDataUsageCacheSerialize")
	if err != nil {
@@ -654,6 +567,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
			if err != nil {
				return
			}
			sizeS.versions++
			sizeS.totalSize = s.Size()
			return
		}
@@ -663,6 +577,20 @@ func TestDataUsageCacheSerialize(t *testing.T) {
	if err != nil {
		t.Fatal(err)
	}
	e := want.find("abucket/dir2")
	e.ReplicationStats = &replicationStats{
		PendingSize: 1,
		ReplicatedSize: 2,
		FailedSize: 3,
		ReplicaSize: 4,
		FailedCount: 5,
		PendingCount: 6,
		MissedThresholdSize: 7,
		AfterThresholdSize: 8,
		MissedThresholdCount: 9,
		AfterThresholdCount: 10,
	}
	want.replace("abucket/dir2", "", *e)
	var buf bytes.Buffer
	err = want.serializeTo(&buf)
	if err != nil {
@@ -686,9 +614,21 @@ func TestDataUsageCacheSerialize(t *testing.T) {
	}
	for wkey, wval := range want.Cache {
		gotv := got.Cache[wkey]
		if fmt.Sprint(gotv) != fmt.Sprint(wval) {
			t.Errorf("deserialize mismatch, key %v\nwant: %+v\ngot: %+v", wkey, wval, gotv)
		if !equalAsJSON(gotv, wval) {
			t.Errorf("deserialize mismatch, key %v\nwant: %#v\ngot: %#v", wkey, wval, gotv)
		}
	}

}

// equalAsJSON returns whether the values are equal when encoded as JSON.
func equalAsJSON(a, b interface{}) bool {
	aj, err := json.Marshal(a)
	if err != nil {
		panic(err)
	}
	bj, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}
	return bytes.Equal(aj, bj)
}
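equalAsJSON replaces the earlier fmt.Sprint comparison because dataUsageEntry now carries a *replicationStats pointer: %v formatting renders a nested pointer field as its address, so two structurally equal entries would never compare equal as strings. Encoding both values as JSON compares the pointed-to contents instead. Roughly (illustrative values only):

a := dataUsageEntry{ReplicationStats: &replicationStats{PendingSize: 1}}
b := dataUsageEntry{ReplicationStats: &replicationStats{PendingSize: 1}}
_ = fmt.Sprint(a) == fmt.Sprint(b) // false: the pointer fields print as distinct addresses
_ = equalAsJSON(a, b)              // true: the pointed-to stats encode identically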
@@ -362,10 +362,10 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
		oi := fsMeta.ToObjectInfo(bucket, object, fi)
		sz := item.applyActions(ctx, fs, actionMeta{oi: oi}, &sizeSummary{})
		if sz >= 0 {
			return sizeSummary{totalSize: sz}, nil
			return sizeSummary{totalSize: sz, versions: 1}, nil
		}

		return sizeSummary{totalSize: fi.Size()}, nil
		return sizeSummary{totalSize: fi.Size(), versions: 1}, nil
	})

	return cache, err
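The FS backend stores objects unversioned, so both return paths above count exactly one version per scanned object; this is what feeds the new non-deletemarker version total on a filesystem deployment.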
@@ -157,21 +157,16 @@ func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCa
		if !o.IncludeDirectories && entry.isDir() {
			continue
		}
		o.debugln("gather got:", entry.name)
		if o.Marker != "" && entry.name < o.Marker {
			o.debugln("pre marker")
			continue
		}
		if !strings.HasPrefix(entry.name, o.Prefix) {
			o.debugln("not in prefix")
			continue
		}
		if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
			o.debugln("not in dir", o.Prefix, o.Separator)
			continue
		}
		if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
			o.debugln("latest is delete marker")
			continue
		}
		if o.Limit > 0 && results.len() >= o.Limit {
@@ -184,7 +179,6 @@ func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCa
			}
			continue
		}
		o.debugln("adding...")
		results.o = append(results.o, entry)
	}
	if resCh != nil {
@@ -427,17 +427,18 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache) (dataUs
			return sizeSummary{}, errSkipFile
		}

		var totalSize int64

		sizeS := sizeSummary{}
		for _, version := range fivs.Versions {
			oi := version.ToObjectInfo(item.bucket, item.objectPath())
			totalSize += item.applyActions(ctx, objAPI, actionMeta{
			sz := item.applyActions(ctx, objAPI, actionMeta{
				oi:         oi,
				bitRotScan: healOpts.Bitrot,
			}, &sizeS)
			if !oi.DeleteMarker && sz == oi.Size {
				sizeS.versions++
			}
			sizeS.totalSize += sz
		}
		sizeS.totalSize = totalSize
		return sizeS, nil
	})
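In the erasure path above, the version counter is guarded: a version is counted only when it is not a delete marker and applyActions returned the object's full size, which appears to exclude versions removed or shrunk by lifecycle processing; this matches the commit message's accumulation of non-deletemarker version counts.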