// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/cespare/xxhash/v2"
	"github.com/klauspost/compress/zstd"
	"github.com/minio/madmin-go"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/bucket/lifecycle"
	"github.com/minio/minio/pkg/hash"
	"github.com/tinylib/msgp/msgp"
)

//go:generate msgp -file $GOFILE -unexported

// dataUsageHash is the hash type used.
type dataUsageHash string

// sizeHistogram is a size histogram.
type sizeHistogram [dataUsageBucketLen]uint64

//msgp:tuple dataUsageEntry
type dataUsageEntry struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	Versions         uint64 // Versions that are not delete markers.
	ObjSizes         sizeHistogram
	ReplicationStats *replicationStats
	Compacted        bool
}

//msgp:tuple replicationStats
type replicationStats struct {
	PendingSize          uint64
	ReplicatedSize       uint64
	FailedSize           uint64
	ReplicaSize          uint64
	FailedCount          uint64
	PendingCount         uint64
	MissedThresholdSize  uint64
	AfterThresholdSize   uint64
	MissedThresholdCount uint64
	AfterThresholdCount  uint64
}

//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4
//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4

//msgp:tuple dataUsageEntryV2
type dataUsageEntryV2 struct {
	// These fields do not include any children.
	Size     int64
	Objects  uint64
	ObjSizes sizeHistogram
	Children dataUsageHashMap
}

//msgp:tuple dataUsageEntryV3
type dataUsageEntryV3 struct {
	// These fields do not include any children.
	Size                   int64
	ReplicatedSize         uint64
	ReplicationPendingSize uint64
	ReplicationFailedSize  uint64
	ReplicaSize            uint64
	Objects                uint64
	ObjSizes               sizeHistogram
	Children               dataUsageHashMap
}

//msgp:tuple dataUsageEntryV4
type dataUsageEntryV4 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	ObjSizes         sizeHistogram
	ReplicationStats replicationStats
}

// dataUsageCache contains a cache of data usage entries, latest version.
type dataUsageCache struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntry
	Disks []string
}

//msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4
//msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4

// dataUsageCacheV2 contains a cache of data usage entries, version 2.
type dataUsageCacheV2 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV2
}

// dataUsageCacheV3 contains a cache of data usage entries, version 3.
type dataUsageCacheV3 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV3
}

// dataUsageCacheV4 contains a cache of data usage entries, version 4.
type dataUsageCacheV4 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV4
}

//msgp:ignore dataUsageEntryInfo
type dataUsageEntryInfo struct {
	Name   string
	Parent string
	Entry  dataUsageEntry
}

type dataUsageCacheInfo struct {
	// Name of the bucket. Also root element.
	Name       string
	NextCycle  uint32
	LastUpdate time.Time
	// indicates if the disk is being healed and scanner
	// should skip healing the disk
	SkipHealing bool
	BloomFilter []byte `msg:"BloomFilter,omitempty"`

	// Active lifecycle, if any on the bucket
	lifeCycle *lifecycle.Lifecycle `msg:"-"`

	// optional updates channel.
	// If set updates will be sent regularly to this channel.
	// Will not be closed when returned.
	updates chan<- dataUsageEntry `msg:"-"`
}

func (e *dataUsageEntry) addSizes(summary sizeSummary) {
	e.Size += summary.totalSize
	e.Versions += summary.versions
	e.ObjSizes.add(summary.totalSize)

	if summary.replicaSize > 0 || summary.pendingSize > 0 || summary.replicatedSize > 0 ||
		summary.failedCount > 0 || summary.pendingCount > 0 || summary.failedSize > 0 {
		if e.ReplicationStats == nil {
			e.ReplicationStats = &replicationStats{}
		}
		e.ReplicationStats.ReplicatedSize += uint64(summary.replicatedSize)
		e.ReplicationStats.FailedSize += uint64(summary.failedSize)
		e.ReplicationStats.PendingSize += uint64(summary.pendingSize)
		e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
		e.ReplicationStats.PendingCount += summary.pendingCount
		e.ReplicationStats.FailedCount += summary.failedCount
	}
}

// merge other data usage entry into this, excluding children.
func (e *dataUsageEntry) merge(other dataUsageEntry) {
	e.Objects += other.Objects
	e.Versions += other.Versions
	e.Size += other.Size
	ors := other.ReplicationStats
	empty := replicationStats{}
	if ors != nil && *ors != empty {
		if e.ReplicationStats == nil {
			e.ReplicationStats = &replicationStats{}
		}
		e.ReplicationStats.PendingSize += other.ReplicationStats.PendingSize
		e.ReplicationStats.FailedSize += other.ReplicationStats.FailedSize
		e.ReplicationStats.ReplicatedSize += other.ReplicationStats.ReplicatedSize
		e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
		e.ReplicationStats.PendingCount += other.ReplicationStats.PendingCount
		e.ReplicationStats.FailedCount += other.ReplicationStats.FailedCount
	}

	for i, v := range other.ObjSizes[:] {
		e.ObjSizes[i] += v
	}
}

// mod returns true if the hash mod cycles == cycle.
// If cycles is 0, false is always returned.
// If cycles is 1, true is always returned (as expected).
func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool {
	if cycles <= 1 {
		return cycles == 1
	}
	return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles
}
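
// Illustrative sketch of using mod to spread work across scanner cycles;
// the path and the cycle count of 16 are hypothetical values, not taken
// from this file:
//
//	h := hashPath("bucket/prefix")
//	if h.mod(currentCycle, 16) {
//		// Chosen on roughly 1 in 16 cycles, so per-entry work is
//		// amortized over 16 cycles.
//	}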

// addChild will add a child based on its hash.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChild(hash dataUsageHash) {
	if _, ok := e.Children[hash.Key()]; ok {
		return
	}
	if e.Children == nil {
		e.Children = make(dataUsageHashMap, 1)
	}
	e.Children[hash.Key()] = struct{}{}
}

// find a path in the cache.
// Returns nil if not found.
func (d *dataUsageCache) find(path string) *dataUsageEntry {
	due, ok := d.Cache[hashPath(path).Key()]
	if !ok {
		return nil
	}
	return &due
}

// isCompacted returns whether an entry is compacted.
// Returns false if not found.
func (d *dataUsageCache) isCompacted(h dataUsageHash) bool {
	due, ok := d.Cache[h.Key()]
	if !ok {
		return false
	}
	return due.Compacted
}

// findChildrenCopy returns a copy of the children of the supplied hash.
func (d *dataUsageCache) findChildrenCopy(h dataUsageHash) dataUsageHashMap {
	ch := d.Cache[h.String()].Children
	res := make(dataUsageHashMap, len(ch))
	for k := range ch {
		res[k] = struct{}{}
	}
	return res
}

// searchParent will search for the parent of h.
// This is an O(N*N) operation if there is no parent or it cannot be guessed.
func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash {
	want := h.Key()
	if idx := strings.LastIndexByte(want, '/'); idx >= 0 {
		if v := d.find(want[:idx]); v != nil {
			for child := range v.Children {
				if child == want {
					found := hashPath(want[:idx])
					return &found
				}
			}
		}
	}
	for k, v := range d.Cache {
		for child := range v.Children {
			if child == want {
				found := dataUsageHash(k)
				return &found
			}
		}
	}
	return nil
}
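
// The fast path above guesses the parent by cutting the key at its last
// path separator and checking that entry's children; only when the guess
// fails does it fall back to the full scan over every cached entry.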

// deleteRecursive will delete an entry recursively, but not change its parent.
func (d *dataUsageCache) deleteRecursive(h dataUsageHash) {
	if existing, ok := d.Cache[h.String()]; ok {
		// Delete first, in case there is a loop.
		delete(d.Cache, h.Key())
		for child := range existing.Children {
			d.deleteRecursive(dataUsageHash(child))
		}
	}
}

// keepBuckets will keep only the specified buckets and delete all others.
func (d *dataUsageCache) keepBuckets(b []BucketInfo) {
	lu := make(map[dataUsageHash]struct{})
	for _, v := range b {
		lu[hashPath(v.Name)] = struct{}{}
	}
	d.keepRootChildren(lu)
}

// keepRootChildren will keep only the specified root children and delete all others.
func (d *dataUsageCache) keepRootChildren(list map[dataUsageHash]struct{}) {
	if d.root() == nil {
		return
	}
	rh := d.rootHash()
	for k := range d.Cache {
		h := dataUsageHash(k)
		if h == rh {
			continue
		}
		if _, ok := list[h]; !ok {
			delete(d.Cache, k)
			d.deleteRecursive(h)
		}
	}
}

// dui converts the flattened version of the path to madmin.DataUsageInfo.
// As a side effect d will be flattened, use a clone if this is not ok.
func (d *dataUsageCache) dui(path string, buckets []BucketInfo) madmin.DataUsageInfo {
	e := d.find(path)
	if e == nil {
		// No entry found, return empty.
		return madmin.DataUsageInfo{}
	}
	flat := d.flatten(*e)
	dui := madmin.DataUsageInfo{
		LastUpdate:        d.Info.LastUpdate,
		ObjectsTotalCount: flat.Objects,
		ObjectsTotalSize:  uint64(flat.Size),
		BucketsCount:      uint64(len(e.Children)),
		BucketsUsage:      d.bucketsUsageInfo(buckets),
	}
	if flat.ReplicationStats != nil {
		dui.ReplicationPendingSize = flat.ReplicationStats.PendingSize
		dui.ReplicatedSize = flat.ReplicationStats.ReplicatedSize
		dui.ReplicationFailedSize = flat.ReplicationStats.FailedSize
		dui.ReplicationPendingCount = flat.ReplicationStats.PendingCount
		dui.ReplicationFailedCount = flat.ReplicationStats.FailedCount
		dui.ReplicaSize = flat.ReplicationStats.ReplicaSize
	}
	return dui
}

// replace will add or replace an entry in the cache.
// If a parent is specified it will be added to that if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) {
	hash := hashPath(path)
	if d.Cache == nil {
		d.Cache = make(map[string]dataUsageEntry, 100)
	}
	d.Cache[hash.Key()] = e
	if parent != "" {
		phash := hashPath(parent)
		p := d.Cache[phash.Key()]
		p.addChild(hash)
		d.Cache[phash.Key()] = p
	}
}
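
// Sketch with hypothetical names: replacing "bucket/a" with parent "bucket"
// stores the entry under hashPath("bucket/a") and links it as a child of
// hashPath("bucket"), creating the parent entry (as a zero value) if it
// did not exist:
//
//	d.replace("bucket/a", "bucket", dataUsageEntry{Size: 42, Objects: 1})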

// replaceHashed adds or replaces an entry in the cache based on its hash.
// If a parent is specified it will be added to that if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) replaceHashed(hash dataUsageHash, parent *dataUsageHash, e dataUsageEntry) {
	if d.Cache == nil {
		d.Cache = make(map[string]dataUsageEntry, 100)
	}
	d.Cache[hash.Key()] = e
	if parent != nil {
		p := d.Cache[parent.Key()]
		p.addChild(hash)
		d.Cache[parent.Key()] = p
	}
}

// copyWithChildren will copy entry with hash from src if it exists along with any children.
// If a parent is specified it will be added to that if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHash, parent *dataUsageHash) {
	if d.Cache == nil {
		d.Cache = make(map[string]dataUsageEntry, 100)
	}
	e, ok := src.Cache[hash.String()]
	if !ok {
		return
	}
	d.Cache[hash.Key()] = e
	for ch := range e.Children {
		if ch == hash.Key() {
			logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
			return
		}
		d.copyWithChildren(src, dataUsageHash(ch), &hash)
	}
	if parent != nil {
		p := d.Cache[parent.Key()]
		p.addChild(hash)
		d.Cache[parent.Key()] = p
	}
}

// reduceChildrenOf will reduce the recursive number of children to the limit
// by compacting the children with the least number of objects.
func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compactSelf bool) {
	e, ok := d.Cache[path.Key()]
	if !ok {
		return
	}
	if e.Compacted {
		return
	}
	// If direct children have more, compact all.
	if len(e.Children) > limit && compactSelf {
		flat := d.sizeRecursive(path.Key())
		flat.Compacted = true
		d.deleteRecursive(path)
		d.replaceHashed(path, nil, *flat)
		return
	}
	total := d.totalChildrenRec(path.Key())
	if total < limit {
		return
	}

	// Appears to be printed with _MINIO_SERVER_DEBUG=off
	// console.Debugf(" %d children found, compacting %v\n", total, path)

	var leaves = make([]struct {
		objects uint64
		path    dataUsageHash
	}, total)
	// Collect current leaves that have children.
	leaves = leaves[:0]
	remove := total - limit
	var add func(path dataUsageHash)
	add = func(path dataUsageHash) {
		e, ok := d.Cache[path.Key()]
		if !ok {
			return
		}
		if len(e.Children) == 0 {
			return
		}
		sz := d.sizeRecursive(path.Key())
		leaves = append(leaves, struct {
			objects uint64
			path    dataUsageHash
		}{objects: sz.Objects, path: path})
		for ch := range e.Children {
			add(dataUsageHash(ch))
		}
	}

	// Add path recursively.
	add(path)
	sort.Slice(leaves, func(i, j int) bool {
		return leaves[i].objects < leaves[j].objects
	})
	for remove > 0 && len(leaves) > 0 {
		// Remove top entry.
		e := leaves[0]
		candidate := e.path
		if candidate == path && !compactSelf {
			// We should be the biggest,
			// if we cannot compact ourselves, we are done.
			break
		}
		removing := d.totalChildrenRec(candidate.Key())
		flat := d.sizeRecursive(candidate.Key())
		if flat == nil {
			leaves = leaves[1:]
			continue
		}
		// Appears to be printed with _MINIO_SERVER_DEBUG=off
		// console.Debugf("compacting %v, removing %d children\n", candidate, removing)

		flat.Compacted = true
		d.deleteRecursive(candidate)
		d.replaceHashed(candidate, nil, *flat)

		// Remove top entry and subtract removed children.
		remove -= removing
		leaves = leaves[1:]
	}
}
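
// In short, reduceChildrenOf compacts bottom-up: it collects every entry
// that still has children, sorts those candidates by their recursive object
// count, and folds the smallest subtrees into single compacted entries until
// the recursive child count drops below the limit, stopping early if only
// the path itself remains and compactSelf is false.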

// StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string {
	s := fmt.Sprintf("info:%+v\n", d.Info)
	for k, v := range d.Cache {
		s += fmt.Sprintf("\t%v: %+v\n", k, v)
	}
	return strings.TrimSpace(s)
}

// String returns a human readable representation of the hash.
func (h dataUsageHash) String() string {
	return string(h)
}

// Key returns the key.
func (h dataUsageHash) Key() string {
	return string(h)
}

// flatten all children of the root into the root element and return it.
func (d *dataUsageCache) flatten(root dataUsageEntry) dataUsageEntry {
	for id := range root.Children {
		e := d.Cache[id]
		if len(e.Children) > 0 {
			e = d.flatten(e)
		}
		root.merge(e)
	}
	root.Children = nil
	return root
}
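
// For example, flattening the root folds every descendant into a single
// entry whose totals cover the whole tree (a sketch; the cache contents
// are hypothetical):
//
//	if root := d.root(); root != nil {
//		flat := d.flatten(*root)
//		_ = flat.Size // Total size including all children; flat.Children is nil.
//	}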

// add a size to the histogram.
func (h *sizeHistogram) add(size int64) {
	// Fetch the histogram interval corresponding
	// to the passed object size.
	for i, interval := range ObjectsHistogramIntervals {
		if size >= interval.start && size <= interval.end {
			h[i]++
			break
		}
	}
}

// toMap returns the histogram as a map[string]uint64.
func (h *sizeHistogram) toMap() map[string]uint64 {
	res := make(map[string]uint64, dataUsageBucketLen)
	for i, count := range h {
		res[ObjectsHistogramIntervals[i].name] = count
	}
	return res
}
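
// Minimal sketch of the histogram round trip; which interval an object
// lands in depends on the boundaries defined in ObjectsHistogramIntervals:
//
//	var h sizeHistogram
//	h.add(1 << 20)      // counted in whichever interval contains 1 MiB
//	counts := h.toMap() // interval name -> object count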

// bucketsUsageInfo returns the buckets usage info as a map,
// keyed by bucket name.
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]madmin.BucketUsageInfo {
	var dst = make(map[string]madmin.BucketUsageInfo, len(buckets))
	for _, bucket := range buckets {
		e := d.find(bucket.Name)
		if e == nil {
			continue
		}
		flat := d.flatten(*e)
		bui := madmin.BucketUsageInfo{
			Size:                 uint64(flat.Size),
			ObjectsCount:         flat.Objects,
			ObjectSizesHistogram: flat.ObjSizes.toMap(),
		}
		if flat.ReplicationStats != nil {
			bui.ReplicationPendingSize = flat.ReplicationStats.PendingSize
			bui.ReplicatedSize = flat.ReplicationStats.ReplicatedSize
			bui.ReplicationFailedSize = flat.ReplicationStats.FailedSize
			bui.ReplicationPendingCount = flat.ReplicationStats.PendingCount
			bui.ReplicationFailedCount = flat.ReplicationStats.FailedCount
			bui.ReplicaSize = flat.ReplicationStats.ReplicaSize
		}
		dst[bucket.Name] = bui
	}
	return dst
}

// bucketUsageInfo returns the bucket's usage info.
// If not found all values returned are zero values.
func (d *dataUsageCache) bucketUsageInfo(bucket string) madmin.BucketUsageInfo {
	e := d.find(bucket)
	if e == nil {
		return madmin.BucketUsageInfo{}
	}
	flat := d.flatten(*e)
	bui := madmin.BucketUsageInfo{
		Size:                 uint64(flat.Size),
		ObjectsCount:         flat.Objects,
		ObjectSizesHistogram: flat.ObjSizes.toMap(),
	}
	if flat.ReplicationStats != nil {
		bui.ReplicationPendingSize = flat.ReplicationStats.PendingSize
		bui.ReplicatedSize = flat.ReplicationStats.ReplicatedSize
		bui.ReplicationFailedSize = flat.ReplicationStats.FailedSize
		bui.ReplicationPendingCount = flat.ReplicationStats.PendingCount
		bui.ReplicationFailedCount = flat.ReplicationStats.FailedCount
		bui.ReplicaSize = flat.ReplicationStats.ReplicaSize
	}
	return bui
}

// sizeRecursive returns the path as a flattened entry.
func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
	root := d.find(path)
	if root == nil || len(root.Children) == 0 {
		return root
	}
	flat := d.flatten(*root)
	return &flat
}

// totalChildrenRec returns the total number of children recorded.
func (d *dataUsageCache) totalChildrenRec(path string) int {
	root := d.find(path)
	if root == nil || len(root.Children) == 0 {
		return 0
	}
	n := len(root.Children)
	for ch := range root.Children {
		n += d.totalChildrenRec(ch)
	}
	return n
}

// root returns the root of the cache.
func (d *dataUsageCache) root() *dataUsageEntry {
	return d.find(d.Info.Name)
}

// rootHash returns the hash of the root of the cache.
func (d *dataUsageCache) rootHash() dataUsageHash {
	return hashPath(d.Info.Name)
}

// clone returns a copy of the cache with no references to the existing.
func (d *dataUsageCache) clone() dataUsageCache {
	clone := dataUsageCache{
		Info:  d.Info,
		Cache: make(map[string]dataUsageEntry, len(d.Cache)),
	}
	for k, v := range d.Cache {
		clone.Cache[k] = v
	}
	return clone
}

// merge root of other into d.
// children of root will be flattened before being merged.
// Last update time will be set to the latest of the two.
func (d *dataUsageCache) merge(other dataUsageCache) {
	existingRoot := d.root()
	otherRoot := other.root()
	if existingRoot == nil && otherRoot == nil {
		return
	}
	if otherRoot == nil {
		return
	}
	if existingRoot == nil {
		*d = other.clone()
		return
	}
	if other.Info.LastUpdate.After(d.Info.LastUpdate) {
		d.Info.LastUpdate = other.Info.LastUpdate
	}
	existingRoot.merge(*otherRoot)
	eHash := d.rootHash()
	for key := range otherRoot.Children {
		entry := other.Cache[key]
		flat := other.flatten(entry)
		existing := d.Cache[key]
		// If not found, merging simply adds.
		existing.merge(flat)
		d.replaceHashed(dataUsageHash(key), &eHash, existing)
	}
}

type objectIO interface {
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (reader *GetObjectReader, err error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
}

// load the cache content with name from minioMetaBackgroundOpsBucket.
// Only backend errors are returned as errors.
// If the object is not found or cannot be deserialized, d is cleared and a nil error is returned.
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
	// Abandon if more than 5 minutes, so we don't hold up scanner.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
	if err != nil {
		switch err.(type) {
		case ObjectNotFound:
		case BucketNotFound:
		case InsufficientReadQuorum:
		default:
			return toObjectErr(err, dataUsageBucket, name)
		}
		*d = dataUsageCache{}
		return nil
	}
	defer r.Close()
	if err := d.deserialize(r); err != nil {
		*d = dataUsageCache{}
		logger.LogOnceIf(ctx, err, err.Error())
	}
	return nil
}

// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
	pr, pw := io.Pipe()
	go func() {
		pw.CloseWithError(d.serializeTo(pw))
	}()
	defer pr.Close()

	r, err := hash.NewReader(pr, -1, "", "", -1)
	if err != nil {
		return err
	}

	// Abandon if more than 5 minutes, so we don't hold up scanner.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	_, err = store.PutObject(ctx,
		dataUsageBucket,
		name,
		NewPutObjReader(r),
		ObjectOptions{})
	if isErrBucketNotFound(err) {
		return nil
	}
	return err
}

// dataUsageCacheVer indicates the cache version.
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const (
	dataUsageCacheVerCurrent = 5
	dataUsageCacheVerV4      = 4
	dataUsageCacheVerV3      = 3
	dataUsageCacheVerV2      = 2
	dataUsageCacheVerV1      = 1
)
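
// The serialized layout, as produced by serializeTo below, is a single
// version byte followed by a zstd-compressed msgp stream:
//
//	[1 byte: dataUsageCacheVerCurrent][zstd(msgp(dataUsageCache))]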

// serialize the contents of the cache.
func (d *dataUsageCache) serializeTo(dst io.Writer) error {
	// Add version and compress.
	_, err := dst.Write([]byte{dataUsageCacheVerCurrent})
	if err != nil {
		return err
	}
	enc, err := zstd.NewWriter(dst,
		zstd.WithEncoderLevel(zstd.SpeedFastest),
		zstd.WithWindowSize(1<<20),
		zstd.WithEncoderConcurrency(2))
	if err != nil {
		return err
	}
	mEnc := msgp.NewWriter(enc)
	err = d.EncodeMsg(mEnc)
	if err != nil {
		return err
	}
	err = mEnc.Flush()
	if err != nil {
		return err
	}
	err = enc.Close()
	if err != nil {
		return err
	}
	return nil
}
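
// A round trip through serializeTo and deserialize can be sketched as
// follows; the bytes.Buffer and the cache contents are illustrative only:
//
//	var buf bytes.Buffer
//	if err := d.serializeTo(&buf); err != nil {
//		return err
//	}
//	var d2 dataUsageCache
//	if err := d2.deserialize(&buf); err != nil {
//		return err
//	}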

// deserialize the contents of the supplied reader into the cache.
func (d *dataUsageCache) deserialize(r io.Reader) error {
	var b [1]byte
	n, _ := r.Read(b[:])
	if n != 1 {
		return io.ErrUnexpectedEOF
	}
	ver := int(b[0])
	switch ver {
	case dataUsageCacheVerV1:
		return errors.New("cache version deprecated (will autoupdate)")
	case dataUsageCacheVerV2:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()

		dold := &dataUsageCacheV2{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Disks = dold.Disks
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			d.Cache[k] = dataUsageEntry{
				Size:      v.Size,
				Objects:   v.Objects,
				ObjSizes:  v.ObjSizes,
				Children:  v.Children,
				Compacted: len(v.Children) == 0 && k != d.Info.Name,
			}
		}
		return nil
	case dataUsageCacheVerV3:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		dold := &dataUsageCacheV3{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Disks = dold.Disks
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			due := dataUsageEntry{
				Size:     v.Size,
				Objects:  v.Objects,
				ObjSizes: v.ObjSizes,
				Children: v.Children,
			}
			if v.ReplicatedSize > 0 || v.ReplicaSize > 0 || v.ReplicationFailedSize > 0 || v.ReplicationPendingSize > 0 {
				due.ReplicationStats = &replicationStats{
					ReplicatedSize: v.ReplicatedSize,
					ReplicaSize:    v.ReplicaSize,
					FailedSize:     v.ReplicationFailedSize,
					PendingSize:    v.ReplicationPendingSize,
				}
			}
			due.Compacted = len(due.Children) == 0 && k != d.Info.Name

			d.Cache[k] = due
		}
		return nil
	case dataUsageCacheVerV4:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		dold := &dataUsageCacheV4{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Disks = dold.Disks
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			due := dataUsageEntry{
				Size:     v.Size,
				Objects:  v.Objects,
				ObjSizes: v.ObjSizes,
				Children: v.Children,
			}
			empty := replicationStats{}
			if v.ReplicationStats != empty {
				due.ReplicationStats = &v.ReplicationStats
			}
			due.Compacted = len(due.Children) == 0 && k != d.Info.Name

			d.Cache[k] = due
		}

		// Populate compacted value and remove unneeded replica stats.
		empty := replicationStats{}
		for k, e := range d.Cache {
			if e.ReplicationStats != nil && *e.ReplicationStats == empty {
				e.ReplicationStats = nil
			}

			d.Cache[k] = e
		}
		return nil
	case dataUsageCacheVerCurrent:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		return d.DecodeMsg(msgp.NewReader(dec))
	default:
		return fmt.Errorf("dataUsageCache: unknown version: %d", ver)
	}
}
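
// Note that legacy formats (V2-V4) are upgraded in place on read: fields are
// copied into the current dataUsageEntry and Compacted is derived as "has no
// children and is not the root", since older versions did not store the flag.
// V1 is rejected outright so callers rebuild the cache from scratch.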

// Trim this from start+end of hashes.
var hashPathCutSet = dataUsageRoot

func init() {
	if dataUsageRoot != string(filepath.Separator) {
		hashPathCutSet = dataUsageRoot + string(filepath.Separator)
	}
}

// hashPath calculates a hash of the provided string.
func hashPath(data string) dataUsageHash {
	if data != dataUsageRoot {
		data = strings.Trim(data, hashPathCutSet)
	}
	return dataUsageHash(path.Clean(data))
}
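
// Despite its name, hashPath applies no cryptographic hash; it only trims
// and cleans the path so that equivalent paths map to the same key.
// Assuming dataUsageRoot is "/", for example:
//
//	hashPath("bucket/object/") == hashPath("bucket/object")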

//msgp:ignore dataUsageHashMap
type dataUsageHashMap map[string]struct{}
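
// The hand-written codecs below serialize the set as a msgp array of
// strings rather than a map, since only the keys carry information; an
// empty array decodes back to a nil map.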

// DecodeMsg implements msgp.Decodable
func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
	var zb0002 uint32
	zb0002, err = dc.ReadArrayHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if zb0002 == 0 {
		*z = nil
		return
	}
	*z = make(dataUsageHashMap, zb0002)
	for i := uint32(0); i < zb0002; i++ {
		{
			var zb0003 string
			zb0003, err = dc.ReadString()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
			(*z)[zb0003] = struct{}{}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) {
	err = en.WriteArrayHeader(uint32(len(z)))
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0004 := range z {
		err = en.WriteString(zb0004)
		if err != nil {
			err = msgp.WrapError(err, zb0004)
			return
		}
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendArrayHeader(o, uint32(len(z)))
	for zb0004 := range z {
		o = msgp.AppendString(o, zb0004)
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0002 uint32
	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if zb0002 == 0 {
		*z = nil
		return bts, nil
	}
	*z = make(dataUsageHashMap, zb0002)
	for i := uint32(0); i < zb0002; i++ {
		{
			var zb0003 string
			zb0003, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
			(*z)[zb0003] = struct{}{}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message.
func (z dataUsageHashMap) Msgsize() (s int) {
	s = msgp.ArrayHeaderSize
	for zb0004 := range z {
		s += msgp.StringPrefixSize + len(zb0004)
	}
	return
}