2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2020-03-18 19:19:29 -04:00
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2020-04-27 13:06:21 -04:00
|
|
|
"errors"
|
2020-03-18 19:19:29 -04:00
|
|
|
"fmt"
|
|
|
|
"io"
|
2020-12-31 12:45:09 -05:00
|
|
|
"net/http"
|
2020-03-18 19:19:29 -04:00
|
|
|
"path"
|
|
|
|
"path/filepath"
|
2021-05-11 21:36:15 -04:00
|
|
|
"sort"
|
2020-03-18 19:19:29 -04:00
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/cespare/xxhash/v2"
|
2020-06-12 13:28:21 -04:00
|
|
|
"github.com/klauspost/compress/zstd"
|
2021-10-23 21:38:33 -04:00
|
|
|
"github.com/minio/madmin-go"
|
2021-06-01 17:59:40 -04:00
|
|
|
"github.com/minio/minio/internal/bucket/lifecycle"
|
|
|
|
"github.com/minio/minio/internal/hash"
|
|
|
|
"github.com/minio/minio/internal/logger"
|
2020-03-18 19:19:29 -04:00
|
|
|
"github.com/tinylib/msgp/msgp"
|
|
|
|
)
|
|
|
|
|
|
|
|
//go:generate msgp -file $GOFILE -unexported
|
|
|
|
|
|
|
|
// dataUsageHash is the hash type used.
// It is the (possibly hashed) cache-path key; see hashPath and Key().
type dataUsageHash string
|
2020-03-18 19:19:29 -04:00
|
|
|
|
|
|
|
// sizeHistogram is a size histogram.
// Bucket i counts objects whose size falls within
// ObjectsHistogramIntervals[i] (see add and toMap).
type sizeHistogram [dataUsageBucketLen]uint64
|
|
|
|
|
|
|
|
// dataUsageEntry holds the usage accounting for a single cache path,
// plus the set of child paths directly below it.
type dataUsageEntry struct {
	Children dataUsageHashMap `msg:"ch"`
	// These fields do not include any children.
	Size             int64                `msg:"sz"`
	Objects          uint64               `msg:"os"`
	Versions         uint64               `msg:"vs"` // Versions that are not delete markers.
	ObjSizes         sizeHistogram        `msg:"szs"`
	ReplicationStats *replicationAllStats `msg:"rs,omitempty"`
	AllTierStats     *allTierStats        `msg:"ats,omitempty"`
	// Compacted marks entries whose children have been folded into the
	// totals above and removed from the cache (see reduceChildrenOf).
	Compacted bool `msg:"c"`
}
|
|
|
|
|
|
|
|
// allTierStats is a collection of per-tier stats across all configured remote
// tiers.
type allTierStats struct {
	// Tiers maps tier name to its accumulated stats.
	Tiers map[string]tierStats `msg:"ts"`
}
|
|
|
|
|
|
|
|
func newAllTierStats() *allTierStats {
|
|
|
|
return &allTierStats{
|
|
|
|
Tiers: make(map[string]tierStats),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ats *allTierStats) addSizes(sz sizeSummary) {
|
|
|
|
for tier, st := range sz.tiers {
|
|
|
|
ats.Tiers[tier] = ats.Tiers[tier].add(st)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ats *allTierStats) merge(other *allTierStats) {
|
|
|
|
for tier, st := range other.Tiers {
|
|
|
|
ats.Tiers[tier] = ats.Tiers[tier].add(st)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ats *allTierStats) adminStats(stats map[string]madmin.TierStats) map[string]madmin.TierStats {
|
|
|
|
if ats == nil {
|
|
|
|
return stats
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update stats for tiers as they become available.
|
|
|
|
for tier, st := range ats.Tiers {
|
|
|
|
stats[tier] = madmin.TierStats{
|
|
|
|
TotalSize: st.TotalSize,
|
|
|
|
NumVersions: st.NumVersions,
|
|
|
|
NumObjects: st.NumObjects,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return stats
|
|
|
|
}
|
|
|
|
|
|
|
|
// tierStats holds per-tier stats of a remote tier.
type tierStats struct {
	TotalSize   uint64 `msg:"ts"` // total bytes stored in this tier
	NumVersions int    `msg:"nv"` // number of versions in this tier
	NumObjects  int    `msg:"no"` // number of objects in this tier
}
|
|
|
|
|
|
|
|
func (ts tierStats) add(u tierStats) tierStats {
|
|
|
|
ts.TotalSize += u.TotalSize
|
|
|
|
ts.NumVersions += u.NumVersions
|
|
|
|
ts.NumObjects += u.NumObjects
|
|
|
|
return ts
|
2021-04-03 12:03:42 -04:00
|
|
|
}
|
|
|
|
|
2021-09-21 12:01:51 -04:00
|
|
|
//msgp:tuple replicationStatsV1
// replicationStatsV1 is the legacy (v1) replication counters layout,
// kept so older serialized caches can still be decoded.
type replicationStatsV1 struct {
	PendingSize          uint64
	ReplicatedSize       uint64
	FailedSize           uint64
	ReplicaSize          uint64
	FailedCount          uint64
	PendingCount         uint64
	MissedThresholdSize  uint64
	AfterThresholdSize   uint64
	MissedThresholdCount uint64
	AfterThresholdCount  uint64
}
|
|
|
|
|
|
|
|
func (rsv1 replicationStatsV1) Empty() bool {
|
|
|
|
return rsv1.ReplicatedSize == 0 &&
|
|
|
|
rsv1.FailedSize == 0 &&
|
|
|
|
rsv1.FailedCount == 0
|
|
|
|
}
|
|
|
|
|
2021-04-03 12:03:42 -04:00
|
|
|
//msgp:tuple replicationStats
// replicationStats holds per-target replication counters for the current
// cache format.
type replicationStats struct {
	PendingSize          uint64
	ReplicatedSize       uint64
	FailedSize           uint64
	FailedCount          uint64
	PendingCount         uint64
	MissedThresholdSize  uint64
	AfterThresholdSize   uint64
	MissedThresholdCount uint64
	AfterThresholdCount  uint64
}
|
|
|
|
|
2021-09-18 16:31:35 -04:00
|
|
|
func (rs replicationStats) Empty() bool {
|
|
|
|
return rs.ReplicatedSize == 0 &&
|
|
|
|
rs.FailedSize == 0 &&
|
|
|
|
rs.FailedCount == 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// replicationAllStats aggregates replication counters across all targets,
// keyed by target ARN, plus the total size of replica objects.
type replicationAllStats struct {
	Targets     map[string]replicationStats `msg:"t,omitempty"`
	ReplicaSize uint64                      `msg:"r,omitempty"`
}
|
|
|
|
|
|
|
|
//msgp:tuple replicationAllStatsV1
// replicationAllStatsV1 is the legacy (v1) all-targets layout, kept for
// decoding older caches.
type replicationAllStatsV1 struct {
	Targets     map[string]replicationStats
	ReplicaSize uint64 `msg:"ReplicaSize,omitempty"`
}
|
|
|
|
|
2021-10-23 21:38:33 -04:00
|
|
|
//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6
|
|
|
|
//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6
|
2021-05-11 21:36:15 -04:00
|
|
|
|
2021-01-13 12:58:08 -05:00
|
|
|
//msgp:tuple dataUsageEntryV2
// dataUsageEntryV2 is the version 2 on-disk layout of dataUsageEntry,
// kept only so older caches can still be decoded.
type dataUsageEntryV2 struct {
	// These fields do not include any children.
	Size     int64
	Objects  uint64
	ObjSizes sizeHistogram
	Children dataUsageHashMap
}
|
|
|
|
|
2021-04-03 12:03:42 -04:00
|
|
|
//msgp:tuple dataUsageEntryV3
// dataUsageEntryV3 is the version 3 on-disk layout of dataUsageEntry,
// kept only so older caches can still be decoded.
type dataUsageEntryV3 struct {
	// These fields do not include any children.
	Size                   int64
	ReplicatedSize         uint64
	ReplicationPendingSize uint64
	ReplicationFailedSize  uint64
	ReplicaSize            uint64
	Objects                uint64
	ObjSizes               sizeHistogram
	Children               dataUsageHashMap
}
|
|
|
|
|
2021-05-11 21:36:15 -04:00
|
|
|
//msgp:tuple dataUsageEntryV4
// dataUsageEntryV4 is the version 4 on-disk layout of dataUsageEntry,
// kept only so older caches can still be decoded.
type dataUsageEntryV4 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	ObjSizes         sizeHistogram
	ReplicationStats replicationStatsV1
}
|
|
|
|
|
2021-09-18 16:31:35 -04:00
|
|
|
//msgp:tuple dataUsageEntryV5
// dataUsageEntryV5 is the version 5 on-disk layout of dataUsageEntry,
// kept only so older caches can still be decoded.
type dataUsageEntryV5 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	Versions         uint64 // Versions that are not delete markers.
	ObjSizes         sizeHistogram
	ReplicationStats *replicationStatsV1
	Compacted        bool
}
|
|
|
|
|
2021-10-23 21:38:33 -04:00
|
|
|
//msgp:tuple dataUsageEntryV6
// dataUsageEntryV6 is the version 6 on-disk layout of dataUsageEntry,
// kept only so older caches can still be decoded.
type dataUsageEntryV6 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	Versions         uint64 // Versions that are not delete markers.
	ObjSizes         sizeHistogram
	ReplicationStats *replicationAllStatsV1
	Compacted        bool
}
|
|
|
|
|
2021-05-11 21:36:15 -04:00
|
|
|
// dataUsageCache contains a cache of data usage entries latest version.
type dataUsageCache struct {
	Info dataUsageCacheInfo
	// Cache is keyed by the hashed path (dataUsageHash.Key).
	Cache map[string]dataUsageEntry
}
|
|
|
|
|
2021-10-23 21:38:33 -04:00
|
|
|
//msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6
|
|
|
|
//msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6
|
2021-05-11 21:36:15 -04:00
|
|
|
|
2021-04-03 12:03:42 -04:00
|
|
|
// dataUsageCacheV2 contains a cache of data usage entries version 2.
// Decode-only; never written in the current format.
type dataUsageCacheV2 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV2
}
|
|
|
|
|
2021-10-23 21:38:33 -04:00
|
|
|
// dataUsageCacheV3 contains a cache of data usage entries version 3.
// Decode-only; never written in the current format.
type dataUsageCacheV3 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV3
}
|
|
|
|
|
2021-10-23 21:38:33 -04:00
|
|
|
// dataUsageCacheV4 contains a cache of data usage entries version 4.
// Decode-only; never written in the current format.
type dataUsageCacheV4 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV4
}
|
|
|
|
|
2021-10-23 21:38:33 -04:00
|
|
|
// dataUsageCacheV5 contains a cache of data usage entries version 5.
// Decode-only; never written in the current format.
type dataUsageCacheV5 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV5
}
|
|
|
|
|
2021-10-23 21:38:33 -04:00
|
|
|
// dataUsageCacheV6 contains a cache of data usage entries version 6.
// Decode-only; never written in the current format.
type dataUsageCacheV6 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV6
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
//msgp:ignore dataUsageEntryInfo
// dataUsageEntryInfo associates a usage entry with its own path name and
// its parent's path. Never serialized (see directive above).
type dataUsageEntryInfo struct {
	Name   string
	Parent string
	Entry  dataUsageEntry
}
|
|
|
|
|
|
|
|
// dataUsageCacheInfo carries metadata for a dataUsageCache.
// Fields tagged `msg:"-"` are runtime-only and never serialized.
type dataUsageCacheInfo struct {
	// Name of the bucket. Also root element.
	Name       string
	NextCycle  uint32
	LastUpdate time.Time
	// indicates if the disk is being healed and scanner
	// should skip healing the disk
	SkipHealing bool
	BloomFilter []byte `msg:"BloomFilter,omitempty"`

	// Active lifecycle, if any on the bucket
	lifeCycle *lifecycle.Lifecycle `msg:"-"`

	// optional updates channel.
	// If set updates will be sent regularly to this channel.
	// Will not be closed when returned.
	updates     chan<- dataUsageEntry `msg:"-"`
	replication replicationConfig     `msg:"-"`
}
|
|
|
|
|
2020-12-07 16:47:48 -05:00
|
|
|
func (e *dataUsageEntry) addSizes(summary sizeSummary) {
|
|
|
|
e.Size += summary.totalSize
|
2021-05-11 21:36:15 -04:00
|
|
|
e.Versions += summary.versions
|
|
|
|
e.ObjSizes.add(summary.totalSize)
|
2021-04-03 12:03:42 -04:00
|
|
|
|
2021-10-22 22:13:50 -04:00
|
|
|
if e.ReplicationStats == nil {
|
|
|
|
e.ReplicationStats = &replicationAllStats{
|
|
|
|
Targets: make(map[string]replicationStats),
|
2021-09-18 16:31:35 -04:00
|
|
|
}
|
2021-10-22 22:13:50 -04:00
|
|
|
} else if e.ReplicationStats.Targets == nil {
|
|
|
|
e.ReplicationStats.Targets = make(map[string]replicationStats)
|
|
|
|
}
|
|
|
|
e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
|
|
|
|
|
|
|
|
if summary.replTargetStats != nil {
|
2021-09-18 16:31:35 -04:00
|
|
|
for arn, st := range summary.replTargetStats {
|
|
|
|
tgtStat, ok := e.ReplicationStats.Targets[arn]
|
|
|
|
if !ok {
|
|
|
|
tgtStat = replicationStats{}
|
|
|
|
}
|
2021-10-22 22:13:50 -04:00
|
|
|
tgtStat.PendingSize += uint64(st.pendingSize)
|
|
|
|
tgtStat.FailedSize += uint64(st.failedSize)
|
|
|
|
tgtStat.ReplicatedSize += uint64(st.replicatedSize)
|
|
|
|
tgtStat.FailedCount += st.failedCount
|
|
|
|
tgtStat.PendingCount += st.pendingCount
|
2021-09-18 16:31:35 -04:00
|
|
|
e.ReplicationStats.Targets[arn] = tgtStat
|
2021-05-11 21:36:15 -04:00
|
|
|
}
|
|
|
|
}
|
2021-10-23 21:38:33 -04:00
|
|
|
if summary.tiers != nil {
|
|
|
|
if e.AllTierStats == nil {
|
|
|
|
e.AllTierStats = newAllTierStats()
|
|
|
|
}
|
|
|
|
e.AllTierStats.addSizes(summary)
|
|
|
|
}
|
2020-12-07 16:47:48 -05:00
|
|
|
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// merge other data usage entry into this, excluding children.
|
|
|
|
func (e *dataUsageEntry) merge(other dataUsageEntry) {
|
|
|
|
e.Objects += other.Objects
|
2021-05-11 21:36:15 -04:00
|
|
|
e.Versions += other.Versions
|
2020-03-18 19:19:29 -04:00
|
|
|
e.Size += other.Size
|
2021-10-22 22:13:50 -04:00
|
|
|
if other.ReplicationStats != nil {
|
2021-05-11 21:36:15 -04:00
|
|
|
if e.ReplicationStats == nil {
|
2021-09-18 16:31:35 -04:00
|
|
|
e.ReplicationStats = &replicationAllStats{Targets: make(map[string]replicationStats)}
|
2021-10-22 22:13:50 -04:00
|
|
|
} else if e.ReplicationStats.Targets == nil {
|
|
|
|
e.ReplicationStats.Targets = make(map[string]replicationStats)
|
2021-09-18 16:31:35 -04:00
|
|
|
}
|
2021-10-22 22:13:50 -04:00
|
|
|
e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
|
|
|
|
for arn, stat := range other.ReplicationStats.Targets {
|
|
|
|
st := e.ReplicationStats.Targets[arn]
|
|
|
|
e.ReplicationStats.Targets[arn] = replicationStats{
|
|
|
|
PendingSize: stat.PendingSize + st.PendingSize,
|
|
|
|
FailedSize: stat.FailedSize + st.FailedSize,
|
|
|
|
ReplicatedSize: stat.ReplicatedSize + st.ReplicatedSize,
|
|
|
|
PendingCount: stat.PendingCount + st.PendingCount,
|
|
|
|
FailedCount: stat.FailedCount + st.FailedCount,
|
2021-09-18 16:31:35 -04:00
|
|
|
}
|
2021-05-11 21:36:15 -04:00
|
|
|
}
|
|
|
|
}
|
2020-12-07 16:47:48 -05:00
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
for i, v := range other.ObjSizes[:] {
|
|
|
|
e.ObjSizes[i] += v
|
|
|
|
}
|
2021-10-23 21:38:33 -04:00
|
|
|
|
|
|
|
if other.AllTierStats != nil {
|
|
|
|
if e.AllTierStats == nil {
|
|
|
|
e.AllTierStats = newAllTierStats()
|
|
|
|
}
|
|
|
|
e.AllTierStats.merge(other.AllTierStats)
|
|
|
|
}
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// mod returns true if the hash mod cycles == cycle.
|
2020-08-24 16:47:01 -04:00
|
|
|
// If cycles is 0 false is always returned.
|
|
|
|
// If cycles is 1 true is always returned (as expected).
|
2020-04-27 13:06:21 -04:00
|
|
|
func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool {
|
2020-08-24 16:47:01 -04:00
|
|
|
if cycles <= 1 {
|
|
|
|
return cycles == 1
|
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
|
2022-03-07 12:25:53 -05:00
|
|
|
// modAlt returns true if the hash mod cycles == cycle.
|
|
|
|
// This is out of sync with mod.
|
|
|
|
// If cycles is 0 false is always returned.
|
|
|
|
// If cycles is 1 true is always returned (as expected).
|
|
|
|
func (h dataUsageHash) modAlt(cycle uint32, cycles uint32) bool {
|
|
|
|
if cycles <= 1 {
|
|
|
|
return cycles == 1
|
|
|
|
}
|
|
|
|
return uint32(xxhash.Sum64String(string(h))>>32)%(cycles) == cycle%cycles
|
|
|
|
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// addChild will add a child based on its hash.
|
|
|
|
// If it already exists it will not be added again.
|
|
|
|
func (e *dataUsageEntry) addChild(hash dataUsageHash) {
|
2020-06-12 13:28:21 -04:00
|
|
|
if _, ok := e.Children[hash.Key()]; ok {
|
2020-03-18 19:19:29 -04:00
|
|
|
return
|
|
|
|
}
|
|
|
|
if e.Children == nil {
|
|
|
|
e.Children = make(dataUsageHashMap, 1)
|
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
e.Children[hash.Key()] = struct{}{}
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
|
2021-06-16 17:22:55 -04:00
|
|
|
// removeChild will remove a child based on its hash.
|
|
|
|
func (e *dataUsageEntry) removeChild(hash dataUsageHash) {
|
|
|
|
if len(e.Children) > 0 {
|
|
|
|
delete(e.Children, hash.Key())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-24 10:11:38 -04:00
|
|
|
// Create a clone of the entry.
|
|
|
|
func (e dataUsageEntry) clone() dataUsageEntry {
|
|
|
|
// We operate on a copy from the receiver.
|
|
|
|
if e.Children != nil {
|
|
|
|
ch := make(dataUsageHashMap, len(e.Children))
|
|
|
|
for k, v := range e.Children {
|
|
|
|
ch[k] = v
|
|
|
|
}
|
|
|
|
e.Children = ch
|
|
|
|
}
|
|
|
|
if e.ReplicationStats != nil {
|
|
|
|
// Copy to new struct
|
|
|
|
r := *e.ReplicationStats
|
|
|
|
e.ReplicationStats = &r
|
|
|
|
}
|
2021-10-23 21:38:33 -04:00
|
|
|
if e.AllTierStats != nil {
|
|
|
|
ats := newAllTierStats()
|
|
|
|
ats.merge(e.AllTierStats)
|
|
|
|
e.AllTierStats = ats
|
|
|
|
}
|
2021-08-24 10:11:38 -04:00
|
|
|
return e
|
|
|
|
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// find a path in the cache.
|
|
|
|
// Returns nil if not found.
|
|
|
|
func (d *dataUsageCache) find(path string) *dataUsageEntry {
|
2020-06-12 13:28:21 -04:00
|
|
|
due, ok := d.Cache[hashPath(path).Key()]
|
2020-03-18 19:19:29 -04:00
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &due
|
|
|
|
}
|
|
|
|
|
2021-05-11 21:36:15 -04:00
|
|
|
// isCompacted returns whether an entry is compacted.
|
|
|
|
// Returns false if not found.
|
|
|
|
func (d *dataUsageCache) isCompacted(h dataUsageHash) bool {
|
|
|
|
due, ok := d.Cache[h.Key()]
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return due.Compacted
|
|
|
|
}
|
|
|
|
|
2020-08-24 16:47:01 -04:00
|
|
|
// findChildrenCopy returns a copy of the children of the supplied hash.
|
|
|
|
func (d *dataUsageCache) findChildrenCopy(h dataUsageHash) dataUsageHashMap {
|
|
|
|
ch := d.Cache[h.String()].Children
|
|
|
|
res := make(dataUsageHashMap, len(ch))
|
|
|
|
for k := range ch {
|
|
|
|
res[k] = struct{}{}
|
|
|
|
}
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
2021-05-19 17:38:30 -04:00
|
|
|
// searchParent will search for the parent of h.
|
|
|
|
// This is an O(N*N) operation if there is no parent or it cannot be guessed.
|
|
|
|
func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash {
|
|
|
|
want := h.Key()
|
|
|
|
if idx := strings.LastIndexByte(want, '/'); idx >= 0 {
|
|
|
|
if v := d.find(want[:idx]); v != nil {
|
|
|
|
for child := range v.Children {
|
|
|
|
if child == want {
|
|
|
|
found := hashPath(want[:idx])
|
|
|
|
return &found
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for k, v := range d.Cache {
|
|
|
|
for child := range v.Children {
|
|
|
|
if child == want {
|
|
|
|
found := dataUsageHash(k)
|
|
|
|
return &found
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-05-11 21:36:15 -04:00
|
|
|
// deleteRecursive will delete an entry recursively, but not change its parent.
|
2020-06-12 13:28:21 -04:00
|
|
|
func (d *dataUsageCache) deleteRecursive(h dataUsageHash) {
|
|
|
|
if existing, ok := d.Cache[h.String()]; ok {
|
|
|
|
// Delete first if there should be a loop.
|
|
|
|
delete(d.Cache, h.Key())
|
|
|
|
for child := range existing.Children {
|
|
|
|
d.deleteRecursive(dataUsageHash(child))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// keepBuckets will keep only the buckets specified specified by delete all others.
|
|
|
|
func (d *dataUsageCache) keepBuckets(b []BucketInfo) {
|
|
|
|
lu := make(map[dataUsageHash]struct{})
|
|
|
|
for _, v := range b {
|
|
|
|
lu[hashPath(v.Name)] = struct{}{}
|
|
|
|
}
|
|
|
|
d.keepRootChildren(lu)
|
|
|
|
}
|
|
|
|
|
|
|
|
// keepRootChildren keeps only the root children present in list and
// deletes all other entries from the cache.
func (d *dataUsageCache) keepRootChildren(list map[dataUsageHash]struct{}) {
	root := d.root()
	if root == nil {
		// No root entry; nothing to prune.
		return
	}
	rh := d.rootHash()
	// Delete every cache entry (except the root itself) whose hash is
	// not in the keep-list.
	for k := range d.Cache {
		h := dataUsageHash(k)
		if h == rh {
			continue
		}
		if _, ok := list[h]; !ok {
			// NOTE(review): the key is deleted here *before*
			// deleteRecursive runs; deleteRecursive re-looks the
			// entry up, finds nothing, and therefore does not
			// recurse. The outer loop visits every cache key
			// anyway, so entries still get removed — but confirm
			// whether the recursive call is intended to do work.
			delete(d.Cache, k)
			d.deleteRecursive(h)
			root.removeChild(h)
		}
	}
	// Clean up abandoned children.
	for k := range root.Children {
		h := dataUsageHash(k)
		if _, ok := list[h]; !ok {
			delete(root.Children, k)
		}
	}
	// Persist the pruned root back into the cache map.
	d.Cache[rh.Key()] = *root
}
|
|
|
|
|
2021-04-03 12:03:42 -04:00
|
|
|
// dui converts the flattened version of the path to madmin.DataUsageInfo.
|
2020-04-27 13:06:21 -04:00
|
|
|
// As a side effect d will be flattened, use a clone if this is not ok.
|
2021-09-18 16:31:35 -04:00
|
|
|
func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo {
|
2020-03-18 19:19:29 -04:00
|
|
|
e := d.find(path)
|
|
|
|
if e == nil {
|
2020-05-27 09:45:43 -04:00
|
|
|
// No entry found, return empty.
|
2021-09-18 16:31:35 -04:00
|
|
|
return DataUsageInfo{}
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
flat := d.flatten(*e)
|
2021-09-18 16:31:35 -04:00
|
|
|
dui := DataUsageInfo{
|
2021-05-11 21:36:15 -04:00
|
|
|
LastUpdate: d.Info.LastUpdate,
|
|
|
|
ObjectsTotalCount: flat.Objects,
|
|
|
|
ObjectsTotalSize: uint64(flat.Size),
|
|
|
|
BucketsCount: uint64(len(e.Children)),
|
|
|
|
BucketsUsage: d.bucketsUsageInfo(buckets),
|
2021-10-23 21:38:33 -04:00
|
|
|
TierStats: d.tiersUsageInfo(buckets),
|
2021-05-11 21:36:15 -04:00
|
|
|
}
|
|
|
|
return dui
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// replace will add or replace an entry in the cache.
|
|
|
|
// If a parent is specified it will be added to that if not already there.
|
|
|
|
// If the parent does not exist, it will be added.
|
|
|
|
func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) {
|
|
|
|
hash := hashPath(path)
|
|
|
|
if d.Cache == nil {
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache = make(map[string]dataUsageEntry, 100)
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache[hash.Key()] = e
|
2020-03-18 19:19:29 -04:00
|
|
|
if parent != "" {
|
|
|
|
phash := hashPath(parent)
|
2020-06-12 13:28:21 -04:00
|
|
|
p := d.Cache[phash.Key()]
|
2020-03-18 19:19:29 -04:00
|
|
|
p.addChild(hash)
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache[phash.Key()] = p
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// replaceHashed add or replaces an entry to the cache based on its hash.
|
|
|
|
// If a parent is specified it will be added to that if not already there.
|
|
|
|
// If the parent does not exist, it will be added.
|
|
|
|
func (d *dataUsageCache) replaceHashed(hash dataUsageHash, parent *dataUsageHash, e dataUsageEntry) {
|
|
|
|
if d.Cache == nil {
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache = make(map[string]dataUsageEntry, 100)
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache[hash.Key()] = e
|
2020-03-18 19:19:29 -04:00
|
|
|
if parent != nil {
|
2020-06-12 13:28:21 -04:00
|
|
|
p := d.Cache[parent.Key()]
|
2020-03-18 19:19:29 -04:00
|
|
|
p.addChild(hash)
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache[parent.Key()] = p
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-27 13:06:21 -04:00
|
|
|
// copyWithChildren will copy entry with hash from src if it exists along with any children.
|
|
|
|
// If a parent is specified it will be added to that if not already there.
|
|
|
|
// If the parent does not exist, it will be added.
|
|
|
|
func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHash, parent *dataUsageHash) {
|
|
|
|
if d.Cache == nil {
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache = make(map[string]dataUsageEntry, 100)
|
2020-04-27 13:06:21 -04:00
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
e, ok := src.Cache[hash.String()]
|
2020-04-27 13:06:21 -04:00
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache[hash.Key()] = e
|
2020-04-27 13:06:21 -04:00
|
|
|
for ch := range e.Children {
|
2020-06-12 13:28:21 -04:00
|
|
|
if ch == hash.Key() {
|
2020-04-27 13:06:21 -04:00
|
|
|
logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
|
|
|
|
return
|
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
d.copyWithChildren(src, dataUsageHash(ch), &hash)
|
2020-04-27 13:06:21 -04:00
|
|
|
}
|
|
|
|
if parent != nil {
|
2020-06-12 13:28:21 -04:00
|
|
|
p := d.Cache[parent.Key()]
|
2020-04-27 13:06:21 -04:00
|
|
|
p.addChild(hash)
|
2020-06-12 13:28:21 -04:00
|
|
|
d.Cache[parent.Key()] = p
|
2020-04-27 13:06:21 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-11 21:36:15 -04:00
|
|
|
// reduceChildrenOf will reduce the recursive number of children to the limit
// by compacting the children with the least number of objects.
// compactSelf allows the entry at path itself to be compacted when its
// direct children alone exceed the limit.
func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compactSelf bool) {
	e, ok := d.Cache[path.Key()]
	if !ok {
		return
	}
	if e.Compacted {
		// Already compacted; nothing below it to reduce.
		return
	}
	// If direct children have more, compact all.
	if len(e.Children) > limit && compactSelf {
		flat := d.sizeRecursive(path.Key())
		flat.Compacted = true
		d.deleteRecursive(path)
		d.replaceHashed(path, nil, *flat)
		return
	}
	total := d.totalChildrenRec(path.Key())
	if total < limit {
		// Under the limit already; nothing to do.
		return
	}

	// Appears to be printed with _MINIO_SERVER_DEBUG=off
	// console.Debugf(" %d children found, compacting %v\n", total, path)

	// Pre-allocate for `total` candidates, then reset length so append
	// reuses the backing array.
	leaves := make([]struct {
		objects uint64
		path    dataUsageHash
	}, total)
	// Collect current leaves that have children.
	leaves = leaves[:0]
	remove := total - limit
	// add recursively collects every entry below (and including) path
	// that still has children, recording its recursive object count.
	var add func(path dataUsageHash)
	add = func(path dataUsageHash) {
		e, ok := d.Cache[path.Key()]
		if !ok {
			return
		}
		if len(e.Children) == 0 {
			return
		}
		sz := d.sizeRecursive(path.Key())
		leaves = append(leaves, struct {
			objects uint64
			path    dataUsageHash
		}{objects: sz.Objects, path: path})
		for ch := range e.Children {
			add(dataUsageHash(ch))
		}
	}

	// Add path recursively.
	add(path)
	// Compact smallest (fewest objects) candidates first.
	sort.Slice(leaves, func(i, j int) bool {
		return leaves[i].objects < leaves[j].objects
	})
	for remove > 0 && len(leaves) > 0 {
		// Remove top entry.
		e := leaves[0]
		candidate := e.path
		if candidate == path && !compactSelf {
			// We should be the biggest,
			// if we cannot compact ourself, we are done.
			break
		}
		removing := d.totalChildrenRec(candidate.Key())
		flat := d.sizeRecursive(candidate.Key())
		if flat == nil {
			// Candidate vanished (already removed by an earlier
			// compaction); skip it.
			leaves = leaves[1:]
			continue
		}
		// Appears to be printed with _MINIO_SERVER_DEBUG=off
		// console.Debugf("compacting %v, removing %d children\n", candidate, removing)

		flat.Compacted = true
		d.deleteRecursive(candidate)
		d.replaceHashed(candidate, nil, *flat)

		// Remove top entry and subtract removed children.
		remove -= removing
		leaves = leaves[1:]
	}
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// StringAll returns a detailed string representation of all entries in the cache.
|
|
|
|
func (d *dataUsageCache) StringAll() string {
|
2021-08-04 12:14:14 -04:00
|
|
|
// Remove bloom filter from print.
|
|
|
|
bf := d.Info.BloomFilter
|
|
|
|
d.Info.BloomFilter = nil
|
2020-03-18 19:19:29 -04:00
|
|
|
s := fmt.Sprintf("info:%+v\n", d.Info)
|
2021-08-04 12:14:14 -04:00
|
|
|
d.Info.BloomFilter = bf
|
2020-03-18 19:19:29 -04:00
|
|
|
for k, v := range d.Cache {
|
|
|
|
s += fmt.Sprintf("\t%v: %+v\n", k, v)
|
|
|
|
}
|
|
|
|
return strings.TrimSpace(s)
|
|
|
|
}
|
|
|
|
|
2020-06-12 13:28:21 -04:00
|
|
|
// String returns a human readable representation of the string.
|
|
|
|
func (h dataUsageHash) String() string {
|
|
|
|
return string(h)
|
2020-04-27 13:06:21 -04:00
|
|
|
}
|
|
|
|
|
2021-05-11 21:36:15 -04:00
|
|
|
// Key returns the key.
// This is the raw string value used to index dataUsageCache.Cache.
func (h dataUsageHash) Key() string {
	return string(h)
}
|
|
|
|
|
2021-07-13 13:42:11 -04:00
|
|
|
func (d *dataUsageCache) flattenChildrens(root dataUsageEntry) (m map[string]dataUsageEntry) {
|
|
|
|
m = make(map[string]dataUsageEntry)
|
|
|
|
for id := range root.Children {
|
|
|
|
e := d.Cache[id]
|
|
|
|
if len(e.Children) > 0 {
|
|
|
|
e = d.flatten(e)
|
|
|
|
}
|
|
|
|
m[id] = e
|
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// flatten all children of the root into the root element and return it.
|
|
|
|
func (d *dataUsageCache) flatten(root dataUsageEntry) dataUsageEntry {
|
|
|
|
for id := range root.Children {
|
|
|
|
e := d.Cache[id]
|
|
|
|
if len(e.Children) > 0 {
|
|
|
|
e = d.flatten(e)
|
|
|
|
}
|
|
|
|
root.merge(e)
|
|
|
|
}
|
|
|
|
root.Children = nil
|
|
|
|
return root
|
|
|
|
}
|
|
|
|
|
|
|
|
// add a size to the histogram.
|
|
|
|
func (h *sizeHistogram) add(size int64) {
|
|
|
|
// Fetch the histogram interval corresponding
|
|
|
|
// to the passed object size.
|
|
|
|
for i, interval := range ObjectsHistogramIntervals {
|
|
|
|
if size >= interval.start && size <= interval.end {
|
|
|
|
h[i]++
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-27 09:45:43 -04:00
|
|
|
// toMap returns the map to a map[string]uint64.
|
|
|
|
func (h *sizeHistogram) toMap() map[string]uint64 {
|
|
|
|
res := make(map[string]uint64, dataUsageBucketLen)
|
2020-03-18 19:19:29 -04:00
|
|
|
for i, count := range h {
|
|
|
|
res[ObjectsHistogramIntervals[i].name] = count
|
|
|
|
}
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
2021-10-23 21:38:33 -04:00
|
|
|
func (d *dataUsageCache) tiersUsageInfo(buckets []BucketInfo) *allTierStats {
|
|
|
|
dst := newAllTierStats()
|
|
|
|
for _, bucket := range buckets {
|
|
|
|
e := d.find(bucket.Name)
|
|
|
|
if e == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
flat := d.flatten(*e)
|
|
|
|
if flat.AllTierStats == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
dst.merge(flat.AllTierStats)
|
|
|
|
}
|
|
|
|
if len(dst.Tiers) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
2020-05-27 09:45:43 -04:00
|
|
|
// bucketsUsageInfo returns the buckets usage info as a map, with
|
|
|
|
// key as bucket name
|
2021-09-18 16:31:35 -04:00
|
|
|
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo {
|
2022-01-02 12:15:06 -05:00
|
|
|
dst := make(map[string]BucketUsageInfo, len(buckets))
|
2020-03-18 19:19:29 -04:00
|
|
|
for _, bucket := range buckets {
|
|
|
|
e := d.find(bucket.Name)
|
|
|
|
if e == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
flat := d.flatten(*e)
|
2021-09-18 16:31:35 -04:00
|
|
|
bui := BucketUsageInfo{
|
2021-05-11 21:36:15 -04:00
|
|
|
Size: uint64(flat.Size),
|
|
|
|
ObjectsCount: flat.Objects,
|
|
|
|
ObjectSizesHistogram: flat.ObjSizes.toMap(),
|
2020-05-27 09:45:43 -04:00
|
|
|
}
|
2021-05-11 21:36:15 -04:00
|
|
|
if flat.ReplicationStats != nil {
|
|
|
|
bui.ReplicaSize = flat.ReplicationStats.ReplicaSize
|
2021-09-18 16:31:35 -04:00
|
|
|
bui.ReplicationInfo = make(map[string]BucketTargetUsageInfo, len(flat.ReplicationStats.Targets))
|
|
|
|
for arn, stat := range flat.ReplicationStats.Targets {
|
|
|
|
bui.ReplicationInfo[arn] = BucketTargetUsageInfo{
|
|
|
|
ReplicationPendingSize: stat.PendingSize,
|
|
|
|
ReplicatedSize: stat.ReplicatedSize,
|
|
|
|
ReplicationFailedSize: stat.FailedSize,
|
|
|
|
ReplicationPendingCount: stat.PendingCount,
|
|
|
|
ReplicationFailedCount: stat.FailedCount,
|
|
|
|
}
|
|
|
|
}
|
2021-05-11 21:36:15 -04:00
|
|
|
}
|
|
|
|
dst[bucket.Name] = bui
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// sizeRecursive returns the path as a flattened entry.
|
|
|
|
func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
|
|
|
|
root := d.find(path)
|
|
|
|
if root == nil || len(root.Children) == 0 {
|
|
|
|
return root
|
|
|
|
}
|
|
|
|
flat := d.flatten(*root)
|
|
|
|
return &flat
|
|
|
|
}
|
|
|
|
|
2021-05-11 21:36:15 -04:00
|
|
|
// totalChildrenRec returns the total number of children recorded.
|
|
|
|
func (d *dataUsageCache) totalChildrenRec(path string) int {
|
|
|
|
root := d.find(path)
|
|
|
|
if root == nil || len(root.Children) == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
n := len(root.Children)
|
|
|
|
for ch := range root.Children {
|
|
|
|
n += d.totalChildrenRec(ch)
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// root returns the root of the cache, i.e. the entry stored under the
// cache's own name (d.Info.Name). Returns nil if not present.
func (d *dataUsageCache) root() *dataUsageEntry {
	return d.find(d.Info.Name)
}
|
|
|
|
|
|
|
|
// rootHash returns the hash of the cache's root path (d.Info.Name).
func (d *dataUsageCache) rootHash() dataUsageHash {
	return hashPath(d.Info.Name)
}
|
|
|
|
|
|
|
|
// clone returns a copy of the cache with no references to the existing.
|
|
|
|
func (d *dataUsageCache) clone() dataUsageCache {
|
|
|
|
clone := dataUsageCache{
|
|
|
|
Info: d.Info,
|
2020-06-12 13:28:21 -04:00
|
|
|
Cache: make(map[string]dataUsageEntry, len(d.Cache)),
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
for k, v := range d.Cache {
|
2021-08-24 10:11:38 -04:00
|
|
|
clone.Cache[k] = v.clone()
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
return clone
|
|
|
|
}
|
|
|
|
|
|
|
|
// merge root of other into d.
// children of root will be flattened before being merged.
// Last update time will be set to the last updated.
func (d *dataUsageCache) merge(other dataUsageCache) {
	existingRoot := d.root()
	otherRoot := other.root()
	// Nothing on either side: no-op.
	if existingRoot == nil && otherRoot == nil {
		return
	}
	// Other has no root: nothing to merge in.
	if otherRoot == nil {
		return
	}
	// We have no root: adopt a full deep copy of other.
	if existingRoot == nil {
		*d = other.clone()
		return
	}
	// Keep the most recent update time of the two caches.
	if other.Info.LastUpdate.After(d.Info.LastUpdate) {
		d.Info.LastUpdate = other.Info.LastUpdate
	}
	// Merge the root totals first, then each direct child of other's root,
	// flattened so grandchildren are folded into the child entry.
	existingRoot.merge(*otherRoot)
	eHash := d.rootHash()
	for key := range otherRoot.Children {
		entry := other.Cache[key]
		flat := other.flatten(entry)
		existing := d.Cache[key]
		// If not found, merging simply adds.
		existing.merge(flat)
		// Store under our root as parent.
		d.replaceHashed(dataUsageHash(key), &eHash, existing)
	}
}
|
|
|
|
|
2020-09-10 12:18:19 -04:00
|
|
|
// objectIO is the subset of object-layer operations used by
// dataUsageCache.load and dataUsageCache.save.
type objectIO interface {
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (reader *GetObjectReader, err error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// load the cache content with name from minioMetaBackgroundOpsBucket.
|
|
|
|
// Only backend errors are returned as errors.
|
|
|
|
// If the object is not found or unable to deserialize d is cleared and nil error is returned.
|
2020-09-10 12:18:19 -04:00
|
|
|
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
|
2021-05-11 21:36:15 -04:00
|
|
|
// Abandon if more than 5 minutes, so we don't hold up scanner.
|
|
|
|
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
|
|
|
defer cancel()
|
2022-01-03 13:22:58 -05:00
|
|
|
|
2021-04-21 11:39:00 -04:00
|
|
|
r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
|
2020-03-18 19:19:29 -04:00
|
|
|
if err != nil {
|
2020-11-12 12:28:56 -05:00
|
|
|
switch err.(type) {
|
|
|
|
case ObjectNotFound:
|
|
|
|
case BucketNotFound:
|
|
|
|
case InsufficientReadQuorum:
|
2021-09-09 14:24:22 -04:00
|
|
|
case StorageErr:
|
2020-11-12 12:28:56 -05:00
|
|
|
default:
|
2020-03-18 19:19:29 -04:00
|
|
|
return toObjectErr(err, dataUsageBucket, name)
|
|
|
|
}
|
|
|
|
*d = dataUsageCache{}
|
|
|
|
return nil
|
|
|
|
}
|
2021-01-08 13:12:26 -05:00
|
|
|
defer r.Close()
|
2020-12-31 12:45:09 -05:00
|
|
|
if err := d.deserialize(r); err != nil {
|
2020-03-18 19:19:29 -04:00
|
|
|
*d = dataUsageCache{}
|
2020-12-31 12:45:09 -05:00
|
|
|
logger.LogOnceIf(ctx, err, err.Error())
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
// Serialization is streamed through a pipe so the payload is never fully
// buffered in memory. A missing bucket is treated as success (nothing to save to).
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
	pr, pw := io.Pipe()
	go func() {
		// Propagate any serialization error to the reader side via the pipe.
		pw.CloseWithError(d.serializeTo(pw))
	}()
	defer pr.Close()

	// Size -1: length is unknown while streaming; no checksums enforced here.
	r, err := hash.NewReader(pr, -1, "", "", -1)
	if err != nil {
		return err
	}

	// Abandon if more than 5 minutes, so we don't hold up scanner.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	_, err = store.PutObject(ctx,
		dataUsageBucket,
		name,
		NewPutObjReader(r),
		ObjectOptions{})
	// Bucket may not exist yet (fresh setup); not an error for the caller.
	if isErrBucketNotFound(err) {
		return nil
	}
	return err
}
|
|
|
|
|
|
|
|
// dataUsageCacheVer indicates the cache version.
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const (
	dataUsageCacheVerCurrent = 7 // version written by serializeTo
	dataUsageCacheVerV6      = 6
	dataUsageCacheVerV5      = 5
	dataUsageCacheVerV4      = 4
	dataUsageCacheVerV3      = 3
	dataUsageCacheVerV2      = 2
	dataUsageCacheVerV1      = 1 // deprecated; deserialize rejects it
)
|
2020-03-18 19:19:29 -04:00
|
|
|
|
|
|
|
// serialize the contents of the cache.
|
2020-12-10 16:03:22 -05:00
|
|
|
func (d *dataUsageCache) serializeTo(dst io.Writer) error {
|
|
|
|
// Add version and compress.
|
2021-05-11 21:36:15 -04:00
|
|
|
_, err := dst.Write([]byte{dataUsageCacheVerCurrent})
|
2020-12-10 16:03:22 -05:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
enc, err := zstd.NewWriter(dst,
|
2020-06-12 13:28:21 -04:00
|
|
|
zstd.WithEncoderLevel(zstd.SpeedFastest),
|
|
|
|
zstd.WithWindowSize(1<<20),
|
|
|
|
zstd.WithEncoderConcurrency(2))
|
2020-03-18 19:19:29 -04:00
|
|
|
if err != nil {
|
2020-12-10 16:03:22 -05:00
|
|
|
return err
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
mEnc := msgp.NewWriter(enc)
|
|
|
|
err = d.EncodeMsg(mEnc)
|
|
|
|
if err != nil {
|
2020-12-10 16:03:22 -05:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
err = mEnc.Flush()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
err = enc.Close()
|
|
|
|
if err != nil {
|
2020-12-10 16:03:22 -05:00
|
|
|
return err
|
2020-06-12 13:28:21 -04:00
|
|
|
}
|
2020-12-10 16:03:22 -05:00
|
|
|
return nil
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// deserialize the supplied byte slice into the cache.
// The first byte is an uncompressed version tag; the remainder is a
// zstd-compressed msgp payload. Older versions (V2..V6) are migrated
// in place to the current in-memory layout; V1 is rejected outright.
func (d *dataUsageCache) deserialize(r io.Reader) error {
	// Read the single version byte written by serializeTo.
	var b [1]byte
	n, _ := r.Read(b[:])
	if n != 1 {
		return io.ErrUnexpectedEOF
	}
	ver := int(b[0])
	switch ver {
	case dataUsageCacheVerV1:
		// Too old to migrate; the scanner will rebuild from scratch.
		return errors.New("cache version deprecated (will autoupdate)")
	case dataUsageCacheVerV2:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()

		dold := &dataUsageCacheV2{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			d.Cache[k] = dataUsageEntry{
				Size:     v.Size,
				Objects:  v.Objects,
				ObjSizes: v.ObjSizes,
				Children: v.Children,
				// V2 had no Compacted flag; infer it: leaf entries other
				// than the root are considered compacted.
				Compacted: len(v.Children) == 0 && k != d.Info.Name,
			}
		}
		return nil
	case dataUsageCacheVerV3:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		dold := &dataUsageCacheV3{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			due := dataUsageEntry{
				Size:     v.Size,
				Objects:  v.Objects,
				ObjSizes: v.ObjSizes,
				Children: v.Children,
			}
			// V3 stored flat replication counters; re-home them under the
			// bucket's current RoleArn target, if replication is configured.
			if v.ReplicatedSize > 0 || v.ReplicaSize > 0 || v.ReplicationFailedSize > 0 || v.ReplicationPendingSize > 0 {
				cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name)
				if cfg != nil && cfg.RoleArn != "" {
					due.ReplicationStats = &replicationAllStats{
						Targets: make(map[string]replicationStats),
					}
					due.ReplicationStats.ReplicaSize = v.ReplicaSize
					due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{
						ReplicatedSize: v.ReplicatedSize,
						FailedSize:     v.ReplicationFailedSize,
						PendingSize:    v.ReplicationPendingSize,
					}
				}
			}
			due.Compacted = len(due.Children) == 0 && k != d.Info.Name

			d.Cache[k] = due
		}
		return nil
	case dataUsageCacheVerV4:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		dold := &dataUsageCacheV4{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			due := dataUsageEntry{
				Size:     v.Size,
				Objects:  v.Objects,
				ObjSizes: v.ObjSizes,
				Children: v.Children,
			}
			// Only migrate replication stats when they are non-zero.
			empty := replicationStatsV1{}

			if v.ReplicationStats != empty {
				cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name)
				if cfg != nil && cfg.RoleArn != "" {
					due.ReplicationStats = &replicationAllStats{
						Targets: make(map[string]replicationStats),
					}
					due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{
						ReplicatedSize: v.ReplicationStats.ReplicatedSize,
						FailedSize:     v.ReplicationStats.FailedSize,
						FailedCount:    v.ReplicationStats.FailedCount,
						PendingSize:    v.ReplicationStats.PendingSize,
						PendingCount:   v.ReplicationStats.PendingCount,
					}
					due.ReplicationStats.ReplicaSize = v.ReplicationStats.ReplicaSize
				}
			}
			due.Compacted = len(due.Children) == 0 && k != d.Info.Name

			d.Cache[k] = due
		}

		// Populate compacted value and remove unneeded replica stats.
		for k, e := range d.Cache {
			if e.ReplicationStats != nil && len(e.ReplicationStats.Targets) == 0 {
				e.ReplicationStats = nil
			}
			d.Cache[k] = e
		}
		return nil
	case dataUsageCacheVerV5:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		dold := &dataUsageCacheV5{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			due := dataUsageEntry{
				Size:     v.Size,
				Objects:  v.Objects,
				ObjSizes: v.ObjSizes,
				Children: v.Children,
			}
			if v.ReplicationStats != nil && !v.ReplicationStats.Empty() {
				cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name)
				if cfg != nil && cfg.RoleArn != "" {
					due.ReplicationStats = &replicationAllStats{
						Targets: make(map[string]replicationStats),
					}
					// Remember the config used for the migration.
					d.Info.replication = replicationConfig{Config: cfg}

					due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{
						ReplicatedSize: v.ReplicationStats.ReplicatedSize,
						FailedSize:     v.ReplicationStats.FailedSize,
						FailedCount:    v.ReplicationStats.FailedCount,
						PendingSize:    v.ReplicationStats.PendingSize,
						PendingCount:   v.ReplicationStats.PendingCount,
					}
					due.ReplicationStats.ReplicaSize = v.ReplicationStats.ReplicaSize
				}
			}
			due.Compacted = len(due.Children) == 0 && k != d.Info.Name

			d.Cache[k] = due
		}

		// Populate compacted value and remove unneeded replica stats.
		for k, e := range d.Cache {
			if e.ReplicationStats != nil && len(e.ReplicationStats.Targets) == 0 {
				e.ReplicationStats = nil
			}
			d.Cache[k] = e
		}
		return nil
	case dataUsageCacheVerV6:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		dold := &dataUsageCacheV6{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			// V6 replication stats map straight across; only the
			// container type changed.
			var replicationStats *replicationAllStats
			if v.ReplicationStats != nil {
				replicationStats = &replicationAllStats{
					Targets:     v.ReplicationStats.Targets,
					ReplicaSize: v.ReplicationStats.ReplicaSize,
				}
			}
			due := dataUsageEntry{
				Children:         v.Children,
				Size:             v.Size,
				Objects:          v.Objects,
				Versions:         v.Versions,
				ObjSizes:         v.ObjSizes,
				ReplicationStats: replicationStats,
				Compacted:        v.Compacted,
			}
			d.Cache[k] = due
		}
		return nil
	case dataUsageCacheVerCurrent:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		// Current version decodes directly into d; no migration needed.
		return d.DecodeMsg(msgp.NewReader(dec))
	default:
		return fmt.Errorf("dataUsageCache: unknown version: %d", ver)
	}
}
|
|
|
|
|
|
|
|
// Trim this from start+end of hashes.
var hashPathCutSet = dataUsageRoot

func init() {
	// When the data-usage root is not itself the path separator, also
	// trim the separator so hashes are stable regardless of leading or
	// trailing slashes.
	if dataUsageRoot != string(filepath.Separator) {
		hashPathCutSet = dataUsageRoot + string(filepath.Separator)
	}
}
|
|
|
|
|
|
|
|
// hashPath calculates a hash of the provided string.
|
|
|
|
func hashPath(data string) dataUsageHash {
|
|
|
|
if data != dataUsageRoot {
|
|
|
|
data = strings.Trim(data, hashPathCutSet)
|
|
|
|
}
|
2020-06-12 13:28:21 -04:00
|
|
|
return dataUsageHash(path.Clean(data))
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
|
2020-06-12 13:28:21 -04:00
|
|
|
// dataUsageHashMap is a set of path hashes. It is excluded from msgp
// code generation; the hand-written codecs below serialize it as a
// plain msgp array of strings.
//msgp:ignore dataUsageHashMap
type dataUsageHashMap map[string]struct{}
|
2020-03-18 19:19:29 -04:00
|
|
|
|
2020-06-12 13:28:21 -04:00
|
|
|
// DecodeMsg implements msgp.Decodable.
// The set is encoded as an array of strings; an empty array decodes to
// a nil map.
func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
	var zb0002 uint32
	zb0002, err = dc.ReadArrayHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if zb0002 == 0 {
		*z = nil
		return
	}
	*z = make(dataUsageHashMap, zb0002)
	for i := uint32(0); i < zb0002; i++ {
		{
			var zb0003 string
			zb0003, err = dc.ReadString()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
			(*z)[zb0003] = struct{}{}
		}
	}
	return
}
|
|
|
|
|
2020-06-12 13:28:21 -04:00
|
|
|
// EncodeMsg implements msgp.Encodable.
// Writes the set as a msgp array of strings. Note: map iteration order
// is not deterministic, so element order may vary between encodings.
func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) {
	err = en.WriteArrayHeader(uint32(len(z)))
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0004 := range z {
		err = en.WriteString(zb0004)
		if err != nil {
			err = msgp.WrapError(err, zb0004)
			return
		}
	}
	return
}
|
|
|
|
|
|
|
|
// MarshalMsg implements msgp.Marshaler.
// Appends the set to b as a msgp array of strings and returns the
// extended slice.
func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendArrayHeader(o, uint32(len(z)))
	for zb0004 := range z {
		o = msgp.AppendString(o, zb0004)
	}
	return
}
|
2020-06-12 13:28:21 -04:00
|
|
|
|
|
|
|
// UnmarshalMsg implements msgp.Unmarshaler.
// Byte-slice counterpart of DecodeMsg; an empty array unmarshals to a
// nil map. Returns the remaining unconsumed bytes.
func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0002 uint32
	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if zb0002 == 0 {
		*z = nil
		return bts, nil
	}
	*z = make(dataUsageHashMap, zb0002)
	for i := uint32(0); i < zb0002; i++ {
		{
			var zb0003 string
			zb0003, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
			(*z)[zb0003] = struct{}{}
		}
	}
	o = bts
	return
}
|
|
|
|
|
|
|
|
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z dataUsageHashMap) Msgsize() (s int) {
	s = msgp.ArrayHeaderSize
	for zb0004 := range z {
		s += msgp.StringPrefixSize + len(zb0004)
	}
	return
}
|