Mirror of https://github.com/minio/minio.git (synced 2025-11-10 14:09:48 -05:00)

ilm: Make per-tier stats available via admin-tier-info (#13381)

Commit 939fbb3c38, parent 3b9dfa9d29, committed via GitHub.
@@ -192,6 +192,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler)))
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.ListTierHandler)))

		// Tier stats
		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier-stats").HandlerFunc(gz(httpTraceHdrs(adminAPI.TierStatsHandler)))

		// Cluster Replication APIs
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/add").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationAdd)))
		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/site-replication/disable").HandlerFunc(gz(httpTraceHdrs(adminAPI.SiteReplicationDisable)))
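Each admin route, including the new GET .../tier-stats above, is registered through the same wrapper chain: the handler is wrapped for HTTP tracing and response compression before being attached to the router. A minimal standalone sketch of that composition pattern (generic net/http, illustrative wrapper names, compression details omitted; not MinIO's actual helpers):

package main

import (
	"log"
	"net/http"
)

// withTrace logs each request before delegating, loosely mirroring httpTraceHdrs.
func withTrace(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s %s", r.Method, r.URL.Path)
		h(w, r)
	}
}

// withCompression stands in for the gz wrapper; a real implementation would
// negotiate Accept-Encoding and wrap the ResponseWriter in a gzip.Writer.
func withCompression(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Vary", "Accept-Encoding")
		h(w, r)
	}
}

func tierStatsHandler(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte(`[]`)) // placeholder body
}

func main() {
	// Composition order mirrors gz(httpTraceHdrs(handler)).
	http.HandleFunc("/minio/admin/v3/tier-stats", withCompression(withTrace(tierStatsHandler)))
	log.Fatal(http.ListenAndServe(":8080", nil))
}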
@@ -838,6 +838,7 @@ type sizeSummary struct {
	pendingCount    uint64
	failedCount     uint64
	replTargetStats map[string]replTargetSizeSummary
	tiers           map[string]tierStats
}

// replTargetSizeSummary holds summary of replication stats by target
@@ -31,6 +31,7 @@ import (

	"github.com/cespare/xxhash/v2"
	"github.com/klauspost/compress/zstd"
	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/bucket/lifecycle"
	"github.com/minio/minio/internal/hash"
	"github.com/minio/minio/internal/logger"
@@ -45,16 +46,70 @@ type dataUsageHash string
// sizeHistogram is a size histogram.
type sizeHistogram [dataUsageBucketLen]uint64

//msgp:tuple dataUsageEntry
type dataUsageEntry struct {
	Children dataUsageHashMap
	Children dataUsageHashMap `msg:"ch"`
	// These fields do no include any children.
	Size             int64
	Objects          uint64
	Versions         uint64 // Versions that are not delete markers.
	ObjSizes         sizeHistogram
	ReplicationStats *replicationAllStats
	Compacted        bool
	Size             int64                `msg:"sz"`
	Objects          uint64               `msg:"os"`
	Versions         uint64               `msg:"vs"` // Versions that are not delete markers.
	ObjSizes         sizeHistogram        `msg:"szs"`
	ReplicationStats *replicationAllStats `msg:"rs,omitempty"`
	AllTierStats     *allTierStats        `msg:"ats,omitempty"`
	Compacted        bool                 `msg:"c"`
}

// allTierStats is a collection of per-tier stats across all configured remote
// tiers.
type allTierStats struct {
	Tiers map[string]tierStats `msg:"ts"`
}

func newAllTierStats() *allTierStats {
	return &allTierStats{
		Tiers: make(map[string]tierStats),
	}
}

func (ats *allTierStats) addSizes(sz sizeSummary) {
	for tier, st := range sz.tiers {
		ats.Tiers[tier] = ats.Tiers[tier].add(st)
	}
}

func (ats *allTierStats) merge(other *allTierStats) {
	for tier, st := range other.Tiers {
		ats.Tiers[tier] = ats.Tiers[tier].add(st)
	}
}

func (ats *allTierStats) adminStats(stats map[string]madmin.TierStats) map[string]madmin.TierStats {
	if ats == nil {
		return stats
	}

	// Update stats for tiers as they become available.
	for tier, st := range ats.Tiers {
		stats[tier] = madmin.TierStats{
			TotalSize:   st.TotalSize,
			NumVersions: st.NumVersions,
			NumObjects:  st.NumObjects,
		}
	}
	return stats
}

// tierStats holds per-tier stats of a remote tier.
type tierStats struct {
	TotalSize   uint64 `msg:"ts"`
	NumVersions int    `msg:"nv"`
	NumObjects  int    `msg:"no"`
}

func (ts tierStats) add(u tierStats) tierStats {
	ts.TotalSize += u.TotalSize
	ts.NumVersions += u.NumVersions
	ts.NumObjects += u.NumObjects
	return ts
}

//msgp:tuple replicationStatsV1
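A minimal standalone sketch of how these per-tier counters roll up across buckets (simplified local copies of tierStats/allTierStats so it compiles on its own; the tier names are made up):

package main

import "fmt"

// tierStats mirrors the per-tier counters tracked by the scanner.
type tierStats struct {
	TotalSize   uint64
	NumVersions int
	NumObjects  int
}

func (ts tierStats) add(u tierStats) tierStats {
	ts.TotalSize += u.TotalSize
	ts.NumVersions += u.NumVersions
	ts.NumObjects += u.NumObjects
	return ts
}

// allTierStats aggregates tierStats keyed by tier name.
type allTierStats struct {
	Tiers map[string]tierStats
}

func (ats *allTierStats) merge(other allTierStats) {
	for tier, st := range other.Tiers {
		ats.Tiers[tier] = ats.Tiers[tier].add(st)
	}
}

func main() {
	// Two buckets' worth of stats merged into a cluster-wide view.
	bucketA := allTierStats{Tiers: map[string]tierStats{
		"STANDARD": {TotalSize: 1 << 20, NumVersions: 10, NumObjects: 8},
		"GLACIER":  {TotalSize: 4 << 20, NumVersions: 3, NumObjects: 3},
	}}
	bucketB := allTierStats{Tiers: map[string]tierStats{
		"STANDARD": {TotalSize: 2 << 20, NumVersions: 5, NumObjects: 5},
	}}

	total := allTierStats{Tiers: map[string]tierStats{}}
	total.merge(bucketA)
	total.merge(bucketB)
	fmt.Printf("%+v\n", total.Tiers)
}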
@@ -96,14 +151,19 @@ func (rs replicationStats) Empty() bool {
		rs.FailedCount == 0
}

//msgp:tuple replicationAllStats
type replicationAllStats struct {
	Targets     map[string]replicationStats `msg:"t,omitempty"`
	ReplicaSize uint64                      `msg:"r,omitempty"`
}

//msgp:tuple replicationAllStatsV1
type replicationAllStatsV1 struct {
	Targets     map[string]replicationStats
	ReplicaSize uint64 `msg:"ReplicaSize,omitempty"`
}

//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4
//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4
//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6
//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6

//msgp:tuple dataUsageEntryV2
type dataUsageEntryV2 struct {
@@ -149,6 +209,18 @@ type dataUsageEntryV5 struct {
	Compacted bool
}

//msgp:tuple dataUsageEntryV6
type dataUsageEntryV6 struct {
	Children dataUsageHashMap
	// These fields do no include any children.
	Size             int64
	Objects          uint64
	Versions         uint64 // Versions that are not delete markers.
	ObjSizes         sizeHistogram
	ReplicationStats *replicationAllStatsV1
	Compacted        bool
}

// dataUsageCache contains a cache of data usage entries latest version.
type dataUsageCache struct {
	Info dataUsageCacheInfo
@@ -156,8 +228,8 @@ type dataUsageCache struct {
	Disks []string
}

//msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5
//msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5
//msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6
//msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6

// dataUsageCacheV2 contains a cache of data usage entries version 2.
type dataUsageCacheV2 struct {
@@ -166,27 +238,34 @@ type dataUsageCacheV2 struct {
	Cache map[string]dataUsageEntryV2
}

// dataUsageCache contains a cache of data usage entries version 3.
// dataUsageCacheV3 contains a cache of data usage entries version 3.
type dataUsageCacheV3 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV3
}

// dataUsageCache contains a cache of data usage entries version 4.
// dataUsageCacheV4 contains a cache of data usage entries version 4.
type dataUsageCacheV4 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV4
}

// dataUsageCache contains a cache of data usage entries version 5.
// dataUsageCacheV5 contains a cache of data usage entries version 5.
type dataUsageCacheV5 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV5
}

// dataUsageCacheV6 contains a cache of data usage entries version 6.
type dataUsageCacheV6 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV6
}

//msgp:ignore dataUsageEntryInfo
type dataUsageEntryInfo struct {
	Name string
@@ -242,6 +321,12 @@ func (e *dataUsageEntry) addSizes(summary sizeSummary) {
			e.ReplicationStats.Targets[arn] = tgtStat
		}
	}
	if summary.tiers != nil {
		if e.AllTierStats == nil {
			e.AllTierStats = newAllTierStats()
		}
		e.AllTierStats.addSizes(summary)
	}
}

// merge other data usage entry into this, excluding children.
@@ -271,6 +356,13 @@ func (e *dataUsageEntry) merge(other dataUsageEntry) {
	for i, v := range other.ObjSizes[:] {
		e.ObjSizes[i] += v
	}

	if other.AllTierStats != nil {
		if e.AllTierStats == nil {
			e.AllTierStats = newAllTierStats()
		}
		e.AllTierStats.merge(other.AllTierStats)
	}
}

// mod returns true if the hash mod cycles == cycle.
@@ -317,6 +409,11 @@ func (e dataUsageEntry) clone() dataUsageEntry {
		r := *e.ReplicationStats
		e.ReplicationStats = &r
	}
	if e.AllTierStats != nil {
		ats := newAllTierStats()
		ats.merge(e.AllTierStats)
		e.AllTierStats = ats
	}
	return e
}

@@ -438,6 +535,7 @@ func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo {
		ObjectsTotalSize: uint64(flat.Size),
		BucketsCount:     uint64(len(e.Children)),
		BucketsUsage:     d.bucketsUsageInfo(buckets),
		TierStats:        d.tiersUsageInfo(buckets),
	}
	return dui
}
@@ -654,6 +752,25 @@ func (h *sizeHistogram) toMap() map[string]uint64 {
	return res
}

func (d *dataUsageCache) tiersUsageInfo(buckets []BucketInfo) *allTierStats {
	dst := newAllTierStats()
	for _, bucket := range buckets {
		e := d.find(bucket.Name)
		if e == nil {
			continue
		}
		flat := d.flatten(*e)
		if flat.AllTierStats == nil {
			continue
		}
		dst.merge(flat.AllTierStats)
	}
	if len(dst.Tiers) == 0 {
		return nil
	}
	return dst
}

// bucketsUsageInfo returns the buckets usage info as a map, with
// key as bucket name
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo {
@@ -857,7 +974,8 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const (
	dataUsageCacheVerCurrent = 6
	dataUsageCacheVerCurrent = 7
	dataUsageCacheVerV6      = 6
	dataUsageCacheVerV5      = 5
	dataUsageCacheVerV4      = 4
	dataUsageCacheVerV3      = 3
@@ -1086,6 +1204,40 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
			d.Cache[k] = e
		}
		return nil
	case dataUsageCacheVerV6:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		dold := &dataUsageCacheV6{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Disks = dold.Disks
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			var replicationStats *replicationAllStats
			if v.ReplicationStats != nil {
				replicationStats = &replicationAllStats{
					Targets:     v.ReplicationStats.Targets,
					ReplicaSize: v.ReplicationStats.ReplicaSize,
				}
			}
			due := dataUsageEntry{
				Children:         v.Children,
				Size:             v.Size,
				Objects:          v.Objects,
				Versions:         v.Versions,
				ObjSizes:         v.ObjSizes,
				ReplicationStats: replicationStats,
				Compacted:        v.Compacted,
			}
			d.Cache[k] = due
		}
		return nil
	case dataUsageCacheVerCurrent:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
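The bump from dataUsageCacheVerCurrent 6 to 7 follows the existing migration pattern: keep a frozen copy of the previous struct layout (dataUsageCacheV6), decode it when an old cache is read, and convert it into the current type. A small standalone sketch of that versioned-decode pattern (illustrative types, JSON instead of msgp/zstd so it stays self-contained):

package main

import (
	"encoding/json"
	"fmt"
)

// entryV6 is the frozen previous on-disk layout.
type entryV6 struct {
	Size    int64  `json:"sz"`
	Objects uint64 `json:"os"`
}

// entry is the current layout, which adds per-tier sizes.
type entry struct {
	Size     int64             `json:"sz"`
	Objects  uint64            `json:"os"`
	TierSize map[string]uint64 `json:"ats,omitempty"`
}

// decode picks a decoder based on a version number, upgrading old payloads in place.
func decode(ver int, payload []byte) (entry, error) {
	switch ver {
	case 6:
		var old entryV6
		if err := json.Unmarshal(payload, &old); err != nil {
			return entry{}, err
		}
		// Convert: existing fields carried over, new fields left at their zero values.
		return entry{Size: old.Size, Objects: old.Objects}, nil
	case 7:
		var cur entry
		err := json.Unmarshal(payload, &cur)
		return cur, err
	default:
		return entry{}, fmt.Errorf("unknown cache version %d", ver)
	}
}

func main() {
	oldPayload := []byte(`{"sz":1048576,"os":42}`)
	e, err := decode(6, oldPayload)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", e)
}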
(File diff suppressed because it is too large.)
@@ -9,6 +9,119 @@ import (
	"github.com/tinylib/msgp/msgp"
)

func TestMarshalUnmarshalallTierStats(t *testing.T) {
	v := allTierStats{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgallTierStats(b *testing.B) {
	v := allTierStats{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgallTierStats(b *testing.B) {
	v := allTierStats{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalallTierStats(b *testing.B) {
	v := allTierStats{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodeallTierStats(t *testing.T) {
	v := allTierStats{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeallTierStats Msgsize() is inaccurate")
	}

	vn := allTierStats{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodeallTierStats(b *testing.B) {
	v := allTierStats{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodeallTierStats(b *testing.B) {
	v := allTierStats{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshaldataUsageCache(t *testing.T) {
	v := dataUsageCache{}
	bts, err := v.MarshalMsg(nil)
@@ -348,119 +461,6 @@ func BenchmarkDecodedataUsageEntry(b *testing.B) {
	}
}

func TestMarshalUnmarshaldataUsageEntryV5(t *testing.T) {
	v := dataUsageEntryV5{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgdataUsageEntryV5(b *testing.B) {
	v := dataUsageEntryV5{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgdataUsageEntryV5(b *testing.B) {
	v := dataUsageEntryV5{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshaldataUsageEntryV5(b *testing.B) {
	v := dataUsageEntryV5{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodedataUsageEntryV5(t *testing.T) {
	v := dataUsageEntryV5{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodedataUsageEntryV5 Msgsize() is inaccurate")
	}

	vn := dataUsageEntryV5{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodedataUsageEntryV5(b *testing.B) {
	v := dataUsageEntryV5{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodedataUsageEntryV5(b *testing.B) {
	v := dataUsageEntryV5{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalreplicationAllStats(t *testing.T) {
	v := replicationAllStats{}
	bts, err := v.MarshalMsg(nil)
@@ -574,6 +574,119 @@ func BenchmarkDecodereplicationAllStats(b *testing.B) {
	}
}

func TestMarshalUnmarshalreplicationAllStatsV1(t *testing.T) {
	v := replicationAllStatsV1{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgreplicationAllStatsV1(b *testing.B) {
	v := replicationAllStatsV1{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgreplicationAllStatsV1(b *testing.B) {
	v := replicationAllStatsV1{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshalreplicationAllStatsV1(b *testing.B) {
	v := replicationAllStatsV1{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodereplicationAllStatsV1(t *testing.T) {
	v := replicationAllStatsV1{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodereplicationAllStatsV1 Msgsize() is inaccurate")
	}

	vn := replicationAllStatsV1{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodereplicationAllStatsV1(b *testing.B) {
	v := replicationAllStatsV1{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodereplicationAllStatsV1(b *testing.B) {
	v := replicationAllStatsV1{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestMarshalUnmarshalreplicationStats(t *testing.T) {
	v := replicationStats{}
	bts, err := v.MarshalMsg(nil)
@@ -912,3 +1025,116 @@ func BenchmarkDecodesizeHistogram(b *testing.B) {
		}
	}
}

func TestMarshalUnmarshaltierStats(t *testing.T) {
	v := tierStats{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsgtierStats(b *testing.B) {
	v := tierStats{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsgtierStats(b *testing.B) {
	v := tierStats{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshaltierStats(b *testing.B) {
	v := tierStats{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func TestEncodeDecodetierStats(t *testing.T) {
	v := tierStats{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodetierStats Msgsize() is inaccurate")
	}

	vn := tierStats{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncodetierStats(b *testing.B) {
	v := tierStats{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecodetierStats(b *testing.B) {
	v := tierStats{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}
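The round-trip tests above are emitted by the msgp code generator rather than written by hand; adding allTierStats, tierStats, and replicationAllStatsV1 to the annotated source file regenerates the matching Test/Benchmark pairs. A hedged sketch of the kind of generate hook that drives this (flags are illustrative; the exact directive lives at the top of the annotated file in the MinIO tree):

package cmd

// Illustrative go:generate hook; re-run `go generate` after editing any
// msgp-annotated struct to refresh the _gen.go and _gen_test.go files.
//go:generate msgp -file $GOFILE -unexported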
@@ -18,7 +18,10 @@
package cmd

import (
	"sort"
	"time"

	"github.com/minio/madmin-go"
)

// BucketTargetUsageInfo - bucket target usage info provides
@@ -81,7 +84,50 @@ type DataUsageInfo struct {
	// - total objects in a bucket
	// - object size histogram per bucket
	BucketsUsage map[string]BucketUsageInfo `json:"bucketsUsageInfo"`

	// Deprecated kept here for backward compatibility reasons.
	BucketSizes map[string]uint64 `json:"bucketsSizes"`

	// TierStats contains per-tier stats of all configured remote tiers
	TierStats *allTierStats `json:"tierStats,omitempty"`
}

func (dui DataUsageInfo) tierStats() []madmin.TierInfo {
	if globalTierConfigMgr.Empty() {
		return nil
	}

	ts := make(map[string]madmin.TierStats)
	// Add configured remote tiers
	for tier := range globalTierConfigMgr.Tiers {
		ts[tier] = madmin.TierStats{}
	}
	// Add STANDARD (hot-tier)
	ts[minioHotTier] = madmin.TierStats{}

	ts = dui.TierStats.adminStats(ts)
	infos := make([]madmin.TierInfo, 0, len(ts))
	for tier, st := range ts {
		var tierType string
		if tier == minioHotTier {
			tierType = "internal"
		} else {
			tierType = globalTierConfigMgr.Tiers[tier].Type.String()
		}
		infos = append(infos, madmin.TierInfo{
			Name:  tier,
			Type:  tierType,
			Stats: st,
		})
	}

	sort.Slice(infos, func(i, j int) bool {
		if infos[i].Type == "internal" {
			return true
		}
		if infos[j].Type == "internal" {
			return false
		}
		return infos[i].Name < infos[j].Name
	})
	return infos
}
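tierStats() always lists the internal hot tier first and the remote tiers alphabetically after it. A small standalone sketch of that ordering rule (hypothetical tier names, plain structs instead of madmin.TierInfo):

package main

import (
	"fmt"
	"sort"
)

type tierInfo struct {
	Name string
	Type string // "internal" for the hot tier, otherwise the remote tier type
}

func main() {
	infos := []tierInfo{
		{Name: "WARM-MINIO", Type: "minio"},
		{Name: "STANDARD", Type: "internal"},
		{Name: "COLD-S3", Type: "s3"},
	}

	// Same comparator as the diff: internal tier first, then by name.
	sort.Slice(infos, func(i, j int) bool {
		if infos[i].Type == "internal" {
			return true
		}
		if infos[j].Type == "internal" {
			return false
		}
		return infos[i].Name < infos[j].Name
	})

	for _, ti := range infos {
		fmt.Printf("%-12s %s\n", ti.Name, ti.Type)
	}
}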
@@ -225,6 +225,18 @@ func (o ObjectInfo) Clone() (cinfo ObjectInfo) {
	return cinfo
}

func (o ObjectInfo) tierStats() tierStats {
	ts := tierStats{
		TotalSize:   uint64(o.Size),
		NumVersions: 1,
	}
	// the current version of an object is accounted towards objects count
	if o.IsLatest {
		ts.NumObjects = 1
	}
	return ts
}

// ReplicateObjectInfo represents object info to be replicated
type ReplicateObjectInfo struct {
	ObjectInfo
@@ -25,6 +25,7 @@ import (
	"github.com/gorilla/mux"
	jsoniter "github.com/json-iterator/go"
	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/config/storageclass"
	"github.com/minio/minio/internal/logger"
	iampolicy "github.com/minio/pkg/iam/policy"
)
@@ -60,6 +61,12 @@ var (
		Message:    "Invalid remote tier credentials",
		StatusCode: http.StatusBadRequest,
	}
	// error returned when reserved internal names are used.
	errTierReservedName = AdminError{
		Code:       "XMinioAdminTierReserved",
		Message:    "Cannot use reserved tier name",
		StatusCode: http.StatusBadRequest,
	}
)

func (api adminAPIHandlers) AddTierHandler(w http.ResponseWriter, r *http.Request) {
@@ -92,6 +99,12 @@ func (api adminAPIHandlers) AddTierHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	// Disallow remote tiers with internal storage class names
	switch cfg.Name {
	case storageclass.STANDARD, storageclass.RRS:
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errTierReservedName), r.URL)
		return
	}
	// Refresh from the disk in case we had missed notifications about edits from peers.
	if err := globalTierConfigMgr.Reload(ctx, objAPI); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -191,3 +204,33 @@ func (api adminAPIHandlers) EditTierHandler(w http.ResponseWriter, r *http.Request) {

	writeSuccessNoContent(w)
}

func (api adminAPIHandlers) TierStatsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "TierStats")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	if !globalIsErasure {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	objAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListTierAction)
	if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	dui, err := loadDataUsageFromBackend(ctx, objAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	data, err := json.Marshal(dui.tierStats())
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	writeSuccessResponseJSON(w, data)
}
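TierStatsHandler responds with the JSON-encoded []madmin.TierInfo built by DataUsageInfo.tierStats(). A hedged client-side sketch of consuming that endpoint (the TierStats method on madmin.AdminClient is assumed here for illustration and not confirmed by this diff; endpoint and credentials are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/madmin-go"
)

func main() {
	// Placeholder endpoint and credentials.
	adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
	if err != nil {
		log.Fatal(err)
	}

	// Assumed wrapper around GET <adminVersion>/tier-stats; the method name
	// is illustrative only.
	infos, err := adm.TierStats(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, ti := range infos {
		fmt.Printf("%s (%s): %d objects, %d versions, %d bytes\n",
			ti.Name, ti.Type, ti.Stats.NumObjects, ti.Stats.NumVersions, ti.Stats.TotalSize)
	}
}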
@@ -46,6 +46,8 @@
	tierConfigFile    = "tier-config.bin"
	tierConfigFormat  = 1
	tierConfigVersion = 1

	minioHotTier = "STANDARD"
)

// tierConfigPath refers to remote tier config object name
@@ -85,7 +87,6 @@ func (config *TierConfigMgr) Add(ctx context.Context, tier madmin.TierConfig) error {
	defer config.Unlock()

	// check if tier name is in all caps

	tierName := tier.Name
	if tierName != strings.ToUpper(tierName) {
		return errTierNameNotUppercase
@@ -463,6 +463,10 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
			return sizeSummary{}, errSkipFile
		}
		sizeS := sizeSummary{}
		var noTiers bool
		if noTiers = globalTierConfigMgr.Empty(); !noTiers {
			sizeS.tiers = make(map[string]tierStats)
		}
		atomic.AddUint64(&globalScannerStats.accTotalObjects, 1)
		for _, version := range fivs.Versions {
			atomic.AddUint64(&globalScannerStats.accTotalVersions, 1)
@@ -472,6 +476,21 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
				sizeS.versions++
			}
			sizeS.totalSize += sz

			// Skip tier accounting if,
			// 1. no tiers configured
			// 2. object version is a delete-marker or a free-version
			//    tracking deleted transitioned objects
			switch {
			case noTiers, oi.DeleteMarker, oi.TransitionedObject.FreeVersion:
				continue
			}
			tier := minioHotTier
			if oi.TransitionedObject.Status == lifecycle.TransitionComplete {
				tier = oi.TransitionedObject.Tier
			}
			sizeS.tiers[tier] = sizeS.tiers[tier].add(oi.tierStats())
		}
		return sizeS, nil
	})
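Putting the pieces together: for every scanned version the scanner attributes a tierStats entry either to the internal STANDARD tier or to the remote tier the version was transitioned to, skipping delete markers and free versions, and counting an object only for its latest version. A standalone sketch of that attribution logic (simplified version records and hypothetical tier names, not the actual scanner code):

package main

import "fmt"

type tierStats struct {
	TotalSize   uint64
	NumVersions int
	NumObjects  int
}

func (ts tierStats) add(u tierStats) tierStats {
	ts.TotalSize += u.TotalSize
	ts.NumVersions += u.NumVersions
	ts.NumObjects += u.NumObjects
	return ts
}

// version is a simplified stand-in for a scanned object version.
type version struct {
	Size         uint64
	IsLatest     bool
	DeleteMarker bool
	Transitioned string // remote tier name if transitioned, "" otherwise
}

func main() {
	const hotTier = "STANDARD"
	versions := []version{
		{Size: 100, IsLatest: true, Transitioned: "COLD-S3"}, // latest, already tiered
		{Size: 80},                  // noncurrent, still on the hot tier
		{DeleteMarker: true},        // skipped entirely
	}

	tiers := make(map[string]tierStats)
	for _, v := range versions {
		if v.DeleteMarker {
			continue // delete markers (and free versions) are not accounted
		}
		ts := tierStats{TotalSize: v.Size, NumVersions: 1}
		if v.IsLatest {
			ts.NumObjects = 1
		}
		tier := hotTier
		if v.Transitioned != "" {
			tier = v.Transitioned
		}
		tiers[tier] = tiers[tier].add(ts)
	}
	fmt.Printf("%+v\n", tiers)
}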