remove replication stats from data usage cache (#20524)

The replication stats kept in the data usage cache are no longer needed, since historical replication stats are no longer maintained.
Poorna 2024-10-04 15:23:33 -07:00 committed by GitHub
parent cbfe9de3e7
commit 28322124e2
GPG Key ID: B5690EEEBB952194
5 changed files with 74 additions and 1922 deletions
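
The compatibility story is worth noting before the diff: dataUsageEntry is map-encoded by msgp (short keys such as "sz", "os", and formerly "rs"), and msgp-generated decoders skip map keys they no longer recognize, so a cache written just before this change, still carrying "rs", should decode cleanly (assuming the cache version itself is unchanged). A minimal, self-contained sketch of that skipping behavior using raw msgp helpers — illustrative only; the real code uses generated methods:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Build a map-encoded entry the way an old cache might look on disk:
	// a stale "rs" key (replication stats) next to a surviving "sz" key.
	var b []byte
	b = msgp.AppendMapHeader(b, 2)
	b = msgp.AppendString(b, "rs")
	b = msgp.AppendNil(b) // stale field, unknown to the trimmed struct
	b = msgp.AppendString(b, "sz")
	b = msgp.AppendInt64(b, 42)

	// Decode as the trimmed struct would: skip unknown keys.
	var size int64
	n, b, _ := msgp.ReadMapHeaderBytes(b)
	for i := uint32(0); i < n; i++ {
		key, rest, _ := msgp.ReadStringBytes(b)
		b = rest
		switch key {
		case "sz":
			size, b, _ = msgp.ReadInt64Bytes(b)
		default:
			b, _ = msgp.Skip(b) // what generated code does for removed fields
		}
	}
	fmt.Println("size:", size) // size: 42
}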


@@ -57,15 +57,14 @@ type versionsHistogram [dataUsageVersionLen]uint64
type dataUsageEntry struct {
Children dataUsageHashMap `msg:"ch"`
// These fields do not include any children.
Size int64 `msg:"sz"`
Objects uint64 `msg:"os"`
Versions uint64 `msg:"vs"` // Versions that are not delete markers.
DeleteMarkers uint64 `msg:"dms"`
ObjSizes sizeHistogram `msg:"szs"`
ObjVersions versionsHistogram `msg:"vh"`
ReplicationStats *replicationAllStats `msg:"rs,omitempty"`
AllTierStats *allTierStats `msg:"ats,omitempty"`
Compacted bool `msg:"c"`
Size int64 `msg:"sz"`
Objects uint64 `msg:"os"`
Versions uint64 `msg:"vs"` // Versions that are not delete markers.
DeleteMarkers uint64 `msg:"dms"`
ObjSizes sizeHistogram `msg:"szs"`
ObjVersions versionsHistogram `msg:"vh"`
AllTierStats *allTierStats `msg:"ats,omitempty"`
Compacted bool `msg:"c"`
}
// allTierStats is a collection of per-tier stats across all configured remote
@@ -135,93 +134,6 @@ func (ts tierStats) add(u tierStats) tierStats {
}
}
//msgp:tuple replicationStatsV1
type replicationStatsV1 struct {
PendingSize uint64
ReplicatedSize uint64
FailedSize uint64
ReplicaSize uint64
FailedCount uint64
PendingCount uint64
MissedThresholdSize uint64
AfterThresholdSize uint64
MissedThresholdCount uint64
AfterThresholdCount uint64
}
func (rsv1 replicationStatsV1) Empty() bool {
return rsv1.ReplicatedSize == 0 &&
rsv1.FailedSize == 0 &&
rsv1.FailedCount == 0
}
//msgp:tuple replicationStats
type replicationStats struct {
PendingSize uint64
ReplicatedSize uint64
FailedSize uint64
FailedCount uint64
PendingCount uint64
MissedThresholdSize uint64
AfterThresholdSize uint64
MissedThresholdCount uint64
AfterThresholdCount uint64
ReplicatedCount uint64
}
func (rs replicationStats) Empty() bool {
return rs.ReplicatedSize == 0 &&
rs.FailedSize == 0 &&
rs.FailedCount == 0
}
type replicationAllStats struct {
Targets map[string]replicationStats `msg:"t,omitempty"`
ReplicaSize uint64 `msg:"r,omitempty"`
ReplicaCount uint64 `msg:"rc,omitempty"`
}
//msgp:tuple replicationAllStatsV1
type replicationAllStatsV1 struct {
Targets map[string]replicationStats
ReplicaSize uint64 `msg:"ReplicaSize,omitempty"`
ReplicaCount uint64 `msg:"ReplicaCount,omitempty"`
}
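
Note the two encodings in play among the removed types: the //msgp:tuple structs serialize as positional arrays (compact, but every field must keep its slot), while replicationAllStats is map-encoded with short keys, which is what lets fields be dropped across versions. A rough, standalone illustration of the wire-format difference (field choices invented for illustration):

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Tuple encoding: a positional array, no field names on the wire.
	var tup []byte
	tup = msgp.AppendArrayHeader(tup, 2)
	tup = msgp.AppendUint64(tup, 1) // e.g. PendingSize
	tup = msgp.AppendUint64(tup, 2) // e.g. ReplicatedSize

	// Map encoding: key/value pairs; decoders can skip unknown keys,
	// so fields can be added or removed between versions.
	var m []byte
	m = msgp.AppendMapHeader(m, 2)
	m = msgp.AppendString(m, "r") // ReplicaSize tag
	m = msgp.AppendUint64(m, 1)
	m = msgp.AppendString(m, "rc") // ReplicaCount tag
	m = msgp.AppendUint64(m, 2)

	fmt.Printf("tuple: %d bytes, map: %d bytes\n", len(tup), len(m))
}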
// empty returns true if the replicationAllStats is empty (contains no entries).
func (r *replicationAllStats) empty() bool {
if r == nil {
return true
}
if r.ReplicaSize != 0 || r.ReplicaCount != 0 {
return false
}
for _, v := range r.Targets {
if !v.Empty() {
return false
}
}
return true
}
// clone creates a deep-copy clone.
func (r *replicationAllStats) clone() *replicationAllStats {
if r == nil {
return nil
}
// Shallow copy
dst := *r
// Copy individual targets.
dst.Targets = make(map[string]replicationStats, len(r.Targets))
for k, v := range r.Targets {
dst.Targets[k] = v
}
return &dst
}
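
The hand-rolled clone above existed because Go maps are reference types: the shallow struct copy (dst := *r) would otherwise alias Targets between source and clone. A tiny standalone demonstration of the pitfall it guarded against:

package main

import "fmt"

type allStats struct {
	Targets map[string]uint64 // stand-in for map[string]replicationStats
}

func main() {
	src := allStats{Targets: map[string]uint64{"arn:a": 1}}
	dst := src // shallow copy: dst.Targets and src.Targets are the same map
	dst.Targets["arn:a"] = 99
	fmt.Println(src.Targets["arn:a"]) // 99 — the "copy" mutated the original
}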
//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6 dataUsageEntryV7
//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6 dataUsageEntryV7
@@ -237,62 +149,54 @@ type dataUsageEntryV2 struct {
//msgp:tuple dataUsageEntryV3
type dataUsageEntryV3 struct {
// These fields do not include any children.
Size int64
ReplicatedSize uint64
ReplicationPendingSize uint64
ReplicationFailedSize uint64
ReplicaSize uint64
Objects uint64
ObjSizes sizeHistogram
Children dataUsageHashMap
Size int64
Objects uint64
ObjSizes sizeHistogram
Children dataUsageHashMap
}
//msgp:tuple dataUsageEntryV4
type dataUsageEntryV4 struct {
Children dataUsageHashMap
// These fields do not include any children.
Size int64
Objects uint64
ObjSizes sizeHistogram
ReplicationStats replicationStatsV1
Size int64
Objects uint64
ObjSizes sizeHistogram
}
//msgp:tuple dataUsageEntryV5
type dataUsageEntryV5 struct {
Children dataUsageHashMap
// These fields do not include any children.
Size int64
Objects uint64
Versions uint64 // Versions that are not delete markers.
ObjSizes sizeHistogram
ReplicationStats *replicationStatsV1
Compacted bool
Size int64
Objects uint64
Versions uint64 // Versions that are not delete markers.
ObjSizes sizeHistogram
Compacted bool
}
//msgp:tuple dataUsageEntryV6
type dataUsageEntryV6 struct {
Children dataUsageHashMap
// These fields do not include any children.
Size int64
Objects uint64
Versions uint64 // Versions that are not delete markers.
ObjSizes sizeHistogram
ReplicationStats *replicationAllStatsV1
Compacted bool
Size int64
Objects uint64
Versions uint64 // Versions that are not delete markers.
ObjSizes sizeHistogram
Compacted bool
}
type dataUsageEntryV7 struct {
Children dataUsageHashMap `msg:"ch"`
// These fields do not include any children.
Size int64 `msg:"sz"`
Objects uint64 `msg:"os"`
Versions uint64 `msg:"vs"` // Versions that are not delete markers.
DeleteMarkers uint64 `msg:"dms"`
ObjSizes sizeHistogramV1 `msg:"szs"`
ObjVersions versionsHistogram `msg:"vh"`
ReplicationStats *replicationAllStats `msg:"rs,omitempty"`
AllTierStats *allTierStats `msg:"ats,omitempty"`
Compacted bool `msg:"c"`
Size int64 `msg:"sz"`
Objects uint64 `msg:"os"`
Versions uint64 `msg:"vs"` // Versions that are not delete markers.
DeleteMarkers uint64 `msg:"dms"`
ObjSizes sizeHistogramV1 `msg:"szs"`
ObjVersions versionsHistogram `msg:"vh"`
AllTierStats *allTierStats `msg:"ats,omitempty"`
Compacted bool `msg:"c"`
}
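
The V2–V7 shapes survive only so old on-disk caches can still be read; the //msgp:encode ignore and //msgp:marshal ignore directives above keep msgp from generating encoders for them. The deserialize cases further down then copy each legacy layout forward field by field. A trimmed, hypothetical sketch of that decode-only migration pattern (names and fields invented):

package main

import "fmt"

// entryV7 stands in for a decode-only legacy layout: it is never
// written again, only read from old caches.
type entryV7 struct {
	Size    int64
	Objects uint64
	// ...replication fields would have lived here...
}

// entry stands in for the current layout.
type entry struct {
	Size    int64
	Objects uint64
}

// upgrade copies the surviving fields forward and drops the rest,
// mirroring the per-version cases in deserialize below.
func upgrade(old entryV7) entry {
	return entry{Size: old.Size, Objects: old.Objects}
}

func main() {
	fmt.Printf("%+v\n", upgrade(entryV7{Size: 42, Objects: 7}))
}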
// dataUsageCache contains a cache of data usage entries, latest version.
@@ -373,29 +277,6 @@ func (e *dataUsageEntry) addSizes(summary sizeSummary) {
e.ObjSizes.add(summary.totalSize)
e.ObjVersions.add(summary.versions)
if e.ReplicationStats == nil {
e.ReplicationStats = &replicationAllStats{
Targets: make(map[string]replicationStats),
}
} else if e.ReplicationStats.Targets == nil {
e.ReplicationStats.Targets = make(map[string]replicationStats)
}
e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
e.ReplicationStats.ReplicaCount += uint64(summary.replicaCount)
for arn, st := range summary.replTargetStats {
tgtStat, ok := e.ReplicationStats.Targets[arn]
if !ok {
tgtStat = replicationStats{}
}
tgtStat.PendingSize += uint64(st.pendingSize)
tgtStat.FailedSize += uint64(st.failedSize)
tgtStat.ReplicatedSize += uint64(st.replicatedSize)
tgtStat.ReplicatedCount += uint64(st.replicatedCount)
tgtStat.FailedCount += st.failedCount
tgtStat.PendingCount += st.pendingCount
e.ReplicationStats.Targets[arn] = tgtStat
}
if len(summary.tiers) != 0 {
if e.AllTierStats == nil {
e.AllTierStats = newAllTierStats()
@@ -410,26 +291,6 @@ func (e *dataUsageEntry) merge(other dataUsageEntry) {
e.Versions += other.Versions
e.DeleteMarkers += other.DeleteMarkers
e.Size += other.Size
if other.ReplicationStats != nil {
if e.ReplicationStats == nil {
e.ReplicationStats = &replicationAllStats{Targets: make(map[string]replicationStats)}
} else if e.ReplicationStats.Targets == nil {
e.ReplicationStats.Targets = make(map[string]replicationStats)
}
e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
e.ReplicationStats.ReplicaCount += other.ReplicationStats.ReplicaCount
for arn, stat := range other.ReplicationStats.Targets {
st := e.ReplicationStats.Targets[arn]
e.ReplicationStats.Targets[arn] = replicationStats{
PendingSize: stat.PendingSize + st.PendingSize,
FailedSize: stat.FailedSize + st.FailedSize,
ReplicatedSize: stat.ReplicatedSize + st.ReplicatedSize,
PendingCount: stat.PendingCount + st.PendingCount,
FailedCount: stat.FailedCount + st.FailedCount,
ReplicatedCount: stat.ReplicatedCount + st.ReplicatedCount,
}
}
}
for i, v := range other.ObjSizes[:] {
e.ObjSizes[i] += v
@@ -490,10 +351,7 @@ func (e dataUsageEntry) clone() dataUsageEntry {
}
e.Children = ch
}
if e.ReplicationStats != nil {
// Clone ReplicationStats
e.ReplicationStats = e.ReplicationStats.clone()
}
if e.AllTierStats != nil {
e.AllTierStats = e.AllTierStats.clone()
}
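
With the replication branch gone, clone only has to deep-copy Children and AllTierStats; the histogram fields need no special handling because they are fixed-size arrays, and Go arrays copy by value. A quick standalone contrast with the map case shown earlier:

package main

import "fmt"

func main() {
	// Fixed-size arrays (like sizeHistogram / versionsHistogram) are
	// values: copying the struct copies the array, no aliasing.
	type entry struct{ hist [4]uint64 }
	a := entry{hist: [4]uint64{1, 2, 3, 4}}
	b := a
	b.hist[0] = 99
	fmt.Println(a.hist[0]) // still 1 — unlike a map, no deep copy needed
}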
@@ -931,22 +789,6 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
ObjectSizesHistogram: flat.ObjSizes.toMap(),
ObjectVersionsHistogram: flat.ObjVersions.toMap(),
}
if flat.ReplicationStats != nil {
bui.ReplicaSize = flat.ReplicationStats.ReplicaSize
bui.ReplicaCount = flat.ReplicationStats.ReplicaCount
bui.ReplicationInfo = make(map[string]BucketTargetUsageInfo, len(flat.ReplicationStats.Targets))
for arn, stat := range flat.ReplicationStats.Targets {
bui.ReplicationInfo[arn] = BucketTargetUsageInfo{
ReplicationPendingSize: stat.PendingSize,
ReplicatedSize: stat.ReplicatedSize,
ReplicationFailedSize: stat.FailedSize,
ReplicationPendingCount: stat.PendingCount,
ReplicationFailedCount: stat.FailedCount,
ReplicatedCount: stat.ReplicatedCount,
}
}
}
dst[bucket.Name] = bui
}
return dst
@@ -959,9 +801,6 @@ func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
return root
}
flat := d.flatten(*root)
if flat.ReplicationStats.empty() {
flat.ReplicationStats = nil
}
return &flat
}
@@ -1238,20 +1077,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
ObjSizes: v.ObjSizes,
Children: v.Children,
}
if v.ReplicatedSize > 0 || v.ReplicaSize > 0 || v.ReplicationFailedSize > 0 || v.ReplicationPendingSize > 0 {
cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name)
if cfg != nil && cfg.RoleArn != "" {
due.ReplicationStats = &replicationAllStats{
Targets: make(map[string]replicationStats),
}
due.ReplicationStats.ReplicaSize = v.ReplicaSize
due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{
ReplicatedSize: v.ReplicatedSize,
FailedSize: v.ReplicationFailedSize,
PendingSize: v.ReplicationPendingSize,
}
}
}
due.Compacted = len(due.Children) == 0 && k != d.Info.Name
d.Cache[k] = due
@@ -1277,36 +1102,10 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
ObjSizes: v.ObjSizes,
Children: v.Children,
}
empty := replicationStatsV1{}
if v.ReplicationStats != empty {
cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name)
if cfg != nil && cfg.RoleArn != "" {
due.ReplicationStats = &replicationAllStats{
Targets: make(map[string]replicationStats),
}
due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{
ReplicatedSize: v.ReplicationStats.ReplicatedSize,
FailedSize: v.ReplicationStats.FailedSize,
FailedCount: v.ReplicationStats.FailedCount,
PendingSize: v.ReplicationStats.PendingSize,
PendingCount: v.ReplicationStats.PendingCount,
}
due.ReplicationStats.ReplicaSize = v.ReplicationStats.ReplicaSize
}
}
due.Compacted = len(due.Children) == 0 && k != d.Info.Name
d.Cache[k] = due
}
// Populate compacted value and remove unneeded replica stats.
for k, e := range d.Cache {
if e.ReplicationStats != nil && len(e.ReplicationStats.Targets) == 0 {
e.ReplicationStats = nil
}
d.Cache[k] = e
}
return nil
case dataUsageCacheVerV5:
// Zstd compressed.
@@ -1328,36 +1127,10 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
ObjSizes: v.ObjSizes,
Children: v.Children,
}
if v.ReplicationStats != nil && !v.ReplicationStats.Empty() {
cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name)
if cfg != nil && cfg.RoleArn != "" {
due.ReplicationStats = &replicationAllStats{
Targets: make(map[string]replicationStats),
}
d.Info.replication = replicationConfig{Config: cfg}
due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{
ReplicatedSize: v.ReplicationStats.ReplicatedSize,
FailedSize: v.ReplicationStats.FailedSize,
FailedCount: v.ReplicationStats.FailedCount,
PendingSize: v.ReplicationStats.PendingSize,
PendingCount: v.ReplicationStats.PendingCount,
}
due.ReplicationStats.ReplicaSize = v.ReplicationStats.ReplicaSize
}
}
due.Compacted = len(due.Children) == 0 && k != d.Info.Name
d.Cache[k] = due
}
// Populate compacted value and remove unneeded replica stats.
for k, e := range d.Cache {
if e.ReplicationStats != nil && len(e.ReplicationStats.Targets) == 0 {
e.ReplicationStats = nil
}
d.Cache[k] = e
}
return nil
case dataUsageCacheVerV6:
// Zstd compressed.
@@ -1373,22 +1146,13 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
d.Info = dold.Info
d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
for k, v := range dold.Cache {
var replicationStats *replicationAllStats
if v.ReplicationStats != nil {
replicationStats = &replicationAllStats{
Targets: v.ReplicationStats.Targets,
ReplicaSize: v.ReplicationStats.ReplicaSize,
ReplicaCount: v.ReplicationStats.ReplicaCount,
}
}
due := dataUsageEntry{
Children: v.Children,
Size: v.Size,
Objects: v.Objects,
Versions: v.Versions,
ObjSizes: v.ObjSizes,
ReplicationStats: replicationStats,
Compacted: v.Compacted,
Children: v.Children,
Size: v.Size,
Objects: v.Objects,
Versions: v.Versions,
ObjSizes: v.ObjSizes,
Compacted: v.Compacted,
}
d.Cache[k] = due
}
@@ -1410,13 +1174,12 @@ func (d *dataUsageCache) deserialize(r io.Reader) error {
var szHist sizeHistogram
szHist.mergeV1(v.ObjSizes)
d.Cache[k] = dataUsageEntry{
Children: v.Children,
Size: v.Size,
Objects: v.Objects,
Versions: v.Versions,
ObjSizes: szHist,
ReplicationStats: v.ReplicationStats,
Compacted: v.Compacted,
Children: v.Children,
Size: v.Size,
Objects: v.Objects,
Versions: v.Versions,
ObjSizes: szHist,
Compacted: v.Compacted,
}
}
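
The only transformation left in the V7 case above is szHist.mergeV1(v.ObjSizes), which folds the old sizeHistogramV1 buckets into the current layout before the entry is stored. The real bucket boundaries are not shown in this diff, so the following is a purely hypothetical sketch of that kind of bucket fold:

package main

import "fmt"

// Hypothetical layouts: the old histogram is coarser than the new one.
type sizeHistogramV1 [3]uint64
type sizeHistogram [5]uint64

// mergeV1 folds old bucket counts into the new layout. The old-to-new
// index mapping here is invented; the real one depends on the actual
// size boundaries defined elsewhere in the package.
func (h *sizeHistogram) mergeV1(v1 sizeHistogramV1) {
	mapping := [3]int{0, 2, 4}
	for i, count := range v1 {
		h[mapping[i]] += count
	}
}

func main() {
	var h sizeHistogram
	h.mergeV1(sizeHistogramV1{1, 2, 3})
	fmt.Println(h) // [1 0 2 0 3]
}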

File diff suppressed because it is too large.


@@ -519,458 +519,6 @@ func BenchmarkDecodedataUsageEntry(b *testing.B) {
}
}
func TestMarshalUnmarshalreplicationAllStats(t *testing.T) {
v := replicationAllStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgreplicationAllStats(b *testing.B) {
v := replicationAllStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgreplicationAllStats(b *testing.B) {
v := replicationAllStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalreplicationAllStats(b *testing.B) {
v := replicationAllStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodereplicationAllStats(t *testing.T) {
v := replicationAllStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodereplicationAllStats Msgsize() is inaccurate")
}
vn := replicationAllStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodereplicationAllStats(b *testing.B) {
v := replicationAllStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodereplicationAllStats(b *testing.B) {
v := replicationAllStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalreplicationAllStatsV1(t *testing.T) {
v := replicationAllStatsV1{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgreplicationAllStatsV1(b *testing.B) {
v := replicationAllStatsV1{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgreplicationAllStatsV1(b *testing.B) {
v := replicationAllStatsV1{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalreplicationAllStatsV1(b *testing.B) {
v := replicationAllStatsV1{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodereplicationAllStatsV1(t *testing.T) {
v := replicationAllStatsV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodereplicationAllStatsV1 Msgsize() is inaccurate")
}
vn := replicationAllStatsV1{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodereplicationAllStatsV1(b *testing.B) {
v := replicationAllStatsV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodereplicationAllStatsV1(b *testing.B) {
v := replicationAllStatsV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalreplicationStats(t *testing.T) {
v := replicationStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgreplicationStats(b *testing.B) {
v := replicationStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgreplicationStats(b *testing.B) {
v := replicationStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalreplicationStats(b *testing.B) {
v := replicationStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodereplicationStats(t *testing.T) {
v := replicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodereplicationStats Msgsize() is inaccurate")
}
vn := replicationStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodereplicationStats(b *testing.B) {
v := replicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodereplicationStats(b *testing.B) {
v := replicationStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalreplicationStatsV1(t *testing.T) {
v := replicationStatsV1{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgreplicationStatsV1(b *testing.B) {
v := replicationStatsV1{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgreplicationStatsV1(b *testing.B) {
v := replicationStatsV1{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalreplicationStatsV1(b *testing.B) {
v := replicationStatsV1{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodereplicationStatsV1(t *testing.T) {
v := replicationStatsV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodereplicationStatsV1 Msgsize() is inaccurate")
}
vn := replicationStatsV1{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodereplicationStatsV1(b *testing.B) {
v := replicationStatsV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodereplicationStatsV1(b *testing.B) {
v := replicationStatsV1{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalsizeHistogram(t *testing.T) {
v := sizeHistogram{}
bts, err := v.MarshalMsg(nil)


@@ -587,17 +587,6 @@ func TestDataUsageCacheSerialize(t *testing.T) {
t.Fatal(err)
}
e := want.find("abucket/dir2")
e.ReplicationStats = &replicationAllStats{
Targets: map[string]replicationStats{
"arn": {
PendingSize: 1,
ReplicatedSize: 2,
FailedSize: 3,
FailedCount: 5,
PendingCount: 6,
},
},
}
want.replace("abucket/dir2", "", *e)
var buf bytes.Buffer
err = want.serializeTo(&buf)


@@ -551,9 +551,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
var root dataUsageEntry
if r := cache.root(); r != nil {
root = cache.flatten(*r)
if root.ReplicationStats.empty() {
root.ReplicationStats = nil
}
}
select {
case <-ctx.Done():