fetch bucket replication stats across peers in single call (#14956)
The current implementation iterated over buckets one at a time across all peers. With hundreds of buckets this becomes very slow and chatty, amounting to roughly 100 × peerCount network operations. This PR reduces the entire exchange to only `peerCount` network calls. It also addresses a concern where Prometheus metrics collection would slow down significantly when one of the peers is offline.
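As a rough illustration of the shape of the change (a sketch only, not the actual MinIO peer REST API: `peerClient`, `GetAllBucketStats`, and `fetchClusterBucketStats` below are assumed names, and the standard `context` and `sync` packages are assumed to be imported), the fan-out becomes one call per peer, with each peer returning its complete `BucketStatsMap` and the caller merging the results:

	// Sketch: one network call per peer instead of one per bucket per peer.
	// peerClient and GetAllBucketStats are hypothetical names for illustration;
	// BucketStatsMap (bucket name -> BucketStats) is the type added in the diff below.
	type peerClient interface {
		GetAllBucketStats(ctx context.Context) (BucketStatsMap, error)
	}

	func fetchClusterBucketStats(ctx context.Context, peers []peerClient) map[string][]BucketStats {
		var (
			mu     sync.Mutex
			wg     sync.WaitGroup
			merged = make(map[string][]BucketStats)
		)
		for _, peer := range peers {
			wg.Add(1)
			go func(p peerClient) {
				defer wg.Done()
				stats, err := p.GetAllBucketStats(ctx) // single call returns stats for every bucket
				if err != nil {
					return // an offline peer costs one failed call, not one per bucket
				}
				mu.Lock()
				defer mu.Unlock()
				for bucket, st := range stats {
					merged[bucket] = append(merged[bucket], st)
				}
			}(peer)
		}
		wg.Wait()
		return merged
	}

With N buckets and M peers this is M requests instead of N × M, and a slow or offline peer delays at most its own single request rather than every per-bucket probe.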
@@ -792,6 +792,183 @@ func (z *BucketStats) Msgsize() (s int) {
	return
}

// DecodeMsg implements msgp.Decodable
func (z *BucketStatsMap) DecodeMsg(dc *msgp.Reader) (err error) {
	var zb0003 uint32
	zb0003, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if (*z) == nil {
		(*z) = make(BucketStatsMap, zb0003)
	} else if len((*z)) > 0 {
		for key := range *z {
			delete((*z), key)
		}
	}
	for zb0003 > 0 {
		zb0003--
		var zb0001 string
		var zb0002 BucketStats
		zb0001, err = dc.ReadString()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		var field []byte
		_ = field
		var zb0004 uint32
		zb0004, err = dc.ReadMapHeader()
		if err != nil {
			err = msgp.WrapError(err, zb0001)
			return
		}
		for zb0004 > 0 {
			zb0004--
			field, err = dc.ReadMapKeyPtr()
			if err != nil {
				err = msgp.WrapError(err, zb0001)
				return
			}
			switch msgp.UnsafeString(field) {
			case "ReplicationStats":
				err = zb0002.ReplicationStats.DecodeMsg(dc)
				if err != nil {
					err = msgp.WrapError(err, zb0001, "ReplicationStats")
					return
				}
			default:
				err = dc.Skip()
				if err != nil {
					err = msgp.WrapError(err, zb0001)
					return
				}
			}
		}
		(*z)[zb0001] = zb0002
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z BucketStatsMap) EncodeMsg(en *msgp.Writer) (err error) {
	err = en.WriteMapHeader(uint32(len(z)))
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0005, zb0006 := range z {
		err = en.WriteString(zb0005)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		// map header, size 1
		// write "ReplicationStats"
		err = en.Append(0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
		if err != nil {
			return
		}
		err = zb0006.ReplicationStats.EncodeMsg(en)
		if err != nil {
			err = msgp.WrapError(err, zb0005, "ReplicationStats")
			return
		}
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z BucketStatsMap) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendMapHeader(o, uint32(len(z)))
	for zb0005, zb0006 := range z {
		o = msgp.AppendString(o, zb0005)
		// map header, size 1
		// string "ReplicationStats"
		o = append(o, 0x81, 0xb0, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73)
		o, err = zb0006.ReplicationStats.MarshalMsg(o)
		if err != nil {
			err = msgp.WrapError(err, zb0005, "ReplicationStats")
			return
		}
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketStatsMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0003 uint32
	zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if (*z) == nil {
		(*z) = make(BucketStatsMap, zb0003)
	} else if len((*z)) > 0 {
		for key := range *z {
			delete((*z), key)
		}
	}
	for zb0003 > 0 {
		var zb0001 string
		var zb0002 BucketStats
		zb0003--
		zb0001, bts, err = msgp.ReadStringBytes(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		var field []byte
		_ = field
		var zb0004 uint32
		zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
		if err != nil {
			err = msgp.WrapError(err, zb0001)
			return
		}
		for zb0004 > 0 {
			zb0004--
			field, bts, err = msgp.ReadMapKeyZC(bts)
			if err != nil {
				err = msgp.WrapError(err, zb0001)
				return
			}
			switch msgp.UnsafeString(field) {
			case "ReplicationStats":
				bts, err = zb0002.ReplicationStats.UnmarshalMsg(bts)
				if err != nil {
					err = msgp.WrapError(err, zb0001, "ReplicationStats")
					return
				}
			default:
				bts, err = msgp.Skip(bts)
				if err != nil {
					err = msgp.WrapError(err, zb0001)
					return
				}
			}
		}
		(*z)[zb0001] = zb0002
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BucketStatsMap) Msgsize() (s int) {
	s = msgp.MapHeaderSize
	if z != nil {
		for zb0005, zb0006 := range z {
			_ = zb0006
			s += msgp.StringPrefixSize + len(zb0005) + 1 + 17 + zb0006.ReplicationStats.Msgsize()
		}
	}
	return
}

// DecodeMsg implements msgp.Decodable
func (z *ReplicationLatency) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
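For context, a minimal round-trip through the generated `BucketStatsMap` codecs above might look like the sketch below. It assumes it sits in the same package as the generated file (so `BucketStatsMap` and `BucketStats` are in scope); the bucket names and the helper name are made up for illustration.

	// Minimal round-trip sketch for the generated BucketStatsMap codecs above.
	func roundTripBucketStatsMap() error {
		in := BucketStatsMap{
			"bucket-a": {},
			"bucket-b": {},
		}
		// MarshalMsg appends the MessagePack encoding to the supplied slice.
		buf, err := in.MarshalMsg(nil)
		if err != nil {
			return err
		}
		// UnmarshalMsg rebuilds the map from the wire bytes and returns any
		// leftover bytes (none here).
		var out BucketStatsMap
		if _, err := out.UnmarshalMsg(buf); err != nil {
			return err
		}
		// out now holds one BucketStats entry per bucket key, same as in.
		return nil
	}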