fix: simplify usage calculation and progress (#14086)

Harshavardhana 2022-01-11 18:48:43 -08:00 committed by GitHub
parent 404b05a44c
commit d50442da01
5 changed files with 605 additions and 96 deletions


@@ -88,6 +88,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
}
case SRError:
apiErr = errorCodes.ToAPIErrWithErr(e.Code, e.Cause)
+case decomError:
+apiErr = APIError{
+Code: "XMinioDecommissionNotAllowed",
+Description: e.Err,
+HTTPStatusCode: http.StatusBadRequest,
+}
default:
switch {
case errors.Is(err, errDecommissionAlreadyRunning):


@@ -27,8 +27,6 @@ import (
"time"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/console"
@@ -54,10 +52,10 @@ type PoolDecommissionInfo struct {
Object string `json:"-" msg:"obj"`
// Verbose information
-ItemsDecommissioned uint64 `json:"-" msg:"id"`
-ItemsDecommissionFailed uint64 `json:"-" msg:"idf"`
-BytesDone uint64 `json:"-" msg:"bd"`
-BytesFailed uint64 `json:"-" msg:"bf"`
+ItemsDecommissioned int64 `json:"-" msg:"id"`
+ItemsDecommissionFailed int64 `json:"-" msg:"idf"`
+BytesDone int64 `json:"-" msg:"bd"`
+BytesFailed int64 `json:"-" msg:"bf"`
}
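Aside: the four counters above move from uint64 to int64. Object sizes are int64 throughout the codebase, so signed counters add without casts, and a transient negative delta stays meaningful instead of wrapping; this rationale is an inference from the diff, not stated in the commit message. A minimal standalone Go sketch of the wraparound hazard:

package main

import "fmt"

func main() {
	// Unsigned arithmetic wraps on a negative delta:
	var a, b uint64 = 5, 7
	fmt.Println(a - b) // 18446744073709551614

	// The int64 fields introduced above keep the delta meaningful:
	var c, d int64 = 5, 7
	fmt.Println(c - d) // -2
}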
// bucketPop should be called when a bucket is done decommissioning.
@@ -144,7 +142,7 @@ func (p *poolMeta) returnResumablePools(n int) []PoolStatus {
func (p *poolMeta) DecommissionComplete(idx int) bool {
if p.Pools[idx].Decommission != nil {
-p.Pools[idx].LastUpdate = time.Now().UTC()
+p.Pools[idx].LastUpdate = UTCNow()
p.Pools[idx].Decommission.Complete = true
p.Pools[idx].Decommission.Failed = false
p.Pools[idx].Decommission.Canceled = false
@@ -155,7 +153,7 @@ func (p *poolMeta) DecommissionComplete(idx int) bool {
func (p *poolMeta) DecommissionFailed(idx int) bool {
if p.Pools[idx].Decommission != nil {
-p.Pools[idx].LastUpdate = time.Now().UTC()
+p.Pools[idx].LastUpdate = UTCNow()
p.Pools[idx].Decommission.StartTime = time.Time{}
p.Pools[idx].Decommission.Complete = false
p.Pools[idx].Decommission.Failed = true
@@ -167,7 +165,7 @@ func (p *poolMeta) DecommissionFailed(idx int) bool {
func (p *poolMeta) DecommissionCancel(idx int) bool {
if p.Pools[idx].Decommission != nil {
-p.Pools[idx].LastUpdate = time.Now().UTC()
+p.Pools[idx].LastUpdate = UTCNow()
p.Pools[idx].Decommission.StartTime = time.Time{}
p.Pools[idx].Decommission.Complete = false
p.Pools[idx].Decommission.Failed = false
@@ -227,7 +225,7 @@ var (
errDecommissionComplete = errors.New("decommission is complete, please remove the servers from command-line")
)
-func (p *poolMeta) Decommission(idx int, info StorageInfo) error {
+func (p *poolMeta) Decommission(idx int, pi poolSpaceInfo) error {
for i, pool := range p.Pools {
if idx == i {
continue
@@ -239,15 +237,15 @@ func (p *poolMeta) Decommission(idx int, info StorageInfo) error {
return fmt.Errorf("%w at index: %d", errDecommissionAlreadyRunning, i)
}
}
+now := UTCNow()
if p.Pools[idx].Decommission == nil {
-startSize := TotalUsableCapacityFree(info)
-totalSize := TotalUsableCapacity(info)
-p.Pools[idx].LastUpdate = time.Now().UTC()
+p.Pools[idx].LastUpdate = now
p.Pools[idx].Decommission = &PoolDecommissionInfo{
-StartTime: UTCNow(),
-StartSize: startSize,
-CurrentSize: startSize,
-TotalSize: totalSize,
+StartTime: now,
+StartSize: pi.Free,
+CurrentSize: pi.Free,
+TotalSize: pi.Total,
}
return nil
}
@@ -260,14 +258,12 @@ func (p *poolMeta) Decommission(idx int, info StorageInfo) error {
// Canceled or Failed decommission can be triggered again.
if p.Pools[idx].Decommission.StartTime.IsZero() {
if p.Pools[idx].Decommission.Canceled || p.Pools[idx].Decommission.Failed {
-startSize := TotalUsableCapacityFree(info)
-totalSize := TotalUsableCapacity(info)
-p.Pools[idx].LastUpdate = time.Now().UTC()
+p.Pools[idx].LastUpdate = now
p.Pools[idx].Decommission = &PoolDecommissionInfo{
-StartTime: UTCNow(),
-StartSize: startSize,
-CurrentSize: startSize,
-TotalSize: totalSize,
+StartTime: now,
+StartSize: pi.Free,
+CurrentSize: pi.Free,
+TotalSize: pi.Total,
}
return nil
}
@@ -385,24 +381,31 @@ func (p *poolMeta) CountItem(idx int, size int64, failed bool) {
if pd != nil {
if failed {
pd.ItemsDecommissionFailed++
-pd.BytesFailed += uint64(size)
+pd.BytesFailed += size
} else {
pd.ItemsDecommissioned++
-pd.BytesDone += uint64(size)
+pd.BytesDone += size
}
p.Pools[idx].Decommission = pd
}
}
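CountItem accrues successes and failures into separate signed counters; only ItemsDecommissioned/BytesDone later feed the progress figure that Status reports. A self-contained sketch of that accounting, with tally as a hypothetical stand-in for PoolDecommissionInfo:

package main

import "fmt"

// tally mirrors the four counters of PoolDecommissionInfo (hypothetical name).
type tally struct{ done, failed, bytesDone, bytesFailed int64 }

// countItem reproduces the branch in CountItem above.
func (t *tally) countItem(size int64, failed bool) {
	if failed {
		t.failed++
		t.bytesFailed += size
		return
	}
	t.done++
	t.bytesDone += size
}

func main() {
	var t tally
	t.countItem(1024, false) // one object moved successfully
	t.countItem(2048, true)  // one object failed
	fmt.Printf("%+v\n", t)   // {done:1 failed:1 bytesDone:1024 bytesFailed:2048}
}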
-func (p *poolMeta) updateAfter(ctx context.Context, idx int, pools []*erasureSets, duration time.Duration) error {
+func (p *poolMeta) updateAfter(ctx context.Context, idx int, pools []*erasureSets, duration time.Duration) (bool, error) {
if p.Pools[idx].Decommission == nil {
-return errInvalidArgument
+return false, errInvalidArgument
}
-if time.Since(p.Pools[idx].LastUpdate) > duration {
-p.Pools[idx].LastUpdate = time.Now().UTC()
-return p.save(ctx, pools)
+now := UTCNow()
+if now.Sub(p.Pools[idx].LastUpdate) >= duration {
+if serverDebugLog {
+console.Debugf("decommission: persisting poolMeta on disk: threshold:%s, poolMeta:%#v\n", now.Sub(p.Pools[idx].LastUpdate), p.Pools[idx])
+}
+p.Pools[idx].LastUpdate = now
+if err := p.save(ctx, pools); err != nil {
+return false, err
+}
+return true, nil
}
-return nil
+return false, nil
}
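updateAfter now reports whether a save actually happened, letting the caller broadcast a pool-meta reload only after a real write (see decommissionPool below). A standalone sketch of this persist-at-most-once-per-interval pattern; saveIfStale is illustrative, not MinIO API:

package main

import (
	"fmt"
	"time"
)

// saveIfStale persists via save() only when interval has elapsed since
// lastUpdate, returning whether a write happened and the new timestamp.
func saveIfStale(lastUpdate time.Time, interval time.Duration, save func() error) (bool, time.Time, error) {
	now := time.Now().UTC()
	if now.Sub(lastUpdate) < interval {
		return false, lastUpdate, nil // too soon; skip the disk write
	}
	if err := save(); err != nil {
		return false, lastUpdate, err
	}
	return true, now, nil // caller may now notify peers to reload
}

func main() {
	last := time.Now().Add(-time.Minute)
	ok, _, _ := saveIfStale(last, 30*time.Second, func() error { return nil })
	fmt.Println("persisted:", ok) // persisted: true
}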
func (p poolMeta) save(ctx context.Context, pools []*erasureSets) error {
@@ -464,6 +467,7 @@ func (z *erasureServerPools) Init(ctx context.Context) error {
return nil
}
+meta = poolMeta{} // to update write poolMeta fresh.
// looks like new pool was added we need to update,
// or this is a fresh installation (or an existing
// installation with pool removed)
@@ -472,7 +476,7 @@ func (z *erasureServerPools) Init(ctx context.Context) error {
meta.Pools = append(meta.Pools, PoolStatus{
CmdLine: pool.endpoints.CmdLine,
ID: idx,
-LastUpdate: time.Now().UTC(),
+LastUpdate: UTCNow(),
})
}
if err = meta.save(ctx, z.serverPools); err != nil {
@@ -650,7 +654,11 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
}
z.poolMetaMutex.Lock()
z.poolMeta.TrackCurrentBucketObject(idx, bName, entry.name)
-logger.LogIf(ctx, z.poolMeta.updateAfter(ctx, idx, z.serverPools, time.Minute))
+ok, err := z.poolMeta.updateAfter(ctx, idx, z.serverPools, 30*time.Second)
+logger.LogIf(ctx, err)
+if ok {
+globalNotificationSys.ReloadPoolMeta(ctx)
+}
z.poolMetaMutex.Unlock()
}
@@ -754,6 +762,51 @@ func (z *erasureServerPools) Decommission(ctx context.Context, idx int) error {
return nil
}
+type decomError struct {
+Err string
+}
+func (d decomError) Error() string {
+return d.Err
+}
+type poolSpaceInfo struct {
+Free int64
+Total int64
+Used int64
+}
+func (z *erasureServerPools) getDecommissionPoolSpaceInfo(idx int) (pi poolSpaceInfo, err error) {
+if idx < 0 {
+return pi, errInvalidArgument
+}
+if idx+1 > len(z.serverPools) {
+return pi, errInvalidArgument
+}
+info, errs := z.serverPools[idx].StorageInfo(context.Background())
+for _, err := range errs {
+if err != nil {
+return pi, errInvalidArgument
+}
+}
+info.Backend = z.BackendInfo()
+for _, disk := range info.Disks {
+if disk.Healing {
+return pi, decomError{
+Err: fmt.Sprintf("%s drive is healing, decommission will not be started", disk.Endpoint),
+}
+}
+}
+usableTotal := int64(GetTotalUsableCapacity(info.Disks, info))
+usableFree := int64(GetTotalUsableCapacityFree(info.Disks, info))
+return poolSpaceInfo{
+Total: usableTotal,
+Free: usableFree,
+Used: usableTotal - usableFree,
+}, nil
+}
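getDecommissionPoolSpaceInfo snapshots the pool's usable totals once, refuses to start while any drive is healing, and derives Used from Total and Free. A tiny sketch of that derivation with made-up numbers:

package main

import "fmt"

// spaceInfo mirrors the poolSpaceInfo struct added above.
type spaceInfo struct{ Free, Total, Used int64 }

func main() {
	// Hypothetical pool: 1 TiB usable in total, 400 GiB still free.
	pi := spaceInfo{Free: 400 << 30, Total: 1 << 40}
	pi.Used = pi.Total - pi.Free // the same Used derivation as above
	fmt.Printf("used: %d GiB (%.0f%%)\n",
		pi.Used>>30, float64(pi.Used)/float64(pi.Total)*100)
}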
func (z *erasureServerPools) Status(ctx context.Context, idx int) (PoolStatus, error) {
if idx < 0 {
return PoolStatus{}, errInvalidArgument
@@ -762,35 +815,19 @@ func (z *erasureServerPools) Status(ctx context.Context, idx int) (PoolStatus, error) {
z.poolMetaMutex.RLock()
defer z.poolMetaMutex.RUnlock()
if idx+1 > len(z.poolMeta.Pools) {
return PoolStatus{}, errInvalidArgument
}
+pi, err := z.getDecommissionPoolSpaceInfo(idx)
+if err != nil {
+return PoolStatus{}, errInvalidArgument
+}
-pool := z.serverPools[idx]
-info, _ := pool.StorageInfo(ctx)
-info.Backend.Type = madmin.Erasure
-scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
-if scParity <= 0 {
-scParity = z.serverPools[0].defaultParityCount
-}
-rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
-info.Backend.StandardSCData = append(info.Backend.StandardSCData, pool.SetDriveCount()-scParity)
-info.Backend.RRSCData = append(info.Backend.RRSCData, pool.SetDriveCount()-rrSCParity)
-info.Backend.StandardSCParity = scParity
-info.Backend.RRSCParity = rrSCParity
-currentSize := TotalUsableCapacityFree(info)
-totalSize := TotalUsableCapacity(info)
poolInfo := z.poolMeta.Pools[idx]
if poolInfo.Decommission != nil {
-poolInfo.Decommission.TotalSize = totalSize
-poolInfo.Decommission.CurrentSize = currentSize
+poolInfo.Decommission.TotalSize = pi.Total
+poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone
} else {
poolInfo.Decommission = &PoolDecommissionInfo{
-CurrentSize: currentSize,
-TotalSize: totalSize,
+TotalSize: pi.Total,
+CurrentSize: pi.Free,
}
}
return poolInfo, nil
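This is the core of the "simplify usage calculation and progress" fix: CurrentSize for a running decommission is no longer live free space, which fluctuates with unrelated I/O, but the monotonic StartSize + BytesDone. A sketch of the resulting progress figure; the helper name and numbers are illustrative:

package main

import "fmt"

// currentSize reproduces the formula Status now uses: free space when the
// decommission began plus the bytes moved off the pool since.
func currentSize(startSize, bytesDone int64) int64 {
	return startSize + bytesDone
}

func main() {
	start := int64(100 << 30) // free bytes at decommission start
	done := int64(25 << 30)   // accumulated by CountItem
	total := int64(500 << 30) // usable pool capacity (TotalSize)

	cur := currentSize(start, done)
	fmt.Printf("current %d GiB of %d GiB (%.0f%% free)\n",
		cur>>30, total>>30, float64(cur)/float64(total)*100)
}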
@@ -893,7 +930,9 @@ func (z *erasureServerPools) StartDecommission(ctx context.Context, idx int) (err error) {
for _, bucket := range buckets {
if lc, err := globalLifecycleSys.Get(bucket.Name); err == nil {
if lc.HasTransition() {
-return fmt.Errorf("Bucket is part of transitioned tier %s: decommission is not allowed in Tier'd setups", bucket.Name)
+return decomError{
+Err: fmt.Sprintf("Bucket is part of transitioned tier %s: decommission is not allowed in Tier'd setups", bucket.Name),
+}
}
}
}
@@ -919,23 +958,15 @@ func (z *erasureServerPools) StartDecommission(ctx context.Context, idx int) (err error) {
return errInvalidArgument
}
-info, _ := pool.StorageInfo(ctx)
-info.Backend.Type = madmin.Erasure
-scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
-if scParity <= 0 {
-scParity = z.serverPools[0].defaultParityCount
+pi, err := z.getDecommissionPoolSpaceInfo(idx)
+if err != nil {
+return err
}
-rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
-info.Backend.StandardSCData = append(info.Backend.StandardSCData, pool.SetDriveCount()-scParity)
-info.Backend.RRSCData = append(info.Backend.RRSCData, pool.SetDriveCount()-rrSCParity)
-info.Backend.StandardSCParity = scParity
-info.Backend.RRSCParity = rrSCParity
z.poolMetaMutex.Lock()
defer z.poolMetaMutex.Unlock()
-if err = z.poolMeta.Decommission(idx, info); err != nil {
+if err = z.poolMeta.Decommission(idx, pi); err != nil {
return err
}
z.poolMeta.QueueBuckets(idx, buckets)


@@ -117,25 +117,25 @@ func (z *PoolDecommissionInfo) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
case "id":
-z.ItemsDecommissioned, err = dc.ReadUint64()
+z.ItemsDecommissioned, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ItemsDecommissioned")
return
}
case "idf":
-z.ItemsDecommissionFailed, err = dc.ReadUint64()
+z.ItemsDecommissionFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "ItemsDecommissionFailed")
return
}
case "bd":
-z.BytesDone, err = dc.ReadUint64()
+z.BytesDone, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "bf":
-z.BytesFailed, err = dc.ReadUint64()
+z.BytesFailed, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
@@ -283,7 +283,7 @@ func (z *PoolDecommissionInfo) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
-err = en.WriteUint64(z.ItemsDecommissioned)
+err = en.WriteInt64(z.ItemsDecommissioned)
if err != nil {
err = msgp.WrapError(err, "ItemsDecommissioned")
return
@@ -293,7 +293,7 @@ func (z *PoolDecommissionInfo) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
-err = en.WriteUint64(z.ItemsDecommissionFailed)
+err = en.WriteInt64(z.ItemsDecommissionFailed)
if err != nil {
err = msgp.WrapError(err, "ItemsDecommissionFailed")
return
@@ -303,7 +303,7 @@ func (z *PoolDecommissionInfo) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
-err = en.WriteUint64(z.BytesDone)
+err = en.WriteInt64(z.BytesDone)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
@@ -313,7 +313,7 @@ func (z *PoolDecommissionInfo) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
-err = en.WriteUint64(z.BytesFailed)
+err = en.WriteInt64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
@@ -366,16 +366,16 @@ func (z *PoolDecommissionInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendString(o, z.Object)
// string "id"
o = append(o, 0xa2, 0x69, 0x64)
-o = msgp.AppendUint64(o, z.ItemsDecommissioned)
+o = msgp.AppendInt64(o, z.ItemsDecommissioned)
// string "idf"
o = append(o, 0xa3, 0x69, 0x64, 0x66)
-o = msgp.AppendUint64(o, z.ItemsDecommissionFailed)
+o = msgp.AppendInt64(o, z.ItemsDecommissionFailed)
// string "bd"
o = append(o, 0xa2, 0x62, 0x64)
-o = msgp.AppendUint64(o, z.BytesDone)
+o = msgp.AppendInt64(o, z.BytesDone)
// string "bf"
o = append(o, 0xa2, 0x62, 0x66)
-o = msgp.AppendUint64(o, z.BytesFailed)
+o = msgp.AppendInt64(o, z.BytesFailed)
return
}
@@ -490,25 +490,25 @@ func (z *PoolDecommissionInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "id":
-z.ItemsDecommissioned, bts, err = msgp.ReadUint64Bytes(bts)
+z.ItemsDecommissioned, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsDecommissioned")
return
}
case "idf":
-z.ItemsDecommissionFailed, bts, err = msgp.ReadUint64Bytes(bts)
+z.ItemsDecommissionFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ItemsDecommissionFailed")
return
}
case "bd":
-z.BytesDone, bts, err = msgp.ReadUint64Bytes(bts)
+z.BytesDone, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "bf":
-z.BytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
+z.BytesFailed, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
@@ -535,7 +535,7 @@ func (z *PoolDecommissionInfo) Msgsize() (s int) {
for za0002 := range z.DecommissionedBuckets {
s += msgp.StringPrefixSize + len(z.DecommissionedBuckets[za0002])
}
-s += 4 + msgp.StringPrefixSize + len(z.Bucket) + 4 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Uint64Size + 4 + msgp.Uint64Size + 3 + msgp.Uint64Size + 3 + msgp.Uint64Size
+s += 4 + msgp.StringPrefixSize + len(z.Bucket) + 4 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
return
}
@@ -760,6 +760,109 @@ func (z *PoolStatus) Msgsize() (s int) {
return
}
// DecodeMsg implements msgp.Decodable
func (z *decomError) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Err":
z.Err, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Err")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z decomError) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "Err"
err = en.Append(0x81, 0xa3, 0x45, 0x72, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Err)
if err != nil {
err = msgp.WrapError(err, "Err")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z decomError) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "Err"
o = append(o, 0x81, 0xa3, 0x45, 0x72, 0x72)
o = msgp.AppendString(o, z.Err)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *decomError) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Err":
z.Err, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Err")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z decomError) Msgsize() (s int) {
s = 1 + 4 + msgp.StringPrefixSize + len(z.Err)
return
}
// DecodeMsg implements msgp.Decodable
func (z *poolMeta) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
@@ -930,3 +1033,156 @@ func (z *poolMeta) Msgsize() (s int) {
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *poolSpaceInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Free":
z.Free, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Free")
return
}
case "Total":
z.Total, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
case "Used":
z.Used, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z poolSpaceInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "Free"
err = en.Append(0x83, 0xa4, 0x46, 0x72, 0x65, 0x65)
if err != nil {
return
}
err = en.WriteInt64(z.Free)
if err != nil {
err = msgp.WrapError(err, "Free")
return
}
// write "Total"
err = en.Append(0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteInt64(z.Total)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
// write "Used"
err = en.Append(0xa4, 0x55, 0x73, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.Used)
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z poolSpaceInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "Free"
o = append(o, 0x83, 0xa4, 0x46, 0x72, 0x65, 0x65)
o = msgp.AppendInt64(o, z.Free)
// string "Total"
o = append(o, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendInt64(o, z.Total)
// string "Used"
o = append(o, 0xa4, 0x55, 0x73, 0x65, 0x64)
o = msgp.AppendInt64(o, z.Used)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *poolSpaceInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Free":
z.Free, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Free")
return
}
case "Total":
z.Total, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
case "Used":
z.Used, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Used")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z poolSpaceInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.Int64Size + 6 + msgp.Int64Size + 5 + msgp.Int64Size
return
}


@@ -235,6 +235,119 @@ func BenchmarkDecodePoolStatus(b *testing.B) {
}
}
func TestMarshalUnmarshaldecomError(t *testing.T) {
v := decomError{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgdecomError(b *testing.B) {
v := decomError{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgdecomError(b *testing.B) {
v := decomError{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshaldecomError(b *testing.B) {
v := decomError{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodedecomError(t *testing.T) {
v := decomError{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodedecomError Msgsize() is inaccurate")
}
vn := decomError{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodedecomError(b *testing.B) {
v := decomError{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodedecomError(b *testing.B) {
v := decomError{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalpoolMeta(t *testing.T) {
v := poolMeta{}
bts, err := v.MarshalMsg(nil)
@@ -347,3 +460,116 @@ func BenchmarkDecodepoolMeta(b *testing.B) {
}
}
}
func TestMarshalUnmarshalpoolSpaceInfo(t *testing.T) {
v := poolSpaceInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgpoolSpaceInfo(b *testing.B) {
v := poolSpaceInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgpoolSpaceInfo(b *testing.B) {
v := poolSpaceInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalpoolSpaceInfo(b *testing.B) {
v := poolSpaceInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodepoolSpaceInfo(t *testing.T) {
v := poolSpaceInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodepoolSpaceInfo Msgsize() is inaccurate")
}
vn := poolSpaceInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodepoolSpaceInfo(b *testing.B) {
v := poolSpaceInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodepoolSpaceInfo(b *testing.B) {
v := poolSpaceInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@@ -46,16 +46,6 @@ const (
// StorageInfo - represents total capacity of underlying storage.
type StorageInfo = madmin.StorageInfo
-// TotalUsableCapacity - total usable capacity
-func TotalUsableCapacity(s StorageInfo) int64 {
-return int64(GetTotalUsableCapacity(s.Disks, s))
-}
-// TotalUsableCapacityFree - total usable capacity free
-func TotalUsableCapacityFree(s StorageInfo) int64 {
-return int64(GetTotalUsableCapacityFree(s.Disks, s))
-}
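These deleted wrappers folded erasure-coding overhead into a usable-capacity figure; the decommission code now calls GetTotalUsableCapacity/GetTotalUsableCapacityFree directly via getDecommissionPoolSpaceInfo. Roughly, and as a simplification of the real accounting, usable capacity scales raw capacity by the data/(data+parity) ratio:

package main

import "fmt"

// usable is a simplified sketch, not the real GetTotalUsableCapacity:
// raw bytes scaled by the erasure-set data/(data+parity) ratio.
func usable(raw int64, data, parity int) int64 {
	return raw * int64(data) / int64(data+parity)
}

func main() {
	// Hypothetical: 16 x 1 TiB drives per set with 4 parity drives.
	fmt.Println(usable(16<<40, 12, 4) >> 40) // 12 (TiB usable)
}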
// objectHistogramInterval is an interval that will be
// used to report the histogram of objects data sizes
type objectHistogramInterval struct {