Convert more peer <--> peer REST calls (#19004)

* Convert more peer <--> peer REST calls
* Clean up in general.
* Add JSON wrapper (see the sketch after the handler list).
* Add slice wrapper.
* Add `IgnoreNilConn` option to make a handler return a nil error when no connection is given.

Converts the following handlers:

```
+	HandlerGetMetrics
+	HandlerGetResourceMetrics
+	HandlerGetMemInfo
+	HandlerGetProcInfo
+	HandlerGetOSInfo
+	HandlerGetPartitions
+	HandlerGetNetInfo
+	HandlerGetCPUs
+	HandlerServerInfo
+	HandlerGetSysConfig
+	HandlerGetSysServices
+	HandlerGetSysErrors
+	HandlerGetAllBucketStats
+	HandlerGetBucketStats
+	HandlerGetSRMetrics
+	HandlerGetPeerMetrics
+	HandlerGetMetacacheListing
+	HandlerUpdateMetacacheListing
+	HandlerGetPeerBucketMetrics
+	HandlerStorageInfo
+	HandlerGetLocks
+	HandlerBackgroundHealStatus
+	HandlerGetLastDayTierStats
+	HandlerSignalService
+	HandlerGetBandwidth
```
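For background, the JSON wrapper mentioned above lets a type without generated msgpack marshalling ride over the grid by storing its JSON encoding inside a single msgpack bin field. A minimal sketch of that idea, using a hypothetical `JSON[T]` type (the real wrapper lives in `internal/grid` and may differ in detail):

```
package main

import (
	"encoding/json"

	"github.com/tinylib/msgp/msgp"
)

// JSON is a hypothetical generic wrapper: it satisfies the msgp
// Marshaler/Unmarshaler interfaces by storing the JSON encoding of
// Val as one msgpack bin field.
type JSON[T any] struct {
	Val T
}

// MarshalMsg appends the JSON encoding of Val as msgpack bytes.
func (j *JSON[T]) MarshalMsg(b []byte) ([]byte, error) {
	buf, err := json.Marshal(j.Val)
	if err != nil {
		return b, err
	}
	return msgp.AppendBytes(b, buf), nil
}

// UnmarshalMsg reads the msgpack bytes back and decodes the JSON payload.
func (j *JSON[T]) UnmarshalMsg(bts []byte) ([]byte, error) {
	buf, rest, err := msgp.ReadBytesZC(bts)
	if err != nil {
		return bts, err
	}
	return rest, json.Unmarshal(buf, &j.Val)
}
```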
Commit e06168596f (parent 4c8197a119)
Author: Klaus Post, 2024-02-19 14:54:46 -08:00 (committed by GitHub)
29 changed files with 4794 additions and 979 deletions

cmd/admin-handlers.go

@@ -1984,7 +1984,6 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
 // The ConsoleLogHandler handler sends console logs to the connected HTTP client.
 func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
-
 	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConsoleLogAdminAction)
 	if objectAPI == nil {
 		return
@@ -2009,44 +2008,65 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
 	setEventStreamHeaders(w)
-	logCh := make(chan log.Info, 4000)
+	logCh := make(chan log.Info, 1000)
 	peers, _ := newPeerRestClients(globalEndpoints)
+	encodedCh := make(chan []byte, 1000+len(peers)*1000)
 	err = globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
+	// Convert local entries to JSON
+	go func() {
+		var buf bytes.Buffer
+		enc := json.NewEncoder(&buf)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case li := <-logCh:
+				if !li.SendLog(node, logKind) {
+					continue
+				}
+				buf.Reset()
+				if err := enc.Encode(li); err != nil {
+					continue
+				}
+				select {
+				case <-ctx.Done():
+					return
+				case encodedCh <- append(grid.GetByteBuffer()[:0], buf.Bytes()...):
+				}
+			}
+		}
+	}()
+	// Collect from matching peers
 	for _, peer := range peers {
 		if peer == nil {
 			continue
 		}
 		if node == "" || strings.EqualFold(peer.host.Name, node) {
-			peer.ConsoleLog(logCh, ctx.Done())
+			peer.ConsoleLog(ctx, logKind, encodedCh)
 		}
 	}
-	enc := json.NewEncoder(w)
 	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
 	defer keepAliveTicker.Stop()
 	for {
 		select {
-		case log, ok := <-logCh:
+		case log, ok := <-encodedCh:
 			if !ok {
 				return
 			}
-			if log.SendLog(node, logKind) {
-				if err := enc.Encode(log); err != nil {
-					return
-				}
-				if len(logCh) == 0 {
-					// Flush if nothing is queued
-					w.(http.Flusher).Flush()
-				}
-			}
+			_, err = w.Write(log)
+			if err != nil {
+				return
+			}
+			grid.PutByteBuffer(log)
+			if len(logCh) == 0 {
+				// Flush if nothing is queued
+				w.(http.Flusher).Flush()
+			}
 		case <-keepAliveTicker.C:
 			if len(logCh) > 0 {

cmd/local-locker.go

@@ -17,6 +17,8 @@
 package cmd

+//go:generate msgp -file=$GOFILE -unexported
+
 import (
 	"context"
 	"fmt"
@@ -36,12 +38,9 @@ type lockRequesterInfo struct {
 	TimeLastRefresh time.Time // Timestamp for last lock refresh.
 	Source          string    // Contains line, function and filename requesting the lock.
 	Group           bool      // indicates if it was a group lock.
-	// Owner represents the UUID of the owner who originally requested the lock
-	// useful in expiry.
-	Owner string
-	// Quorum represents the quorum required for this lock to be active.
-	Quorum int
-	idx    int
+	Owner  string // Owner represents the UUID of the owner who originally requested the lock.
+	Quorum int    // Quorum represents the quorum required for this lock to be active.
+	idx    int    `msg:"-"` // index of the lock in the lockMap.
 }

 // isWriteLock returns whether the lock is a write or read lock.
@@ -50,6 +49,8 @@ func isWriteLock(lri []lockRequesterInfo) bool {
 }

 // localLocker implements Dsync.NetLocker
+//
+//msgp:ignore localLocker
 type localLocker struct {
 	mutex   sync.Mutex
 	lockMap map[string][]lockRequesterInfo
@@ -238,7 +239,9 @@ func (l *localLocker) stats() lockStats {
 	return st
 }

-func (l *localLocker) DupLockMap() map[string][]lockRequesterInfo {
+type localLockMap map[string][]lockRequesterInfo
+
+func (l *localLocker) DupLockMap() localLockMap {
 	l.mutex.Lock()
 	defer l.mutex.Unlock()
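Three tinylib/msgp conventions do the work in this diff: the `go:generate` line produces the `*_gen.go` serialization code (shown next), a `msg:"-"` struct tag keeps a field such as `idx` out of the wire format, and `//msgp:ignore` exempts whole types such as `localLocker`, which holds a mutex and is never sent between peers. A compact illustration of all three, using hypothetical types with the same directives:

```
package cmd

import "sync"

//go:generate msgp -file=$GOFILE -unexported

// wireThing gets generated MarshalMsg/UnmarshalMsg methods.
type wireThing struct {
	Name  string
	local int `msg:"-"` // excluded from the wire format; zero after decode
}

// notOnWire never gets generated methods.
//
//msgp:ignore notOnWire
type notOnWire struct {
	mu sync.Mutex
}
```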

cmd/local-locker_gen.go (new file, 620 lines)

@@ -0,0 +1,620 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *localLockMap) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(localLockMap, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
for zb0004 > 0 {
zb0004--
var zb0001 string
var zb0002 []lockRequesterInfo
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0005 uint32
zb0005, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
if cap(zb0002) >= int(zb0005) {
zb0002 = (zb0002)[:zb0005]
} else {
zb0002 = make([]lockRequesterInfo, zb0005)
}
for zb0003 := range zb0002 {
err = zb0002[zb0003].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, zb0001, zb0003)
return
}
}
(*z)[zb0001] = zb0002
}
return
}
// EncodeMsg implements msgp.Encodable
func (z localLockMap) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteMapHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0006, zb0007 := range z {
err = en.WriteString(zb0006)
if err != nil {
err = msgp.WrapError(err)
return
}
err = en.WriteArrayHeader(uint32(len(zb0007)))
if err != nil {
err = msgp.WrapError(err, zb0006)
return
}
for zb0008 := range zb0007 {
err = zb0007[zb0008].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, zb0006, zb0008)
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z localLockMap) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendMapHeader(o, uint32(len(z)))
for zb0006, zb0007 := range z {
o = msgp.AppendString(o, zb0006)
o = msgp.AppendArrayHeader(o, uint32(len(zb0007)))
for zb0008 := range zb0007 {
o, err = zb0007[zb0008].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, zb0006, zb0008)
return
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *localLockMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(localLockMap, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
for zb0004 > 0 {
var zb0001 string
var zb0002 []lockRequesterInfo
zb0004--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0005 uint32
zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
if cap(zb0002) >= int(zb0005) {
zb0002 = (zb0002)[:zb0005]
} else {
zb0002 = make([]lockRequesterInfo, zb0005)
}
for zb0003 := range zb0002 {
bts, err = zb0002[zb0003].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, zb0003)
return
}
}
(*z)[zb0001] = zb0002
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z localLockMap) Msgsize() (s int) {
s = msgp.MapHeaderSize
if z != nil {
for zb0006, zb0007 := range z {
_ = zb0007
s += msgp.StringPrefixSize + len(zb0006) + msgp.ArrayHeaderSize
for zb0008 := range zb0007 {
s += zb0007[zb0008].Msgsize()
}
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *lockRequesterInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Writer":
z.Writer, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Writer")
return
}
case "UID":
z.UID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "UID")
return
}
case "Timestamp":
z.Timestamp, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
case "TimeLastRefresh":
z.TimeLastRefresh, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "TimeLastRefresh")
return
}
case "Source":
z.Source, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Source")
return
}
case "Group":
z.Group, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Group")
return
}
case "Owner":
z.Owner, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Owner")
return
}
case "Quorum":
z.Quorum, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Quorum")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *lockRequesterInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 9
// write "Name"
err = en.Append(0x89, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Writer"
err = en.Append(0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteBool(z.Writer)
if err != nil {
err = msgp.WrapError(err, "Writer")
return
}
// write "UID"
err = en.Append(0xa3, 0x55, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.UID)
if err != nil {
err = msgp.WrapError(err, "UID")
return
}
// write "Timestamp"
err = en.Append(0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70)
if err != nil {
return
}
err = en.WriteTime(z.Timestamp)
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
// write "TimeLastRefresh"
err = en.Append(0xaf, 0x54, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68)
if err != nil {
return
}
err = en.WriteTime(z.TimeLastRefresh)
if err != nil {
err = msgp.WrapError(err, "TimeLastRefresh")
return
}
// write "Source"
err = en.Append(0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Source)
if err != nil {
err = msgp.WrapError(err, "Source")
return
}
// write "Group"
err = en.Append(0xa5, 0x47, 0x72, 0x6f, 0x75, 0x70)
if err != nil {
return
}
err = en.WriteBool(z.Group)
if err != nil {
err = msgp.WrapError(err, "Group")
return
}
// write "Owner"
err = en.Append(0xa5, 0x4f, 0x77, 0x6e, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Owner)
if err != nil {
err = msgp.WrapError(err, "Owner")
return
}
// write "Quorum"
err = en.Append(0xa6, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d)
if err != nil {
return
}
err = en.WriteInt(z.Quorum)
if err != nil {
err = msgp.WrapError(err, "Quorum")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *lockRequesterInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 9
// string "Name"
o = append(o, 0x89, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Writer"
o = append(o, 0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x72)
o = msgp.AppendBool(o, z.Writer)
// string "UID"
o = append(o, 0xa3, 0x55, 0x49, 0x44)
o = msgp.AppendString(o, z.UID)
// string "Timestamp"
o = append(o, 0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70)
o = msgp.AppendTime(o, z.Timestamp)
// string "TimeLastRefresh"
o = append(o, 0xaf, 0x54, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68)
o = msgp.AppendTime(o, z.TimeLastRefresh)
// string "Source"
o = append(o, 0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65)
o = msgp.AppendString(o, z.Source)
// string "Group"
o = append(o, 0xa5, 0x47, 0x72, 0x6f, 0x75, 0x70)
o = msgp.AppendBool(o, z.Group)
// string "Owner"
o = append(o, 0xa5, 0x4f, 0x77, 0x6e, 0x65, 0x72)
o = msgp.AppendString(o, z.Owner)
// string "Quorum"
o = append(o, 0xa6, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d)
o = msgp.AppendInt(o, z.Quorum)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *lockRequesterInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Writer":
z.Writer, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Writer")
return
}
case "UID":
z.UID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UID")
return
}
case "Timestamp":
z.Timestamp, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
case "TimeLastRefresh":
z.TimeLastRefresh, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TimeLastRefresh")
return
}
case "Source":
z.Source, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Source")
return
}
case "Group":
z.Group, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Group")
return
}
case "Owner":
z.Owner, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Owner")
return
}
case "Quorum":
z.Quorum, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Quorum")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *lockRequesterInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 7 + msgp.BoolSize + 4 + msgp.StringPrefixSize + len(z.UID) + 10 + msgp.TimeSize + 16 + msgp.TimeSize + 7 + msgp.StringPrefixSize + len(z.Source) + 6 + msgp.BoolSize + 6 + msgp.StringPrefixSize + len(z.Owner) + 7 + msgp.IntSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *lockStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Total, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
case "Writes":
z.Writes, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Writes")
return
}
case "Reads":
z.Reads, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Reads")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z lockStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "Total"
err = en.Append(0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteInt(z.Total)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
// write "Writes"
err = en.Append(0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.Writes)
if err != nil {
err = msgp.WrapError(err, "Writes")
return
}
// write "Reads"
err = en.Append(0xa5, 0x52, 0x65, 0x61, 0x64, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.Reads)
if err != nil {
err = msgp.WrapError(err, "Reads")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z lockStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "Total"
o = append(o, 0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendInt(o, z.Total)
// string "Writes"
o = append(o, 0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73)
o = msgp.AppendInt(o, z.Writes)
// string "Reads"
o = append(o, 0xa5, 0x52, 0x65, 0x61, 0x64, 0x73)
o = msgp.AppendInt(o, z.Reads)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *lockStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Total, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
case "Writes":
z.Writes, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Writes")
return
}
case "Reads":
z.Reads, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Reads")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z lockStats) Msgsize() (s int) {
s = 1 + 6 + msgp.IntSize + 7 + msgp.IntSize + 6 + msgp.IntSize
return
}

cmd/local-locker_gen_test.go (new file, 349 lines)

@@ -0,0 +1,349 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshallocalLockMap(t *testing.T) {
v := localLockMap{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglocalLockMap(b *testing.B) {
v := localLockMap{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglocalLockMap(b *testing.B) {
v := localLockMap{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallocalLockMap(b *testing.B) {
v := localLockMap{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelocalLockMap(t *testing.T) {
v := localLockMap{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelocalLockMap Msgsize() is inaccurate")
}
vn := localLockMap{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelocalLockMap(b *testing.B) {
v := localLockMap{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelocalLockMap(b *testing.B) {
v := localLockMap{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshallockRequesterInfo(t *testing.T) {
v := lockRequesterInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelockRequesterInfo(t *testing.T) {
v := lockRequesterInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelockRequesterInfo Msgsize() is inaccurate")
}
vn := lockRequesterInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshallockStats(t *testing.T) {
v := lockStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglockStats(b *testing.B) {
v := lockStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglockStats(b *testing.B) {
v := lockStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallockStats(b *testing.B) {
v := lockStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelockStats(t *testing.T) {
v := lockStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelockStats Msgsize() is inaccurate")
}
vn := lockStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelockStats(b *testing.B) {
v := lockStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelockStats(b *testing.B) {
v := lockStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

cmd/metacache-set.go

@@ -42,6 +42,8 @@ import (
 	"github.com/minio/pkg/v2/console"
 )

+//go:generate msgp -file $GOFILE -unexported
+
 type listPathOptions struct {
 	// ID of the listing.
 	// This will be used to persist the list.
@@ -99,18 +101,18 @@ type listPathOptions struct {
 	// Versioning config is used for if the path
 	// has versioning enabled.
-	Versioning *versioning.Versioning
+	Versioning *versioning.Versioning `msg:"-"`

 	// Lifecycle performs filtering based on lifecycle.
 	// This will filter out objects if the most recent version should be deleted by lifecycle.
 	// Is not transferred across request calls.
-	Lifecycle *lifecycle.Lifecycle
+	Lifecycle *lifecycle.Lifecycle `msg:"-"`

 	// Retention configuration, needed to be passed along with lifecycle if set.
-	Retention lock.Retention
+	Retention lock.Retention `msg:"-"`

 	// Replication configuration
-	Replication replicationConfig
+	Replication replicationConfig `msg:"-"`

 	// StopDiskAtLimit will stop listing on each disk when limit number off objects has been returned.
 	StopDiskAtLimit bool
@@ -767,6 +769,7 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions, resul
 	})
 }

+//msgp:ignore metaCacheRPC
 type metaCacheRPC struct {
 	o      listPathOptions
 	mu     sync.Mutex
@@ -917,6 +920,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
 	return nil
 }

+//msgp:ignore listPathRawOptions
 type listPathRawOptions struct {
 	disks         []StorageAPI
 	fallbackDisks []StorageAPI
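Note that the generator runs with `-unexported`, so private routing fields like `pool` and `set` are serialized too (see the `case "pool":` and `case "set":` branches in the generated file below), while fields tagged `msg:"-"` such as `Lifecycle` stay local to the node. A quick round-trip sketch under those rules:

```
// Sketch: listing options survive a hop to a peer with routing intact.
func roundTripOptions(o listPathOptions) (listPathOptions, error) {
	b, err := o.MarshalMsg(nil)
	if err != nil {
		return listPathOptions{}, err
	}
	var got listPathOptions
	_, err = got.UnmarshalMsg(b)
	// got.pool and got.set match o; got.Lifecycle stays nil (msg:"-").
	return got, err
}
```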

cmd/metacache-set_gen.go (new file, 535 lines)

@@ -0,0 +1,535 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *listPathOptions) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "BaseDir":
z.BaseDir, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "BaseDir")
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "FilterPrefix":
z.FilterPrefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "FilterPrefix")
return
}
case "Marker":
z.Marker, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Marker")
return
}
case "Limit":
z.Limit, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
case "AskDisks":
z.AskDisks, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AskDisks")
return
}
case "InclDeleted":
z.InclDeleted, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "InclDeleted")
return
}
case "Recursive":
z.Recursive, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
case "Separator":
z.Separator, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Separator")
return
}
case "Create":
z.Create, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Create")
return
}
case "IncludeDirectories":
z.IncludeDirectories, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "IncludeDirectories")
return
}
case "Transient":
z.Transient, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Transient")
return
}
case "Versioned":
z.Versioned, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Versioned")
return
}
case "StopDiskAtLimit":
z.StopDiskAtLimit, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "StopDiskAtLimit")
return
}
case "pool":
z.pool, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "pool")
return
}
case "set":
z.set, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "set")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *listPathOptions) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 18
// write "ID"
err = en.Append(0xde, 0x0, 0x12, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "BaseDir"
err = en.Append(0xa7, 0x42, 0x61, 0x73, 0x65, 0x44, 0x69, 0x72)
if err != nil {
return
}
err = en.WriteString(z.BaseDir)
if err != nil {
err = msgp.WrapError(err, "BaseDir")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.Prefix)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "FilterPrefix"
err = en.Append(0xac, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.FilterPrefix)
if err != nil {
err = msgp.WrapError(err, "FilterPrefix")
return
}
// write "Marker"
err = en.Append(0xa6, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Marker)
if err != nil {
err = msgp.WrapError(err, "Marker")
return
}
// write "Limit"
err = en.Append(0xa5, 0x4c, 0x69, 0x6d, 0x69, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Limit)
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
// write "AskDisks"
err = en.Append(0xa8, 0x41, 0x73, 0x6b, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteString(z.AskDisks)
if err != nil {
err = msgp.WrapError(err, "AskDisks")
return
}
// write "InclDeleted"
err = en.Append(0xab, 0x49, 0x6e, 0x63, 0x6c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.InclDeleted)
if err != nil {
err = msgp.WrapError(err, "InclDeleted")
return
}
// write "Recursive"
err = en.Append(0xa9, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Recursive)
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
// write "Separator"
err = en.Append(0xa9, 0x53, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Separator)
if err != nil {
err = msgp.WrapError(err, "Separator")
return
}
// write "Create"
err = en.Append(0xa6, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Create)
if err != nil {
err = msgp.WrapError(err, "Create")
return
}
// write "IncludeDirectories"
err = en.Append(0xb2, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteBool(z.IncludeDirectories)
if err != nil {
err = msgp.WrapError(err, "IncludeDirectories")
return
}
// write "Transient"
err = en.Append(0xa9, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteBool(z.Transient)
if err != nil {
err = msgp.WrapError(err, "Transient")
return
}
// write "Versioned"
err = en.Append(0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Versioned)
if err != nil {
err = msgp.WrapError(err, "Versioned")
return
}
// write "StopDiskAtLimit"
err = en.Append(0xaf, 0x53, 0x74, 0x6f, 0x70, 0x44, 0x69, 0x73, 0x6b, 0x41, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74)
if err != nil {
return
}
err = en.WriteBool(z.StopDiskAtLimit)
if err != nil {
err = msgp.WrapError(err, "StopDiskAtLimit")
return
}
// write "pool"
err = en.Append(0xa4, 0x70, 0x6f, 0x6f, 0x6c)
if err != nil {
return
}
err = en.WriteInt(z.pool)
if err != nil {
err = msgp.WrapError(err, "pool")
return
}
// write "set"
err = en.Append(0xa3, 0x73, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.set)
if err != nil {
err = msgp.WrapError(err, "set")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *listPathOptions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 18
// string "ID"
o = append(o, 0xde, 0x0, 0x12, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "BaseDir"
o = append(o, 0xa7, 0x42, 0x61, 0x73, 0x65, 0x44, 0x69, 0x72)
o = msgp.AppendString(o, z.BaseDir)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
// string "FilterPrefix"
o = append(o, 0xac, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.FilterPrefix)
// string "Marker"
o = append(o, 0xa6, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72)
o = msgp.AppendString(o, z.Marker)
// string "Limit"
o = append(o, 0xa5, 0x4c, 0x69, 0x6d, 0x69, 0x74)
o = msgp.AppendInt(o, z.Limit)
// string "AskDisks"
o = append(o, 0xa8, 0x41, 0x73, 0x6b, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendString(o, z.AskDisks)
// string "InclDeleted"
o = append(o, 0xab, 0x49, 0x6e, 0x63, 0x6c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64)
o = msgp.AppendBool(o, z.InclDeleted)
// string "Recursive"
o = append(o, 0xa9, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65)
o = msgp.AppendBool(o, z.Recursive)
// string "Separator"
o = append(o, 0xa9, 0x53, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72)
o = msgp.AppendString(o, z.Separator)
// string "Create"
o = append(o, 0xa6, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65)
o = msgp.AppendBool(o, z.Create)
// string "IncludeDirectories"
o = append(o, 0xb2, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73)
o = msgp.AppendBool(o, z.IncludeDirectories)
// string "Transient"
o = append(o, 0xa9, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74)
o = msgp.AppendBool(o, z.Transient)
// string "Versioned"
o = append(o, 0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64)
o = msgp.AppendBool(o, z.Versioned)
// string "StopDiskAtLimit"
o = append(o, 0xaf, 0x53, 0x74, 0x6f, 0x70, 0x44, 0x69, 0x73, 0x6b, 0x41, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74)
o = msgp.AppendBool(o, z.StopDiskAtLimit)
// string "pool"
o = append(o, 0xa4, 0x70, 0x6f, 0x6f, 0x6c)
o = msgp.AppendInt(o, z.pool)
// string "set"
o = append(o, 0xa3, 0x73, 0x65, 0x74)
o = msgp.AppendInt(o, z.set)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *listPathOptions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "BaseDir":
z.BaseDir, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "BaseDir")
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "FilterPrefix":
z.FilterPrefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "FilterPrefix")
return
}
case "Marker":
z.Marker, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Marker")
return
}
case "Limit":
z.Limit, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
case "AskDisks":
z.AskDisks, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AskDisks")
return
}
case "InclDeleted":
z.InclDeleted, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "InclDeleted")
return
}
case "Recursive":
z.Recursive, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
case "Separator":
z.Separator, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Separator")
return
}
case "Create":
z.Create, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Create")
return
}
case "IncludeDirectories":
z.IncludeDirectories, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "IncludeDirectories")
return
}
case "Transient":
z.Transient, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Transient")
return
}
case "Versioned":
z.Versioned, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Versioned")
return
}
case "StopDiskAtLimit":
z.StopDiskAtLimit, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StopDiskAtLimit")
return
}
case "pool":
z.pool, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "pool")
return
}
case "set":
z.set, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "set")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *listPathOptions) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 8 + msgp.StringPrefixSize + len(z.BaseDir) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 13 + msgp.StringPrefixSize + len(z.FilterPrefix) + 7 + msgp.StringPrefixSize + len(z.Marker) + 6 + msgp.IntSize + 9 + msgp.StringPrefixSize + len(z.AskDisks) + 12 + msgp.BoolSize + 10 + msgp.BoolSize + 10 + msgp.StringPrefixSize + len(z.Separator) + 7 + msgp.BoolSize + 19 + msgp.BoolSize + 10 + msgp.BoolSize + 10 + msgp.BoolSize + 16 + msgp.BoolSize + 5 + msgp.IntSize + 4 + msgp.IntSize
return
}

cmd/metacache-set_gen_test.go (new file, 123 lines)

@@ -0,0 +1,123 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshallistPathOptions(t *testing.T) {
v := listPathOptions{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglistPathOptions(b *testing.B) {
v := listPathOptions{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglistPathOptions(b *testing.B) {
v := listPathOptions{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallistPathOptions(b *testing.B) {
v := listPathOptions{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelistPathOptions(t *testing.T) {
v := listPathOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelistPathOptions Msgsize() is inaccurate")
}
vn := listPathOptions{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelistPathOptions(b *testing.B) {
v := listPathOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelistPathOptions(b *testing.B) {
v := listPathOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

cmd/metrics-v2.go

@@ -42,6 +42,8 @@ import (
 	"github.com/prometheus/procfs"
 )

+//go:generate msgp -file=$GOFILE -unexported -io=false
+
 var (
 	nodeCollector    *minioNodeCollector
 	clusterCollector *minioClusterCollector
@@ -328,7 +330,7 @@ type Metric struct {
 // MetricsGroup are a group of metrics that are initialized together.
 type MetricsGroup struct {
-	metricsCache     timedValue
+	metricsCache     timedValue `msg:"-"`
 	cacheInterval    time.Duration
 	metricsGroupOpts MetricsGroupOpts
 }
@@ -3989,6 +3991,7 @@ func collectMetric(metric Metric, labels []string, values []string, metricName s
 	}
 }

+//msgp:ignore minioBucketCollector
 type minioBucketCollector struct {
 	metricsGroups []*MetricsGroup
 	desc          *prometheus.Desc
@@ -4024,6 +4027,7 @@ func (c *minioBucketCollector) Collect(out chan<- prometheus.Metric) {
 	wg.Wait()
 }

+//msgp:ignore minioClusterCollector
 type minioClusterCollector struct {
 	metricsGroups []*MetricsGroup
 	desc          *prometheus.Desc
@@ -4083,6 +4087,8 @@ func ReportMetrics(ctx context.Context, metricsGroups []*MetricsGroup) <-chan Me
 }

 // minioNodeCollector is the Custom Collector
+//
+//msgp:ignore minioNodeCollector
 type minioNodeCollector struct {
 	metricsGroups []*MetricsGroup
 	desc          *prometheus.Desc
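Unlike the locker and metacache files, this one passes `-io=false`, so only the buffer-based `MarshalMsg`/`UnmarshalMsg`/`Msgsize` methods are generated, with no streaming `EncodeMsg`/`DecodeMsg` pair, as the generated file below confirms. A round-trip therefore goes through byte slices:

```
// Sketch: -io=false leaves only the []byte-based methods.
func roundTripMetric(m Metric) (Metric, error) {
	b, err := m.MarshalMsg(nil)
	if err != nil {
		return Metric{}, err
	}
	var out Metric
	_, err = out.UnmarshalMsg(b)
	return out, err
}
```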

cmd/metrics-v2_gen.go (new file, 644 lines)

@@ -0,0 +1,644 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// MarshalMsg implements msgp.Marshaler
func (z *Metric) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "Description"
o = append(o, 0x86, 0xab, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e)
o, err = z.Description.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Description")
return
}
// string "StaticLabels"
o = append(o, 0xac, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.StaticLabels)))
for za0001, za0002 := range z.StaticLabels {
o = msgp.AppendString(o, za0001)
o = msgp.AppendString(o, za0002)
}
// string "Value"
o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65)
o = msgp.AppendFloat64(o, z.Value)
// string "VariableLabels"
o = append(o, 0xae, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.VariableLabels)))
for za0003, za0004 := range z.VariableLabels {
o = msgp.AppendString(o, za0003)
o = msgp.AppendString(o, za0004)
}
// string "HistogramBucketLabel"
o = append(o, 0xb4, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c)
o = msgp.AppendString(o, z.HistogramBucketLabel)
// string "Histogram"
o = append(o, 0xa9, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
o = msgp.AppendMapHeader(o, uint32(len(z.Histogram)))
for za0005, za0006 := range z.Histogram {
o = msgp.AppendString(o, za0005)
o = msgp.AppendUint64(o, za0006)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Metric) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Description":
bts, err = z.Description.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Description")
return
}
case "StaticLabels":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StaticLabels")
return
}
if z.StaticLabels == nil {
z.StaticLabels = make(map[string]string, zb0002)
} else if len(z.StaticLabels) > 0 {
for key := range z.StaticLabels {
delete(z.StaticLabels, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 string
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StaticLabels")
return
}
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StaticLabels", za0001)
return
}
z.StaticLabels[za0001] = za0002
}
case "Value":
z.Value, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Value")
return
}
case "VariableLabels":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VariableLabels")
return
}
if z.VariableLabels == nil {
z.VariableLabels = make(map[string]string, zb0003)
} else if len(z.VariableLabels) > 0 {
for key := range z.VariableLabels {
delete(z.VariableLabels, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 string
zb0003--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VariableLabels")
return
}
za0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VariableLabels", za0003)
return
}
z.VariableLabels[za0003] = za0004
}
case "HistogramBucketLabel":
z.HistogramBucketLabel, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HistogramBucketLabel")
return
}
case "Histogram":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Histogram")
return
}
if z.Histogram == nil {
z.Histogram = make(map[string]uint64, zb0004)
} else if len(z.Histogram) > 0 {
for key := range z.Histogram {
delete(z.Histogram, key)
}
}
for zb0004 > 0 {
var za0005 string
var za0006 uint64
zb0004--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Histogram")
return
}
za0006, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Histogram", za0005)
return
}
z.Histogram[za0005] = za0006
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Metric) Msgsize() (s int) {
s = 1 + 12 + z.Description.Msgsize() + 13 + msgp.MapHeaderSize
if z.StaticLabels != nil {
for za0001, za0002 := range z.StaticLabels {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
}
}
s += 6 + msgp.Float64Size + 15 + msgp.MapHeaderSize
if z.VariableLabels != nil {
for za0003, za0004 := range z.VariableLabels {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(za0004)
}
}
s += 21 + msgp.StringPrefixSize + len(z.HistogramBucketLabel) + 10 + msgp.MapHeaderSize
if z.Histogram != nil {
for za0005, za0006 := range z.Histogram {
_ = za0006
s += msgp.StringPrefixSize + len(za0005) + msgp.Uint64Size
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MetricDescription) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 5
// string "Namespace"
o = append(o, 0x85, 0xa9, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65)
o = msgp.AppendString(o, string(z.Namespace))
// string "Subsystem"
o = append(o, 0xa9, 0x53, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d)
o = msgp.AppendString(o, string(z.Subsystem))
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, string(z.Name))
// string "Help"
o = append(o, 0xa4, 0x48, 0x65, 0x6c, 0x70)
o = msgp.AppendString(o, z.Help)
// string "Type"
o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, string(z.Type))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricDescription) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Namespace":
{
var zb0002 string
zb0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Namespace")
return
}
z.Namespace = MetricNamespace(zb0002)
}
case "Subsystem":
{
var zb0003 string
zb0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Subsystem")
return
}
z.Subsystem = MetricSubsystem(zb0003)
}
case "Name":
{
var zb0004 string
zb0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
z.Name = MetricName(zb0004)
}
case "Help":
z.Help, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Help")
return
}
case "Type":
{
var zb0005 string
zb0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = MetricType(zb0005)
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MetricDescription) Msgsize() (s int) {
s = 1 + 10 + msgp.StringPrefixSize + len(string(z.Namespace)) + 10 + msgp.StringPrefixSize + len(string(z.Subsystem)) + 5 + msgp.StringPrefixSize + len(string(z.Name)) + 5 + msgp.StringPrefixSize + len(z.Help) + 5 + msgp.StringPrefixSize + len(string(z.Type))
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricName) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricName) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricName(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricName) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricNamespace) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricNamespace) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricNamespace(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricNamespace) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricSubsystem) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricSubsystem) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricSubsystem(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricSubsystem) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricType) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MetricsGroup) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "cacheInterval"
o = append(o, 0x82, 0xad, 0x63, 0x61, 0x63, 0x68, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c)
o = msgp.AppendDuration(o, z.cacheInterval)
// string "metricsGroupOpts"
o = append(o, 0xb0, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4f, 0x70, 0x74, 0x73)
o, err = z.metricsGroupOpts.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "metricsGroupOpts")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricsGroup) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "cacheInterval":
z.cacheInterval, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "cacheInterval")
return
}
case "metricsGroupOpts":
bts, err = z.metricsGroupOpts.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "metricsGroupOpts")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MetricsGroup) Msgsize() (s int) {
s = 1 + 14 + msgp.DurationSize + 17 + z.metricsGroupOpts.Msgsize()
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MetricsGroupOpts) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 12
// string "dependGlobalObjectAPI"
o = append(o, 0x8c, 0xb5, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x50, 0x49)
o = msgp.AppendBool(o, z.dependGlobalObjectAPI)
// string "dependGlobalAuthNPlugin"
o = append(o, 0xb7, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x4e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e)
o = msgp.AppendBool(o, z.dependGlobalAuthNPlugin)
// string "dependGlobalSiteReplicationSys"
o = append(o, 0xbe, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x69, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x73)
o = msgp.AppendBool(o, z.dependGlobalSiteReplicationSys)
// string "dependGlobalNotificationSys"
o = append(o, 0xbb, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x73)
o = msgp.AppendBool(o, z.dependGlobalNotificationSys)
// string "dependGlobalKMS"
o = append(o, 0xaf, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4b, 0x4d, 0x53)
o = msgp.AppendBool(o, z.dependGlobalKMS)
// string "bucketOnly"
o = append(o, 0xaa, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x6e, 0x6c, 0x79)
o = msgp.AppendBool(o, z.bucketOnly)
// string "dependGlobalLambdaTargetList"
o = append(o, 0xbc, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4c, 0x61, 0x6d, 0x62, 0x64, 0x61, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x69, 0x73, 0x74)
o = msgp.AppendBool(o, z.dependGlobalLambdaTargetList)
// string "dependGlobalIAMSys"
o = append(o, 0xb2, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x49, 0x41, 0x4d, 0x53, 0x79, 0x73)
o = msgp.AppendBool(o, z.dependGlobalIAMSys)
// string "dependGlobalLockServer"
o = append(o, 0xb6, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4c, 0x6f, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72)
o = msgp.AppendBool(o, z.dependGlobalLockServer)
// string "dependGlobalIsDistErasure"
o = append(o, 0xb9, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x49, 0x73, 0x44, 0x69, 0x73, 0x74, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65)
o = msgp.AppendBool(o, z.dependGlobalIsDistErasure)
// string "dependGlobalBackgroundHealState"
o = append(o, 0xbf, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65)
o = msgp.AppendBool(o, z.dependGlobalBackgroundHealState)
// string "dependBucketTargetSys"
o = append(o, 0xb5, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x79, 0x73)
o = msgp.AppendBool(o, z.dependBucketTargetSys)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricsGroupOpts) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "dependGlobalObjectAPI":
z.dependGlobalObjectAPI, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalObjectAPI")
return
}
case "dependGlobalAuthNPlugin":
z.dependGlobalAuthNPlugin, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalAuthNPlugin")
return
}
case "dependGlobalSiteReplicationSys":
z.dependGlobalSiteReplicationSys, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalSiteReplicationSys")
return
}
case "dependGlobalNotificationSys":
z.dependGlobalNotificationSys, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalNotificationSys")
return
}
case "dependGlobalKMS":
z.dependGlobalKMS, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalKMS")
return
}
case "bucketOnly":
z.bucketOnly, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "bucketOnly")
return
}
case "dependGlobalLambdaTargetList":
z.dependGlobalLambdaTargetList, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalLambdaTargetList")
return
}
case "dependGlobalIAMSys":
z.dependGlobalIAMSys, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalIAMSys")
return
}
case "dependGlobalLockServer":
z.dependGlobalLockServer, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalLockServer")
return
}
case "dependGlobalIsDistErasure":
z.dependGlobalIsDistErasure, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalIsDistErasure")
return
}
case "dependGlobalBackgroundHealState":
z.dependGlobalBackgroundHealState, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalBackgroundHealState")
return
}
case "dependBucketTargetSys":
z.dependBucketTargetSys, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependBucketTargetSys")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MetricsGroupOpts) Msgsize() (s int) {
s = 1 + 22 + msgp.BoolSize + 24 + msgp.BoolSize + 31 + msgp.BoolSize + 28 + msgp.BoolSize + 16 + msgp.BoolSize + 11 + msgp.BoolSize + 29 + msgp.BoolSize + 19 + msgp.BoolSize + 23 + msgp.BoolSize + 26 + msgp.BoolSize + 32 + msgp.BoolSize + 22 + msgp.BoolSize
return
}
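
The Msgsize estimates above are what make allocation-free marshaling practical: the value is an upper bound, so a buffer sized from it never grows inside MarshalMsg. A minimal sketch of the round-trip pattern these generated methods support (the helper name is hypothetical, not part of this change):

```
func roundTripOpts(opts MetricsGroupOpts) (MetricsGroupOpts, error) {
	// Msgsize returns an upper bound, so MarshalMsg will not reallocate.
	buf := make([]byte, 0, opts.Msgsize())
	buf, err := opts.MarshalMsg(buf)
	if err != nil {
		return MetricsGroupOpts{}, err
	}
	var out MetricsGroupOpts
	_, err = out.UnmarshalMsg(buf)
	return out, err
}
```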

cmd/metrics-v2_gen_test.go (new file)

@ -0,0 +1,241 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalMetric(t *testing.T) {
v := Metric{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetric(b *testing.B) {
v := Metric{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetric(b *testing.B) {
v := Metric{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetric(b *testing.B) {
v := Metric{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetricDescription(t *testing.T) {
v := MetricDescription{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetricDescription(b *testing.B) {
v := MetricDescription{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetricDescription(b *testing.B) {
v := MetricDescription{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetricDescription(b *testing.B) {
v := MetricDescription{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetricsGroup(t *testing.T) {
v := MetricsGroup{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetricsGroup(b *testing.B) {
v := MetricsGroup{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetricsGroup(b *testing.B) {
v := MetricsGroup{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetricsGroup(b *testing.B) {
v := MetricsGroup{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetricsGroupOpts(t *testing.T) {
v := MetricsGroupOpts{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetricsGroupOpts(b *testing.B) {
v := MetricsGroupOpts{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetricsGroupOpts(b *testing.B) {
v := MetricsGroupOpts{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetricsGroupOpts(b *testing.B) {
v := MetricsGroupOpts{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}

cmd/peer-rest-client.go

@ -18,7 +18,6 @@
 package cmd

 import (
-	"bytes"
 	"context"
 	"encoding/gob"
 	"encoding/hex"
@ -28,7 +27,6 @@ import (
 	"io"
 	"net/url"
 	"strconv"
-	"strings"
 	"sync/atomic"
 	"time"
@ -38,9 +36,7 @@ import (
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/rest"
-	"github.com/minio/pkg/v2/logger/message/log"
 	xnet "github.com/minio/pkg/v2/net"
-	"github.com/tinylib/msgp/msgp"
 )

 // client to talk to peer Nodes.
@ -154,111 +150,65 @@ func (client *peerRESTClient) Close() error {

 // GetLocks - fetch older locks for a remote node.
 func (client *peerRESTClient) GetLocks() (lockMap map[string][]lockRequesterInfo, err error) {
-	respBody, err := client.call(peerRESTMethodGetLocks, nil, nil, -1)
-	if err != nil {
-		return
+	resp, err := getLocksRPC.Call(context.Background(), client.gridConn(), grid.NewMSS())
+	if err != nil || resp == nil {
+		return nil, err
 	}
-	lockMap = map[string][]lockRequesterInfo{}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&lockMap)
-	return lockMap, err
+	return *resp, nil
 }

 // LocalStorageInfo - fetch server information for a remote node.
 func (client *peerRESTClient) LocalStorageInfo(metrics bool) (info StorageInfo, err error) {
-	values := make(url.Values)
-	values.Set(peerRESTMetrics, strconv.FormatBool(metrics))
-	respBody, err := client.call(peerRESTMethodLocalStorageInfo, values, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := localStorageInfoRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
+		peerRESTMetrics: strconv.FormatBool(metrics),
+	}))
+	return resp.ValueOrZero(), err
 }

 // ServerInfo - fetch server information for a remote node.
 func (client *peerRESTClient) ServerInfo(metrics bool) (info madmin.ServerProperties, err error) {
-	values := make(url.Values)
-	values.Set(peerRESTMetrics, strconv.FormatBool(metrics))
-	respBody, err := client.call(peerRESTMethodServerInfo, values, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := serverInfoRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{peerRESTMetrics: strconv.FormatBool(metrics)}))
+	return resp.ValueOrZero(), err
 }

 // GetCPUs - fetch CPU information for a remote node.
 func (client *peerRESTClient) GetCPUs(ctx context.Context) (info madmin.CPUs, err error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodCPUInfo, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := getCPUsHandler.Call(ctx, client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }

 // GetNetInfo - fetch network information for a remote node.
 func (client *peerRESTClient) GetNetInfo(ctx context.Context) (info madmin.NetInfo, err error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodNetHwInfo, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := getNetInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }

 // GetPartitions - fetch disk partition information for a remote node.
 func (client *peerRESTClient) GetPartitions(ctx context.Context) (info madmin.Partitions, err error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodDiskHwInfo, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := getPartitionsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }

 // GetOSInfo - fetch OS information for a remote node.
 func (client *peerRESTClient) GetOSInfo(ctx context.Context) (info madmin.OSInfo, err error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodOsInfo, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := getOSInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }

 // GetSELinuxInfo - fetch SELinux information for a remote node.
 func (client *peerRESTClient) GetSELinuxInfo(ctx context.Context) (info madmin.SysServices, err error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodSysServices, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := getSysServicesRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }

 // GetSysConfig - fetch sys config for a remote node.
 func (client *peerRESTClient) GetSysConfig(ctx context.Context) (info madmin.SysConfig, err error) {
 	sent := time.Now()
-	respBody, err := client.callWithContext(ctx, peerRESTMethodSysConfig, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	roundtrip := int32(time.Since(sent).Milliseconds())
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
+	resp, err := getSysConfigRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	info = resp.ValueOrZero()
 	if ti, ok := info.Config["time-info"].(madmin.TimeInfo); ok {
-		ti.RoundtripDuration = roundtrip
+		rt := int32(time.Since(sent).Milliseconds())
+		ti.RoundtripDuration = rt
 		info.Config["time-info"] = ti
 	}
 	return info, err
@ -266,24 +216,14 @@ func (client *peerRESTClient) GetSysConfig(ctx context.Context) (info madmin.Sys

 // GetSysErrors - fetch sys errors for a remote node.
 func (client *peerRESTClient) GetSysErrors(ctx context.Context) (info madmin.SysErrors, err error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodSysErrors, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := getSysErrorsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }

 // GetMemInfo - fetch memory information for a remote node.
 func (client *peerRESTClient) GetMemInfo(ctx context.Context) (info madmin.MemInfo, err error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodMemInfo, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := getMemInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }
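
Every info-style call above now follows the same two-line shape: one typed RPC call, then `resp.ValueOrZero()`, which yields the type's zero value when the response is nil, so the error path needs no separate guard. A sketch of the shape with a hypothetical method name (the RPC and client types are the ones defined in this commit):

```
// Hypothetical example of the converted call-site pattern.
func (client *peerRESTClient) memInfoExample(ctx context.Context) (madmin.MemInfo, error) {
	resp, err := getMemInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
	// ValueOrZero is nil-safe: on a failed call it yields madmin.MemInfo{}.
	return resp.ValueOrZero(), err
}
```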
 // GetMetrics - fetch metrics from a remote node.
@ -298,52 +238,34 @@ func (client *peerRESTClient) GetMetrics(ctx context.Context, t madmin.MetricTyp
 	}
 	values.Set(peerRESTJobID, opts.jobID)
 	values.Set(peerRESTDepID, opts.depID)
-	respBody, err := client.callWithContext(ctx, peerRESTMethodMetrics, values, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	v, err := getMetricsRPC.Call(ctx, client.gridConn(), grid.NewURLValuesWith(values))
+	return v.ValueOrZero(), err
 }

 func (client *peerRESTClient) GetResourceMetrics(ctx context.Context) (<-chan Metric, error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodResourceMetrics, nil, nil, -1)
+	st, err := getResourceMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
 	if err != nil {
 		return nil, err
 	}
-	dec := gob.NewDecoder(respBody)
-	ch := make(chan Metric)
+	ch := make(chan Metric, 1)
 	go func(ch chan<- Metric) {
-		defer func() {
-			xhttp.DrainBody(respBody)
-			close(ch)
-		}()
-		for {
-			var metric Metric
-			if err := dec.Decode(&metric); err != nil {
-				return
-			}
+		defer close(ch)
+		st.Results(func(metric *Metric) error {
 			select {
 			case <-ctx.Done():
-				return
-			case ch <- metric:
+				return ctx.Err()
+			case ch <- *metric:
+				return nil
 			}
-		}
+		})
 	}(ch)
 	return ch, nil
 }

 // GetProcInfo - fetch MinIO process information for a remote node.
 func (client *peerRESTClient) GetProcInfo(ctx context.Context) (info madmin.ProcInfo, err error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodProcInfo, nil, nil, -1)
-	if err != nil {
-		return
-	}
-	defer xhttp.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+	resp, err := getProcInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }
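
For streamed results the contract is inverted relative to the old decode-until-EOF loop: `st.Results` drives a callback, and the callback's return value controls the stream, so returning `ctx.Err()` is what cancels the remote sender. A hedged sketch (the helper name is hypothetical, and it assumes `Results` returns the terminal stream error, as the typed stream wrapper in this commit does):

```
func collectMetrics(ctx context.Context, client *peerRESTClient) ([]Metric, error) {
	st, err := getResourceMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
	if err != nil {
		return nil, err
	}
	var out []Metric
	// A non-nil error from the callback stops the remote sender.
	err = st.Results(func(m *Metric) error {
		if err := ctx.Err(); err != nil {
			return err
		}
		out = append(out, *m)
		return nil
	})
	return out, err
}
```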
// StartProfiling - Issues profiling command on the peer node. // StartProfiling - Issues profiling command on the peer node.
@ -371,51 +293,36 @@ func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err
// GetBucketStats - load bucket statistics // GetBucketStats - load bucket statistics
func (client *peerRESTClient) GetBucketStats(bucket string) (BucketStats, error) { func (client *peerRESTClient) GetBucketStats(bucket string) (BucketStats, error) {
values := make(url.Values) resp, err := getBucketStatsRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
values.Set(peerRESTBucket, bucket) peerRESTBucket: bucket,
respBody, err := client.call(peerRESTMethodGetBucketStats, values, nil, -1) }))
if err != nil { if err != nil || resp == nil {
return BucketStats{}, err return BucketStats{}, err
} }
return *resp, nil
var bs BucketStats
defer xhttp.DrainBody(respBody)
return bs, msgp.Decode(respBody, &bs)
} }
// GetSRMetrics- loads site replication metrics, optionally for a specific bucket // GetSRMetrics loads site replication metrics, optionally for a specific bucket
func (client *peerRESTClient) GetSRMetrics() (SRMetricsSummary, error) { func (client *peerRESTClient) GetSRMetrics() (SRMetricsSummary, error) {
values := make(url.Values) resp, err := getSRMetricsRPC.Call(context.Background(), client.gridConn(), grid.NewMSS())
respBody, err := client.call(peerRESTMethodGetSRMetrics, values, nil, -1) if err != nil || resp == nil {
if err != nil {
return SRMetricsSummary{}, err return SRMetricsSummary{}, err
} }
return *resp, nil
var sm SRMetricsSummary
defer xhttp.DrainBody(respBody)
return sm, msgp.Decode(respBody, &sm)
} }
// GetAllBucketStats - load replication stats for all buckets // GetAllBucketStats - load replication stats for all buckets
func (client *peerRESTClient) GetAllBucketStats() (BucketStatsMap, error) { func (client *peerRESTClient) GetAllBucketStats() (BucketStatsMap, error) {
values := make(url.Values) resp, err := getAllBucketStatsRPC.Call(context.Background(), client.gridConn(), grid.NewMSS())
respBody, err := client.call(peerRESTMethodGetAllBucketStats, values, nil, -1) if err != nil || resp == nil {
if err != nil {
return BucketStatsMap{}, err return BucketStatsMap{}, err
} }
return *resp, nil
bsMap := BucketStatsMap{}
defer xhttp.DrainBody(respBody)
return bsMap, msgp.Decode(respBody, &bsMap)
} }
// LoadBucketMetadata - load bucket metadata // LoadBucketMetadata - load bucket metadata
func (client *peerRESTClient) LoadBucketMetadata(bucket string) error { func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
conn := client.gridConn() _, err := loadBucketMetadataRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
if conn == nil {
return nil
}
_, err := loadBucketMetadataHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
peerRESTBucket: bucket, peerRESTBucket: bucket,
})) }))
return err return err
@ -423,11 +330,7 @@ func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
// DeleteBucketMetadata - Delete bucket metadata // DeleteBucketMetadata - Delete bucket metadata
func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error { func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {
conn := client.gridConn() _, err := deleteBucketMetadataRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
if conn == nil {
return nil
}
_, err := deleteBucketMetadataHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
peerRESTBucket: bucket, peerRESTBucket: bucket,
})) }))
return err return err
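
The `conn == nil` guards deleted above are not lost behavior: per the commit description, handlers declared with `.IgnoreNilConn()` return a nil error when `Call` receives a nil connection, so an offline peer stays a silent no-op for these fire-and-forget notifications. A hypothetical illustration of the semantics, not MinIO code:

```
// Generic restatement of what IgnoreNilConn guarantees at the call site.
func callIgnoringNilConn[Resp any](conn *grid.Connection, do func(*grid.Connection) (Resp, error)) (Resp, error) {
	var zero Resp
	if conn == nil {
		return zero, nil // mirrors the per-call-site guards removed above
	}
	return do(conn)
}
```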
@ -435,12 +338,7 @@ func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {

 // DeletePolicy - delete a specific canned policy.
 func (client *peerRESTClient) DeletePolicy(policyName string) (err error) {
-	conn := client.gridConn()
-	if conn == nil {
-		return nil
-	}
-	_, err = deletePolicyHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
+	_, err = deletePolicyRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
 		peerRESTPolicy: policyName,
 	}))
 	return err
@ -448,12 +346,7 @@ func (client *peerRESTClient) DeletePolicy(policyName string) (err error) {

 // LoadPolicy - reload a specific canned policy.
 func (client *peerRESTClient) LoadPolicy(policyName string) (err error) {
-	conn := client.gridConn()
-	if conn == nil {
-		return nil
-	}
-	_, err = loadPolicyHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
+	_, err = loadPolicyRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
 		peerRESTPolicy: policyName,
 	}))
 	return err
@ -461,12 +354,7 @@ func (client *peerRESTClient) LoadPolicy(policyName string) (err error) {

 // LoadPolicyMapping - reload a specific policy mapping
 func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, userType IAMUserType, isGroup bool) error {
-	conn := client.gridConn()
-	if conn == nil {
-		return nil
-	}
-	_, err := loadPolicyMappingHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
+	_, err := loadPolicyMappingRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
 		peerRESTUserOrGroup: userOrGroup,
 		peerRESTUserType:    strconv.Itoa(int(userType)),
 		peerRESTIsGroup:     strconv.FormatBool(isGroup),
@ -476,12 +364,7 @@ func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, userType IAM

 // DeleteUser - delete a specific user.
 func (client *peerRESTClient) DeleteUser(accessKey string) (err error) {
-	conn := client.gridConn()
-	if conn == nil {
-		return nil
-	}
-	_, err = deleteUserHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
+	_, err = deleteUserRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
 		peerRESTUser: accessKey,
 	}))
 	return err
@ -489,12 +372,7 @@ func (client *peerRESTClient) DeleteUser(accessKey string) (err error) {

 // DeleteServiceAccount - delete a specific service account.
 func (client *peerRESTClient) DeleteServiceAccount(accessKey string) (err error) {
-	conn := client.gridConn()
-	if conn == nil {
-		return nil
-	}
-	_, err = deleteSvcActHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
+	_, err = deleteSvcActRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
 		peerRESTUser: accessKey,
 	}))
 	return err
@ -502,12 +380,7 @@ func (client *peerRESTClient) DeleteServiceAccount(accessKey string) (err error)

 // LoadUser - reload a specific user.
 func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error) {
-	conn := client.gridConn()
-	if conn == nil {
-		return nil
-	}
-	_, err = loadUserHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
+	_, err = loadUserRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
 		peerRESTUser:     accessKey,
 		peerRESTUserTemp: strconv.FormatBool(temp),
 	}))
@ -516,12 +389,7 @@ func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error)

 // LoadServiceAccount - reload a specific service account.
 func (client *peerRESTClient) LoadServiceAccount(accessKey string) (err error) {
-	conn := client.gridConn()
-	if conn == nil {
-		return nil
-	}
-	_, err = loadSvcActHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
+	_, err = loadSvcActRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
 		peerRESTUser: accessKey,
 	}))
 	return err
@ -529,12 +397,7 @@ func (client *peerRESTClient) LoadServiceAccount(accessKey string) (err error) {

 // LoadGroup - send load group command to peers.
 func (client *peerRESTClient) LoadGroup(group string) error {
-	conn := client.gridConn()
-	if conn == nil {
-		return nil
-	}
-	_, err := loadGroupHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
+	_, err := loadGroupRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
 		peerRESTGroup: group,
 	}))
 	return err
@ -546,7 +409,7 @@ func (client *peerRESTClient) ReloadSiteReplicationConfig(ctx context.Context) e
 		return nil
 	}

-	_, err := reloadSiteReplicationConfigHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
+	_, err := reloadSiteReplicationConfigRPC.Call(ctx, conn, grid.NewMSS())
 	return err
 }
@ -577,28 +440,17 @@ func (client *peerRESTClient) CommitBinary(ctx context.Context) error {

 // SignalService - sends signal to peer nodes.
 func (client *peerRESTClient) SignalService(sig serviceSignal, subSys string, dryRun bool) error {
-	values := make(url.Values)
+	values := grid.NewMSS()
 	values.Set(peerRESTSignal, strconv.Itoa(int(sig)))
 	values.Set(peerRESTDryRun, strconv.FormatBool(dryRun))
 	values.Set(peerRESTSubSys, subSys)
-	respBody, err := client.call(peerRESTMethodSignalService, values, nil, -1)
-	if err != nil {
-		return err
-	}
-	defer xhttp.DrainBody(respBody)
-	return nil
+	_, err := signalServiceRPC.Call(context.Background(), client.gridConn(), values)
+	return err
 }

 func (client *peerRESTClient) BackgroundHealStatus() (madmin.BgHealState, error) {
-	respBody, err := client.call(peerRESTMethodBackgroundHealStatus, nil, nil, -1)
-	if err != nil {
-		return madmin.BgHealState{}, err
-	}
-	defer xhttp.DrainBody(respBody)
-
-	state := madmin.BgHealState{}
-	err = gob.NewDecoder(respBody).Decode(&state)
-	return state, err
+	resp, err := getBackgroundHealStatusRPC.Call(context.Background(), client.gridConn(), grid.NewMSS())
+	return resp.ValueOrZero(), err
 }
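
`grid.MSS` keeps a `url.Values`-style `Set` method, which is why SignalService converts with almost no reshaping. The same request map can also be built in one step, as most other call sites in this diff do (fragment; `sig`, `dryRun`, and `subSys` are SignalService's own parameters):

```
// Equivalent one-step construction of the SignalService request map.
values := grid.NewMSSWith(map[string]string{
	peerRESTSignal: strconv.Itoa(int(sig)),
	peerRESTDryRun: strconv.FormatBool(dryRun),
	peerRESTSubSys: subSys,
})
```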
 // GetMetacacheListing - get a new or existing metacache.
@ -607,19 +459,7 @@ func (client *peerRESTClient) GetMetacacheListing(ctx context.Context, o listPat
 		resp := localMetacacheMgr.getBucket(ctx, o.Bucket).findCache(o)
 		return &resp, nil
 	}
-
-	var reader bytes.Buffer
-	err := gob.NewEncoder(&reader).Encode(o)
-	if err != nil {
-		return nil, err
-	}
-	respBody, err := client.callWithContext(ctx, peerRESTMethodGetMetacacheListing, nil, &reader, int64(reader.Len()))
-	if err != nil {
-		return nil, err
-	}
-	var resp metacache
-	defer xhttp.DrainBody(respBody)
-	return &resp, msgp.Decode(respBody, &resp)
+	return getMetacacheListingRPC.Call(ctx, client.gridConn(), &o)
 }

 // UpdateMetacacheListing - update an existing metacache it will unconditionally be updated to the new state.
@ -627,17 +467,11 @@ func (client *peerRESTClient) UpdateMetacacheListing(ctx context.Context, m meta
 	if client == nil {
 		return localMetacacheMgr.updateCacheEntry(m)
 	}
-	b, err := m.MarshalMsg(nil)
-	if err != nil {
-		return m, err
+	resp, err := updateMetacacheListingRPC.Call(ctx, client.gridConn(), &m)
+	if err != nil || resp == nil {
+		return metacache{}, err
 	}
-	respBody, err := client.callWithContext(ctx, peerRESTMethodUpdateMetacacheListing, nil, bytes.NewBuffer(b), int64(len(b)))
-	if err != nil {
-		return m, err
-	}
-	defer xhttp.DrainBody(respBody)
-	var resp metacache
-	return resp, msgp.Decode(respBody, &resp)
+	return *resp, nil
 }
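
One behavioral nuance in UpdateMetacacheListing: the old code handed the caller's `m` back on failure, while the converted version returns an empty `metacache{}` alongside the error, so callers must treat the returned cache as valid only when `err` is nil.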
 func (client *peerRESTClient) ReloadPoolMeta(ctx context.Context) error {
@ -645,7 +479,7 @@ func (client *peerRESTClient) ReloadPoolMeta(ctx context.Context) error {
 	if conn == nil {
 		return nil
 	}
-	_, err := reloadPoolMetaHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
+	_, err := reloadPoolMetaRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
 	return err
 }
@ -654,7 +488,7 @@ func (client *peerRESTClient) StopRebalance(ctx context.Context) error {
 	if conn == nil {
 		return nil
 	}
-	_, err := stopRebalanceHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
+	_, err := stopRebalanceRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
 	return err
 }
@ -663,7 +497,7 @@ func (client *peerRESTClient) LoadRebalanceMeta(ctx context.Context, startRebala
 	if conn == nil {
 		return nil
 	}
-	_, err := loadRebalanceMetaHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{
+	_, err := loadRebalanceMetaRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{
 		peerRESTStartRebalance: strconv.FormatBool(startRebalance),
 	}))
 	return err
@ -674,7 +508,7 @@ func (client *peerRESTClient) LoadTransitionTierConfig(ctx context.Context) erro
 	if conn == nil {
 		return nil
 	}
-	_, err := loadTransitionTierConfigHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
+	_, err := loadTransitionTierConfigRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
 	return err
 }
@ -711,7 +545,7 @@ func (client *peerRESTClient) doListen(ctx context.Context, listenCh chan<- []by
 	if conn == nil {
 		return
 	}
-	st, err := listenHandler.Call(ctx, conn, grid.NewURLValuesWith(v))
+	st, err := listenRPC.Call(ctx, conn, grid.NewURLValuesWith(v))
 	if err != nil {
 		return
 	}
@ -759,48 +593,31 @@ func (client *peerRESTClient) Trace(ctx context.Context, traceCh chan<- []byte,
 	}()
 }

-func (client *peerRESTClient) doConsoleLog(logCh chan log.Info, doneCh <-chan struct{}) {
-	// To cancel the REST request in case doneCh gets closed.
-	ctx, cancel := context.WithCancel(GlobalContext)
-
-	cancelCh := make(chan struct{})
-	defer close(cancelCh)
-	go func() {
-		select {
-		case <-doneCh:
-		case <-cancelCh:
-			// There was an error in the REST request.
-		}
-		cancel()
-	}()
-
-	respBody, err := client.callWithContext(ctx, peerRESTMethodLog, nil, nil, -1)
-	defer xhttp.DrainBody(respBody)
+func (client *peerRESTClient) doConsoleLog(ctx context.Context, kind madmin.LogMask, logCh chan<- []byte) {
+	st, err := consoleLogRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{
+		peerRESTLogMask: strconv.Itoa(int(kind)),
+	}))
 	if err != nil {
 		return
 	}
-
-	dec := gob.NewDecoder(respBody)
-	for {
-		var lg log.Info
-		if err = dec.Decode(&lg); err != nil {
-			break
-		}
+	st.Results(func(b *grid.Bytes) error {
 		select {
-		case logCh <- lg:
+		case logCh <- *b:
 		default:
+			consoleLogRPC.PutResponse(b)
 			// Do not block on slow receivers.
 		}
-	}
+		return nil
+	})
 }

 // ConsoleLog - sends request to peer nodes to get console logs
-func (client *peerRESTClient) ConsoleLog(logCh chan log.Info, doneCh <-chan struct{}) {
+func (client *peerRESTClient) ConsoleLog(ctx context.Context, kind madmin.LogMask, logCh chan<- []byte) {
 	go func() {
 		for {
-			client.doConsoleLog(logCh, doneCh)
+			client.doConsoleLog(ctx, kind, logCh)
 			select {
-			case <-doneCh:
+			case <-ctx.Done():
 				return
 			default:
 				// There was error in the REST request, retry after sometime as probably the peer is down.
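
The `default:` branch in doConsoleLog is also where pooled memory is reclaimed: a delivered `*grid.Bytes` belongs to the receiver, but a dropped one must go back via `PutResponse` or the pool leaks. A hypothetical helper restating the ownership rule the converted code follows:

```
// forwardOrRecycle is not part of this change; it restates the rule
// doConsoleLog applies to pooled stream payloads.
func forwardOrRecycle(logCh chan<- []byte, b *grid.Bytes) {
	select {
	case logCh <- *b:
		// Receiver now owns the buffer.
	default:
		consoleLogRPC.PutResponse(b) // dropped: return it to the pool
	}
}
```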
@ -838,71 +655,53 @@ func newPeerRestClients(endpoints EndpointServerPools) (remote, all []*peerRESTC

 // MonitorBandwidth - send http trace request to peer nodes
 func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []string) (*bandwidth.BucketBandwidthReport, error) {
-	values := make(url.Values)
-	values.Set(peerRESTBuckets, strings.Join(buckets, ","))
-	respBody, err := client.callWithContext(ctx, peerRESTMethodGetBandwidth, values, nil, -1)
-	if err != nil {
-		return nil, err
-	}
-	defer xhttp.DrainBody(respBody)
-
-	dec := gob.NewDecoder(respBody)
-	var bandwidthReport bandwidth.BucketBandwidthReport
-	err = dec.Decode(&bandwidthReport)
-	return &bandwidthReport, err
+	values := grid.NewURLValuesWith(map[string][]string{
+		peerRESTBuckets: buckets,
+	})
+	return getBandwidthRPC.Call(ctx, client.gridConn(), values)
 }

 func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric, error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodGetPeerMetrics, nil, nil, -1)
+	resp, err := getPeerMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
 	if err != nil {
 		return nil, err
 	}
-	dec := gob.NewDecoder(respBody)
 	ch := make(chan Metric)
-	go func(ch chan<- Metric) {
-		defer func() {
-			xhttp.DrainBody(respBody)
-			close(ch)
-		}()
-		for {
-			var metric Metric
-			if err := dec.Decode(&metric); err != nil {
-				return
+	go func() {
+		defer close(ch)
+		for _, m := range resp.Value() {
+			if m == nil {
+				continue
 			}
 			select {
 			case <-ctx.Done():
 				return
-			case ch <- metric:
+			case ch <- *m:
 			}
 		}
-	}(ch)
+	}()
 	return ch, nil
 }

 func (client *peerRESTClient) GetPeerBucketMetrics(ctx context.Context) (<-chan Metric, error) {
-	respBody, err := client.callWithContext(ctx, peerRESTMethodGetPeerBucketMetrics, nil, nil, -1)
+	resp, err := getPeerBucketMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
 	if err != nil {
 		return nil, err
 	}
-	dec := gob.NewDecoder(respBody)
 	ch := make(chan Metric)
-	go func(ch chan<- Metric) {
-		defer func() {
-			xhttp.DrainBody(respBody)
-			close(ch)
-		}()
-		for {
-			var metric Metric
-			if err := dec.Decode(&metric); err != nil {
-				return
+	go func() {
+		defer close(ch)
+		for _, m := range resp.Value() {
+			if m == nil {
+				continue
 			}
 			select {
 			case <-ctx.Done():
 				return
-			case ch <- metric:
+			case ch <- *m:
 			}
 		}
-	}(ch)
+	}()
 	return ch, nil
 }
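
GetPeerMetrics and GetPeerBucketMetrics change shape more than most conversions here: instead of decoding a stream until EOF, the whole result now arrives as one `grid.Array[*Metric]`, and the goroutine merely replays the already-received slice to preserve the channel API. A caller that does not need the channel could use the slice directly (hypothetical helper):

```
func peerMetricsSlice(ctx context.Context, client *peerRESTClient) ([]Metric, error) {
	resp, err := getPeerMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
	if err != nil {
		return nil, err
	}
	out := make([]Metric, 0, len(resp.Value()))
	for _, m := range resp.Value() {
		if m != nil {
			out = append(out, *m)
		}
	}
	return out, nil
}
```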
@ -966,18 +765,11 @@ func (client *peerRESTClient) DriveSpeedTest(ctx context.Context, opts madmin.Dr
 }

 func (client *peerRESTClient) GetLastDayTierStats(ctx context.Context) (DailyAllTierStats, error) {
-	var result map[string]lastDayTierStats
-	respBody, err := client.callWithContext(context.Background(), peerRESTMethodGetLastDayTierStats, nil, nil, -1)
-	if err != nil {
-		return result, err
-	}
-	defer xhttp.DrainBody(respBody)
-
-	err = gob.NewDecoder(respBody).Decode(&result)
-	if err != nil {
+	resp, err := getLastDayTierStatsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
+	if err != nil || resp == nil {
 		return DailyAllTierStats{}, err
 	}
-	return DailyAllTierStats(result), nil
+	return *resp, nil
 }

 // DevNull - Used by netperf to pump data to peer

cmd/peer-rest-common.go

@ -18,50 +18,29 @@
 package cmd

 const (
-	peerRESTVersion       = "v37" // Add 'metrics' option for ServerInfo
+	peerRESTVersion       = "v38" // Convert RPC calls
 	peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
 	peerRESTPrefix        = minioReservedBucketPath + "/peer"
 	peerRESTPath          = peerRESTPrefix + peerRESTVersionPrefix
 )

 const (
 	peerRESTMethodHealth                      = "/health"
-	peerRESTMethodServerInfo                  = "/serverinfo"
-	peerRESTMethodLocalStorageInfo            = "/localstorageinfo"
-	peerRESTMethodCPUInfo                     = "/cpuinfo"
-	peerRESTMethodDiskHwInfo                  = "/diskhwinfo"
-	peerRESTMethodNetHwInfo                   = "/nethwinfo"
-	peerRESTMethodOsInfo                      = "/osinfo"
-	peerRESTMethodMemInfo                     = "/meminfo"
-	peerRESTMethodProcInfo                    = "/procinfo"
-	peerRESTMethodSysErrors                   = "/syserrors"
-	peerRESTMethodSysServices                 = "/sysservices"
-	peerRESTMethodSysConfig                   = "/sysconfig"
-	peerRESTMethodGetBucketStats              = "/getbucketstats"
-	peerRESTMethodGetAllBucketStats           = "/getallbucketstats"
-	peerRESTMethodLog                         = "/log"
-	peerRESTMethodGetMetacacheListing         = "/getmetacache"
-	peerRESTMethodUpdateMetacacheListing      = "/updatemetacache"
-	peerRESTMethodGetPeerMetrics              = "/peermetrics"
-	peerRESTMethodGetPeerBucketMetrics        = "/peerbucketmetrics"
-	peerRESTMethodStopRebalance               = "/stoprebalance"
-	peerRESTMethodMetrics                     = "/metrics"
-	peerRESTMethodResourceMetrics             = "/resourcemetrics"
-	peerRESTMethodGetSRMetrics                = "/getsrmetrics"
 	peerRESTMethodVerifyBinary                = "/verifybinary"
 	peerRESTMethodCommitBinary                = "/commitbinary"
 	peerRESTMethodSignalService               = "/signalservice"
 	peerRESTMethodBackgroundHealStatus        = "/backgroundhealstatus"
 	peerRESTMethodGetLocks                    = "/getlocks"
 	peerRESTMethodStartProfiling              = "/startprofiling"
 	peerRESTMethodDownloadProfilingData       = "/downloadprofilingdata"
 	peerRESTMethodGetBandwidth                = "/bandwidth"
 	peerRESTMethodSpeedTest                   = "/speedtest"
 	peerRESTMethodDriveSpeedTest              = "/drivespeedtest"
+	peerRESTMethodReloadSiteReplicationConfig = "/reloadsitereplicationconfig"
 	peerRESTMethodGetLastDayTierStats         = "/getlastdaytierstats"
 	peerRESTMethodDevNull                     = "/devnull"
 	peerRESTMethodNetperf                     = "/netperf"
 	peerRESTMethodGetReplicationMRF           = "/getreplicationmrf"
 )

 const (
@ -99,4 +78,5 @@ const (
 	peerRESTListenPrefix = "prefix"
 	peerRESTListenSuffix = "suffix"
 	peerRESTListenEvents = "events"
+	peerRESTLogMask      = "log-mask"
 )
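
Bumping peerRESTVersion to v38 also changes peerRESTPath for every remaining REST endpoint, so nodes on different releases fail fast with a version/path mismatch rather than attempting to decode each other's payloads; all peers in a cluster must run the same build.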

cmd/peer-rest-server.go

@ -37,6 +37,7 @@ import (
 	"github.com/dustin/go-humanize"
 	"github.com/klauspost/compress/zstd"
 	"github.com/minio/madmin-go/v3"
+	"github.com/minio/minio/internal/bucket/bandwidth"
 	b "github.com/minio/minio/internal/bucket/bandwidth"
 	"github.com/minio/minio/internal/event"
 	"github.com/minio/minio/internal/grid"
@ -45,34 +46,87 @@ import (
 	"github.com/minio/minio/internal/pubsub"
 	"github.com/minio/mux"
 	"github.com/minio/pkg/v2/logger/message/log"
-	"github.com/tinylib/msgp/msgp"
 )

 // To abstract a node over network.
 type peerRESTServer struct{}

-// GetLocksHandler - returns list of older lock from the server.
-func (s *peerRESTServer) GetLocksHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-
-	ctx := newContext(r, w, "GetLocks")
-
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(globalLockServer.DupLockMap()))
-}
-
 var (
-	deletePolicyHandler      = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeletePolicy, grid.NewMSS, grid.NewNoPayload)
-	loadPolicyHandler        = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicy, grid.NewMSS, grid.NewNoPayload)
-	loadPolicyMappingHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicyMapping, grid.NewMSS, grid.NewNoPayload)
-	deleteSvcActHandler      = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteServiceAccount, grid.NewMSS, grid.NewNoPayload)
-	loadSvcActHandler        = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadServiceAccount, grid.NewMSS, grid.NewNoPayload)
-	deleteUserHandler        = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteUser, grid.NewMSS, grid.NewNoPayload)
-	loadUserHandler          = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadUser, grid.NewMSS, grid.NewNoPayload)
-	loadGroupHandler         = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadGroup, grid.NewMSS, grid.NewNoPayload)
+	// Types & Wrappers
+	aoMetricsGroup         = grid.NewArrayOf[*Metric](func() *Metric { return &Metric{} })
+	madminBgHealState      = grid.NewJSONPool[madmin.BgHealState]()
+	madminCPUs             = grid.NewJSONPool[madmin.CPUs]()
+	madminMemInfo          = grid.NewJSONPool[madmin.MemInfo]()
+	madminNetInfo          = grid.NewJSONPool[madmin.NetInfo]()
+	madminOSInfo           = grid.NewJSONPool[madmin.OSInfo]()
+	madminPartitions       = grid.NewJSONPool[madmin.Partitions]()
+	madminProcInfo         = grid.NewJSONPool[madmin.ProcInfo]()
+	madminRealtimeMetrics  = grid.NewJSONPool[madmin.RealtimeMetrics]()
+	madminServerProperties = grid.NewJSONPool[madmin.ServerProperties]()
+	madminStorageInfo      = grid.NewJSONPool[madmin.StorageInfo]()
+	madminSysConfig        = grid.NewJSONPool[madmin.SysConfig]()
+	madminSysErrors        = grid.NewJSONPool[madmin.SysErrors]()
+	madminSysServices      = grid.NewJSONPool[madmin.SysServices]()

+	// Request -> Response RPC calls
+	deleteBucketMetadataRPC        = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteBucketMetadata, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	deleteBucketRPC                = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteBucket, grid.NewMSS, grid.NewNoPayload)
+	deletePolicyRPC                = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeletePolicy, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	deleteSvcActRPC                = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteServiceAccount, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	deleteUserRPC                  = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteUser, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	getAllBucketStatsRPC           = grid.NewSingleHandler[*grid.MSS, *BucketStatsMap](grid.HandlerGetAllBucketStats, grid.NewMSS, func() *BucketStatsMap { return &BucketStatsMap{} })
+	getBackgroundHealStatusRPC     = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.BgHealState]](grid.HandlerBackgroundHealStatus, grid.NewMSS, madminBgHealState.NewJSON)
+	getBandwidthRPC                = grid.NewSingleHandler[*grid.URLValues, *bandwidth.BucketBandwidthReport](grid.HandlerGetBandwidth, grid.NewURLValues, func() *bandwidth.BucketBandwidthReport { return &bandwidth.BucketBandwidthReport{} })
+	getBucketStatsRPC              = grid.NewSingleHandler[*grid.MSS, *BucketStats](grid.HandlerGetBucketStats, grid.NewMSS, func() *BucketStats { return &BucketStats{} })
+	getCPUsHandler                 = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.CPUs]](grid.HandlerGetCPUs, grid.NewMSS, madminCPUs.NewJSON)
+	getLastDayTierStatsRPC         = grid.NewSingleHandler[*grid.MSS, *DailyAllTierStats](grid.HandlerGetLastDayTierStats, grid.NewMSS, func() *DailyAllTierStats { return &DailyAllTierStats{} })
+	getLocksRPC                    = grid.NewSingleHandler[*grid.MSS, *localLockMap](grid.HandlerGetLocks, grid.NewMSS, func() *localLockMap { return &localLockMap{} })
+	getMemInfoRPC                  = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.MemInfo]](grid.HandlerGetMemInfo, grid.NewMSS, madminMemInfo.NewJSON)
+	getMetacacheListingRPC         = grid.NewSingleHandler[*listPathOptions, *metacache](grid.HandlerGetMetacacheListing, func() *listPathOptions { return &listPathOptions{} }, func() *metacache { return &metacache{} })
+	getMetricsRPC                  = grid.NewSingleHandler[*grid.URLValues, *grid.JSON[madmin.RealtimeMetrics]](grid.HandlerGetMetrics, grid.NewURLValues, madminRealtimeMetrics.NewJSON)
+	getNetInfoRPC                  = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.NetInfo]](grid.HandlerGetNetInfo, grid.NewMSS, madminNetInfo.NewJSON)
+	getOSInfoRPC                   = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.OSInfo]](grid.HandlerGetOSInfo, grid.NewMSS, madminOSInfo.NewJSON)
+	getPartitionsRPC               = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.Partitions]](grid.HandlerGetPartitions, grid.NewMSS, madminPartitions.NewJSON)
+	getPeerBucketMetricsRPC        = grid.NewSingleHandler[*grid.MSS, *grid.Array[*Metric]](grid.HandlerGetPeerBucketMetrics, grid.NewMSS, aoMetricsGroup.New)
+	getPeerMetricsRPC              = grid.NewSingleHandler[*grid.MSS, *grid.Array[*Metric]](grid.HandlerGetPeerMetrics, grid.NewMSS, aoMetricsGroup.New)
+	getProcInfoRPC                 = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.ProcInfo]](grid.HandlerGetProcInfo, grid.NewMSS, madminProcInfo.NewJSON)
+	getSRMetricsRPC                = grid.NewSingleHandler[*grid.MSS, *SRMetricsSummary](grid.HandlerGetSRMetrics, grid.NewMSS, func() *SRMetricsSummary { return &SRMetricsSummary{} })
+	getSysConfigRPC                = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysConfig]](grid.HandlerGetSysConfig, grid.NewMSS, madminSysConfig.NewJSON)
+	getSysErrorsRPC                = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysErrors]](grid.HandlerGetSysErrors, grid.NewMSS, madminSysErrors.NewJSON)
+	getSysServicesRPC              = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysServices]](grid.HandlerGetSysServices, grid.NewMSS, madminSysServices.NewJSON)
+	headBucketRPC                  = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerHeadBucket, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
+	healBucketRPC                  = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerHealBucket, grid.NewMSS, grid.NewNoPayload)
+	loadBucketMetadataRPC          = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadBucketMetadata, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	loadGroupRPC                   = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadGroup, grid.NewMSS, grid.NewNoPayload)
+	loadPolicyMappingRPC           = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicyMapping, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	loadPolicyRPC                  = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicy, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	loadRebalanceMetaRPC           = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadRebalanceMeta, grid.NewMSS, grid.NewNoPayload)
+	loadSvcActRPC                  = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadServiceAccount, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	loadTransitionTierConfigRPC    = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadTransitionTierConfig, grid.NewMSS, grid.NewNoPayload)
+	loadUserRPC                    = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadUser, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
+	localStorageInfoRPC            = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.StorageInfo]](grid.HandlerStorageInfo, grid.NewMSS, madminStorageInfo.NewJSON)
+	makeBucketRPC                  = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerMakeBucket, grid.NewMSS, grid.NewNoPayload)
+	reloadPoolMetaRPC              = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerReloadPoolMeta, grid.NewMSS, grid.NewNoPayload)
+	reloadSiteReplicationConfigRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerReloadSiteReplicationConfig, grid.NewMSS, grid.NewNoPayload)
+	serverInfoRPC                  = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.ServerProperties]](grid.HandlerServerInfo, grid.NewMSS, madminServerProperties.NewJSON)
+	signalServiceRPC               = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerSignalService, grid.NewMSS, grid.NewNoPayload)
+	stopRebalanceRPC               = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerStopRebalance, grid.NewMSS, grid.NewNoPayload)
+	updateMetacacheListingRPC      = grid.NewSingleHandler[*metacache, *metacache](grid.HandlerUpdateMetacacheListing, func() *metacache { return &metacache{} }, func() *metacache { return &metacache{} })

+	// STREAMS
+	// Set an output capacity of 100 for consoleLog and listenRPC
+	// There is another buffer that will buffer events.
+	consoleLogRPC         = grid.NewStream[*grid.MSS, grid.NoPayload, *grid.Bytes](grid.HandlerConsoleLog, grid.NewMSS, nil, grid.NewBytes).WithOutCapacity(100)
+	listenRPC             = grid.NewStream[*grid.URLValues, grid.NoPayload, *grid.Bytes](grid.HandlerListen, grid.NewURLValues, nil, grid.NewBytes).WithOutCapacity(100)
+	getResourceMetricsRPC = grid.NewStream[*grid.MSS, grid.NoPayload, *Metric](grid.HandlerGetResourceMetrics, grid.NewMSS, nil, func() *Metric { return &Metric{} })
 )

+// GetLocksHandler - returns list of lock from the server.
+func (s *peerRESTServer) GetLocksHandler(_ *grid.MSS) (*localLockMap, *grid.RemoteErr) {
+	res := globalLockServer.DupLockMap()
+	return &res, nil
+}
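
The `grid.JSON` wrapper and its pools exist because the madmin types above carry JSON tags but no msgp marshalers; wrapping lets the typed RPC layer move them without hand-written codecs, and the pool recycles wrapper allocations. The server-side pattern, as used by the converted handlers below (names from this diff, the function is a hypothetical sketch):

```
// Produce a pooled, JSON-wrapped response for the generic RPC layer.
func wrapMemInfoExample(info madmin.MemInfo) *grid.JSON[madmin.MemInfo] {
	return madminMemInfo.NewJSONWith(&info)
}
```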
// DeletePolicyHandler - deletes a policy on the server. // DeletePolicyHandler - deletes a policy on the server.
func (s *peerRESTServer) DeletePolicyHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { func (s *peerRESTServer) DeletePolicyHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
objAPI := newObjectLayerFn() objAPI := newObjectLayerFn()
@ -296,249 +350,123 @@ func (s *peerRESTServer) DownloadProfilingDataHandler(w http.ResponseWriter, r *
logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData)) logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData))
} }
-func (s *peerRESTServer) LocalStorageInfoHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx := newContext(r, w, "LocalStorageInfo")
+func (s *peerRESTServer) LocalStorageInfoHandler(mss *grid.MSS) (*grid.JSON[madmin.StorageInfo], *grid.RemoteErr) {
	objLayer := newObjectLayerFn()
	if objLayer == nil {
-		s.writeErrorResponse(w, errServerNotInitialized)
-		return
+		return nil, grid.NewRemoteErr(errServerNotInitialized)
	}
-	metrics, err := strconv.ParseBool(r.Form.Get(peerRESTMetrics))
+	metrics, err := strconv.ParseBool(mss.Get(peerRESTMetrics))
	if err != nil {
-		s.writeErrorResponse(w, err)
-		return
+		return nil, grid.NewRemoteErr(err)
	}
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(objLayer.LocalStorageInfo(r.Context(), metrics)))
+	info := objLayer.LocalStorageInfo(context.Background(), metrics)
+	return madminStorageInfo.NewJSONWith(&info), nil
}
// ServerInfoHandler - returns Server Info
-func (s *peerRESTServer) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx := newContext(r, w, "ServerInfo")
-	objLayer := newObjectLayerFn()
-	if objLayer == nil {
-		s.writeErrorResponse(w, errServerNotInitialized)
-		return
-	}
-	metrics, err := strconv.ParseBool(r.Form.Get(peerRESTMetrics))
+func (s *peerRESTServer) ServerInfoHandler(params *grid.MSS) (*grid.JSON[madmin.ServerProperties], *grid.RemoteErr) {
+	r := http.Request{Host: globalMinioHost}
+	metrics, err := strconv.ParseBool(params.Get(peerRESTMetrics))
	if err != nil {
-		s.writeErrorResponse(w, err)
-		return
+		return nil, grid.NewRemoteErr(err)
	}
-	info := getLocalServerProperty(globalEndpoints, r, metrics)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+	info := getLocalServerProperty(globalEndpoints, &r, metrics)
+	return madminServerProperties.NewJSONWith(&info), nil
}
// GetCPUsHandler - returns CPU info.
-func (s *peerRESTServer) GetCPUsHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetCPUs(ctx, r.Host)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+func (s *peerRESTServer) GetCPUsHandler(_ *grid.MSS) (*grid.JSON[madmin.CPUs], *grid.RemoteErr) {
+	info := madmin.GetCPUs(context.Background(), globalMinioHost)
+	return madminCPUs.NewJSONWith(&info), nil
}
// GetNetInfoHandler - returns network information.
-func (s *peerRESTServer) GetNetInfoHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetNetInfo(r.Host, globalInternodeInterface)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+func (s *peerRESTServer) GetNetInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.NetInfo], *grid.RemoteErr) {
+	info := madmin.GetNetInfo(globalMinioHost, globalInternodeInterface)
+	return madminNetInfo.NewJSONWith(&info), nil
}
// GetPartitionsHandler - returns disk partition information.
-func (s *peerRESTServer) GetPartitionsHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetPartitions(ctx, r.Host)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+func (s *peerRESTServer) GetPartitionsHandler(_ *grid.MSS) (*grid.JSON[madmin.Partitions], *grid.RemoteErr) {
+	info := madmin.GetPartitions(context.Background(), globalMinioHost)
+	return madminPartitions.NewJSONWith(&info), nil
}
// GetOSInfoHandler - returns operating system's information.
-func (s *peerRESTServer) GetOSInfoHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetOSInfo(ctx, r.Host)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+func (s *peerRESTServer) GetOSInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.OSInfo], *grid.RemoteErr) {
+	info := madmin.GetOSInfo(context.Background(), globalMinioHost)
+	return madminOSInfo.NewJSONWith(&info), nil
}
// GetProcInfoHandler - returns this MinIO process information.
-func (s *peerRESTServer) GetProcInfoHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetProcInfo(ctx, r.Host)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+func (s *peerRESTServer) GetProcInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.ProcInfo], *grid.RemoteErr) {
+	info := madmin.GetProcInfo(context.Background(), globalMinioHost)
+	return madminProcInfo.NewJSONWith(&info), nil
}
// GetMemInfoHandler - returns memory information.
-func (s *peerRESTServer) GetMemInfoHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetMemInfo(ctx, r.Host)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+func (s *peerRESTServer) GetMemInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.MemInfo], *grid.RemoteErr) {
+	info := madmin.GetMemInfo(context.Background(), globalMinioHost)
+	return madminMemInfo.NewJSONWith(&info), nil
}
// GetMetricsHandler - returns server metrics.
-func (s *peerRESTServer) GetMetricsHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
+func (s *peerRESTServer) GetMetricsHandler(v *grid.URLValues) (*grid.JSON[madmin.RealtimeMetrics], *grid.RemoteErr) {
+	values := v.Values()
	var types madmin.MetricType
-	if t, _ := strconv.ParseUint(r.Form.Get(peerRESTMetricsTypes), 10, 64); t != 0 {
+	if t, _ := strconv.ParseUint(values.Get(peerRESTMetricsTypes), 10, 64); t != 0 {
		types = madmin.MetricType(t)
	} else {
		types = madmin.MetricsAll
	}
	diskMap := make(map[string]struct{})
-	for _, disk := range r.Form[peerRESTDisk] {
+	for _, disk := range values[peerRESTDisk] {
		diskMap[disk] = struct{}{}
	}
	hostMap := make(map[string]struct{})
-	for _, host := range r.Form[peerRESTHost] {
+	for _, host := range values[peerRESTHost] {
		hostMap[host] = struct{}{}
	}
-	jobID := r.Form.Get(peerRESTJobID)
-	depID := r.Form.Get(peerRESTDepID)
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
	info := collectLocalMetrics(types, collectMetricsOpts{
		disks: diskMap,
		hosts: hostMap,
-		jobID: jobID,
-		depID: depID,
+		jobID: values.Get(peerRESTJobID),
+		depID: values.Get(peerRESTDepID),
	})
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+	return madminRealtimeMetrics.NewJSONWith(&info), nil
}
-func (s *peerRESTServer) GetResourceMetrics(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("invalid request"))
-		return
-	}
-	enc := gob.NewEncoder(w)
-	for m := range ReportMetrics(r.Context(), resourceMetricsGroups) {
-		if err := enc.Encode(m); err != nil {
-			s.writeErrorResponse(w, errors.New("Encoding metric failed: "+err.Error()))
-			return
-		}
+func (s *peerRESTServer) GetResourceMetrics(ctx context.Context, _ *grid.MSS, out chan<- *Metric) *grid.RemoteErr {
+	for m := range ReportMetrics(ctx, resourceMetricsGroups) {
+		out <- &m
	}
+	return nil
}
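The streaming variant replaces the gob encode loop with typed sends on `out`; the client presumably drains the stream through a results callback. A sketch under those assumptions:

```
// Sketch: collect streamed resource metrics from one peer.
func fetchResourceMetrics(ctx context.Context, conn *grid.Connection) ([]Metric, error) {
	st, err := getResourceMetricsRPC.Call(ctx, conn, grid.NewMSS())
	if err != nil {
		return nil, err
	}
	var metrics []Metric
	err = st.Results(func(m *Metric) error {
		metrics = append(metrics, *m) // copy: the pointer may be pooled or reused
		return nil
	})
	return metrics, err
}
```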
// GetSysConfigHandler - returns system config information.
// (only the config that are of concern to minio)
-func (s *peerRESTServer) GetSysConfigHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetSysConfig(ctx, r.Host)
-	logger.LogOnceIf(ctx, gob.NewEncoder(w).Encode(info), "get-sys-config")
+func (s *peerRESTServer) GetSysConfigHandler(_ *grid.MSS) (*grid.JSON[madmin.SysConfig], *grid.RemoteErr) {
+	info := madmin.GetSysConfig(context.Background(), globalMinioHost)
+	return madminSysConfig.NewJSONWith(&info), nil
}
// GetSysServicesHandler - returns system services information.
// (only the services that are of concern to minio)
-func (s *peerRESTServer) GetSysServicesHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetSysServices(ctx, r.Host)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+func (s *peerRESTServer) GetSysServicesHandler(_ *grid.MSS) (*grid.JSON[madmin.SysServices], *grid.RemoteErr) {
+	info := madmin.GetSysServices(context.Background(), globalMinioHost)
+	return madminSysServices.NewJSONWith(&info), nil
}
// GetSysErrorsHandler - returns system level errors
-func (s *peerRESTServer) GetSysErrorsHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx, cancel := context.WithCancel(r.Context())
-	defer cancel()
-	info := madmin.GetSysErrors(ctx, r.Host)
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+func (s *peerRESTServer) GetSysErrorsHandler(_ *grid.MSS) (*grid.JSON[madmin.SysErrors], *grid.RemoteErr) {
+	info := madmin.GetSysErrors(context.Background(), globalMinioHost)
+	return madminSysErrors.NewJSONWith(&info), nil
}
-var deleteBucketMetadataHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteBucketMetadata, grid.NewMSS, grid.NewNoPayload)
// DeleteBucketMetadataHandler - Delete in memory bucket metadata
func (s *peerRESTServer) DeleteBucketMetadataHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	bucketName := mss.Get(peerRESTBucket)
@@ -559,12 +487,7 @@ func (s *peerRESTServer) DeleteBucketMetadataHandler(mss *grid.MSS) (np grid.NoP
}
// GetAllBucketStatsHandler - fetches bucket replication stats for all buckets from this peer.
-func (s *peerRESTServer) GetAllBucketStatsHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
+func (s *peerRESTServer) GetAllBucketStatsHandler(mss *grid.MSS) (*BucketStatsMap, *grid.RemoteErr) {
	replicationStats := globalReplicationStats.GetAll()
	bucketStatsMap := make(map[string]BucketStats, len(replicationStats))
	for k, v := range replicationStats {
@@ -573,22 +496,15 @@ func (s *peerRESTServer) GetAllBucketStatsHandler(w http.ResponseWriter, r *http
			ProxyStats: globalReplicationStats.getProxyStats(k),
		}
	}
-	logger.LogIf(r.Context(), msgp.Encode(w, &BucketStatsMap{Stats: bucketStatsMap, Timestamp: UTCNow()}))
+	return &BucketStatsMap{Stats: bucketStatsMap, Timestamp: time.Now()}, nil
}
// GetBucketStatsHandler - fetches current in-memory bucket stats, currently only
// returns BucketStats, that currently includes ReplicationStats.
-func (s *peerRESTServer) GetBucketStatsHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	vars := mux.Vars(r)
-	bucketName := vars[peerRESTBucket]
+func (s *peerRESTServer) GetBucketStatsHandler(vars *grid.MSS) (*BucketStats, *grid.RemoteErr) {
+	bucketName := vars.Get(peerRESTBucket)
	if bucketName == "" {
-		s.writeErrorResponse(w, errors.New("Bucket name is missing"))
-		return
+		return nil, grid.NewRemoteErrString("Bucket name is missing")
	}
	bs := BucketStats{
@@ -596,27 +512,20 @@ func (s *peerRESTServer) GetBucketStatsHandler(w http.ResponseWriter, r *http.Re
		QueueStats: ReplicationQueueStats{Nodes: []ReplQNodeStats{globalReplicationStats.getNodeQueueStats(bucketName)}},
		ProxyStats: globalReplicationStats.getProxyStats(bucketName),
	}
-	logger.LogIf(r.Context(), msgp.Encode(w, &bs))
+	return &bs, nil
}
// GetSRMetricsHandler - fetches current in-memory replication stats at site level from this peer
-func (s *peerRESTServer) GetSRMetricsHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
+func (s *peerRESTServer) GetSRMetricsHandler(mss *grid.MSS) (*SRMetricsSummary, *grid.RemoteErr) {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
-		s.writeErrorResponse(w, errServerNotInitialized)
-		return
+		return nil, grid.NewRemoteErr(errServerNotInitialized)
	}
	sm := globalReplicationStats.getSRMetricsForNode()
-	logger.LogIf(r.Context(), msgp.Encode(w, &sm))
+	return &sm, nil
}
-var loadBucketMetadataHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadBucketMetadata, grid.NewMSS, grid.NewNoPayload)
// LoadBucketMetadataHandler - reloads in memory bucket metadata
func (s *peerRESTServer) LoadBucketMetadataHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	bucketName := mss.Get(peerRESTBucket)
@@ -647,43 +556,17 @@ func (s *peerRESTServer) LoadBucketMetadataHandler(mss *grid.MSS) (np grid.NoPay
	return
}
-func (s *peerRESTServer) GetMetacacheListingHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx := newContext(r, w, "GetMetacacheListing")
-	var opts listPathOptions
-	err := gob.NewDecoder(r.Body).Decode(&opts)
-	if err != nil && err != io.EOF {
-		s.writeErrorResponse(w, err)
-		return
-	}
-	resp := localMetacacheMgr.getBucket(ctx, opts.Bucket).findCache(opts)
-	logger.LogIf(ctx, msgp.Encode(w, &resp))
+func (s *peerRESTServer) GetMetacacheListingHandler(opts *listPathOptions) (*metacache, *grid.RemoteErr) {
+	resp := localMetacacheMgr.getBucket(context.Background(), opts.Bucket).findCache(*opts)
+	return &resp, nil
}
-func (s *peerRESTServer) UpdateMetacacheListingHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	ctx := newContext(r, w, "UpdateMetacacheListing")
-	var req metacache
-	err := msgp.Decode(r.Body, &req)
-	if err != nil {
-		s.writeErrorResponse(w, err)
-		return
-	}
-	cache, err := localMetacacheMgr.updateCacheEntry(req)
+func (s *peerRESTServer) UpdateMetacacheListingHandler(req *metacache) (*metacache, *grid.RemoteErr) {
+	cache, err := localMetacacheMgr.updateCacheEntry(*req)
	if err != nil {
-		s.writeErrorResponse(w, err)
-		return
+		return nil, grid.NewRemoteErr(err)
	}
-	// Return updated metadata.
-	logger.LogIf(ctx, msgp.Encode(w, &cache))
+	return &cache, nil
}
// PutBucketNotificationHandler - Set bucket policy.
@@ -806,36 +689,27 @@ func waitingDrivesNode() map[string]madmin.DiskMetrics {
}
// SignalServiceHandler - signal service handler.
-func (s *peerRESTServer) SignalServiceHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	vars := mux.Vars(r)
-	signalString := vars[peerRESTSignal]
+func (s *peerRESTServer) SignalServiceHandler(vars *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
+	signalString := vars.Get(peerRESTSignal)
	if signalString == "" {
-		s.writeErrorResponse(w, errors.New("signal name is missing"))
-		return
+		return np, grid.NewRemoteErrString("signal name is missing")
	}
	si, err := strconv.Atoi(signalString)
	if err != nil {
-		s.writeErrorResponse(w, err)
-		return
+		return np, grid.NewRemoteErr(err)
	}
	signal := serviceSignal(si)
	switch signal {
	case serviceRestart, serviceStop:
-		dryRun := r.Form.Get("dry-run") == "true" // This is only supported for `restart/stop`
+		dryRun := vars.Get("dry-run") == "true" // This is only supported for `restart/stop`
		waitingDisks := waitingDrivesNode()
		if len(waitingDisks) > 0 {
			buf, err := json.Marshal(waitingDisks)
			if err != nil {
-				s.writeErrorResponse(w, err)
-				return
+				return np, grid.NewRemoteErr(err)
			}
-			s.writeErrorResponse(w, errors.New(string(buf)))
+			return np, grid.NewRemoteErrString(string(buf))
		}
		if !dryRun {
			globalServiceSignalCh <- signal
@@ -847,36 +721,30 @@ func (s *peerRESTServer) SignalServiceHandler(w http.ResponseWriter, r *http.Req
	case serviceReloadDynamic:
		objAPI := newObjectLayerFn()
		if objAPI == nil {
-			s.writeErrorResponse(w, errServerNotInitialized)
-			return
+			return np, grid.NewRemoteErr(errServerNotInitialized)
		}
		srvCfg, err := getValidConfig(objAPI)
		if err != nil {
-			s.writeErrorResponse(w, err)
-			return
+			return np, grid.NewRemoteErr(err)
		}
-		subSys := r.Form.Get(peerRESTSubSys)
+		subSys := vars.Get(peerRESTSubSys)
		// Apply dynamic values.
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancel()
		if subSys == "" {
-			err = applyDynamicConfig(r.Context(), objAPI, srvCfg)
+			err = applyDynamicConfig(ctx, objAPI, srvCfg)
		} else {
-			err = applyDynamicConfigForSubSys(r.Context(), objAPI, srvCfg, subSys)
+			err = applyDynamicConfigForSubSys(ctx, objAPI, srvCfg, subSys)
		}
		if err != nil {
-			s.writeErrorResponse(w, err)
+			return np, grid.NewRemoteErr(err)
		}
-		return
	default:
-		s.writeErrorResponse(w, errUnsupportedSignal)
-		return
+		return np, grid.NewRemoteErr(errUnsupportedSignal)
	}
+	return np, nil
}
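For illustration, the client half of a dry-run restart mirrors the handler's `vars.Get` reads; a minimal sketch, assuming the caller already holds a peer grid connection:

```
// Dry-run restart: reports drives still waiting for writes instead of restarting.
_, err := signalServiceRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{
	peerRESTSignal: strconv.Itoa(int(serviceRestart)),
	"dry-run":      "true", // only honored for restart/stop
}))
```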
-// Set an output capacity of 100 for listenHandler
-// There is another buffer that will buffer events.
-var listenHandler = grid.NewStream[*grid.URLValues, grid.NoPayload, *grid.Bytes](grid.HandlerListen,
-	grid.NewURLValues, nil, grid.NewBytes).WithOutCapacity(100)
// ListenHandler sends http trace messages back to peer rest client
func (s *peerRESTServer) ListenHandler(ctx context.Context, v *grid.URLValues, out chan<- *grid.Bytes) *grid.RemoteErr {
	values := v.Values()
@@ -988,24 +856,14 @@ func (s *peerRESTServer) TraceHandler(ctx context.Context, payload []byte, _ <-c
	return nil
}
-func (s *peerRESTServer) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("invalid request"))
-		return
-	}
-	ctx := newContext(r, w, "BackgroundHealStatus")
-	state, ok := getLocalBackgroundHealStatus(ctx, newObjectLayerFn())
+func (s *peerRESTServer) BackgroundHealStatusHandler(_ *grid.MSS) (*grid.JSON[madmin.BgHealState], *grid.RemoteErr) {
+	state, ok := getLocalBackgroundHealStatus(context.Background(), newObjectLayerFn())
	if !ok {
-		s.writeErrorResponse(w, errServerNotInitialized)
-		return
+		return nil, grid.NewRemoteErr(errServerNotInitialized)
	}
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(state))
+	return madminBgHealState.NewJSONWith(&state), nil
}
-var reloadSiteReplicationConfigHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerReloadSiteReplicationConfig, grid.NewMSS, grid.NewNoPayload)
// ReloadSiteReplicationConfigHandler - reloads site replication configuration from the disks
func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	objAPI := newObjectLayerFn()
@@ -1017,8 +875,6 @@ func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np g
	return
}
-var reloadPoolMetaHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerReloadPoolMeta, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
@@ -1037,8 +893,6 @@ func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload
	return
}
-var stopRebalanceHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerStopRebalance, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
@@ -1054,8 +908,6 @@ func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload,
	return
}
-var loadRebalanceMetaHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadRebalanceMeta, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
@@ -1083,8 +935,6 @@ func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayl
	return
}
-var loadTransitionTierConfigHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadTransitionTierConfig, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
@@ -1102,46 +952,34 @@ func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid
}
// ConsoleLogHandler sends console logs of this node back to peer rest client
-func (s *peerRESTServer) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("Invalid request"))
-		return
-	}
-	doneCh := make(chan struct{})
-	defer xioutil.SafeClose(doneCh)
-	ch := make(chan log.Info, 100000)
-	err := globalConsoleSys.Subscribe(ch, doneCh, "", 0, madmin.LogMaskAll, nil)
+func (s *peerRESTServer) ConsoleLogHandler(ctx context.Context, params *grid.MSS, out chan<- *grid.Bytes) *grid.RemoteErr {
+	mask, err := strconv.Atoi(params.Get(peerRESTLogMask))
	if err != nil {
-		s.writeErrorResponse(w, err)
-		return
+		mask = int(madmin.LogMaskAll)
	}
-	keepAliveTicker := time.NewTicker(time.Second)
-	defer keepAliveTicker.Stop()
-	enc := gob.NewEncoder(w)
+	ch := make(chan log.Info, 1000)
+	err = globalConsoleSys.Subscribe(ch, ctx.Done(), "", 0, madmin.LogMask(mask), nil)
+	if err != nil {
+		return grid.NewRemoteErr(err)
+	}
+	var buf bytes.Buffer
+	enc := json.NewEncoder(&buf)
	for {
		select {
		case entry, ok := <-ch:
			if !ok {
-				return
+				return grid.NewRemoteErrString("console log channel closed")
			}
+			if !entry.SendLog("", madmin.LogMask(mask)) {
+				continue
+			}
+			buf.Reset()
			if err := enc.Encode(entry); err != nil {
-				return
+				return grid.NewRemoteErr(err)
			}
-			if len(ch) == 0 {
-				w.(http.Flusher).Flush()
-			}
-		case <-keepAliveTicker.C:
-			if len(ch) == 0 {
-				if err := enc.Encode(&madmin.LogInfo{}); err != nil {
-					return
-				}
-				w.(http.Flusher).Flush()
-			}
-		case <-r.Context().Done():
-			return
+			out <- grid.NewBytesWithCopyOf(buf.Bytes())
+		case <-ctx.Done():
+			return grid.NewRemoteErr(ctx.Err())
		}
	}
}
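Each `*grid.Bytes` pushed to `out` above arrives as one message in the caller's typed stream. A hedged sketch of a minimal consumer (the stream `Results` callback per the grid package; exact names are assumptions):

```
// Sketch: tail a peer's console log into a writer.
func tailPeerLogs(ctx context.Context, conn *grid.Connection, dst io.Writer) error {
	st, err := consoleLogRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{
		peerRESTLogMask: strconv.Itoa(int(madmin.LogMaskAll)),
	}))
	if err != nil {
		return err
	}
	return st.Results(func(b *grid.Bytes) error {
		_, err := dst.Write(*b) // one JSON-encoded log entry per message
		return err
	})
}
```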
@@ -1161,59 +999,30 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
}
// GetBandwidth gets the bandwidth for the buckets requested.
-func (s *peerRESTServer) GetBandwidth(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("invalid request"))
-		return
-	}
-	bucketsString := r.Form.Get("buckets")
-	doneCh := make(chan struct{})
-	defer xioutil.SafeClose(doneCh)
-	selectBuckets := b.SelectBuckets(strings.Split(bucketsString, ",")...)
-	report := globalBucketMonitor.GetReport(selectBuckets)
-	enc := gob.NewEncoder(w)
-	if err := enc.Encode(report); err != nil {
-		s.writeErrorResponse(w, errors.New("Encoding report failed: "+err.Error()))
-		return
-	}
+func (s *peerRESTServer) GetBandwidth(params *grid.URLValues) (*bandwidth.BucketBandwidthReport, *grid.RemoteErr) {
+	buckets := params.Values().Get("buckets")
+	selectBuckets := b.SelectBuckets(buckets)
+	return globalBucketMonitor.GetReport(selectBuckets), nil
}
// GetPeerMetrics gets the metrics to be federated across peers.
-func (s *peerRESTServer) GetPeerMetrics(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("invalid request"))
-		return
-	}
-	enc := gob.NewEncoder(w)
-	for m := range ReportMetrics(r.Context(), peerMetricsGroups) {
-		if err := enc.Encode(m); err != nil {
-			s.writeErrorResponse(w, errors.New("Encoding metric failed: "+err.Error()))
-			return
-		}
+func (s *peerRESTServer) GetPeerMetrics(_ *grid.MSS) (*grid.Array[*Metric], *grid.RemoteErr) {
+	m := ReportMetrics(context.Background(), peerMetricsGroups)
+	res := make([]*Metric, 0, len(m))
+	for m := range m {
+		res = append(res, &m)
	}
+	return aoMetricsGroup.NewWith(res), nil
}
// GetPeerBucketMetrics gets the metrics to be federated across peers.
-func (s *peerRESTServer) GetPeerBucketMetrics(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("invalid request"))
-		return
-	}
-	enc := gob.NewEncoder(w)
-	for m := range ReportMetrics(r.Context(), bucketPeerMetricsGroups) {
-		if err := enc.Encode(m); err != nil {
-			s.writeErrorResponse(w, errors.New("Encoding metric failed: "+err.Error()))
-			return
-		}
+func (s *peerRESTServer) GetPeerBucketMetrics(_ *grid.MSS) (*grid.Array[*Metric], *grid.RemoteErr) {
+	m := ReportMetrics(context.Background(), bucketPeerMetricsGroups)
+	res := make([]*Metric, 0, len(m))
+	for m := range m {
+		res = append(res, &m)
	}
+	return aoMetricsGroup.NewWith(res), nil
}
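`grid.Array` is the slice wrapper from this change: the whole metric set travels as one typed response instead of a gob stream. The `aoMetricsGroup` pool used above is presumably declared along these lines (hypothetical reconstruction; the `NewArrayOf` name is assumed):

```
var aoMetricsGroup = grid.NewArrayOf[*Metric](func() *Metric { return &Metric{} })
```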
func (s *peerRESTServer) SpeedTestHandler(w http.ResponseWriter, r *http.Request) {
@@ -1269,20 +1078,13 @@ func (s *peerRESTServer) SpeedTestHandler(w http.ResponseWriter, r *http.Request
}
// GetLastDayTierStatsHandler - returns per-tier stats in the last 24hrs for this server
-func (s *peerRESTServer) GetLastDayTierStatsHandler(w http.ResponseWriter, r *http.Request) {
-	if !s.IsValid(w, r) {
-		s.writeErrorResponse(w, errors.New("invalid request"))
-		return
-	}
-	ctx := newContext(r, w, "GetLastDayTierStats")
+func (s *peerRESTServer) GetLastDayTierStatsHandler(_ *grid.MSS) (*DailyAllTierStats, *grid.RemoteErr) {
	if objAPI := newObjectLayerFn(); objAPI == nil || globalTransitionState == nil {
-		s.writeErrorResponse(w, errServerNotInitialized)
-		return
+		return nil, grid.NewRemoteErr(errServerNotInitialized)
	}
	result := globalTransitionState.getDailyAllTierStats()
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(result))
+	return &result, nil
}
func (s *peerRESTServer) DriveSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
@@ -1393,8 +1195,6 @@ func (s *peerRESTServer) NetSpeedTestHandler(w http.ResponseWriter, r *http.Requ
	logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
}
-var healBucketHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerHealBucket, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	bucket := mss.Get(peerS3Bucket)
	if isMinioMetaBucket(bucket) {
@@ -1412,8 +1212,6 @@ func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, ne
	return np, nil
}
-var headBucketHandler = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerHeadBucket, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
// HeadBucketHandler implements peer BucketInfo call, returns bucket create date.
func (s *peerRESTServer) HeadBucketHandler(mss *grid.MSS) (info *VolInfo, nerr *grid.RemoteErr) {
	bucket := mss.Get(peerS3Bucket)
@@ -1436,8 +1234,6 @@ func (s *peerRESTServer) HeadBucketHandler(mss *grid.MSS) (info *VolInfo, nerr *
	}, nil
}
-var deleteBucketHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteBucket, grid.NewMSS, grid.NewNoPayload)
// DeleteBucketHandler implements peer delete bucket call.
func (s *peerRESTServer) DeleteBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	bucket := mss.Get(peerS3Bucket)
@@ -1456,8 +1252,6 @@ func (s *peerRESTServer) DeleteBucketHandler(mss *grid.MSS) (np grid.NoPayload,
	return np, nil
}
-var makeBucketHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerMakeBucket, grid.NewMSS, grid.NewNoPayload)
// MakeBucketHandler implements peer create bucket call.
func (s *peerRESTServer) MakeBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
	bucket := mss.Get(peerS3Bucket)
@@ -1485,65 +1279,63 @@ func registerPeerRESTHandlers(router *mux.Router, gm *grid.Manager) {
	server := &peerRESTServer{}
	subrouter := router.PathPrefix(peerRESTPrefix).Subrouter()
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodHealth).HandlerFunc(h(server.HealthHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLocks).HandlerFunc(h(server.GetLocksHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(h(server.ServerInfoHandler)).Queries(restQueries(peerRESTMetrics)...)
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLocalStorageInfo).HandlerFunc(h(server.LocalStorageInfoHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(h(server.GetProcInfoHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(h(server.GetMemInfoHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMetrics).HandlerFunc(h(server.GetMetricsHandler)).Queries(restQueries(peerRESTMetricsTypes)...)
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodResourceMetrics).HandlerFunc(h(server.GetResourceMetrics))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysErrors).HandlerFunc(h(server.GetSysErrorsHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysServices).HandlerFunc(h(server.GetSysServicesHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysConfig).HandlerFunc(h(server.GetSysConfigHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodOsInfo).HandlerFunc(h(server.GetOSInfoHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDiskHwInfo).HandlerFunc(h(server.GetPartitionsHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodNetHwInfo).HandlerFunc(h(server.GetNetInfoHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCPUInfo).HandlerFunc(h(server.GetCPUsHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetAllBucketStats).HandlerFunc(h(server.GetAllBucketStatsHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetBucketStats).HandlerFunc(h(server.GetBucketStatsHandler)).Queries(restQueries(peerRESTBucket)...)
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSignalService).HandlerFunc(h(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodVerifyBinary).HandlerFunc(h(server.VerifyBinaryHandler)).Queries(restQueries(peerRESTURL, peerRESTSha256Sum, peerRESTReleaseInfo)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCommitBinary).HandlerFunc(h(server.CommitBinaryHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetReplicationMRF).HandlerFunc(httpTraceHdrs(server.GetReplicationMRFHandler)).Queries(restQueries(peerRESTBucket)...)
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetSRMetrics).HandlerFunc(h(server.GetSRMetricsHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodStartProfiling).HandlerFunc(h(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDownloadProfilingData).HandlerFunc(h(server.DownloadProfilingDataHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLog).HandlerFunc(server.ConsoleLogHandler)
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetBandwidth).HandlerFunc(h(server.GetBandwidth))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetMetacacheListing).HandlerFunc(h(server.GetMetacacheListingHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodUpdateMetacacheListing).HandlerFunc(h(server.UpdateMetacacheListingHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetPeerMetrics).HandlerFunc(h(server.GetPeerMetrics))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetPeerBucketMetrics).HandlerFunc(h(server.GetPeerBucketMetrics))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSpeedTest).HandlerFunc(h(server.SpeedTestHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDriveSpeedTest).HandlerFunc(h(server.DriveSpeedTestHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodNetperf).HandlerFunc(h(server.NetSpeedTestHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDevNull).HandlerFunc(h(server.DevNull))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLastDayTierStats).HandlerFunc(h(server.GetLastDayTierStatsHandler))
-	logger.FatalIf(makeBucketHandler.Register(gm, server.MakeBucketHandler), "unable to register handler")
-	logger.FatalIf(deleteBucketHandler.Register(gm, server.DeleteBucketHandler), "unable to register handler")
-	logger.FatalIf(headBucketHandler.Register(gm, server.HeadBucketHandler), "unable to register handler")
-	logger.FatalIf(healBucketHandler.Register(gm, server.HealBucketHandler), "unable to register handler")
-	logger.FatalIf(deletePolicyHandler.Register(gm, server.DeletePolicyHandler), "unable to register handler")
-	logger.FatalIf(loadPolicyHandler.Register(gm, server.LoadPolicyHandler), "unable to register handler")
-	logger.FatalIf(loadPolicyMappingHandler.Register(gm, server.LoadPolicyMappingHandler), "unable to register handler")
-	logger.FatalIf(deleteUserHandler.Register(gm, server.DeleteUserHandler), "unable to register handler")
-	logger.FatalIf(deleteSvcActHandler.Register(gm, server.DeleteServiceAccountHandler), "unable to register handler")
-	logger.FatalIf(loadUserHandler.Register(gm, server.LoadUserHandler), "unable to register handler")
-	logger.FatalIf(loadSvcActHandler.Register(gm, server.LoadServiceAccountHandler), "unable to register handler")
-	logger.FatalIf(loadGroupHandler.Register(gm, server.LoadGroupHandler), "unable to register handler")
-	logger.FatalIf(loadTransitionTierConfigHandler.Register(gm, server.LoadTransitionTierConfigHandler), "unable to register handler")
-	logger.FatalIf(reloadPoolMetaHandler.Register(gm, server.ReloadPoolMetaHandler), "unable to register handler")
-	logger.FatalIf(loadRebalanceMetaHandler.Register(gm, server.LoadRebalanceMetaHandler), "unable to register handler")
-	logger.FatalIf(stopRebalanceHandler.Register(gm, server.StopRebalanceHandler), "unable to register handler")
-	logger.FatalIf(reloadSiteReplicationConfigHandler.Register(gm, server.ReloadSiteReplicationConfigHandler), "unable to register handler")
-	logger.FatalIf(loadBucketMetadataHandler.Register(gm, server.LoadBucketMetadataHandler), "unable to register handler")
-	logger.FatalIf(deleteBucketMetadataHandler.Register(gm, server.DeleteBucketMetadataHandler), "unable to register handler")
-	logger.FatalIf(listenHandler.RegisterNoInput(gm, server.ListenHandler), "unable to register handler")
+	logger.FatalIf(consoleLogRPC.RegisterNoInput(gm, server.ConsoleLogHandler), "unable to register handler")
+	logger.FatalIf(deleteBucketMetadataRPC.Register(gm, server.DeleteBucketMetadataHandler), "unable to register handler")
+	logger.FatalIf(deleteBucketRPC.Register(gm, server.DeleteBucketHandler), "unable to register handler")
+	logger.FatalIf(deletePolicyRPC.Register(gm, server.DeletePolicyHandler), "unable to register handler")
+	logger.FatalIf(deleteSvcActRPC.Register(gm, server.DeleteServiceAccountHandler), "unable to register handler")
+	logger.FatalIf(deleteUserRPC.Register(gm, server.DeleteUserHandler), "unable to register handler")
+	logger.FatalIf(getAllBucketStatsRPC.Register(gm, server.GetAllBucketStatsHandler), "unable to register handler")
+	logger.FatalIf(getBackgroundHealStatusRPC.Register(gm, server.BackgroundHealStatusHandler), "unable to register handler")
+	logger.FatalIf(getBandwidthRPC.Register(gm, server.GetBandwidth), "unable to register handler")
+	logger.FatalIf(getBucketStatsRPC.Register(gm, server.GetBucketStatsHandler), "unable to register handler")
+	logger.FatalIf(getCPUsHandler.Register(gm, server.GetCPUsHandler), "unable to register handler")
+	logger.FatalIf(getLastDayTierStatsRPC.Register(gm, server.GetLastDayTierStatsHandler), "unable to register handler")
+	logger.FatalIf(getLocksRPC.Register(gm, server.GetLocksHandler), "unable to register handler")
+	logger.FatalIf(getMemInfoRPC.Register(gm, server.GetMemInfoHandler), "unable to register handler")
+	logger.FatalIf(getMetacacheListingRPC.Register(gm, server.GetMetacacheListingHandler), "unable to register handler")
+	logger.FatalIf(getMetricsRPC.Register(gm, server.GetMetricsHandler), "unable to register handler")
+	logger.FatalIf(getNetInfoRPC.Register(gm, server.GetNetInfoHandler), "unable to register handler")
+	logger.FatalIf(getOSInfoRPC.Register(gm, server.GetOSInfoHandler), "unable to register handler")
+	logger.FatalIf(getPartitionsRPC.Register(gm, server.GetPartitionsHandler), "unable to register handler")
+	logger.FatalIf(getPeerBucketMetricsRPC.Register(gm, server.GetPeerBucketMetrics), "unable to register handler")
+	logger.FatalIf(getPeerMetricsRPC.Register(gm, server.GetPeerMetrics), "unable to register handler")
+	logger.FatalIf(getProcInfoRPC.Register(gm, server.GetProcInfoHandler), "unable to register handler")
+	logger.FatalIf(getResourceMetricsRPC.RegisterNoInput(gm, server.GetResourceMetrics), "unable to register handler")
+	logger.FatalIf(getSRMetricsRPC.Register(gm, server.GetSRMetricsHandler), "unable to register handler")
+	logger.FatalIf(getSysConfigRPC.Register(gm, server.GetSysConfigHandler), "unable to register handler")
+	logger.FatalIf(getSysErrorsRPC.Register(gm, server.GetSysErrorsHandler), "unable to register handler")
+	logger.FatalIf(getSysServicesRPC.Register(gm, server.GetSysServicesHandler), "unable to register handler")
+	logger.FatalIf(headBucketRPC.Register(gm, server.HeadBucketHandler), "unable to register handler")
+	logger.FatalIf(healBucketRPC.Register(gm, server.HealBucketHandler), "unable to register handler")
+	logger.FatalIf(listenRPC.RegisterNoInput(gm, server.ListenHandler), "unable to register handler")
+	logger.FatalIf(loadBucketMetadataRPC.Register(gm, server.LoadBucketMetadataHandler), "unable to register handler")
+	logger.FatalIf(loadGroupRPC.Register(gm, server.LoadGroupHandler), "unable to register handler")
+	logger.FatalIf(loadPolicyMappingRPC.Register(gm, server.LoadPolicyMappingHandler), "unable to register handler")
+	logger.FatalIf(loadPolicyRPC.Register(gm, server.LoadPolicyHandler), "unable to register handler")
+	logger.FatalIf(loadRebalanceMetaRPC.Register(gm, server.LoadRebalanceMetaHandler), "unable to register handler")
+	logger.FatalIf(loadSvcActRPC.Register(gm, server.LoadServiceAccountHandler), "unable to register handler")
+	logger.FatalIf(loadTransitionTierConfigRPC.Register(gm, server.LoadTransitionTierConfigHandler), "unable to register handler")
+	logger.FatalIf(loadUserRPC.Register(gm, server.LoadUserHandler), "unable to register handler")
+	logger.FatalIf(localStorageInfoRPC.Register(gm, server.LocalStorageInfoHandler), "unable to register handler")
+	logger.FatalIf(makeBucketRPC.Register(gm, server.MakeBucketHandler), "unable to register handler")
+	logger.FatalIf(reloadPoolMetaRPC.Register(gm, server.ReloadPoolMetaHandler), "unable to register handler")
+	logger.FatalIf(reloadSiteReplicationConfigRPC.Register(gm, server.ReloadSiteReplicationConfigHandler), "unable to register handler")
+	logger.FatalIf(serverInfoRPC.Register(gm, server.ServerInfoHandler), "unable to register handler")
+	logger.FatalIf(signalServiceRPC.Register(gm, server.SignalServiceHandler), "unable to register handler")
+	logger.FatalIf(stopRebalanceRPC.Register(gm, server.StopRebalanceHandler), "unable to register handler")
+	logger.FatalIf(updateMetacacheListingRPC.Register(gm, server.UpdateMetacacheListingHandler), "unable to register handler")
	logger.FatalIf(gm.RegisterStreamingHandler(grid.HandlerTrace, grid.StreamHandler{
		Handle:   server.TraceHandler,
		Subroute: "",


@@ -376,7 +376,7 @@ func (client *remotePeerS3Client) HealBucket(ctx context.Context, bucket string,
		peerS3BucketDeleted: strconv.FormatBool(opts.Remove),
	})
-	_, err := healBucketHandler.Call(ctx, conn, mss)
+	_, err := healBucketRPC.Call(ctx, conn, mss)
	// Initialize heal result info
	return madmin.HealResultItem{
@@ -398,7 +398,7 @@ func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket stri
		peerS3BucketDeleted: strconv.FormatBool(opts.Deleted),
	})
-	volInfo, err := headBucketHandler.Call(ctx, conn, mss)
+	volInfo, err := headBucketRPC.Call(ctx, conn, mss)
	if err != nil {
		return BucketInfo{}, toStorageErr(err)
	}
@@ -449,7 +449,7 @@ func (client *remotePeerS3Client) MakeBucket(ctx context.Context, bucket string,
		peerS3BucketForceCreate: strconv.FormatBool(opts.ForceCreate),
	})
-	_, err := makeBucketHandler.Call(ctx, conn, mss)
+	_, err := makeBucketRPC.Call(ctx, conn, mss)
	return toStorageErr(err)
}
@@ -498,7 +498,7 @@ func (client *remotePeerS3Client) DeleteBucket(ctx context.Context, bucket strin
		peerS3BucketForceDelete: strconv.FormatBool(opts.Force),
	})
-	_, err := deleteBucketHandler.Call(ctx, conn, mss)
+	_, err := deleteBucketRPC.Call(ctx, conn, mss)
	return toStorageErr(err)
}


@@ -21,6 +21,8 @@ import (
	"time"
)
+//go:generate msgp -file=$GOFILE
// DeleteOptions represents the disk level delete options available for the APIs
type DeleteOptions struct {
	BaseOptions
@@ -44,8 +46,6 @@ type DiskInfoOptions struct {
	NoOp bool `msg:"np"`
}
-//go:generate msgp -file=$GOFILE
// DiskInfo is an extended type which returns current
// disk usage per path.
// The above means that any added/deleted fields are incompatible.
@@ -445,3 +445,8 @@ type RenameDataResp struct {
type LocalDiskIDs struct {
	IDs []string
}
+
+// ListDirResult - ListDir()'s response.
+type ListDirResult struct {
+	Entries []string `msg:"e"`
+}
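With the `//go:generate` directive moved to the top of the file, `ListDirResult` picks up a generated MessagePack codec (shown in the next file). A minimal round-trip using the generated methods:

```
// Encode and decode a ListDirResult via the msgp-generated methods.
func roundTripListDir() error {
	v := ListDirResult{Entries: []string{"a/", "b.txt"}}
	buf, err := v.MarshalMsg(nil) // appends the encoding to the given slice
	if err != nil {
		return err
	}
	var out ListDirResult
	_, err = out.UnmarshalMsg(buf) // returns any unread trailing bytes
	return err
}
```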


@@ -2871,6 +2871,148 @@ func (z *FilesInfo) Msgsize() (s int) {
	return
}
// DecodeMsg implements msgp.Decodable
func (z *ListDirResult) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "e":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Entries")
return
}
if cap(z.Entries) >= int(zb0002) {
z.Entries = (z.Entries)[:zb0002]
} else {
z.Entries = make([]string, zb0002)
}
for za0001 := range z.Entries {
z.Entries[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Entries", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ListDirResult) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "e"
err = en.Append(0x81, 0xa1, 0x65)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Entries)))
if err != nil {
err = msgp.WrapError(err, "Entries")
return
}
for za0001 := range z.Entries {
err = en.WriteString(z.Entries[za0001])
if err != nil {
err = msgp.WrapError(err, "Entries", za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ListDirResult) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "e"
o = append(o, 0x81, 0xa1, 0x65)
o = msgp.AppendArrayHeader(o, uint32(len(z.Entries)))
for za0001 := range z.Entries {
o = msgp.AppendString(o, z.Entries[za0001])
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ListDirResult) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "e":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Entries")
return
}
if cap(z.Entries) >= int(zb0002) {
z.Entries = (z.Entries)[:zb0002]
} else {
z.Entries = make([]string, zb0002)
}
for za0001 := range z.Entries {
z.Entries[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Entries", za0001)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ListDirResult) Msgsize() (s int) {
s = 1 + 2 + msgp.ArrayHeaderSize
for za0001 := range z.Entries {
s += msgp.StringPrefixSize + len(z.Entries[za0001])
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *LocalDiskIDs) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte


@@ -1252,6 +1252,119 @@ func BenchmarkDecodeFilesInfo(b *testing.B) {
	}
}
func TestMarshalUnmarshalListDirResult(t *testing.T) {
v := ListDirResult{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgListDirResult(b *testing.B) {
v := ListDirResult{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgListDirResult(b *testing.B) {
v := ListDirResult{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalListDirResult(b *testing.B) {
v := ListDirResult{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeListDirResult(t *testing.T) {
v := ListDirResult{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeListDirResult Msgsize() is inaccurate")
}
vn := ListDirResult{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeListDirResult(b *testing.B) {
v := ListDirResult{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeListDirResult(b *testing.B) {
v := ListDirResult{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalLocalDiskIDs(t *testing.T) {
	v := LocalDiskIDs{}
	bts, err := v.MarshalMsg(nil)


@@ -237,7 +237,7 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC
defer atomic.AddInt32(&client.scanning, -1)
defer xioutil.SafeClose(updates)
-st, err := storageNSScannerHandler.Call(ctx, client.gridConn, &nsScannerOptions{
+st, err := storageNSScannerRPC.Call(ctx, client.gridConn, &nsScannerOptions{
DiskID: client.diskID,
ScanMode: int(scanMode),
Cache: &cache,
@@ -311,7 +311,7 @@ func (client *storageRESTClient) DiskInfo(ctx context.Context, opts DiskInfoOpti
opts.DiskID = client.diskID
-infop, err := storageDiskInfoHandler.Call(ctx, client.gridConn, &opts)
+infop, err := storageDiskInfoRPC.Call(ctx, client.gridConn, &opts)
if err != nil {
return info, toStorageErr(err)
}
@@ -340,7 +340,7 @@ func (client *storageRESTClient) ListVols(ctx context.Context) (vols []VolInfo,
// StatVol - get volume info over the network.
func (client *storageRESTClient) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
-v, err := storageStatVolHandler.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
+v, err := storageStatVolRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
storageRESTDiskID: client.diskID,
storageRESTVolume: volume,
}))
@@ -349,7 +349,7 @@ func (client *storageRESTClient) StatVol(ctx context.Context, volume string) (vo
}
vol = *v
// Performs shallow copy, so we can reuse.
-storageStatVolHandler.PutResponse(v)
+storageStatVolRPC.PutResponse(v)
return vol, nil
}
@@ -386,7 +386,7 @@ func (client *storageRESTClient) CreateFile(ctx context.Context, origvolume, vol
}
func (client *storageRESTClient) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) error {
-_, err := storageWriteMetadataHandler.Call(ctx, client.gridConn, &MetadataHandlerParams{
+_, err := storageWriteMetadataRPC.Call(ctx, client.gridConn, &MetadataHandlerParams{
DiskID: client.diskID,
OrigVolume: origvolume,
Volume: volume,
@@ -397,7 +397,7 @@ func (client *storageRESTClient) WriteMetadata(ctx context.Context, origvolume,
}
func (client *storageRESTClient) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) error {
-_, err := storageUpdateMetadataHandler.Call(ctx, client.gridConn, &MetadataHandlerParams{
+_, err := storageUpdateMetadataRPC.Call(ctx, client.gridConn, &MetadataHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@@ -408,7 +408,7 @@ func (client *storageRESTClient) UpdateMetadata(ctx context.Context, volume, pat
}
func (client *storageRESTClient) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) (err error) {
-_, err = storageDeleteVersionHandler.Call(ctx, client.gridConn, &DeleteVersionHandlerParams{
+_, err = storageDeleteVersionRPC.Call(ctx, client.gridConn, &DeleteVersionHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@@ -431,7 +431,7 @@ func (client *storageRESTClient) WriteAll(ctx context.Context, volume string, pa
// CheckParts - stat all file parts.
func (client *storageRESTClient) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error {
-_, err := storageCheckPartsHandler.Call(ctx, client.gridConn, &CheckPartsHandlerParams{
+_, err := storageCheckPartsRPC.Call(ctx, client.gridConn, &CheckPartsHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@@ -442,7 +442,7 @@ func (client *storageRESTClient) CheckParts(ctx context.Context, volume string,
// RenameData - rename source path to destination path atomically, metadata and data file.
func (client *storageRESTClient) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (sign uint64, err error) {
-resp, err := storageRenameDataHandler.Call(ctx, client.gridConn, &RenameDataHandlerParams{
+resp, err := storageRenameDataRPC.Call(ctx, client.gridConn, &RenameDataHandlerParams{
DiskID: client.diskID,
SrcVolume: srcVolume,
SrcPath: srcPath,
@@ -454,7 +454,7 @@ func (client *storageRESTClient) RenameData(ctx context.Context, srcVolume, srcP
if err != nil {
return 0, toStorageErr(err)
}
-defer storageRenameDataHandler.PutResponse(resp)
+defer storageRenameDataRPC.PutResponse(resp)
return resp.Signature, nil
}
@@ -484,7 +484,7 @@ func readMsgpReaderPoolPut(r *msgp.Reader) {
func (client *storageRESTClient) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
// Use websocket when not reading data.
if !opts.ReadData {
-resp, err := storageReadVersionHandler.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
+resp, err := storageReadVersionRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
storageRESTDiskID: client.diskID,
storageRESTOrigVolume: origvolume,
storageRESTVolume: volume,
@@ -524,7 +524,7 @@ func (client *storageRESTClient) ReadVersion(ctx context.Context, origvolume, vo
func (client *storageRESTClient) ReadXL(ctx context.Context, volume string, path string, readData bool) (rf RawFileInfo, err error) {
// Use websocket when not reading data.
if !readData {
-resp, err := storageReadXLHandler.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
+resp, err := storageReadXLRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
storageRESTDiskID: client.diskID,
storageRESTVolume: volume,
storageRESTFilePath: path,
@@ -567,7 +567,7 @@ func (client *storageRESTClient) ReadAll(ctx context.Context, volume string, pat
}
}
-gridBytes, err := storageReadAllHandler.Call(ctx, client.gridConn, &ReadAllHandlerParams{
+gridBytes, err := storageReadAllRPC.Call(ctx, client.gridConn, &ReadAllHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@@ -618,24 +618,27 @@ func (client *storageRESTClient) ReadFile(ctx context.Context, volume string, pa
// ListDir - lists a directory.
func (client *storageRESTClient) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) (entries []string, err error) {
-values := make(url.Values)
+values := grid.NewMSS()
values.Set(storageRESTVolume, volume)
values.Set(storageRESTDirPath, dirPath)
values.Set(storageRESTCount, strconv.Itoa(count))
values.Set(storageRESTOrigVolume, origvolume)
+values.Set(storageRESTDiskID, client.diskID)
-respBody, err := client.call(ctx, storageRESTMethodListDir, values, nil, -1)
+st, err := storageListDirRPC.Call(ctx, client.gridConn, values)
if err != nil {
-return nil, err
+return nil, toStorageErr(err)
}
-defer xhttp.DrainBody(respBody)
-err = gob.NewDecoder(respBody).Decode(&entries)
-return entries, err
+err = st.Results(func(resp *ListDirResult) error {
+entries = resp.Entries
+return nil
+})
+return entries, toStorageErr(err)
}
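
The ListDir conversion above replaces an HTTP round trip with gob decoding by a typed grid stream. As a hedged sketch of the calling pattern (listRemoteDir is illustrative and not part of this commit; it assumes client.gridConn is a *grid.Connection and reuses the storageListDirRPC handler and storageREST* constants from this diff):

```go
// listRemoteDir shows the stream-call shape: Call opens the stream,
// Results drains it, and the callback sees each typed response.
func listRemoteDir(ctx context.Context, conn *grid.Connection, volume, dirPath string, count int) (entries []string, err error) {
	values := grid.NewMSS()
	values.Set(storageRESTVolume, volume)
	values.Set(storageRESTDirPath, dirPath)
	values.Set(storageRESTCount, strconv.Itoa(count))

	st, err := storageListDirRPC.Call(ctx, conn, values)
	if err != nil {
		return nil, toStorageErr(err)
	}
	// Results blocks until the stream ends; returning a non-nil error
	// from the callback cancels the stream early.
	err = st.Results(func(resp *ListDirResult) error {
		entries = append(entries, resp.Entries...)
		return nil
	})
	return entries, toStorageErr(err)
}
```

Compared to the old gob path there is no response body to drain; the typed ListDirResult values are msgp-decoded by the grid layer.
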
// DeleteFile - deletes a file.
func (client *storageRESTClient) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) error {
-_, err := storageDeleteFileHandler.Call(ctx, client.gridConn, &DeleteFileHandlerParams{
+_, err := storageDeleteFileRPC.Call(ctx, client.gridConn, &DeleteFileHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@@ -700,7 +703,7 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri
// RenameFile - renames a file.
func (client *storageRESTClient) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) {
-_, err = storageRenameFileHandler.Call(ctx, client.gridConn, &RenameFileHandlerParams{
+_, err = storageRenameFileRPC.Call(ctx, client.gridConn, &RenameFileHandlerParams{
DiskID: client.diskID,
SrcVolume: srcVolume,
SrcFilePath: srcPath,

View File

@@ -57,6 +57,23 @@ type storageRESTServer struct {
poolIndex, setIndex, diskIndex int
}
+var (
+storageCheckPartsRPC = grid.NewSingleHandler[*CheckPartsHandlerParams, grid.NoPayload](grid.HandlerCheckParts, func() *CheckPartsHandlerParams { return &CheckPartsHandlerParams{} }, grid.NewNoPayload)
+storageDeleteFileRPC = grid.NewSingleHandler[*DeleteFileHandlerParams, grid.NoPayload](grid.HandlerDeleteFile, func() *DeleteFileHandlerParams { return &DeleteFileHandlerParams{} }, grid.NewNoPayload).AllowCallRequestPool(true)
+storageDeleteVersionRPC = grid.NewSingleHandler[*DeleteVersionHandlerParams, grid.NoPayload](grid.HandlerDeleteVersion, func() *DeleteVersionHandlerParams { return &DeleteVersionHandlerParams{} }, grid.NewNoPayload)
+storageDiskInfoRPC = grid.NewSingleHandler[*DiskInfoOptions, *DiskInfo](grid.HandlerDiskInfo, func() *DiskInfoOptions { return &DiskInfoOptions{} }, func() *DiskInfo { return &DiskInfo{} }).WithSharedResponse().AllowCallRequestPool(true)
+storageNSScannerRPC = grid.NewStream[*nsScannerOptions, grid.NoPayload, *nsScannerResp](grid.HandlerNSScanner, func() *nsScannerOptions { return &nsScannerOptions{} }, nil, func() *nsScannerResp { return &nsScannerResp{} })
+storageReadAllRPC = grid.NewSingleHandler[*ReadAllHandlerParams, *grid.Bytes](grid.HandlerReadAll, func() *ReadAllHandlerParams { return &ReadAllHandlerParams{} }, grid.NewBytes).AllowCallRequestPool(true)
+storageReadVersionRPC = grid.NewSingleHandler[*grid.MSS, *FileInfo](grid.HandlerReadVersion, grid.NewMSS, func() *FileInfo { return &FileInfo{} })
+storageReadXLRPC = grid.NewSingleHandler[*grid.MSS, *RawFileInfo](grid.HandlerReadXL, grid.NewMSS, func() *RawFileInfo { return &RawFileInfo{} })
+storageRenameDataRPC = grid.NewSingleHandler[*RenameDataHandlerParams, *RenameDataResp](grid.HandlerRenameData, func() *RenameDataHandlerParams { return &RenameDataHandlerParams{} }, func() *RenameDataResp { return &RenameDataResp{} })
+storageRenameFileRPC = grid.NewSingleHandler[*RenameFileHandlerParams, grid.NoPayload](grid.HandlerRenameFile, func() *RenameFileHandlerParams { return &RenameFileHandlerParams{} }, grid.NewNoPayload).AllowCallRequestPool(true)
+storageStatVolRPC = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerStatVol, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
+storageUpdateMetadataRPC = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerUpdateMetadata, func() *MetadataHandlerParams { return &MetadataHandlerParams{} }, grid.NewNoPayload)
+storageWriteMetadataRPC = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerWriteMetadata, func() *MetadataHandlerParams { return &MetadataHandlerParams{} }, grid.NewNoPayload)
+storageListDirRPC = grid.NewStream[*grid.MSS, grid.NoPayload, *ListDirResult](grid.HandlerListDir, grid.NewMSS, nil, func() *ListDirResult { return &ListDirResult{} }).WithOutCapacity(1)
+)
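
Each `grid.NewSingleHandler[Req, Resp]` value above is shared by both sides of the wire: it provides the typed `Call` used in the client file and the typed `Register` used further below, with msgp handling serialization and value pooling. A minimal sketch of the pattern, assuming hypothetical PingParams/PingResp msgp types and a handlerPing HandlerID (none of these exist in the PR):

```go
// pingRPC pairs a request and response type with a stable HandlerID.
var pingRPC = grid.NewSingleHandler[*PingParams, *PingResp](
	handlerPing, // wire ID; ordering of HandlerID constants must stay stable
	func() *PingParams { return &PingParams{} }, // pooled request factory
	func() *PingResp { return &PingResp{} },     // pooled response factory
)

// Server side: register a typed callback under the endpoint subroute.
func registerPing(gm *grid.Manager, subroute string) error {
	return pingRPC.Register(gm, func(p *PingParams) (*PingResp, *grid.RemoteErr) {
		return &PingResp{}, nil // handler logic goes here
	}, subroute)
}

// Client side: one typed round trip over an established grid connection.
func ping(ctx context.Context, conn *grid.Connection) (*PingResp, error) {
	return pingRPC.Call(ctx, conn, &PingParams{})
}
```

Options such as `.AllowCallRequestPool(true)` and `.WithSharedResponse()` seen above tune how aggressively the pooled request and response values are recycled.
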
func (s *storageRESTServer) getStorage() StorageAPI {
globalLocalDrivesMu.RLock()
defer globalLocalDrivesMu.RUnlock()
@@ -198,11 +215,6 @@ func (s *storageRESTServer) HealthHandler(w http.ResponseWriter, r *http.Request
s.IsValid(w, r)
}
-// DiskInfo types.
-// DiskInfo.Metrics elements are shared, so we cannot reuse.
-var storageDiskInfoHandler = grid.NewSingleHandler[*DiskInfoOptions, *DiskInfo](grid.HandlerDiskInfo, func() *DiskInfoOptions { return &DiskInfoOptions{} },
-func() *DiskInfo { return &DiskInfo{} }).WithSharedResponse().AllowCallRequestPool(true)
// DiskInfoHandler - returns disk info.
func (s *storageRESTServer) DiskInfoHandler(opts *DiskInfoOptions) (*DiskInfo, *grid.RemoteErr) {
if !s.checkID(opts.DiskID) {
@@ -215,12 +227,6 @@ func (s *storageRESTServer) DiskInfoHandler(opts *DiskInfoOptions) (*DiskInfo, *
return &info, nil
}
-// scanner rpc handler.
-var storageNSScannerHandler = grid.NewStream[*nsScannerOptions, grid.NoPayload, *nsScannerResp](grid.HandlerNSScanner,
-func() *nsScannerOptions { return &nsScannerOptions{} },
-nil,
-func() *nsScannerResp { return &nsScannerResp{} })
func (s *storageRESTServer) NSScannerHandler(ctx context.Context, params *nsScannerOptions, out chan<- *nsScannerResp) *grid.RemoteErr {
if !s.checkID(params.DiskID) {
return grid.NewRemoteErr(errDiskNotFound)
@@ -236,7 +242,7 @@ func (s *storageRESTServer) NSScannerHandler(ctx context.Context, params *nsScan
go func() {
defer wg.Done()
for update := range updates {
-resp := storageNSScannerHandler.NewResponse()
+resp := storageNSScannerRPC.NewResponse()
resp.Update = &update
out <- resp
}
@@ -247,7 +253,7 @@ func (s *storageRESTServer) NSScannerHandler(ctx context.Context, params *nsScan
return grid.NewRemoteErr(err)
}
// Send final response.
-resp := storageNSScannerHandler.NewResponse()
+resp := storageNSScannerRPC.NewResponse()
resp.Final = &ui
out <- resp
return nil
@@ -277,22 +283,6 @@ func (s *storageRESTServer) MakeVolBulkHandler(w http.ResponseWriter, r *http.Re
}
}
-// ListVolsHandler - list volumes.
-func (s *storageRESTServer) ListVolsHandler(w http.ResponseWriter, r *http.Request) {
-if !s.IsValid(w, r) {
-return
-}
-infos, err := s.getStorage().ListVols(r.Context())
-if err != nil {
-s.writeErrorResponse(w, err)
-return
-}
-logger.LogIf(r.Context(), msgp.Encode(w, VolsInfo(infos)))
-}
-// statvol types.
-var storageStatVolHandler = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerStatVol, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
// StatVolHandler - stat a volume.
func (s *storageRESTServer) StatVolHandler(params *grid.MSS) (*VolInfo, *grid.RemoteErr) {
if !s.checkID(params.Get(storageRESTDiskID)) {
@@ -346,10 +336,6 @@ func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Req
done(s.getStorage().CreateFile(r.Context(), origvolume, volume, filePath, int64(fileSize), body))
}
-var storageDeleteVersionHandler = grid.NewSingleHandler[*DeleteVersionHandlerParams, grid.NoPayload](grid.HandlerDeleteVersion, func() *DeleteVersionHandlerParams {
-return &DeleteVersionHandlerParams{}
-}, grid.NewNoPayload)
// DeleteVersionHandler delete updated metadata.
func (s *storageRESTServer) DeleteVersionHandler(p *DeleteVersionHandlerParams) (np grid.NoPayload, gerr *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@@ -364,10 +350,6 @@ func (s *storageRESTServer) DeleteVersionHandler(p *DeleteVersionHandlerParams)
return np, grid.NewRemoteErr(err)
}
-var storageReadVersionHandler = grid.NewSingleHandler[*grid.MSS, *FileInfo](grid.HandlerReadVersion, grid.NewMSS, func() *FileInfo {
-return &FileInfo{}
-})
// ReadVersionHandlerWS read metadata of versionID
func (s *storageRESTServer) ReadVersionHandlerWS(params *grid.MSS) (*FileInfo, *grid.RemoteErr) {
if !s.checkID(params.Get(storageRESTDiskID)) {
@@ -422,10 +404,6 @@ func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Re
logger.LogIf(r.Context(), msgp.Encode(w, &fi))
}
-var storageWriteMetadataHandler = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerWriteMetadata, func() *MetadataHandlerParams {
-return &MetadataHandlerParams{}
-}, grid.NewNoPayload)
// WriteMetadataHandler rpc handler to write new updated metadata.
func (s *storageRESTServer) WriteMetadataHandler(p *MetadataHandlerParams) (np grid.NoPayload, gerr *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@@ -440,10 +418,6 @@ func (s *storageRESTServer) WriteMetadataHandler(p *MetadataHandlerParams) (np g
return np, grid.NewRemoteErr(err)
}
-var storageUpdateMetadataHandler = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerUpdateMetadata, func() *MetadataHandlerParams {
-return &MetadataHandlerParams{}
-}, grid.NewNoPayload)
// UpdateMetadataHandler update new updated metadata.
func (s *storageRESTServer) UpdateMetadataHandler(p *MetadataHandlerParams) (grid.NoPayload, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@@ -479,10 +453,6 @@ func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Reque
}
}
-var storageCheckPartsHandler = grid.NewSingleHandler[*CheckPartsHandlerParams, grid.NoPayload](grid.HandlerCheckParts, func() *CheckPartsHandlerParams {
-return &CheckPartsHandlerParams{}
-}, grid.NewNoPayload)
// CheckPartsHandler - check if a file metadata exists.
func (s *storageRESTServer) CheckPartsHandler(p *CheckPartsHandlerParams) (grid.NoPayload, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@@ -493,10 +463,6 @@ func (s *storageRESTServer) CheckPartsHandler(p *CheckPartsHandlerParams) (grid.
return grid.NewNPErr(s.getStorage().CheckParts(context.Background(), volume, filePath, p.FI))
}
-var storageReadAllHandler = grid.NewSingleHandler[*ReadAllHandlerParams, *grid.Bytes](grid.HandlerReadAll, func() *ReadAllHandlerParams {
-return &ReadAllHandlerParams{}
-}, grid.NewBytes).AllowCallRequestPool(true)
// ReadAllHandler - read all the contents of a file.
func (s *storageRESTServer) ReadAllHandler(p *ReadAllHandlerParams) (*grid.Bytes, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@@ -532,10 +498,6 @@ func (s *storageRESTServer) ReadXLHandler(w http.ResponseWriter, r *http.Request
logger.LogIf(r.Context(), msgp.Encode(w, &rf))
}
-var storageReadXLHandler = grid.NewSingleHandler[*grid.MSS, *RawFileInfo](grid.HandlerReadXL, grid.NewMSS, func() *RawFileInfo {
-return &RawFileInfo{}
-})
// ReadXLHandlerWS - read xl.meta for an object at path.
func (s *storageRESTServer) ReadXLHandlerWS(params *grid.MSS) (*RawFileInfo, *grid.RemoteErr) {
if !s.checkID(params.Get(storageRESTDiskID)) {
@@ -650,31 +612,26 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http
}
// ListDirHandler - list a directory.
-func (s *storageRESTServer) ListDirHandler(w http.ResponseWriter, r *http.Request) {
-if !s.IsValid(w, r) {
-return
-}
-volume := r.Form.Get(storageRESTVolume)
-dirPath := r.Form.Get(storageRESTDirPath)
-origvolume := r.Form.Get(storageRESTOrigVolume)
-count, err := strconv.Atoi(r.Form.Get(storageRESTCount))
-if err != nil {
-s.writeErrorResponse(w, err)
-return
-}
-entries, err := s.getStorage().ListDir(r.Context(), origvolume, volume, dirPath, count)
-if err != nil {
-s.writeErrorResponse(w, err)
-return
-}
-gob.NewEncoder(w).Encode(&entries)
-}
+func (s *storageRESTServer) ListDirHandler(ctx context.Context, params *grid.MSS, out chan<- *ListDirResult) *grid.RemoteErr {
+if !s.checkID(params.Get(storageRESTDiskID)) {
+return grid.NewRemoteErr(errDiskNotFound)
+}
+volume := params.Get(storageRESTVolume)
+dirPath := params.Get(storageRESTDirPath)
+origvolume := params.Get(storageRESTOrigVolume)
+count, err := strconv.Atoi(params.Get(storageRESTCount))
+if err != nil {
+return grid.NewRemoteErr(err)
+}
+entries, err := s.getStorage().ListDir(ctx, origvolume, volume, dirPath, count)
+if err != nil {
+return grid.NewRemoteErr(err)
+}
+out <- &ListDirResult{Entries: entries}
+return nil
+}
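
Server-side, the streaming shape mirrors the client: the handler writes typed values to `out` instead of an `http.ResponseWriter`, and a returned `*grid.RemoteErr` surfaces as the error of the caller's `Results` loop. A hedged sketch with canned data (exampleListDir is illustrative only, not part of this commit):

```go
// exampleListDir mimics the converted ListDirHandler. The framework
// closes out when the handler returns; returning nil ends the stream cleanly.
func exampleListDir(ctx context.Context, params *grid.MSS, out chan<- *ListDirResult) *grid.RemoteErr {
	entries := []string{"a/", "b.txt"} // stand-in for getStorage().ListDir(...)
	select {
	case out <- &ListDirResult{Entries: entries}:
	case <-ctx.Done():
		return grid.NewRemoteErr(ctx.Err())
	}
	return nil
}
```

The declaration of storageListDirRPC uses .WithOutCapacity(1), which appears to size the output channel for this single-message response.
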
-var storageDeleteFileHandler = grid.NewSingleHandler[*DeleteFileHandlerParams, grid.NoPayload](grid.HandlerDeleteFile, func() *DeleteFileHandlerParams {
-return &DeleteFileHandlerParams{}
-}, grid.NewNoPayload).AllowCallRequestPool(true)
// DeleteFileHandler - delete a file.
func (s *storageRESTServer) DeleteFileHandler(p *DeleteFileHandlerParams) (grid.NoPayload, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@@ -730,12 +687,6 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
encoder.Encode(dErrsResp)
}
-var storageRenameDataHandler = grid.NewSingleHandler[*RenameDataHandlerParams, *RenameDataResp](grid.HandlerRenameData, func() *RenameDataHandlerParams {
-return &RenameDataHandlerParams{}
-}, func() *RenameDataResp {
-return &RenameDataResp{}
-})
// RenameDataHandler - renames a meta object and data dir to destination.
func (s *storageRESTServer) RenameDataHandler(p *RenameDataHandlerParams) (*RenameDataResp, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@@ -749,10 +700,6 @@ func (s *storageRESTServer) RenameDataHandler(p *RenameDataHandlerParams) (*Rena
return resp, grid.NewRemoteErr(err)
}
-var storageRenameFileHandler = grid.NewSingleHandler[*RenameFileHandlerParams, grid.NoPayload](grid.HandlerRenameFile, func() *RenameFileHandlerParams {
-return &RenameFileHandlerParams{}
-}, grid.NewNoPayload).AllowCallRequestPool(true)
// RenameFileHandler - rename a file from source to destination
func (s *storageRESTServer) RenameFileHandler(p *RenameFileHandlerParams) (grid.NoPayload, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@@ -1356,26 +1303,26 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCreateFile).HandlerFunc(h(server.CreateFileHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFile).HandlerFunc(h(server.ReadFileHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFileStream).HandlerFunc(h(server.ReadFileStreamHandler))
-subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodListDir).HandlerFunc(h(server.ListDirHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersions).HandlerFunc(h(server.DeleteVersionsHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(h(server.VerifyFileHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodStatInfoFile).HandlerFunc(h(server.StatInfoFile))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadMultiple).HandlerFunc(h(server.ReadMultiple))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCleanAbandoned).HandlerFunc(h(server.CleanAbandonedDataHandler))
-logger.FatalIf(storageReadAllHandler.Register(gm, server.ReadAllHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageRenameFileHandler.Register(gm, server.RenameFileHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageRenameDataHandler.Register(gm, server.RenameDataHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageDeleteFileHandler.Register(gm, server.DeleteFileHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageCheckPartsHandler.Register(gm, server.CheckPartsHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageReadVersionHandler.Register(gm, server.ReadVersionHandlerWS, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageWriteMetadataHandler.Register(gm, server.WriteMetadataHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageUpdateMetadataHandler.Register(gm, server.UpdateMetadataHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageDeleteVersionHandler.Register(gm, server.DeleteVersionHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageReadXLHandler.Register(gm, server.ReadXLHandlerWS, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageNSScannerHandler.RegisterNoInput(gm, server.NSScannerHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageDiskInfoHandler.Register(gm, server.DiskInfoHandler, endpoint.Path), "unable to register handler")
-logger.FatalIf(storageStatVolHandler.Register(gm, server.StatVolHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageListDirRPC.RegisterNoInput(gm, server.ListDirHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageReadAllRPC.Register(gm, server.ReadAllHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageRenameFileRPC.Register(gm, server.RenameFileHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageRenameDataRPC.Register(gm, server.RenameDataHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageDeleteFileRPC.Register(gm, server.DeleteFileHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageCheckPartsRPC.Register(gm, server.CheckPartsHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageReadVersionRPC.Register(gm, server.ReadVersionHandlerWS, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageWriteMetadataRPC.Register(gm, server.WriteMetadataHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageUpdateMetadataRPC.Register(gm, server.UpdateMetadataHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageDeleteVersionRPC.Register(gm, server.DeleteVersionHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageReadXLRPC.Register(gm, server.ReadXLHandlerWS, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageNSScannerRPC.RegisterNoInput(gm, server.NSScannerHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageDiskInfoRPC.Register(gm, server.DiskInfoHandler, endpoint.Path), "unable to register handler")
+logger.FatalIf(storageStatVolRPC.Register(gm, server.StatVolHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(gm.RegisterStreamingHandler(grid.HandlerWalkDir, grid.StreamHandler{
Subroute: endpoint.Path,
Handle: server.WalkDirHandler,


@@ -23,6 +23,8 @@ import (
"github.com/minio/madmin-go/v3"
)
+//go:generate msgp -file=$GOFILE -unexported
type lastDayTierStats struct {
Bins [24]tierStats
UpdatedAt time.Time
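
The `//go:generate` directive added here is what produces the generated file that follows: running `go generate` invokes msgp, which emits MarshalMsg, UnmarshalMsg, EncodeMsg, DecodeMsg, and Msgsize for these types (the -unexported flag includes lowercase types such as lastDayTierStats). A minimal round-trip sketch using the generated methods (roundTrip itself is illustrative only):

```go
// roundTrip exercises the generated msgp methods on lastDayTierStats.
func roundTrip(in lastDayTierStats) (out lastDayTierStats, err error) {
	buf, err := in.MarshalMsg(nil) // append-style marshal into a fresh slice
	if err != nil {
		return out, err
	}
	_, err = out.UnmarshalMsg(buf) // returns any leftover bytes, unused here
	return out, err
}
```
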


@@ -0,0 +1,417 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *DailyAllTierStats) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(DailyAllTierStats, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
for zb0004 > 0 {
zb0004--
var zb0001 string
var zb0002 lastDayTierStats
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
var field []byte
_ = field
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
for zb0005 > 0 {
zb0005--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "Bins":
var zb0006 uint32
zb0006, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, zb0001, "Bins")
return
}
if zb0006 != uint32(24) {
err = msgp.ArrayError{Wanted: uint32(24), Got: zb0006}
return
}
for zb0003 := range zb0002.Bins {
err = zb0002.Bins[zb0003].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, zb0001, "Bins", zb0003)
return
}
}
case "UpdatedAt":
zb0002.UpdatedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, zb0001, "UpdatedAt")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
(*z)[zb0001] = zb0002
}
return
}
// EncodeMsg implements msgp.Encodable
func (z DailyAllTierStats) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteMapHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0007, zb0008 := range z {
err = en.WriteString(zb0007)
if err != nil {
err = msgp.WrapError(err)
return
}
// map header, size 2
// write "Bins"
err = en.Append(0x82, 0xa4, 0x42, 0x69, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(24))
if err != nil {
err = msgp.WrapError(err, zb0007, "Bins")
return
}
for zb0009 := range zb0008.Bins {
err = zb0008.Bins[zb0009].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, zb0007, "Bins", zb0009)
return
}
}
// write "UpdatedAt"
err = en.Append(0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(zb0008.UpdatedAt)
if err != nil {
err = msgp.WrapError(err, zb0007, "UpdatedAt")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z DailyAllTierStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendMapHeader(o, uint32(len(z)))
for zb0007, zb0008 := range z {
o = msgp.AppendString(o, zb0007)
// map header, size 2
// string "Bins"
o = append(o, 0x82, 0xa4, 0x42, 0x69, 0x6e, 0x73)
o = msgp.AppendArrayHeader(o, uint32(24))
for zb0009 := range zb0008.Bins {
o, err = zb0008.Bins[zb0009].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, zb0007, "Bins", zb0009)
return
}
}
// string "UpdatedAt"
o = append(o, 0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, zb0008.UpdatedAt)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *DailyAllTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(DailyAllTierStats, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
for zb0004 > 0 {
var zb0001 string
var zb0002 lastDayTierStats
zb0004--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var field []byte
_ = field
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
for zb0005 > 0 {
zb0005--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "Bins":
var zb0006 uint32
zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "Bins")
return
}
if zb0006 != uint32(24) {
err = msgp.ArrayError{Wanted: uint32(24), Got: zb0006}
return
}
for zb0003 := range zb0002.Bins {
bts, err = zb0002.Bins[zb0003].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "Bins", zb0003)
return
}
}
case "UpdatedAt":
zb0002.UpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "UpdatedAt")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
(*z)[zb0001] = zb0002
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z DailyAllTierStats) Msgsize() (s int) {
s = msgp.MapHeaderSize
if z != nil {
for zb0007, zb0008 := range z {
_ = zb0008
s += msgp.StringPrefixSize + len(zb0007) + 1 + 5 + msgp.ArrayHeaderSize
for zb0009 := range zb0008.Bins {
s += zb0008.Bins[zb0009].Msgsize()
}
s += 10 + msgp.TimeSize
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *lastDayTierStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Bins":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Bins")
return
}
if zb0002 != uint32(24) {
err = msgp.ArrayError{Wanted: uint32(24), Got: zb0002}
return
}
for za0001 := range z.Bins {
err = z.Bins[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Bins", za0001)
return
}
}
case "UpdatedAt":
z.UpdatedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "UpdatedAt")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *lastDayTierStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "Bins"
err = en.Append(0x82, 0xa4, 0x42, 0x69, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(24))
if err != nil {
err = msgp.WrapError(err, "Bins")
return
}
for za0001 := range z.Bins {
err = z.Bins[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Bins", za0001)
return
}
}
// write "UpdatedAt"
err = en.Append(0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.UpdatedAt)
if err != nil {
err = msgp.WrapError(err, "UpdatedAt")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *lastDayTierStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "Bins"
o = append(o, 0x82, 0xa4, 0x42, 0x69, 0x6e, 0x73)
o = msgp.AppendArrayHeader(o, uint32(24))
for za0001 := range z.Bins {
o, err = z.Bins[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Bins", za0001)
return
}
}
// string "UpdatedAt"
o = append(o, 0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.UpdatedAt)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *lastDayTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Bins":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bins")
return
}
if zb0002 != uint32(24) {
err = msgp.ArrayError{Wanted: uint32(24), Got: zb0002}
return
}
for za0001 := range z.Bins {
bts, err = z.Bins[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Bins", za0001)
return
}
}
case "UpdatedAt":
z.UpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UpdatedAt")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *lastDayTierStats) Msgsize() (s int) {
s = 1 + 5 + msgp.ArrayHeaderSize
for za0001 := range z.Bins {
s += z.Bins[za0001].Msgsize()
}
s += 10 + msgp.TimeSize
return
}


@@ -0,0 +1,236 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalDailyAllTierStats(t *testing.T) {
v := DailyAllTierStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDailyAllTierStats(t *testing.T) {
v := DailyAllTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDailyAllTierStats Msgsize() is inaccurate")
}
vn := DailyAllTierStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshallastDayTierStats(t *testing.T) {
v := lastDayTierStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelastDayTierStats(t *testing.T) {
v := lastDayTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelastDayTierStats Msgsize() is inaccurate")
}
vn := lastDayTierStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@@ -17,6 +17,8 @@
package bandwidth
+//go:generate msgp -file=$GOFILE -unexported
import (
"context"
"sync"
@@ -25,6 +27,8 @@ import (
"golang.org/x/time/rate"
)
+//msgp:ignore bucketThrottle Monitor
type bucketThrottle struct {
*rate.Limiter
NodeBandwidthPerSec int64
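
The `//msgp:ignore` directive excludes the named types from code generation: bucketThrottle embeds a *rate.Limiter and Monitor carries live state, neither of which serializes meaningfully, so only the report types in this package get msgp methods. A hypothetical file-level illustration (liveState and Report are made up for this sketch):

```go
package bandwidth

import "sync"

//go:generate msgp -file=$GOFILE -unexported

//msgp:ignore liveState
type liveState struct{ mu sync.Mutex } // runtime-only; skipped by the generator

type Report struct{ Count int64 } // gets MarshalMsg/UnmarshalMsg and friends
```
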


@@ -0,0 +1,218 @@
package bandwidth
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BucketBandwidthReport) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z BucketBandwidthReport) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 0
err = en.Append(0x80)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z BucketBandwidthReport) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 0
o = append(o, 0x80)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketBandwidthReport) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BucketBandwidthReport) Msgsize() (s int) {
s = 1
return
}
// DecodeMsg implements msgp.Decodable
func (z *Details) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "LimitInBytesPerSecond":
z.LimitInBytesPerSecond, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "LimitInBytesPerSecond")
return
}
case "CurrentBandwidthInBytesPerSecond":
z.CurrentBandwidthInBytesPerSecond, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Details) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "LimitInBytesPerSecond"
err = en.Append(0x82, 0xb5, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.LimitInBytesPerSecond)
if err != nil {
err = msgp.WrapError(err, "LimitInBytesPerSecond")
return
}
// write "CurrentBandwidthInBytesPerSecond"
err = en.Append(0xd9, 0x20, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
if err != nil {
return
}
err = en.WriteFloat64(z.CurrentBandwidthInBytesPerSecond)
if err != nil {
err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Details) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "LimitInBytesPerSecond"
o = append(o, 0x82, 0xb5, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
o = msgp.AppendInt64(o, z.LimitInBytesPerSecond)
// string "CurrentBandwidthInBytesPerSecond"
o = append(o, 0xd9, 0x20, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
o = msgp.AppendFloat64(o, z.CurrentBandwidthInBytesPerSecond)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Details) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "LimitInBytesPerSecond":
z.LimitInBytesPerSecond, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LimitInBytesPerSecond")
return
}
case "CurrentBandwidthInBytesPerSecond":
z.CurrentBandwidthInBytesPerSecond, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Details) Msgsize() (s int) {
s = 1 + 22 + msgp.Int64Size + 34 + msgp.Float64Size
return
}


@@ -0,0 +1,236 @@
package bandwidth
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBucketBandwidthReport(t *testing.T) {
v := BucketBandwidthReport{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketBandwidthReport(t *testing.T) {
v := BucketBandwidthReport{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketBandwidthReport Msgsize() is inaccurate")
}
vn := BucketBandwidthReport{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalDetails(t *testing.T) {
v := Details{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDetails(b *testing.B) {
v := Details{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDetails(b *testing.B) {
v := Details{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDetails(b *testing.B) {
v := Details{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDetails(t *testing.T) {
v := Details{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDetails Msgsize() is inaccurate")
}
vn := Details{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDetails(b *testing.B) {
v := Details{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDetails(b *testing.B) {
v := Details{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@ -36,6 +36,7 @@ import (
// HandlerID is a handler identifier. // HandlerID is a handler identifier.
// It is used to determine request routing on the server. // It is used to determine request routing on the server.
// Handlers can be registered with a static subroute. // Handlers can be registered with a static subroute.
// Do NOT remove or change the order of existing handlers.
const ( const (
// handlerInvalid is reserved to check for uninitialized values. // handlerInvalid is reserved to check for uninitialized values.
handlerInvalid HandlerID = iota handlerInvalid HandlerID = iota
@ -69,7 +70,6 @@ const (
HandlerStopRebalance HandlerStopRebalance
HandlerLoadRebalanceMeta HandlerLoadRebalanceMeta
HandlerLoadTransitionTierConfig HandlerLoadTransitionTierConfig
HandlerDeletePolicy HandlerDeletePolicy
HandlerLoadPolicy HandlerLoadPolicy
HandlerLoadPolicyMapping HandlerLoadPolicyMapping
@ -78,11 +78,37 @@ const (
HandlerDeleteUser HandlerDeleteUser
HandlerLoadUser HandlerLoadUser
HandlerLoadGroup HandlerLoadGroup
HandlerHealBucket HandlerHealBucket
HandlerMakeBucket HandlerMakeBucket
HandlerHeadBucket HandlerHeadBucket
HandlerDeleteBucket HandlerDeleteBucket
HandlerGetMetrics
HandlerGetResourceMetrics
HandlerGetMemInfo
HandlerGetProcInfo
HandlerGetOSInfo
HandlerGetPartitions
HandlerGetNetInfo
HandlerGetCPUs
HandlerServerInfo
HandlerGetSysConfig
HandlerGetSysServices
HandlerGetSysErrors
HandlerGetAllBucketStats
HandlerGetBucketStats
HandlerGetSRMetrics
HandlerGetPeerMetrics
HandlerGetMetacacheListing
HandlerUpdateMetacacheListing
HandlerGetPeerBucketMetrics
HandlerStorageInfo
HandlerConsoleLog
HandlerListDir
HandlerGetLocks
HandlerBackgroundHealStatus
HandlerGetLastDayTierStats
HandlerSignalService
HandlerGetBandwidth
// Add more above here ^^^ // Add more above here ^^^
// If all handlers are used, the type of Handler can be changed. // If all handlers are used, the type of Handler can be changed.
@ -137,6 +163,28 @@ var handlerPrefixes = [handlerLast]string{
HandlerHeadBucket: peerPrefixS3, HandlerHeadBucket: peerPrefixS3,
HandlerDeleteBucket: peerPrefixS3, HandlerDeleteBucket: peerPrefixS3,
HandlerHealBucket: healPrefix, HandlerHealBucket: healPrefix,
HandlerGetMetrics: peerPrefix,
HandlerGetResourceMetrics: peerPrefix,
HandlerGetMemInfo: peerPrefix,
HandlerGetProcInfo: peerPrefix,
HandlerGetOSInfo: peerPrefix,
HandlerGetPartitions: peerPrefix,
HandlerGetNetInfo: peerPrefix,
HandlerGetCPUs: peerPrefix,
HandlerServerInfo: peerPrefix,
HandlerGetSysConfig: peerPrefix,
HandlerGetSysServices: peerPrefix,
HandlerGetSysErrors: peerPrefix,
HandlerGetAllBucketStats: peerPrefix,
HandlerGetBucketStats: peerPrefix,
HandlerGetSRMetrics: peerPrefix,
HandlerGetPeerMetrics: peerPrefix,
HandlerGetMetacacheListing: peerPrefix,
HandlerUpdateMetacacheListing: peerPrefix,
HandlerGetPeerBucketMetrics: peerPrefix,
HandlerStorageInfo: peerPrefix,
HandlerConsoleLog: peerPrefix,
HandlerListDir: storagePrefix,
} }
const ( const (
@ -344,9 +392,10 @@ type RoundTripper interface {
// SingleHandler is a type safe handler for single roundtrip requests.
type SingleHandler[Req, Resp RoundTripper] struct {
id HandlerID
sharedResp bool
callReuseReq bool
ignoreNilConn bool
newReq func() Req
newResp func() Resp
@ -407,6 +456,17 @@ func (h *SingleHandler[Req, Resp]) AllowCallRequestPool(b bool) *SingleHandler[R
return h
}
// IgnoreNilConn will ignore nil connections when calling.
// This will make Call return a nil error instead of ErrDisconnected when the connection is nil.
// This may only be set ONCE before use.
func (h *SingleHandler[Req, Resp]) IgnoreNilConn() *SingleHandler[Req, Resp] {
if h.ignoreNilConn {
logger.LogOnceIf(context.Background(), fmt.Errorf("%s: IgnoreNilConn called twice", h.id.String()), h.id.String()+"IgnoreNilConn")
}
h.ignoreNilConn = true
return h
}
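A minimal caller-side sketch of the new semantics, using grid's own MSS type and test handler ID as hypothetical stand-ins: with IgnoreNilConn set, calling through a nil connection yields the zero response and a nil error instead of ErrDisconnected.

```
// Hypothetical handler with the option applied.
var statsHandler = NewSingleHandler[*MSS, *MSS](handlerTest, NewMSS, NewMSS).IgnoreNilConn()

func queryPeer(ctx context.Context, conn Requester) (*MSS, error) {
	// conn may legitimately be nil, e.g. when the "peer" is the local node.
	resp, err := statsHandler.Call(ctx, conn, NewMSS())
	if err != nil {
		return nil, err
	}
	// resp is nil (the zero value) when the call was skipped.
	return resp, nil
}
```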
// WithSharedResponse indicates it is unsafe to reuse the response
// when it has been returned on a handler.
// This will disable automatic response recycling/pooling.
@ -476,6 +536,12 @@ type Requester interface {
// The response should be returned with PutResponse when no error.
// If no deadline is set, a 1-minute deadline is added.
func (h *SingleHandler[Req, Resp]) Call(ctx context.Context, c Requester, req Req) (resp Resp, err error) {
if c == nil {
if h.ignoreNilConn {
return resp, nil
}
return resp, ErrDisconnected
}
payload, err := req.MarshalMsg(GetByteBuffer()[:0])
if err != nil {
return resp, err
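For context, Call only requires that Req and Resp satisfy the package's RoundTripper constraint (msgp marshal, unmarshal, and size). A self-contained sketch of a minimal conforming type, assuming the standard github.com/tinylib/msgp helpers; pingReq is hypothetical:

```
import "github.com/tinylib/msgp/msgp"

// pingReq is a hypothetical minimal request type.
type pingReq struct{ ID string }

// MarshalMsg appends the ID as a msgp string.
func (p *pingReq) MarshalMsg(b []byte) ([]byte, error) {
	return msgp.AppendString(b, p.ID), nil
}

// UnmarshalMsg reads the ID back and returns any remaining bytes.
func (p *pingReq) UnmarshalMsg(b []byte) ([]byte, error) {
	var err error
	p.ID, b, err = msgp.ReadStringBytes(b)
	return b, err
}

// Msgsize returns an upper bound on the encoded size.
func (p *pingReq) Msgsize() int { return msgp.StringPrefixSize + len(p.ID) }
```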
@ -777,6 +843,9 @@ type Streamer interface {
// Call the remote with the request and return a typed stream.
func (h *StreamTypeHandler[Payload, Req, Resp]) Call(ctx context.Context, c Streamer, payload Payload) (st *TypedStream[Req, Resp], err error) {
if c == nil {
return nil, ErrDisconnected
}
var payloadB []byte
if h.WithPayload {
var err error
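Streams receive the same nil-connection guard but no opt-out, so callers handle absent peers explicitly; a brief sketch with a hypothetical streamHandler:

```
st, err := streamHandler.Call(ctx, conn, NewMSS())
if errors.Is(err, ErrDisconnected) {
	// Peer offline, or conn was nil: skip this peer rather than fail hard.
	return nil
}
if err != nil {
	return err
}
_ = st // consume the typed stream here
```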

View File

@ -51,14 +51,41 @@ func _() {
_ = x[HandlerMakeBucket-40]
_ = x[HandlerHeadBucket-41]
_ = x[HandlerDeleteBucket-42]
_ = x[HandlerGetMetrics-43]
_ = x[HandlerGetResourceMetrics-44]
_ = x[HandlerGetMemInfo-45]
_ = x[HandlerGetProcInfo-46]
_ = x[HandlerGetOSInfo-47]
_ = x[HandlerGetPartitions-48]
_ = x[HandlerGetNetInfo-49]
_ = x[HandlerGetCPUs-50]
_ = x[HandlerServerInfo-51]
_ = x[HandlerGetSysConfig-52]
_ = x[HandlerGetSysServices-53]
_ = x[HandlerGetSysErrors-54]
_ = x[HandlerGetAllBucketStats-55]
_ = x[HandlerGetBucketStats-56]
_ = x[HandlerGetSRMetrics-57]
_ = x[HandlerGetPeerMetrics-58]
_ = x[HandlerGetMetacacheListing-59]
_ = x[HandlerUpdateMetacacheListing-60]
_ = x[HandlerGetPeerBucketMetrics-61]
_ = x[HandlerStorageInfo-62]
_ = x[HandlerConsoleLog-63]
_ = x[HandlerListDir-64]
_ = x[HandlerGetLocks-65]
_ = x[HandlerBackgroundHealStatus-66]
_ = x[HandlerGetLastDayTierStats-67]
_ = x[HandlerSignalService-68]
_ = x[HandlerGetBandwidth-69]
_ = x[handlerTest-70]
_ = x[handlerTest2-71]
_ = x[handlerLast-72]
}
const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyTraceListenDeleteBucketMetadataLoadBucketMetadataReloadSiteReplicationConfigReloadPoolMetaStopRebalanceLoadRebalanceMetaLoadTransitionTierConfigDeletePolicyLoadPolicyLoadPolicyMappingDeleteServiceAccountLoadServiceAccountDeleteUserLoadUserLoadGroupHealBucketMakeBucketHeadBucketDeleteBucketGetMetricsGetResourceMetricsGetMemInfoGetProcInfoGetOSInfoGetPartitionsGetNetInfoGetCPUsServerInfoGetSysConfigGetSysServicesGetSysErrorsGetAllBucketStatsGetBucketStatsGetSRMetricsGetPeerMetricsGetMetacacheListingUpdateMetacacheListingGetPeerBucketMetricsStorageInfoConsoleLogListDirGetLocksBackgroundHealStatusGetLastDayTierStatsSignalServiceGetBandwidthhandlerTesthandlerTest2handlerLast"
var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 230, 236, 256, 274, 301, 315, 328, 345, 369, 381, 391, 408, 428, 446, 456, 464, 473, 483, 493, 503, 515, 525, 543, 553, 564, 573, 586, 596, 603, 613, 625, 639, 651, 668, 682, 694, 708, 727, 749, 769, 780, 790, 797, 805, 825, 844, 857, 869, 880, 892, 903}
func (i HandlerID) String() string {
if i >= HandlerID(len(_HandlerID_index)-1) {
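A worked example of how the regenerated tables resolve names: _HandlerID_index[43] is 515 and _HandlerID_index[44] is 525, so the stringer slices those bytes out of _HandlerID_name:

```
// HandlerGetMetrics == 43, so String() returns
// _HandlerID_name[_HandlerID_index[43]:_HandlerID_index[44]].
fmt.Println(HandlerGetMetrics.String()) // "GetMetrics"
```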

View File

@ -250,7 +250,7 @@ func (m *Manager) RegisterSingleHandler(id HandlerID, h SingleHandlerFn, subrout
if len(subroute) == 0 {
if m.handlers.hasAny(id) && !id.isTestHandler() {
return fmt.Errorf("handler %v: %w", id.String(), ErrHandlerAlreadyExists)
}
m.handlers.single[id] = h
@ -258,7 +258,7 @@ func (m *Manager) RegisterSingleHandler(id HandlerID, h SingleHandlerFn, subrout
}
subID := makeSubHandlerID(id, s)
if m.handlers.hasSubhandler(subID) && !id.isTestHandler() {
return fmt.Errorf("handler %v, subroute:%v: %w", id.String(), s, ErrHandlerAlreadyExists)
}
m.handlers.subSingle[subID] = h
// Copy so clients can also pick it up for other subpaths.
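Because the sentinel is wrapped with %w, callers can still match it with errors.Is; the registration arguments below are illustrative:

```
if err := m.RegisterSingleHandler(HandlerGetMetrics, handlerFn); err != nil {
	if errors.Is(err, ErrHandlerAlreadyExists) {
		// Still matchable after wrapping; the message now also names
		// the duplicate handler (and subroute, if any).
	}
}
```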

View File

@ -18,7 +18,10 @@
package grid
import (
"bytes"
"encoding/json"
"errors"
"math"
"net/url"
"sort"
"strings"
@ -394,6 +397,137 @@ func (u URLValues) Msgsize() (s int) {
return
}
// JSONPool is a pool for JSON objects that unmarshal into T.
type JSONPool[T any] struct {
pool sync.Pool
emptySz int
}
// NewJSONPool returns a new JSONPool.
func NewJSONPool[T any]() *JSONPool[T] {
var t T
sz := 128
if b, err := json.Marshal(t); err == nil {
sz = len(b)
}
return &JSONPool[T]{
pool: sync.Pool{
New: func() interface{} {
var t T
return &t
},
},
emptySz: sz,
}
}
func (p *JSONPool[T]) new() *T {
var zero T
t := p.pool.Get().(*T)
*t = zero
return t
}
// JSON is a wrapper around a T object that can be serialized.
// There is an internal value; Value() returns it, allocating from the pool on first use.
type JSON[T any] struct {
p *JSONPool[T]
val *T
}
// NewJSON returns a new JSON wrapper.
// No initial value is set.
func (p *JSONPool[T]) NewJSON() *JSON[T] {
var j JSON[T]
j.p = p
return &j
}
// NewJSONWith returns a new JSON with the provided value.
func (p *JSONPool[T]) NewJSONWith(val *T) *JSON[T] {
var j JSON[T]
j.p = p
j.val = val
return &j
}
// Value returns the underlying value.
// If not set yet, a new value is created.
func (j *JSON[T]) Value() *T {
if j.val == nil {
j.val = j.p.new()
}
return j.val
}
// ValueOrZero returns the underlying value.
// If the underlying value is nil, a zero value is returned.
func (j *JSON[T]) ValueOrZero() T {
if j == nil || j.val == nil {
var t T
return t
}
return *j.val
}
// Set the underlying value.
func (j *JSON[T]) Set(v *T) {
j.val = v
}
// Recycle the underlying value.
func (j *JSON[T]) Recycle() {
if j.val != nil {
j.p.pool.Put(j.val)
j.val = nil
}
}
// MarshalMsg implements msgp.Marshaler
func (j *JSON[T]) MarshalMsg(b []byte) (o []byte, err error) {
if j.val == nil {
return msgp.AppendNil(b), nil
}
buf := bytes.NewBuffer(GetByteBuffer()[:0])
defer func() {
PutByteBuffer(buf.Bytes())
}()
enc := json.NewEncoder(buf)
err = enc.Encode(j.val)
if err != nil {
return b, err
}
return msgp.AppendBytes(b, buf.Bytes()), nil
}
// UnmarshalMsg reads a msgp byte array and JSON unmarshals its contents into the value.
// Nil values are supported.
func (j *JSON[T]) UnmarshalMsg(bytes []byte) ([]byte, error) {
if bytes, err := msgp.ReadNilBytes(bytes); err == nil {
if j.val != nil {
j.p.pool.Put(j.val)
}
j.val = nil
return bytes, nil
}
val, bytes, err := msgp.ReadBytesZC(bytes)
if err != nil {
return bytes, err
}
if j.val == nil {
j.val = j.p.new()
} else {
var t T
*j.val = t
}
return bytes, json.Unmarshal(val, j.val)
}
// Msgsize returns the size of an empty JSON object.
func (j *JSON[T]) Msgsize() int {
return j.p.emptySz
}
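A minimal round-trip sketch of the pool/wrapper pair; the payload type is hypothetical:

```
type osInfo struct {
	Sensors []string `json:"sensors"`
}

var osInfoPool = NewJSONPool[osInfo]()

func roundTripOSInfo() (osInfo, error) {
	in := osInfoPool.NewJSONWith(&osInfo{Sensors: []string{"cpu"}})
	b, err := in.MarshalMsg(nil) // JSON document wrapped in a msgp byte array
	if err != nil {
		return osInfo{}, err
	}
	out := osInfoPool.NewJSON()
	if _, err = out.UnmarshalMsg(b); err != nil {
		return osInfo{}, err
	}
	defer out.Recycle() // hand the decoded value back to the pool
	return out.ValueOrZero(), nil
}
```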
// NoPayload is a type that can be used for handlers that do not use a payload.
type NoPayload struct{}
@ -419,3 +553,156 @@ func NewNoPayload() NoPayload {
// Recycle is a no-op.
func (NoPayload) Recycle() {}
// ArrayOf wraps an array of MessagePack-compatible objects.
type ArrayOf[T RoundTripper] struct {
aPool sync.Pool // Arrays
ePool sync.Pool // Elements
}
// NewArrayOf returns a new ArrayOf.
// You must provide a function that returns a new instance of T.
func NewArrayOf[T RoundTripper](newFn func() T) *ArrayOf[T] {
return &ArrayOf[T]{
ePool: sync.Pool{New: func() any {
return newFn()
}},
}
}
// New returns a new empty Array.
func (p *ArrayOf[T]) New() *Array[T] {
return &Array[T]{
p: p,
}
}
// NewWith returns a new Array with the provided value (not copied).
func (p *ArrayOf[T]) NewWith(val []T) *Array[T] {
return &Array[T]{
p: p,
val: val,
}
}
func (p *ArrayOf[T]) newA(sz uint32) []T {
t, ok := p.aPool.Get().(*[]T)
if !ok || t == nil {
return make([]T, 0, sz)
}
t2 := *t
return t2[:0]
}
func (p *ArrayOf[T]) putA(v []T) {
for _, t := range v {
p.ePool.Put(t)
}
if v != nil {
v = v[:0]
p.aPool.Put(&v)
}
}
func (p *ArrayOf[T]) newE() T {
return p.ePool.Get().(T)
}
// Array provides a wrapper for an underlying array of serializable objects.
type Array[T RoundTripper] struct {
p *ArrayOf[T]
val []T
}
// Msgsize returns the size of the array in bytes.
func (j *Array[T]) Msgsize() int {
if j.val == nil {
return msgp.NilSize
}
sz := msgp.ArrayHeaderSize
for _, v := range j.val {
sz += v.Msgsize()
}
return sz
}
// Value returns the underlying value.
// Regular append mechanics should be observed.
// If no value has been set yet, a new array is created.
func (j *Array[T]) Value() []T {
if j.val == nil {
j.val = j.p.newA(10)
}
return j.val
}
// Append a value to the underlying array.
// The returned Array is always the same as the one it was called on, so calls can be chained.
func (j *Array[T]) Append(v ...T) *Array[T] {
if j.val == nil {
j.val = j.p.newA(uint32(len(v)))
}
j.val = append(j.val, v...)
return j
}
// Set the underlying value.
func (j *Array[T]) Set(val []T) {
j.val = val
}
// Recycle the underlying value.
func (j *Array[T]) Recycle() {
if j.val != nil {
j.p.putA(j.val)
j.val = nil
}
}
// MarshalMsg implements msgp.Marshaler
func (j *Array[T]) MarshalMsg(b []byte) (o []byte, err error) {
if j.val == nil {
return msgp.AppendNil(b), nil
}
if uint64(len(j.val)) > math.MaxUint32 {
return b, errors.New("array: length of array exceeds math.MaxUint32")
}
b = msgp.AppendArrayHeader(b, uint32(len(j.val)))
for _, v := range j.val {
b, err = v.MarshalMsg(b)
if err != nil {
return b, err
}
}
return b, err
}
// UnmarshalMsg reads a msgp array and unmarshals each element into the underlying slice.
// Nil values are supported.
func (j *Array[T]) UnmarshalMsg(bytes []byte) ([]byte, error) {
if bytes, err := msgp.ReadNilBytes(bytes); err == nil {
if j.val != nil {
j.p.putA(j.val)
}
j.val = nil
return bytes, nil
}
l, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
if err != nil {
return bytes, err
}
if j.val == nil {
j.val = j.p.newA(l)
} else {
j.val = j.val[:0]
}
for i := uint32(0); i < l; i++ {
v := j.p.newE()
bytes, err = v.UnmarshalMsg(bytes)
if err != nil {
return bytes, err
}
j.val = append(j.val, v)
}
return bytes, nil
}
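A matching round-trip sketch for the slice wrapper, reusing the hypothetical pingReq type from the RoundTripper example above:

```
var pingArrays = NewArrayOf[*pingReq](func() *pingReq { return &pingReq{} })

func roundTripPings() (int, error) {
	arr := pingArrays.New()
	arr.Append(&pingReq{ID: "a"}, &pingReq{ID: "b"})
	b, err := arr.MarshalMsg(nil)
	if err != nil {
		return 0, err
	}
	dst := pingArrays.New()
	if _, err = dst.UnmarshalMsg(b); err != nil {
		return 0, err
	}
	defer dst.Recycle() // elements and slice both return to their pools
	return len(dst.Value()), nil // 2
}
```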