Use new gofumpt (#21613)

Update tinylib. Should fix CI.

`gofumpt -w .&&go generate ./...`
This commit is contained in:
Klaus Post 2025-09-28 22:59:21 +02:00 committed by GitHub
parent 456d9462e5
commit b8631cf531
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
171 changed files with 881 additions and 899 deletions

View File

@ -193,27 +193,27 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (result setConfigResult, err error) { func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (result setConfigResult, err error) {
result.Cfg, err = readServerConfig(ctx, objectAPI, nil) result.Cfg, err = readServerConfig(ctx, objectAPI, nil)
if err != nil { if err != nil {
return return result, err
} }
result.Dynamic, err = result.Cfg.ReadConfig(bytes.NewReader(kvBytes)) result.Dynamic, err = result.Cfg.ReadConfig(bytes.NewReader(kvBytes))
if err != nil { if err != nil {
return return result, err
} }
result.SubSys, _, _, err = config.GetSubSys(string(kvBytes)) result.SubSys, _, _, err = config.GetSubSys(string(kvBytes))
if err != nil { if err != nil {
return return result, err
} }
tgts, err := config.ParseConfigTargetID(bytes.NewReader(kvBytes)) tgts, err := config.ParseConfigTargetID(bytes.NewReader(kvBytes))
if err != nil { if err != nil {
return return result, err
} }
ctx = context.WithValue(ctx, config.ContextKeyForTargetFromConfig, tgts) ctx = context.WithValue(ctx, config.ContextKeyForTargetFromConfig, tgts)
if verr := validateConfig(ctx, result.Cfg, result.SubSys); verr != nil { if verr := validateConfig(ctx, result.Cfg, result.SubSys); verr != nil {
err = badConfigErr{Err: verr} err = badConfigErr{Err: verr}
return return result, err
} }
// Check if subnet proxy being set and if so set the same value to proxy of subnet // Check if subnet proxy being set and if so set the same value to proxy of subnet
@ -222,12 +222,12 @@ func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (re
// Update the actual server config on disk. // Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, result.Cfg); err != nil { if err = saveServerConfig(ctx, objectAPI, result.Cfg); err != nil {
return return result, err
} }
// Write the config input KV to history. // Write the config input KV to history.
err = saveServerConfigHistory(ctx, objectAPI, kvBytes) err = saveServerConfigHistory(ctx, objectAPI, kvBytes)
return return result, err
} }
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key} // GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}

View File

@ -380,7 +380,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) { func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host) host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host)
if host == "" { if host == "" {
return return proxy
} }
for nodeIdx, proxyEp := range globalProxyEndpoints { for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Host == host && !proxyEp.IsLocal { if proxyEp.Host == host && !proxyEp.IsLocal {
@ -389,5 +389,5 @@ func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w h
} }
} }
} }
return return proxy
} }

View File

@ -70,7 +70,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) { func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true" opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true"
return return opts
} }
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join // SRPeerJoin - PUT /minio/admin/v3/site-replication/join
@ -422,7 +422,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) { func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true" opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true"
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true" opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true"
return return opts
} }
// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit // SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
@ -484,7 +484,7 @@ func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
opts.EntityValue = q.Get("entityvalue") opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true" opts.ShowDeleted = q.Get("showDeleted") == "true"
opts.Metrics = q.Get("metrics") == "true" opts.Metrics = q.Get("metrics") == "true"
return return opts
} }
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove // SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove

View File

@ -1243,17 +1243,17 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if hip.objPrefix != "" { if hip.objPrefix != "" {
// Bucket is required if object-prefix is given // Bucket is required if object-prefix is given
err = ErrHealMissingBucket err = ErrHealMissingBucket
return return hip, err
} }
} else if isReservedOrInvalidBucket(hip.bucket, false) { } else if isReservedOrInvalidBucket(hip.bucket, false) {
err = ErrInvalidBucketName err = ErrInvalidBucketName
return return hip, err
} }
// empty prefix is valid. // empty prefix is valid.
if !IsValidObjectPrefix(hip.objPrefix) { if !IsValidObjectPrefix(hip.objPrefix) {
err = ErrInvalidObjectName err = ErrInvalidObjectName
return return hip, err
} }
if len(qParams[mgmtClientToken]) > 0 { if len(qParams[mgmtClientToken]) > 0 {
@ -1275,7 +1275,7 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if (hip.forceStart && hip.forceStop) || if (hip.forceStart && hip.forceStop) ||
(hip.clientToken != "" && (hip.forceStart || hip.forceStop)) { (hip.clientToken != "" && (hip.forceStart || hip.forceStop)) {
err = ErrInvalidRequest err = ErrInvalidRequest
return return hip, err
} }
// ignore body if clientToken is provided // ignore body if clientToken is provided
@ -1284,12 +1284,12 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if jerr != nil { if jerr != nil {
adminLogIf(GlobalContext, jerr, logger.ErrorKind) adminLogIf(GlobalContext, jerr, logger.ErrorKind)
err = ErrRequestBodyParse err = ErrRequestBodyParse
return return hip, err
} }
} }
err = ErrNone err = ErrNone
return return hip, err
} }
// HealHandler - POST /minio/admin/v3/heal/ // HealHandler - POST /minio/admin/v3/heal/
@ -2022,7 +2022,7 @@ func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err err
opts.OS = true opts.OS = true
// Older mc - cannot deal with more types... // Older mc - cannot deal with more types...
} }
return return opts, err
} }
// TraceHandler - POST /minio/admin/v3/trace // TraceHandler - POST /minio/admin/v3/trace

View File

@ -31,7 +31,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
var err error var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys errCode = ErrInvalidMaxKeys
return return prefix, marker, delimiter, maxkeys, encodingType, errCode
} }
} else { } else {
maxkeys = maxObjectList maxkeys = maxObjectList
@ -41,7 +41,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
marker = values.Get("marker") marker = values.Get("marker")
delimiter = values.Get("delimiter") delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type") encodingType = values.Get("encoding-type")
return return prefix, marker, delimiter, maxkeys, encodingType, errCode
} }
func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) { func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) {
@ -51,7 +51,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
var err error var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys errCode = ErrInvalidMaxKeys
return return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
} }
} else { } else {
maxkeys = maxObjectList maxkeys = maxObjectList
@ -62,7 +62,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
delimiter = values.Get("delimiter") delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type") encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker") versionIDMarker = values.Get("version-id-marker")
return return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
} }
// Parse bucket url queries for ListObjects V2. // Parse bucket url queries for ListObjects V2.
@ -73,7 +73,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
if val, ok := values["continuation-token"]; ok { if val, ok := values["continuation-token"]; ok {
if len(val[0]) == 0 { if len(val[0]) == 0 {
errCode = ErrIncorrectContinuationToken errCode = ErrIncorrectContinuationToken
return return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
} }
} }
@ -81,7 +81,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
var err error var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys errCode = ErrInvalidMaxKeys
return return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
} }
} else { } else {
maxkeys = maxObjectList maxkeys = maxObjectList
@ -97,11 +97,11 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
decodedToken, err := base64.StdEncoding.DecodeString(token) decodedToken, err := base64.StdEncoding.DecodeString(token)
if err != nil { if err != nil {
errCode = ErrIncorrectContinuationToken errCode = ErrIncorrectContinuationToken
return return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
} }
token = string(decodedToken) token = string(decodedToken)
} }
return return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
} }
// Parse bucket url queries for ?uploads // Parse bucket url queries for ?uploads
@ -112,7 +112,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
var err error var err error
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil { if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
errCode = ErrInvalidMaxUploads errCode = ErrInvalidMaxUploads
return return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode
} }
} else { } else {
maxUploads = maxUploadsList maxUploads = maxUploadsList
@ -123,7 +123,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
uploadIDMarker = values.Get("upload-id-marker") uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter") delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type") encodingType = values.Get("encoding-type")
return return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode
} }
// Parse object url queries // Parse object url queries
@ -134,7 +134,7 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
if values.Get("max-parts") != "" { if values.Get("max-parts") != "" {
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil { if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
errCode = ErrInvalidMaxParts errCode = ErrInvalidMaxParts
return return uploadID, partNumberMarker, maxParts, encodingType, errCode
} }
} else { } else {
maxParts = maxPartsList maxParts = maxPartsList
@ -143,11 +143,11 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
if values.Get("part-number-marker") != "" { if values.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil { if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
errCode = ErrInvalidPartNumberMarker errCode = ErrInvalidPartNumberMarker
return return uploadID, partNumberMarker, maxParts, encodingType, errCode
} }
} }
uploadID = values.Get("uploadId") uploadID = values.Get("uploadId")
encodingType = values.Get("encoding-type") encodingType = values.Get("encoding-type")
return return uploadID, partNumberMarker, maxParts, encodingType, errCode
} }

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"time" "time"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -99,7 +99,7 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
return alg return alg
} }
} }
return return a
} }
func newBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer { func newBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )
@ -59,19 +59,17 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {
if z.MinioEnv == nil { if z.MinioEnv == nil {
z.MinioEnv = make(map[string]string, zb0003) z.MinioEnv = make(map[string]string, zb0003)
} else if len(z.MinioEnv) > 0 { } else if len(z.MinioEnv) > 0 {
for key := range z.MinioEnv { clear(z.MinioEnv)
delete(z.MinioEnv, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
zb0003-- zb0003--
var za0002 string var za0002 string
var za0003 string
za0002, err = dc.ReadString() za0002, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "MinioEnv") err = msgp.WrapError(err, "MinioEnv")
return return
} }
var za0003 string
za0003, err = dc.ReadString() za0003, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "MinioEnv", za0002) err = msgp.WrapError(err, "MinioEnv", za0002)
@ -240,14 +238,12 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.MinioEnv == nil { if z.MinioEnv == nil {
z.MinioEnv = make(map[string]string, zb0003) z.MinioEnv = make(map[string]string, zb0003)
} else if len(z.MinioEnv) > 0 { } else if len(z.MinioEnv) > 0 {
for key := range z.MinioEnv { clear(z.MinioEnv)
delete(z.MinioEnv, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
var za0002 string
var za0003 string var za0003 string
zb0003-- zb0003--
var za0002 string
za0002, bts, err = msgp.ReadStringBytes(bts) za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "MinioEnv") err = msgp.WrapError(err, "MinioEnv")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -592,7 +592,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
output[idx] = obj output[idx] = obj
idx++ idx++
} }
return return output
} }
// Disable timeouts and cancellation // Disable timeouts and cancellation

View File

@ -248,19 +248,19 @@ func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Req
if subToken, nodeIndex = parseRequestToken(token); nodeIndex >= 0 { if subToken, nodeIndex = parseRequestToken(token); nodeIndex >= 0 {
proxied, success = proxyRequestByNodeIndex(ctx, w, r, nodeIndex, returnErr) proxied, success = proxyRequestByNodeIndex(ctx, w, r, nodeIndex, returnErr)
} }
return return subToken, proxied, success
} }
func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int, returnErr bool) (proxied, success bool) { func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int, returnErr bool) (proxied, success bool) {
if len(globalProxyEndpoints) == 0 { if len(globalProxyEndpoints) == 0 {
return return proxied, success
} }
if index < 0 || index >= len(globalProxyEndpoints) { if index < 0 || index >= len(globalProxyEndpoints) {
return return proxied, success
} }
ep := globalProxyEndpoints[index] ep := globalProxyEndpoints[index]
if ep.IsLocal { if ep.IsLocal {
return return proxied, success
} }
return true, proxyRequest(ctx, w, r, ep, returnErr) return true, proxyRequest(ctx, w, r, ep, returnErr)
} }

View File

@ -161,7 +161,7 @@ func (b BucketMetadata) lastUpdate() (t time.Time) {
t = b.BucketTargetsConfigMetaUpdatedAt t = b.BucketTargetsConfigMetaUpdatedAt
} }
return return t
} }
// Versioning returns true if versioning is enabled // Versioning returns true if versioning is enabled
@ -542,13 +542,13 @@ func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI Obje
func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kmsContext kms.Context) (output, metabytes []byte, err error) { func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kmsContext kms.Context) (output, metabytes []byte, err error) {
if GlobalKMS == nil { if GlobalKMS == nil {
output = input output = input
return return output, metabytes, err
} }
metadata := make(map[string]string) metadata := make(map[string]string)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext}) key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext})
if err != nil { if err != nil {
return return output, metabytes, err
} }
outbuf := bytes.NewBuffer(nil) outbuf := bytes.NewBuffer(nil)
@ -561,7 +561,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
} }
metabytes, err = json.Marshal(metadata) metabytes, err = json.Marshal(metadata)
if err != nil { if err != nil {
return return output, metabytes, err
} }
return outbuf.Bytes(), metabytes, nil return outbuf.Bytes(), metabytes, nil
} }

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -97,7 +97,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
} }
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg) return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
} }
return return quotaCfg, err
} }
func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error { func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -172,13 +172,13 @@ func (ri ReplicateObjectInfo) TargetReplicationStatus(arn string) (status replic
repStatMatches := replStatusRegex.FindAllStringSubmatch(ri.ReplicationStatusInternal, -1) repStatMatches := replStatusRegex.FindAllStringSubmatch(ri.ReplicationStatusInternal, -1)
for _, repStatMatch := range repStatMatches { for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 { if len(repStatMatch) != 3 {
return return status
} }
if repStatMatch[1] == arn { if repStatMatch[1] == arn {
return replication.StatusType(repStatMatch[2]) return replication.StatusType(repStatMatch[2])
} }
} }
return return status
} }
// TargetReplicationStatus - returns replication status of a target // TargetReplicationStatus - returns replication status of a target
@ -186,13 +186,13 @@ func (o ObjectInfo) TargetReplicationStatus(arn string) (status replication.Stat
repStatMatches := replStatusRegex.FindAllStringSubmatch(o.ReplicationStatusInternal, -1) repStatMatches := replStatusRegex.FindAllStringSubmatch(o.ReplicationStatusInternal, -1)
for _, repStatMatch := range repStatMatches { for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 { if len(repStatMatch) != 3 {
return return status
} }
if repStatMatch[1] == arn { if repStatMatch[1] == arn {
return replication.StatusType(repStatMatch[2]) return replication.StatusType(repStatMatch[2])
} }
} }
return return status
} }
type replicateTargetDecision struct { type replicateTargetDecision struct {
@ -310,7 +310,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
targetsMap: make(map[string]replicateTargetDecision), targetsMap: make(map[string]replicateTargetDecision),
} }
if len(s) == 0 { if len(s) == 0 {
return return r, err
} }
for p := range strings.SplitSeq(s, ",") { for p := range strings.SplitSeq(s, ",") {
if p == "" { if p == "" {
@ -327,7 +327,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
} }
r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]} r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]}
} }
return return r, err
} }
// ReplicationState represents internal replication state // ReplicationState represents internal replication state
@ -374,7 +374,7 @@ func (rs *ReplicationState) CompositeReplicationStatus() (st replication.StatusT
case !rs.ReplicaStatus.Empty(): case !rs.ReplicaStatus.Empty():
return rs.ReplicaStatus return rs.ReplicaStatus
default: default:
return return st
} }
} }
@ -737,7 +737,7 @@ type BucketReplicationResyncStatus struct {
func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) { func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
m = make(map[string]TargetReplicationResyncStatus) m = make(map[string]TargetReplicationResyncStatus)
maps.Copy(m, rs.TargetsMap) maps.Copy(m, rs.TargetsMap)
return return m
} }
func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus { func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus {
@ -774,7 +774,7 @@ func extractReplicateDiffOpts(q url.Values) (opts madmin.ReplDiffOpts) {
opts.Verbose = q.Get("verbose") == "true" opts.Verbose = q.Get("verbose") == "true"
opts.ARN = q.Get("arn") opts.ARN = q.Get("arn")
opts.Prefix = q.Get("prefix") opts.Prefix = q.Get("prefix")
return return opts
} }
const ( const (

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/replication"
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
@ -41,19 +41,17 @@ func (z *BucketReplicationResyncStatus) DecodeMsg(dc *msgp.Reader) (err error) {
if z.TargetsMap == nil { if z.TargetsMap == nil {
z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002) z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002)
} else if len(z.TargetsMap) > 0 { } else if len(z.TargetsMap) > 0 {
for key := range z.TargetsMap { clear(z.TargetsMap)
delete(z.TargetsMap, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 TargetReplicationResyncStatus
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "TargetsMap") err = msgp.WrapError(err, "TargetsMap")
return return
} }
var za0002 TargetReplicationResyncStatus
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "TargetsMap", za0001) err = msgp.WrapError(err, "TargetsMap", za0001)
@ -203,14 +201,12 @@ func (z *BucketReplicationResyncStatus) UnmarshalMsg(bts []byte) (o []byte, err
if z.TargetsMap == nil { if z.TargetsMap == nil {
z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002) z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002)
} else if len(z.TargetsMap) > 0 { } else if len(z.TargetsMap) > 0 {
for key := range z.TargetsMap { clear(z.TargetsMap)
delete(z.TargetsMap, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 TargetReplicationResyncStatus var za0002 TargetReplicationResyncStatus
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "TargetsMap") err = msgp.WrapError(err, "TargetsMap")
@ -288,19 +284,17 @@ func (z *MRFReplicateEntries) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Entries == nil { if z.Entries == nil {
z.Entries = make(map[string]MRFReplicateEntry, zb0002) z.Entries = make(map[string]MRFReplicateEntry, zb0002)
} else if len(z.Entries) > 0 { } else if len(z.Entries) > 0 {
for key := range z.Entries { clear(z.Entries)
delete(z.Entries, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 MRFReplicateEntry
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Entries") err = msgp.WrapError(err, "Entries")
return return
} }
var za0002 MRFReplicateEntry
var zb0003 uint32 var zb0003 uint32
zb0003, err = dc.ReadMapHeader() zb0003, err = dc.ReadMapHeader()
if err != nil { if err != nil {
@ -478,14 +472,12 @@ func (z *MRFReplicateEntries) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Entries == nil { if z.Entries == nil {
z.Entries = make(map[string]MRFReplicateEntry, zb0002) z.Entries = make(map[string]MRFReplicateEntry, zb0002)
} else if len(z.Entries) > 0 { } else if len(z.Entries) > 0 {
for key := range z.Entries { clear(z.Entries)
delete(z.Entries, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 MRFReplicateEntry var za0002 MRFReplicateEntry
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Entries") err = msgp.WrapError(err, "Entries")
@ -872,19 +864,17 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Targets == nil { if z.Targets == nil {
z.Targets = make(map[string]replication.StatusType, zb0002) z.Targets = make(map[string]replication.StatusType, zb0002)
} else if len(z.Targets) > 0 { } else if len(z.Targets) > 0 {
for key := range z.Targets { clear(z.Targets)
delete(z.Targets, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 replication.StatusType
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Targets") err = msgp.WrapError(err, "Targets")
return return
} }
var za0002 replication.StatusType
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Targets", za0001) err = msgp.WrapError(err, "Targets", za0001)
@ -902,19 +892,17 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.PurgeTargets == nil { if z.PurgeTargets == nil {
z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003) z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003)
} else if len(z.PurgeTargets) > 0 { } else if len(z.PurgeTargets) > 0 {
for key := range z.PurgeTargets { clear(z.PurgeTargets)
delete(z.PurgeTargets, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
zb0003-- zb0003--
var za0003 string var za0003 string
var za0004 VersionPurgeStatusType
za0003, err = dc.ReadString() za0003, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "PurgeTargets") err = msgp.WrapError(err, "PurgeTargets")
return return
} }
var za0004 VersionPurgeStatusType
err = za0004.DecodeMsg(dc) err = za0004.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003) err = msgp.WrapError(err, "PurgeTargets", za0003)
@ -932,19 +920,17 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.ResetStatusesMap == nil { if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0004) z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 { } else if len(z.ResetStatusesMap) > 0 {
for key := range z.ResetStatusesMap { clear(z.ResetStatusesMap)
delete(z.ResetStatusesMap, key)
}
} }
for zb0004 > 0 { for zb0004 > 0 {
zb0004-- zb0004--
var za0005 string var za0005 string
var za0006 string
za0005, err = dc.ReadString() za0005, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap") err = msgp.WrapError(err, "ResetStatusesMap")
return return
} }
var za0006 string
za0006, err = dc.ReadString() za0006, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap", za0005) err = msgp.WrapError(err, "ResetStatusesMap", za0005)
@ -1236,14 +1222,12 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Targets == nil { if z.Targets == nil {
z.Targets = make(map[string]replication.StatusType, zb0002) z.Targets = make(map[string]replication.StatusType, zb0002)
} else if len(z.Targets) > 0 { } else if len(z.Targets) > 0 {
for key := range z.Targets { clear(z.Targets)
delete(z.Targets, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 replication.StatusType var za0002 replication.StatusType
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Targets") err = msgp.WrapError(err, "Targets")
@ -1266,14 +1250,12 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.PurgeTargets == nil { if z.PurgeTargets == nil {
z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003) z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003)
} else if len(z.PurgeTargets) > 0 { } else if len(z.PurgeTargets) > 0 {
for key := range z.PurgeTargets { clear(z.PurgeTargets)
delete(z.PurgeTargets, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
var za0003 string
var za0004 VersionPurgeStatusType var za0004 VersionPurgeStatusType
zb0003-- zb0003--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts) za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "PurgeTargets") err = msgp.WrapError(err, "PurgeTargets")
@ -1296,14 +1278,12 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.ResetStatusesMap == nil { if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0004) z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 { } else if len(z.ResetStatusesMap) > 0 {
for key := range z.ResetStatusesMap { clear(z.ResetStatusesMap)
delete(z.ResetStatusesMap, key)
}
} }
for zb0004 > 0 { for zb0004 > 0 {
var za0005 string
var za0006 string var za0006 string
zb0004-- zb0004--
var za0005 string
za0005, bts, err = msgp.ReadStringBytes(bts) za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap") err = msgp.WrapError(err, "ResetStatusesMap")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -253,31 +253,31 @@ func getMustReplicateOptions(userDefined map[string]string, userTags string, sta
func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (dsc ReplicateDecision) { func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (dsc ReplicateDecision) {
// object layer not initialized we return with no decision. // object layer not initialized we return with no decision.
if newObjectLayerFn() == nil { if newObjectLayerFn() == nil {
return return dsc
} }
// Disable server-side replication on object prefixes which are excluded // Disable server-side replication on object prefixes which are excluded
// from versioning via the MinIO bucket versioning extension. // from versioning via the MinIO bucket versioning extension.
if !globalBucketVersioningSys.PrefixEnabled(bucket, object) { if !globalBucketVersioningSys.PrefixEnabled(bucket, object) {
return return dsc
} }
replStatus := mopts.ReplicationStatus() replStatus := mopts.ReplicationStatus()
if replStatus == replication.Replica && !mopts.isMetadataReplication() { if replStatus == replication.Replica && !mopts.isMetadataReplication() {
return return dsc
} }
if mopts.replicationRequest { // incoming replication request on target cluster if mopts.replicationRequest { // incoming replication request on target cluster
return return dsc
} }
cfg, err := getReplicationConfig(ctx, bucket) cfg, err := getReplicationConfig(ctx, bucket)
if err != nil { if err != nil {
replLogOnceIf(ctx, err, bucket) replLogOnceIf(ctx, err, bucket)
return return dsc
} }
if cfg == nil { if cfg == nil {
return return dsc
} }
opts := replication.ObjectOpts{ opts := replication.ObjectOpts{
@ -348,16 +348,16 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
rcfg, err := getReplicationConfig(ctx, bucket) rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil { if err != nil || rcfg == nil {
replLogOnceIf(ctx, err, bucket) replLogOnceIf(ctx, err, bucket)
return return dsc
} }
// If incoming request is a replication request, it does not need to be re-replicated. // If incoming request is a replication request, it does not need to be re-replicated.
if delOpts.ReplicationRequest { if delOpts.ReplicationRequest {
return return dsc
} }
// Skip replication if this object's prefix is excluded from being // Skip replication if this object's prefix is excluded from being
// versioned. // versioned.
if !delOpts.Versioned { if !delOpts.Versioned {
return return dsc
} }
opts := replication.ObjectOpts{ opts := replication.ObjectOpts{
Name: dobj.ObjectName, Name: dobj.ObjectName,
@ -617,10 +617,10 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if dobj.VersionID == "" && rinfo.PrevReplicationStatus == replication.Completed && dobj.OpType != replication.ExistingObjectReplicationType { if dobj.VersionID == "" && rinfo.PrevReplicationStatus == replication.Completed && dobj.OpType != replication.ExistingObjectReplicationType {
rinfo.ReplicationStatus = rinfo.PrevReplicationStatus rinfo.ReplicationStatus = rinfo.PrevReplicationStatus
return return rinfo
} }
if dobj.VersionID != "" && rinfo.VersionPurgeStatus == replication.VersionPurgeComplete { if dobj.VersionID != "" && rinfo.VersionPurgeStatus == replication.VersionPurgeComplete {
return return rinfo
} }
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN) replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
@ -641,7 +641,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
} else { } else {
rinfo.VersionPurgeStatus = replication.VersionPurgeFailed rinfo.VersionPurgeStatus = replication.VersionPurgeFailed
} }
return return rinfo
} }
// early return if already replicated delete marker for existing object replication/ healing delete markers // early return if already replicated delete marker for existing object replication/ healing delete markers
if dobj.DeleteMarkerVersionID != "" { if dobj.DeleteMarkerVersionID != "" {
@ -658,13 +658,13 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
// delete marker already replicated // delete marker already replicated
if dobj.VersionID == "" && rinfo.VersionPurgeStatus.Empty() { if dobj.VersionID == "" && rinfo.VersionPurgeStatus.Empty() {
rinfo.ReplicationStatus = replication.Completed rinfo.ReplicationStatus = replication.Completed
return return rinfo
} }
case isErrObjectNotFound(serr), isErrVersionNotFound(serr): case isErrObjectNotFound(serr), isErrVersionNotFound(serr):
// version being purged is already not found on target. // version being purged is already not found on target.
if !rinfo.VersionPurgeStatus.Empty() { if !rinfo.VersionPurgeStatus.Empty() {
rinfo.VersionPurgeStatus = replication.VersionPurgeComplete rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
return return rinfo
} }
case isErrReadQuorum(serr), isErrWriteQuorum(serr): case isErrReadQuorum(serr), isErrWriteQuorum(serr):
// destination has some quorum issues, perform removeObject() anyways // destination has some quorum issues, perform removeObject() anyways
@ -678,7 +678,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if err != nil && !toi.ReplicationReady { if err != nil && !toi.ReplicationReady {
rinfo.ReplicationStatus = replication.Failed rinfo.ReplicationStatus = replication.Failed
rinfo.Err = err rinfo.Err = err
return return rinfo
} }
} }
} }
@ -709,7 +709,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
rinfo.VersionPurgeStatus = replication.VersionPurgeComplete rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
} }
} }
return return rinfo
} }
func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string { func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string {
@ -910,7 +910,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
} }
putOpts.ServerSideEncryption = sseEnc putOpts.ServerSideEncryption = sseEnc
} }
return return putOpts, isMP, err
} }
type replicationAction string type replicationAction string
@ -1208,7 +1208,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
if ri.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) { if ri.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
rinfo.ReplicationStatus = replication.Completed rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationResynced = true rinfo.ReplicationResynced = true
return return rinfo
} }
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
@ -1220,7 +1220,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
return return rinfo
} }
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object) versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
@ -1244,7 +1244,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
}) })
replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID) replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID)
} }
return return rinfo
} }
defer gr.Close() defer gr.Close()
@ -1268,7 +1268,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
return return rinfo
} }
} }
@ -1307,7 +1307,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
return return rinfo
} }
var headerSize int var headerSize int
@ -1344,7 +1344,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
globalBucketTargetSys.markOffline(tgt.EndpointURL()) globalBucketTargetSys.markOffline(tgt.EndpointURL())
} }
} }
return return rinfo
} }
// replicateAll replicates metadata for specified version of the object to destination bucket // replicateAll replicates metadata for specified version of the object to destination bucket
@ -1380,7 +1380,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
return return rinfo
} }
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object) versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
@ -1405,7 +1405,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}) })
replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err)) replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err))
} }
return return rinfo
} }
defer gr.Close() defer gr.Close()
@ -1418,7 +1418,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) { if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
rinfo.ReplicationStatus = replication.Completed rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationResynced = true rinfo.ReplicationResynced = true
return return rinfo
} }
size, err := objInfo.GetActualSize() size, err := objInfo.GetActualSize()
@ -1431,7 +1431,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
return return rinfo
} }
// Set the encrypted size for SSE-C objects // Set the encrypted size for SSE-C objects
@ -1494,7 +1494,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
rinfo.ReplicationAction = rAction rinfo.ReplicationAction = rAction
rinfo.ReplicationStatus = replication.Completed rinfo.ReplicationStatus = replication.Completed
} }
return return rinfo
} }
} else { } else {
// SSEC objects will refuse HeadObject without the decryption key. // SSEC objects will refuse HeadObject without the decryption key.
@ -1528,7 +1528,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
return return rinfo
} }
} }
applyAction: applyAction:
@ -1594,7 +1594,7 @@ applyAction:
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
return return rinfo
} }
var headerSize int var headerSize int
for k, v := range putOpts.Header() { for k, v := range putOpts.Header() {
@ -1631,7 +1631,7 @@ applyAction:
} }
} }
} }
return return rinfo
} }
func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, object string, r io.Reader, objInfo ObjectInfo, opts minio.PutObjectOptions) (err error) { func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, object string, r io.Reader, objInfo ObjectInfo, opts minio.PutObjectOptions) (err error) {
@ -2677,7 +2677,7 @@ func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool {
// Resync returns true if replication reset is requested // Resync returns true if replication reset is requested
func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) { func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
if c.Empty() { if c.Empty() {
return return r
} }
// Now overlay existing object replication choices for target // Now overlay existing object replication choices for target
@ -2693,7 +2693,7 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc Replic
tgtArns := c.Config.FilterTargetArns(opts) tgtArns := c.Config.FilterTargetArns(opts)
// indicates no matching target with Existing object replication enabled. // indicates no matching target with Existing object replication enabled.
if len(tgtArns) == 0 { if len(tgtArns) == 0 {
return return r
} }
for _, t := range tgtArns { for _, t := range tgtArns {
opts.TargetArn = t opts.TargetArn = t
@ -2719,7 +2719,7 @@ func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatu
targets: make(map[string]ResyncTargetDecision, len(dsc.targetsMap)), targets: make(map[string]ResyncTargetDecision, len(dsc.targetsMap)),
} }
if c.remotes == nil { if c.remotes == nil {
return return r
} }
for _, tgt := range c.remotes.Targets { for _, tgt := range c.remotes.Targets {
d, ok := dsc.targetsMap[tgt.Arn] d, ok := dsc.targetsMap[tgt.Arn]
@ -2731,7 +2731,7 @@ func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatu
} }
r.targets[d.Arn] = resyncTarget(oi, tgt.Arn, tgt.ResetID, tgt.ResetBeforeDate, tgtStatuses[tgt.Arn]) r.targets[d.Arn] = resyncTarget(oi, tgt.Arn, tgt.ResetID, tgt.ResetBeforeDate, tgtStatuses[tgt.Arn])
} }
return return r
} }
func targetResetHeader(arn string) string { func targetResetHeader(arn string) string {
@ -2750,28 +2750,28 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim
if !ok { // existing object replication is enabled and object version is unreplicated so far. if !ok { // existing object replication is enabled and object version is unreplicated so far.
if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested
rd.Replicate = true rd.Replicate = true
return return rd
} }
// For existing object reset - this condition is needed // For existing object reset - this condition is needed
rd.Replicate = tgtStatus == "" rd.Replicate = tgtStatus == ""
return return rd
} }
if resetID == "" || resetBeforeDate.Equal(timeSentinel) { // no reset in progress if resetID == "" || resetBeforeDate.Equal(timeSentinel) { // no reset in progress
return return rd
} }
// if already replicated, return true if a new reset was requested. // if already replicated, return true if a new reset was requested.
splits := strings.SplitN(rs, ";", 2) splits := strings.SplitN(rs, ";", 2)
if len(splits) != 2 { if len(splits) != 2 {
return return rd
} }
newReset := splits[1] != resetID newReset := splits[1] != resetID
if !newReset && tgtStatus == replication.Completed { if !newReset && tgtStatus == replication.Completed {
// already replicated and no reset requested // already replicated and no reset requested
return return rd
} }
rd.Replicate = newReset && oi.ModTime.Before(resetBeforeDate) rd.Replicate = newReset && oi.ModTime.Before(resetBeforeDate)
return return rd
} }
const resyncTimeInterval = time.Minute * 1 const resyncTimeInterval = time.Minute * 1
@ -3422,12 +3422,12 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
roi = getHealReplicateObjectInfo(oi, rcfg) roi = getHealReplicateObjectInfo(oi, rcfg)
roi.RetryCount = uint32(retryCount) roi.RetryCount = uint32(retryCount)
if !roi.Dsc.ReplicateAny() { if !roi.Dsc.ReplicateAny() {
return return roi
} }
// early return if replication already done, otherwise we need to determine if this // early return if replication already done, otherwise we need to determine if this
// version is an existing object that needs healing. // version is an existing object that needs healing.
if oi.ReplicationStatus == replication.Completed && oi.VersionPurgeStatus.Empty() && !roi.ExistingObjResync.mustResync() { if oi.ReplicationStatus == replication.Completed && oi.VersionPurgeStatus.Empty() && !roi.ExistingObjResync.mustResync() {
return return roi
} }
if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() { if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {
@ -3457,14 +3457,14 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
roi.ReplicationStatus == replication.Failed || roi.ReplicationStatus == replication.Failed ||
roi.VersionPurgeStatus == replication.VersionPurgeFailed || roi.VersionPurgeStatus == replication.VersionPurgePending { roi.VersionPurgeStatus == replication.VersionPurgeFailed || roi.VersionPurgeStatus == replication.VersionPurgePending {
globalReplicationPool.Get().queueReplicaDeleteTask(dv) globalReplicationPool.Get().queueReplicaDeleteTask(dv)
return return roi
} }
// if replication status is Complete on DeleteMarker and existing object resync required // if replication status is Complete on DeleteMarker and existing object resync required
if roi.ExistingObjResync.mustResync() && (roi.ReplicationStatus == replication.Completed || roi.ReplicationStatus.Empty()) { if roi.ExistingObjResync.mustResync() && (roi.ReplicationStatus == replication.Completed || roi.ReplicationStatus.Empty()) {
queueReplicateDeletesWrapper(dv, roi.ExistingObjResync) queueReplicateDeletesWrapper(dv, roi.ExistingObjResync)
return return roi
} }
return return roi
} }
if roi.ExistingObjResync.mustResync() { if roi.ExistingObjResync.mustResync() {
roi.OpType = replication.ExistingObjectReplicationType roi.OpType = replication.ExistingObjectReplicationType
@ -3473,13 +3473,13 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
case replication.Pending, replication.Failed: case replication.Pending, replication.Failed:
roi.EventType = ReplicateHeal roi.EventType = ReplicateHeal
globalReplicationPool.Get().queueReplicaTask(roi) globalReplicationPool.Get().queueReplicaTask(roi)
return return roi
} }
if roi.ExistingObjResync.mustResync() { if roi.ExistingObjResync.mustResync() {
roi.EventType = ReplicateExisting roi.EventType = ReplicateExisting
globalReplicationPool.Get().queueReplicaTask(roi) globalReplicationPool.Get().queueReplicaTask(roi)
} }
return return roi
} }
const ( const (

View File

@ -38,7 +38,7 @@ type ReplicationLatency struct {
// Merge two replication latency into a new one // Merge two replication latency into a new one
func (rl ReplicationLatency) merge(other ReplicationLatency) (newReplLatency ReplicationLatency) { func (rl ReplicationLatency) merge(other ReplicationLatency) (newReplLatency ReplicationLatency) {
newReplLatency.UploadHistogram = rl.UploadHistogram.Merge(other.UploadHistogram) newReplLatency.UploadHistogram = rl.UploadHistogram.Merge(other.UploadHistogram)
return return newReplLatency
} }
// Get upload latency of each object size range // Get upload latency of each object size range
@ -49,7 +49,7 @@ func (rl ReplicationLatency) getUploadLatency() (ret map[string]uint64) {
// Convert nanoseconds to milliseconds // Convert nanoseconds to milliseconds
ret[sizeTagToString(k)] = uint64(v.avg() / time.Millisecond) ret[sizeTagToString(k)] = uint64(v.avg() / time.Millisecond)
} }
return return ret
} }
// Update replication upload latency with a new value // Update replication upload latency with a new value
@ -64,7 +64,7 @@ type ReplicationLastMinute struct {
func (rl ReplicationLastMinute) merge(other ReplicationLastMinute) (nl ReplicationLastMinute) { func (rl ReplicationLastMinute) merge(other ReplicationLastMinute) (nl ReplicationLastMinute) {
nl = ReplicationLastMinute{rl.LastMinute.merge(other.LastMinute)} nl = ReplicationLastMinute{rl.LastMinute.merge(other.LastMinute)}
return return nl
} }
func (rl *ReplicationLastMinute) addsize(n int64) { func (rl *ReplicationLastMinute) addsize(n int64) {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )
@ -617,19 +617,17 @@ func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Stats == nil { if z.Stats == nil {
z.Stats = make(map[string]*BucketReplicationStat, zb0002) z.Stats = make(map[string]*BucketReplicationStat, zb0002)
} else if len(z.Stats) > 0 { } else if len(z.Stats) > 0 {
for key := range z.Stats { clear(z.Stats)
delete(z.Stats, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 *BucketReplicationStat
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Stats") err = msgp.WrapError(err, "Stats")
return return
} }
var za0002 *BucketReplicationStat
if dc.IsNil() { if dc.IsNil() {
err = dc.ReadNil() err = dc.ReadNil()
if err != nil { if err != nil {
@ -943,14 +941,12 @@ func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error)
if z.Stats == nil { if z.Stats == nil {
z.Stats = make(map[string]*BucketReplicationStat, zb0002) z.Stats = make(map[string]*BucketReplicationStat, zb0002)
} else if len(z.Stats) > 0 { } else if len(z.Stats) > 0 {
for key := range z.Stats { clear(z.Stats)
delete(z.Stats, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 *BucketReplicationStat var za0002 *BucketReplicationStat
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Stats") err = msgp.WrapError(err, "Stats")
@ -1402,19 +1398,17 @@ func (z *BucketStatsMap) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Stats == nil { if z.Stats == nil {
z.Stats = make(map[string]BucketStats, zb0002) z.Stats = make(map[string]BucketStats, zb0002)
} else if len(z.Stats) > 0 { } else if len(z.Stats) > 0 {
for key := range z.Stats { clear(z.Stats)
delete(z.Stats, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 BucketStats
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Stats") err = msgp.WrapError(err, "Stats")
return return
} }
var za0002 BucketStats
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Stats", za0001) err = msgp.WrapError(err, "Stats", za0001)
@ -1526,14 +1520,12 @@ func (z *BucketStatsMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Stats == nil { if z.Stats == nil {
z.Stats = make(map[string]BucketStats, zb0002) z.Stats = make(map[string]BucketStats, zb0002)
} else if len(z.Stats) > 0 { } else if len(z.Stats) > 0 {
for key := range z.Stats { clear(z.Stats)
delete(z.Stats, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 BucketStats var za0002 BucketStats
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Stats") err = msgp.WrapError(err, "Stats")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -285,7 +285,7 @@ func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket, arnType str
} }
} }
} }
return return targets
} }
// ListBucketTargets - gets list of bucket targets for this bucket. // ListBucketTargets - gets list of bucket targets for this bucket.
@ -668,7 +668,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
// getRemoteARN gets existing ARN for an endpoint or generates a new one. // getRemoteARN gets existing ARN for an endpoint or generates a new one.
func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTarget, deplID string) (arn string, exists bool) { func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTarget, deplID string) (arn string, exists bool) {
if target == nil { if target == nil {
return return arn, exists
} }
sys.RLock() sys.RLock()
defer sys.RUnlock() defer sys.RUnlock()
@ -682,7 +682,7 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar
} }
} }
if !target.Type.IsValid() { if !target.Type.IsValid() {
return return arn, exists
} }
return generateARN(target, deplID), false return generateARN(target, deplID), false
} }

View File

@ -167,7 +167,7 @@ func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
}) })
sys.RUnlock() sys.RUnlock()
return return logs
} }
// Cancel - cancels the target // Cancel - cancels the target

View File

@ -1221,11 +1221,11 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
zb0002, err = dc.ReadArrayHeader() zb0002, err = dc.ReadArrayHeader()
if err != nil { if err != nil {
err = msgp.WrapError(err) err = msgp.WrapError(err)
return return err
} }
if zb0002 == 0 { if zb0002 == 0 {
*z = nil *z = nil
return return err
} }
*z = make(dataUsageHashMap, zb0002) *z = make(dataUsageHashMap, zb0002)
for i := uint32(0); i < zb0002; i++ { for i := uint32(0); i < zb0002; i++ {
@ -1234,12 +1234,12 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
zb0003, err = dc.ReadString() zb0003, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err) err = msgp.WrapError(err)
return return err
} }
(*z)[zb0003] = struct{}{} (*z)[zb0003] = struct{}{}
} }
} }
return return err
} }
// EncodeMsg implements msgp.Encodable // EncodeMsg implements msgp.Encodable
@ -1247,16 +1247,16 @@ func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteArrayHeader(uint32(len(z))) err = en.WriteArrayHeader(uint32(len(z)))
if err != nil { if err != nil {
err = msgp.WrapError(err) err = msgp.WrapError(err)
return return err
} }
for zb0004 := range z { for zb0004 := range z {
err = en.WriteString(zb0004) err = en.WriteString(zb0004)
if err != nil { if err != nil {
err = msgp.WrapError(err, zb0004) err = msgp.WrapError(err, zb0004)
return return err
} }
} }
return return err
} }
// MarshalMsg implements msgp.Marshaler // MarshalMsg implements msgp.Marshaler
@ -1266,7 +1266,7 @@ func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) {
for zb0004 := range z { for zb0004 := range z {
o = msgp.AppendString(o, zb0004) o = msgp.AppendString(o, zb0004)
} }
return return o, err
} }
// UnmarshalMsg implements msgp.Unmarshaler // UnmarshalMsg implements msgp.Unmarshaler
@ -1275,7 +1275,7 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err) err = msgp.WrapError(err)
return return o, err
} }
if zb0002 == 0 { if zb0002 == 0 {
*z = nil *z = nil
@ -1288,13 +1288,13 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
zb0003, bts, err = msgp.ReadStringBytes(bts) zb0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err) err = msgp.WrapError(err)
return return o, err
} }
(*z)[zb0003] = struct{}{} (*z)[zb0003] = struct{}{}
} }
} }
o = bts o = bts
return return o, err
} }
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
@ -1303,7 +1303,7 @@ func (z dataUsageHashMap) Msgsize() (s int) {
for zb0004 := range z { for zb0004 := range z {
s += msgp.StringPrefixSize + len(zb0004) s += msgp.StringPrefixSize + len(zb0004)
} }
return return s
} }
//msgp:encode ignore currentScannerCycle //msgp:encode ignore currentScannerCycle

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"time" "time"
@ -36,19 +36,17 @@ func (z *allTierStats) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Tiers == nil { if z.Tiers == nil {
z.Tiers = make(map[string]tierStats, zb0002) z.Tiers = make(map[string]tierStats, zb0002)
} else if len(z.Tiers) > 0 { } else if len(z.Tiers) > 0 {
for key := range z.Tiers { clear(z.Tiers)
delete(z.Tiers, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 tierStats
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Tiers") err = msgp.WrapError(err, "Tiers")
return return
} }
var za0002 tierStats
var zb0003 uint32 var zb0003 uint32
zb0003, err = dc.ReadMapHeader() zb0003, err = dc.ReadMapHeader()
if err != nil { if err != nil {
@ -207,14 +205,12 @@ func (z *allTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Tiers == nil { if z.Tiers == nil {
z.Tiers = make(map[string]tierStats, zb0002) z.Tiers = make(map[string]tierStats, zb0002)
} else if len(z.Tiers) > 0 { } else if len(z.Tiers) > 0 {
for key := range z.Tiers { clear(z.Tiers)
delete(z.Tiers, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 tierStats var za0002 tierStats
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Tiers") err = msgp.WrapError(err, "Tiers")
@ -415,19 +411,17 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntry, zb0002) z.Cache = make(map[string]dataUsageEntry, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 dataUsageEntry
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
var za0002 dataUsageEntry
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0001) err = msgp.WrapError(err, "Cache", za0001)
@ -543,14 +537,12 @@ func (z *dataUsageCache) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntry, zb0002) z.Cache = make(map[string]dataUsageEntry, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntry var za0002 dataUsageEntry
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
@ -799,19 +791,17 @@ func (z *dataUsageCacheV2) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV2, zb0002) z.Cache = make(map[string]dataUsageEntryV2, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 dataUsageEntryV2
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
var za0002 dataUsageEntryV2
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0001) err = msgp.WrapError(err, "Cache", za0001)
@ -864,14 +854,12 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV2, zb0002) z.Cache = make(map[string]dataUsageEntryV2, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV2 var za0002 dataUsageEntryV2
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
@ -942,19 +930,17 @@ func (z *dataUsageCacheV3) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV3, zb0002) z.Cache = make(map[string]dataUsageEntryV3, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 dataUsageEntryV3
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
var za0002 dataUsageEntryV3
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0001) err = msgp.WrapError(err, "Cache", za0001)
@ -1007,14 +993,12 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV3, zb0002) z.Cache = make(map[string]dataUsageEntryV3, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV3 var za0002 dataUsageEntryV3
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
@ -1085,19 +1069,17 @@ func (z *dataUsageCacheV4) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV4, zb0002) z.Cache = make(map[string]dataUsageEntryV4, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 dataUsageEntryV4
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
var za0002 dataUsageEntryV4
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0001) err = msgp.WrapError(err, "Cache", za0001)
@ -1150,14 +1132,12 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV4, zb0002) z.Cache = make(map[string]dataUsageEntryV4, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV4 var za0002 dataUsageEntryV4
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
@ -1228,19 +1208,17 @@ func (z *dataUsageCacheV5) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV5, zb0002) z.Cache = make(map[string]dataUsageEntryV5, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 dataUsageEntryV5
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
var za0002 dataUsageEntryV5
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0001) err = msgp.WrapError(err, "Cache", za0001)
@ -1293,14 +1271,12 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV5, zb0002) z.Cache = make(map[string]dataUsageEntryV5, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV5 var za0002 dataUsageEntryV5
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
@ -1371,19 +1347,17 @@ func (z *dataUsageCacheV6) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV6, zb0002) z.Cache = make(map[string]dataUsageEntryV6, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 dataUsageEntryV6
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
var za0002 dataUsageEntryV6
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0001) err = msgp.WrapError(err, "Cache", za0001)
@ -1436,14 +1410,12 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV6, zb0002) z.Cache = make(map[string]dataUsageEntryV6, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV6 var za0002 dataUsageEntryV6
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
@ -1514,19 +1486,17 @@ func (z *dataUsageCacheV7) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV7, zb0002) z.Cache = make(map[string]dataUsageEntryV7, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 dataUsageEntryV7
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
return return
} }
var za0002 dataUsageEntryV7
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache", za0001) err = msgp.WrapError(err, "Cache", za0001)
@ -1579,14 +1549,12 @@ func (z *dataUsageCacheV7) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil { if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV7, zb0002) z.Cache = make(map[string]dataUsageEntryV7, zb0002)
} else if len(z.Cache) > 0 { } else if len(z.Cache) > 0 {
for key := range z.Cache { clear(z.Cache)
delete(z.Cache, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV7 var za0002 dataUsageEntryV7
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Cache") err = msgp.WrapError(err, "Cache")
@ -1745,19 +1713,17 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
if z.AllTierStats.Tiers == nil { if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005) z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 { } else if len(z.AllTierStats.Tiers) > 0 {
for key := range z.AllTierStats.Tiers { clear(z.AllTierStats.Tiers)
delete(z.AllTierStats.Tiers, key)
}
} }
for zb0005 > 0 { for zb0005 > 0 {
zb0005-- zb0005--
var za0003 string var za0003 string
var za0004 tierStats
za0003, err = dc.ReadString() za0003, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers") err = msgp.WrapError(err, "AllTierStats", "Tiers")
return return
} }
var za0004 tierStats
var zb0006 uint32 var zb0006 uint32
zb0006, err = dc.ReadMapHeader() zb0006, err = dc.ReadMapHeader()
if err != nil { if err != nil {
@ -2211,14 +2177,12 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.AllTierStats.Tiers == nil { if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005) z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 { } else if len(z.AllTierStats.Tiers) > 0 {
for key := range z.AllTierStats.Tiers { clear(z.AllTierStats.Tiers)
delete(z.AllTierStats.Tiers, key)
}
} }
for zb0005 > 0 { for zb0005 > 0 {
var za0003 string
var za0004 tierStats var za0004 tierStats
zb0005-- zb0005--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts) za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers") err = msgp.WrapError(err, "AllTierStats", "Tiers")
@ -2984,19 +2948,17 @@ func (z *dataUsageEntryV7) DecodeMsg(dc *msgp.Reader) (err error) {
if z.AllTierStats.Tiers == nil { if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005) z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 { } else if len(z.AllTierStats.Tiers) > 0 {
for key := range z.AllTierStats.Tiers { clear(z.AllTierStats.Tiers)
delete(z.AllTierStats.Tiers, key)
}
} }
for zb0005 > 0 { for zb0005 > 0 {
zb0005-- zb0005--
var za0003 string var za0003 string
var za0004 tierStats
za0003, err = dc.ReadString() za0003, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers") err = msgp.WrapError(err, "AllTierStats", "Tiers")
return return
} }
var za0004 tierStats
var zb0006 uint32 var zb0006 uint32
zb0006, err = dc.ReadMapHeader() zb0006, err = dc.ReadMapHeader()
if err != nil { if err != nil {
@ -3192,14 +3154,12 @@ func (z *dataUsageEntryV7) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.AllTierStats.Tiers == nil { if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005) z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 { } else if len(z.AllTierStats.Tiers) > 0 {
for key := range z.AllTierStats.Tiers { clear(z.AllTierStats.Tiers)
delete(z.AllTierStats.Tiers, key)
}
} }
for zb0005 > 0 { for zb0005 > 0 {
var za0003 string
var za0004 tierStats var za0004 tierStats
zb0005-- zb0005--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts) za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers") err = msgp.WrapError(err, "AllTierStats", "Tiers")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -56,13 +56,13 @@ func TestDataUsageUpdate(t *testing.T) {
var s os.FileInfo var s os.FileInfo
s, err = os.Stat(item.Path) s, err = os.Stat(item.Path)
if err != nil { if err != nil {
return return sizeS, err
} }
sizeS.totalSize = s.Size() sizeS.totalSize = s.Size()
sizeS.versions++ sizeS.versions++
return sizeS, nil return sizeS, nil
} }
return return sizeS, err
} }
xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()} xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()}
xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) { xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) {
@ -279,13 +279,13 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
var s os.FileInfo var s os.FileInfo
s, err = os.Stat(item.Path) s, err = os.Stat(item.Path)
if err != nil { if err != nil {
return return sizeS, err
} }
sizeS.totalSize = s.Size() sizeS.totalSize = s.Size()
sizeS.versions++ sizeS.versions++
return return sizeS, err
} }
return return sizeS, err
} }
weSleep := func() bool { return false } weSleep := func() bool { return false }
@ -569,13 +569,13 @@ func TestDataUsageCacheSerialize(t *testing.T) {
var s os.FileInfo var s os.FileInfo
s, err = os.Stat(item.Path) s, err = os.Stat(item.Path)
if err != nil { if err != nil {
return return sizeS, err
} }
sizeS.versions++ sizeS.versions++
sizeS.totalSize = s.Size() sizeS.totalSize = s.Size()
return return sizeS, err
} }
return return sizeS, err
} }
xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()} xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()}
xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) { xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) {

View File

@ -87,7 +87,7 @@ func (d *DummyDataGen) Read(b []byte) (n int, err error) {
} }
err = io.EOF err = io.EOF
} }
return return n, err
} }
func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) { func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {

View File

@ -450,7 +450,7 @@ func setEncryptionMetadata(r *http.Request, bucket, object string, metadata map[
} }
} }
_, err = newEncryptMetadata(r.Context(), kind, keyID, key, bucket, object, metadata, kmsCtx) _, err = newEncryptMetadata(r.Context(), kind, keyID, key, bucket, object, metadata, kmsCtx)
return return err
} }
// EncryptRequest takes the client provided content and encrypts the data // EncryptRequest takes the client provided content and encrypts the data
@ -855,7 +855,7 @@ func tryDecryptETag(key []byte, encryptedETag string, sses3 bool) string {
func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, skipLen int64, seqNumber uint32, partStart int, err error) { func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, skipLen int64, seqNumber uint32, partStart int, err error) {
if _, ok := crypto.IsEncrypted(o.UserDefined); !ok { if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
err = errors.New("Object is not encrypted") err = errors.New("Object is not encrypted")
return return encOff, encLength, skipLen, seqNumber, partStart, err
} }
if rs == nil { if rs == nil {
@ -873,7 +873,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
partSize, err = sio.DecryptedSize(uint64(part.Size)) partSize, err = sio.DecryptedSize(uint64(part.Size))
if err != nil { if err != nil {
err = errObjectTampered err = errObjectTampered
return return encOff, encLength, skipLen, seqNumber, partStart, err
} }
sizes[i] = int64(partSize) sizes[i] = int64(partSize)
decObjSize += int64(partSize) decObjSize += int64(partSize)
@ -883,7 +883,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
partSize, err = sio.DecryptedSize(uint64(o.Size)) partSize, err = sio.DecryptedSize(uint64(o.Size))
if err != nil { if err != nil {
err = errObjectTampered err = errObjectTampered
return return encOff, encLength, skipLen, seqNumber, partStart, err
} }
sizes = []int64{int64(partSize)} sizes = []int64{int64(partSize)}
decObjSize = sizes[0] decObjSize = sizes[0]
@ -892,7 +892,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
var off, length int64 var off, length int64
off, length, err = rs.GetOffsetLength(decObjSize) off, length, err = rs.GetOffsetLength(decObjSize)
if err != nil { if err != nil {
return return encOff, encLength, skipLen, seqNumber, partStart, err
} }
// At this point, we have: // At this point, we have:

View File

@ -483,7 +483,7 @@ func TestGetDecryptedRange(t *testing.T) {
cumulativeSum += v cumulativeSum += v
cumulativeEncSum += getEncSize(v) cumulativeEncSum += getEncSize(v)
} }
return return o, l, skip, sn, ps
} }
for i, test := range testMPs { for i, test := range testMPs {

View File

@ -443,7 +443,7 @@ func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err err
layout: setArgs, layout: setArgs,
}) })
} }
return return layout, err
} }
// mergeDisksLayoutFromArgs supports with and without ellipses transparently. // mergeDisksLayoutFromArgs supports with and without ellipses transparently.
@ -475,7 +475,7 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
legacy: true, legacy: true,
pools: []poolDisksLayout{{layout: setArgs, cmdline: strings.Join(args, " ")}}, pools: []poolDisksLayout{{layout: setArgs, cmdline: strings.Join(args, " ")}},
} }
return return err
} }
for _, arg := range args { for _, arg := range args {
@ -489,7 +489,7 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
} }
ctxt.Layout.pools = append(ctxt.Layout.pools, poolDisksLayout{cmdline: arg, layout: setArgs}) ctxt.Layout.pools = append(ctxt.Layout.pools, poolDisksLayout{cmdline: arg, layout: setArgs})
} }
return return err
} }
// CreateServerEndpoints - validates and creates new endpoints from input args, supports // CreateServerEndpoints - validates and creates new endpoints from input args, supports

View File

@ -267,7 +267,7 @@ func (l EndpointServerPools) ESCount() (count int) {
for _, p := range l { for _, p := range l {
count += p.SetCount count += p.SetCount
} }
return return count
} }
// GetNodes returns a sorted list of nodes in this cluster // GetNodes returns a sorted list of nodes in this cluster
@ -297,7 +297,7 @@ func (l EndpointServerPools) GetNodes() (nodes []Node) {
sort.Slice(nodes, func(i, j int) bool { sort.Slice(nodes, func(i, j int) bool {
return nodes[i].Host < nodes[j].Host return nodes[i].Host < nodes[j].Host
}) })
return return nodes
} }
// GetPoolIdx return pool index // GetPoolIdx return pool index
@ -588,7 +588,7 @@ func (endpoints Endpoints) GetAllStrings() (all []string) {
for _, e := range endpoints { for _, e := range endpoints {
all = append(all, e.String()) all = append(all, e.String())
} }
return return all
} }
func hostResolveToLocalhost(endpoint Endpoint) bool { func hostResolveToLocalhost(endpoint Endpoint) bool {

View File

@ -69,7 +69,7 @@ func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int
}) })
return enc return enc
} }
return return e, err
} }
// EncodeData encodes the given data and returns the erasure-coded data. // EncodeData encodes the given data and returns the erasure-coded data.

View File

@ -283,7 +283,7 @@ func countPartNotSuccess(partErrs []int) (c int) {
c++ c++
} }
} }
return return c
} }
// checkObjectWithAllParts sets partsMetadata and onlineDisks when xl.meta is inexistant/corrupted or outdated // checkObjectWithAllParts sets partsMetadata and onlineDisks when xl.meta is inexistant/corrupted or outdated
@ -436,5 +436,5 @@ func checkObjectWithAllParts(ctx context.Context, onlineDisks []StorageAPI, part
dataErrsByDisk[disk][part] = dataErrsByPart[part][disk] dataErrsByDisk[disk][part] = dataErrsByPart[part][disk]
} }
} }
return return dataErrsByDisk, dataErrsByPart
} }

View File

@ -965,7 +965,7 @@ func danglingMetaErrsCount(cerrs []error) (notFoundCount int, nonActionableCount
nonActionableCount++ nonActionableCount++
} }
} }
return return notFoundCount, nonActionableCount
} }
func danglingPartErrsCount(results []int) (notFoundCount int, nonActionableCount int) { func danglingPartErrsCount(results []int) (notFoundCount int, nonActionableCount int) {
@ -980,7 +980,7 @@ func danglingPartErrsCount(results []int) (notFoundCount int, nonActionableCount
nonActionableCount++ nonActionableCount++
} }
} }
return return notFoundCount, nonActionableCount
} }
// Object is considered dangling/corrupted if and only // Object is considered dangling/corrupted if and only

View File

@ -521,7 +521,7 @@ func listObjectParities(partsMetadata []FileInfo, errs []error) (parities []int)
parities[index] = metadata.Erasure.ParityBlocks parities[index] = metadata.Erasure.ParityBlocks
} }
} }
return return parities
} }
// Returns per object readQuorum and writeQuorum // Returns per object readQuorum and writeQuorum

View File

@ -233,7 +233,7 @@ func (p poolMeta) ResumeBucketObject(idx int) (bucket, object string) {
bucket = p.Pools[idx].Decommission.Bucket bucket = p.Pools[idx].Decommission.Bucket
object = p.Pools[idx].Decommission.Object object = p.Pools[idx].Decommission.Object
} }
return return bucket, object
} }
func (p *poolMeta) TrackCurrentBucketObject(idx int, bucket string, object string) { func (p *poolMeta) TrackCurrentBucketObject(idx int, bucket string, object string) {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -446,7 +446,7 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
select { select {
case <-ctx.Done(): case <-ctx.Done():
doneCh <- ctx.Err() doneCh <- ctx.Err()
return return err
default: default:
} }
@ -464,7 +464,7 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
} }
rebalanceLogIf(GlobalContext, err) rebalanceLogIf(GlobalContext, err)
doneCh <- err doneCh <- err
return return err
} }
stopFn(0, nil) stopFn(0, nil)
z.bucketRebalanceDone(bucket, poolIdx) z.bucketRebalanceDone(bucket, poolIdx)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -702,7 +702,7 @@ func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
b.StandardSCParity = scParity b.StandardSCParity = scParity
b.RRSCParity = rrSCParity b.RRSCParity = rrSCParity
return return b
} }
func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo { func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo {

View File

@ -116,7 +116,7 @@ func diskErrToDriveState(err error) (state string) {
state = fmt.Sprintf("%s (cause: %s)", madmin.DriveStateUnknown, err) state = fmt.Sprintf("%s (cause: %s)", madmin.DriveStateUnknown, err)
} }
return return state
} }
func getOnlineOfflineDisksStats(disksInfo []madmin.Disk) (onlineDisks, offlineDisks madmin.BackendDisks) { func getOnlineOfflineDisksStats(disksInfo []madmin.Disk) (onlineDisks, offlineDisks madmin.BackendDisks) {

View File

@ -94,18 +94,18 @@ func availableMemory() (available uint64) {
if limit > 0 { if limit > 0 {
// A valid value is found, return its 90% // A valid value is found, return its 90%
available = (limit * 9) / 10 available = (limit * 9) / 10
return return available
} }
} // for all other platforms limits are based on virtual memory. } // for all other platforms limits are based on virtual memory.
memStats, err := mem.VirtualMemory() memStats, err := mem.VirtualMemory()
if err != nil { if err != nil {
return return available
} }
// A valid value is available return its 90% // A valid value is available return its 90%
available = (memStats.Available * 9) / 10 available = (memStats.Available * 9) / 10
return return available
} }
func (t *apiConfig) init(cfg api.Config, setDriveCounts []int, legacy bool) { func (t *apiConfig) init(cfg api.Config, setDriveCounts []int, legacy bool) {

View File

@ -466,7 +466,7 @@ func getHostName(r *http.Request) (hostName string) {
} else { } else {
hostName = r.Host hostName = r.Host
} }
return return hostName
} }
// Proxy any request to an endpoint. // Proxy any request to an endpoint.
@ -500,5 +500,5 @@ func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, e
r.URL.Host = ep.Host r.URL.Host = ep.Host
f.ServeHTTP(w, r) f.ServeHTTP(w, r)
return return success
} }

View File

@ -18,7 +18,11 @@
package cmd package cmd
import ( import (
"sync"
"testing" "testing"
"time"
xhttp "github.com/minio/minio/internal/http"
) )
// Test redactLDAPPwd() // Test redactLDAPPwd()
@ -52,3 +56,129 @@ func TestRedactLDAPPwd(t *testing.T) {
} }
} }
} }
// TestHTTPStatsRaceCondition tests the race condition fix for HTTPStats.
// This test specifically addresses the race between:
// - Write operations via updateStats.
// - Read operations via toServerHTTPStats(false).
func TestRaulStatsRaceCondition(t *testing.T) {
httpStats := newHTTPStats()
// Simulate the concurrent scenario from the original race condition:
// Multiple HTTP request handlers updating stats concurrently,
// while background processes are reading the stats for persistence.
const numWriters = 100 // Simulate many HTTP request handlers.
const numReaders = 50 // Simulate background stats readers.
const opsPerGoroutine = 100
var wg sync.WaitGroup
for i := range numWriters {
wg.Add(1)
go func(writerID int) {
defer wg.Done()
for j := 0; j < opsPerGoroutine; j++ {
switch j % 4 {
case 0:
httpStats.updateStats("GetObject", &xhttp.ResponseRecorder{})
case 1:
httpStats.totalS3Requests.Inc("PutObject")
case 2:
httpStats.totalS3Errors.Inc("DeleteObject")
case 3:
httpStats.currentS3Requests.Inc("ListObjects")
}
}
}(i)
}
for i := range numReaders {
wg.Add(1)
go func(readerID int) {
defer wg.Done()
for range opsPerGoroutine {
_ = httpStats.toServerHTTPStats(false)
_ = httpStats.totalS3Requests.Load(false)
_ = httpStats.currentS3Requests.Load(false)
time.Sleep(1 * time.Microsecond)
}
}(i)
}
wg.Wait()
finalStats := httpStats.toServerHTTPStats(false)
totalRequests := 0
for _, v := range finalStats.TotalS3Requests.APIStats {
totalRequests += v
}
if totalRequests == 0 {
t.Error("Expected some total requests to be recorded, but got zero")
}
t.Logf("Total requests recorded: %d", totalRequests)
t.Logf("Race condition test passed - no races detected")
}
// TestRaulHTTPAPIStatsRaceCondition tests concurrent access to HTTPAPIStats
// specifically: many Inc writers racing against Load readers, followed by an
// exact-count check proving no increments were lost.
func TestRaulHTTPAPIStatsRaceCondition(t *testing.T) {
	stats := &HTTPAPIStats{}

	const (
		numGoroutines   = 50
		opsPerGoroutine = 1000
	)

	var wg sync.WaitGroup
	// Writers: hammer a single key from many goroutines.
	for range numGoroutines {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for range opsPerGoroutine {
				stats.Inc("TestAPI")
			}
		}()
	}
	// Readers: take snapshots concurrently with the writers.
	for range numGoroutines / 2 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for range opsPerGoroutine / 2 {
				_ = stats.Load(false)
			}
		}()
	}
	wg.Wait()

	// Every increment must be visible once all goroutines are done.
	finalStats := stats.Load(false)
	expected := numGoroutines * opsPerGoroutine
	actual := finalStats["TestAPI"]
	if actual != expected {
		t.Errorf("Race condition detected: expected %d, got %d (lost %d increments)",
			expected, actual, expected-actual)
	}
}
// TestRaulBucketHTTPStatsRaceCondition tests concurrent access to
// bucket-level HTTP stats: updateHTTPStats writers interleaved with load
// readers, all on the same bucket name.
func TestRaulBucketHTTPStatsRaceCondition(t *testing.T) {
	bucketStats := newBucketHTTPStats()

	const (
		numGoroutines   = 50
		opsPerGoroutine = 100
	)

	var wg sync.WaitGroup
	for range numGoroutines {
		wg.Add(1)
		go func() {
			defer wg.Done()
			bucketName := "test-bucket"
			for range opsPerGoroutine {
				// nil recorder — presumably exercises the path that
				// tolerates a missing response writer; TODO confirm.
				bucketStats.updateHTTPStats(bucketName, "GetObject", nil)
				recorder := &xhttp.ResponseRecorder{}
				bucketStats.updateHTTPStats(bucketName, "GetObject", recorder)
				_ = bucketStats.load(bucketName)
			}
		}()
	}
	wg.Wait()

	// The bucket's stats map must exist after the first update.
	stats := bucketStats.load("test-bucket")
	if stats.totalS3Requests == nil {
		t.Error("Expected bucket stats to be initialized")
	}
	t.Logf("Bucket HTTP stats race test passed")
}

View File

@ -1128,7 +1128,7 @@ func (store *IAMStoreSys) listGroups(ctx context.Context) (res []string, err err
return true return true
}) })
} }
return return res, err
} }
// PolicyDBUpdate - adds or removes given policies to/from the user or group's // PolicyDBUpdate - adds or removes given policies to/from the user or group's
@ -1139,7 +1139,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
) { ) {
if name == "" { if name == "" {
err = errInvalidArgument err = errInvalidArgument
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
cache := store.lock() cache := store.lock()
@ -1163,12 +1163,12 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
g, ok := cache.iamGroupsMap[name] g, ok := cache.iamGroupsMap[name]
if !ok { if !ok {
err = errNoSuchGroup err = errNoSuchGroup
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
if g.Status == statusDisabled { if g.Status == statusDisabled {
err = errGroupDisabled err = errGroupDisabled
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
} }
mp, _ = cache.iamGroupPolicyMap.Load(name) mp, _ = cache.iamGroupPolicyMap.Load(name)
@ -1186,7 +1186,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
for _, p := range policiesToUpdate.ToSlice() { for _, p := range policiesToUpdate.ToSlice() {
if _, found := cache.iamPolicyDocsMap[p]; !found { if _, found := cache.iamPolicyDocsMap[p]; !found {
err = errNoSuchPolicy err = errNoSuchPolicy
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
} }
newPolicySet = existingPolicySet.Union(policiesToUpdate) newPolicySet = existingPolicySet.Union(policiesToUpdate)
@ -1198,7 +1198,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
// We return an error if the requested policy update will have no effect. // We return an error if the requested policy update will have no effect.
if policiesToUpdate.IsEmpty() { if policiesToUpdate.IsEmpty() {
err = errNoPolicyToAttachOrDetach err = errNoPolicyToAttachOrDetach
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
newPolicies := newPolicySet.ToSlice() newPolicies := newPolicySet.ToSlice()
@ -1210,7 +1210,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
// in this case, we delete the mapping from the store. // in this case, we delete the mapping from the store.
if len(newPolicies) == 0 { if len(newPolicies) == 0 {
if err = store.deleteMappedPolicy(ctx, name, userType, isGroup); err != nil && !errors.Is(err, errNoSuchPolicy) { if err = store.deleteMappedPolicy(ctx, name, userType, isGroup); err != nil && !errors.Is(err, errNoSuchPolicy) {
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
if !isGroup { if !isGroup {
if userType == stsUser { if userType == stsUser {
@ -1223,7 +1223,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
} }
} else { } else {
if err = store.saveMappedPolicy(ctx, name, userType, isGroup, newPolicyMapping); err != nil { if err = store.saveMappedPolicy(ctx, name, userType, isGroup, newPolicyMapping); err != nil {
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
if !isGroup { if !isGroup {
if userType == stsUser { if userType == stsUser {
@ -3052,7 +3052,7 @@ func extractJWTClaims(u UserIdentity) (jwtClaims *jwt.MapClaims, err error) {
break break
} }
} }
return return jwtClaims, err
} }
func validateSvcExpirationInUTC(expirationInUTC time.Time) error { func validateSvcExpirationInUTC(expirationInUTC time.Time) error {

View File

@ -1029,7 +1029,7 @@ func (sys *IAMSys) SetUserStatus(ctx context.Context, accessKey string, status m
updatedAt, err = sys.store.SetUserStatus(ctx, accessKey, status) updatedAt, err = sys.store.SetUserStatus(ctx, accessKey, status)
if err != nil { if err != nil {
return return updatedAt, err
} }
sys.notifyForUser(ctx, accessKey, false) sys.notifyForUser(ctx, accessKey, false)
@ -1985,7 +1985,7 @@ func (sys *IAMSys) PolicyDBSet(ctx context.Context, name, policy string, userTyp
updatedAt, err = sys.store.PolicyDBSet(ctx, name, policy, userType, isGroup) updatedAt, err = sys.store.PolicyDBSet(ctx, name, policy, userType, isGroup)
if err != nil { if err != nil {
return return updatedAt, err
} }
// Notify all other MinIO peers to reload policy // Notify all other MinIO peers to reload policy
@ -2008,7 +2008,7 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) { ) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) {
if !sys.Initialized() { if !sys.Initialized() {
err = errServerNotInitialized err = errServerNotInitialized
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
userOrGroup := r.User userOrGroup := r.User
@ -2021,24 +2021,24 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
if isGroup { if isGroup {
_, err = sys.GetGroupDescription(userOrGroup) _, err = sys.GetGroupDescription(userOrGroup)
if err != nil { if err != nil {
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
} else { } else {
var isTemp bool var isTemp bool
isTemp, _, err = sys.IsTempUser(userOrGroup) isTemp, _, err = sys.IsTempUser(userOrGroup)
if err != nil && err != errNoSuchUser { if err != nil && err != errNoSuchUser {
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
if isTemp { if isTemp {
err = errIAMActionNotAllowed err = errIAMActionNotAllowed
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
// When the user is root credential you are not allowed to // When the user is root credential you are not allowed to
// add policies for root user. // add policies for root user.
if userOrGroup == globalActiveCred.AccessKey { if userOrGroup == globalActiveCred.AccessKey {
err = errIAMActionNotAllowed err = errIAMActionNotAllowed
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
// Validate that user exists. // Validate that user exists.
@ -2046,14 +2046,14 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
_, userExists = sys.GetUser(ctx, userOrGroup) _, userExists = sys.GetUser(ctx, userOrGroup)
if !userExists { if !userExists {
err = errNoSuchUser err = errNoSuchUser
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
} }
updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(ctx, userOrGroup, isGroup, updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(ctx, userOrGroup, isGroup,
regUser, r.Policies, isAttach) regUser, r.Policies, isAttach)
if err != nil { if err != nil {
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
// Notify all other MinIO peers to reload policy // Notify all other MinIO peers to reload policy
@ -2077,7 +2077,7 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
UpdatedAt: updatedAt, UpdatedAt: updatedAt,
})) }))
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
// PolicyDBUpdateLDAP - adds or removes policies from a user or a group verified // PolicyDBUpdateLDAP - adds or removes policies from a user or a group verified
@ -2087,7 +2087,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) { ) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) {
if !sys.Initialized() { if !sys.Initialized() {
err = errServerNotInitialized err = errServerNotInitialized
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
var dn string var dn string
@ -2097,7 +2097,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
dnResult, err = sys.LDAPConfig.GetValidatedDNForUsername(r.User) dnResult, err = sys.LDAPConfig.GetValidatedDNForUsername(r.User)
if err != nil { if err != nil {
iamLogIf(ctx, err) iamLogIf(ctx, err)
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
if dnResult == nil { if dnResult == nil {
// dn not found - still attempt to detach if provided user is a DN. // dn not found - still attempt to detach if provided user is a DN.
@ -2105,7 +2105,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
dn = sys.LDAPConfig.QuickNormalizeDN(r.User) dn = sys.LDAPConfig.QuickNormalizeDN(r.User)
} else { } else {
err = errNoSuchUser err = errNoSuchUser
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
} else { } else {
dn = dnResult.NormDN dn = dnResult.NormDN
@ -2115,14 +2115,14 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
var underBaseDN bool var underBaseDN bool
if dnResult, underBaseDN, err = sys.LDAPConfig.GetValidatedGroupDN(nil, r.Group); err != nil { if dnResult, underBaseDN, err = sys.LDAPConfig.GetValidatedGroupDN(nil, r.Group); err != nil {
iamLogIf(ctx, err) iamLogIf(ctx, err)
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
if dnResult == nil || !underBaseDN { if dnResult == nil || !underBaseDN {
if !isAttach { if !isAttach {
dn = sys.LDAPConfig.QuickNormalizeDN(r.Group) dn = sys.LDAPConfig.QuickNormalizeDN(r.Group)
} else { } else {
err = errNoSuchGroup err = errNoSuchGroup
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
} else { } else {
// We use the group DN returned by the LDAP server (this may not // We use the group DN returned by the LDAP server (this may not
@ -2149,7 +2149,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate( updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(
ctx, dn, isGroup, userType, r.Policies, isAttach) ctx, dn, isGroup, userType, r.Policies, isAttach)
if err != nil { if err != nil {
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
// Notify all other MinIO peers to reload policy // Notify all other MinIO peers to reload policy
@ -2173,7 +2173,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
UpdatedAt: updatedAt, UpdatedAt: updatedAt,
})) }))
return return updatedAt, addedOrRemoved, effectivePolicies, err
} }
// PolicyDBGet - gets policy set on a user or group. If a list of groups is // PolicyDBGet - gets policy set on a user or group. If a list of groups is
@ -2376,7 +2376,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
// Now check if we have a sessionPolicy. // Now check if we have a sessionPolicy.
spolicy, ok := args.Claims[sessionPolicyNameExtracted] spolicy, ok := args.Claims[sessionPolicyNameExtracted]
if !ok { if !ok {
return return hasSessionPolicy, isAllowed
} }
hasSessionPolicy = true hasSessionPolicy = true
@ -2385,7 +2385,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
if !ok { if !ok {
// Sub policy if set, should be a string reject // Sub policy if set, should be a string reject
// malformed/malicious requests. // malformed/malicious requests.
return return hasSessionPolicy, isAllowed
} }
// Check if policy is parseable. // Check if policy is parseable.
@ -2393,7 +2393,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
if err != nil { if err != nil {
// Log any error in input session policy config. // Log any error in input session policy config.
iamLogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
return return hasSessionPolicy, isAllowed
} }
// SPECIAL CASE: For service accounts, any valid JSON is allowed as a // SPECIAL CASE: For service accounts, any valid JSON is allowed as a
@ -2417,7 +2417,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
// 2. do not allow empty statement policies for service accounts. // 2. do not allow empty statement policies for service accounts.
if subPolicy.Version == "" && subPolicy.Statements == nil && subPolicy.ID == "" { if subPolicy.Version == "" && subPolicy.Statements == nil && subPolicy.ID == "" {
hasSessionPolicy = false hasSessionPolicy = false
return return hasSessionPolicy, isAllowed
} }
// As the session policy exists, even if the parent is the root account, it // As the session policy exists, even if the parent is the root account, it
@ -2437,7 +2437,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe
// Now check if we have a sessionPolicy. // Now check if we have a sessionPolicy.
spolicy, ok := args.Claims[sessionPolicyNameExtracted] spolicy, ok := args.Claims[sessionPolicyNameExtracted]
if !ok { if !ok {
return return hasSessionPolicy, isAllowed
} }
hasSessionPolicy = true hasSessionPolicy = true
@ -2446,7 +2446,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe
if !ok { if !ok {
// Sub policy if set, should be a string reject // Sub policy if set, should be a string reject
// malformed/malicious requests. // malformed/malicious requests.
return return hasSessionPolicy, isAllowed
} }
// Check if policy is parseable. // Check if policy is parseable.
@ -2454,12 +2454,12 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe
if err != nil { if err != nil {
// Log any error in input session policy config. // Log any error in input session policy config.
iamLogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
return return hasSessionPolicy, isAllowed
} }
// Policy without Version string value reject it. // Policy without Version string value reject it.
if subPolicy.Version == "" { if subPolicy.Version == "" {
return return hasSessionPolicy, isAllowed
} }
// As the session policy exists, even if the parent is the root account, it // As the session policy exists, even if the parent is the root account, it

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -159,5 +159,5 @@ func pickRelevantGoroutines() (gs []string) {
gs = append(gs, g) gs = append(gs, g)
} }
sort.Strings(gs) sort.Strings(gs)
return return gs
} }

View File

@ -162,7 +162,7 @@ func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool
reply = l.removeEntry(resource, args, &lri) || reply reply = l.removeEntry(resource, args, &lri) || reply
} }
} }
return return reply, err
} }
// removeEntry based on the uid of the lock message, removes a single entry from the // removeEntry based on the uid of the lock message, removes a single entry from the

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"time" "time"
@ -19,21 +19,19 @@ func (z *localLockMap) DecodeMsg(dc *msgp.Reader) (err error) {
if (*z) == nil { if (*z) == nil {
(*z) = make(localLockMap, zb0004) (*z) = make(localLockMap, zb0004)
} else if len((*z)) > 0 { } else if len((*z)) > 0 {
for key := range *z { clear((*z))
delete((*z), key)
}
} }
var field []byte var field []byte
_ = field _ = field
for zb0004 > 0 { for zb0004 > 0 {
zb0004-- zb0004--
var zb0001 string var zb0001 string
var zb0002 []lockRequesterInfo
zb0001, err = dc.ReadString() zb0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err) err = msgp.WrapError(err)
return return
} }
var zb0002 []lockRequesterInfo
var zb0005 uint32 var zb0005 uint32
zb0005, err = dc.ReadArrayHeader() zb0005, err = dc.ReadArrayHeader()
if err != nil { if err != nil {
@ -115,16 +113,14 @@ func (z *localLockMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
if (*z) == nil { if (*z) == nil {
(*z) = make(localLockMap, zb0004) (*z) = make(localLockMap, zb0004)
} else if len((*z)) > 0 { } else if len((*z)) > 0 {
for key := range *z { clear((*z))
delete((*z), key)
}
} }
var field []byte var field []byte
_ = field _ = field
for zb0004 > 0 { for zb0004 > 0 {
var zb0001 string
var zb0002 []lockRequesterInfo var zb0002 []lockRequesterInfo
zb0004-- zb0004--
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts) zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err) err = msgp.WrapError(err)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -339,7 +339,7 @@ func triggerExpiryAndRepl(ctx context.Context, o listPathOptions, obj metaCacheE
if !o.Versioned && !o.V1 { if !o.Versioned && !o.V1 {
fi, err := obj.fileInfo(o.Bucket) fi, err := obj.fileInfo(o.Bucket)
if err != nil { if err != nil {
return return skip
} }
objInfo := fi.ToObjectInfo(o.Bucket, obj.name, versioned) objInfo := fi.ToObjectInfo(o.Bucket, obj.name, versioned)
if o.Lifecycle != nil { if o.Lifecycle != nil {
@ -350,7 +350,7 @@ func triggerExpiryAndRepl(ctx context.Context, o listPathOptions, obj metaCacheE
fiv, err := obj.fileInfoVersions(o.Bucket) fiv, err := obj.fileInfoVersions(o.Bucket)
if err != nil { if err != nil {
return return skip
} }
// Expire all versions if needed, if not attempt to queue for replication. // Expire all versions if needed, if not attempt to queue for replication.
@ -369,7 +369,7 @@ func triggerExpiryAndRepl(ctx context.Context, o listPathOptions, obj metaCacheE
queueReplicationHeal(ctx, o.Bucket, objInfo, o.Replication, 0) queueReplicationHeal(ctx, o.Bucket, objInfo, o.Replication, 0)
} }
return return skip
} }
func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions) (entries metaCacheEntriesSorted, err error) { func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions) (entries metaCacheEntriesSorted, err error) {

View File

@ -653,7 +653,7 @@ func calcCommonWritesDeletes(infos []DiskInfo, readQuorum int) (commonWrite, com
commonWrite = filter(writes) commonWrite = filter(writes)
commonDelete = filter(deletes) commonDelete = filter(deletes)
return return commonWrite, commonDelete
} }
func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) { func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -40,7 +40,7 @@ type collectMetricsOpts struct {
func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) { func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
if types == madmin.MetricsNone { if types == madmin.MetricsNone {
return return m
} }
byHostName := globalMinioAddr byHostName := globalMinioAddr
@ -51,7 +51,7 @@ func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m ma
if _, ok := opts.hosts[server.Endpoint]; ok { if _, ok := opts.hosts[server.Endpoint]; ok {
byHostName = server.Endpoint byHostName = server.Endpoint
} else { } else {
return return m
} }
} }
@ -221,7 +221,7 @@ func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskM
func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) { func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
if !globalIsDistErasure { if !globalIsDistErasure {
return return m
} }
all := globalNotificationSys.GetMetrics(ctx, types, opts) all := globalNotificationSys.GetMetrics(ctx, types, opts)
for _, remote := range all { for _, remote := range all {

View File

@ -1704,7 +1704,7 @@ func getMinioProcMetrics() *MetricsGroupV2 {
p, err := procfs.Self() p, err := procfs.Self()
if err != nil { if err != nil {
internalLogOnceIf(ctx, err, string(nodeMetricNamespace)) internalLogOnceIf(ctx, err, string(nodeMetricNamespace))
return return metrics
} }
openFDs, _ := p.FileDescriptorsLen() openFDs, _ := p.FileDescriptorsLen()
@ -1819,7 +1819,7 @@ func getMinioProcMetrics() *MetricsGroupV2 {
Value: stat.CPUTime(), Value: stat.CPUTime(),
}) })
} }
return return metrics
}) })
return mg return mg
} }
@ -1833,7 +1833,7 @@ func getGoMetrics() *MetricsGroupV2 {
Description: getMinIOGORoutineCountMD(), Description: getMinIOGORoutineCountMD(),
Value: float64(runtime.NumGoroutine()), Value: float64(runtime.NumGoroutine()),
}) })
return return metrics
}) })
return mg return mg
} }
@ -2632,7 +2632,7 @@ func getMinioVersionMetrics() *MetricsGroupV2 {
Description: getMinIOVersionMD(), Description: getMinIOVersionMD(),
VariableLabels: map[string]string{"version": Version}, VariableLabels: map[string]string{"version": Version},
}) })
return return metrics
}) })
return mg return mg
} }
@ -2653,7 +2653,7 @@ func getNodeHealthMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
Description: getNodeOfflineTotalMD(), Description: getNodeOfflineTotalMD(),
Value: float64(nodesDown), Value: float64(nodesDown),
}) })
return return metrics
}) })
return mg return mg
} }
@ -2666,11 +2666,11 @@ func getMinioHealingMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
mg.RegisterRead(func(_ context.Context) (metrics []MetricV2) { mg.RegisterRead(func(_ context.Context) (metrics []MetricV2) {
bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
if !exists { if !exists {
return return metrics
} }
if bgSeq.lastHealActivity.IsZero() { if bgSeq.lastHealActivity.IsZero() {
return return metrics
} }
metrics = make([]MetricV2, 0, 5) metrics = make([]MetricV2, 0, 5)
@ -2681,7 +2681,7 @@ func getMinioHealingMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
metrics = append(metrics, getObjectsScanned(bgSeq)...) metrics = append(metrics, getObjectsScanned(bgSeq)...)
metrics = append(metrics, getHealedItems(bgSeq)...) metrics = append(metrics, getHealedItems(bgSeq)...)
metrics = append(metrics, getFailedItems(bgSeq)...) metrics = append(metrics, getFailedItems(bgSeq)...)
return return metrics
}) })
return mg return mg
} }
@ -2696,7 +2696,7 @@ func getFailedItems(seq *healSequence) (m []MetricV2) {
Value: float64(v), Value: float64(v),
}) })
} }
return return m
} }
func getHealedItems(seq *healSequence) (m []MetricV2) { func getHealedItems(seq *healSequence) (m []MetricV2) {
@ -2709,7 +2709,7 @@ func getHealedItems(seq *healSequence) (m []MetricV2) {
Value: float64(v), Value: float64(v),
}) })
} }
return return m
} }
func getObjectsScanned(seq *healSequence) (m []MetricV2) { func getObjectsScanned(seq *healSequence) (m []MetricV2) {
@ -2722,7 +2722,7 @@ func getObjectsScanned(seq *healSequence) (m []MetricV2) {
Value: float64(v), Value: float64(v),
}) })
} }
return return m
} }
func getDistLockMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { func getDistLockMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
@ -3030,7 +3030,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
VariableLabels: map[string]string{"api": api}, VariableLabels: map[string]string{"api": api},
}) })
} }
return return metrics
} }
// If we have too many, limit them // If we have too many, limit them
@ -3099,7 +3099,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
} }
} }
return return metrics
}) })
return mg return mg
} }
@ -3142,7 +3142,7 @@ func getNetworkMetrics() *MetricsGroupV2 {
Description: getS3ReceivedBytesMD(), Description: getS3ReceivedBytesMD(),
Value: float64(connStats.s3InputBytes), Value: float64(connStats.s3InputBytes),
}) })
return return metrics
}) })
return mg return mg
} }
@ -3155,19 +3155,19 @@ func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) {
objLayer := newObjectLayerFn() objLayer := newObjectLayerFn()
if objLayer == nil { if objLayer == nil {
return return metrics
} }
metrics = make([]MetricV2, 0, 50) metrics = make([]MetricV2, 0, 50)
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil { if err != nil {
metricsLogIf(ctx, err) metricsLogIf(ctx, err)
return return metrics
} }
// data usage has not captured any data yet. // data usage has not captured any data yet.
if dataUsageInfo.LastUpdate.IsZero() { if dataUsageInfo.LastUpdate.IsZero() {
return return metrics
} }
metrics = append(metrics, MetricV2{ metrics = append(metrics, MetricV2{
@ -3248,7 +3248,7 @@ func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
Value: float64(clusterBuckets), Value: float64(clusterBuckets),
}) })
return return metrics
}) })
return mg return mg
} }
@ -3265,12 +3265,12 @@ func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil { if err != nil {
metricsLogIf(ctx, err) metricsLogIf(ctx, err)
return return metrics
} }
// data usage has not captured any data yet. // data usage has not captured any data yet.
if dataUsageInfo.LastUpdate.IsZero() { if dataUsageInfo.LastUpdate.IsZero() {
return return metrics
} }
metrics = append(metrics, MetricV2{ metrics = append(metrics, MetricV2{
@ -3454,7 +3454,7 @@ func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
VariableLabels: map[string]string{"bucket": bucket}, VariableLabels: map[string]string{"bucket": bucket},
}) })
} }
return return metrics
}) })
return mg return mg
} }
@ -3498,17 +3498,17 @@ func getClusterTierMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
objLayer := newObjectLayerFn() objLayer := newObjectLayerFn()
if globalTierConfigMgr.Empty() { if globalTierConfigMgr.Empty() {
return return metrics
} }
dui, err := loadDataUsageFromBackend(ctx, objLayer) dui, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil { if err != nil {
metricsLogIf(ctx, err) metricsLogIf(ctx, err)
return return metrics
} }
// data usage has not captured any tier stats yet. // data usage has not captured any tier stats yet.
if dui.TierStats == nil { if dui.TierStats == nil {
return return metrics
} }
return dui.tierMetrics() return dui.tierMetrics()
@ -3614,7 +3614,7 @@ func getLocalStorageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
Value: float64(storageInfo.Backend.RRSCParity), Value: float64(storageInfo.Backend.RRSCParity),
}) })
return return metrics
}) })
return mg return mg
} }
@ -3755,7 +3755,7 @@ func getClusterHealthMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
}) })
} }
return return metrics
}) })
return mg return mg
@ -3776,7 +3776,7 @@ func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
m.Merge(&mRemote) m.Merge(&mRemote)
if m.Aggregated.BatchJobs == nil { if m.Aggregated.BatchJobs == nil {
return return metrics
} }
for _, mj := range m.Aggregated.BatchJobs.Jobs { for _, mj := range m.Aggregated.BatchJobs.Jobs {
@ -3822,7 +3822,7 @@ func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
}, },
) )
} }
return return metrics
}) })
return mg return mg
} }
@ -3875,7 +3875,7 @@ func getClusterStorageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
Description: getClusterDrivesTotalMD(), Description: getClusterDrivesTotalMD(),
Value: float64(totalDrives.Sum()), Value: float64(totalDrives.Sum()),
}) })
return return metrics
}) })
return mg return mg
} }
@ -4264,7 +4264,7 @@ func getOrderedLabelValueArrays(labelsWithValue map[string]string) (labels, valu
labels = append(labels, l) labels = append(labels, l)
values = append(values, v) values = append(values, v)
} }
return return labels, values
} }
// newMinioCollectorNode describes the collector // newMinioCollectorNode describes the collector

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )
@ -297,14 +297,12 @@ func (z *MetricV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.StaticLabels == nil { if z.StaticLabels == nil {
z.StaticLabels = make(map[string]string, zb0002) z.StaticLabels = make(map[string]string, zb0002)
} else if len(z.StaticLabels) > 0 { } else if len(z.StaticLabels) > 0 {
for key := range z.StaticLabels { clear(z.StaticLabels)
delete(z.StaticLabels, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 string var za0002 string
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "StaticLabels") err = msgp.WrapError(err, "StaticLabels")
@ -333,14 +331,12 @@ func (z *MetricV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.VariableLabels == nil { if z.VariableLabels == nil {
z.VariableLabels = make(map[string]string, zb0003) z.VariableLabels = make(map[string]string, zb0003)
} else if len(z.VariableLabels) > 0 { } else if len(z.VariableLabels) > 0 {
for key := range z.VariableLabels { clear(z.VariableLabels)
delete(z.VariableLabels, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
var za0003 string
var za0004 string var za0004 string
zb0003-- zb0003--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts) za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "VariableLabels") err = msgp.WrapError(err, "VariableLabels")
@ -369,14 +365,12 @@ func (z *MetricV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Histogram == nil { if z.Histogram == nil {
z.Histogram = make(map[string]uint64, zb0004) z.Histogram = make(map[string]uint64, zb0004)
} else if len(z.Histogram) > 0 { } else if len(z.Histogram) > 0 {
for key := range z.Histogram { clear(z.Histogram)
delete(z.Histogram, key)
}
} }
for zb0004 > 0 { for zb0004 > 0 {
var za0005 string
var za0006 uint64 var za0006 uint64
zb0004-- zb0004--
var za0005 string
za0005, bts, err = msgp.ReadStringBytes(bts) za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Histogram") err = msgp.WrapError(err, "Histogram")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"testing" "testing"

View File

@ -60,7 +60,7 @@ type nodesOnline struct {
func newNodesUpDownCache() *cachevalue.Cache[nodesOnline] { func newNodesUpDownCache() *cachevalue.Cache[nodesOnline] {
loadNodesUpDown := func(ctx context.Context) (v nodesOnline, err error) { loadNodesUpDown := func(ctx context.Context) (v nodesOnline, err error) {
v.Online, v.Offline = globalNotificationSys.GetPeerOnlineCount() v.Online, v.Offline = globalNotificationSys.GetPeerOnlineCount()
return return v, err
} }
return cachevalue.NewFromFunc(1*time.Minute, return cachevalue.NewFromFunc(1*time.Minute,
cachevalue.Opts{ReturnLastGood: true}, cachevalue.Opts{ReturnLastGood: true},
@ -88,12 +88,12 @@ func newDataUsageInfoCache() *cachevalue.Cache[DataUsageInfo] {
loadDataUsage := func(ctx context.Context) (u DataUsageInfo, err error) { loadDataUsage := func(ctx context.Context) (u DataUsageInfo, err error) {
objLayer := newObjectLayerFn() objLayer := newObjectLayerFn()
if objLayer == nil { if objLayer == nil {
return return u, err
} }
// Collect cluster level object metrics. // Collect cluster level object metrics.
u, err = loadDataUsageFromBackend(GlobalContext, objLayer) u, err = loadDataUsageFromBackend(GlobalContext, objLayer)
return return u, err
} }
return cachevalue.NewFromFunc(1*time.Minute, return cachevalue.NewFromFunc(1*time.Minute,
cachevalue.Opts{ReturnLastGood: true}, cachevalue.Opts{ReturnLastGood: true},
@ -104,11 +104,11 @@ func newESetHealthResultCache() *cachevalue.Cache[HealthResult] {
loadHealth := func(ctx context.Context) (r HealthResult, err error) { loadHealth := func(ctx context.Context) (r HealthResult, err error) {
objLayer := newObjectLayerFn() objLayer := newObjectLayerFn()
if objLayer == nil { if objLayer == nil {
return return r, err
} }
r = objLayer.Health(GlobalContext, HealthOptions{}) r = objLayer.Health(GlobalContext, HealthOptions{})
return return r, err
} }
return cachevalue.NewFromFunc(1*time.Minute, return cachevalue.NewFromFunc(1*time.Minute,
cachevalue.Opts{ReturnLastGood: true}, cachevalue.Opts{ReturnLastGood: true},
@ -146,7 +146,7 @@ func getDriveIOStatMetrics(ioStats madmin.DiskIOStats, duration time.Duration) (
// TotalTicks is in milliseconds // TotalTicks is in milliseconds
m.percUtil = float64(ioStats.TotalTicks) * 100 / (durationSecs * 1000) m.percUtil = float64(ioStats.TotalTicks) * 100 / (durationSecs * 1000)
return return m
} }
func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] { func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] {
@ -161,7 +161,7 @@ func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] {
loadDriveMetrics := func(ctx context.Context) (v storageMetrics, err error) { loadDriveMetrics := func(ctx context.Context) (v storageMetrics, err error) {
objLayer := newObjectLayerFn() objLayer := newObjectLayerFn()
if objLayer == nil { if objLayer == nil {
return return v, err
} }
storageInfo := objLayer.LocalStorageInfo(GlobalContext, true) storageInfo := objLayer.LocalStorageInfo(GlobalContext, true)
@ -195,7 +195,7 @@ func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] {
prevDriveIOStatsRefreshedAt = now prevDriveIOStatsRefreshedAt = now
prevDriveIOStatsMu.Unlock() prevDriveIOStatsMu.Unlock()
return return v, err
} }
return cachevalue.NewFromFunc(1*time.Minute, return cachevalue.NewFromFunc(1*time.Minute,
@ -220,7 +220,7 @@ func newCPUMetricsCache() *cachevalue.Cache[madmin.CPUMetrics] {
} }
} }
return return v, err
} }
return cachevalue.NewFromFunc(1*time.Minute, return cachevalue.NewFromFunc(1*time.Minute,
@ -245,7 +245,7 @@ func newMemoryMetricsCache() *cachevalue.Cache[madmin.MemInfo] {
} }
} }
return return v, err
} }
return cachevalue.NewFromFunc(1*time.Minute, return cachevalue.NewFromFunc(1*time.Minute,
@ -268,7 +268,7 @@ func newClusterStorageInfoCache() *cachevalue.Cache[storageMetrics] {
offlineDrives: offlineDrives.Sum(), offlineDrives: offlineDrives.Sum(),
totalDrives: totalDrives.Sum(), totalDrives: totalDrives.Sum(),
} }
return return v, err
} }
return cachevalue.NewFromFunc(1*time.Minute, return cachevalue.NewFromFunc(1*time.Minute,
cachevalue.Opts{ReturnLastGood: true}, cachevalue.Opts{ReturnLastGood: true},

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"bytes" "bytes"
"testing" "testing"

View File

@ -126,7 +126,7 @@ func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSo
n.lockMapMutex.Unlock() n.lockMapMutex.Unlock()
} }
return return locked
} }
// Unlock the namespace resource. // Unlock the namespace resource.

View File

@ -88,7 +88,7 @@ func mustGetLocalLoopbacks() (ipList set.StringSet) {
ipList.Add(ip.String()) ipList.Add(ip.String())
} }
} }
return return ipList
} }
// mustGetLocalIP4 returns IPv4 addresses of localhost. It panics on error. // mustGetLocalIP4 returns IPv4 addresses of localhost. It panics on error.
@ -99,7 +99,7 @@ func mustGetLocalIP4() (ipList set.StringSet) {
ipList.Add(ip.String()) ipList.Add(ip.String())
} }
} }
return return ipList
} }
// mustGetLocalIP6 returns IPv6 addresses of localhost. It panics on error. // mustGetLocalIP6 returns IPv6 addresses of localhost. It panics on error.
@ -110,7 +110,7 @@ func mustGetLocalIP6() (ipList set.StringSet) {
ipList.Add(ip.String()) ipList.Add(ip.String())
} }
} }
return return ipList
} }
// getHostIP returns IP address of given host. // getHostIP returns IP address of given host.

View File

@ -26,7 +26,7 @@ func GetTotalCapacity(diskInfo []madmin.Disk) (capacity uint64) {
for _, disk := range diskInfo { for _, disk := range diskInfo {
capacity += disk.TotalSpace capacity += disk.TotalSpace
} }
return return capacity
} }
// GetTotalUsableCapacity gets the total usable capacity in the cluster. // GetTotalUsableCapacity gets the total usable capacity in the cluster.
@ -42,7 +42,7 @@ func GetTotalUsableCapacity(diskInfo []madmin.Disk, s StorageInfo) (capacity uin
capacity += disk.TotalSpace capacity += disk.TotalSpace
} }
} }
return return capacity
} }
// GetTotalCapacityFree gets the total capacity free in the cluster. // GetTotalCapacityFree gets the total capacity free in the cluster.
@ -50,7 +50,7 @@ func GetTotalCapacityFree(diskInfo []madmin.Disk) (capacity uint64) {
for _, d := range diskInfo { for _, d := range diskInfo {
capacity += d.AvailableSpace capacity += d.AvailableSpace
} }
return return capacity
} }
// GetTotalUsableCapacityFree gets the total usable capacity free in the cluster. // GetTotalUsableCapacityFree gets the total usable capacity free in the cluster.
@ -66,5 +66,5 @@ func GetTotalUsableCapacityFree(diskInfo []madmin.Disk, s StorageInfo) (capacity
capacity += disk.AvailableSpace capacity += disk.AvailableSpace
} }
} }
return return capacity
} }

View File

@ -375,7 +375,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
internalLogIf(ctx, err) internalLogIf(ctx, err)
} }
return return profilingDataFound
} }
// VerifyBinary - asks remote peers to verify the checksum // VerifyBinary - asks remote peers to verify the checksum
@ -1180,7 +1180,7 @@ func (sys *NotificationSys) GetPeerOnlineCount() (nodesOnline, nodesOffline int)
nodesOffline++ nodesOffline++
} }
} }
return return nodesOnline, nodesOffline
} }
// NewNotificationSys - creates new notification system object. // NewNotificationSys - creates new notification system object.

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/replication"
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
@ -1120,14 +1120,12 @@ func (z *ListPartsInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.UserDefined == nil { if z.UserDefined == nil {
z.UserDefined = make(map[string]string, zb0003) z.UserDefined = make(map[string]string, zb0003)
} else if len(z.UserDefined) > 0 { } else if len(z.UserDefined) > 0 {
for key := range z.UserDefined { clear(z.UserDefined)
delete(z.UserDefined, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
var za0002 string
var za0003 string var za0003 string
zb0003-- zb0003--
var za0002 string
za0002, bts, err = msgp.ReadStringBytes(bts) za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "UserDefined") err = msgp.WrapError(err, "UserDefined")
@ -1259,14 +1257,12 @@ func (z *MultipartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.UserDefined == nil { if z.UserDefined == nil {
z.UserDefined = make(map[string]string, zb0002) z.UserDefined = make(map[string]string, zb0002)
} else if len(z.UserDefined) > 0 { } else if len(z.UserDefined) > 0 {
for key := range z.UserDefined { clear(z.UserDefined)
delete(z.UserDefined, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 string var za0002 string
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "UserDefined") err = msgp.WrapError(err, "UserDefined")
@ -1665,14 +1661,12 @@ func (z *ObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.UserDefined == nil { if z.UserDefined == nil {
z.UserDefined = make(map[string]string, zb0002) z.UserDefined = make(map[string]string, zb0002)
} else if len(z.UserDefined) > 0 { } else if len(z.UserDefined) > 0 {
for key := range z.UserDefined { clear(z.UserDefined)
delete(z.UserDefined, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 string var za0002 string
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "UserDefined") err = msgp.WrapError(err, "UserDefined")
@ -2223,14 +2217,12 @@ func (z *ReplicateObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.TargetStatuses == nil { if z.TargetStatuses == nil {
z.TargetStatuses = make(map[string]replication.StatusType, zb0002) z.TargetStatuses = make(map[string]replication.StatusType, zb0002)
} else if len(z.TargetStatuses) > 0 { } else if len(z.TargetStatuses) > 0 {
for key := range z.TargetStatuses { clear(z.TargetStatuses)
delete(z.TargetStatuses, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 replication.StatusType var za0002 replication.StatusType
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "TargetStatuses") err = msgp.WrapError(err, "TargetStatuses")
@ -2253,14 +2245,12 @@ func (z *ReplicateObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.TargetPurgeStatuses == nil { if z.TargetPurgeStatuses == nil {
z.TargetPurgeStatuses = make(map[string]VersionPurgeStatusType, zb0003) z.TargetPurgeStatuses = make(map[string]VersionPurgeStatusType, zb0003)
} else if len(z.TargetPurgeStatuses) > 0 { } else if len(z.TargetPurgeStatuses) > 0 {
for key := range z.TargetPurgeStatuses { clear(z.TargetPurgeStatuses)
delete(z.TargetPurgeStatuses, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
var za0003 string
var za0004 VersionPurgeStatusType var za0004 VersionPurgeStatusType
zb0003-- zb0003--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts) za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "TargetPurgeStatuses") err = msgp.WrapError(err, "TargetPurgeStatuses")

View File

@ -225,11 +225,11 @@ func (o *ObjectOptions) SetDeleteReplicationState(dsc ReplicateDecision, vID str
func (o *ObjectOptions) PutReplicationState() (r ReplicationState) { func (o *ObjectOptions) PutReplicationState() (r ReplicationState) {
rstatus, ok := o.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] rstatus, ok := o.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus]
if !ok { if !ok {
return return r
} }
r.ReplicationStatusInternal = rstatus r.ReplicationStatusInternal = rstatus
r.Targets = replicationStatusesMap(rstatus) r.Targets = replicationStatusesMap(rstatus)
return return r
} }
// SetEvalMetadataFn sets the metadata evaluation function // SetEvalMetadataFn sets the metadata evaluation function

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )

View File

@ -50,33 +50,33 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str
if crypto.SSECopy.IsRequested(header) { if crypto.SSECopy.IsRequested(header) {
clientKey, err = crypto.SSECopy.ParseHTTP(header) clientKey, err = crypto.SSECopy.ParseHTTP(header)
if err != nil { if err != nil {
return return opts, err
} }
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
return return opts, err
} }
opts.ServerSideEncryption = encrypt.SSECopy(sse) opts.ServerSideEncryption = encrypt.SSECopy(sse)
return return opts, err
} }
return return opts, err
} }
if crypto.SSEC.IsRequested(header) { if crypto.SSEC.IsRequested(header) {
clientKey, err = crypto.SSEC.ParseHTTP(header) clientKey, err = crypto.SSEC.ParseHTTP(header)
if err != nil { if err != nil {
return return opts, err
} }
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
return return opts, err
} }
opts.ServerSideEncryption = sse opts.ServerSideEncryption = sse
return return opts, err
} }
if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) { if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) {
opts.ServerSideEncryption = encrypt.NewSSE() opts.ServerSideEncryption = encrypt.NewSSE()
} }
return return opts, err
} }
// get ObjectOptions for GET calls from encryption headers // get ObjectOptions for GET calls from encryption headers
@ -173,7 +173,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
apiErr = toAPIError(ctx, vErr) apiErr = toAPIError(ctx, vErr)
} }
valid = false valid = false
return return opts, valid
} }
opts.MaxParts, err = parseIntHeader(bucket, object, r.Header, xhttp.AmzMaxParts) opts.MaxParts, err = parseIntHeader(bucket, object, r.Header, xhttp.AmzMaxParts)
@ -181,7 +181,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
apiErr = toAPIError(ctx, err) apiErr = toAPIError(ctx, err)
argumentName = strings.ToLower(xhttp.AmzMaxParts) argumentName = strings.ToLower(xhttp.AmzMaxParts)
valid = false valid = false
return return opts, valid
} }
if opts.MaxParts == 0 { if opts.MaxParts == 0 {
@ -193,7 +193,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
apiErr = toAPIError(ctx, err) apiErr = toAPIError(ctx, err)
argumentName = strings.ToLower(xhttp.AmzPartNumberMarker) argumentName = strings.ToLower(xhttp.AmzPartNumberMarker)
valid = false valid = false
return return opts, valid
} }
opts.ObjectAttributes = parseObjectAttributes(r.Header) opts.ObjectAttributes = parseObjectAttributes(r.Header)
@ -201,7 +201,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
apiErr = errorCodes.ToAPIErr(ErrInvalidAttributeName) apiErr = errorCodes.ToAPIErr(ErrInvalidAttributeName)
argumentName = strings.ToLower(xhttp.AmzObjectAttributes) argumentName = strings.ToLower(xhttp.AmzObjectAttributes)
valid = false valid = false
return return opts, valid
} }
for tag := range opts.ObjectAttributes { for tag := range opts.ObjectAttributes {
@ -216,11 +216,11 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
argumentName = strings.ToLower(xhttp.AmzObjectAttributes) argumentName = strings.ToLower(xhttp.AmzObjectAttributes)
argumentValue = tag argumentValue = tag
valid = false valid = false
return return opts, valid
} }
} }
return return opts, valid
} }
func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) { func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) {
@ -233,13 +233,13 @@ func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) {
} }
} }
return return attributes
} }
func parseIntHeader(bucket, object string, h http.Header, headerName string) (value int, err error) { func parseIntHeader(bucket, object string, h http.Header, headerName string) (value int, err error) {
stringInt := strings.TrimSpace(h.Get(headerName)) stringInt := strings.TrimSpace(h.Get(headerName))
if stringInt == "" { if stringInt == "" {
return return value, err
} }
value, err = strconv.Atoi(stringInt) value, err = strconv.Atoi(stringInt)
if err != nil { if err != nil {
@ -249,7 +249,7 @@ func parseIntHeader(bucket, object string, h http.Header, headerName string) (va
Err: fmt.Errorf("Unable to parse %s, value should be an integer", headerName), Err: fmt.Errorf("Unable to parse %s, value should be an integer", headerName),
} }
} }
return return value, err
} }
func parseBoolHeader(bucket, object string, h http.Header, headerName string) (bool, error) { func parseBoolHeader(bucket, object string, h http.Header, headerName string) (bool, error) {

View File

@ -2662,7 +2662,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
return err return err
} }
} }
return return err
}) })
deleteObject := objectAPI.DeleteObject deleteObject := objectAPI.DeleteObject

View File

@ -245,7 +245,7 @@ func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucke
for _, l := range oi.partLengths { for _, l := range oi.partLengths {
sum += l sum += l
} }
return return sum
} }
// set of inputs for uploading the objects before tests for // set of inputs for uploading the objects before tests for
@ -677,7 +677,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str
for _, l := range oi.partLengths { for _, l := range oi.partLengths {
sum += l sum += l
} }
return return sum
} }
// set of inputs for uploading the objects before tests for // set of inputs for uploading the objects before tests for

View File

@ -136,7 +136,7 @@ func parseDirEnt(buf []byte) (consumed int, name []byte, typ os.FileMode, err er
} }
consumed = int(dirent.Reclen) consumed = int(dirent.Reclen)
if direntInode(dirent) == 0 { // File absent in directory. if direntInode(dirent) == 0 { // File absent in directory.
return return consumed, name, typ, err
} }
switch dirent.Type { switch dirent.Type {
case syscall.DT_REG: case syscall.DT_REG:
@ -349,7 +349,7 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er
entries = append(entries, nameStr) entries = append(entries, nameStr)
} }
return return entries, err
} }
func globalSync() { func globalSync() {

View File

@ -265,7 +265,7 @@ func (client *peerRESTClient) StartProfiling(ctx context.Context, profiler strin
func (client *peerRESTClient) DownloadProfileData(ctx context.Context) (data map[string][]byte, err error) { func (client *peerRESTClient) DownloadProfileData(ctx context.Context) (data map[string][]byte, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodDownloadProfilingData, nil, nil, -1) respBody, err := client.callWithContext(ctx, peerRESTMethodDownloadProfilingData, nil, nil, -1)
if err != nil { if err != nil {
return return data, err
} }
defer xhttp.DrainBody(respBody) defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&data) err = gob.NewDecoder(respBody).Decode(&data)

View File

@ -146,7 +146,7 @@ func (s *peerRESTServer) DeletePolicyHandler(mss *grid.MSS) (np grid.NoPayload,
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
// LoadPolicyHandler - reloads a policy on the server. // LoadPolicyHandler - reloads a policy on the server.
@ -165,7 +165,7 @@ func (s *peerRESTServer) LoadPolicyHandler(mss *grid.MSS) (np grid.NoPayload, ne
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
// LoadPolicyMappingHandler - reloads a policy mapping on the server. // LoadPolicyMappingHandler - reloads a policy mapping on the server.
@ -189,7 +189,7 @@ func (s *peerRESTServer) LoadPolicyMappingHandler(mss *grid.MSS) (np grid.NoPayl
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
// DeleteServiceAccountHandler - deletes a service account on the server. // DeleteServiceAccountHandler - deletes a service account on the server.
@ -208,7 +208,7 @@ func (s *peerRESTServer) DeleteServiceAccountHandler(mss *grid.MSS) (np grid.NoP
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
// LoadServiceAccountHandler - reloads a service account on the server. // LoadServiceAccountHandler - reloads a service account on the server.
@ -227,7 +227,7 @@ func (s *peerRESTServer) LoadServiceAccountHandler(mss *grid.MSS) (np grid.NoPay
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
// DeleteUserHandler - deletes a user on the server. // DeleteUserHandler - deletes a user on the server.
@ -246,7 +246,7 @@ func (s *peerRESTServer) DeleteUserHandler(mss *grid.MSS) (np grid.NoPayload, ne
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
// LoadUserHandler - reloads a user on the server. // LoadUserHandler - reloads a user on the server.
@ -275,7 +275,7 @@ func (s *peerRESTServer) LoadUserHandler(mss *grid.MSS) (np grid.NoPayload, nerr
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
// LoadGroupHandler - reloads group along with members list. // LoadGroupHandler - reloads group along with members list.
@ -295,7 +295,7 @@ func (s *peerRESTServer) LoadGroupHandler(mss *grid.MSS) (np grid.NoPayload, ner
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
// StartProfilingHandler - Issues the start profiling command. // StartProfilingHandler - Issues the start profiling command.
@ -479,7 +479,7 @@ func (s *peerRESTServer) DeleteBucketMetadataHandler(mss *grid.MSS) (np grid.NoP
if localMetacacheMgr != nil { if localMetacacheMgr != nil {
localMetacacheMgr.deleteBucketCache(bucketName) localMetacacheMgr.deleteBucketCache(bucketName)
} }
return return np, nerr
} }
// GetAllBucketStatsHandler - fetches bucket replication stats for all buckets from this peer. // GetAllBucketStatsHandler - fetches bucket replication stats for all buckets from this peer.
@ -554,7 +554,7 @@ func (s *peerRESTServer) LoadBucketMetadataHandler(mss *grid.MSS) (np grid.NoPay
globalBucketTargetSys.UpdateAllTargets(bucketName, meta.bucketTargetConfig) globalBucketTargetSys.UpdateAllTargets(bucketName, meta.bucketTargetConfig)
} }
return return np, nerr
} }
func (s *peerRESTServer) GetMetacacheListingHandler(opts *listPathOptions) (*metacache, *grid.RemoteErr) { func (s *peerRESTServer) GetMetacacheListingHandler(opts *listPathOptions) (*metacache, *grid.RemoteErr) {
@ -885,7 +885,7 @@ func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np g
} }
peersLogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI)) peersLogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI))
return return np, nerr
} }
func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -896,14 +896,14 @@ func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload
pools, ok := objAPI.(*erasureServerPools) pools, ok := objAPI.(*erasureServerPools)
if !ok { if !ok {
return return np, nerr
} }
if err := pools.ReloadPoolMeta(context.Background()); err != nil { if err := pools.ReloadPoolMeta(context.Background()); err != nil {
return np, grid.NewRemoteErr(err) return np, grid.NewRemoteErr(err)
} }
return return np, nerr
} }
func (s *peerRESTServer) HandlerClearUploadID(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { func (s *peerRESTServer) HandlerClearUploadID(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -914,7 +914,7 @@ func (s *peerRESTServer) HandlerClearUploadID(mss *grid.MSS) (np grid.NoPayload,
pools, ok := objAPI.(*erasureServerPools) pools, ok := objAPI.(*erasureServerPools)
if !ok { if !ok {
return return np, nerr
} }
// No need to return errors, this is not a highly strict operation. // No need to return errors, this is not a highly strict operation.
@ -923,7 +923,7 @@ func (s *peerRESTServer) HandlerClearUploadID(mss *grid.MSS) (np grid.NoPayload,
pools.ClearUploadID(uploadID) pools.ClearUploadID(uploadID)
} }
return return np, nerr
} }
func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -938,7 +938,7 @@ func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload,
} }
pools.StopRebalance() pools.StopRebalance()
return return np, nerr
} }
func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -965,7 +965,7 @@ func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayl
go pools.StartRebalance() go pools.StartRebalance()
} }
return return np, nerr
} }
func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -981,7 +981,7 @@ func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid
} }
}() }()
return return np, nerr
} }
// ConsoleLogHandler sends console logs of this node back to peer rest client // ConsoleLogHandler sends console logs of this node back to peer rest client

View File

@ -422,7 +422,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io
cli, err := globalSiteReplicationSys.getAdminClient(ctx, deploymentID) cli, err := globalSiteReplicationSys.getAdminClient(ctx, deploymentID)
if err != nil { if err != nil {
result.Error = err.Error() result.Error = err.Error()
return return result
} }
rp := cli.GetEndpointURL() rp := cli.GetEndpointURL()
reqURL := &url.URL{ reqURL := &url.URL{
@ -434,7 +434,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io
req, err := http.NewRequestWithContext(ctx, http.MethodPost, reqURL.String(), reader) req, err := http.NewRequestWithContext(ctx, http.MethodPost, reqURL.String(), reader)
if err != nil { if err != nil {
result.Error = err.Error() result.Error = err.Error()
return return result
} }
client := &http.Client{ client := &http.Client{
Transport: globalRemoteTargetTransport, Transport: globalRemoteTargetTransport,
@ -442,7 +442,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io
resp, err := client.Do(req) resp, err := client.Do(req)
if err != nil { if err != nil {
result.Error = err.Error() result.Error = err.Error()
return return result
} }
defer xhttp.DrainBody(resp.Body) defer xhttp.DrainBody(resp.Body)
err = gob.NewDecoder(resp.Body).Decode(&result) err = gob.NewDecoder(resp.Body).Decode(&result)
@ -451,5 +451,5 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io
if err != nil { if err != nil {
result.Error = err.Error() result.Error = err.Error()
} }
return return result
} }

View File

@ -344,7 +344,7 @@ func validateClientKeyIsTrusted(c ssh.ConnMetadata, clientKey ssh.PublicKey) (er
} }
_, err = checker.Authenticate(c, clientKey) _, err = checker.Authenticate(c, clientKey)
return return err
} }
type sftpLogger struct{} type sftpLogger struct{}

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT. // Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import ( import (
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
) )
@ -209,19 +209,17 @@ func (z *RTimedMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
if z.ErrCounts == nil { if z.ErrCounts == nil {
z.ErrCounts = make(map[string]int, zb0003) z.ErrCounts = make(map[string]int, zb0003)
} else if len(z.ErrCounts) > 0 { } else if len(z.ErrCounts) > 0 {
for key := range z.ErrCounts { clear(z.ErrCounts)
delete(z.ErrCounts, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
zb0003-- zb0003--
var za0001 string var za0001 string
var za0002 int
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "ErrCounts") err = msgp.WrapError(err, "ErrCounts")
return return
} }
var za0002 int
za0002, err = dc.ReadInt() za0002, err = dc.ReadInt()
if err != nil { if err != nil {
err = msgp.WrapError(err, "ErrCounts", za0001) err = msgp.WrapError(err, "ErrCounts", za0001)
@ -426,14 +424,12 @@ func (z *RTimedMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.ErrCounts == nil { if z.ErrCounts == nil {
z.ErrCounts = make(map[string]int, zb0003) z.ErrCounts = make(map[string]int, zb0003)
} else if len(z.ErrCounts) > 0 { } else if len(z.ErrCounts) > 0 {
for key := range z.ErrCounts { clear(z.ErrCounts)
delete(z.ErrCounts, key)
}
} }
for zb0003 > 0 { for zb0003 > 0 {
var za0001 string
var za0002 int var za0002 int
zb0003-- zb0003--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "ErrCounts") err = msgp.WrapError(err, "ErrCounts")
@ -839,19 +835,17 @@ func (z *SRMetricsSummary) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Metrics == nil { if z.Metrics == nil {
z.Metrics = make(map[string]SRMetric, zb0002) z.Metrics = make(map[string]SRMetric, zb0002)
} else if len(z.Metrics) > 0 { } else if len(z.Metrics) > 0 {
for key := range z.Metrics { clear(z.Metrics)
delete(z.Metrics, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 SRMetric
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "Metrics") err = msgp.WrapError(err, "Metrics")
return return
} }
var za0002 SRMetric
err = za0002.DecodeMsg(dc) err = za0002.DecodeMsg(dc)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Metrics", za0001) err = msgp.WrapError(err, "Metrics", za0001)
@ -1070,14 +1064,12 @@ func (z *SRMetricsSummary) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Metrics == nil { if z.Metrics == nil {
z.Metrics = make(map[string]SRMetric, zb0002) z.Metrics = make(map[string]SRMetric, zb0002)
} else if len(z.Metrics) > 0 { } else if len(z.Metrics) > 0 {
for key := range z.Metrics { clear(z.Metrics)
delete(z.Metrics, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 SRMetric var za0002 SRMetric
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "Metrics") err = msgp.WrapError(err, "Metrics")
@ -1161,19 +1153,17 @@ func (z *SRStats) DecodeMsg(dc *msgp.Reader) (err error) {
if z.M == nil { if z.M == nil {
z.M = make(map[string]*SRStatus, zb0002) z.M = make(map[string]*SRStatus, zb0002)
} else if len(z.M) > 0 { } else if len(z.M) > 0 {
for key := range z.M { clear(z.M)
delete(z.M, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
zb0002-- zb0002--
var za0001 string var za0001 string
var za0002 *SRStatus
za0001, err = dc.ReadString() za0001, err = dc.ReadString()
if err != nil { if err != nil {
err = msgp.WrapError(err, "M") err = msgp.WrapError(err, "M")
return return
} }
var za0002 *SRStatus
if dc.IsNil() { if dc.IsNil() {
err = dc.ReadNil() err = dc.ReadNil()
if err != nil { if err != nil {
@ -1327,14 +1317,12 @@ func (z *SRStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.M == nil { if z.M == nil {
z.M = make(map[string]*SRStatus, zb0002) z.M = make(map[string]*SRStatus, zb0002)
} else if len(z.M) > 0 { } else if len(z.M) > 0 {
for key := range z.M { clear(z.M)
delete(z.M, key)
}
} }
for zb0002 > 0 { for zb0002 > 0 {
var za0001 string
var za0002 *SRStatus var za0002 *SRStatus
zb0002-- zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts) za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil { if err != nil {
err = msgp.WrapError(err, "M") err = msgp.WrapError(err, "M")

Some files were not shown because too many files have changed in this diff Show More