allow quorum fileInfo to pick same parityBlocks (#17454)

Bonus: allow replication to proceed for 503 errors, such as those with error code SlowDownRead
Harshavardhana 2023-06-18 18:20:15 -07:00 committed by GitHub
parent 35ef35b5c1
commit 1443b5927a
19 changed files with 339 additions and 271 deletions

View File

@@ -1525,7 +1525,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
 		return shouldTrace(entry, traceOpts)
 	})
 	if err != nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrSlowDown), r.URL)
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
@@ -1605,7 +1605,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
 	err = globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
 	if err != nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrSlowDown), r.URL)
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}

View File

@@ -185,7 +185,8 @@ const (
 	ErrMetadataTooLarge
 	ErrUnsupportedMetadata
 	ErrMaximumExpires
-	ErrSlowDown
+	ErrSlowDownRead
+	ErrSlowDownWrite
 	ErrInvalidPrefixMarker
 	ErrBadRequest
 	ErrKeyTooLongError
@@ -257,9 +258,9 @@ const (
 	ErrInvalidResourceName
 	ErrInvalidLifecycleQueryParameter
 	ErrServerNotInitialized
-	ErrOperationTimedOut
+	ErrRequestTimedout
 	ErrClientDisconnected
-	ErrOperationMaxedOut
+	ErrTooManyRequests
 	ErrInvalidRequest
 	ErrTransitionStorageClassNotFoundError
 	// MinIO storage class error codes
@@ -843,11 +844,16 @@ var errorCodes = errorCodeMap{
 		Description:    "Request is not valid yet",
 		HTTPStatusCode: http.StatusForbidden,
 	},
-	ErrSlowDown: {
-		Code:           "SlowDown",
+	ErrSlowDownRead: {
+		Code:           "SlowDownRead",
 		Description:    "Resource requested is unreadable, please reduce your request rate",
 		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
+	ErrSlowDownWrite: {
+		Code:           "SlowDownWrite",
+		Description:    "Resource requested is unwritable, please reduce your request rate",
+		HTTPStatusCode: http.StatusServiceUnavailable,
+	},
 	ErrInvalidPrefixMarker: {
 		Code:           "InvalidPrefixMarker",
 		Description:    "Invalid marker prefix combination",
@@ -1416,7 +1422,7 @@ var errorCodes = errorCodeMap{
 		Description:    "Cannot respond to plain-text request from TLS-encrypted server",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	ErrOperationTimedOut: {
+	ErrRequestTimedout: {
 		Code:           "RequestTimeout",
 		Description:    "A timeout occurred while trying to lock a resource, please reduce your request rate",
 		HTTPStatusCode: http.StatusServiceUnavailable,
@@ -1426,9 +1432,9 @@ var errorCodes = errorCodeMap{
 		Description:    "Client disconnected before response was ready",
 		HTTPStatusCode: 499, // No official code, use nginx value.
 	},
-	ErrOperationMaxedOut: {
-		Code:           "SlowDown",
-		Description:    "A timeout exceeded while waiting to proceed with the request, please reduce your request rate",
+	ErrTooManyRequests: {
+		Code:           "TooManyRequests",
+		Description:    "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
 		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
 	ErrUnsupportedMetadata: {
@@ -2039,7 +2045,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 	}
 	// Only return ErrClientDisconnected if the provided context is actually canceled.
-	// This way downstream context.Canceled will still report ErrOperationTimedOut
+	// This way downstream context.Canceled will still report ErrRequestTimedout
 	if contextCanceled(ctx) && errors.Is(ctx.Err(), context.Canceled) {
 		return ErrClientDisconnected
 	}
@@ -2083,9 +2089,9 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 	case errInvalidStorageClass:
 		apiErr = ErrInvalidStorageClass
 	case errErasureReadQuorum:
-		apiErr = ErrSlowDown
+		apiErr = ErrSlowDownRead
 	case errErasureWriteQuorum:
-		apiErr = ErrSlowDown
+		apiErr = ErrSlowDownWrite
 	// SSE errors
 	case errInvalidEncryptionParameters:
 		apiErr = ErrInvalidEncryptionParameters
@@ -2119,10 +2125,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrKMSKeyNotFoundException
 	case errKMSDefaultKeyAlreadyConfigured:
 		apiErr = ErrKMSDefaultKeyAlreadyConfigured
-	case context.Canceled, context.DeadlineExceeded:
-		apiErr = ErrOperationTimedOut
-	case errDiskNotFound:
-		apiErr = ErrSlowDown
+	case context.Canceled:
+		apiErr = ErrClientDisconnected
+	case context.DeadlineExceeded:
+		apiErr = ErrRequestTimedout
 	case objectlock.ErrInvalidRetentionDate:
 		apiErr = ErrInvalidRetentionDate
 	case objectlock.ErrPastObjectLockRetainDate:
@@ -2201,9 +2207,9 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 	case InvalidPart:
 		apiErr = ErrInvalidPart
 	case InsufficientWriteQuorum:
-		apiErr = ErrSlowDown
+		apiErr = ErrSlowDownWrite
 	case InsufficientReadQuorum:
-		apiErr = ErrSlowDown
+		apiErr = ErrSlowDownRead
 	case InvalidMarkerPrefixCombination:
 		apiErr = ErrNotImplemented
 	case InvalidUploadIDKeyCombination:
@@ -2297,7 +2303,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 	case *event.ErrUnsupportedConfiguration:
 		apiErr = ErrUnsupportedNotification
 	case OperationTimedOut:
-		apiErr = ErrOperationTimedOut
+		apiErr = ErrRequestTimedout
 	case BackendDown:
 		apiErr = ErrBackendDown
 	case ObjectNameTooLong:

View File

@@ -40,8 +40,8 @@ var toAPIErrorTests = []struct {
 	{err: ObjectNameInvalid{}, errCode: ErrInvalidObjectName},
 	{err: InvalidUploadID{}, errCode: ErrNoSuchUpload},
 	{err: InvalidPart{}, errCode: ErrInvalidPart},
-	{err: InsufficientReadQuorum{}, errCode: ErrSlowDown},
-	{err: InsufficientWriteQuorum{}, errCode: ErrSlowDown},
+	{err: InsufficientReadQuorum{}, errCode: ErrSlowDownRead},
+	{err: InsufficientWriteQuorum{}, errCode: ErrSlowDownWrite},
 	{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
 	{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},
 	{err: MalformedUploadID{}, errCode: ErrNoSuchUpload},

View File

@@ -945,6 +945,8 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
 }
 
 func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
+	w.Header().Set(xMinIOErrCodeHeader, err.Code)
+	w.Header().Set(xMinIOErrDescHeader, "\""+err.Description+"\"")
 	writeResponse(w, err.HTTPStatusCode, nil, mimeNone)
 }

File diff suppressed because one or more lines are too long

View File

@@ -32,7 +32,9 @@ import (
 	"github.com/dustin/go-humanize"
 	"github.com/minio/madmin-go/v2"
 	"github.com/minio/minio-go/v7/pkg/set"
+	"github.com/minio/minio/internal/config"
 	"github.com/minio/minio/internal/logger"
+	"github.com/minio/pkg/env"
 )
 
 const (
@@ -345,7 +347,9 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
 	globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
 
-	go monitorLocalDisksAndHeal(ctx, z)
+	if env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
+		go monitorLocalDisksAndHeal(ctx, z)
+	}
 }
 
 func getLocalDisksToHeal() (disksToHeal Endpoints) {
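
Note: minio/pkg's env.Get returns the supplied fallback when the variable is unset, and config.EnableOn is the string "on", so local disk auto-healing stays enabled by default and is only skipped when _MINIO_AUTO_DISK_HEALING is explicitly set otherwise. A minimal standalone sketch of that default-on gate (the get helper below mirrors env.Get for illustration; it is not the MinIO function):

package main

import (
	"fmt"
	"os"
)

// get mirrors the fallback behavior of env.Get for this sketch: return the
// environment value when set and non-empty, otherwise the default.
func get(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}

func main() {
	// Unset: healing stays on. Export _MINIO_AUTO_DISK_HEALING=off to disable it.
	fmt.Println(get("_MINIO_AUTO_DISK_HEALING", "on") == "on")
}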

View File

@@ -1532,10 +1532,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
-		errCode := errorCodes.ToAPIErr(s3Error)
-		w.Header().Set(xMinIOErrCodeHeader, errCode.Code)
-		w.Header().Set(xMinIOErrDescHeader, "\""+errCode.Description+"\"")
-		writeErrorResponseHeadersOnly(w, errCode)
+		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
 		return
 	}

View File

@@ -1385,16 +1385,23 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI ObjectLayer,
 		}
 	}
 	// if target returns error other than NoSuchKey, defer replication attempt
-	if cerr != nil && minio.ToErrorResponse(cerr).Code != "NoSuchKey" && minio.ToErrorResponse(cerr).Code != "NoSuchVersion" {
-		logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target returned %s error on HEAD", bucket, object, objInfo.VersionID, cerr))
-		sendEvent(eventArgs{
-			EventName:  event.ObjectReplicationNotTracked,
-			BucketName: bucket,
-			Object:     objInfo,
-			UserAgent:  "Internal: [Replication]",
-			Host:       globalLocalNodeName,
-		})
-		return
+	if cerr != nil {
+		errResp := minio.ToErrorResponse(cerr)
+		switch errResp.Code {
+		case "NoSuchKey", "NoSuchVersion", "SlowDownRead":
+			rAction = replicateAll
+		default:
+			logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD",
+				bucket, object, objInfo.VersionID, tgt.EndpointURL(), cerr))
+			sendEvent(eventArgs{
+				EventName:  event.ObjectReplicationNotTracked,
+				BucketName: bucket,
+				Object:     objInfo,
+				UserAgent:  "Internal: [Replication]",
+				Host:       globalLocalNodeName,
+			})
+			return
+		}
 	}
 	rinfo.ReplicationStatus = replication.Completed
 	rinfo.Size = size
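
This switch is the bonus fix from the commit message: a HEAD against the replication target that fails with NoSuchKey, NoSuchVersion, or the 503-backed SlowDownRead now falls through to a full replication attempt instead of aborting. A tiny runnable sketch of the classification (headErrorAction is a hypothetical name, not a MinIO function):

package main

import "fmt"

// headErrorAction mimics the decision above: object missing or target too
// busy to answer reads means "replicate everything"; any other error defers
// the attempt and emits an ObjectReplicationNotTracked event.
func headErrorAction(code string) string {
	switch code {
	case "NoSuchKey", "NoSuchVersion", "SlowDownRead":
		return "replicateAll"
	default:
		return "defer"
	}
}

func main() {
	for _, code := range []string{"NoSuchKey", "SlowDownRead", "AccessDenied"} {
		fmt.Println(code, "=>", headErrorAction(code))
	}
}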

View File

@@ -503,7 +503,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 	if !latestMeta.XLV1 && !latestMeta.Deleted && !recreate && disksToHealCount > latestMeta.Erasure.ParityBlocks {
 		// When disk to heal count is greater than parity blocks we should simply error out.
-		err := fmt.Errorf("more drives are expected to heal than parity, returned errors: %v (dataErrs %v) -> %s/%s(%s)", errs, dataErrs, bucket, object, versionID)
+		err := fmt.Errorf("(%d > %d) more drives are expected to heal than parity, returned errors: %v (dataErrs %v) -> %s/%s(%s)", disksToHealCount, latestMeta.Erasure.ParityBlocks, errs, dataErrs, bucket, object, versionID)
 		logger.LogIf(ctx, err)
 		return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
 			bucket, object, versionID), err
@@ -1012,9 +1012,6 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (validMeta FileInfo, ok bool) {
 	notFoundMetaErrs, corruptedMetaErrs, driveNotFoundMetaErrs := danglingErrsCount(errs)
 	notFoundPartsErrs, corruptedPartsErrs, driveNotFoundPartsErrs := danglingErrsCount(ndataErrs)
 
-	if driveNotFoundMetaErrs > 0 || driveNotFoundPartsErrs > 0 {
-		return validMeta, false
-	}
 	for _, m := range metaArr {
 		if m.IsValid() {
 			validMeta = m
@@ -1027,6 +1024,10 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (validMeta FileInfo, ok bool) {
 		return validMeta, false
 	}
 
+	if driveNotFoundMetaErrs > 0 || driveNotFoundPartsErrs > 0 {
+		return validMeta, false
+	}
+
 	if validMeta.Deleted {
 		// notFoundPartsErrs is ignored since
 		// - delete marker does not have any parts

View File

@@ -350,6 +350,7 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time,
 		for _, part := range meta.Parts {
 			fmt.Fprintf(h, "part.%d", part.Number)
 		}
+		fmt.Fprintf(h, "%v+%v", meta.Erasure.DataBlocks, meta.Erasure.ParityBlocks)
 		fmt.Fprintf(h, "%v", meta.Erasure.Distribution)
 
 		// ILM transition fields
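
This one-line change is what the commit title describes: the hash used to group FileInfo candidates into a quorum now covers the data/parity block counts, so metadata written with different erasure settings can no longer be lumped into the same quorum. A simplified runnable illustration with trimmed-down types (not MinIO's own):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

type erasureInfo struct {
	DataBlocks, ParityBlocks int
	Distribution             []int
}

// quorumKey hashes the fields that must agree for two metadata entries to
// count toward the same quorum, including the erasure layout.
func quorumKey(parts []int, e erasureInfo) string {
	h := sha256.New()
	for _, p := range parts {
		fmt.Fprintf(h, "part.%d", p)
	}
	fmt.Fprintf(h, "%v+%v", e.DataBlocks, e.ParityBlocks) // the line this commit adds
	fmt.Fprintf(h, "%v", e.Distribution)
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	parts := []int{1}
	dist := []int{1, 2, 3, 4}
	a := quorumKey(parts, erasureInfo{DataBlocks: 2, ParityBlocks: 2, Distribution: dist})
	b := quorumKey(parts, erasureInfo{DataBlocks: 3, ParityBlocks: 1, Distribution: dist})
	fmt.Println(a == b) // false: a differing parity count now yields a different key
}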

View File

@@ -374,6 +374,9 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string,
 		if !metaArr[index].IsValid() {
 			continue
 		}
+		if !metaArr[index].Erasure.Equal(fi.Erasure) {
+			continue
+		}
 		checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber)
 		partPath := pathJoin(object, metaArr[index].DataDir, fmt.Sprintf("part.%d", partNumber))
 		readers[index] = newBitrotReader(disk, metaArr[index].Data, bucket, partPath, tillOffset,
@@ -1008,13 +1011,24 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object string,
 	data := r.Reader
 
 	if opts.CheckPrecondFn != nil {
-		obj, err := er.getObjectInfo(ctx, bucket, object, opts)
-		if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
-			return objInfo, err
+		if !opts.NoLock {
+			ns := er.NewNSLock(bucket, object)
+			lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
+			if err != nil {
+				return ObjectInfo{}, err
+			}
+			ctx = lkctx.Context()
+			defer ns.Unlock(lkctx)
+			opts.NoLock = true
 		}
-		if opts.CheckPrecondFn(obj) {
+
+		obj, err := er.getObjectInfo(ctx, bucket, object, opts)
+		if err == nil && opts.CheckPrecondFn(obj) {
 			return objInfo, PreConditionFailed{}
 		}
+		if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) {
+			return objInfo, err
+		}
 	}
 
 	// Validate input data size and it can never be less than -1.
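
The rewritten precondition block changes two things: the namespace lock is now taken before getObjectInfo (with opts.NoLock set so the write path does not lock again), and read-quorum errors during the check no longer fail the upload. Holding one lock across check and write closes the window in which another writer could modify the object between the two. A minimal sketch of that check-then-act pattern, with a plain sync.Mutex standing in for MinIO's namespace lock:

package main

import (
	"fmt"
	"sync"
)

type object struct {
	mu   sync.Mutex
	etag string
}

// putIfMatch writes only while the stored ETag still satisfies the caller's
// precondition, holding the lock across both the check and the write.
func (o *object) putIfMatch(ifMatch, newETag string) error {
	o.mu.Lock()
	defer o.mu.Unlock()
	if o.etag != ifMatch {
		return fmt.Errorf("precondition failed")
	}
	o.etag = newETag
	return nil
}

func main() {
	o := &object{etag: "v1"}
	fmt.Println(o.putIfMatch("v1", "v2")) // <nil>
	fmt.Println(o.putIfMatch("v1", "v3")) // precondition failed: etag is now "v2"
}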

View File

@@ -317,7 +317,7 @@ func maxClients(f http.HandlerFunc) http.HandlerFunc {
 		case <-deadlineTimer.C:
 			// Send a http timeout message
 			writeErrorResponse(r.Context(), w,
-				errorCodes.ToAPIErr(ErrOperationMaxedOut),
+				errorCodes.ToAPIErr(ErrTooManyRequests),
 				r.URL)
 			globalHTTPStats.addRequestsInQueue(-1)
 			return

View File

@@ -129,7 +129,7 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r *http.Request) {
 		return rulesMap.MatchSimple(ev.EventName, ev.S3.Object.Key)
 	})
 	if err != nil {
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSlowDown), r.URL)
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
 	if bucketName != "" {

View File

@@ -662,10 +662,7 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI ObjectLayer,
 			}
 		}
 	}
-	errCode := errorCodes.ToAPIErr(s3Error)
-	w.Header().Set(xMinIOErrCodeHeader, errCode.Code)
-	w.Header().Set(xMinIOErrDescHeader, "\""+errCode.Description+"\"")
-	writeErrorResponseHeadersOnly(w, errCode)
+	writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
 	return
 }
@@ -700,10 +697,7 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI ObjectLayer,
 	}
 
 	if s3Error := authorizeRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone {
-		errCode := errorCodes.ToAPIErr(s3Error)
-		w.Header().Set(xMinIOErrCodeHeader, errCode.Code)
-		w.Header().Set(xMinIOErrDescHeader, "\""+errCode.Description+"\"")
-		writeErrorResponseHeadersOnly(w, errCode)
+		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
 		return
 	}

View File

@@ -265,7 +265,10 @@ func (fi FileInfo) Equals(ofi FileInfo) (ok bool) {
 	if !fi.TransitionInfoEquals(ofi) {
 		return false
 	}
-	return fi.ModTime.Equal(ofi.ModTime)
+	if !fi.ModTime.Equal(ofi.ModTime) {
+		return false
+	}
+	return fi.Erasure.Equal(ofi.Erasure)
 }
 
 // GetDataDir returns an expected dataDir given FileInfo
// GetDataDir returns an expected dataDir given FileInfo // GetDataDir returns an expected dataDir given FileInfo

View File

@@ -106,6 +106,38 @@ type ErasureInfo struct {
 	Checksums []ChecksumInfo `json:"checksum,omitempty"`
 }
 
+// Equal equates current erasure info with newer erasure info.
+// returns false if one of the following check fails
+// - erasure algorithm is different
+// - data blocks are different
+// - parity blocks are different
+// - block size is different
+// - distribution array size is different
+// - distribution indexes are different
+func (ei ErasureInfo) Equal(nei ErasureInfo) bool {
+	if ei.Algorithm != nei.Algorithm {
+		return false
+	}
+	if ei.DataBlocks != nei.DataBlocks {
+		return false
+	}
+	if ei.ParityBlocks != nei.ParityBlocks {
+		return false
+	}
+	if ei.BlockSize != nei.BlockSize {
+		return false
+	}
+	if len(ei.Distribution) != len(nei.Distribution) {
+		return false
+	}
+	for i, ecindex := range ei.Distribution {
+		if ecindex != nei.Distribution[i] {
+			return false
+		}
+	}
+	return true
+}
+
 // BitrotAlgorithm specifies a algorithm used for bitrot protection.
 type BitrotAlgorithm uint

View File

@@ -2139,9 +2139,15 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions)
 }
 
 func skipAccessChecks(volume string) (ok bool) {
-	switch volume {
-	case minioMetaTmpBucket, minioMetaBucket, minioMetaMultipartBucket, minioMetaTmpDeletedBucket:
-		ok = true
+	for _, prefix := range []string{
+		minioMetaTmpBucket,
+		minioMetaBucket,
+		minioMetaMultipartBucket,
+		minioMetaTmpDeletedBucket,
+	} {
+		if strings.HasPrefix(volume, prefix) {
+			return true
+		}
 	}
 	return ok
 }
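
Note the semantic widening here: the old switch required an exact match on the volume name, while strings.HasPrefix also skips access checks for any volume that extends a reserved bucket name (in MinIO, minioMetaTmpBucket is minioMetaBucket + "/tmp", i.e. ".minio.sys/tmp"). A standalone before/after comparison; the "/deeper" suffix is illustrative, not from the commit:

package main

import (
	"fmt"
	"strings"
)

const minioMetaTmpBucket = ".minio.sys/tmp"

// exact is the old behavior: only the reserved name itself matches.
func exact(volume string) bool {
	return volume == minioMetaTmpBucket
}

// prefixed is the new behavior: anything extending the reserved name matches.
func prefixed(volume string) bool {
	return strings.HasPrefix(volume, minioMetaTmpBucket)
}

func main() {
	v := minioMetaTmpBucket + "/deeper"
	fmt.Println(exact(v), prefixed(v)) // false true
}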

go.mod
View File

@@ -49,7 +49,7 @@ require (
 	github.com/minio/highwayhash v1.0.2
 	github.com/minio/kes-go v0.1.0
 	github.com/minio/madmin-go/v2 v2.2.0
-	github.com/minio/minio-go/v7 v7.0.57
+	github.com/minio/minio-go/v7 v7.0.58-0.20230618181316-a73fe95ad965
 	github.com/minio/mux v1.9.0
 	github.com/minio/pkg v1.7.4
 	github.com/minio/selfupdate v0.6.0

go.sum
View File

@@ -792,8 +792,8 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
 github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
 github.com/minio/minio-go/v7 v7.0.41/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
-github.com/minio/minio-go/v7 v7.0.57 h1:xsFiOiWjpC1XAGbFEUOzj1/gMXGz7ljfxifwcb/5YXU=
-github.com/minio/minio-go/v7 v7.0.57/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
+github.com/minio/minio-go/v7 v7.0.58-0.20230618181316-a73fe95ad965 h1:FWBzNlpXsqpkKlQhp3Sq/ONxFkAVPdxBC9CuzRF/4es=
+github.com/minio/minio-go/v7 v7.0.58-0.20230618181316-a73fe95ad965/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
 github.com/minio/mux v1.9.0 h1:dWafQFyEfGhJvK6AwLOt83bIG5bxKxKJnKMCi0XAaoA=
 github.com/minio/mux v1.9.0/go.mod h1:1pAare17ZRL5GpmNL+9YmqHoWnLmMZF9C/ioUCfy0BQ=
 github.com/minio/pkg v1.5.4/go.mod h1:2MOaRFdmFKULD+uOLc3qHLGTQTuxCNPKNPfLBTxC8CA=