From 74e0c9ab9b3fc3f9d159a8815df100aa1a593a65 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 10 Oct 2023 00:33:42 -0700 Subject: [PATCH] reduce unnecessary logging, simplify certain error handling (#18196) remove a bunch of unnecessary logs --- cmd/admin-bucket-handlers.go | 68 +++++++------------------- cmd/bucket-handlers.go | 5 +- cmd/bucket-replication.go | 12 +++-- cmd/object-api-options.go | 82 +++++++++++++------------------- cmd/object-handlers-common.go | 25 +++++----- cmd/object-handlers.go | 18 ++----- cmd/object-multipart-handlers.go | 3 +- cmd/s3-zip-handlers.go | 9 ---- cmd/sts-errors.go | 18 +++---- 9 files changed, 84 insertions(+), 156 deletions(-) diff --git a/cmd/admin-bucket-handlers.go b/cmd/admin-bucket-handlers.go index 21b39c58b..ec72ef913 100644 --- a/cmd/admin-bucket-handlers.go +++ b/cmd/admin-bucket-handlers.go @@ -392,7 +392,8 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * // of bucket metadata zipWriter := zip.NewWriter(w) defer zipWriter.Close() - rawDataFn := func(r io.Reader, filename string, sz int) error { + + rawDataFn := func(r io.Reader, filename string, sz int) { header, zerr := zip.FileInfoHeader(dummyFileInfo{ name: filename, size: int64(sz), @@ -401,20 +402,13 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * isDir: false, sys: nil, }) - if zerr != nil { - logger.LogIf(ctx, zerr) - return nil + if zerr == nil { + header.Method = zip.Deflate + zwriter, zerr := zipWriter.CreateHeader(header) + if zerr == nil { + io.Copy(zwriter, r) + } } - header.Method = zip.Deflate - zwriter, zerr := zipWriter.CreateHeader(header) - if zerr != nil { - logger.LogIf(ctx, zerr) - return nil - } - if _, err := io.Copy(zwriter, r); err != nil { - logger.LogIf(ctx, err) - } - return nil } cfgFiles := []string{ @@ -446,10 +440,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case bucketLifecycleConfig: config, _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket) if err != nil { @@ -465,10 +456,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case bucketQuotaConfigFile: config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket) if err != nil { @@ -483,10 +471,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case bucketSSEConfig: config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket) if err != nil { @@ -501,10 +486,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * 
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case bucketTaggingConfig: config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket) if err != nil { @@ -519,10 +501,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case objectLockConfig: config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) if err != nil { @@ -538,10 +517,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case bucketVersioningConfig: config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket) if err != nil { @@ -557,10 +533,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case bucketReplicationConfig: config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) if err != nil { @@ -575,11 +548,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } - - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case bucketTargetsFile: config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket) if err != nil { @@ -595,10 +564,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } - if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) - return - } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) } } } diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 1e219c1da..ed3a98011 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -716,7 +716,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, if os == nil { // skip objects that weren't deleted due to invalid versionID etc. 
continue } - logger.LogIf(ctx, os.Sweep()) + os.Sweep() } } @@ -794,7 +794,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req // check if client is attempting to create more buckets, complain about it. if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets { - logger.LogIf(ctx, fmt.Errorf("An attempt to create %d buckets beyond recommended %d", currBuckets+1, maxBuckets)) + logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets)) } opts := MakeBucketOptions{ @@ -1141,7 +1141,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize) if err != nil { - logger.LogIf(ctx, err) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 26c134d5f..92f4587ba 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -2634,16 +2634,18 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object roi.EventType = ReplicateExisting replicateObject(ctx, roi, objectAPI) } - _, err = tgt.StatObject(ctx, tgt.Bucket, roi.Name, minio.StatObjectOptions{ + + st := TargetReplicationResyncStatus{ + Object: roi.Name, + Bucket: roi.Bucket, + } + + _, err := tgt.StatObject(ctx, tgt.Bucket, roi.Name, minio.StatObjectOptions{ VersionID: roi.VersionID, Internal: minio.AdvancedGetOptions{ ReplicationProxyRequest: "false", }, }) - st := TargetReplicationResyncStatus{ - Object: roi.Name, - Bucket: roi.Bucket, - } if err != nil { if roi.DeleteMarker && isErrMethodNotAllowed(ErrorRespToObjectError(err, opts.bucket, roi.Name)) { st.ReplicatedCount++ diff --git a/cmd/object-api-options.go b/cmd/object-api-options.go index d4791491f..d1e388c88 100644 --- a/cmd/object-api-options.go +++ b/cmd/object-api-options.go @@ -30,7 +30,6 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" ) func getDefaultOpts(header http.Header, copySource bool, metadata map[string]string) (opts ObjectOptions, err error) { @@ -115,44 +114,43 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec } opts.PartNumber = partNumber opts.VersionID = vid - delMarker := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceDeleteMarker)) - if delMarker != "" { - switch delMarker { - case "true": - opts.DeleteMarker = true - case "false": - default: - err = fmt.Errorf("Unable to parse %s, failed with %w", xhttp.MinIOSourceDeleteMarker, fmt.Errorf("DeleteMarker should be true or false")) - logger.LogIf(ctx, err) - return opts, InvalidArgument{ - Bucket: bucket, - Object: object, - Err: err, - } - } + + delMarker, err := parseBoolHeader(bucket, object, r.Header, xhttp.MinIOSourceDeleteMarker) + if err != nil { + return opts, err } - replReadyCheck := strings.TrimSpace(r.Header.Get(xhttp.MinIOCheckDMReplicationReady)) - if replReadyCheck != "" { - switch replReadyCheck { - case "true": - opts.CheckDMReplicationReady = true - case "false": - default: - err = fmt.Errorf("Unable to parse %s, failed with %w", xhttp.MinIOCheckDMReplicationReady, fmt.Errorf("should be true or false")) - logger.LogIf(ctx, err) - return opts, InvalidArgument{ - Bucket: bucket, - Object: object, - Err: err, - } - } + opts.DeleteMarker = delMarker + + replReadyCheck, err := parseBoolHeader(bucket, object, 
r.Header, xhttp.MinIOCheckDMReplicationReady) + if err != nil { + return opts, err } + opts.CheckDMReplicationReady = replReadyCheck + opts.Tagging = r.Header.Get(xhttp.AmzTagDirective) == accessDirective opts.Versioned = globalBucketVersioningSys.PrefixEnabled(bucket, object) opts.VersionSuspended = globalBucketVersioningSys.PrefixSuspended(bucket, object) return opts, nil } +func parseBoolHeader(bucket, object string, h http.Header, headerName string) (bool, error) { + value := strings.TrimSpace(h.Get(headerName)) + if value != "" { + switch value { + case "true": + return true, nil + case "false": + default: + return false, InvalidArgument{ + Bucket: bucket, + Object: object, + Err: fmt.Errorf("Unable to parse %s, value should be either 'true' or 'false'", headerName), + } + } + } + return false, nil +} + func delOpts(ctx context.Context, r *http.Request, bucket, object string) (opts ObjectOptions, err error) { opts, err = getOpts(ctx, r, bucket, object) if err != nil { @@ -180,22 +178,11 @@ func delOpts(ctx context.Context, r *http.Request, bucket, object string) (opts opts.VersionID = nullVersionID } - delMarker := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceDeleteMarker)) - if delMarker != "" { - switch delMarker { - case "true": - opts.DeleteMarker = true - case "false": - default: - err = fmt.Errorf("Unable to parse %s, failed with %w", xhttp.MinIOSourceDeleteMarker, fmt.Errorf("DeleteMarker should be true or false")) - logger.LogIf(ctx, err) - return opts, InvalidArgument{ - Bucket: bucket, - Object: object, - Err: err, - } - } + delMarker, err := parseBoolHeader(bucket, object, r.Header, xhttp.MinIOSourceDeleteMarker) + if err != nil { + return opts, err } + opts.DeleteMarker = delMarker mtime := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceMTime)) if mtime != "" { @@ -222,7 +209,6 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada if vid != "" && vid != nullVersionID { _, err := uuid.Parse(vid) if err != nil { - logger.LogIf(ctx, err) return opts, InvalidVersionID{ Bucket: bucket, Object: object, @@ -233,7 +219,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada return opts, InvalidArgument{ Bucket: bucket, Object: object, - Err: fmt.Errorf("VersionID specified %s, but versioning not enabled on %s", opts.VersionID, bucket), + Err: fmt.Errorf("VersionID specified %s, but versioning not enabled on bucket=%s", opts.VersionID, bucket), } } } diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 3f3fae7a0..c53287f3e 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -19,6 +19,7 @@ package cmd import ( "context" + "fmt" "net/http" "regexp" "strconv" @@ -29,7 +30,6 @@ import ( "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" ) var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$") @@ -356,17 +356,8 @@ func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toD PrefixEnabledFn: vc.PrefixEnabled, VersionSuspended: vc.Suspended(), }) - var logged bool - for i, err := range errs { - if err != nil { - if !logged { - // log the first error - logger.LogIf(ctx, err) - logged = true - } - continue - } - dobj := deletedObjs[i] + + for i, dobj := range deletedObjs { oi := ObjectInfo{ Bucket: bucket, Name: dobj.ObjectName, @@ -382,7 +373,7 @@ func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toD oi, 
ILMExpiry, tags, traceFn) - sendEvent(eventArgs{ + evArgs := eventArgs{ EventName: event.ObjectRemovedDelete, BucketName: bucket, Object: ObjectInfo{ @@ -391,7 +382,13 @@ func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toD }, UserAgent: "Internal: [ILM-Expiry]", Host: globalLocalNodeName, - }) + } + if errs[i] != nil { + evArgs.RespElements = map[string]string{ + "error": fmt.Sprintf("failed to delete %s(%s), with error %v", dobj.ObjectName, dobj.VersionID, errs[i]), + } + } + sendEvent(evArgs) } } } diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 8d3e9574d..bf2118feb 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -59,7 +59,6 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/s3select" "github.com/minio/mux" - xnet "github.com/minio/pkg/v2/net" "github.com/minio/pkg/v2/policy" ) @@ -547,9 +546,6 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients - logger.LogOnceIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err), "get-object-handler-write") - } return } @@ -558,9 +554,6 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients - logger.LogOnceIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err), "get-object-handler-close") - } return } @@ -780,8 +773,6 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange)) return } - - logger.LogIf(ctx, err, logger.Application) } } @@ -1067,7 +1058,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re var srcOpts, dstOpts ObjectOptions srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject) if err != nil { - logger.LogIf(ctx, err) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -1086,7 +1076,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil) if err != nil { - logger.LogIf(ctx, err) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -1572,7 +1561,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re objInfo.ETag = origETag enqueueTransitionImmediate(objInfo, lcEventSrc_s3CopyObject) // Remove the transitioned object whose object version is being overwritten. - logger.LogIf(ctx, os.Sweep()) + os.Sweep() } } @@ -1972,7 +1961,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req // Schedule object for immediate transition if eligible. 
objInfo.ETag = origETag enqueueTransitionImmediate(objInfo, lcEventSrc_s3PutObject) - logger.LogIf(ctx, os.Sweep()) + os.Sweep() } } @@ -3144,7 +3133,6 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r * }, ObjectOptions{ VersionID: objInfo.VersionID, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err)) writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL) return } @@ -3224,7 +3212,7 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r * VersionID: objInfo.VersionID, } if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil { - logger.LogIf(ctx, err) + logger.LogIf(ctx, fmt.Errorf("Unable to restore transitioned bucket/object %s/%s: %w", bucket, object, err)) return } diff --git a/cmd/object-multipart-handlers.go b/cmd/object-multipart-handlers.go index b7255ab4e..7fe825bb3 100644 --- a/cmd/object-multipart-handlers.go +++ b/cmd/object-multipart-handlers.go @@ -356,7 +356,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt return true } if parseRangeErr != nil { - logger.LogIf(ctx, parseRangeErr) writeCopyPartErr(ctx, w, parseRangeErr, r.URL) // Range header mismatch is pre-condition like failure // so return true to indicate Range precondition failed. @@ -1035,7 +1034,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite if !globalTierConfigMgr.Empty() { // Schedule object for immediate transition if eligible. enqueueTransitionImmediate(objInfo, lcEventSrc_s3CompleteMultipartUpload) - logger.LogIf(ctx, os.Sweep()) + os.Sweep() } } diff --git a/cmd/s3-zip-handlers.go b/cmd/s3-zip-handlers.go index ac91cfdaa..2ef2aee2e 100644 --- a/cmd/s3-zip-handlers.go +++ b/cmd/s3-zip-handlers.go @@ -21,7 +21,6 @@ import ( "bytes" "context" "errors" - "fmt" "io" "net/http" "sort" @@ -31,8 +30,6 @@ import ( "github.com/minio/minio/internal/crypto" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" - xnet "github.com/minio/pkg/v2/net" "github.com/minio/pkg/v2/policy" "github.com/minio/zipindex" ) @@ -223,9 +220,6 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients - logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err)) - } return } @@ -234,9 +228,6 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients - logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err)) - } return } } diff --git a/cmd/sts-errors.go b/cmd/sts-errors.go index ba26b9e19..eea346394 100644 --- a/cmd/sts-errors.go +++ b/cmd/sts-errors.go @@ -27,23 +27,23 @@ import ( ) // writeSTSErrorRespone writes error headers -func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode STSErrorCode, errCtxt error) { - err := stsErrCodes.ToSTSErr(errCode) +func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode STSErrorCode, err error) { + stsErr := stsErrCodes.ToSTSErr(errCode) // Generate error response. 
stsErrorResponse := STSErrorResponse{} - stsErrorResponse.Error.Code = err.Code + stsErrorResponse.Error.Code = stsErr.Code stsErrorResponse.RequestID = w.Header().Get(xhttp.AmzRequestID) - stsErrorResponse.Error.Message = err.Description - if errCtxt != nil { - stsErrorResponse.Error.Message = errCtxt.Error() + stsErrorResponse.Error.Message = stsErr.Description + if err != nil { + stsErrorResponse.Error.Message = err.Error() } switch errCode { - case ErrSTSInternalError, ErrSTSNotInitialized, ErrSTSUpstreamError, ErrSTSIAMNotInitialized: - logger.LogIf(ctx, errCtxt, logger.Minio) + case ErrSTSInternalError, ErrSTSUpstreamError: + logger.LogIf(ctx, err, logger.Minio) } encodedErrorResponse := encodeResponse(stsErrorResponse) - writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML) + writeResponse(w, stsErr.HTTPStatusCode, encodedErrorResponse, mimeXML) } // STSError structure
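
For context on the object-api-options.go refactor above: the duplicated true/false parsing of the delete-marker and replication-ready source headers in getOpts and delOpts is collapsed into a single parseBoolHeader helper that returns an InvalidArgument error instead of logging. The following is a minimal, self-contained sketch of that pattern only; the types, header name, and error text here are illustrative stand-ins, not MinIO's actual internals.

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    // parseBoolHeader reports whether the named header is "true". A value other
    // than "", "true", or "false" is surfaced as an error to the caller rather
    // than being logged, mirroring the consolidation in the patch.
    func parseBoolHeader(h http.Header, headerName string) (bool, error) {
    	switch v := strings.TrimSpace(h.Get(headerName)); v {
    	case "", "false":
    		return false, nil
    	case "true":
    		return true, nil
    	default:
    		return false, fmt.Errorf("unable to parse %s: value should be either 'true' or 'false', got %q", headerName, v)
    	}
    }

    func main() {
    	h := http.Header{}
    	h.Set("X-Minio-Source-DeleteMarker", "true") // header name used here for illustration

    	delMarker, err := parseBoolHeader(h, "X-Minio-Source-DeleteMarker")
    	if err != nil {
    		// In the patch, callers such as getOpts/delOpts wrap this into an
    		// InvalidArgument API error returned to the client.
    		fmt.Println("error:", err)
    		return
    	}
    	fmt.Println("delete marker:", delMarker)
    }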