reduce unnecessary logging, simplify certain error handling (#18196)

remove a bunch of unnecessary logs
Harshavardhana 2023-10-10 00:33:42 -07:00 committed by GitHub
parent dcce83b288
commit 74e0c9ab9b
9 changed files with 84 additions and 156 deletions


@ -392,7 +392,8 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
// of bucket metadata
zipWriter := zip.NewWriter(w)
defer zipWriter.Close()
- rawDataFn := func(r io.Reader, filename string, sz int) error {
+ rawDataFn := func(r io.Reader, filename string, sz int) {
header, zerr := zip.FileInfoHeader(dummyFileInfo{
name: filename,
size: int64(sz),
@ -401,20 +402,13 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
isDir: false,
sys: nil,
})
- if zerr != nil {
- logger.LogIf(ctx, zerr)
- return nil
- }
+ if zerr == nil {
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
- if zerr != nil {
- logger.LogIf(ctx, zerr)
- return nil
+ if zerr == nil {
+ io.Copy(zwriter, r)
}
- if _, err := io.Copy(zwriter, r); err != nil {
- logger.LogIf(ctx, err)
- }
- return nil
}
cfgFiles := []string{
@ -446,10 +440,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketLifecycleConfig:
config, _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
if err != nil {
@ -465,10 +456,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketQuotaConfigFile:
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
if err != nil {
@ -483,10 +471,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketSSEConfig:
config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
if err != nil {
@ -501,10 +486,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketTaggingConfig:
config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
if err != nil {
@ -519,10 +501,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case objectLockConfig:
config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
if err != nil {
@ -538,10 +517,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketVersioningConfig:
config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
if err != nil {
@ -557,10 +533,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketReplicationConfig:
config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
if err != nil {
@ -575,11 +548,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
case bucketTargetsFile:
config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
if err != nil {
@ -595,10 +564,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
}
}
}
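
Pieced together from the added lines above (a sketch, not the verbatim file; a couple of dummyFileInfo fields are elided), the export path now reads as follows: the closure swallows zip errors instead of returning them, and every config case writes its entry without an error check.

rawDataFn := func(r io.Reader, filename string, sz int) {
	header, zerr := zip.FileInfoHeader(dummyFileInfo{
		name:  filename,
		size:  int64(sz),
		isDir: false,
		sys:   nil,
	})
	if zerr == nil {
		header.Method = zip.Deflate
		zwriter, zerr := zipWriter.CreateHeader(header)
		if zerr == nil {
			// Best effort: an entry that cannot be created or copied is skipped silently.
			io.Copy(zwriter, r)
		}
	}
}

// Each config case in the switch then calls it directly:
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))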


@ -716,7 +716,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
continue
}
- logger.LogIf(ctx, os.Sweep())
+ os.Sweep()
}
}
@ -794,7 +794,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// check if client is attempting to create more buckets, complain about it.
if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
- logger.LogIf(ctx, fmt.Errorf("An attempt to create %d buckets beyond recommended %d", currBuckets+1, maxBuckets))
+ logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets))
}
opts := MakeBucketOptions{
@ -1141,7 +1141,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}


@ -2634,16 +2634,18 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
roi.EventType = ReplicateExisting
replicateObject(ctx, roi, objectAPI)
}
- _, err = tgt.StatObject(ctx, tgt.Bucket, roi.Name, minio.StatObjectOptions{
+ st := TargetReplicationResyncStatus{
+ Object: roi.Name,
+ Bucket: roi.Bucket,
+ }
+ _, err := tgt.StatObject(ctx, tgt.Bucket, roi.Name, minio.StatObjectOptions{
VersionID: roi.VersionID,
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
})
- st := TargetReplicationResyncStatus{
- Object: roi.Name,
- Bucket: roi.Bucket,
- }
if err != nil {
if roi.DeleteMarker && isErrMethodNotAllowed(ErrorRespToObjectError(err, opts.bucket, roi.Name)) {
st.ReplicatedCount++


@ -30,7 +30,6 @@ import (
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
)
func getDefaultOpts(header http.Header, copySource bool, metadata map[string]string) (opts ObjectOptions, err error) {
@ -115,44 +114,43 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
}
opts.PartNumber = partNumber
opts.VersionID = vid
delMarker := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceDeleteMarker))
if delMarker != "" {
switch delMarker {
case "true":
opts.DeleteMarker = true
case "false":
default:
err = fmt.Errorf("Unable to parse %s, failed with %w", xhttp.MinIOSourceDeleteMarker, fmt.Errorf("DeleteMarker should be true or false"))
logger.LogIf(ctx, err)
return opts, InvalidArgument{
Bucket: bucket,
Object: object,
Err: err,
}
}
}
replReadyCheck := strings.TrimSpace(r.Header.Get(xhttp.MinIOCheckDMReplicationReady))
if replReadyCheck != "" {
switch replReadyCheck {
case "true":
opts.CheckDMReplicationReady = true
case "false":
default:
err = fmt.Errorf("Unable to parse %s, failed with %w", xhttp.MinIOCheckDMReplicationReady, fmt.Errorf("should be true or false"))
logger.LogIf(ctx, err)
return opts, InvalidArgument{
Bucket: bucket,
Object: object,
Err: err,
}
delMarker, err := parseBoolHeader(bucket, object, r.Header, xhttp.MinIOSourceDeleteMarker)
if err != nil {
return opts, err
}
opts.DeleteMarker = delMarker
replReadyCheck, err := parseBoolHeader(bucket, object, r.Header, xhttp.MinIOCheckDMReplicationReady)
if err != nil {
return opts, err
}
opts.CheckDMReplicationReady = replReadyCheck
opts.Tagging = r.Header.Get(xhttp.AmzTagDirective) == accessDirective
opts.Versioned = globalBucketVersioningSys.PrefixEnabled(bucket, object)
opts.VersionSuspended = globalBucketVersioningSys.PrefixSuspended(bucket, object)
return opts, nil
}
func parseBoolHeader(bucket, object string, h http.Header, headerName string) (bool, error) {
value := strings.TrimSpace(h.Get(headerName))
if value != "" {
switch value {
case "true":
return true, nil
case "false":
default:
return false, InvalidArgument{
Bucket: bucket,
Object: object,
Err: fmt.Errorf("Unable to parse %s, value should be either 'true' or 'false'", headerName),
}
}
}
return false, nil
}
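
The new helper keeps the semantics of the two inline switches it replaces: an absent or empty header is treated as false, and any value other than "true" or "false" becomes an InvalidArgument returned to the caller rather than logged. A quick illustration (bucket and object names here are placeholders):

h := http.Header{}
h.Set(xhttp.MinIOSourceDeleteMarker, "true")

dm, err := parseBoolHeader("mybucket", "myobject", h, xhttp.MinIOSourceDeleteMarker)
// dm == true, err == nil

h.Set(xhttp.MinIOSourceDeleteMarker, "yes")
_, err = parseBoolHeader("mybucket", "myobject", h, xhttp.MinIOSourceDeleteMarker)
// err is an InvalidArgument naming the offending header; no log entry is emitted.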
func delOpts(ctx context.Context, r *http.Request, bucket, object string) (opts ObjectOptions, err error) {
opts, err = getOpts(ctx, r, bucket, object)
if err != nil {
@ -180,22 +178,11 @@ func delOpts(ctx context.Context, r *http.Request, bucket, object string) (opts
opts.VersionID = nullVersionID
}
delMarker := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceDeleteMarker))
if delMarker != "" {
switch delMarker {
case "true":
opts.DeleteMarker = true
case "false":
default:
err = fmt.Errorf("Unable to parse %s, failed with %w", xhttp.MinIOSourceDeleteMarker, fmt.Errorf("DeleteMarker should be true or false"))
logger.LogIf(ctx, err)
return opts, InvalidArgument{
Bucket: bucket,
Object: object,
Err: err,
}
}
delMarker, err := parseBoolHeader(bucket, object, r.Header, xhttp.MinIOSourceDeleteMarker)
if err != nil {
return opts, err
}
opts.DeleteMarker = delMarker
mtime := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceMTime))
if mtime != "" {
@ -222,7 +209,6 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {
logger.LogIf(ctx, err)
return opts, InvalidVersionID{
Bucket: bucket,
Object: object,
@ -233,7 +219,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
return opts, InvalidArgument{
Bucket: bucket,
Object: object,
- Err: fmt.Errorf("VersionID specified %s, but versioning not enabled on %s", opts.VersionID, bucket),
+ Err: fmt.Errorf("VersionID specified %s, but versioning not enabled on bucket=%s", opts.VersionID, bucket),
}
}
}


@ -19,6 +19,7 @@ package cmd
import (
"context"
"fmt"
"net/http"
"regexp"
"strconv"
@ -29,7 +30,6 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
)
var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$")
@ -356,17 +356,8 @@ func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toD
PrefixEnabledFn: vc.PrefixEnabled,
VersionSuspended: vc.Suspended(),
})
- var logged bool
- for i, err := range errs {
- if err != nil {
- if !logged {
- // log the first error
- logger.LogIf(ctx, err)
- logged = true
- }
- continue
- }
- dobj := deletedObjs[i]
+ for i, dobj := range deletedObjs {
oi := ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
@ -382,7 +373,7 @@ func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toD
oi,
ILMExpiry, tags, traceFn)
- sendEvent(eventArgs{
+ evArgs := eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: ObjectInfo{
@ -391,7 +382,13 @@ func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toD
},
UserAgent: "Internal: [ILM-Expiry]",
Host: globalLocalNodeName,
- })
+ }
+ if errs[i] != nil {
+ evArgs.RespElements = map[string]string{
+ "error": fmt.Sprintf("failed to delete %s(%s), with error %v", dobj.ObjectName, dobj.VersionID, errs[i]),
+ }
+ }
+ sendEvent(evArgs)
}
}
}
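
Reconstructed from the added lines above (a sketch; the ObjectInfo construction and the audit/trace call are unchanged and elided), the expiry loop now visits every deleted version and attaches a per-object failure to the event payload instead of logging only the first error:

for i, dobj := range deletedObjs {
	// ... build oi and emit the lifecycle audit trace as before ...
	evArgs := eventArgs{
		EventName:  event.ObjectRemovedDelete,
		BucketName: bucket,
		Object: ObjectInfo{
			Name:      dobj.ObjectName,
			VersionID: dobj.VersionID,
		},
		UserAgent: "Internal: [ILM-Expiry]",
		Host:      globalLocalNodeName,
	}
	if errs[i] != nil {
		// Surface the failure on the event itself rather than in the server log.
		evArgs.RespElements = map[string]string{
			"error": fmt.Sprintf("failed to delete %s(%s), with error %v", dobj.ObjectName, dobj.VersionID, errs[i]),
		}
	}
	sendEvent(evArgs)
}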


@ -59,7 +59,6 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/s3select"
"github.com/minio/mux"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/policy"
)
@ -547,9 +546,6 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
logger.LogOnceIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err), "get-object-handler-write")
}
return
}
@ -558,9 +554,6 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
logger.LogOnceIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err), "get-object-handler-close")
}
return
}
@ -780,8 +773,6 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange))
return
}
logger.LogIf(ctx, err, logger.Application)
}
}
@ -1067,7 +1058,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
var srcOpts, dstOpts ObjectOptions
srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@ -1086,7 +1076,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@ -1572,7 +1561,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
objInfo.ETag = origETag
enqueueTransitionImmediate(objInfo, lcEventSrc_s3CopyObject)
// Remove the transitioned object whose object version is being overwritten.
- logger.LogIf(ctx, os.Sweep())
+ os.Sweep()
}
}
@ -1972,7 +1961,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
// Schedule object for immediate transition if eligible.
objInfo.ETag = origETag
enqueueTransitionImmediate(objInfo, lcEventSrc_s3PutObject)
- logger.LogIf(ctx, os.Sweep())
+ os.Sweep()
}
}
@ -3144,7 +3133,6 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *
}, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL)
return
}
@ -3224,7 +3212,7 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *
VersionID: objInfo.VersionID,
}
if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil {
- logger.LogIf(ctx, err)
+ logger.LogIf(ctx, fmt.Errorf("Unable to restore transitioned bucket/object %s/%s: %w", bucket, object, err))
return
}


@ -356,7 +356,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return true
}
if parseRangeErr != nil {
logger.LogIf(ctx, parseRangeErr)
writeCopyPartErr(ctx, w, parseRangeErr, r.URL)
// Range header mismatch is pre-condition like failure
// so return true to indicate Range precondition failed.
@ -1035,7 +1034,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
if !globalTierConfigMgr.Empty() {
// Schedule object for immediate transition if eligible.
enqueueTransitionImmediate(objInfo, lcEventSrc_s3CompleteMultipartUpload)
- logger.LogIf(ctx, os.Sweep())
+ os.Sweep()
}
}


@ -21,7 +21,6 @@ import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
"sort"
@ -31,8 +30,6 @@ import (
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/policy"
"github.com/minio/zipindex"
)
@ -223,9 +220,6 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err))
}
return
}
@ -234,9 +228,6 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err))
}
return
}
}


@ -27,23 +27,23 @@ import (
)
// writeSTSErrorRespone writes error headers
- func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode STSErrorCode, errCtxt error) {
- err := stsErrCodes.ToSTSErr(errCode)
+ func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode STSErrorCode, err error) {
+ stsErr := stsErrCodes.ToSTSErr(errCode)
// Generate error response.
stsErrorResponse := STSErrorResponse{}
- stsErrorResponse.Error.Code = err.Code
+ stsErrorResponse.Error.Code = stsErr.Code
stsErrorResponse.RequestID = w.Header().Get(xhttp.AmzRequestID)
- stsErrorResponse.Error.Message = err.Description
- if errCtxt != nil {
- stsErrorResponse.Error.Message = errCtxt.Error()
+ stsErrorResponse.Error.Message = stsErr.Description
+ if err != nil {
+ stsErrorResponse.Error.Message = err.Error()
}
switch errCode {
- case ErrSTSInternalError, ErrSTSNotInitialized, ErrSTSUpstreamError, ErrSTSIAMNotInitialized:
- logger.LogIf(ctx, errCtxt, logger.Minio)
+ case ErrSTSInternalError, ErrSTSUpstreamError:
+ logger.LogIf(ctx, err, logger.Minio)
}
encodedErrorResponse := encodeResponse(stsErrorResponse)
- writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
+ writeResponse(w, stsErr.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
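
After the rename, callers keep passing the underlying error as the last argument: it replaces the generic description in the XML body, and it is logged server-side only for the internal and upstream cases. A usage sketch (upstreamErr is a placeholder for whatever error the caller has in hand):

// Logged server-side, since this code is still in the switch above.
writeSTSErrorResponse(ctx, w, ErrSTSUpstreamError, fmt.Errorf("upstream IDP request failed: %w", upstreamErr))

// No longer logged (dropped from the switch), but the error text still reaches the client in the response body.
writeSTSErrorResponse(ctx, w, ErrSTSNotInitialized, errors.New("STS API not initialized, please try again"))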
// STSError structure