diff --git a/cmd/admin-bucket-handlers.go b/cmd/admin-bucket-handlers.go index 5f21d8d0d..f22221d01 100644 --- a/cmd/admin-bucket-handlers.go +++ b/cmd/admin-bucket-handlers.go @@ -39,7 +39,6 @@ import ( "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" "github.com/minio/pkg/v2/policy" ) @@ -99,7 +98,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r * } // Call site replication hook. - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta)) + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta)) // Write success response. writeSuccessResponseHeadersOnly(w) @@ -431,7 +430,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * case bucketNotificationConfig: config, err := globalBucketMetadataSys.GetNotificationConfig(bucket) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } @@ -447,7 +446,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) { continue } - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } diff --git a/cmd/admin-handlers-config-kv.go b/cmd/admin-handlers-config-kv.go index dd500d6a2..902d40c29 100644 --- a/cmd/admin-handlers-config-kv.go +++ b/cmd/admin-handlers-config-kv.go @@ -58,7 +58,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ password := cred.SecretKey kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } @@ -162,7 +162,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ password := cred.SecretKey kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } @@ -443,7 +443,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques password := cred.SecretKey kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } diff --git a/cmd/admin-handlers-idp-config.go b/cmd/admin-handlers-idp-config.go index c054b0d34..b8336f0ad 100644 --- a/cmd/admin-handlers-idp-config.go +++ b/cmd/admin-handlers-idp-config.go @@ -31,7 +31,6 @@ import ( "github.com/minio/minio/internal/config" cfgldap "github.com/minio/minio/internal/config/identity/ldap" "github.com/minio/minio/internal/config/identity/openid" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" "github.com/minio/pkg/v2/ldap" "github.com/minio/pkg/v2/policy" @@ -60,7 +59,7 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R password := cred.SecretKey reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, 
logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } diff --git a/cmd/admin-handlers-idp-ldap.go b/cmd/admin-handlers-idp-ldap.go index 90e6ca38e..850d78524 100644 --- a/cmd/admin-handlers-idp-ldap.go +++ b/cmd/admin-handlers-idp-ldap.go @@ -27,7 +27,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/auth" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" "github.com/minio/pkg/v2/policy" ) @@ -132,7 +131,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http. password := cred.SecretKey reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } @@ -301,7 +300,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R // Call hook for cluster-replication if the service account is not for a // root user. if newCred.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSvcAcc, SvcAccChange: &madmin.SRSvcAccChange{ Create: &madmin.SRSvcAccCreate{ diff --git a/cmd/admin-handlers-pools.go b/cmd/admin-handlers-pools.go index 26f48ee7f..9fc729d04 100644 --- a/cmd/admin-handlers-pools.go +++ b/cmd/admin-handlers-pools.go @@ -26,7 +26,6 @@ import ( "strconv" "strings" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" "github.com/minio/pkg/v2/env" "github.com/minio/pkg/v2/policy" @@ -210,7 +209,7 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) { return } - logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status)) + adminLogIf(r.Context(), json.NewEncoder(w).Encode(&status)) } func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) { @@ -243,7 +242,7 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) { poolsStatus[idx] = status } - logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus)) + adminLogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus)) } func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) { @@ -350,11 +349,11 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL) return } - logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err)) + adminLogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs)) + adminLogIf(r.Context(), json.NewEncoder(w).Encode(rs)) } func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) { @@ -374,7 +373,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) // Cancel any ongoing rebalance operation globalNotificationSys.StopRebalance(r.Context()) writeSuccessResponseHeadersOnly(w) - logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt)) + adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt)) } func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) 
(proxy bool) { diff --git a/cmd/admin-handlers-site-replication.go b/cmd/admin-handlers-site-replication.go index 03dc0351f..b50a4d5f5 100644 --- a/cmd/admin-handlers-site-replication.go +++ b/cmd/admin-handlers-site-replication.go @@ -32,7 +32,6 @@ import ( "github.com/dustin/go-humanize" "github.com/minio/madmin-go/v3" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" "github.com/minio/pkg/v2/policy" ) @@ -55,7 +54,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ opts := getSRAddOptions(r) status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -93,7 +92,7 @@ func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) { } if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -140,7 +139,7 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket) } if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -192,7 +191,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http. err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt) } if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -263,7 +262,7 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt) } if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -316,7 +315,6 @@ func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptio if encryptionKey != "" { data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data)) if err != nil { - logger.LogIf(ctx, err) return SRError{ Cause: err, Code: ErrSiteReplicationInvalidRequest, @@ -396,7 +394,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req opts := getSREditOptions(r) status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -433,7 +431,7 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) { } if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -456,7 +454,7 @@ func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) { return } if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -493,7 +491,7 @@ func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.R } status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq) if err != nil { - logger.LogIf(ctx, 
err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -524,7 +522,7 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) { } if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -586,7 +584,7 @@ func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http. // If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec) // would mean the network is not stable. Logging here will help in debugging network issues. if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } } if err != nil { @@ -609,5 +607,5 @@ func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http. duration = globalNetPerfMinDuration } result := siteNetperf(r.Context(), duration) - logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) + adminLogIf(r.Context(), gob.NewEncoder(w).Encode(result)) } diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go index b0b85ac3f..b3b22d5a8 100644 --- a/cmd/admin-handlers-users.go +++ b/cmd/admin-handlers-users.go @@ -35,7 +35,6 @@ import ( "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/cachevalue" "github.com/minio/minio/internal/config/dns" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" "github.com/minio/pkg/v2/policy" "github.com/puzpuzpuz/xsync/v3" @@ -75,7 +74,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) { return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemIAMUser, IAMUser: &madmin.SRIAMUser{ AccessKey: accessKey, @@ -279,7 +278,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemGroupInfo, GroupInfo: &madmin.SRGroupInfo{ UpdateReq: updReq, @@ -369,7 +368,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemGroupInfo, GroupInfo: &madmin.SRGroupInfo{ UpdateReq: madmin.GroupAddRemove{ @@ -407,7 +406,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemIAMUser, IAMUser: &madmin.SRIAMUser{ AccessKey: accessKey, @@ -496,14 +495,14 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) { password := cred.SecretKey configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } var ureq madmin.AddOrUpdateUserReq if err = json.Unmarshal(configBytes, &ureq); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) 
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } @@ -514,7 +513,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) { return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemIAMUser, IAMUser: &madmin.SRIAMUser{ AccessKey: accessKey, @@ -732,7 +731,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque // Call hook for cluster-replication if the service account is not for a // root user. if newCred.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSvcAcc, SvcAccChange: &madmin.SRSvcAccChange{ Create: &madmin.SRSvcAccCreate{ @@ -854,7 +853,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re // Call site replication hook - non-root user accounts are replicated. if svcAccount.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSvcAcc, SvcAccChange: &madmin.SRSvcAccChange{ Update: &madmin.SRSvcAccUpdate{ @@ -1116,7 +1115,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re // Call site replication hook - non-root user accounts are replicated. if svcAccount.ParentUser != "" && svcAccount.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSvcAcc, SvcAccChange: &madmin.SRSvcAccChange{ Delete: &madmin.SRSvcAccDelete{ @@ -1274,7 +1273,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ default: policies, err := globalIAMSys.PolicyDBGet(accountName, cred.Groups...) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -1426,7 +1425,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ for name, p := range policies { _, err = json.Marshal(p) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) continue } newPolicies[name] = p @@ -1456,7 +1455,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ for name, p := range policies { _, err = json.Marshal(p) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) continue } newPolicies[name] = p @@ -1486,7 +1485,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ // Call cluster-replication policy creation hook to replicate policy deletion to // other minio clusters. - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicy, Name: policyName, UpdatedAt: UTCNow(), @@ -1549,7 +1548,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request // Call cluster-replication policy creation hook to replicate policy to // other minio clusters. 
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicy, Name: policyName, Policy: iamPolicyBytes, @@ -1617,7 +1616,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ UserOrGroup: entityName, @@ -1791,17 +1790,17 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { sys: nil, }) if zerr != nil { - logger.LogIf(ctx, zerr) + adminLogIf(ctx, zerr) return nil } header.Method = zip.Deflate zwriter, zerr := zipWriter.CreateHeader(header) if zerr != nil { - logger.LogIf(ctx, zerr) + adminLogIf(ctx, zerr) return nil } if _, err := io.Copy(zwriter, r); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } return nil } @@ -1822,7 +1821,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { case allPoliciesFile: allPolicies, err := globalIAMSys.ListPolicies(ctx, "") if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return } diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index bded0edcd..6b7a68029 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -130,7 +130,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R // Download Binary Once binC, bin, err := downloadBinary(u, mode) if err != nil { - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -354,7 +354,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req // Download Binary Once binC, bin, err := downloadBinary(u, mode) if err != nil { - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -368,7 +368,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req StatusCode: http.StatusInternalServerError, } logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -376,7 +376,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -389,7 +389,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req StatusCode: http.StatusInternalServerError, } logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) 
return } @@ -397,7 +397,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req err = commitBinary() if err != nil { - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -420,7 +420,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req for _, nerr := range globalNotificationSys.SignalService(serviceRestart) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + adminLogIf(ctx, nerr.Err) } } @@ -451,7 +451,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) case madmin.ServiceActionUnfreeze: serviceSig = serviceUnFreeze default: - logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind) + adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL) return } @@ -473,7 +473,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) for _, nerr := range globalNotificationSys.SignalService(serviceSig) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + adminLogIf(ctx, nerr.Err) } } @@ -534,7 +534,7 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques case madmin.ServiceActionUnfreeze: serviceSig = serviceUnFreeze default: - logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind) + adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL) return } @@ -1239,7 +1239,7 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read if hip.clientToken == "" { jerr := json.NewDecoder(r).Decode(&hip.hs) if jerr != nil { - logger.LogIf(GlobalContext, jerr, logger.ErrorKind) + adminLogIf(GlobalContext, jerr, logger.ErrorKind) err = ErrRequestBodyParse return } @@ -1433,7 +1433,7 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi var errCount int for _, nerr := range nerrs { if nerr.Err != nil { - logger.LogIf(ctx, nerr.Err) + adminLogIf(ctx, nerr.Err) errCount++ } } @@ -1561,7 +1561,7 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { // would mean the network is not stable. Logging here will help in debugging network issues. if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } } totalRx += n @@ -2800,7 +2800,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque w.Header().Get(xhttp.AmzRequestID), w.Header().Get(xhttp.AmzRequestHostID)) encodedErrorResponse := encodeResponse(errorResponse) healthInfo.Error = string(encodedErrorResponse) - logger.LogIf(ctx, enc.Encode(healthInfo)) + adminLogIf(ctx, enc.Encode(healthInfo)) } deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics. 
@@ -3113,7 +3113,7 @@ func getClusterMetaInfo(ctx context.Context) []byte { case ci := <-resultCh: out, err := json.MarshalIndent(ci, "", " ") if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return nil } return out @@ -3206,18 +3206,18 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ clusterKey, err := bytesToPublicKey(getSubnetAdminPublicKey()) if err != nil { - logger.LogIf(ctx, stream.AddError(err.Error())) + bugLogIf(ctx, stream.AddError(err.Error())) return } err = stream.AddKeyEncrypted(clusterKey) if err != nil { - logger.LogIf(ctx, stream.AddError(err.Error())) + bugLogIf(ctx, stream.AddError(err.Error())) return } if b := getClusterMetaInfo(ctx); len(b) > 0 { w, err := stream.AddEncryptedStream("cluster.info", nil) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return } w.Write(b) @@ -3226,12 +3226,12 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ // Add new key for inspect data. if err := stream.AddKeyEncrypted(publicKey); err != nil { - logger.LogIf(ctx, stream.AddError(err.Error())) + bugLogIf(ctx, stream.AddError(err.Error())) return } encStream, err := stream.AddEncryptedStream("inspect.zip", nil) if err != nil { - logger.LogIf(ctx, stream.AddError(err.Error())) + bugLogIf(ctx, stream.AddError(err.Error())) return } defer encStream.Close() @@ -3244,7 +3244,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ // MUST use crypto/rand n, err := crand.Read(key[:]) if err != nil || n != len(key) { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -3258,7 +3258,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ stream, err := sio.AES_256_GCM.Stream(key[:]) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return } // Zero nonce, we only use each key once, and 32 bytes is plenty. 
@@ -3272,7 +3272,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ defer inspectZipW.Close() if b := getClusterMetaInfo(ctx); len(b) > 0 { - logger.LogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600)) + adminLogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600)) } } @@ -3300,23 +3300,23 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ sys: nil, }) if zerr != nil { - logger.LogIf(ctx, zerr) + bugLogIf(ctx, zerr) return nil } header.Method = zip.Deflate zwriter, zerr := inspectZipW.CreateHeader(header) if zerr != nil { - logger.LogIf(ctx, zerr) + bugLogIf(ctx, zerr) return nil } if _, err := io.Copy(zwriter, r); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } return nil } err := o.GetRawData(ctx, volume, file, rawDataFn) if !errors.Is(err, errFileNotFound) { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } // save the format.json as part of inspect by default @@ -3324,7 +3324,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn) } if !errors.Is(err, errFileNotFound) { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } // save args passed to inspect command @@ -3336,7 +3336,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ sb.WriteString(pool.CmdLine) } sb.WriteString("\n") - logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600)) + adminLogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600)) scheme := "https" if !globalIsTLS { @@ -3370,7 +3370,7 @@ function main() { } main "$@"`, scheme) - logger.LogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755)) + adminLogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755)) } func getSubnetAdminPublicKey() []byte { diff --git a/cmd/admin-heal-ops.go b/cmd/admin-heal-ops.go index 04cdac8f0..40091553f 100644 --- a/cmd/admin-heal-ops.go +++ b/cmd/admin-heal-ops.go @@ -347,7 +347,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay StartTime: h.startTime, }) if err != nil { - logger.LogIf(h.ctx, err) + bugLogIf(h.ctx, err) return nil, toAdminAPIErr(h.ctx, err), "" } return b, noError, "" @@ -394,7 +394,7 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string, if err != nil { h.currentStatus.Items = nil - logger.LogIf(h.ctx, err) + bugLogIf(h.ctx, err) return nil, ErrInternalError } diff --git a/cmd/admin-server-info.go b/cmd/admin-server-info.go index 7e57cb652..33655bfa0 100644 --- a/cmd/admin-server-info.go +++ b/cmd/admin-server-info.go @@ -31,7 +31,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" ) // getLocalServerProperty - returns madmin.ServerProperties for only the @@ -67,7 +66,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req } else { network[nodeName] = string(madmin.ItemOffline) // log once the error - logger.LogOnceIf(context.Background(), err, nodeName) + peersLogOnceIf(context.Background(), err, nodeName) } } } diff --git a/cmd/api-errors.go b/cmd/api-errors.go index 6d8b1e817..fe164c4f1 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -2519,7 +2519,7 @@ func toAPIError(ctx context.Context, err error) APIError { // Make sure to log the errors which we cannot translate // to a meaningful S3 API errors. 
This is added to aid in // debugging unexpected/unhandled errors. - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } return apiErr diff --git a/cmd/api-headers.go b/cmd/api-headers.go index 3936227a3..ab6a229d5 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -30,7 +30,6 @@ import ( "github.com/minio/minio-go/v7/pkg/tags" "github.com/minio/minio/internal/crypto" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" xxml "github.com/minio/xxml" ) @@ -68,7 +67,7 @@ func encodeResponse(response interface{}) []byte { var buf bytes.Buffer buf.WriteString(xml.Header) if err := xml.NewEncoder(&buf).Encode(response); err != nil { - logger.LogIf(GlobalContext, err) + bugLogIf(GlobalContext, err) return nil } return buf.Bytes() @@ -86,7 +85,7 @@ func encodeResponseList(response interface{}) []byte { var buf bytes.Buffer buf.WriteString(xxml.Header) if err := xxml.NewEncoder(&buf).Encode(response); err != nil { - logger.LogIf(GlobalContext, err) + bugLogIf(GlobalContext, err) return nil } return buf.Bytes() diff --git a/cmd/api-response.go b/cmd/api-response.go index fb2ce4f91..83b32183c 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -891,7 +891,7 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType } // Similar check to http.checkWriteHeaderCode if statusCode < 100 || statusCode > 999 { - logger.LogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode)) + bugLogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode)) statusCode = http.StatusInternalServerError } setCommonHeaders(w) @@ -961,7 +961,7 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError // Similar check to http.checkWriteHeaderCode if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 { - logger.LogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code)) + bugLogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code)) err.HTTPStatusCode = http.StatusInternalServerError } diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index 752fdd530..935e6ff9a 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -126,7 +126,7 @@ func getRequestAuthType(r *http.Request) (at authType) { var err error r.Form, err = url.ParseQuery(r.URL.RawQuery) if err != nil { - logger.LogIf(r.Context(), err) + authNLogIf(r.Context(), err) return authTypeUnknown } } @@ -257,7 +257,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, if err != nil { // Base64 decoding fails, we should log to indicate // something is malforming the request sent by client. 
- logger.LogIf(GlobalContext, err, logger.ErrorKind) + authNLogIf(GlobalContext, err, logger.ErrorKind) return nil, errAuthentication } claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes) @@ -353,7 +353,7 @@ func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action po func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) { if logger.GetReqInfo(ctx) == nil { - logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind) + bugLogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind) return ErrAccessDenied } @@ -392,7 +392,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act // To extract region from XML in request body, get copy of request body. payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + authZLogIf(ctx, err, logger.ErrorKind) return ErrMalformedXML } diff --git a/cmd/background-heal-ops.go b/cmd/background-heal-ops.go index 7085a389c..15aab7762 100644 --- a/cmd/background-heal-ops.go +++ b/cmd/background-heal-ops.go @@ -25,7 +25,6 @@ import ( "time" "github.com/minio/madmin-go/v3" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/env" ) @@ -158,7 +157,7 @@ func newHealRoutine() *healRoutine { if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" { if numHealers, err := strconv.Atoi(envHealWorkers); err != nil { - logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err)) + bugLogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err)) } else { workers = numHealers } diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go index 4c96bc7f1..acea42454 100644 --- a/cmd/background-newdisks-heal-ops.go +++ b/cmd/background-newdisks-heal-ops.go @@ -33,7 +33,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/config" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/env" ) @@ -409,11 +408,11 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint if errors.Is(err, errFileNotFound) { return nil } - logger.LogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err)) + healingLogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err)) tracker = initHealingTracker(disk, mustGetUUID()) } - logger.Event(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint) + healingLogEvent(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint) buckets, _ := z.ListBuckets(ctx, BucketOptions{}) // Buckets data are dispersed in multiple pools/sets, make @@ -452,7 +451,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint return err } - logger.Event(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed) + healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed) if len(tracker.QueuedBuckets) > 0 { return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets) @@ -464,7 +463,7 
@@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint } if tracker.HealID == "" { // HealID was empty only before Feb 2023 - logger.LogIf(ctx, tracker.delete(ctx)) + bugLogIf(ctx, tracker.delete(ctx)) return nil } @@ -482,7 +481,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint t, err := loadHealingTracker(ctx, disk) if err != nil { if !errors.Is(err, errFileNotFound) { - logger.LogIf(ctx, err) + healingLogIf(ctx, err) } continue } @@ -517,7 +516,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) { // Reformat disks immediately _, err := z.HealFormat(context.Background(), false) if err != nil && !errors.Is(err, errNoHealRequired) { - logger.LogIf(ctx, err) + healingLogIf(ctx, err) // Reset for next interval. diskCheckTimer.Reset(defaultMonitorNewDiskInterval) continue diff --git a/cmd/batch-expire.go b/cmd/batch-expire.go index dd171ed13..5128f5da4 100644 --- a/cmd/batch-expire.go +++ b/cmd/batch-expire.go @@ -33,7 +33,6 @@ import ( "github.com/minio/minio/internal/bucket/versioning" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/env" "github.com/minio/pkg/v2/wildcard" "github.com/minio/pkg/v2/workers" @@ -156,7 +155,7 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool { } default: // we should never come here, Validate should have caught this. - logger.LogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type) + batchLogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type) return false } @@ -433,7 +432,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo }) if err != nil { stopFn(exp, err) - logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts)) + batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts)) } else { stopFn(exp, err) success = true @@ -471,7 +470,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo for i, err := range errs { if err != nil { stopFn(toDelCopy[i], err) - logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts)) + batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts)) failed++ if attempts == retryAttempts { // all retry attempts failed, record failure if oi, ok := oiCache.Get(toDelCopy[i]); ok { @@ -557,16 +556,16 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo select { case <-saveTicker.C: // persist in-memory state to disk after every 10secs. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) case <-ctx.Done(): // persist in-memory state immediately before exiting due to context cancellation. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job)) return case <-saverQuitCh: // persist in-memory state immediately to disk. 
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job)) return } } @@ -670,7 +669,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo // Notify expire jobs final status to the configured endpoint buf, _ := json.Marshal(ri) if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil { - logger.LogIf(context.Background(), fmt.Errorf("unable to notify %v", err)) + batchLogIf(context.Background(), fmt.Errorf("unable to notify %v", err)) } return nil diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go index 25d6b6faf..db9eb1b56 100644 --- a/cmd/batch-handlers.go +++ b/cmd/batch-handlers.go @@ -48,7 +48,6 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/console" "github.com/minio/pkg/v2/env" "github.com/minio/pkg/v2/policy" @@ -206,7 +205,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a if aerr == nil { return } - logger.LogIf(ctx, + batchLogIf(ctx, fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster", humanize.Ordinal(attempts), res.UploadID, tgtBucket, tgtObject, aerr)) attempts++ @@ -402,7 +401,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay } else { if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) && !isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) } continue } @@ -414,7 +413,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay } else { if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) && !isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) } continue } @@ -443,7 +442,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay return } stopFn(oi, err) - logger.LogIf(ctx, err) + batchLogIf(ctx, err) success = false } else { stopFn(oi, nil) @@ -451,7 +450,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay ri.trackCurrentBucketObject(r.Target.Bucket, oi, success) globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk after every 10secs. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) if wait := globalBatchConfig.ReplicationWait(); wait > 0 { time.Sleep(wait) @@ -466,10 +465,10 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk. 
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job)) if err := r.Notify(ctx, ri); err != nil { - logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err)) + batchLogIf(ctx, fmt.Errorf("unable to notify %v", err)) } cancel() @@ -553,7 +552,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa VersionID: entry.VersionID, }) if err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) continue } @@ -572,7 +571,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa opts, err := batchReplicationOpts(ctx, "", gr.ObjInfo) if err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) continue } @@ -1072,7 +1071,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba BucketLookup: lookupStyle(r.Target.Path), }) if err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) return } @@ -1083,7 +1082,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba writeFn := func(batch []ObjectInfo) { if len(batch) > 0 { if err := r.writeAsArchive(ctx, api, cl, batch); err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) for _, b := range batch { slowCh <- b } @@ -1091,7 +1090,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba ri.trackCurrentBucketBatch(r.Source.Bucket, batch) globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk after every 10secs. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) } } } @@ -1179,7 +1178,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba return } stopFn(result, err) - logger.LogIf(ctx, err) + batchLogIf(ctx, err) success = false } else { stopFn(result, nil) @@ -1187,7 +1186,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba ri.trackCurrentBucketObject(r.Source.Bucket, result, success) globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk after every 10secs. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) if wait := globalBatchConfig.ReplicationWait(); wait > 0 { time.Sleep(wait) @@ -1202,10 +1201,10 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk. 
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job)) if err := r.Notify(ctx, ri); err != nil { - logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err)) + batchLogIf(ctx, fmt.Errorf("unable to notify %v", err)) } cancel() @@ -1500,7 +1499,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) req := &BatchJobRequest{} if err := req.load(ctx, objectAPI, result.Name); err != nil { if !errors.Is(err, errNoSuchJob) { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) } continue } @@ -1516,7 +1515,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) } } - logger.LogIf(ctx, json.NewEncoder(w).Encode(&listResult)) + batchLogIf(ctx, json.NewEncoder(w).Encode(&listResult)) } var errNoSuchJob = errors.New("no such job") @@ -1539,7 +1538,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques req := &BatchJobRequest{} if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, jobID)); err != nil { if !errors.Is(err, errNoSuchJob) { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) } writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) @@ -1548,7 +1547,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques buf, err := yaml.Marshal(req) if err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -1707,7 +1706,7 @@ func (j *BatchJobPool) resume() { ctx, cancel := context.WithCancel(j.ctx) defer cancel() if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) return } for result := range results { @@ -1717,7 +1716,7 @@ func (j *BatchJobPool) resume() { } req := &BatchJobRequest{} if err := req.load(ctx, j.objLayer, result.Name); err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) continue } _, nodeIdx := parseRequestToken(req.ID) @@ -1726,7 +1725,7 @@ func (j *BatchJobPool) resume() { continue } if err := j.queueJob(req); err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) continue } } @@ -1750,7 +1749,7 @@ func (j *BatchJobPool) AddWorker() { if job.Replicate.RemoteToLocal() { if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil { if !isErrBucketNotFound(err) { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) j.canceler(job.ID, false) continue } @@ -1759,7 +1758,7 @@ func (j *BatchJobPool) AddWorker() { } else { if err := job.Replicate.Start(job.ctx, j.objLayer, *job); err != nil { if !isErrBucketNotFound(err) { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) j.canceler(job.ID, false) continue } @@ -1769,14 +1768,14 @@ func (j *BatchJobPool) AddWorker() { case job.KeyRotate != nil: if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil { if !isErrBucketNotFound(err) { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) continue } } case job.Expire != nil: if err := job.Expire.Start(job.ctx, j.objLayer, *job); err != nil { if !isErrBucketNotFound(err) { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) continue } } diff --git a/cmd/batch-rotate.go b/cmd/batch-rotate.go index b50bfc236..b8d6dc2cc 100644 --- a/cmd/batch-rotate.go +++ b/cmd/batch-rotate.go @@ -33,7 +33,6 @@ import ( "github.com/minio/minio/internal/crypto" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/env" 
"github.com/minio/pkg/v2/workers" ) @@ -383,7 +382,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba success := true if err := r.KeyRotate(ctx, api, result); err != nil { stopFn(result, err) - logger.LogIf(ctx, err) + batchLogIf(ctx, err) success = false } else { stopFn(result, nil) @@ -392,7 +391,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba ri.RetryAttempts = attempts globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk after every 10secs. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) if success { break } @@ -412,10 +411,10 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba ri.Failed = ri.ObjectsFailed > 0 globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job)) if err := r.Notify(ctx, ri); err != nil { - logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err)) + batchLogIf(ctx, fmt.Errorf("unable to notify %v", err)) } cancel() diff --git a/cmd/bootstrap-peer-server.go b/cmd/bootstrap-peer-server.go index 52dc77298..14a5baa2c 100644 --- a/cmd/bootstrap-peer-server.go +++ b/cmd/bootstrap-peer-server.go @@ -198,7 +198,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS if err != nil { bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt)) if !isNetworkError(err) { - logger.LogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String()) + bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String()) incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err)) } else { offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, err)) diff --git a/cmd/bucket-encryption-handlers.go b/cmd/bucket-encryption-handlers.go index 7b5c0cad8..018aee6c9 100644 --- a/cmd/bucket-encryption-handlers.go +++ b/cmd/bucket-encryption-handlers.go @@ -114,7 +114,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r // We encode the xml bytes as base64 to ensure there are no encoding // errors. cfgStr := base64.StdEncoding.EncodeToString(configData) - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeSSEConfig, Bucket: bucket, SSEConfig: &cfgStr, @@ -203,7 +203,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter } // Call site replication hook. 
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeSSEConfig, Bucket: bucket, SSEConfig: nil, diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 13c53511a..2edca90b9 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -98,7 +98,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { // Get buckets in the DNS dnsBuckets, err := globalDNSConfig.List() if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) { - logger.LogIf(GlobalContext, err) + dnsLogIf(GlobalContext, err) return } @@ -160,13 +160,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { ctx := GlobalContext for _, err := range g.Wait() { if err != nil { - logger.LogIf(ctx, err) + dnsLogIf(ctx, err) return } } for _, bucket := range bucketsInConflict.ToSlice() { - logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice())) + dnsLogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice())) } var wg sync.WaitGroup @@ -187,7 +187,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { // We go to here, so we know the bucket no longer exists, // but is registered in DNS to this server if err := globalDNSConfig.Delete(bucket); err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w", + dnsLogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w", bucket, err)) } }(bucket) @@ -790,7 +790,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req // check if client is attempting to create more buckets, complain about it. if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets { - logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets)) + internalLogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets), logger.WarningKind) } opts := MakeBucketOptions{ @@ -871,7 +871,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket) // Call site replication hook - logger.LogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)) + replLogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)) // Make sure to add Location information here only for bucket w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket)) @@ -1693,7 +1693,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. 
if globalDNSConfig != nil { if err := globalDNSConfig.Delete(bucket); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err)) + dnsLogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -1703,7 +1703,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. globalReplicationPool.deleteResyncMetadata(ctx, bucket) // Call site replication hook. - logger.LogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete)) + replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete)) // Write success response. writeSuccessNoContent(w) @@ -1776,7 +1776,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri // We encode the xml bytes as base64 to ensure there are no encoding // errors. cfgStr := base64.StdEncoding.EncodeToString(configData) - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeObjectLockConfig, Bucket: bucket, ObjectLockConfig: &cfgStr, @@ -1880,7 +1880,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h // We encode the xml bytes as base64 to ensure there are no encoding // errors. cfgStr := base64.StdEncoding.EncodeToString(configData) - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeTags, Bucket: bucket, Tags: &cfgStr, @@ -1956,7 +1956,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r return } - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeTags, Bucket: bucket, UpdatedAt: updatedAt, diff --git a/cmd/bucket-lifecycle.go b/cmd/bucket-lifecycle.go index f6c74fd17..d5b942e87 100644 --- a/cmd/bucket-lifecycle.go +++ b/cmd/bucket-lifecycle.go @@ -336,7 +336,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) { case newerNoncurrentTask: deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event) case jentry: - logger.LogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName)) + transitionLogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName)) case freeVersionTask: oi := v.ObjectInfo traceFn := globalLifecycleSys.trace(oi) @@ -355,7 +355,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) { // Remove the remote object err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier) if ignoreNotFoundErr(err) != nil { - logger.LogIf(es.ctx, err) + transitionLogIf(es.ctx, err) return } @@ -368,10 +368,10 @@ func (es *expiryState) Worker(input <-chan expiryOp) { auditLogLifecycle(es.ctx, oi, ILMFreeVersionDelete, nil, traceFn) } if ignoreNotFoundErr(err) != nil { - logger.LogIf(es.ctx, err) + transitionLogIf(es.ctx, err) } default: - logger.LogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v)) + bugLogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v)) } } } @@ -486,7 +486,7 @@ func (t *transitionState) 
worker(objectAPI ObjectLayer) { if err := transitionObject(t.ctx, objectAPI, task.objInfo, newLifecycleAuditEvent(task.src, task.event)); err != nil { if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !xnet.IsNetworkOrHostDown(err, false) { if !strings.Contains(err.Error(), "use of closed network connection") { - logger.LogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w", + transitionLogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w", task.event.StorageClass, task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err)) } } @@ -614,7 +614,7 @@ func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *Ob // remote object opts.SkipFreeVersion = true } else { - logger.LogIf(ctx, err) + transitionLogIf(ctx, err) } // Now, delete object from hot-tier namespace @@ -879,7 +879,7 @@ func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string if vid != "" && vid != nullVersionID { _, err := uuid.Parse(vid) if err != nil { - logger.LogIf(ctx, err) + s3LogIf(ctx, err) return opts, InvalidVersionID{ Bucket: bucket, Object: object, diff --git a/cmd/bucket-metadata-sys.go b/cmd/bucket-metadata-sys.go index 865171773..d6c9a9786 100644 --- a/cmd/bucket-metadata-sys.go +++ b/cmd/bucket-metadata-sys.go @@ -500,7 +500,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck errs := g.Wait() for _, err := range errs { if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) } } @@ -542,7 +542,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa case <-t.C: buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{}) if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) break } @@ -560,7 +560,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name) if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) wait() // wait to proceed to next entry. continue } diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go index 848962cc6..591669022 100644 --- a/cmd/bucket-metadata.go +++ b/cmd/bucket-metadata.go @@ -145,7 +145,7 @@ func (b *BucketMetadata) SetCreatedAt(createdAt time.Time) { // If an error is returned the returned metadata will be default initialized. 
func readBucketMetadata(ctx context.Context, api ObjectLayer, name string) (BucketMetadata, error) { if name == "" { - logger.LogIf(ctx, errors.New("bucket name cannot be empty")) + internalLogIf(ctx, errors.New("bucket name cannot be empty"), logger.WarningKind) return BucketMetadata{}, errInvalidArgument } b := newBucketMetadata(name) @@ -400,7 +400,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj for legacyFile := range configs { configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile) if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) } } diff --git a/cmd/bucket-object-lock.go b/cmd/bucket-object-lock.go index 842458fea..a4a042cf0 100644 --- a/cmd/bucket-object-lock.go +++ b/cmd/bucket-object-lock.go @@ -66,7 +66,7 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) { t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return true } if ret.RetainUntilDate.After(t) { @@ -114,7 +114,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke // duration of the retention period. t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return ObjectLocked{} } @@ -140,7 +140,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke if !byPassSet { t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return ObjectLocked{} } @@ -170,7 +170,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID} } @@ -277,7 +277,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined) t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return mode, retainDate, legalHold, ErrObjectLocked } if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) { @@ -324,7 +324,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return mode, retainDate, legalHold, ErrObjectLocked } diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index 321635b73..3bea30ef7 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -113,7 +113,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht } // Call site replication hook. - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypePolicy, Bucket: bucket, Policy: bucketPolicyBytes, @@ -157,7 +157,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r } // Call site replication hook. 
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypePolicy, Bucket: bucket, UpdatedAt: updatedAt, diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index 9e1330b63..b8e1da5c8 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -53,7 +53,7 @@ func (sys *PolicySys) IsAllowed(args policy.BucketPolicyArgs) bool { // Log unhandled errors. if _, ok := err.(BucketPolicyNotFound); !ok { - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err, logger.WarningKind) } // As policy is not available for given bucket name, returns IsOwner i.e. diff --git a/cmd/bucket-quota.go b/cmd/bucket-quota.go index 78eabfa0b..294a1ba60 100644 --- a/cmd/bucket-quota.go +++ b/cmd/bucket-quota.go @@ -64,9 +64,9 @@ func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, e timedout := OperationTimedOut{} if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) { if len(dui.BucketsUsage) > 0 { - logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket) + internalLogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket) } else { - logger.LogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket) + internalLogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket) } } @@ -87,7 +87,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, } if !quotaCfg.IsValid() { if quotaCfg.Type == "fifo" { - logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects")) + internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. 
Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"), logger.WarningKind) return quotaCfg, fmt.Errorf("invalid quota type 'fifo'") } return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg) diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 932ca251a..90593b928 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -423,7 +423,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj rcfg, err := getReplicationConfig(ctx, bucket) if err != nil || rcfg == nil { - logger.LogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket) + replLogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket) sendEvent(eventArgs{ BucketName: bucket, Object: ObjectInfo{ @@ -440,7 +440,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj } dsc, err := parseReplicateDecision(ctx, bucket, dobj.ReplicationState.ReplicateDecisionStr) if err != nil { - logger.LogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s", + replLogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s", bucket, err, dobj.ReplicationState.ReplicateDecisionStr), dobj.ReplicationState.ReplicateDecisionStr) sendEvent(eventArgs{ BucketName: bucket, @@ -494,7 +494,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtEntry.Arn) if tgtClnt == nil { // Skip stale targets if any and log them to be missing at least once. 
- logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn) + replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -606,7 +606,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI return } if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { - logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN) + replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN) sendEvent(eventArgs{ BucketName: dobj.Bucket, Object: ObjectInfo{ @@ -681,7 +681,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI } else { rinfo.VersionPurgeStatus = Failed } - logger.LogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr)) + replLogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr)) if rmErr != nil && minio.IsNetworkOrHostDown(rmErr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) { globalBucketTargetSys.markOffline(tgt.EndpointURL()) } @@ -994,7 +994,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje cfg, err := getReplicationConfig(ctx, bucket) if err != nil { - logger.LogOnceIf(ctx, err, "get-replication-config-"+bucket) + replLogOnceIf(ctx, err, "get-replication-config-"+bucket) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1033,7 +1033,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje for _, tgtArn := range tgtArns { tgt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtArn) if tgt == nil { - logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn) + replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1155,7 +1155,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj } if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { - logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN) + replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1185,7 +1185,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - logger.LogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID) + replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID) } return } @@ -1198,7 +1198,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj size, err := 
objInfo.GetActualSize() if err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1210,7 +1210,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj } if tgt.Bucket == "" { - logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) + replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1236,7 +1236,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo) if err != nil { - logger.LogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err)) + replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err)) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1271,14 +1271,14 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj r, objInfo, putOpts); rinfo.Err != nil { if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL())) + replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL())) } } } else { if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil { if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL())) + replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL())) } } } @@ -1313,7 +1313,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object } if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { - logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN) + replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1344,7 +1344,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - logger.LogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err)) + replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err)) } return } @@ -1364,7 +1364,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object size, err := objInfo.GetActualSize() if err != nil { - logger.LogIf(ctx, err) + 
replLogIf(ctx, err) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1381,7 +1381,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object } if tgt.Bucket == "" { - logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) + replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1411,7 +1411,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object if rAction == replicateNone { if ri.OpType == replication.ExistingObjectReplicationType && objInfo.ModTime.Unix() > oi.LastModified.Unix() && objInfo.VersionID == nullVersionID { - logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL())) + replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL())) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1451,7 +1451,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object rAction = replicateAll default: rinfo.Err = cerr - logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD", + replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD", bucket, object, objInfo.VersionID, tgt.EndpointURL(), cerr)) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, @@ -1501,13 +1501,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object } if _, rinfo.Err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); rinfo.Err != nil { rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) + replLogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) } } else { var putOpts minio.PutObjectOptions putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo) if err != nil { - logger.LogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err)) + replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err)) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1541,7 +1541,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object r, objInfo, putOpts); rinfo.Err != nil { if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) + replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) } else { 
rinfo.ReplicationStatus = replication.Completed } @@ -1550,7 +1550,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil { if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) + replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) } else { rinfo.ReplicationStatus = replication.Completed } @@ -1598,7 +1598,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob return } acancel() - logger.LogIf(actx, + replLogIf(actx, fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster", humanize.Ordinal(attempts), uploadID, bucket, object, aerr)) attempts++ @@ -1866,7 +1866,7 @@ func (p *ReplicationPool) AddMRFWorker() { globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) default: - logger.LogOnceIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type") + bugLogIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type") } case <-p.mrfWorkerKillCh: return @@ -1910,7 +1910,7 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT atomic.AddInt32(opTracker, -1) } default: - logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") + bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") } } } @@ -1949,7 +1949,7 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation case DeletedObjectReplicationInfo: replicateDelete(p.ctx, v, p.objLayer) default: - logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") + bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") } } } @@ -2096,9 +2096,9 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) { p.mu.RUnlock() switch prio { case "fast": - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic"), string(replicationSubsystem)) + replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic"), string(replicationSubsystem), logger.WarningKind) case "slow": - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem)) + replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind) default: maxWorkers = min(maxWorkers, WorkerMaxLimit) if p.ActiveWorkers() < maxWorkers { @@ -2153,9 +2153,9 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf p.mu.RUnlock() switch prio { case "fast": - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes"), string(replicationSubsystem)) + replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with 
incoming deletes"), string(replicationSubsystem), logger.WarningKind) case "slow": - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem)) + replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind) default: maxWorkers = min(maxWorkers, WorkerMaxLimit) if p.ActiveWorkers() < maxWorkers { @@ -2288,7 +2288,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRa if rs != nil { h, err := rs.ToHeader() if err != nil { - logger.LogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err)) + replLogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err)) continue } gopts.Set(xhttp.Range, h) @@ -2656,7 +2656,7 @@ func (s *replicationResyncer) PersistToDisk(ctx context.Context, objectAPI Objec } if updt { if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil { - logger.LogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err)) + replLogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err)) } else { lastResyncStatusSave[bucket] = brs.LastUpdate } @@ -2744,12 +2744,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object objInfoCh := make(chan ObjectInfo) cfg, err := getReplicationConfig(ctx, opts.bucket) if err != nil { - logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err)) + replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err)) return } tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, opts.bucket) if err != nil { - logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err)) + replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err)) return } rcfg := replicationConfig{ @@ -2762,12 +2762,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object TargetArn: opts.arn, }) if len(tgtArns) != 1 { - logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn)) + replLogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn)) return } tgt := globalBucketTargetSys.GetRemoteTargetClient(opts.bucket, opts.arn) if tgt == nil { - logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn)) + replLogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn)) return } // mark resync status as resync started @@ -2778,7 +2778,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object // Walk through all object versions - Walk() is always in ascending order needed to ensure // delete marker replicated to target after object version is first created. 
if err := objectAPI.Walk(ctx, opts.bucket, "", objInfoCh, WalkOptions{}); err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) return } @@ -3053,7 +3053,7 @@ func (p *ReplicationPool) loadResync(ctx context.Context, buckets []BucketInfo, meta, err := loadBucketResyncMetadata(ctx, bucket, objAPI) if err != nil { if !errors.Is(err, errVolumeNotFound) { - logger.LogIf(ctx, err) + replLogIf(ctx, err) } continue } @@ -3140,18 +3140,18 @@ func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationR func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) { cfg, err := getReplicationConfig(ctx, bucket) if err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) return nil, err } tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket) if err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) return nil, err } objInfoCh := make(chan ObjectInfo, 10) if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, WalkOptions{}); err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) return nil, err } rcfg := replicationConfig{ @@ -3535,7 +3535,7 @@ func (p *ReplicationPool) processMRF() { continue } if err := p.queueMRFHeal(); err != nil && !osIsNotExist(err) { - logger.LogIf(p.ctx, err) + replLogIf(p.ctx, err) } pTimer.Reset(mrfQueueInterval) case <-p.ctx.Done(): diff --git a/cmd/bucket-targets.go b/cmd/bucket-targets.go index ca901f8e4..149b30d5e 100644 --- a/cmd/bucket-targets.go +++ b/cmd/bucket-targets.go @@ -20,7 +20,6 @@ package cmd import ( "context" "errors" - "fmt" "net/url" "sync" "time" @@ -32,7 +31,6 @@ import ( "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" ) const ( @@ -131,7 +129,7 @@ func (sys *BucketTargetSys) initHC(ep *url.URL) { func newHCClient() *madmin.AnonymousClient { clnt, e := madmin.NewAnonymousClientNoEndpoint() if e != nil { - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to initialize health check client"), string(replicationSubsystem)) + bugLogIf(GlobalContext, errors.New("Unable to initialize health check client")) return nil } clnt.SetCustomTransport(globalRemoteTargetTransport) @@ -624,7 +622,7 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) { for _, tgt := range cfg.Targets { tgtClient, err := sys.getRemoteTargetClient(&tgt) if err != nil { - logger.LogIf(GlobalContext, err) + replLogIf(GlobalContext, err) continue } sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: tgtClient} diff --git a/cmd/bucket-versioning-handler.go b/cmd/bucket-versioning-handler.go index 64728a88b..5480748c9 100644 --- a/cmd/bucket-versioning-handler.go +++ b/cmd/bucket-versioning-handler.go @@ -108,7 +108,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r // We encode the xml bytes as base64 to ensure there are no encoding // errors. 
cfgStr := base64.StdEncoding.EncodeToString(configData) - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeVersionConfig, Bucket: bucket, Versioning: &cfgStr, diff --git a/cmd/callhome.go b/cmd/callhome.go index f49e6ad5e..074985956 100644 --- a/cmd/callhome.go +++ b/cmd/callhome.go @@ -29,7 +29,6 @@ import ( "time" "github.com/minio/madmin-go/v3" - "github.com/minio/minio/internal/logger" ) var callhomeLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second) @@ -112,7 +111,7 @@ func performCallhome(ctx context.Context) { deadline := 10 * time.Second // Default deadline is 10secs for callhome objectAPI := newObjectLayerFn() if objectAPI == nil { - logger.LogIf(ctx, errors.New("Callhome: object layer not ready")) + internalLogIf(ctx, errors.New("Callhome: object layer not ready")) return } @@ -145,7 +144,7 @@ func performCallhome(ctx context.Context) { // Received all data. Send to SUBNET and return err := sendHealthInfo(ctx, healthInfo) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err)) + internalLogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err)) } return } @@ -180,12 +179,12 @@ func createHealthJSONGzip(ctx context.Context, healthInfo madmin.HealthInfo) []b enc := json.NewEncoder(gzWriter) if e := enc.Encode(header); e != nil { - logger.LogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e)) + internalLogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e)) return nil } if e := enc.Encode(healthInfo); e != nil { - logger.LogIf(ctx, fmt.Errorf("Could not encode health info: %w", e)) + internalLogIf(ctx, fmt.Errorf("Could not encode health info: %w", e)) return nil } diff --git a/cmd/common-main.go b/cmd/common-main.go index 221ae64b9..d24a6c7d7 100644 --- a/cmd/common-main.go +++ b/cmd/common-main.go @@ -1044,7 +1044,7 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu } if err = manager.AddCertificate(certFile, keyFile); err != nil { err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err) - logger.LogIf(GlobalContext, err, logger.ErrorKind) + bootLogIf(GlobalContext, err, logger.ErrorKind) } } secureConn = true diff --git a/cmd/config-current.go b/cmd/config-current.go index 824951e82..ed45acc2e 100644 --- a/cmd/config-current.go +++ b/cmd/config-current.go @@ -479,7 +479,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) } if err == nil && dnsURL != "" { bootstrapTraceMsg("initialize remote bucket DNS store") @@ -487,27 +487,27 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { dns.Authentication(dnsUser, dnsPass), dns.RootCAs(globalRootCAs)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) } } etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) } if etcdCfg.Enabled { 
bootstrapTraceMsg("initialize etcd store") globalEtcdClient, err = etcd.New(etcdCfg) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) } if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil { if globalDNSConfig != nil { // if global DNS is already configured, indicate with a warning, in case // users are confused. - logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig)) + configLogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig)) } else { globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config, dns.DomainNames(globalDomainNames), @@ -516,7 +516,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { dns.CoreDNSPath(etcdCfg.CoreDNSPath), ) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w", + configLogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w", globalDomainNames, err)) } } @@ -532,7 +532,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err)) + configLogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err)) } globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled @@ -545,19 +545,19 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { bootstrapTraceMsg("initialize the event notification targets") globalNotifyTargetList, err = notify.FetchEnabledTargets(GlobalContext, s, transport) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err)) } bootstrapTraceMsg("initialize the lambda targets") globalLambdaTargetList, err = lambda.FetchEnabledTargets(GlobalContext, s, transport) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err)) } bootstrapTraceMsg("applying the dynamic configuration") // Apply dynamic config values if err := applyDynamicConfig(ctx, objAPI, s); err != nil { - logger.LogIf(ctx, err) + configLogIf(ctx, err) } } @@ -571,7 +571,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf case config.APISubSys: apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err)) + configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err)) } globalAPIConfig.init(apiConfig, setDriveCounts) @@ -607,33 +607,33 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf scannerCycle.Store(scannerCfg.Cycle) scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions) scannerExcessFolders.Store(scannerCfg.ExcessFolders) - logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait)) + configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait)) case config.LoggerWebhookSubSys: loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.LoggerWebhookSubSys) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err)) + 
configLogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err)) } userAgent := getUserAgent(getMinioMode()) for n, l := range loggerCfg.HTTP { if l.Enabled { - l.LogOnceIf = logger.LogOnceConsoleIf + l.LogOnceIf = configLogOnceConsoleIf l.UserAgent = userAgent l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) } loggerCfg.HTTP[n] = l } if errs := logger.UpdateHTTPWebhooks(ctx, loggerCfg.HTTP); len(errs) > 0 { - logger.LogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs)) + configLogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs)) } case config.AuditWebhookSubSys: loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditWebhookSubSys) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err)) } userAgent := getUserAgent(getMinioMode()) for n, l := range loggerCfg.AuditWebhook { if l.Enabled { - l.LogOnceIf = logger.LogOnceConsoleIf + l.LogOnceIf = configLogOnceConsoleIf l.UserAgent = userAgent l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) } @@ -641,30 +641,30 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf } if errs := logger.UpdateAuditWebhooks(ctx, loggerCfg.AuditWebhook); len(errs) > 0 { - logger.LogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs)) + configLogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs)) } case config.AuditKafkaSubSys: loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditKafkaSubSys) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err)) } for n, l := range loggerCfg.AuditKafka { if l.Enabled { if l.TLS.Enable { l.TLS.RootCAs = globalRootCAs } - l.LogOnce = logger.LogOnceIf + l.LogOnce = configLogOnceIf loggerCfg.AuditKafka[n] = l } } if errs := logger.UpdateAuditKafkaTargets(ctx, loggerCfg); len(errs) > 0 { - logger.LogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs)) + configLogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs)) } case config.StorageClassSubSys: for i, setDriveCount := range setDriveCounts { sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err)) break } // if we validated all setDriveCounts and it was successful @@ -676,7 +676,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf case config.SubnetSubSys: subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalProxyTransport) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err)) } else { globalSubnetConfig.Update(subnetConfig, globalIsCICD) globalSubnetConfig.ApplyEnv() // update environment settings for Console UI @@ -684,7 +684,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf case config.CallhomeSubSys: callhomeCfg, err := callhome.LookupConfig(s[config.CallhomeSubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err)) + 
configLogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err)) } else { enable := callhomeCfg.Enable && !globalCallhomeConfig.Enabled() globalCallhomeConfig.Update(callhomeCfg) @@ -694,17 +694,17 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf } case config.DriveSubSys: if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err)) } else { err := globalDriveConfig.Update(driveConfig) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err)) + configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err)) } } case config.CacheSubSys: cacheCfg, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err)) } else { globalCacheConfig.Update(cacheCfg) } @@ -749,7 +749,7 @@ func autoGenerateRootCredentials() { if manager, ok := GlobalKMS.(kms.KeyManager); ok { stat, err := GlobalKMS.Stat(GlobalContext) if err != nil { - logger.LogIf(GlobalContext, err, "Unable to generate root credentials using KMS") + kmsLogIf(GlobalContext, err, "Unable to generate root credentials using KMS") return } diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go index 6099f29df..b03197c64 100644 --- a/cmd/data-scanner.go +++ b/cmd/data-scanner.go @@ -41,7 +41,6 @@ import ( "github.com/minio/minio/internal/config/heal" "github.com/minio/minio/internal/event" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/console" uatomic "go.uber.org/atomic" ) @@ -122,13 +121,13 @@ func readBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer) backgroundH buf, err := readConfig(ctx, objAPI, backgroundHealInfoPath) if err != nil { if !errors.Is(err, errConfigNotFound) { - logger.LogOnceIf(ctx, err, backgroundHealInfoPath) + internalLogOnceIf(ctx, err, backgroundHealInfoPath) } return backgroundHealInfo{} } var info backgroundHealInfo if err = json.Unmarshal(buf, &info); err != nil { - logger.LogOnceIf(ctx, err, backgroundHealInfoPath) + bugLogIf(ctx, err, backgroundHealInfoPath) } return info } @@ -140,13 +139,13 @@ func saveBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer, info backgr b, err := json.Marshal(info) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return } // Get last healing information err = saveConfig(ctx, objAPI, backgroundHealInfoPath, b) if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } } @@ -167,7 +166,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) { cycleInfo.next = binary.LittleEndian.Uint64(buf[:8]) buf = buf[8:] _, err := cycleInfo.UnmarshalMsg(buf) - logger.LogIf(ctx, err) + bugLogIf(ctx, err) } scannerTimer := time.NewTimer(scannerCycle.Load()) @@ -204,7 +203,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) { results := make(chan DataUsageInfo, 1) go storeDataUsageInBackend(ctx, objAPI, results) err := objAPI.NSScanner(ctx, results, uint32(cycleInfo.current), scanMode) - logger.LogOnceIf(ctx, err, "ns-scanner") + scannerLogIf(ctx, err) res := map[string]string{"cycle": strconv.FormatUint(cycleInfo.current, 10)} if err != nil { res["error"] = err.Error() @@ -224,7 +223,7 @@ func runDataScanner(ctx context.Context, 
objAPI ObjectLayer) { binary.LittleEndian.PutUint64(tmp, cycleInfo.next) tmp, _ = cycleInfo.MarshalMsg(tmp) err = saveConfig(ctx, objAPI, dataUsageBloomNamePath, tmp) - logger.LogOnceIf(ctx, err, dataUsageBloomNamePath) + scannerLogIf(ctx, err, dataUsageBloomNamePath) } } } @@ -752,7 +751,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int versionID: "", }, madmin.HealItemObject) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - logger.LogOnceIf(ctx, err, entry.name) + scannerLogIf(ctx, err) } foundObjs = foundObjs || err == nil return @@ -769,7 +768,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int }, madmin.HealItemObject) stopFn(int(ver.Size)) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - logger.LogOnceIf(ctx, err, fiv.Name) + scannerLogIf(ctx, err, fiv.Name) } if err == nil { successVersions++ @@ -945,7 +944,7 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi Object func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (action lifecycle.Action, size int64) { size, err := oi.GetActualSize() if i.debug { - logger.LogIf(ctx, err) + scannerLogIf(ctx, err) } if i.lifeCycle == nil { return action, size @@ -1123,7 +1122,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling}) done() if err != nil { - logger.LogOnceIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath()) + healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath()) } } } @@ -1199,7 +1198,7 @@ func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, if isErrObjectNotFound(err) || isErrVersionNotFound(err) { return false } - logger.LogOnceIf(ctx, err, obj.Name) + ilmLogIf(ctx, err) return false } // Notification already sent in *expireTransitionedObject*, just return 'true' here. @@ -1248,7 +1247,7 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay return false } // Assume it is still there. 
- logger.LogOnceIf(ctx, err, "non-transition-expiry") + ilmLogOnceIf(ctx, err, "non-transition-expiry") return false } if dobj.Name == "" { diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go index 15ea11a0f..262948b28 100644 --- a/cmd/data-usage-cache.go +++ b/cmd/data-usage-cache.go @@ -37,7 +37,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/bucket/lifecycle" "github.com/minio/minio/internal/hash" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" "github.com/valyala/bytebufferpool" ) @@ -635,7 +634,7 @@ func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHas d.Cache[hash.Key()] = e for ch := range e.Children { if ch == hash.Key() { - logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference")) + scannerLogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference")) return } d.copyWithChildren(src, dataUsageHash(ch), &hash) @@ -1041,7 +1040,7 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) } if retries == 5 { - logger.LogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache") + scannerLogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache") } return nil diff --git a/cmd/data-usage.go b/cmd/data-usage.go index 13acb5af9..339ac16a2 100644 --- a/cmd/data-usage.go +++ b/cmd/data-usage.go @@ -25,7 +25,6 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/minio/minio/internal/cachevalue" - "github.com/minio/minio/internal/logger" ) const ( @@ -49,7 +48,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan json := jsoniter.ConfigCompatibleWithStandardLibrary dataUsageJSON, err := json.Marshal(dataUsageInfo) if err != nil { - logger.LogIf(ctx, err) + scannerLogIf(ctx, err) continue } if attempts > 10 { @@ -57,7 +56,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan attempts = 1 } if err = saveConfig(ctx, objAPI, dataUsageObjNamePath, dataUsageJSON); err != nil { - logger.LogOnceIf(ctx, err, dataUsageObjNamePath) + scannerLogOnceIf(ctx, err, dataUsageObjNamePath) } attempts++ } diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go index c112724c8..e2f062162 100644 --- a/cmd/encryption-v1.go +++ b/cmd/encryption-v1.go @@ -1089,7 +1089,7 @@ func (o *ObjectInfo) decryptPartsChecksums() { if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted { decrypted, err := o.metadataDecrypter()("object-checksum", data) if err != nil { - logger.LogIf(GlobalContext, err) + encLogIf(GlobalContext, err) return } data = decrypted @@ -1151,7 +1151,7 @@ func (o *ObjectInfo) decryptChecksums(part int) map[string]string { if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted { decrypted, err := o.metadataDecrypter()("object-checksum", data) if err != nil { - logger.LogIf(GlobalContext, err) + encLogIf(GlobalContext, err) return nil } data = decrypted diff --git a/cmd/endpoint.go b/cmd/endpoint.go index a80605dd4..c2f397aa0 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -514,7 +514,7 @@ func (l EndpointServerPools) hostsSorted() []*xnet.Host { } host, err := xnet.ParseHost(hostStr) if err != nil { - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) continue } hosts[i] = host @@ -645,7 +645,7 @@ func (endpoints Endpoints) UpdateIsLocal() error { )) ctx := 
logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", + bootLogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", endpoints[i].Hostname()), endpoints[i].Hostname(), logger.ErrorKind) } @@ -675,7 +675,7 @@ func (endpoints Endpoints) UpdateIsLocal() error { )) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, err, endpoints[i].Hostname(), logger.ErrorKind) + bootLogOnceIf(ctx, err, endpoints[i].Hostname(), logger.ErrorKind) } } else { resolvedList[i] = true @@ -837,7 +837,7 @@ func (p PoolEndpointList) UpdateIsLocal() error { )) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", + bootLogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", endpoint.Hostname()), endpoint.Hostname(), logger.ErrorKind) } continue @@ -866,7 +866,7 @@ func (p PoolEndpointList) UpdateIsLocal() error { )) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, fmt.Errorf("Unable to resolve DNS for %s: %w", endpoint, err), endpoint.Hostname(), logger.ErrorKind) + bootLogOnceIf(ctx, fmt.Errorf("Unable to resolve DNS for %s: %w", endpoint, err), endpoint.Hostname(), logger.ErrorKind) } } else { resolvedList[endpoint] = true diff --git a/cmd/erasure-common.go b/cmd/erasure-common.go index e47aa8746..f413138e4 100644 --- a/cmd/erasure-common.go +++ b/cmd/erasure-common.go @@ -25,7 +25,6 @@ import ( "sync" "time" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/sync/errgroup" ) @@ -163,7 +162,7 @@ func readMultipleFiles(ctx context.Context, disks []StorageAPI, req ReadMultiple continue } if !IsErr(err, ignoredErrs...) 
{ - logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)", + storageLogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)", disks[index], req.Bucket, req.Prefix, err), disks[index].String()) } diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go index 413a733ec..85704de1c 100644 --- a/cmd/erasure-healing.go +++ b/cmd/erasure-healing.go @@ -450,7 +450,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(availableDisks) { err := fmt.Errorf("unexpected file distribution (%v) from available disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", latestMeta.Erasure.Distribution, availableDisks, bucket, object, versionID) - logger.LogOnceIf(ctx, err, "heal-object-available-disks") + healingLogOnceIf(ctx, err, "heal-object-available-disks") return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, bucket, object, versionID), err } @@ -460,7 +460,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(outDatedDisks) { err := fmt.Errorf("unexpected file distribution (%v) from outdated disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", latestMeta.Erasure.Distribution, outDatedDisks, bucket, object, versionID) - logger.LogOnceIf(ctx, err, "heal-object-outdated-disks") + healingLogOnceIf(ctx, err, "heal-object-outdated-disks") return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, bucket, object, versionID), err } @@ -470,7 +470,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(partsMetadata) { err := fmt.Errorf("unexpected file distribution (%v) from metadata entries (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", latestMeta.Erasure.Distribution, len(partsMetadata), bucket, object, versionID) - logger.LogOnceIf(ctx, err, "heal-object-metadata-entries") + healingLogOnceIf(ctx, err, "heal-object-metadata-entries") return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, bucket, object, versionID), err } diff --git a/cmd/erasure-metadata-utils.go b/cmd/erasure-metadata-utils.go index 82f91d5e8..17ceb47da 100644 --- a/cmd/erasure-metadata-utils.go +++ b/cmd/erasure-metadata-utils.go @@ -22,7 +22,6 @@ import ( "errors" "hash/crc32" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/sync/errgroup" ) @@ -284,7 +283,7 @@ func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []Stora // the corresponding error in errs slice is not nil func evalDisks(disks []StorageAPI, errs []error) []StorageAPI { if len(errs) != len(disks) { - logger.LogIf(GlobalContext, errors.New("unexpected drives/errors slice length")) + bugLogIf(GlobalContext, errors.New("unexpected drives/errors slice length")) return nil } newDisks := make([]StorageAPI, len(disks)) diff --git a/cmd/erasure-metadata.go b/cmd/erasure-metadata.go index 2154a8785..15b059d65 100644 --- a/cmd/erasure-metadata.go +++ b/cmd/erasure-metadata.go @@ -30,7 +30,6 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash/sha256" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/sync/errgroup" ) @@ 
-268,7 +267,7 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn // Continue to towards the next part. partOffset -= part.Size } - logger.LogIf(ctx, InvalidRange{}) + internalLogIf(ctx, InvalidRange{}) // Offset beyond the size of the object return InvalidRange. return 0, 0, InvalidRange{} } diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go index 1179a2d7e..96c0f3edb 100644 --- a/cmd/erasure-multipart.go +++ b/cmd/erasure-multipart.go @@ -590,7 +590,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo data := r.Reader // Validate input data size and it can never be less than zero. if data.Size() < -1 { - logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) + bugLogIf(ctx, errInvalidArgument, logger.ErrorKind) return pi, toObjectErr(errInvalidArgument) } @@ -1026,7 +1026,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str if len(partInfoFiles) != len(parts) { // Should only happen through internal error err := fmt.Errorf("unexpected part result count: %d, want %d", len(partInfoFiles), len(parts)) - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return oi, toObjectErr(err, bucket, object) } @@ -1096,7 +1096,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str _, err := pfi.UnmarshalMsg(part.Data) if err != nil { // Maybe crash or similar. - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return oi, InvalidPart{ PartNumber: partID, } @@ -1105,7 +1105,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str partI := pfi.Parts[0] partNumber := partI.Number if partID != partNumber { - logger.LogIf(ctx, fmt.Errorf("part.%d.meta has incorrect corresponding part number: expected %d, got %d", partID, partID, partI.Number)) + internalLogIf(ctx, fmt.Errorf("part.%d.meta has incorrect corresponding part number: expected %d, got %d", partID, partID, partI.Number)) return oi, InvalidPart{ PartNumber: partID, } diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index 52f0081b8..ada14422e 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -928,7 +928,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s if !fi.Deleted && len(fi.Erasure.Distribution) != len(onlineDisks) { err := fmt.Errorf("unexpected file distribution (%v) from online disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", fi.Erasure.Distribution, onlineDisks, bucket, object, opts.VersionID) - logger.LogOnceIf(ctx, err, "get-object-file-info-manually-modified") + storageLogOnceIf(ctx, err, "get-object-file-info-manually-modified") return fi, nil, nil, toObjectErr(err, bucket, object, opts.VersionID) } @@ -1107,7 +1107,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r * // Validate input data size and it can never be less than zero. if data.Size() < -1 { - logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) + bugLogIf(ctx, errInvalidArgument, logger.ErrorKind) return ObjectInfo{}, toObjectErr(errInvalidArgument) } @@ -1297,7 +1297,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st // Validate input data size and it can never be less than -1. 
if data.Size() < -1 { - logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) + bugLogIf(ctx, errInvalidArgument, logger.ErrorKind) return ObjectInfo{}, toObjectErr(errInvalidArgument) } @@ -1459,7 +1459,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st toEncode = ra defer ra.Close() } - logger.LogIf(ctx, err) + bugLogIf(ctx, err) } n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum) closeBitrotWriters(writers) @@ -2389,7 +2389,7 @@ func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, obje }, ObjectOptions{ VersionID: oi.VersionID, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err)) + storageLogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err)) return err } return nil diff --git a/cmd/erasure-server-pool-decom.go b/cmd/erasure-server-pool-decom.go index 6c5635030..8ff9ddc3c 100644 --- a/cmd/erasure-server-pool-decom.go +++ b/cmd/erasure-server-pool-decom.go @@ -467,7 +467,7 @@ func (p poolMeta) save(ctx context.Context, pools []*erasureSets) error { for i, eset := range pools { if err = saveConfig(ctx, eset, poolMetaName, buf); err != nil { if !errors.Is(err, context.Canceled) { - logger.LogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err)) + storageLogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err)) } return err } @@ -542,11 +542,11 @@ func (z *erasureServerPools) Init(ctx context.Context) error { return } if configRetriableErrors(err) { - logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err)) + decomLogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err)) time.Sleep(time.Second + time.Duration(r.Float64()*float64(5*time.Second))) continue } - logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err)) + decomLogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err)) return } } @@ -741,7 +741,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool const envDecomWorkers = "_MINIO_DECOMMISSION_WORKERS" workerSize, err := env.GetInt(envDecomWorkers, len(pool.sets)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) + decomLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) workerSize = len(pool.sets) } @@ -852,7 +852,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool } stopFn(err) if err != nil { - logger.LogIf(ctx, err) + decomLogIf(ctx, err) failure = true } z.poolMetaMutex.Lock() @@ -877,7 +877,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool }); err != nil { stopFn(err) failure = true - logger.LogIf(ctx, err) + decomLogIf(ctx, err) continue } stopFn(nil) @@ -906,20 +906,20 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) { ignore = true stopFn(err) - logger.LogIf(ctx, err) + decomLogIf(ctx, err) break } } if err != nil { failure = true - logger.LogIf(ctx, err) + decomLogIf(ctx, err) stopFn(err) continue } if err = z.decommissionObject(ctx, bi.Name, gr); err != nil { stopFn(err) failure = true - logger.LogIf(ctx, 
err) + decomLogIf(ctx, err) continue } stopFn(nil) @@ -953,13 +953,13 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool stopFn(err) auditLogDecom(ctx, "DecomDeleteObject", bi.Name, entry.name, "", err) if err != nil { - logger.LogIf(ctx, err) + decomLogIf(ctx, err) } } z.poolMetaMutex.Lock() z.poolMeta.TrackCurrentBucketObject(idx, bi.Name, entry.name) ok, err := z.poolMeta.updateAfter(ctx, idx, z.serverPools, 30*time.Second) - logger.LogIf(ctx, err) + decomLogIf(ctx, err) if ok { globalNotificationSys.ReloadPoolMeta(ctx) } @@ -987,7 +987,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool } setN := humanize.Ordinal(setIdx + 1) retryDur := time.Duration(rand.Float64() * float64(5*time.Second)) - logger.LogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v, retrying in %v", setN, err, retryDur), "decom-listing-failed"+setN) + decomLogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v, retrying in %v", setN, err, retryDur), "decom-listing-failed"+setN) time.Sleep(retryDur) } }(setIdx) @@ -1055,7 +1055,7 @@ func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx i z.poolMetaMutex.Lock() if z.poolMeta.BucketDone(idx, bucket) { // remove from pendingBuckets and persist. - logger.LogIf(ctx, z.poolMeta.save(ctx, z.serverPools)) + decomLogIf(ctx, z.poolMeta.save(ctx, z.serverPools)) } z.poolMetaMutex.Unlock() continue @@ -1072,7 +1072,7 @@ func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx i z.poolMetaMutex.Lock() if z.poolMeta.BucketDone(idx, bucket) { - logger.LogIf(ctx, z.poolMeta.save(ctx, z.serverPools)) + decomLogIf(ctx, z.poolMeta.save(ctx, z.serverPools)) } z.poolMetaMutex.Unlock() } @@ -1170,8 +1170,8 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in dctx = logger.SetReqInfo(dctx, &logger.ReqInfo{}) if err := z.decommissionInBackground(dctx, idx); err != nil { - logger.LogIf(GlobalContext, err) - logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) + decomLogIf(GlobalContext, err) + decomLogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) return } @@ -1181,20 +1181,20 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in z.poolMetaMutex.Unlock() if !failed { - logger.Event(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine) + decomLogEvent(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine) err := z.checkAfterDecom(dctx, idx) if err != nil { - logger.LogIf(ctx, err) + decomLogIf(ctx, err) failed = true } } if failed { // Decommission failed indicate as such. - logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) + decomLogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) } else { // Complete the decommission.. 
- logger.LogIf(GlobalContext, z.CompleteDecommission(dctx, idx)) + decomLogIf(GlobalContext, z.CompleteDecommission(dctx, idx)) } } diff --git a/cmd/erasure-server-pool-rebalance.go b/cmd/erasure-server-pool-rebalance.go index 17c1d05fd..ed9eddc27 100644 --- a/cmd/erasure-server-pool-rebalance.go +++ b/cmd/erasure-server-pool-rebalance.go @@ -146,7 +146,7 @@ func (z *erasureServerPools) updateRebalanceStats(ctx context.Context) error { lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName) lkCtx, err := lock.GetLock(ctx, globalOperationTimeout) if err != nil { - logger.LogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) + rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) return err } defer lock.Unlock(lkCtx) @@ -423,7 +423,7 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int) stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg) err := z.saveRebalanceStats(ctx, poolIdx, rebalSaveStats) stopFn(err) - logger.LogIf(ctx, err) + rebalanceLogIf(ctx, err) timer.Reset(randSleepFor()) if rebalDone { @@ -432,7 +432,7 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int) } }() - logger.Event(ctx, "Pool %d rebalancing is started", poolIdx+1) + rebalanceLogEvent(ctx, "Pool %d rebalancing is started", poolIdx+1) for { select { @@ -451,14 +451,14 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int) err = z.rebalanceBucket(ctx, bucket, poolIdx) if err != nil { stopFn(err) - logger.LogIf(ctx, err) + rebalanceLogIf(ctx, err) return } stopFn(nil) z.bucketRebalanceDone(bucket, poolIdx) } - logger.Event(ctx, "Pool %d rebalancing is done", poolIdx+1) + rebalanceLogEvent(ctx, "Pool %d rebalancing is done", poolIdx+1) return err } @@ -535,7 +535,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, const envRebalanceWorkers = "_MINIO_REBALANCE_WORKERS" workerSize, err := env.GetInt(envRebalanceWorkers, len(pool.sets)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) + rebalanceLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) workerSize = len(pool.sets) } @@ -630,7 +630,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, }) var failure bool if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - logger.LogIf(ctx, err) + rebalanceLogIf(ctx, err) failure = true } @@ -665,14 +665,14 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, } if err != nil { failure = true - logger.LogIf(ctx, err) + rebalanceLogIf(ctx, err) stopFn(err) continue } if err = z.rebalanceObject(ctx, bucket, gr); err != nil { failure = true - logger.LogIf(ctx, err) + rebalanceLogIf(ctx, err) stopFn(err) continue } @@ -706,7 +706,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, stopFn(err) auditLogRebalance(ctx, "Rebalance:DeleteObject", bucket, entry.name, "", err) if err != nil { - logger.LogIf(ctx, err) + rebalanceLogIf(ctx, err) } } } @@ -724,7 +724,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, return } setN := humanize.Ordinal(setIdx + 1) - logger.LogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v", setN, err), "rebalance-listing-failed"+setN) + rebalanceLogIf(ctx, 
fmt.Errorf("listing objects from %s set failed with %v", setN, err), "rebalance-listing-failed"+setN) }(setIdx) } @@ -743,7 +743,7 @@ func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName) lkCtx, err := lock.GetLock(ctx, globalOperationTimeout) if err != nil { - logger.LogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) + rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) return err } defer lock.Unlock(lkCtx) diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index f65641c0c..4d230c964 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -207,7 +207,7 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ logger.Fatal(err, "Unable to initialize backend") } retry := time.Duration(r.Float64() * float64(5*time.Second)) - logger.LogIf(ctx, fmt.Errorf("Unable to initialize backend: %w, retrying in %s", err, retry)) + storageLogIf(ctx, fmt.Errorf("Unable to initialize backend: %w, retrying in %s", err, retry)) time.Sleep(retry) attempt++ continue @@ -376,7 +376,7 @@ func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, bucket, ob } } // Should not happen, but print values just in case. - logger.LogIf(ctx, fmt.Errorf("reached end of serverPools (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) + storageLogIf(ctx, fmt.Errorf("reached end of serverPools (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) return -1 } @@ -610,7 +610,7 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error { for _, err := range g.Wait() { if err != nil { - logger.LogIf(ctx, err) + storageLogIf(ctx, err) } // let's the rest shutdown } @@ -714,7 +714,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, updates chan<- DataU // Start scanner. Blocks until done. 
err := erObj.nsScanner(ctx, allBuckets, wantCycle, updates, healScanMode) if err != nil { - logger.LogIf(ctx, err) + scannerLogIf(ctx, err) mu.Lock() if firstErr == nil { firstErr = err @@ -1329,7 +1329,7 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre merged, err := z.listPath(ctx, &opts) if err != nil && err != io.EOF { if !isErrBucketNotFound(err) { - logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) + storageLogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) } return loi, toObjectErr(err, bucket) } @@ -1523,7 +1523,7 @@ func (z *erasureServerPools) listObjectsGeneric(ctx context.Context, bucket, pre merged, err := z.listPath(ctx, &opts) if err != nil && err != io.EOF { if !isErrBucketNotFound(err) { - logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) + storageLogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) } return loi, toObjectErr(err, bucket) } @@ -1945,7 +1945,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi for _, pool := range z.serverPools { result, err := pool.HealFormat(ctx, dryRun) if err != nil && !errors.Is(err, errNoHealRequired) { - logger.LogOnceIf(ctx, err, "erasure-heal-format") + healingLogOnceIf(ctx, err, "erasure-heal-format") continue } // Count errNoHealRequired across all serverPools, @@ -2136,7 +2136,7 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re } if err := listPathRaw(ctx, lopts); err != nil { - logger.LogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts)) + storageLogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts)) cancel() return } @@ -2181,7 +2181,7 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str if opts.Remove && !opts.DryRun { err := z.CheckAbandonedParts(ctx, bucket, entry.name, opts) if err != nil { - logger.LogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", bucket, entry.name, err)) + healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", bucket, entry.name, err)) } } for _, version := range fivs.Versions { @@ -2385,7 +2385,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea // Check if disks are healing on in-case of VMware vsphere deployments. 
if opts.Maintenance && opts.DeploymentType == vmware { if drivesHealing > 0 { - logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", drivesHealing)) + healingLogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", drivesHealing)) } } @@ -2445,15 +2445,15 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea healthy := erasureSetUpCount[poolIdx][setIdx].online >= poolWriteQuorums[poolIdx] if !healthy { - logger.LogIf(logger.SetReqInfo(ctx, reqInfo), + storageLogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Write quorum may be lost on pool: %d, set: %d, expected write quorum: %d", - poolIdx, setIdx, poolWriteQuorums[poolIdx])) + poolIdx, setIdx, poolWriteQuorums[poolIdx]), logger.FatalKind) } result.Healthy = result.Healthy && healthy healthyRead := erasureSetUpCount[poolIdx][setIdx].online >= poolReadQuorums[poolIdx] if !healthyRead { - logger.LogIf(logger.SetReqInfo(ctx, reqInfo), + storageLogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Read quorum may be lost on pool: %d, set: %d, expected read quorum: %d", poolIdx, setIdx, poolReadQuorums[poolIdx])) } diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go index 58503558b..cb356deb7 100644 --- a/cmd/erasure-sets.go +++ b/cmd/erasure-sets.go @@ -448,7 +448,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ diskID, err := disk.GetDiskID() if err != nil { if !errors.Is(err, errUnformattedDisk) { - logger.LogIf(ctx, err) + bootLogIf(ctx, err) } return } @@ -457,11 +457,11 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ } m, n, err := findDiskIndexByDiskID(format, diskID) if err != nil { - logger.LogIf(ctx, err) + bootLogIf(ctx, err) return } if m != i || n != j { - logger.LogIf(ctx, fmt.Errorf("Detected unexpected drive ordering refusing to use the drive - poolID: %s, found drive mounted at (set=%s, drive=%s) expected mount at (set=%s, drive=%s): %s(%s)", humanize.Ordinal(poolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(i+1), humanize.Ordinal(j+1), disk, diskID)) + bootLogIf(ctx, fmt.Errorf("Detected unexpected drive ordering refusing to use the drive - poolID: %s, found drive mounted at (set=%s, drive=%s) expected mount at (set=%s, drive=%s): %s(%s)", humanize.Ordinal(poolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(i+1), humanize.Ordinal(j+1), disk, diskID)) s.erasureDisks[i][j] = &unrecognizedDisk{storage: disk} return } @@ -1083,7 +1083,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H if !reflect.DeepEqual(s.format, refFormat) { // Format is corrupted and unrecognized by the running instance. 
- logger.LogIf(ctx, fmt.Errorf("Unable to heal the newly replaced drives due to format.json inconsistencies, please engage MinIO support for further assistance: %w", + healingLogIf(ctx, fmt.Errorf("Unable to heal the newly replaced drives due to format.json inconsistencies, please engage MinIO support for further assistance: %w", errCorruptedFormat)) return res, errCorruptedFormat } @@ -1112,7 +1112,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H continue } if err := saveFormatErasure(storageDisks[index], format, formatOpID); err != nil { - logger.LogIf(ctx, fmt.Errorf("Drive %s failed to write updated 'format.json': %v", storageDisks[index], err)) + healingLogIf(ctx, fmt.Errorf("Drive %s failed to write updated 'format.json': %v", storageDisks[index], err)) storageDisks[index].Close() tmpNewFormats[index] = nil // this disk failed to write new format } @@ -1127,7 +1127,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H m, n, err := findDiskIndexByDiskID(refFormat, format.Erasure.This) if err != nil { - logger.LogIf(ctx, err) + healingLogIf(ctx, err) continue } diff --git a/cmd/erasure.go b/cmd/erasure.go index 838458b5e..6d265b470 100644 --- a/cmd/erasure.go +++ b/cmd/erasure.go @@ -31,7 +31,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/dsync" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/sync/errgroup" ) @@ -384,7 +383,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa // Collect disks we can use. disks, healing := er.getOnlineDisksWithHealing(false) if len(disks) == 0 { - logger.LogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle")) + scannerLogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle")) return nil } @@ -449,7 +448,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa if cache.Info.LastUpdate.Equal(lastSave) { continue } - logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update") + scannerLogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update") updates <- cache.clone() lastSave = cache.Info.LastUpdate @@ -458,7 +457,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa // Save final state... 
cache.Info.NextCycle = wantCycle cache.Info.LastUpdate = time.Now() - logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed") + scannerLogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed") updates <- cache.clone() return } @@ -494,7 +493,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa // Load cache for bucket cacheName := pathJoin(bucket.Name, dataUsageCacheName) cache := dataUsageCache{} - logger.LogIf(ctx, cache.load(ctx, er, cacheName)) + scannerLogIf(ctx, cache.load(ctx, er, cacheName)) if cache.Info.Name == "" { cache.Info.Name = bucket.Name } @@ -530,9 +529,9 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa cache, err = disk.NSScanner(ctx, cache, updates, healScanMode, nil) if err != nil { if !cache.Info.LastUpdate.IsZero() && cache.Info.LastUpdate.After(before) { - logger.LogIf(ctx, cache.save(ctx, er, cacheName)) + scannerLogIf(ctx, cache.save(ctx, er, cacheName)) } else { - logger.LogIf(ctx, err) + scannerLogIf(ctx, err) } // This ensures that we don't close // bucketResults channel while the @@ -562,7 +561,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa } // Save cache - logger.LogIf(ctx, cache.save(ctx, er, cacheName)) + scannerLogIf(ctx, cache.save(ctx, er, cacheName)) } }(i) } diff --git a/cmd/etcd.go b/cmd/etcd.go index 028a72780..e3cdebb79 100644 --- a/cmd/etcd.go +++ b/cmd/etcd.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "github.com/minio/minio/internal/logger" etcd "go.etcd.io/etcd/client/v3" ) @@ -48,7 +47,7 @@ func saveKeyEtcdWithTTL(ctx context.Context, client *etcd.Client, key string, da return etcdErrToErr(err, client.Endpoints()) } _, err = client.Put(timeoutCtx, key, string(data), etcd.WithLease(lease.ID)) - logger.LogIf(ctx, err) + etcdLogIf(ctx, err) return etcdErrToErr(err, client.Endpoints()) } @@ -59,7 +58,7 @@ func saveKeyEtcd(ctx context.Context, client *etcd.Client, key string, data []by return saveKeyEtcdWithTTL(ctx, client, key, data, opts[0].ttl) } _, err := client.Put(timeoutCtx, key, string(data)) - logger.LogIf(ctx, err) + etcdLogIf(ctx, err) return etcdErrToErr(err, client.Endpoints()) } @@ -68,7 +67,7 @@ func deleteKeyEtcd(ctx context.Context, client *etcd.Client, key string) error { defer cancel() _, err := client.Delete(timeoutCtx, key) - logger.LogIf(ctx, err) + etcdLogIf(ctx, err) return etcdErrToErr(err, client.Endpoints()) } @@ -77,7 +76,7 @@ func readKeyEtcd(ctx context.Context, client *etcd.Client, key string) ([]byte, defer cancel() resp, err := client.Get(timeoutCtx, key) if err != nil { - logger.LogOnceIf(ctx, err, "etcd-retrieve-keys") + etcdLogOnceIf(ctx, err, "etcd-retrieve-keys") return nil, etcdErrToErr(err, client.Endpoints()) } if resp.Count == 0 { diff --git a/cmd/event-notification.go b/cmd/event-notification.go index 9f56c4968..efa50368b 100644 --- a/cmd/event-notification.go +++ b/cmd/event-notification.go @@ -28,7 +28,6 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/event" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/pubsub" "github.com/minio/pkg/v2/policy" ) @@ -83,7 +82,7 @@ func (evnot *EventNotifier) set(bucket BucketInfo, meta BucketMetadata) { config.SetRegion(globalSite.Region) if err := config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil { if _, ok := err.(*event.ErrARNNotFound); !ok { - 
logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) } } evnot.AddRulesMap(bucket.Name, config.ToRulesMap()) diff --git a/cmd/format-erasure.go b/cmd/format-erasure.go index eae474461..6258977c6 100644 --- a/cmd/format-erasure.go +++ b/cmd/format-erasure.go @@ -278,7 +278,7 @@ func formatErasureMigrateV2ToV3(data []byte, export, version string) ([]byte, er tmpOld := pathJoin(export, minioMetaTmpDeletedBucket, mustGetUUID()) if err := renameAll(pathJoin(export, minioMetaMultipartBucket), tmpOld, export); err != nil && err != errFileNotFound { - logger.LogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate", + bootLogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate", pathJoin(export, minioMetaMultipartBucket), tmpOld, osErrToFileErr(err))) @@ -570,7 +570,7 @@ func formatErasureFixLocalDeploymentID(endpoints Endpoints, storageDisks []Stora format.ID = refFormat.ID // Heal the drive if we fixed its deployment ID. if err := saveFormatErasure(storageDisks[index], format, mustGetUUID()); err != nil { - logger.LogIf(GlobalContext, err) + bootLogIf(GlobalContext, err) return fmt.Errorf("Unable to save format.json, %w", err) } } diff --git a/cmd/ftp-server-driver.go b/cmd/ftp-server-driver.go index 2b55357db..34ff6a991 100644 --- a/cmd/ftp-server-driver.go +++ b/cmd/ftp-server-driver.go @@ -33,7 +33,6 @@ import ( "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio/internal/auth" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" ftp "goftp.io/server/v2" ) @@ -323,7 +322,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) } // Call hook for site replication. - logger.LogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ + replLogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, diff --git a/cmd/global-heal.go b/cmd/global-heal.go index ebec7e2a5..38f89fdcd 100644 --- a/cmd/global-heal.go +++ b/cmd/global-heal.go @@ -153,7 +153,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ScanMode: scanMode}) if err != nil { // Log bucket healing error if any, we shall retry again. 
- logger.LogIf(ctx, err) + healingLogIf(ctx, err) } } @@ -177,7 +177,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, numHealers = uint64(v) } - logger.Event(ctx, fmt.Sprintf("Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers)) + healingLogEvent(ctx, fmt.Sprintf("Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers)) jt, _ := workers.New(int(numHealers)) @@ -204,7 +204,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, if _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ ScanMode: scanMode, }); err != nil { - logger.LogIf(ctx, err) + healingLogIf(ctx, err) continue } @@ -226,7 +226,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, if len(disks) == 0 { // No object healing necessary tracker.bucketDone(bucket) - logger.LogIf(ctx, tracker.update(ctx)) + healingLogIf(ctx, tracker.update(ctx)) continue } @@ -293,7 +293,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, if res.entryDone { tracker.setObject(res.name) if time.Since(tracker.getLastUpdate()) > time.Minute { - logger.LogIf(ctx, tracker.update(ctx)) + healingLogIf(ctx, tracker.update(ctx)) } continue } @@ -306,7 +306,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, select { case <-ctx.Done(): if !contextCanceled(ctx) { - logger.LogIf(ctx, ctx.Err()) + healingLogIf(ctx, ctx.Err()) } return false case results <- result: @@ -360,7 +360,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, return } result = healEntryFailure(0) - logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err)) + healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err)) } else { result = healEntrySuccess(0) } @@ -399,9 +399,9 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, // If not deleted, assume they failed. result = healEntryFailure(uint64(version.Size)) if version.VersionID != "" { - logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s-v(%s): %w", bucket, version.Name, version.VersionID, err)) + healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s-v(%s): %w", bucket, version.Name, version.VersionID, err)) } else { - logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err)) + healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err)) } } else { result = healEntrySuccess(uint64(version.Size)) @@ -465,7 +465,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, // we let the caller retry this disk again for the // buckets it failed to list. 
retErr = err - logger.LogIf(ctx, err) + healingLogIf(ctx, err) continue } @@ -475,7 +475,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, return ctx.Err() default: tracker.bucketDone(bucket) - logger.LogIf(ctx, tracker.update(ctx)) + healingLogIf(ctx, tracker.update(ctx)) } } diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index 67d183f56..35fa7f136 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -51,7 +51,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError locationConstraint := createBucketLocationConfiguration{} err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength) if err != nil && r.ContentLength != 0 { - logger.LogOnceIf(GlobalContext, err, "location-constraint-xml-parsing") + internalLogOnceIf(GlobalContext, err, "location-constraint-xml-parsing") // Treat all other failures as XML parsing errors. return "", ErrMalformedXML } // else for both err as nil or io.EOF @@ -191,7 +191,7 @@ func extractMetadata(ctx context.Context, mimesHeader ...textproto.MIMEHeader) ( // extractMetadata extracts metadata from map values. func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[string]string) error { if v == nil { - logger.LogIf(ctx, errInvalidArgument) + bugLogIf(ctx, errInvalidArgument) return errInvalidArgument } @@ -461,7 +461,7 @@ func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, e ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) { success = false if err != nil && !errors.Is(err, context.Canceled) { - logger.LogIf(GlobalContext, err) + replLogIf(GlobalContext, err) } }, }) diff --git a/cmd/iam-etcd-store.go b/cmd/iam-etcd-store.go index 52dff016c..2fe388dc8 100644 --- a/cmd/iam-etcd-store.go +++ b/cmd/iam-etcd-store.go @@ -30,7 +30,6 @@ import ( "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" "github.com/puzpuzpuz/xsync/v3" "go.etcd.io/etcd/api/v3/mvccpb" etcd "go.etcd.io/etcd/client/v3" @@ -460,7 +459,7 @@ func (ies *IAMEtcdStore) watch(ctx context.Context, keyPath string) <-chan iamWa goto outerLoop } if err := watchResp.Err(); err != nil { - logger.LogIf(ctx, err) + iamLogIf(ctx, err) // log and retry. time.Sleep(1 * time.Second) // Upon an error on watch channel diff --git a/cmd/iam-object-store.go b/cmd/iam-object-store.go index 6a24491f1..26b41ec71 100644 --- a/cmd/iam-object-store.go +++ b/cmd/iam-object-store.go @@ -34,7 +34,6 @@ import ( "github.com/minio/minio/internal/config" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" "github.com/puzpuzpuz/xsync/v3" ) @@ -448,7 +447,7 @@ func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error { iamListing, ok := iamOS.cachedIAMListing.Load().(map[string][]string) if !ok { // There has been no store yet. This should never happen! - logger.LogIf(GlobalContext, errors.New("WARNING: no cached IAM listing found")) + iamLogIf(GlobalContext, errors.New("WARNING: no cached IAM listing found")) return nil } @@ -461,7 +460,7 @@ func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error { // loadUser() will delete expired user during the load. 
err := iamOS.loadUser(ctx, userName, stsUser, stsAccountsFromStore) if err != nil && !errors.Is(err, errNoSuchUser) { - logger.LogIf(GlobalContext, + iamLogIf(GlobalContext, fmt.Errorf("unable to load user during STS purge: %w (%s)", err, item)) } @@ -472,7 +471,7 @@ func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error { stsName := strings.TrimSuffix(item, ".json") err := iamOS.loadMappedPolicy(ctx, stsName, stsUser, false, stsAccPoliciesFromStore) if err != nil && !errors.Is(err, errNoSuchPolicy) { - logger.LogIf(GlobalContext, + iamLogIf(GlobalContext, fmt.Errorf("unable to load policies during STS purge: %w (%s)", err, item)) } diff --git a/cmd/iam-store.go b/cmd/iam-store.go index 2e4e100d5..6d3dc38e9 100644 --- a/cmd/iam-store.go +++ b/cmd/iam-store.go @@ -33,7 +33,6 @@ import ( "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/config/identity/openid" "github.com/minio/minio/internal/jwt" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/policy" "github.com/puzpuzpuz/xsync/v3" ) @@ -1882,7 +1881,7 @@ func (store *IAMStoreSys) DeleteUsers(ctx context.Context, users []string) error // we are only logging errors, not handling them. err := store.deleteUserIdentity(ctx, user, userType) - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) delete(cache.iamUsersMap, user) deleted = true diff --git a/cmd/iam.go b/cmd/iam.go index 43168527e..34f3fd83e 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -230,42 +230,42 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc openidConfig, err := openid.LookupConfig(s, NewHTTPTransport(), xhttp.DrainBody, globalSite.Region) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err), logger.WarningKind) } // Initialize if LDAP is enabled ldapConfig, err := xldap.Lookup(s, globalRootCAs) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err), logger.WarningKind) } stsTLSConfig, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err), logger.WarningKind) } if stsTLSConfig.InsecureSkipVerify { - logger.LogIf(ctx, fmt.Errorf("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify)) + iamLogIf(ctx, fmt.Errorf("Enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify), logger.WarningKind) } authNPluginCfg, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default], NewHTTPTransport(), xhttp.DrainBody, globalSite.Region) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err), logger.WarningKind) } setGlobalAuthNPlugin(idplugin.New(GlobalContext, authNPluginCfg)) authZPluginCfg, err := polplugin.LookupConfig(s, GetDefaultConnSettings(), xhttp.DrainBody) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err), logger.WarningKind) } if authZPluginCfg.URL == nil { opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default], NewHTTPTransport(), 
xhttp.DrainBody) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err)) } else { authZPluginCfg.URL = opaCfg.URL authZPluginCfg.AuthToken = opaCfg.AuthToken @@ -301,7 +301,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc time.Sleep(time.Duration(r.Float64() * float64(time.Second))) continue } - logger.LogIf(ctx, fmt.Errorf("IAM sub-system is partially initialized, unable to write the IAM format: %w", err)) + iamLogIf(ctx, fmt.Errorf("IAM sub-system is partially initialized, unable to write the IAM format: %w", err), logger.WarningKind) return } @@ -317,7 +317,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc continue } if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize IAM sub-system, some users may not be available: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to initialize IAM sub-system, some users may not be available: %w", err), logger.WarningKind) } } break @@ -355,7 +355,7 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat for event := range ch { if err := sys.loadWatchedEvent(ctx, event); err != nil { // we simply log errors - logger.LogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err)) + iamLogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err), logger.WarningKind) } } }() @@ -388,7 +388,7 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat // Load all IAM items (except STS creds) periodically. refreshStart := time.Now() if err := sys.Load(ctx, false); err != nil { - logger.LogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err)) + iamLogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err), logger.WarningKind) } else { took := time.Since(refreshStart).Seconds() if took > maxDurationSecondsForLog { @@ -400,7 +400,7 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat // Purge expired STS credentials. 
purgeStart := time.Now() if err := sys.store.PurgeExpiredSTS(ctx); err != nil { - logger.LogIf(ctx, fmt.Errorf("Failure in periodic STS purge for IAM (took %.2fs): %v", time.Since(purgeStart).Seconds(), err)) + iamLogIf(ctx, fmt.Errorf("Failure in periodic STS purge for IAM (took %.2fs): %v", time.Since(purgeStart).Seconds(), err)) } else { took := time.Since(purgeStart).Seconds() if took > maxDurationSecondsForLog { @@ -450,7 +450,7 @@ func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[a errMsg := fmt.Errorf( "The policies \"%s\" mapped to role ARN %s are not defined - this role may not work as expected.", unknownPoliciesSet.ToSlice(), arn.String()) - logger.LogIf(ctx, errMsg) + authZLogIf(ctx, errMsg, logger.WarningKind) } } sys.rolesMap[arn] = rolePolicies @@ -573,7 +573,7 @@ func (sys *IAMSys) DeletePolicy(ctx context.Context, policyName string, notifyPe for _, nerr := range globalNotificationSys.DeletePolicy(policyName) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } @@ -638,7 +638,7 @@ func (sys *IAMSys) SetPolicy(ctx context.Context, policyName string, p policy.Po for _, nerr := range globalNotificationSys.LoadPolicy(policyName) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -660,7 +660,7 @@ func (sys *IAMSys) DeleteUser(ctx context.Context, accessKey string, notifyPeers for _, nerr := range globalNotificationSys.DeleteUser(accessKey) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -686,7 +686,7 @@ func (sys *IAMSys) notifyForUser(ctx context.Context, accessKey string, isTemp b for _, nerr := range globalNotificationSys.LoadUser(accessKey, isTemp) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -931,7 +931,7 @@ func (sys *IAMSys) notifyForServiceAccount(ctx context.Context, accessKey string for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -1252,7 +1252,7 @@ func (sys *IAMSys) DeleteServiceAccount(ctx context.Context, accessKey string, n for _, nerr := range globalNotificationSys.DeleteServiceAccount(accessKey) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -1327,14 +1327,14 @@ func (sys *IAMSys) purgeExpiredCredentialsForExternalSSO(ctx context.Context) { roleArns := puInfo.roleArns.ToSlice() var roleArn string if len(roleArns) == 0 { - logger.LogIf(GlobalContext, + iamLogIf(GlobalContext, fmt.Errorf("parentUser: %s had no roleArns mapped!", parentUser)) continue } roleArn = roleArns[0] u, err := sys.OpenIDConfig.LookupUser(roleArn, puInfo.subClaimValue) if err != nil { - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) continue } // If user is set to "disabled", we will remove them @@ -1364,7 +1364,7 @@ func (sys *IAMSys) purgeExpiredCredentialsForLDAP(ctx context.Context) { expiredUsers, err := sys.LDAPConfig.GetNonEligibleUserDistNames(allDistNames) if err != nil { // Log and return on error - perhaps it'll work the next time. 
- logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) return } @@ -1445,7 +1445,7 @@ func (sys *IAMSys) updateGroupMembershipsForLDAP(ctx context.Context) { updatedGroups, err := sys.LDAPConfig.LookupGroupMemberships(parentUsers, parentUserToLDAPUsernameMap) if err != nil { // Log and return on error - perhaps it'll work the next time. - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) return } @@ -1469,7 +1469,7 @@ func (sys *IAMSys) updateGroupMembershipsForLDAP(ctx context.Context) { cred.Groups = currGroups if err := sys.store.UpdateUserIdentity(ctx, cred); err != nil { // Log and continue error - perhaps it'll work the next time. - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) } } } @@ -1508,7 +1508,7 @@ func (sys *IAMSys) notifyForGroup(ctx context.Context, group string) { for _, nerr := range globalNotificationSys.LoadGroup(group) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -1612,7 +1612,7 @@ func (sys *IAMSys) PolicyDBSet(ctx context.Context, name, policy string, userTyp for _, nerr := range globalNotificationSys.LoadPolicyMapping(name, userType, isGroup) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -1680,12 +1680,12 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool, for _, nerr := range globalNotificationSys.LoadPolicyMapping(userOrGroup, regUser, isGroup) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ UserOrGroup: userOrGroup, @@ -1714,7 +1714,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool, if r.User != "" { dn, err = sys.LDAPConfig.GetValidatedDNForUsername(r.User) if err != nil { - logger.LogIf(ctx, err) + iamLogIf(ctx, err) return } if dn == "" { @@ -1731,7 +1731,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool, if isAttach { var foundGroupDN string if foundGroupDN, err = sys.LDAPConfig.GetValidatedGroupDN(r.Group); err != nil { - logger.LogIf(ctx, err) + iamLogIf(ctx, err) return } else if foundGroupDN == "" { err = errNoSuchGroup @@ -1758,12 +1758,12 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool, for _, nerr := range globalNotificationSys.LoadPolicyMapping(dn, userType, isGroup) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ UserOrGroup: dn, @@ -1826,7 +1826,7 @@ func (sys *IAMSys) IsAllowedServiceAccount(args policy.Args, parentUser string) case roleArn != "": arn, err := arn.Parse(roleArn) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) + iamLogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) return false } svcPolicies = 
newMappedPolicy(sys.rolesMap[arn]).toSlice() @@ -1835,7 +1835,7 @@ func (sys *IAMSys) IsAllowedServiceAccount(args policy.Args, parentUser string) // Check policy for parent user of service account. svcPolicies, err = sys.PolicyDBGet(parentUser, args.Groups...) if err != nil { - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) return false } @@ -1910,7 +1910,7 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool { // If a roleARN is present, the role policy is applied. arn, err := arn.Parse(roleArn) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) + iamLogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) return false } policies = newMappedPolicy(sys.rolesMap[arn]).toSlice() @@ -1920,7 +1920,7 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool { var err error policies, err = sys.store.PolicyDBGet(parentUser, args.Groups...) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("error fetching policies on %s: %v", parentUser, err)) + iamLogIf(GlobalContext, fmt.Errorf("error fetching policies on %s: %v", parentUser, err)) return false } @@ -1955,11 +1955,11 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool { _, err := sys.store.GetPolicy(pname) if errors.Is(err, errNoSuchPolicy) { // all policies presented in the claim should exist - logger.LogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", pname, iamPolicyClaimNameOpenID())) + iamLogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", pname, iamPolicyClaimNameOpenID())) return false } } - logger.LogIf(GlobalContext, fmt.Errorf("all policies were unexpectedly present!")) + iamLogIf(GlobalContext, fmt.Errorf("all policies were unexpectedly present!")) return false } @@ -2001,7 +2001,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli subPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(spolicyStr))) if err != nil { // Log any error in input session policy config. - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) return } @@ -2062,7 +2062,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe subPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(spolicyStr))) if err != nil { // Log any error in input session policy config. 
- logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) return } @@ -2100,7 +2100,7 @@ func (sys *IAMSys) IsAllowed(args policy.Args) bool { if authz := newGlobalAuthZPluginFn(); authz != nil { ok, err := authz.IsAllowed(args) if err != nil { - logger.LogIf(GlobalContext, err) + authZLogIf(GlobalContext, err) } return ok } diff --git a/cmd/jwt.go b/cmd/jwt.go index 39740bb70..82b0d3f27 100644 --- a/cmd/jwt.go +++ b/cmd/jwt.go @@ -62,7 +62,7 @@ func cachedAuthenticateNode(ttl time.Duration) func(accessKey, secretKey, audien } cache, err := lru.NewARC(100) if err != nil { - logger.LogIf(GlobalContext, err) + bugLogIf(GlobalContext, err) return authenticateNode } return func(accessKey, secretKey, audience string) (string, error) { diff --git a/cmd/license-update.go b/cmd/license-update.go index 3cd4c2b23..df3565b54 100644 --- a/cmd/license-update.go +++ b/cmd/license-update.go @@ -23,7 +23,6 @@ import ( "math/rand" "time" - "github.com/minio/minio/internal/logger" "github.com/tidwall/gjson" ) @@ -85,13 +84,13 @@ func performLicenseUpdate(ctx context.Context, objectAPI ObjectLayer) { resp, err := globalSubnetConfig.Post(url, nil) if err != nil { - logger.LogIf(ctx, fmt.Errorf("error from %s: %w", url, err)) + subnetLogIf(ctx, fmt.Errorf("error from %s: %w", url, err)) return } r := gjson.Parse(resp).Get("license_v2") if r.Index == 0 { - logger.LogIf(ctx, fmt.Errorf("license not found in response from %s", url)) + internalLogIf(ctx, fmt.Errorf("license not found in response from %s", url)) return } @@ -104,13 +103,13 @@ func performLicenseUpdate(ctx context.Context, objectAPI ObjectLayer) { kv := "subnet license=" + lic result, err := setConfigKV(ctx, objectAPI, []byte(kv)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("error setting subnet license config: %w", err)) + internalLogIf(ctx, fmt.Errorf("error setting subnet license config: %w", err)) return } if result.Dynamic { if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, result.Cfg, result.SubSys); err != nil { - logger.LogIf(ctx, fmt.Errorf("error applying subnet dynamic config: %w", err)) + subnetLogIf(ctx, fmt.Errorf("error applying subnet dynamic config: %w", err)) return } globalNotificationSys.SignalConfigReload(result.SubSys) diff --git a/cmd/listen-notification-handlers.go b/cmd/listen-notification-handlers.go index c8b72836d..709543047 100644 --- a/cmd/listen-notification-handlers.go +++ b/cmd/listen-notification-handlers.go @@ -132,7 +132,7 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r buf.Reset() tmpEvt.Records[0] = ev if err := enc.Encode(tmpEvt); err != nil { - logger.LogOnceIf(ctx, err, "event: Encode failed") + bugLogIf(ctx, err, "event: Encode failed") continue } mergeCh <- append(grid.GetByteBuffer()[:0], buf.Bytes()...) diff --git a/cmd/logging.go b/cmd/logging.go new file mode 100644 index 000000000..490a8235a --- /dev/null +++ b/cmd/logging.go @@ -0,0 +1,195 @@ +package cmd + +import ( + "context" + + "github.com/minio/minio/internal/logger" +) + +func replLogIf(ctx context.Context, err error, errKind ...interface{}) { + logger.LogIf(ctx, "replication", err, errKind...) +} + +func replLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { + logger.LogOnceIf(ctx, "replication", err, id, errKind...) +} + +func iamLogIf(ctx context.Context, err error, errKind ...interface{}) { + logger.LogIf(ctx, "iam", err, errKind...) 
+}
+
+func rebalanceLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "rebalance", err, errKind...)
+}
+
+func rebalanceLogEvent(ctx context.Context, msg string, args ...interface{}) {
+	logger.Event(ctx, "rebalance", msg, args...)
+}
+
+func adminLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "admin", err, errKind...)
+}
+
+func authNLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "authN", err, errKind...)
+}
+
+func authZLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "authZ", err, errKind...)
+}
+
+func peersLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "peers", err, errKind...)
+}
+
+func peersLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogAlwaysIf(ctx, "peers", err, errKind...)
+}
+
+func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "peers", err, id, errKind...)
+}
+
+func bugLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "internal", err, errKind...)
+}
+
+func healingLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "healing", err, errKind...)
+}
+
+func healingLogEvent(ctx context.Context, msg string, args ...interface{}) {
+	logger.Event(ctx, "healing", msg, args...)
+}
+
+func healingLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "healing", err, id, errKind...)
+}
+
+func batchLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "batch", err, errKind...)
+}
+
+func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "batch", err, id, errKind...)
+}
+
+func bootLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "bootstrap", err, errKind...)
+}
+
+func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "bootstrap", err, id, errKind...)
+}
+
+func dnsLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "dns", err, errKind...)
+}
+
+func internalLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "internal", err, errKind...)
+}
+
+func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "internal", err, id, errKind...)
+}
+
+func transitionLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "transition", err, errKind...)
+}
+
+func configLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "config", err, errKind...)
+}
+
+func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "config", err, id, errKind...)
+}
+
+func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceConsoleIf(ctx, "config", err, id, errKind...)
+}
+
+func scannerLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "scanner", err, errKind...)
+}
+
+func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "scanner", err, id, errKind...)
+}
+
+func ilmLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "ilm", err, errKind...)
+}
+
+func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "ilm", err, id, errKind...)
+}
+
+func encLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "encryption", err, errKind...)
+}
+
+func storageLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "storage", err, errKind...)
+}
+
+func storageLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogAlwaysIf(ctx, "storage", err, errKind...)
+}
+
+func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "storage", err, id, errKind...)
+}
+
+func decomLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "decom", err, errKind...)
+}
+
+func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "decom", err, id, errKind...)
+}
+
+func decomLogEvent(ctx context.Context, msg string, args ...interface{}) {
+	logger.Event(ctx, "decom", msg, args...)
+}
+
+func etcdLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "etcd", err, errKind...)
+}
+
+func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "etcd", err, id, errKind...)
+}
+
+func subnetLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "subnet", err, errKind...)
+}
+
+func metricsLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "metrics", err, errKind...)
+}
+
+func s3LogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "s3", err, errKind...)
+}
+
+func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+	logger.LogOnceIf(ctx, "sftp", err, id, errKind...)
+}
+
+func shutdownLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "shutdown", err, errKind...)
+}
+
+func stsLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "sts", err, errKind...)
+}
+
+func tierLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "tier", err, errKind...)
+}
+
+func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) {
+	logger.LogIf(ctx, "kms", err, errKind...)
+} diff --git a/cmd/metacache-bucket.go b/cmd/metacache-bucket.go index c8f334995..a0ecfe7ef 100644 --- a/cmd/metacache-bucket.go +++ b/cmd/metacache-bucket.go @@ -221,7 +221,7 @@ func (b *bucketMetacache) deleteAll() { ez, ok := objAPI.(deleteAllStorager) if !ok { - logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'deleteAllStorager'")) + bugLogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'deleteAllStorager'")) return } diff --git a/cmd/metacache-entries.go b/cmd/metacache-entries.go index b2d553a93..40d87f216 100644 --- a/cmd/metacache-entries.go +++ b/cmd/metacache-entries.go @@ -27,7 +27,6 @@ import ( "strings" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/console" ) @@ -377,7 +376,7 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa xl, err := entry.xlmeta() if err != nil { if !errors.Is(err, errFileNotFound) { - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) } continue } @@ -437,7 +436,7 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa var err error selected.metadata, err = selected.cached.AppendTo(metaDataPoolGet()) if err != nil { - logger.LogIf(context.Background(), err) + bugLogIf(context.Background(), err) return nil, false } return selected, true diff --git a/cmd/metacache-marker.go b/cmd/metacache-marker.go index 3510f91f8..d85cbab56 100644 --- a/cmd/metacache-marker.go +++ b/cmd/metacache-marker.go @@ -22,8 +22,6 @@ import ( "fmt" "strconv" "strings" - - "github.com/minio/minio/internal/logger" ) // markerTagVersion is the marker version. @@ -86,7 +84,7 @@ func (o listPathOptions) encodeMarker(marker string) string { return fmt.Sprintf("%s[minio_cache:%s,return:]", marker, markerTagVersion) } if strings.ContainsAny(o.ID, "[:,") { - logger.LogIf(context.Background(), fmt.Errorf("encodeMarker: uuid %s contained invalid characters", o.ID)) + internalLogIf(context.Background(), fmt.Errorf("encodeMarker: uuid %s contained invalid characters", o.ID)) } return fmt.Sprintf("%s[minio_cache:%s,id:%s,p:%d,s:%d]", marker, markerTagVersion, o.ID, o.pool, o.set) } diff --git a/cmd/metacache-server-pool.go b/cmd/metacache-server-pool.go index 92bcc1e73..16dcd17fe 100644 --- a/cmd/metacache-server-pool.go +++ b/cmd/metacache-server-pool.go @@ -29,7 +29,6 @@ import ( "time" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" ) func renameAllBucketMetacache(epPath string) error { @@ -136,7 +135,7 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) ( } if !errors.Is(err, context.DeadlineExceeded) { // Report error once per bucket, but continue listing. 
- logger.LogOnceIf(ctx, err, "GetMetacacheListing:"+o.Bucket) + storageLogOnceIf(ctx, err, "GetMetacacheListing:"+o.Bucket) } o.Transient = true o.Create = false @@ -322,7 +321,7 @@ func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions, allAtEOF = false continue } - logger.LogIf(ctx, err) + storageLogIf(ctx, err) return err } if allAtEOF { diff --git a/cmd/metacache-set.go b/cmd/metacache-set.go index d698ad10e..e2d56025a 100644 --- a/cmd/metacache-set.go +++ b/cmd/metacache-set.go @@ -38,7 +38,6 @@ import ( "github.com/minio/minio/internal/color" "github.com/minio/minio/internal/hash" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/console" ) @@ -285,7 +284,7 @@ func (o *listPathOptions) findFirstPart(fi FileInfo) (int, error) { } err := json.Unmarshal([]byte(v), &tmp) if !ok { - logger.LogIf(context.Background(), err) + bugLogIf(context.Background(), err) return -1, err } if tmp.First == "" && tmp.Last == "" && tmp.EOS { @@ -538,7 +537,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt } loadedPart = partN bi, err := getMetacacheBlockInfo(fi, partN) - logger.LogIf(ctx, err) + internalLogIf(ctx, err) if err == nil { if bi.pastPrefix(o.Prefix) { return entries, io.EOF @@ -577,7 +576,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt time.Sleep(retryDelay250) continue default: - logger.LogIf(ctx, err) + internalLogIf(ctx, err) return entries, err } } @@ -585,7 +584,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt // We finished at the end of the block. // And should not expect any more results. bi, err := getMetacacheBlockInfo(fi, partN) - logger.LogIf(ctx, err) + internalLogIf(ctx, err) if err != nil || bi.EOS { // We are done and there are no more parts. 
return entries, io.EOF @@ -868,7 +867,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache } o.debugln(color.Green("saveMetaCacheStream:")+" saving block", b.n, "to", o.objectPath(b.n)) r, err := hash.NewReader(ctx, bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data))) - logger.LogIf(ctx, err) + bugLogIf(ctx, err) custom := b.headerKV() _, err = er.putMetacacheObject(ctx, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{ UserDefined: custom, @@ -902,7 +901,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache return err case InsufficientReadQuorum: default: - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } if retries >= maxTries { return err diff --git a/cmd/metacache-stream.go b/cmd/metacache-stream.go index f6d08245e..c0023d801 100644 --- a/cmd/metacache-stream.go +++ b/cmd/metacache-stream.go @@ -28,7 +28,6 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/s2" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" "github.com/valyala/bytebufferpool" ) @@ -845,7 +844,7 @@ func (b metacacheBlock) headerKV() map[string]string { json := jsoniter.ConfigCompatibleWithStandardLibrary v, err := json.Marshal(b) if err != nil { - logger.LogIf(context.Background(), err) // Unlikely + bugLogIf(context.Background(), err) // Unlikely return nil } return map[string]string{fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, b.n): string(v)} diff --git a/cmd/metacache-walk.go b/cmd/metacache-walk.go index e01760aad..cafee98c6 100644 --- a/cmd/metacache-walk.go +++ b/cmd/metacache-walk.go @@ -25,7 +25,6 @@ import ( "github.com/minio/minio/internal/grid" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/valyala/bytebufferpool" ) @@ -171,7 +170,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ if err != nil { // Folder could have gone away in-between if err != errVolumeNotFound && err != errFileNotFound { - logger.LogOnceIf(ctx, err, "metacache-walk-scan-dir") + internalLogOnceIf(ctx, err, "metacache-walk-scan-dir") } if opts.ReportNotFound && err == errFileNotFound && current == opts.BaseDir { err = errFileNotFound @@ -239,7 +238,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ // while being concurrently listed at the same time in // such scenarios the 'xl.meta' might get truncated if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) { - logger.LogOnceIf(ctx, err, "metacache-walk-read-metadata") + internalLogOnceIf(ctx, err, "metacache-walk-read-metadata") } continue } @@ -257,7 +256,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ diskHealthCheckOK(ctx, err) if err != nil { if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) { - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } continue } @@ -308,7 +307,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ // Scan folder we found. Should be in correct sort order where we are. err := scanDir(pop) if err != nil && !IsErrIgnored(err, context.Canceled) { - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } } dirStack = dirStack[:len(dirStack)-1] @@ -379,7 +378,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ } if opts.Recursive { // Scan folder we found. Should be in correct sort order where we are. 
- logger.LogIf(ctx, scanDir(pop)) + internalLogIf(ctx, scanDir(pop)) } dirStack = dirStack[:len(dirStack)-1] } diff --git a/cmd/metacache.go b/cmd/metacache.go index 13a151b17..94de00da3 100644 --- a/cmd/metacache.go +++ b/cmd/metacache.go @@ -24,8 +24,6 @@ import ( "path" "strings" "time" - - "github.com/minio/minio/internal/logger" ) type scanStatus uint8 @@ -148,16 +146,17 @@ func (m *metacache) update(update metacache) { // delete all cache data on disks. func (m *metacache) delete(ctx context.Context) { if m.bucket == "" || m.id == "" { - logger.LogIf(ctx, fmt.Errorf("metacache.delete: bucket (%s) or id (%s) empty", m.bucket, m.id)) + bugLogIf(ctx, fmt.Errorf("metacache.delete: bucket (%s) or id (%s) empty", m.bucket, m.id)) + return } objAPI := newObjectLayerFn() if objAPI == nil { - logger.LogIf(ctx, errors.New("metacache.delete: no object layer")) + internalLogIf(ctx, errors.New("metacache.delete: no object layer")) return } ez, ok := objAPI.(deleteAllStorager) if !ok { - logger.LogIf(ctx, errors.New("metacache.delete: expected objAPI to be 'deleteAllStorager'")) + bugLogIf(ctx, errors.New("metacache.delete: expected objAPI to be 'deleteAllStorager'")) return } ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(m.bucket, m.id)) diff --git a/cmd/metrics-v2.go b/cmd/metrics-v2.go index cbbc16a91..874fbec53 100644 --- a/cmd/metrics-v2.go +++ b/cmd/metrics-v2.go @@ -1690,7 +1690,7 @@ func getMinioProcMetrics() *MetricsGroupV2 { p, err := procfs.Self() if err != nil { - logger.LogOnceIf(ctx, err, string(nodeMetricNamespace)) + internalLogOnceIf(ctx, err, string(nodeMetricNamespace)) return } @@ -1846,7 +1846,7 @@ func getHistogramMetrics(hist *prometheus.HistogramVec, desc MetricDescription, if err != nil { // Log error and continue to receive other metric // values - logger.LogIf(GlobalContext, err) + bugLogIf(GlobalContext, err) continue } @@ -2476,7 +2476,7 @@ func getReplicationSiteMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { if globalSiteReplicationSys.isEnabled() { m, err := globalSiteReplicationSys.getSiteMetrics(GlobalContext) if err != nil { - logger.LogIf(GlobalContext, err) + metricsLogIf(GlobalContext, err) return ml } ml = append(ml, MetricV2{ @@ -3126,7 +3126,7 @@ func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { metrics = make([]MetricV2, 0, 50) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer) if err != nil { - logger.LogIf(ctx, err) + metricsLogIf(ctx, err) return } @@ -3229,7 +3229,7 @@ func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { metrics = make([]MetricV2, 0, 50) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer) if err != nil { - logger.LogIf(ctx, err) + metricsLogIf(ctx, err) return } @@ -3463,7 +3463,7 @@ func getClusterTierMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { dui, err := loadDataUsageFromBackend(ctx, objLayer) if err != nil { - logger.LogIf(ctx, err) + metricsLogIf(ctx, err) return } // data usage has not captured any tier stats yet. 
@@ -4013,7 +4013,7 @@ func collectMetric(metric MetricV2, labels []string, values []string, metricName if err != nil { // Enable for debugging if serverDebugLog { - logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v+%v", err, values, metric.Histogram), metricName+"-metrics-histogram") + bugLogIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v+%v", err, values, metric.Histogram)) } } else { out <- pmetric @@ -4040,7 +4040,7 @@ func collectMetric(metric MetricV2, labels []string, values []string, metricName if err != nil { // Enable for debugging if serverDebugLog { - logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v", err, values), metricName+"-metrics") + bugLogIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v", err, values)) } } else { out <- pmetric @@ -4366,7 +4366,7 @@ func metricsNodeHandler() http.Handler { enc := expfmt.NewEncoder(w, contentType) for _, mf := range mfs { if err := enc.Encode(mf); err != nil { - logger.LogIf(r.Context(), err) + metricsLogIf(r.Context(), err) return } } diff --git a/cmd/metrics-v3-cluster-usage.go b/cmd/metrics-v3-cluster-usage.go index d8844d9f9..653b127a4 100644 --- a/cmd/metrics-v3-cluster-usage.go +++ b/cmd/metrics-v3-cluster-usage.go @@ -20,8 +20,6 @@ package cmd import ( "context" "time" - - "github.com/minio/minio/internal/logger" ) const ( @@ -60,7 +58,7 @@ var ( func loadClusterUsageObjectMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { dataUsageInfo, err := c.dataUsageInfo.Get() if err != nil { - logger.LogIf(ctx, err) + metricsLogIf(ctx, err) return nil } @@ -144,7 +142,7 @@ var ( func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metricsCache, buckets []string) error { dataUsageInfo, err := c.dataUsageInfo.Get() if err != nil { - logger.LogIf(ctx, err) + metricsLogIf(ctx, err) return nil } @@ -164,7 +162,7 @@ func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metri if err != nil { // Log and continue if we are unable to retrieve metrics for this // bucket. 
- logger.LogIf(ctx, err) + metricsLogIf(ctx, err) continue } diff --git a/cmd/metrics-v3-handler.go b/cmd/metrics-v3-handler.go index 2d31a08b6..a2876a82d 100644 --- a/cmd/metrics-v3-handler.go +++ b/cmd/metrics-v3-handler.go @@ -24,7 +24,6 @@ import ( "slices" "strings" - "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/mcontext" "github.com/minio/mux" "github.com/prometheus/client_golang/prometheus" @@ -39,7 +38,7 @@ func (p promLogger) Println(v ...interface{}) { s = append(s, fmt.Sprintf("%v", val)) } err := fmt.Errorf("metrics handler error: %v", strings.Join(s, " ")) - logger.LogIf(GlobalContext, err) + metricsLogIf(GlobalContext, err) } type metricsV3Server struct { diff --git a/cmd/metrics-v3-system-drive.go b/cmd/metrics-v3-system-drive.go index cb0d66a53..a4217b495 100644 --- a/cmd/metrics-v3-system-drive.go +++ b/cmd/metrics-v3-system-drive.go @@ -20,8 +20,6 @@ package cmd import ( "context" "strconv" - - "github.com/minio/minio/internal/logger" ) // label constants @@ -83,7 +81,7 @@ var ( func loadDriveMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { driveMetrics, err := c.driveMetrics.Get() if err != nil { - logger.LogIf(ctx, err) + metricsLogIf(ctx, err) return nil } diff --git a/cmd/notification.go b/cmd/notification.go index 12fea844a..5f4a583cc 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -126,7 +126,7 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a if i == g.retryCount-1 { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", addr.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogOnceIf(ctx, err, addr.String()) + peersLogOnceIf(ctx, err, addr.String()) } // Wait for a minimum of 100ms and dynamically increase this based on number of attempts. 
if i < g.retryCount-1 { @@ -312,7 +312,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io if err != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogIf(ctx, err) + peersLogIf(ctx, err) continue } @@ -323,7 +323,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io if err != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogIf(ctx, err) + peersLogIf(ctx, err) } } } @@ -331,7 +331,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io // Local host thisAddr, err := xnet.ParseHost(globalLocalNodeName) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return profilingDataFound } @@ -339,7 +339,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io if err != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", thisAddr.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return profilingDataFound } @@ -348,10 +348,10 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io // Send profiling data to zip as file for typ, data := range data { err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", thisAddr, typ), data, 0o600) - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } if b := getClusterMetaInfo(ctx); len(b) > 0 { - logger.LogIf(ctx, embedFileInZip(zipWriter, "cluster.info", b, 0o600)) + internalLogIf(ctx, embedFileInZip(zipWriter, "cluster.info", b, 0o600)) } return @@ -480,7 +480,7 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", sys.peerClients[index].host.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String()) + peersLogOnceIf(ctx, err, sys.peerClients[index].host.String()) } locksResp = append(locksResp, &PeerLocks{ Addr: getHostName(r), @@ -504,7 +504,7 @@ func (sys *NotificationSys) LoadBucketMetadata(ctx context.Context, bucketName s for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } @@ -534,7 +534,7 @@ func (sys *NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } @@ -561,7 +561,7 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } @@ -603,7 +603,7 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - 
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } bucketStats = append(bucketStats, BucketStats{ @@ -636,7 +636,7 @@ func (sys *NotificationSys) GetClusterSiteMetrics(ctx context.Context) []SRMetri for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } siteStats = append(siteStats, globalReplicationStats.getSRMetricsForNode()) @@ -658,7 +658,7 @@ func (sys *NotificationSys) ReloadPoolMeta(ctx context.Context) { for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } @@ -679,13 +679,13 @@ func (sys *NotificationSys) StopRebalance(ctx context.Context) { for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } objAPI := newObjectLayerFn() if objAPI == nil { - logger.LogIf(ctx, errServerNotInitialized) + internalLogIf(ctx, errServerNotInitialized) return } @@ -711,7 +711,7 @@ func (sys *NotificationSys) LoadRebalanceMeta(ctx context.Context, startRebalanc for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } @@ -732,7 +732,7 @@ func (sys *NotificationSys) LoadTransitionTierConfig(ctx context.Context) { for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } @@ -946,7 +946,7 @@ func (sys *NotificationSys) addNodeErr(nodeInfo madmin.NodeInfo, peerClient *pee addr := peerClient.host.String() reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, err, "add-node-err-"+addr) + peersLogOnceIf(ctx, err, "add-node-err-"+addr) nodeInfo.SetAddr(addr) nodeInfo.SetError(err.Error()) } @@ -1187,7 +1187,7 @@ func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ... 
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", sys.peerClients[index].host.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String()) + peersLogOnceIf(ctx, err, sys.peerClients[index].host.String()) } reports = append(reports, globalBucketMonitor.GetReport(bandwidth.SelectBuckets(buckets...))) consolidatedReport := bandwidth.BucketBandwidthReport{ @@ -1222,9 +1222,9 @@ func (sys *NotificationSys) collectPeerMetrics(ctx context.Context, peerChannels if sys.peerClients[index] != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", sys.peerClients[index].host.String()) - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) } else { - logger.LogOnceIf(ctx, err, "peer-offline") + peersLogOnceIf(ctx, err, "peer-offline") } continue } @@ -1460,7 +1460,7 @@ func (sys *NotificationSys) DriveSpeedTest(ctx context.Context, opts madmin.Driv reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", client.host.String()) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, err, client.host.String()) + peersLogOnceIf(ctx, err, client.host.String()) }(client) } @@ -1521,7 +1521,7 @@ func (sys *NotificationSys) GetLastDayTierStats(ctx context.Context) DailyAllTie merged := globalTransitionState.getDailyAllTierStats() for i, stat := range lastDayStats { if errs[i] != nil { - logger.LogOnceIf(ctx, fmt.Errorf("failed to fetch last day tier stats: %w", errs[i]), sys.peerClients[i].host.String()) + peersLogOnceIf(ctx, fmt.Errorf("failed to fetch last day tier stats: %w", errs[i]), sys.peerClients[i].host.String()) continue } merged.merge(stat) @@ -1556,9 +1556,9 @@ func (sys *NotificationSys) GetReplicationMRF(ctx context.Context, bucket, node if sys.peerClients[index] != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", sys.peerClients[index].host.String()) - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) } else { - logger.LogOnceIf(ctx, err, "peer-offline") + peersLogOnceIf(ctx, err, "peer-offline") } continue } diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go index 4e642e390..1e0d3f391 100644 --- a/cmd/object-api-datatypes.go +++ b/cmd/object-api-datatypes.go @@ -27,7 +27,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/hash" - "github.com/minio/minio/internal/logger" ) //go:generate msgp -file $GOFILE -io=false -tests=false -unexported=false @@ -246,7 +245,7 @@ func (o *ObjectInfo) ArchiveInfo() []byte { if v, ok := o.UserDefined[archiveTypeMetadataKey]; ok && v == archiveTypeEnc { decrypted, err := o.metadataDecrypter()(archiveTypeEnc, data) if err != nil { - logger.LogIf(GlobalContext, err) + encLogIf(GlobalContext, err) return nil } data = decrypted diff --git a/cmd/object-api-input-checks.go b/cmd/object-api-input-checks.go index 8439a2150..9c8b213e4 100644 --- a/cmd/object-api-input-checks.go +++ b/cmd/object-api-input-checks.go @@ -24,7 +24,6 @@ import ( "strings" "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio/internal/logger" ) // Checks on CopyObject arguments, bucket and object. 
@@ -71,10 +70,6 @@ func checkListObjsArgs(ctx context.Context, bucket, prefix, marker string) error // Validates object prefix validity after bucket exists. if !IsValidObjectPrefix(prefix) { - logger.LogIf(ctx, ObjectNameInvalid{ - Bucket: bucket, - Object: prefix, - }) return ObjectNameInvalid{ Bucket: bucket, Object: prefix, @@ -90,10 +85,6 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo } if uploadIDMarker != "" { if HasSuffix(keyMarker, SlashSeparator) { - logger.LogIf(ctx, InvalidUploadIDKeyCombination{ - UploadIDMarker: uploadIDMarker, - KeyMarker: keyMarker, - }) return InvalidUploadIDKeyCombination{ UploadIDMarker: uploadIDMarker, KeyMarker: keyMarker, @@ -101,7 +92,6 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo } _, err := base64.RawURLEncoding.DecodeString(uploadIDMarker) if err != nil { - logger.LogIf(ctx, err) return MalformedUploadID{ UploadID: uploadIDMarker, } diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index d74218865..b4aed71d6 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -498,7 +498,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj proxyGetErr := ErrorRespToObjectError(perr, bucket, object) if !isErrBucketNotFound(proxyGetErr) && !isErrObjectNotFound(proxyGetErr) && !isErrVersionNotFound(proxyGetErr) && !isErrPreconditionFailed(proxyGetErr) && !isErrInvalidRange(proxyGetErr) { - logger.LogIf(ctx, fmt.Errorf("Proxying request (replication) failed for %s/%s(%s) - %w", bucket, object, opts.VersionID, perr)) + replLogIf(ctx, fmt.Errorf("Proxying request (replication) failed for %s/%s(%s) - %w", bucket, object, opts.VersionID, perr)) } } if reader != nil && proxy.Proxy && perr == nil { @@ -3788,7 +3788,7 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r * VersionID: objInfo.VersionID, } if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to restore transitioned bucket/object %s/%s: %w", bucket, object, err)) + s3LogIf(ctx, fmt.Errorf("Unable to restore transitioned bucket/object %s/%s: %w", bucket, object, err)) return } diff --git a/cmd/peer-rest-client.go b/cmd/peer-rest-client.go index 404991358..8d58b4cb2 100644 --- a/cmd/peer-rest-client.go +++ b/cmd/peer-rest-client.go @@ -83,7 +83,7 @@ func newPeerRESTClient(peer *xnet.Host, gridHost string) *peerRESTClient { // Lazy initialization of grid connection. // When we create this peer client, the grid connection is likely not yet initialized. 
if gridHost == "" { - logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", peer.String()), peer.String()+":gridHost") + bugLogIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", peer.String()), peer.String()+":gridHost") return nil } gc := gridConn.Load() @@ -96,7 +96,7 @@ func newPeerRESTClient(peer *xnet.Host, gridHost string) *peerRESTClient { } gc = gm.Connection(gridHost) if gc == nil { - logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost %q not found for peer %s", gridHost, peer.String()), peer.String()+":gridHost") + bugLogIf(context.Background(), fmt.Errorf("gridHost %q not found for peer %s", gridHost, peer.String()), peer.String()+":gridHost") return nil } gridConn.Store(gc) @@ -500,7 +500,7 @@ func (client *peerRESTClient) doTrace(ctx context.Context, traceCh chan<- []byte payload, err := json.Marshal(traceOpts) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return } @@ -628,7 +628,7 @@ func newPeerRestClients(endpoints EndpointServerPools) (remote, all []*peerRESTC remote = append(remote, all[i]) } if len(all) != len(remote)+1 { - logger.LogIf(context.Background(), fmt.Errorf("WARNING: Expected number of all hosts (%v) to be remote +1 (%v)", len(all), len(remote))) + peersLogIf(context.Background(), fmt.Errorf("Expected number of all hosts (%v) to be remote +1 (%v)", len(all), len(remote)), logger.WarningKind) } return remote, all } diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index db8b7839d..49b903574 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -349,7 +349,7 @@ func (s *peerRESTServer) DownloadProfilingDataHandler(w http.ResponseWriter, r * s.writeErrorResponse(w, err) return } - logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData)) + peersLogIf(ctx, gob.NewEncoder(w).Encode(profileData)) } func (s *peerRESTServer) LocalStorageInfoHandler(mss *grid.MSS) (*grid.JSON[madmin.StorageInfo], *grid.RemoteErr) { @@ -815,7 +815,7 @@ func (s *peerRESTServer) ListenHandler(ctx context.Context, v *grid.URLValues, o buf.Reset() tmpEvt.Records[0] = ev if err := enc.Encode(tmpEvt); err != nil { - logger.LogOnceIf(ctx, err, "event: Encode failed") + peersLogOnceIf(ctx, err, "event: Encode failed") continue } out <- grid.NewBytesWithCopyOf(buf.Bytes()) @@ -866,7 +866,7 @@ func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np g return np, grid.NewRemoteErr(errServerNotInitialized) } - logger.LogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI)) + peersLogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI)) return } @@ -939,7 +939,7 @@ func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid go func() { err := globalTierConfigMgr.Reload(context.Background(), newObjectLayerFn()) if err != nil { - logger.LogIf(context.Background(), fmt.Errorf("Failed to reload remote tier config %s", err)) + peersLogIf(context.Background(), fmt.Errorf("Failed to reload remote tier config %s", err)) } }() @@ -1090,7 +1090,7 @@ func (s *peerRESTServer) SpeedTestHandler(w http.ResponseWriter, r *http.Request } done(nil) - logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) + peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result)) } // GetLastDayTierStatsHandler - returns per-tier stats in the last 24hrs for this server @@ -1139,7 +1139,7 @@ func (s *peerRESTServer) DriveSpeedTestHandler(w http.ResponseWriter, r *http.Re result := driveSpeedTest(r.Context(), opts) done(nil) - 
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) + peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result)) } // GetReplicationMRFHandler - returns replication MRF for bucket @@ -1186,7 +1186,7 @@ func (s *peerRESTServer) DevNull(w http.ResponseWriter, r *http.Request) { // If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec) // would mean the network is not stable. Logging here will help in debugging network issues. if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) { - logger.LogIf(ctx, err) + peersLogIf(ctx, err) } } if err != nil { @@ -1208,7 +1208,7 @@ func (s *peerRESTServer) NetSpeedTestHandler(w http.ResponseWriter, r *http.Requ duration = time.Second * 10 } result := netperf(r.Context(), duration.Round(time.Second)) - logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) + peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result)) } func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { diff --git a/cmd/peer-s3-client.go b/cmd/peer-s3-client.go index 9841baae7..aadbf2dce 100644 --- a/cmd/peer-s3-client.go +++ b/cmd/peer-s3-client.go @@ -28,7 +28,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/grid" - "github.com/minio/minio/internal/logger" "github.com/minio/pkg/v2/sync/errgroup" "golang.org/x/exp/slices" ) @@ -511,7 +510,7 @@ func newPeerS3Client(node Node) peerS3Client { // Lazy initialization of grid connection. // When we create this peer client, the grid connection is likely not yet initialized. if node.GridHost == "" { - logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", node.Host), node.Host+":gridHost") + bugLogIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", node.Host), node.Host+":gridHost") return nil } gc := gridConn.Load() @@ -524,7 +523,7 @@ func newPeerS3Client(node Node) peerS3Client { } gc = gm.Connection(node.GridHost) if gc == nil { - logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost %s not found for peer %s", node.GridHost, node.Host), node.Host+":gridHost") + bugLogIf(context.Background(), fmt.Errorf("gridHost %s not found for peer %s", node.GridHost, node.Host), node.Host+":gridHost") return nil } gridConn.Store(gc) diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index e0ad6a942..1af72899a 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -48,7 +48,7 @@ var printEndpointError = func() func(Endpoint, error, bool) { printOnce[endpoint] = m if once { m[err.Error()]++ - logger.LogAlwaysIf(ctx, err) + peersLogAlwaysIf(ctx, err) return } } @@ -60,7 +60,7 @@ var printEndpointError = func() func(Endpoint, error, bool) { // once not set, check if same error occurred 3 times in // a row, then make sure we print it to call attention. if m[err.Error()] > 2 { - logger.LogAlwaysIf(ctx, fmt.Errorf("Following error has been printed %d times.. %w", m[err.Error()], err)) + peersLogAlwaysIf(ctx, fmt.Errorf("Following error has been printed %d times.. 
%w", m[err.Error()], err)) // Reduce the count to introduce further delay in printing // but let it again print after the 2th attempt m[err.Error()]-- @@ -86,14 +86,14 @@ func bgFormatErasureCleanupTmp(diskPath string) { tmpOld := pathJoin(diskPath, minioMetaTmpBucket+"-old", tmpID) if err := renameAll(pathJoin(diskPath, minioMetaTmpBucket), tmpOld, diskPath); err != nil && !errors.Is(err, errFileNotFound) { - logger.LogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate", + storageLogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty, please investigate", pathJoin(diskPath, minioMetaTmpBucket), tmpOld, osErrToFileErr(err))) } if err := mkdirAll(pathJoin(diskPath, minioMetaTmpDeletedBucket), 0o777, diskPath); err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("unable to create (%s) %w, drive may be faulty please investigate", + storageLogIf(GlobalContext, fmt.Errorf("unable to create (%s) %w, drive may be faulty, please investigate", pathJoin(diskPath, minioMetaTmpBucket), err)) } @@ -240,7 +240,7 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpo format, err = getFormatErasureInQuorum(formatConfigs) if err != nil { - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) return nil, nil, err } @@ -250,7 +250,7 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpo return nil, nil, errNotFirstDisk } if err = formatErasureFixDeploymentID(endpoints, storageDisks, format, formatConfigs); err != nil { - logger.LogIf(GlobalContext, err) + storageLogIf(GlobalContext, err) return nil, nil, err } } @@ -258,7 +258,7 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpo globalDeploymentIDPtr.Store(&format.ID) if err = formatErasureFixLocalDeploymentID(endpoints, storageDisks, format); err != nil { - logger.LogIf(GlobalContext, err) + storageLogIf(GlobalContext, err) return nil, nil, err } diff --git a/cmd/server-main.go b/cmd/server-main.go index 42166b293..38e6f6bf3 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -367,7 +367,7 @@ func serverHandleCmdArgs(ctxt serverCtxt) { RoundTripper: NewHTTPTransportWithTimeout(1 * time.Hour), Logger: func(err error) { if err != nil && !errors.Is(err, context.Canceled) { - logger.LogIf(GlobalContext, err) + replLogIf(GlobalContext, err) } }, }) @@ -577,7 +577,7 @@ func initConfigSubsystem(ctx context.Context, newObject ObjectLayer) error { } // Any other config errors we simply print a message and proceed forward. 
- logger.LogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing: %w", err)) } return nil @@ -777,7 +777,7 @@ func serverMain(ctx *cli.Context) { httpServer.TCPOptions.Trace = bootstrapTraceMsg go func() { serveFn, err := httpServer.Init(GlobalContext, func(listenAddr string, err error) { - logger.LogIf(GlobalContext, fmt.Errorf("Unable to listen on `%s`: %v", listenAddr, err)) + bootLogIf(GlobalContext, fmt.Errorf("Unable to listen on `%s`: %v", listenAddr, err)) }) if err != nil { globalHTTPServerErrorCh <- err @@ -839,7 +839,7 @@ func serverMain(ctx *cli.Context) { logger.FatalIf(err, "Server startup canceled upon user request") } - logger.LogIf(GlobalContext, err) + bootLogIf(GlobalContext, err) } if !globalServerCtxt.StrictS3Compat { @@ -935,14 +935,14 @@ func serverMain(ctx *cli.Context) { // Initialize transition tier configuration manager bootstrapTrace("globalTierConfigMgr.Init", func() { if err := globalTierConfigMgr.Init(GlobalContext, newObject); err != nil { - logger.LogIf(GlobalContext, err) + bootLogIf(GlobalContext, err) } }) }() // Initialize bucket notification system. bootstrapTrace("initBucketTargets", func() { - logger.LogIf(GlobalContext, globalEventNotifier.InitBucketTargets(GlobalContext, newObject)) + bootLogIf(GlobalContext, globalEventNotifier.InitBucketTargets(GlobalContext, newObject)) }) var buckets []BucketInfo @@ -956,7 +956,7 @@ func serverMain(ctx *cli.Context) { time.Sleep(time.Duration(r.Float64() * float64(time.Second))) continue } - logger.LogIf(GlobalContext, fmt.Errorf("Unable to list buckets to initialize bucket metadata sub-system: %w", err)) + bootLogIf(GlobalContext, fmt.Errorf("Unable to list buckets to initialize bucket metadata sub-system: %w", err)) } break diff --git a/cmd/sftp-server-driver.go b/cmd/sftp-server-driver.go index a713c30e3..fcdf31985 100644 --- a/cmd/sftp-server-driver.go +++ b/cmd/sftp-server-driver.go @@ -33,7 +33,6 @@ import ( "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio/internal/auth" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" ) @@ -136,7 +135,7 @@ func (f *sftpDriver) getMinIOClient() (*minio.Client, error) { } // Call hook for site replication. 
- logger.LogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ + replLogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, diff --git a/cmd/sftp-server.go b/cmd/sftp-server.go index ee9331aa5..04fa336a8 100644 --- a/cmd/sftp-server.go +++ b/cmd/sftp-server.go @@ -43,13 +43,13 @@ func (s *sftpLogger) Info(tag xsftp.LogType, msg string) { func (s *sftpLogger) Error(tag xsftp.LogType, err error) { switch tag { case xsftp.AcceptNetworkError: - logger.LogOnceIf(context.Background(), err, "accept-limit-sftp") + sftpLogOnceIf(context.Background(), err, "accept-limit-sftp") case xsftp.AcceptChannelError: - logger.LogOnceIf(context.Background(), err, "accept-channel-sftp") + sftpLogOnceIf(context.Background(), err, "accept-channel-sftp") case xsftp.SSHKeyExchangeError: - logger.LogOnceIf(context.Background(), err, "key-exchange-sftp") + sftpLogOnceIf(context.Background(), err, "key-exchange-sftp") default: - logger.LogOnceIf(context.Background(), err, "unknown-error-sftp") + sftpLogOnceIf(context.Background(), err, "unknown-error-sftp") } } diff --git a/cmd/signals.go b/cmd/signals.go index f5159a983..097242c7e 100644 --- a/cmd/signals.go +++ b/cmd/signals.go @@ -51,16 +51,16 @@ func handleSignals() { if httpServer := newHTTPServerFn(); httpServer != nil { if err := httpServer.Shutdown(); err != nil && !errors.Is(err, http.ErrServerClosed) { - logger.LogIf(context.Background(), err) + shutdownLogIf(context.Background(), err) } } if objAPI := newObjectLayerFn(); objAPI != nil { - logger.LogIf(context.Background(), objAPI.Shutdown(context.Background())) + shutdownLogIf(context.Background(), objAPI.Shutdown(context.Background())) } if srv := newConsoleServerFn(); srv != nil { - logger.LogIf(context.Background(), srv.Shutdown()) + shutdownLogIf(context.Background(), srv.Shutdown()) } if globalEventNotifier != nil { @@ -73,7 +73,7 @@ func handleSignals() { for { select { case err := <-globalHTTPServerErrorCh: - logger.LogIf(context.Background(), err) + shutdownLogIf(context.Background(), err) exit(stopProcess()) case osSignal := <-globalOSSignalCh: logger.Info("Exiting on signal: %s", strings.ToUpper(osSignal.String())) @@ -89,7 +89,7 @@ func handleSignals() { if rerr == nil { daemon.SdNotify(false, daemon.SdNotifyReady) } - logger.LogIf(context.Background(), rerr) + shutdownLogIf(context.Background(), rerr) exit(stop && rerr == nil) case serviceStop: logger.Info("Stopping on service signal") diff --git a/cmd/site-replication.go b/cmd/site-replication.go index 12cb3ed04..8b96b1914 100644 --- a/cmd/site-replication.go +++ b/cmd/site-replication.go @@ -238,7 +238,7 @@ func (c *SiteReplicationSys) Init(ctx context.Context, objAPI ObjectLayer) error if err == nil { break } - logger.LogOnceIf(context.Background(), fmt.Errorf("unable to initialize site replication subsystem: (%w)", err), "site-relication-init") + replLogOnceIf(context.Background(), fmt.Errorf("unable to initialize site replication subsystem: (%w)", err), "site-relication-init") duration := time.Duration(r.Float64() * float64(time.Minute)) if duration < time.Second { @@ -313,7 +313,7 @@ func (c *SiteReplicationSys) saveToDisk(ctx context.Context, state srState) erro } for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) { - logger.LogIf(ctx, err) + replLogIf(ctx, err) } c.Lock() @@ -334,7 +334,7 @@ func (c 
*SiteReplicationSys) removeFromDisk(ctx context.Context) error { } for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) { - logger.LogIf(ctx, err) + replLogIf(ctx, err) } c.Lock() @@ -1186,7 +1186,7 @@ func (c *SiteReplicationSys) PeerBucketDeleteHandler(ctx context.Context, bucket if err != nil { if globalDNSConfig != nil { if err2 := globalDNSConfig.Put(bucket); err2 != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2)) + replLogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2)) } } return err @@ -4074,7 +4074,7 @@ func (c *SiteReplicationSys) EditPeerCluster(ctx context.Context, peer madmin.Pe wg.Wait() for dID, err := range errs { - logger.LogOnceIf(ctx, fmt.Errorf("unable to update peer %s: %w", state.Peers[dID].Name, err), "site-relication-edit") + replLogOnceIf(ctx, fmt.Errorf("unable to update peer %s: %w", state.Peers[dID].Name, err), "site-relication-edit") } // we can now save the cluster replication configuration state. @@ -4141,21 +4141,21 @@ func (c *SiteReplicationSys) updateTargetEndpoints(ctx context.Context, prevInfo } err := globalBucketTargetSys.SetTarget(ctx, bucket, &bucketTarget, true) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peer.Name, "Bucket target creation error", err)) + replLogIf(ctx, c.annotatePeerErr(peer.Name, "Bucket target creation error", err)) continue } targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket) if err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) continue } tgtBytes, err := json.Marshal(&targets) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) continue } if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) continue } } @@ -4390,7 +4390,7 @@ func (c *SiteReplicationSys) healILMExpiryConfig(ctx context.Context, objAPI Obj return wrapSRErr(err) } if err = admClient.SRStateEdit(ctx, madmin.SRStateEditReq{Peers: latestPeers, UpdatedAt: lastUpdate}); err != nil { - logger.LogIf(ctx, c.annotatePeerErr(ps.Name, siteReplicationEdit, + replLogIf(ctx, c.annotatePeerErr(ps.Name, siteReplicationEdit, fmt.Errorf("Unable to heal site replication state for peer %s from peer %s : %w", ps.Name, latestPeerName, err))) } @@ -4493,7 +4493,7 @@ func (c *SiteReplicationSys) healBucketILMExpiry(ctx context.Context, objAPI Obj if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, finalConfigData); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal bucket ILM expiry data from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal bucket ILM expiry data from peer site %s : %w", latestPeerName, err)) } continue } @@ -4509,7 +4509,7 @@ func (c *SiteReplicationSys) healBucketILMExpiry(ctx context.Context, objAPI Obj ExpiryLCConfig: latestExpLCConfig, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal bucket ILM expiry data for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4566,7 +4566,7 @@ func (c *SiteReplicationSys) healTagMetadata(ctx context.Context, objAPI ObjectL } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, latestTaggingConfigBytes); err != nil { - 
logger.LogIf(ctx, fmt.Errorf("Unable to heal tagging metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal tagging metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4582,7 +4582,7 @@ func (c *SiteReplicationSys) healTagMetadata(ctx context.Context, objAPI ObjectL Tags: latestTaggingConfig, }) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal tagging metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } } @@ -4630,7 +4630,7 @@ func (c *SiteReplicationSys) healBucketPolicies(ctx context.Context, objAPI Obje } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, latestIAMPolicy); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal bucket policy metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal bucket policy metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4646,7 +4646,7 @@ func (c *SiteReplicationSys) healBucketPolicies(ctx context.Context, objAPI Obje Policy: latestIAMPolicy, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal bucket policy metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4705,7 +4705,7 @@ func (c *SiteReplicationSys) healBucketQuotaConfig(ctx context.Context, objAPI O } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, latestQuotaConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal quota metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal quota metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4722,7 +4722,7 @@ func (c *SiteReplicationSys) healBucketQuotaConfig(ctx context.Context, objAPI O Quota: latestQuotaConfigBytes, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal quota config metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4780,7 +4780,7 @@ func (c *SiteReplicationSys) healVersioningMetadata(ctx context.Context, objAPI } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, latestVersioningConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal versioning metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal versioning metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4797,7 +4797,7 @@ func (c *SiteReplicationSys) healVersioningMetadata(ctx context.Context, objAPI UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal versioning config metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4855,7 +4855,7 @@ func (c *SiteReplicationSys) healSSEMetadata(ctx context.Context, objAPI ObjectL } if dID == globalDeploymentID() { if _, err := 
globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, latestSSEConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal sse metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal sse metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4872,7 +4872,7 @@ func (c *SiteReplicationSys) healSSEMetadata(ctx context.Context, objAPI ObjectL UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal SSE config metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4930,7 +4930,7 @@ func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, latestObjLockConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal objectlock config metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal objectlock config metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4947,7 +4947,7 @@ func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal object lock config metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -5184,7 +5184,7 @@ func (c *SiteReplicationSys) healBucketReplicationConfig(ctx context.Context, ob } if replMismatch { - logger.LogIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket))) + replLogIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket))) } return nil } @@ -5277,7 +5277,7 @@ func (c *SiteReplicationSys) healPolicies(ctx context.Context, objAPI ObjectLaye UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err)) } } return nil @@ -5338,7 +5338,7 @@ func (c *SiteReplicationSys) healUserPolicies(ctx context.Context, objAPI Object UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping for %s from peer site %s -> site %s : %w", user, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping for %s from peer site %s -> site %s : %w", user, latestPeerName, peerName, err)) } } return nil @@ -5401,7 +5401,7 @@ func (c *SiteReplicationSys) healGroupPolicies(ctx context.Context, objAPI Objec UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM group policy mapping for %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal IAM group policy mapping for %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) } } return nil @@ -5462,13 +5462,13 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, if creds.IsServiceAccount() { claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, creds.AccessKey) if err != nil 
{ - logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) continue } _, policy, err := globalIAMSys.GetServiceAccount(ctx, creds.AccessKey) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) continue } @@ -5476,7 +5476,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, if policy != nil { policyJSON, err = json.Marshal(policy) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) continue } } @@ -5499,7 +5499,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, }, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) } continue } @@ -5512,7 +5512,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, // policy. The session token will contain info about policy to // be applied. if !errors.Is(err, errNoSuchUser) { - logger.LogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) continue } } else { @@ -5530,7 +5530,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, }, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) } continue } @@ -5546,7 +5546,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, }, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal user %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal user %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) } } return nil @@ -5610,7 +5610,7 @@ func (c *SiteReplicationSys) healGroups(ctx context.Context, objAPI ObjectLayer, }, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal group %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal group %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) } } return nil diff --git a/cmd/storage-errors.go b/cmd/storage-errors.go index 5aff111f0..b39d7c8ae 100644 --- a/cmd/storage-errors.go +++ b/cmd/storage-errors.go @@ -20,8 +20,6 @@ package cmd import ( "context" "errors" - - "github.com/minio/minio/internal/logger" 
) // errMaxVersionsExceeded return error beyond 10000 (default) versions per object @@ -176,7 +174,7 @@ func osErrToFileErr(err error) error { return errFaultyDisk } if isSysErrInvalidArg(err) { - logger.LogIf(context.Background(), err) + storageLogIf(context.Background(), err) // For some odd calls with O_DIRECT reads // filesystems can return EINVAL, handle // these as FileNotFound instead. diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go index a991284ad..b37298b2b 100644 --- a/cmd/storage-rest-client.go +++ b/cmd/storage-rest-client.go @@ -38,7 +38,6 @@ import ( "github.com/minio/minio/internal/grid" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/rest" xnet "github.com/minio/pkg/v2/net" xbufio "github.com/philhofer/fwd" @@ -695,7 +694,7 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri for _, version := range versions { version.EncodeMsg(encoder) } - logger.LogIf(ctx, encoder.Flush()) + storageLogIf(ctx, encoder.Flush()) errs = make([]error, len(versions)) diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go index b860ce2dc..b926e6759 100644 --- a/cmd/storage-rest-server.go +++ b/cmd/storage-rest-server.go @@ -414,7 +414,7 @@ func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Re return } - logger.LogIf(r.Context(), msgp.Encode(w, &fi)) + storageLogIf(r.Context(), msgp.Encode(w, &fi)) } // WriteMetadataHandler rpc handler to write new updated metadata. @@ -495,7 +495,7 @@ func (s *storageRESTServer) ReadXLHandler(w http.ResponseWriter, r *http.Request return } - logger.LogIf(r.Context(), msgp.Encode(w, &rf)) + storageLogIf(r.Context(), msgp.Encode(w, &rf)) } // ReadXLHandlerWS - read xl.meta for an object at path. @@ -597,7 +597,7 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http if ok { _, err = rf.ReadFrom(sr.Reader) if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients - logger.LogIf(r.Context(), err) + storageLogIf(r.Context(), err) } if err == nil || !errors.Is(err, xhttp.ErrNotImplemented) { return @@ -607,7 +607,7 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http _, err = xioutil.Copy(w, rc) if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients - logger.LogIf(r.Context(), err) + storageLogIf(r.Context(), err) } } @@ -1180,25 +1180,25 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) { hint = fmt.Sprintf("Run the following command to add write permissions: `sudo chown -R %s. 
&& sudo chmod u+rxw `", username) } if !exit { - logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is not writable %s, %s", endpoint, hint), "log-fatal-errs") + storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is not writable %s, %s", endpoint, hint), "log-fatal-errs") } else { logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend") } case errors.Is(err, errFaultyDisk): if !exit { - logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is faulty at %s, please replace the drive - drive will be offline", endpoint), "log-fatal-errs") + storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is faulty at %s, please replace the drive - drive will be offline", endpoint), "log-fatal-errs") } else { logger.Fatal(err, "Unable to initialize backend") } case errors.Is(err, errDiskFull): if !exit { - logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is already full at %s, incoming I/O will fail - drive will be offline", endpoint), "log-fatal-errs") + storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is already full at %s, incoming I/O will fail - drive will be offline", endpoint), "log-fatal-errs") } else { logger.Fatal(err, "Unable to initialize backend") } default: if !exit { - logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive %s returned an unexpected error: %w, please investigate - drive will be offline", endpoint, err), "log-fatal-errs") + storageLogOnceIf(GlobalContext, fmt.Errorf("Drive %s returned an unexpected error: %w, please investigate - drive will be offline", endpoint, err), "log-fatal-errs") } else { logger.Fatal(err, "Unable to initialize backend") } diff --git a/cmd/sts-errors.go b/cmd/sts-errors.go index 085ce9c69..159331e8e 100644 --- a/cmd/sts-errors.go +++ b/cmd/sts-errors.go @@ -40,7 +40,7 @@ func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode S } switch errCode { case ErrSTSInternalError, ErrSTSUpstreamError: - logger.LogIf(ctx, err, logger.ErrorKind) + stsLogIf(ctx, err, logger.ErrorKind) } encodedErrorResponse := encodeResponse(stsErrorResponse) writeResponse(w, stsErr.HTTPStatusCode, encodedErrorResponse, mimeXML) diff --git a/cmd/sts-handlers.go b/cmd/sts-handlers.go index 4b755f8ea..35f34dcc7 100644 --- a/cmd/sts-handlers.go +++ b/cmd/sts-handlers.go @@ -314,7 +314,7 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) { // Call hook for site replication. if cred.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, @@ -547,7 +547,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ } // Call hook for site replication. - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, @@ -728,7 +728,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r * } // Call hook for site replication. 
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, @@ -898,7 +898,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h } // Call hook for site replication. - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: tmpCredentials.AccessKey, @@ -1028,7 +1028,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCustomToken(w http.ResponseWriter, r *h } // Call hook for site replication. - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: tmpCredentials.AccessKey, diff --git a/cmd/tier.go b/cmd/tier.go index 4d5229673..8bd0e7d67 100644 --- a/cmd/tier.go +++ b/cmd/tier.go @@ -34,7 +34,6 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" "github.com/prometheus/client_golang/prometheus" ) @@ -534,7 +533,7 @@ func (config *TierConfigMgr) refreshTierConfig(ctx context.Context, objAPI Objec case <-t.C: err := config.Reload(ctx, objAPI) if err != nil { - logger.LogIf(ctx, err) + tierLogIf(ctx, err) } } t.Reset(tierCfgRefresh + randInterval()) diff --git a/cmd/untar.go b/cmd/untar.go index afbae59f8..345da01fb 100644 --- a/cmd/untar.go +++ b/cmd/untar.go @@ -36,7 +36,6 @@ import ( "github.com/klauspost/compress/s2" "github.com/klauspost/compress/zstd" gzip "github.com/klauspost/pgzip" - "github.com/minio/minio/internal/logger" "github.com/pierrec/lz4" ) @@ -249,7 +248,7 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in }() if err := putObject(&rc, fi, name); err != nil { if o.ignoreErrs { - logger.LogIf(ctx, err) + s3LogIf(ctx, err) return } asyncErrMu.Lock() @@ -273,7 +272,7 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in if err := putObject(&rc, header.FileInfo(), name); err != nil { rc.Close() if o.ignoreErrs { - logger.LogIf(ctx, err) + s3LogIf(ctx, err) continue } return err diff --git a/cmd/update.go b/cmd/update.go index 0d1fc03ad..08b445847 100644 --- a/cmd/update.go +++ b/cmd/update.go @@ -142,7 +142,7 @@ func IsDocker() bool { } // Log error, as we will not propagate it to caller - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) return err == nil } @@ -172,7 +172,7 @@ func IsBOSH() bool { } // Log error, as we will not propagate it to caller - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) return err == nil } @@ -189,7 +189,7 @@ func getHelmVersion(helmInfoFilePath string) string { if !osIsNotExist(err) { reqInfo := (&logger.ReqInfo{}).AppendTags("helmInfoFilePath", helmInfoFilePath) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } return "" } diff --git a/cmd/utils.go b/cmd/utils.go index c0578d5c4..554d3d949 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -624,7 +624,7 @@ func NewHTTPTransportWithClientCerts(clientCert, clientKey string) *http.Transpo defer cancel() transport, err := 
s.NewHTTPTransportWithClientCerts(ctx, clientCert, clientKey) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load client key and cert, please check your client certificate configuration: %w", err)) + internalLogIf(ctx, fmt.Errorf("Unable to load client key and cert, please check your client certificate configuration: %w", err)) } return transport } diff --git a/cmd/xl-storage-disk-id-check.go b/cmd/xl-storage-disk-id-check.go index 8d24b3234..3477635c1 100644 --- a/cmd/xl-storage-disk-id-check.go +++ b/cmd/xl-storage-disk-id-check.go @@ -969,7 +969,7 @@ func (p *xlStorageDiskIDCheck) monitorDiskWritable(ctx context.Context) { goOffline := func(err error, spent time.Duration) { if p.health.status.CompareAndSwap(diskHealthOK, diskHealthFaulty) { - logger.LogAlwaysIf(ctx, fmt.Errorf("node(%s): taking drive %s offline: %v", globalLocalNodeName, p.storage.String(), err)) + storageLogAlwaysIf(ctx, fmt.Errorf("node(%s): taking drive %s offline: %v", globalLocalNodeName, p.storage.String(), err)) p.health.waiting.Add(1) go p.monitorDiskStatus(spent, fn) } diff --git a/cmd/xl-storage-format-v1.go b/cmd/xl-storage-format-v1.go index a9d0023f3..4d9da5565 100644 --- a/cmd/xl-storage-format-v1.go +++ b/cmd/xl-storage-format-v1.go @@ -26,7 +26,6 @@ import ( "github.com/cespare/xxhash/v2" jsoniter "github.com/json-iterator/go" - "github.com/minio/minio/internal/logger" ) // XL constants. @@ -210,7 +209,7 @@ func (c *ChecksumInfo) UnmarshalJSON(data []byte) error { } if !c.Algorithm.Available() { - logger.LogIf(GlobalContext, errBitrotHashAlgoInvalid) + internalLogIf(GlobalContext, errBitrotHashAlgoInvalid) return errBitrotHashAlgoInvalid } return nil diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go index f36878fa9..93b14e201 100644 --- a/cmd/xl-storage-format-v2.go +++ b/cmd/xl-storage-format-v2.go @@ -36,7 +36,6 @@ import ( "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/config/storageclass" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" ) @@ -939,7 +938,7 @@ func (x *xlMetaV2) loadIndexed(buf xlMetaBuf, data xlMetaInlineData) error { x.metaV = metaV if err = x.data.validate(); err != nil { x.data.repair() - logger.LogIf(GlobalContext, fmt.Errorf("xlMetaV2.loadIndexed: data validation failed: %v. %d entries after repair", err, x.data.entries())) + storageLogIf(GlobalContext, fmt.Errorf("xlMetaV2.loadIndexed: data validation failed: %v. %d entries after repair", err, x.data.entries())) } return decodeVersions(buf, versions, func(i int, hdr, meta []byte) error { ver := &x.versions[i] @@ -1006,7 +1005,7 @@ func (x *xlMetaV2) loadLegacy(buf []byte) error { x.data = buf if err = x.data.validate(); err != nil { x.data.repair() - logger.LogIf(GlobalContext, fmt.Errorf("xlMetaV2.Load: data validation failed: %v. %d entries after repair", err, x.data.entries())) + storageLogIf(GlobalContext, fmt.Errorf("xlMetaV2.Load: data validation failed: %v. 
%d entries after repair", err, x.data.entries())) } default: return errors.New("unknown minor metadata version") @@ -1745,7 +1744,7 @@ func (x xlMetaV2) ToFileInfo(volume, path, versionID string, inclFreeVers, allPa if versionID != "" && versionID != nullVersionID { uv, err = uuid.Parse(versionID) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID)) + storageLogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID)) return fi, errFileVersionNotFound } } @@ -2051,7 +2050,7 @@ func (x xlMetaBuf) ToFileInfo(volume, path, versionID string, allParts bool) (fi if versionID != "" && versionID != nullVersionID { uv, err = uuid.Parse(versionID) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID)) + storageLogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID)) return fi, errFileVersionNotFound } } diff --git a/cmd/xl-storage-meta-inline.go b/cmd/xl-storage-meta-inline.go index d8259c555..08e3878ea 100644 --- a/cmd/xl-storage-meta-inline.go +++ b/cmd/xl-storage-meta-inline.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" ) @@ -390,13 +389,13 @@ func xlMetaV2TrimData(buf []byte) []byte { // Skip header _, metaBuf, err = msgp.ReadBytesZC(metaBuf) if err != nil { - logger.LogIf(GlobalContext, err) + storageLogIf(GlobalContext, err) return buf } // Skip CRC if maj > 1 || min >= 2 { _, metaBuf, err = msgp.ReadUint32Bytes(metaBuf) - logger.LogIf(GlobalContext, err) + storageLogIf(GlobalContext, err) } // = input - current pos ends := len(buf) - len(metaBuf) diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index b6eb30b7a..f38128cd9 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -435,7 +435,7 @@ func (s *xlStorage) Healing() *healingTracker { } h := newHealingTracker() _, err = h.UnmarshalMsg(b) - logger.LogIf(GlobalContext, err) + bugLogIf(GlobalContext, err) return h } @@ -804,12 +804,12 @@ func (s *xlStorage) checkFormatJSON() (os.FileInfo, error) { } else if osIsPermission(err) { return nil, errDiskAccessDenied } - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors return nil, errCorruptedBackend } else if osIsPermission(err) { return nil, errDiskAccessDenied } - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors return nil, errCorruptedBackend } return fi, nil @@ -855,19 +855,19 @@ func (s *xlStorage) GetDiskID() (string, error) { } else if osIsPermission(err) { return "", errDiskAccessDenied } - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors return "", errCorruptedBackend } else if osIsPermission(err) { return "", errDiskAccessDenied } - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors return "", errCorruptedBackend } format := &formatErasureV3{} json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(b, &format); err != nil { - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + bugLogIf(GlobalContext, err) // log unexpected errors return "", errCorruptedFormat 
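A note on the *LogOnceIf ids in these storage hunks ("check-format-json", "log-fatal-errs", and the per-path ids such as "read-data-unmarshal-"+dstFilePath used just below): the once-logger tracks the last error seen per id and suppresses repeats, so the id chooses the granularity of de-duplication. A hedged sketch of the two common strategies, continuing the assumed cmd wrappers from the sketch above:

// Sketch: picking a de-duplication id for storageLogOnceIf (assumed behavior:
// one log line per unique (id, error) pair until the once-tracker resets).
func reportFormatError(ctx context.Context, drivePath string, err error) {
	// Call-site-scoped id: one line per distinct error for this check,
	// no matter how many drives hit it.
	storageLogOnceIf(ctx, err, "check-format-json")

	// Resource-scoped id: one line per drive, when the operator needs to
	// know which drive is affected.
	storageLogOnceIf(ctx, err, "check-format-json-"+drivePath)
}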
} @@ -2439,7 +2439,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f } if err != nil && !IsErr(err, ignoredErrs...) && !contextCanceled(ctx) { // Only log these errors if context is not yet canceled. - logger.LogOnceIf(ctx, fmt.Errorf("drive:%s, srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v", + storageLogOnceIf(ctx, fmt.Errorf("drive:%s, srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v", s.drivePath, srcVolume, srcPath, dstVolume, dstPath, @@ -2538,12 +2538,12 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f xlMetaLegacy := &xlMetaV1Object{} json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(dstBuf, xlMetaLegacy); err != nil { - logger.LogOnceIf(ctx, err, "read-data-unmarshal-"+dstFilePath) + storageLogOnceIf(ctx, err, "read-data-unmarshal-"+dstFilePath) // Data appears corrupt. Drop data. } else { xlMetaLegacy.DataDir = legacyDataDir if err = xlMeta.AddLegacy(xlMetaLegacy); err != nil { - logger.LogOnceIf(ctx, err, "read-data-add-legacy-"+dstFilePath) + storageLogOnceIf(ctx, err, "read-data-add-legacy-"+dstFilePath) } legacyPreserved = true } @@ -2866,7 +2866,7 @@ func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi File errFileVersionNotFound, }...) { logger.GetReqInfo(ctx).AppendTags("disk", s.String()) - logger.LogOnceIf(ctx, err, partPath) + storageLogOnceIf(ctx, err, partPath) } return err } diff --git a/internal/bucket/object/lock/lock.go b/internal/bucket/object/lock/lock.go index bce57d949..d5ed3371c 100644 --- a/internal/bucket/object/lock/lock.go +++ b/internal/bucket/object/lock/lock.go @@ -37,6 +37,14 @@ import ( "github.com/minio/pkg/v2/env" ) +const ( + logSubsys = "locking" +) + +func lockLogIf(ctx context.Context, err error) { + logger.LogIf(ctx, logSubsys, err) +} + // Enabled indicates object locking is enabled const Enabled = "Enabled" @@ -153,7 +161,7 @@ type Retention struct { func (r Retention) Retain(created time.Time) bool { t, err := UTCNowNTP() if err != nil { - logger.LogIf(context.Background(), err) + lockLogIf(context.Background(), err) // Retain return true } @@ -262,7 +270,7 @@ func (config *Config) ToRetention() Retention { t, err := UTCNowNTP() if err != nil { - logger.LogIf(context.Background(), err) + lockLogIf(context.Background(), err) // Do not change any configuration // upon NTP failure. 
return r @@ -364,7 +372,7 @@ func ParseObjectRetention(reader io.Reader) (*ObjectRetention, error) { t, err := UTCNowNTP() if err != nil { - logger.LogIf(context.Background(), err) + lockLogIf(context.Background(), err) return &ret, ErrPastObjectLockRetainDate } @@ -427,7 +435,7 @@ func ParseObjectLockRetentionHeaders(h http.Header) (rmode RetMode, r RetentionD t, err := UTCNowNTP() if err != nil { - logger.LogIf(context.Background(), err) + lockLogIf(context.Background(), err) return rmode, r, ErrPastObjectLockRetainDate } diff --git a/internal/config/identity/plugin/config.go b/internal/config/identity/plugin/config.go index 93c66c9b5..8dc362ab0 100644 --- a/internal/config/identity/plugin/config.go +++ b/internal/config/identity/plugin/config.go @@ -38,6 +38,10 @@ import ( xnet "github.com/minio/pkg/v2/net" ) +func authNLogIf(ctx context.Context, err error) { + logger.LogIf(ctx, "authN", err) +} + // Authentication Plugin config and env variables const ( URL = "url" @@ -434,7 +438,7 @@ func (o *AuthNPlugin) checkConnectivity(ctx context.Context) bool { req, err := http.NewRequestWithContext(ctx, http.MethodHead, u.String(), nil) if err != nil { - logger.LogIf(ctx, err) + authNLogIf(ctx, err) return false } diff --git a/internal/config/lambda/parse.go b/internal/config/lambda/parse.go index 24bc2055a..83853e653 100644 --- a/internal/config/lambda/parse.go +++ b/internal/config/lambda/parse.go @@ -31,6 +31,14 @@ import ( xnet "github.com/minio/pkg/v2/net" ) +const ( + logSubsys = "notify" +) + +func logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { + logger.LogOnceIf(ctx, logSubsys, err, id, errKind...) +} + // ErrTargetsOffline - Indicates single/multiple target failures. var ErrTargetsOffline = errors.New("one or more targets are offline. Please use `mc admin info --json` to check the offline targets") @@ -76,7 +84,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport) + t, err := target.NewWebhookTarget(ctx, id, args, logOnceIf, transport) if err != nil { return nil, err } diff --git a/internal/config/notify/parse.go b/internal/config/notify/parse.go index 4f5d12fce..cf1f27a1c 100644 --- a/internal/config/notify/parse.go +++ b/internal/config/notify/parse.go @@ -40,6 +40,14 @@ const ( formatNamespace = "namespace" ) +const ( + logSubsys = "notify" +) + +func logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { + logger.LogOnceIf(ctx, logSubsys, err, id, errKind...) +} + // ErrTargetsOffline - Indicates single/multiple target failures. var ErrTargetsOffline = errors.New("one or more targets are offline. 
Please use `mc admin info --json` to check the offline targets") @@ -97,7 +105,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewAMQPTarget(id, args, logger.LogOnceIf) + t, err := target.NewAMQPTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -112,7 +120,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewElasticsearchTarget(id, args, logger.LogOnceIf) + t, err := target.NewElasticsearchTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -129,7 +137,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t continue } args.TLS.RootCAs = transport.TLSClientConfig.RootCAs - t, err := target.NewKafkaTarget(id, args, logger.LogOnceIf) + t, err := target.NewKafkaTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -147,7 +155,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t continue } args.RootCAs = transport.TLSClientConfig.RootCAs - t, err := target.NewMQTTTarget(id, args, logger.LogOnceIf) + t, err := target.NewMQTTTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -162,7 +170,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewMySQLTarget(id, args, logger.LogOnceIf) + t, err := target.NewMySQLTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -177,7 +185,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewNATSTarget(id, args, logger.LogOnceIf) + t, err := target.NewNATSTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -192,7 +200,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewNSQTarget(id, args, logger.LogOnceIf) + t, err := target.NewNSQTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -207,7 +215,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewPostgreSQLTarget(id, args, logger.LogOnceIf) + t, err := target.NewPostgreSQLTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -222,7 +230,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewRedisTarget(id, args, logger.LogOnceIf) + t, err := target.NewRedisTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -237,7 +245,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport) + t, err := target.NewWebhookTarget(ctx, id, args, logOnceIf, transport) if err != nil { return nil, err } diff --git a/internal/config/storageclass/storage-class.go b/internal/config/storageclass/storage-class.go index f6b6963ec..9224f37cf 100644 --- a/internal/config/storageclass/storage-class.go +++ b/internal/config/storageclass/storage-class.go @@ -397,7 +397,7 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) { return cfg, err } if inlineBlock > 128*humanize.KiByte { - logger.LogOnceIf(context.Background(), fmt.Errorf("inline block value bigger than recommended max of 128KiB -> %s, performance may degrade for PUT please 
benchmark the changes", inlineBlockStr), inlineBlockStr) + configLogOnceIf(context.Background(), fmt.Errorf("inline block value bigger than recommended max of 128KiB -> %s, performance may degrade for PUT please benchmark the changes", inlineBlockStr), inlineBlockStr) } cfg.inlineBlock = int64(inlineBlock) } else { @@ -408,3 +408,7 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) { return cfg, nil } + +func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { + logger.LogOnceIf(ctx, "config", err, id, errKind...) +} diff --git a/internal/event/targetlist.go b/internal/event/targetlist.go index c43c76a06..6a1eb6a69 100644 --- a/internal/event/targetlist.go +++ b/internal/event/targetlist.go @@ -30,6 +30,8 @@ import ( ) const ( + logSubsys = "notify" + // The maximum allowed number of concurrent Send() calls to all configured notifications targets maxConcurrentAsyncSend = 50000 ) @@ -290,7 +292,7 @@ func (list *TargetList) sendSync(event Event, targetIDset TargetIDSet) { list.incFailedEvents(id) reqInfo := &logger.ReqInfo{} reqInfo.AppendTags("targetID", id.String()) - logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), err, id.String()) + logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), logSubsys, err, id.String()) } }(id, target) } @@ -313,7 +315,7 @@ func (list *TargetList) sendAsync(event Event, targetIDset TargetIDSet) { for id := range targetIDset { reqInfo := &logger.ReqInfo{} reqInfo.AppendTags("targetID", id.String()) - logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), err, id.String()) + logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), logSubsys, err, id.String()) } return } diff --git a/internal/grid/connection.go b/internal/grid/connection.go index 022aa247d..0d13372de 100644 --- a/internal/grid/connection.go +++ b/internal/grid/connection.go @@ -47,6 +47,18 @@ import ( "github.com/zeebo/xxh3" ) +func gridLogIf(ctx context.Context, err error, errKind ...interface{}) { + logger.LogIf(ctx, "grid", err, errKind...) +} + +func gridLogIfNot(ctx context.Context, err error, ignored ...error) { + logger.LogIfNot(ctx, "grid", err, ignored...) +} + +func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { + logger.LogOnceIf(ctx, "grid", err, id, errKind...) +} + // A Connection is a remote connection. // There is no distinction externally whether the connection was initiated from // this server or from the remote. @@ -667,7 +679,7 @@ func (c *Connection) connect() { if gotState != StateConnecting { // Don't print error on first attempt, // and after that only once per hour. 
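The same pattern repeats across the internal packages above (object lock, identity/plugin, lambda, notify, storageclass, event, grid): each package that cannot reach the cmd helpers defines its own one-line wrapper around the subsystem-aware logger entry points, usually with a local logSubsys constant. For a new internal package the template would look roughly like this; the package and subsystem names below are hypothetical:

package foo // hypothetical internal/foo package

import (
	"context"

	"github.com/minio/minio/internal/logger"
)

const logSubsys = "foo" // hypothetical subsystem tag

func fooLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, logSubsys, err, errKind...)
}

func fooLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, logSubsys, err, id, errKind...)
}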
- logger.LogOnceIf(c.ctx, fmt.Errorf("grid: %s connecting to %s: %w (%T) Sleeping %v (%v)", c.Local, toDial, err, err, sleep, gotState), toDial) + gridLogOnceIf(c.ctx, fmt.Errorf("grid: %s connecting to %s: %w (%T) Sleeping %v (%v)", c.Local, toDial, err, err, sleep, gotState), toDial) } c.updateState(StateConnectionError) time.Sleep(sleep) @@ -898,7 +910,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { go func() { defer func() { if rec := recover(); rec != nil { - logger.LogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec)) + gridLogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec)) debug.PrintStack() } c.connChange.L.Lock() @@ -960,7 +972,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { msg, err = readDataInto(msg, conn, c.side, ws.OpBinary) if err != nil { cancel(ErrDisconnected) - logger.LogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF) + gridLogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF) return } if c.incomingBytes != nil { @@ -971,7 +983,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { var m message subID, remain, err := m.parse(msg) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws parse package: %w", err)) + gridLogIf(ctx, fmt.Errorf("ws parse package: %w", err)) cancel(ErrDisconnected) return } @@ -992,7 +1004,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { var next []byte next, remain, err = msgp.ReadBytesZC(remain) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws read merged: %w", err)) + gridLogIf(ctx, fmt.Errorf("ws read merged: %w", err)) cancel(ErrDisconnected) return } @@ -1000,7 +1012,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { m.Payload = nil subID, _, err = m.parse(next) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws parse merged: %w", err)) + gridLogIf(ctx, fmt.Errorf("ws parse merged: %w", err)) cancel(ErrDisconnected) return } @@ -1012,7 +1024,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { // Write function. defer func() { if rec := recover(); rec != nil { - logger.LogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec)) + gridLogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec)) debug.PrintStack() } if debugPrint { @@ -1058,14 +1070,14 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { if lastPong > 0 { lastPongTime := time.Unix(lastPong, 0) if d := time.Since(lastPongTime); d > connPingInterval*2 { - logger.LogIf(ctx, fmt.Errorf("host %s last pong too old (%v); disconnecting", c.Remote, d.Round(time.Millisecond))) + gridLogIf(ctx, fmt.Errorf("host %s last pong too old (%v); disconnecting", c.Remote, d.Round(time.Millisecond))) return } } var err error toSend, err = pingFrame.MarshalMsg(GetByteBuffer()[:0]) if err != nil { - logger.LogIf(ctx, err) + gridLogIf(ctx, err) // Fake it... 
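gridLogIfNot above wraps logger.LogIfNot, which drops both the globally ignored errors and an explicit ignore list; the read loop uses it so routine peer disconnects (net.ErrClosed, io.EOF) never reach the log while real protocol failures do. A minimal usage sketch, assuming it sits inside the grid package where gridLogIfNot is defined and that a ctx and an open net.Conn are in scope:

// Sketch: only unexpected read errors are logged under the "grid" subsystem.
func drainConn(ctx context.Context, conn net.Conn) {
	if _, err := io.Copy(io.Discard, conn); err != nil {
		// Disconnected peers are routine; anything else is worth an entry.
		gridLogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF)
	}
}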
atomic.StoreInt64(&c.LastPong, time.Now().Unix()) continue @@ -1107,18 +1119,18 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { buf.Reset() err := wsw.writeMessage(&buf, c.side, ws.OpBinary, toSend) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws writeMessage: %w", err)) + gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err)) return } PutByteBuffer(toSend) err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err)) + gridLogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err)) return } _, err = buf.WriteTo(conn) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws write: %w", err)) + gridLogIf(ctx, fmt.Errorf("ws write: %w", err)) return } continue @@ -1135,7 +1147,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { var err error toSend, err = m.MarshalMsg(toSend) if err != nil { - logger.LogIf(ctx, fmt.Errorf("msg.MarshalMsg: %w", err)) + gridLogIf(ctx, fmt.Errorf("msg.MarshalMsg: %w", err)) return } // Append as byte slices. @@ -1151,18 +1163,18 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { buf.Reset() err = wsw.writeMessage(&buf, c.side, ws.OpBinary, toSend) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws writeMessage: %w", err)) + gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err)) return } // buf is our local buffer, so we can reuse it. err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err)) + gridLogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err)) return } _, err = buf.WriteTo(conn) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws write: %w", err)) + gridLogIf(ctx, fmt.Errorf("ws write: %w", err)) return } @@ -1202,7 +1214,7 @@ func (c *Connection) handleMsg(ctx context.Context, m message, subID *subHandler case OpMuxConnectError: c.handleConnectMuxError(ctx, m) default: - logger.LogIf(ctx, fmt.Errorf("unknown message type: %v", m.Op)) + gridLogIf(ctx, fmt.Errorf("unknown message type: %v", m.Op)) } } @@ -1211,7 +1223,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub if m.Flags&FlagStateless != 0 { // Reject for now, so we can safely add it later. 
if true { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Stateless streams not supported"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Stateless streams not supported"})) return } @@ -1222,7 +1234,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub handler = c.handlers.subStateless[*subID] } if handler == nil { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) return } _, _ = c.inStream.LoadOrCompute(m.MuxID, func() *muxServer { @@ -1233,7 +1245,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub var handler *StreamHandler if subID == nil { if !m.Handler.valid() { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"})) return } handler = c.handlers.streams[m.Handler] @@ -1241,7 +1253,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub handler = c.handlers.subStreams[*subID] } if handler == nil { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) return } @@ -1257,7 +1269,7 @@ func (c *Connection) handleConnectMuxError(ctx context.Context, m message) { if v, ok := c.outgoing.Load(m.MuxID); ok { var cErr muxConnectError _, err := cErr.UnmarshalMsg(m.Payload) - logger.LogIf(ctx, err) + gridLogIf(ctx, err) v.error(RemoteErr(cErr.Error)) return } @@ -1269,7 +1281,7 @@ func (c *Connection) handleAckMux(ctx context.Context, m message) { v, ok := c.outgoing.Load(m.MuxID) if !ok { if m.Flags&FlagEOF == 0 { - logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) + gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) } return } @@ -1281,7 +1293,7 @@ func (c *Connection) handleAckMux(ctx context.Context, m message) { func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHandlerID) { if !m.Handler.valid() { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"})) return } if debugReqs { @@ -1295,7 +1307,7 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan handler = c.handlers.subSingle[*subID] } if handler == nil { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) return } @@ -1313,7 +1325,7 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan if rec := recover(); rec != nil { err = NewRemoteErrString(fmt.Sprintf("handleMessages: panic recovered: %v", rec)) debug.PrintStack() - logger.LogIf(ctx, err) + gridLogIf(ctx, err) } }() b, err = handler(m.Payload) @@ -1346,7 +1358,7 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan m.Payload = b m.setZeroPayloadFlag() } - logger.LogIf(ctx, c.queueMsg(m, nil)) + gridLogIf(ctx, c.queueMsg(m, nil)) }(m) } @@ -1354,7 +1366,7 @@ func (c *Connection) handlePong(ctx context.Context, m message) { var pong pongMsg _, err := pong.UnmarshalMsg(m.Payload) PutByteBuffer(m.Payload) - logger.LogIf(ctx, err) + gridLogIf(ctx, err) if m.MuxID == 0 { atomic.StoreInt64(&c.LastPong, time.Now().Unix()) return @@ -1364,22 +1376,22 @@ 
func (c *Connection) handlePong(ctx context.Context, m message) { } else { // We don't care if the client was removed in the meantime, // but we send a disconnect message to the server just in case. - logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) + gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) } } func (c *Connection) handlePing(ctx context.Context, m message) { if m.MuxID == 0 { - logger.LogIf(ctx, c.queueMsg(m, &pongMsg{})) + gridLogIf(ctx, c.queueMsg(m, &pongMsg{})) return } // Single calls do not support pinging. if v, ok := c.inStream.Load(m.MuxID); ok { pong := v.ping(m.Seq) - logger.LogIf(ctx, c.queueMsg(m, &pong)) + gridLogIf(ctx, c.queueMsg(m, &pong)) } else { pong := pongMsg{NotFound: true} - logger.LogIf(ctx, c.queueMsg(m, &pong)) + gridLogIf(ctx, c.queueMsg(m, &pong)) } return } @@ -1442,7 +1454,7 @@ func (c *Connection) handleMuxClientMsg(ctx context.Context, m message) { if debugPrint { fmt.Println(c.Local, "OpMuxClientMsg: Unknown Mux:", m.MuxID) } - logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) + gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) PutByteBuffer(m.Payload) return } @@ -1486,7 +1498,7 @@ func (c *Connection) handleMuxServerMsg(ctx context.Context, m message) { v, ok := c.outgoing.Load(m.MuxID) if !ok { if m.Flags&FlagEOF == 0 { - logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) + gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) } PutByteBuffer(m.Payload) return @@ -1522,7 +1534,7 @@ func (c *Connection) deleteMux(incoming bool, muxID uint64) { } v, loaded := c.inStream.LoadAndDelete(muxID) if loaded && v != nil { - logger.LogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: muxID}, nil)) + gridLogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: muxID}, nil)) v.close() } } else { @@ -1535,7 +1547,7 @@ func (c *Connection) deleteMux(incoming bool, muxID uint64) { fmt.Println(muxID, c.String(), "deleteMux: DELETING MUX") } v.close() - logger.LogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectServerMux, MuxID: muxID}, nil)) + gridLogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectServerMux, MuxID: muxID}, nil)) } } } diff --git a/internal/grid/handlers.go b/internal/grid/handlers.go index 656579bf9..358bb8a2a 100644 --- a/internal/grid/handlers.go +++ b/internal/grid/handlers.go @@ -27,7 +27,6 @@ import ( "github.com/minio/minio/internal/hash/sha256" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" ) @@ -466,7 +465,7 @@ func (h *SingleHandler[Req, Resp]) AllowCallRequestPool(b bool) *SingleHandler[R // This may only be set ONCE before use. 
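The recover blocks in the hunks above (handleMessages and the request handlers) follow one idiom: a panic inside a grid handler is logged under the grid subsystem and converted into an error for the remote side rather than crashing the process. Reduced to its core, and assuming it runs inside the grid package where gridLogIf is defined:

// Sketch: run a handler, converting a panic into a logged error.
func safeHandle(ctx context.Context, fn func() error) (err error) {
	defer func() {
		if rec := recover(); rec != nil {
			err = fmt.Errorf("handler panic: %v", rec)
			gridLogIf(ctx, err)
			debug.PrintStack()
		}
	}()
	return fn()
}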
func (h *SingleHandler[Req, Resp]) IgnoreNilConn() *SingleHandler[Req, Resp] { if h.ignoreNilConn { - logger.LogOnceIf(context.Background(), fmt.Errorf("%s: IgnoreNilConn called twice", h.id.String()), h.id.String()+"IgnoreNilConn") + gridLogOnceIf(context.Background(), fmt.Errorf("%s: IgnoreNilConn called twice", h.id.String()), h.id.String()+"IgnoreNilConn") } h.ignoreNilConn = true return h @@ -767,7 +766,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) register(m *Manager, handle func input := h.NewRequest() _, err := input.UnmarshalMsg(v) if err != nil { - logger.LogOnceIf(ctx, err, err.Error()) + gridLogOnceIf(ctx, err, err.Error()) } PutByteBuffer(v) // Send input @@ -791,7 +790,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) register(m *Manager, handle func } dst, err := v.MarshalMsg(GetByteBufferCap(v.Msgsize())) if err != nil { - logger.LogOnceIf(ctx, err, err.Error()) + gridLogOnceIf(ctx, err, err.Error()) } if !h.sharedResponse { h.PutResponse(v) @@ -877,7 +876,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) Call(ctx context.Context, c Stre for req := range reqT { b, err := req.MarshalMsg(GetByteBufferCap(req.Msgsize())) if err != nil { - logger.LogOnceIf(ctx, err, err.Error()) + gridLogOnceIf(ctx, err, err.Error()) } h.PutRequest(req) stream.Requests <- b diff --git a/internal/grid/manager.go b/internal/grid/manager.go index 66afd3dab..94a00062b 100644 --- a/internal/grid/manager.go +++ b/internal/grid/manager.go @@ -29,7 +29,6 @@ import ( "github.com/gobwas/ws/wsutil" "github.com/google/uuid" "github.com/minio/madmin-go/v3" - "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/pubsub" "github.com/minio/mux" ) @@ -142,7 +141,7 @@ func (m *Manager) Handler() http.HandlerFunc { if r := recover(); r != nil { debug.PrintStack() err := fmt.Errorf("grid: panic: %v\n", r) - logger.LogIf(context.Background(), err, err.Error()) + gridLogIf(context.Background(), err, err.Error()) w.WriteHeader(http.StatusInternalServerError) } }() @@ -151,7 +150,7 @@ func (m *Manager) Handler() http.HandlerFunc { } ctx := req.Context() if err := m.authRequest(req); err != nil { - logger.LogOnceIf(ctx, fmt.Errorf("auth %s: %w", req.RemoteAddr, err), req.RemoteAddr+err.Error()) + gridLogOnceIf(ctx, fmt.Errorf("auth %s: %w", req.RemoteAddr, err), req.RemoteAddr+err.Error()) w.WriteHeader(http.StatusForbidden) return } @@ -168,7 +167,7 @@ func (m *Manager) Handler() http.HandlerFunc { if err == nil { return } - logger.LogOnceIf(ctx, err, err.Error()) + gridLogOnceIf(ctx, err, err.Error()) resp := connectResp{ ID: m.ID, Accepted: false, diff --git a/internal/grid/muxclient.go b/internal/grid/muxclient.go index feabddec3..5a981e513 100644 --- a/internal/grid/muxclient.go +++ b/internal/grid/muxclient.go @@ -27,7 +27,6 @@ import ( "time" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/zeebo/xxh3" ) @@ -289,7 +288,7 @@ func (m *muxClient) addErrorNonBlockingClose(respHandler chan<- Response, err er xioutil.SafeClose(respHandler) }() } - logger.LogIf(m.ctx, m.sendLocked(message{Op: OpDisconnectServerMux, MuxID: m.MuxID})) + gridLogIf(m.ctx, m.sendLocked(message{Op: OpDisconnectServerMux, MuxID: m.MuxID})) m.closed = true } } @@ -336,7 +335,7 @@ func (m *muxClient) handleOneWayStream(respHandler chan<- Response, respServer < case respHandler <- resp: m.respMu.Lock() if !m.closed { - logger.LogIf(m.ctx, m.sendLocked(message{Op: OpUnblockSrvMux, MuxID: m.MuxID})) + gridLogIf(m.ctx, m.sendLocked(message{Op: OpUnblockSrvMux, 
MuxID: m.MuxID})) } m.respMu.Unlock() case <-m.ctx.Done(): @@ -349,7 +348,7 @@ func (m *muxClient) handleOneWayStream(respHandler chan<- Response, respServer < return } // Send new ping. - logger.LogIf(m.ctx, m.send(message{Op: OpPing, MuxID: m.MuxID})) + gridLogIf(m.ctx, m.send(message{Op: OpPing, MuxID: m.MuxID})) } } } @@ -509,7 +508,7 @@ func (m *muxClient) unblockSend(seq uint32) { select { case m.outBlock <- struct{}{}: default: - logger.LogIf(m.ctx, errors.New("output unblocked overflow")) + gridLogIf(m.ctx, errors.New("output unblocked overflow")) } } @@ -548,7 +547,7 @@ func (m *muxClient) addResponse(r Response) (ok bool) { return } err := errors.New("INTERNAL ERROR: Response was blocked") - logger.LogIf(m.ctx, err) + gridLogIf(m.ctx, err) m.closeLocked() return false } diff --git a/internal/grid/muxserver.go b/internal/grid/muxserver.go index 2bec67e17..e9d1db659 100644 --- a/internal/grid/muxserver.go +++ b/internal/grid/muxserver.go @@ -26,7 +26,6 @@ import ( "time" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" ) const lastPingThreshold = 4 * clientPingInterval @@ -220,7 +219,7 @@ func (m *muxServer) handleRequests(ctx context.Context, msg message, send chan<- fmt.Println("Mux", m.ID, "Handler took", time.Since(start).Round(time.Millisecond)) } if r := recover(); r != nil { - logger.LogIf(ctx, fmt.Errorf("grid handler (%v) panic: %v", msg.Handler, r)) + gridLogIf(ctx, fmt.Errorf("grid handler (%v) panic: %v", msg.Handler, r)) err := RemoteErr(fmt.Sprintf("handler panic: %v", r)) handlerErr = &err } @@ -244,7 +243,7 @@ func (m *muxServer) checkRemoteAlive() { case <-t.C: last := time.Since(time.Unix(atomic.LoadInt64(&m.LastPing), 0)) if last > lastPingThreshold { - logger.LogIf(m.ctx, fmt.Errorf("canceling remote connection %s not seen for %v", m.parent, last)) + gridLogIf(m.ctx, fmt.Errorf("canceling remote connection %s not seen for %v", m.parent, last)) m.close() return } @@ -281,7 +280,7 @@ func (m *muxServer) message(msg message) { // Note, on EOF no value can be sent. if msg.Flags&FlagEOF != 0 { if len(msg.Payload) > 0 { - logger.LogIf(m.ctx, fmt.Errorf("muxServer: EOF message with payload")) + gridLogIf(m.ctx, fmt.Errorf("muxServer: EOF message with payload")) } if m.inbound != nil { xioutil.SafeClose(m.inbound) @@ -314,7 +313,7 @@ func (m *muxServer) unblockSend(seq uint32) { select { case m.outBlock <- struct{}{}: default: - logger.LogIf(m.ctx, errors.New("output unblocked overflow")) + gridLogIf(m.ctx, errors.New("output unblocked overflow")) } } @@ -354,7 +353,7 @@ func (m *muxServer) send(msg message) { if debugPrint { fmt.Printf("Mux %d, Sending %+v\n", m.ID, msg) } - logger.LogIf(m.ctx, m.parent.queueMsg(msg, nil)) + gridLogIf(m.ctx, m.parent.queueMsg(msg, nil)) } func (m *muxServer) close() { diff --git a/internal/hash/checksum.go b/internal/hash/checksum.go index ddf21947a..eaf81de6d 100644 --- a/internal/hash/checksum.go +++ b/internal/hash/checksum.go @@ -34,6 +34,10 @@ import ( "github.com/minio/minio/internal/logger" ) +func hashLogIf(ctx context.Context, err error) { + logger.LogIf(ctx, "hash", err) +} + // MinIOMultipartChecksum is as metadata on multipart uploads to indicate checksum type. 
const MinIOMultipartChecksum = "x-minio-multipart-checksum" @@ -323,7 +327,7 @@ func (c *Checksum) AppendTo(b []byte, parts []byte) []byte { var checksums int // Ensure we don't divide by 0: if c.Type.RawByteLen() == 0 || len(parts)%c.Type.RawByteLen() != 0 { - logger.LogIf(context.Background(), fmt.Errorf("internal error: Unexpected checksum length: %d, each checksum %d", len(parts), c.Type.RawByteLen())) + hashLogIf(context.Background(), fmt.Errorf("internal error: Unexpected checksum length: %d, each checksum %d", len(parts), c.Type.RawByteLen())) checksums = 0 parts = nil } else { diff --git a/internal/logger/audit.go b/internal/logger/audit.go index 41464858b..bb5a7c3ab 100644 --- a/internal/logger/audit.go +++ b/internal/logger/audit.go @@ -36,7 +36,7 @@ const contextAuditKey = contextKeyType("audit-entry") // SetAuditEntry sets Audit info in the context. func SetAuditEntry(ctx context.Context, audit *audit.Entry) context.Context { if ctx == nil { - LogIf(context.Background(), fmt.Errorf("context is nil")) + LogIf(context.Background(), "audit", fmt.Errorf("context is nil")) return nil } return context.WithValue(ctx, contextAuditKey, audit) @@ -144,7 +144,7 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl // Send audit logs only to http targets. for _, t := range auditTgts { if err := t.Send(ctx, entry); err != nil { - LogOnceIf(ctx, fmt.Errorf("Unable to send an audit event to the target `%v`: %v", t, err), "send-audit-event-failure") + LogOnceIf(ctx, "logging", fmt.Errorf("Unable to send an audit event to the target `%v`: %v", t, err), "send-audit-event-failure") } } } diff --git a/internal/logger/config.go b/internal/logger/config.go index b2c8f3167..1d15a786b 100644 --- a/internal/logger/config.go +++ b/internal/logger/config.go @@ -299,7 +299,7 @@ func lookupLegacyConfigForSubSys(ctx context.Context, subSys string) Config { } url, err := xnet.ParseHTTPURL(endpoint) if err != nil { - LogOnceIf(ctx, err, "logger-webhook-"+endpoint) + LogOnceIf(ctx, "logging", err, "logger-webhook-"+endpoint) continue } cfg.HTTP[target] = http.Config{ @@ -327,7 +327,7 @@ func lookupLegacyConfigForSubSys(ctx context.Context, subSys string) Config { } url, err := xnet.ParseHTTPURL(endpoint) if err != nil { - LogOnceIf(ctx, err, "audit-webhook-"+endpoint) + LogOnceIf(ctx, "logging", err, "audit-webhook-"+endpoint) continue } cfg.AuditWebhook[target] = http.Config{ diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 91f1c072a..093587526 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -242,27 +242,26 @@ func HashString(input string) string { // LogAlwaysIf prints a detailed error message during // the execution of the server. -func LogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) { +func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) { if err == nil { return } - - logIf(ctx, err, errKind...) + logIf(ctx, subsystem, err, errKind...) } // LogIf prints a detailed error message during // the execution of the server, if it is not an // ignored error. -func LogIf(ctx context.Context, err error, errKind ...interface{}) { +func LogIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) { if logIgnoreError(err) { return } - logIf(ctx, err, errKind...) + logIf(ctx, subsystem, err, errKind...) } // LogIfNot prints a detailed error message during // the execution of the server, if it is not an ignored error (either internal or given). 
-func LogIfNot(ctx context.Context, err error, ignored ...error) { +func LogIfNot(ctx context.Context, subsystem string, err error, ignored ...error) { if logIgnoreError(err) { return } @@ -271,24 +270,24 @@ func LogIfNot(ctx context.Context, err error, ignored ...error) { return } } - logIf(ctx, err) + logIf(ctx, subsystem, err) } -func errToEntry(ctx context.Context, err error, errKind ...interface{}) log.Entry { +func errToEntry(ctx context.Context, subsystem string, err error, errKind ...interface{}) log.Entry { var l string if anonFlag { l = reflect.TypeOf(err).String() } else { l = fmt.Sprintf("%v (%T)", err, err) } - return buildLogEntry(ctx, l, getTrace(3), errKind...) + return buildLogEntry(ctx, subsystem, l, getTrace(3), errKind...) } -func logToEntry(ctx context.Context, message string, errKind ...interface{}) log.Entry { - return buildLogEntry(ctx, message, nil, errKind...) +func logToEntry(ctx context.Context, subsystem, message string, errKind ...interface{}) log.Entry { + return buildLogEntry(ctx, subsystem, message, nil, errKind...) } -func buildLogEntry(ctx context.Context, message string, trace []string, errKind ...interface{}) log.Entry { +func buildLogEntry(ctx context.Context, subsystem, message string, trace []string, errKind ...interface{}) log.Entry { logKind := madmin.LogKindError if len(errKind) > 0 { if ek, ok := errKind[0].(madmin.LogKind); ok { @@ -307,8 +306,11 @@ func buildLogEntry(ctx context.Context, message string, trace []string, errKind defer req.RUnlock() API := "SYSTEM" - if req.API != "" { + switch { + case req.API != "": API = req.API + case subsystem != "": + API += "." + subsystem } // Copy tags. We hold read lock already. @@ -374,7 +376,7 @@ func buildLogEntry(ctx context.Context, message string, trace []string, errKind // consoleLogIf prints a detailed error message during // the execution of the server. -func consoleLogIf(ctx context.Context, err error, errKind ...interface{}) { +func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) { if DisableErrorLog { return } @@ -382,20 +384,22 @@ func consoleLogIf(ctx context.Context, err error, errKind ...interface{}) { return } if consoleTgt != nil { - consoleTgt.Send(ctx, errToEntry(ctx, err, errKind...)) + entry := errToEntry(ctx, subsystem, err, errKind...) + consoleTgt.Send(ctx, entry) } } // logIf prints a detailed error message during // the execution of the server. -func logIf(ctx context.Context, err error, errKind ...interface{}) { +func logIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) { if DisableErrorLog { return } if err == nil { return } - sendLog(ctx, errToEntry(ctx, err, errKind...)) + entry := errToEntry(ctx, subsystem, err, errKind...) + sendLog(ctx, entry) } func sendLog(ctx context.Context, entry log.Entry) { @@ -416,11 +420,12 @@ func sendLog(ctx context.Context, entry log.Entry) { } // Event sends a event log to log targets -func Event(ctx context.Context, msg string, args ...interface{}) { +func Event(ctx context.Context, subsystem, msg string, args ...interface{}) { if DisableErrorLog { return } - sendLog(ctx, logToEntry(ctx, fmt.Sprintf(msg, args...), EventKind)) + entry := logToEntry(ctx, subsystem, fmt.Sprintf(msg, args...), EventKind) + sendLog(ctx, entry) } // ErrCritical is the value panic'd whenever CriticalIf is called. @@ -430,7 +435,7 @@ var ErrCritical struct{} // current go-routine by causing a `panic(ErrCritical)`. 
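The buildLogEntry change above is what makes the new subsystem parameter visible: when the request context carries no API name, the entry is attributed to "SYSTEM.<subsystem>" instead of a bare "SYSTEM", while request-scoped entries keep their real API name. Illustrative fragment only (it would have to live inside the MinIO module to import internal/logger):

func logDriveOffline(ctx context.Context) {
	// Background context, no ReqInfo: entry API name becomes "SYSTEM.storage".
	logger.LogIf(ctx, "storage", errors.New("drive offline"))

	// Request-scoped context: the real API name ("PutObject") wins over the subsystem.
	rctx := logger.SetReqInfo(ctx, &logger.ReqInfo{API: "PutObject"})
	logger.LogIf(rctx, "storage", errors.New("drive offline"))
}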
func CriticalIf(ctx context.Context, err error, errKind ...interface{}) { if err != nil { - LogIf(ctx, err, errKind...) + LogIf(ctx, "", err, errKind...) panic(ErrCritical) } } diff --git a/internal/logger/logonce.go b/internal/logger/logonce.go index 6cfa9998d..12b220d9f 100644 --- a/internal/logger/logonce.go +++ b/internal/logger/logonce.go @@ -38,7 +38,7 @@ type logOnceType struct { sync.Mutex } -func (l *logOnceType) logOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func (l *logOnceType) logOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) { if err == nil { return } @@ -61,7 +61,7 @@ func (l *logOnceType) logOnceConsoleIf(ctx context.Context, err error, id string l.Unlock() if shouldLog { - consoleLogIf(ctx, err, errKind...) + consoleLogIf(ctx, subsystem, err, errKind...) } } @@ -92,7 +92,7 @@ func unwrapErrs(err error) (leafErr error) { } // One log message per error. -func (l *logOnceType) logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func (l *logOnceType) logOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) { if err == nil { return } @@ -115,7 +115,7 @@ func (l *logOnceType) logOnceIf(ctx context.Context, err error, id string, errKi l.Unlock() if shouldLog { - logIf(ctx, err, errKind...) + logIf(ctx, subsystem, err, errKind...) } } @@ -142,17 +142,17 @@ var logOnce = newLogOnceType() // LogOnceIf - Logs notification errors - once per error. // id is a unique identifier for related log messages, refer to cmd/notification.go // on how it is used. -func LogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) { if logIgnoreError(err) { return } - logOnce.logOnceIf(ctx, err, id, errKind...) + logOnce.logOnceIf(ctx, subsystem, err, id, errKind...) } // LogOnceConsoleIf - similar to LogOnceIf but exclusively only logs to console target. -func LogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func LogOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) { if logIgnoreError(err) { return } - logOnce.logOnceConsoleIf(ctx, err, id, errKind...) + logOnce.logOnceConsoleIf(ctx, subsystem, err, id, errKind...) } diff --git a/internal/logger/reqinfo.go b/internal/logger/reqinfo.go index 10d71a5d7..9520d2d58 100644 --- a/internal/logger/reqinfo.go +++ b/internal/logger/reqinfo.go @@ -153,7 +153,7 @@ func (r *ReqInfo) PopulateTagsMap(tagsMap map[string]interface{}) { // SetReqInfo sets ReqInfo in the context. 
func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context { if ctx == nil { - LogIf(context.Background(), fmt.Errorf("context is nil")) + LogIf(context.Background(), "", fmt.Errorf("context is nil")) return nil } return context.WithValue(ctx, contextLogKey, req) diff --git a/internal/logger/target/console/console.go b/internal/logger/target/console/console.go index e96c373c2..719c1f991 100644 --- a/internal/logger/target/console/console.go +++ b/internal/logger/target/console/console.go @@ -88,22 +88,25 @@ func (c *Target) Send(e interface{}) error { var apiString string if entry.API != nil { - apiString = "API: " + entry.API.Name + "(" + apiString = "API: " + entry.API.Name if entry.API.Args != nil { + args := "" if entry.API.Args.Bucket != "" { - apiString = apiString + "bucket=" + entry.API.Args.Bucket + args = args + "bucket=" + entry.API.Args.Bucket } if entry.API.Args.Object != "" { - apiString = apiString + ", object=" + entry.API.Args.Object + args = args + ", object=" + entry.API.Args.Object } if entry.API.Args.VersionID != "" { - apiString = apiString + ", versionId=" + entry.API.Args.VersionID + args = args + ", versionId=" + entry.API.Args.VersionID } if len(entry.API.Args.Objects) > 0 { - apiString = apiString + ", multiObject=true, numberOfObjects=" + strconv.Itoa(len(entry.API.Args.Objects)) + args = args + ", multiObject=true, numberOfObjects=" + strconv.Itoa(len(entry.API.Args.Objects)) + } + if len(args) > 0 { + apiString += "(" + args + ")" } } - apiString += ")" } else { apiString = "INTERNAL" } diff --git a/internal/rest/client.go b/internal/rest/client.go index 1fb6d4b32..b143ce673 100644 --- a/internal/rest/client.go +++ b/internal/rest/client.go @@ -39,6 +39,8 @@ import ( xnet "github.com/minio/pkg/v2/net" ) +const logSubsys = "internodes" + // DefaultTimeout - default REST timeout is 10 seconds. const DefaultTimeout = 10 * time.Second @@ -316,7 +318,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod atomic.AddUint64(&globalStats.errs, 1) } if c.MarkOffline(err) { - logger.LogOnceIf(ctx, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host) + logger.LogOnceIf(ctx, logSubsys, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host) } } return nil, &NetworkError{err} @@ -340,7 +342,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod // instead, see cmd/storage-rest-server.go for ideas. if c.HealthCheckFn != nil && resp.StatusCode == http.StatusPreconditionFailed { err = fmt.Errorf("Marking %s offline temporarily; caused by PreconditionFailed with drive ID mismatch", c.url.Host) - logger.LogOnceIf(ctx, err, c.url.Host) + logger.LogOnceIf(ctx, logSubsys, err, c.url.Host) c.MarkOffline(err) } defer xhttp.DrainBody(resp.Body) @@ -352,7 +354,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod atomic.AddUint64(&globalStats.errs, 1) } if c.MarkOffline(err) { - logger.LogOnceIf(ctx, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host) + logger.LogOnceIf(ctx, logSubsys, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host) } } return nil, err
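With the console target refactor above, the parentheses are only emitted when at least one API argument is present, so subsystem-only entries render as "API: SYSTEM.grid" rather than the previous "API: SYSTEM.grid()", while request entries keep the familiar argument list. The rest client changes follow the same scheme, tagging inter-node connectivity errors with the "internodes" subsystem. Example rendered prefixes (illustrative values):

    API: SYSTEM.grid                                      <- internal entry, no arguments
    API: PutObject(bucket=photos, object=2024/a.jpg)      <- request entry with arguments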