logging: Add subsystem to log API (#19002)

Create new logging code paths for the various subsystems in the code. This will
make maintaining them easier later.

Also introduce bugLogIf() for errors that should not happen in the first
place.
Anis Eleuch 2024-04-04 13:04:40 +01:00 committed by GitHub
parent 2228eb61cb
commit 95bf4a57b6
GPG Key ID: B5690EEEBB952194
123 changed files with 972 additions and 786 deletions
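
For reference, the subsystem helpers used below (adminLogIf, replLogIf, healingLogIf, batchLogIf, bugLogIf, and friends) are defined elsewhere in this commit and do not appear in the hunks shown here. A minimal sketch of what such wrappers could look like, assuming they are thin variadic shims over the existing central logger; the helper names are taken from the call sites in the diff, everything else is an illustration and not the actual MinIO code:

    // logging_subsystems_sketch.go - illustrative only, not part of this commit.
    package main

    import (
        "context"
        "log"
    )

    // logIf stands in for the central logger entry point (logger.LogIf in MinIO).
    // The wrappers below add exactly one thing: a fixed subsystem tag.
    func logIf(ctx context.Context, subsystem string, err error, kind ...interface{}) {
        if err == nil {
            return
        }
        log.Printf("subsystem=%s err=%v extra=%v", subsystem, err, kind)
    }

    // Per-subsystem wrappers, named after the call sites in the diff.
    func adminLogIf(ctx context.Context, err error, kind ...interface{})   { logIf(ctx, "admin", err, kind...) }
    func replLogIf(ctx context.Context, err error, kind ...interface{})    { logIf(ctx, "replication", err, kind...) }
    func healingLogIf(ctx context.Context, err error, kind ...interface{}) { logIf(ctx, "healing", err, kind...) }
    func batchLogIf(ctx context.Context, err error, kind ...interface{})   { logIf(ctx, "batch", err, kind...) }

    // bugLogIf flags errors that should not happen in the first place; a real
    // implementation might add a "please report this as a bug" hint or a stack trace.
    func bugLogIf(ctx context.Context, err error, kind ...interface{}) { logIf(ctx, "internal: bug", err, kind...) }

    func main() {} // placeholder so the sketch compiles on its own

The point of the indirection is that every call site now states which subsystem an error belongs to, so logging can be tuned and maintained per subsystem instead of through one undifferentiated logger.LogIf.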

View File

@@ -39,7 +39,6 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
- "github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
@@ -99,7 +98,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
}
// Call site replication hook.
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
+ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
// Write success response.
writeSuccessResponseHeadersOnly(w)
@@ -431,7 +430,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
case bucketNotificationConfig:
config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
@@ -447,7 +446,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
continue
}
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}

View File

@@ -58,7 +58,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
- logger.LogIf(ctx, err, logger.ErrorKind)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@@ -162,7 +162,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
- logger.LogIf(ctx, err, logger.ErrorKind)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@@ -443,7 +443,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
- logger.LogIf(ctx, err, logger.ErrorKind)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}

View File

@@ -31,7 +31,6 @@ import (
"github.com/minio/minio/internal/config"
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
- "github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/ldap"
"github.com/minio/pkg/v2/policy"
@@ -60,7 +59,7 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
- logger.LogIf(ctx, err, logger.ErrorKind)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}

View File

@@ -27,7 +27,6 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
- "github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
@@ -132,7 +131,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
- logger.LogIf(ctx, err, logger.ErrorKind)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@@ -301,7 +300,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
// Call hook for cluster-replication if the service account is not for a
// root user.
if newCred.ParentUser != globalActiveCred.AccessKey {
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{

View File

@@ -26,7 +26,6 @@ import (
"strconv"
"strings"
- "github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/policy"
@@ -210,7 +209,7 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
return
}
- logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
+ adminLogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}
func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
@@ -243,7 +242,7 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
poolsStatus[idx] = status
}
- logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
+ adminLogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}
func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
@@ -350,11 +349,11 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
return
}
- logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
+ adminLogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
- logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs))
+ adminLogIf(r.Context(), json.NewEncoder(w).Encode(rs))
}
func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
@@ -374,7 +373,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
// Cancel any ongoing rebalance operation
globalNotificationSys.StopRebalance(r.Context())
writeSuccessResponseHeadersOnly(w)
- logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
+ adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
}
func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {

View File

@@ -32,7 +32,6 @@ import (
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
- "github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
)
@@ -55,7 +54,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
opts := getSRAddOptions(r)
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -93,7 +92,7 @@ func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
}
if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -140,7 +139,7 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
}
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -192,7 +191,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt)
}
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -263,7 +262,7 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
}
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -316,7 +315,6 @@ func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptio
if encryptionKey != "" {
data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
if err != nil {
- logger.LogIf(ctx, err)
return SRError{
Cause: err,
Code: ErrSiteReplicationInvalidRequest,
@@ -396,7 +394,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
opts := getSREditOptions(r)
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -433,7 +431,7 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
}
if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -456,7 +454,7 @@ func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
return
}
if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -493,7 +491,7 @@ func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.R
}
status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -524,7 +522,7 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
}
if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -586,7 +584,7 @@ func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http.
// If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec)
// would mean the network is not stable. Logging here will help in debugging network issues.
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
}
}
if err != nil {
@@ -609,5 +607,5 @@ func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http.
duration = globalNetPerfMinDuration
}
result := siteNetperf(r.Context(), duration)
- logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
+ adminLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
}

View File

@@ -35,7 +35,6 @@ import (
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/config/dns"
- "github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/puzpuzpuz/xsync/v3"
@@ -75,7 +74,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
return
}
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@@ -279,7 +278,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
return
}
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemGroupInfo,
GroupInfo: &madmin.SRGroupInfo{
UpdateReq: updReq,
@@ -369,7 +368,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
return
}
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemGroupInfo,
GroupInfo: &madmin.SRGroupInfo{
UpdateReq: madmin.GroupAddRemove{
@@ -407,7 +406,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
return
}
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@@ -496,14 +495,14 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
password := cred.SecretKey
configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
var ureq madmin.AddOrUpdateUserReq
if err = json.Unmarshal(configBytes, &ureq); err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@@ -514,7 +513,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@@ -732,7 +731,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// Call hook for cluster-replication if the service account is not for a
// root user.
if newCred.ParentUser != globalActiveCred.AccessKey {
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{
@@ -854,7 +853,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
// Call site replication hook - non-root user accounts are replicated.
if svcAccount.ParentUser != globalActiveCred.AccessKey {
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Update: &madmin.SRSvcAccUpdate{
@@ -1116,7 +1115,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
// Call site replication hook - non-root user accounts are replicated.
if svcAccount.ParentUser != "" && svcAccount.ParentUser != globalActiveCred.AccessKey {
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Delete: &madmin.SRSvcAccDelete{
@@ -1274,7 +1273,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
default:
policies, err := globalIAMSys.PolicyDBGet(accountName, cred.Groups...)
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -1426,7 +1425,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
continue
}
newPolicies[name] = p
@@ -1456,7 +1455,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
continue
}
newPolicies[name] = p
@@ -1486,7 +1485,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
// Call cluster-replication policy creation hook to replicate policy deletion to
// other minio clusters.
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicy,
Name: policyName,
UpdatedAt: UTCNow(),
@@ -1549,7 +1548,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
// Call cluster-replication policy creation hook to replicate policy to
// other minio clusters.
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicy,
Name: policyName,
Policy: iamPolicyBytes,
@@ -1617,7 +1616,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
return
}
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
+ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: entityName,
@@ -1791,17 +1790,17 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
sys: nil,
})
if zerr != nil {
- logger.LogIf(ctx, zerr)
+ adminLogIf(ctx, zerr)
return nil
}
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
if zerr != nil {
- logger.LogIf(ctx, zerr)
+ adminLogIf(ctx, zerr)
return nil
}
if _, err := io.Copy(zwriter, r); err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
}
return nil
}
@@ -1822,7 +1821,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
case allPoliciesFile:
allPolicies, err := globalIAMSys.ListPolicies(ctx, "")
if err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}

View File

@@ -130,7 +130,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
// Download Binary Once
binC, bin, err := downloadBinary(u, mode)
if err != nil {
- logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
+ adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -354,7 +354,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
// Download Binary Once
binC, bin, err := downloadBinary(u, mode)
if err != nil {
- logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
+ adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -368,7 +368,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
StatusCode: http.StatusInternalServerError,
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
- logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
+ adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -376,7 +376,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin))
if err != nil {
- logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
+ adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -389,7 +389,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
StatusCode: http.StatusInternalServerError,
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
- logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
+ adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -397,7 +397,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
err = commitBinary()
if err != nil {
- logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
+ adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -420,7 +420,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
for _, nerr := range globalNotificationSys.SignalService(serviceRestart) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
- logger.LogIf(ctx, nerr.Err)
+ adminLogIf(ctx, nerr.Err)
}
}
@@ -451,7 +451,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
case madmin.ServiceActionUnfreeze:
serviceSig = serviceUnFreeze
default:
- logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
+ adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
return
}
@@ -473,7 +473,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
for _, nerr := range globalNotificationSys.SignalService(serviceSig) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
- logger.LogIf(ctx, nerr.Err)
+ adminLogIf(ctx, nerr.Err)
}
}
@@ -534,7 +534,7 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques
case madmin.ServiceActionUnfreeze:
serviceSig = serviceUnFreeze
default:
- logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
+ adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
return
}
@@ -1239,7 +1239,7 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if hip.clientToken == "" {
jerr := json.NewDecoder(r).Decode(&hip.hs)
if jerr != nil {
- logger.LogIf(GlobalContext, jerr, logger.ErrorKind)
+ adminLogIf(GlobalContext, jerr, logger.ErrorKind)
err = ErrRequestBodyParse
return
}
@@ -1433,7 +1433,7 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi
var errCount int
for _, nerr := range nerrs {
if nerr.Err != nil {
- logger.LogIf(ctx, nerr.Err)
+ adminLogIf(ctx, nerr.Err)
errCount++
}
}
@@ -1561,7 +1561,7 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
// would mean the network is not stable. Logging here will help in debugging network issues.
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
}
}
totalRx += n
@@ -2800,7 +2800,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
w.Header().Get(xhttp.AmzRequestID), w.Header().Get(xhttp.AmzRequestHostID))
encodedErrorResponse := encodeResponse(errorResponse)
healthInfo.Error = string(encodedErrorResponse)
- logger.LogIf(ctx, enc.Encode(healthInfo))
+ adminLogIf(ctx, enc.Encode(healthInfo))
}
deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics.
@@ -3113,7 +3113,7 @@ func getClusterMetaInfo(ctx context.Context) []byte {
case ci := <-resultCh:
out, err := json.MarshalIndent(ci, "", " ")
if err != nil {
- logger.LogIf(ctx, err)
+ bugLogIf(ctx, err)
return nil
}
return out
@@ -3206,18 +3206,18 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
clusterKey, err := bytesToPublicKey(getSubnetAdminPublicKey())
if err != nil {
- logger.LogIf(ctx, stream.AddError(err.Error()))
+ bugLogIf(ctx, stream.AddError(err.Error()))
return
}
err = stream.AddKeyEncrypted(clusterKey)
if err != nil {
- logger.LogIf(ctx, stream.AddError(err.Error()))
+ bugLogIf(ctx, stream.AddError(err.Error()))
return
}
if b := getClusterMetaInfo(ctx); len(b) > 0 {
w, err := stream.AddEncryptedStream("cluster.info", nil)
if err != nil {
- logger.LogIf(ctx, err)
+ bugLogIf(ctx, err)
return
}
w.Write(b)
@@ -3226,12 +3226,12 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
// Add new key for inspect data.
if err := stream.AddKeyEncrypted(publicKey); err != nil {
- logger.LogIf(ctx, stream.AddError(err.Error()))
+ bugLogIf(ctx, stream.AddError(err.Error()))
return
}
encStream, err := stream.AddEncryptedStream("inspect.zip", nil)
if err != nil {
- logger.LogIf(ctx, stream.AddError(err.Error()))
+ bugLogIf(ctx, stream.AddError(err.Error()))
return
}
defer encStream.Close()
@@ -3244,7 +3244,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
// MUST use crypto/rand
n, err := crand.Read(key[:])
if err != nil || n != len(key) {
- logger.LogIf(ctx, err)
+ bugLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@@ -3258,7 +3258,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
stream, err := sio.AES_256_GCM.Stream(key[:])
if err != nil {
- logger.LogIf(ctx, err)
+ bugLogIf(ctx, err)
return
}
// Zero nonce, we only use each key once, and 32 bytes is plenty.
@@ -3272,7 +3272,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
defer inspectZipW.Close()
if b := getClusterMetaInfo(ctx); len(b) > 0 {
- logger.LogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600))
+ adminLogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600))
}
}
@@ -3300,23 +3300,23 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
sys: nil,
})
if zerr != nil {
- logger.LogIf(ctx, zerr)
+ bugLogIf(ctx, zerr)
return nil
}
header.Method = zip.Deflate
zwriter, zerr := inspectZipW.CreateHeader(header)
if zerr != nil {
- logger.LogIf(ctx, zerr)
+ bugLogIf(ctx, zerr)
return nil
}
if _, err := io.Copy(zwriter, r); err != nil {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
}
return nil
}
err := o.GetRawData(ctx, volume, file, rawDataFn)
if !errors.Is(err, errFileNotFound) {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
}
// save the format.json as part of inspect by default
@@ -3324,7 +3324,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
- logger.LogIf(ctx, err)
+ adminLogIf(ctx, err)
}
// save args passed to inspect command
@@ -3336,7 +3336,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
sb.WriteString(pool.CmdLine)
}
sb.WriteString("\n")
- logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))
+ adminLogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))
scheme := "https"
if !globalIsTLS {
@@ -3370,7 +3370,7 @@ function main() {
}
main "$@"`, scheme)
- logger.LogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755))
+ adminLogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755))
}
func getSubnetAdminPublicKey() []byte {

View File

@@ -347,7 +347,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
StartTime: h.startTime,
})
if err != nil {
- logger.LogIf(h.ctx, err)
+ bugLogIf(h.ctx, err)
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
@@ -394,7 +394,7 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string,
if err != nil {
h.currentStatus.Items = nil
- logger.LogIf(h.ctx, err)
+ bugLogIf(h.ctx, err)
return nil, ErrInternalError
}

View File

@@ -31,7 +31,6 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
- "github.com/minio/minio/internal/logger"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
@@ -67,7 +66,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
} else {
network[nodeName] = string(madmin.ItemOffline)
// log once the error
- logger.LogOnceIf(context.Background(), err, nodeName)
+ peersLogOnceIf(context.Background(), err, nodeName)
}
}
}

View File

@@ -2519,7 +2519,7 @@ func toAPIError(ctx context.Context, err error) APIError {
// Make sure to log the errors which we cannot translate
// to a meaningful S3 API errors. This is added to aid in
// debugging unexpected/unhandled errors.
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err)
}
return apiErr

View File

@@ -30,7 +30,6 @@ import (
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
- "github.com/minio/minio/internal/logger"
xxml "github.com/minio/xxml"
)
@@ -68,7 +67,7 @@ func encodeResponse(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
- logger.LogIf(GlobalContext, err)
+ bugLogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
@@ -86,7 +85,7 @@ func encodeResponseList(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xxml.Header)
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
- logger.LogIf(GlobalContext, err)
+ bugLogIf(GlobalContext, err)
return nil
}
return buf.Bytes()

View File

@@ -891,7 +891,7 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
}
// Similar check to http.checkWriteHeaderCode
if statusCode < 100 || statusCode > 999 {
- logger.LogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
+ bugLogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
statusCode = http.StatusInternalServerError
}
setCommonHeaders(w)
@@ -961,7 +961,7 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
// Similar check to http.checkWriteHeaderCode
if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 {
- logger.LogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
+ bugLogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
err.HTTPStatusCode = http.StatusInternalServerError
}

View File

@@ -126,7 +126,7 @@ func getRequestAuthType(r *http.Request) (at authType) {
var err error
r.Form, err = url.ParseQuery(r.URL.RawQuery)
if err != nil {
- logger.LogIf(r.Context(), err)
+ authNLogIf(r.Context(), err)
return authTypeUnknown
}
}
@@ -257,7 +257,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
- logger.LogIf(GlobalContext, err, logger.ErrorKind)
+ authNLogIf(GlobalContext, err, logger.ErrorKind)
return nil, errAuthentication
}
claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
@@ -353,7 +353,7 @@ func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action po
func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
if logger.GetReqInfo(ctx) == nil {
- logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
+ bugLogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
return ErrAccessDenied
}
@@ -392,7 +392,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
// To extract region from XML in request body, get copy of request body.
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
- logger.LogIf(ctx, err, logger.ErrorKind)
+ authZLogIf(ctx, err, logger.ErrorKind)
return ErrMalformedXML
}

View File

@@ -25,7 +25,6 @@ import (
"time"
"github.com/minio/madmin-go/v3"
- "github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
)
@@ -158,7 +157,7 @@ func newHealRoutine() *healRoutine {
if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" {
if numHealers, err := strconv.Atoi(envHealWorkers); err != nil {
- logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
+ bugLogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
} else {
workers = numHealers
}

View File

@@ -33,7 +33,6 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
- "github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
)
@@ -409,11 +408,11 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
if errors.Is(err, errFileNotFound) {
return nil
}
- logger.LogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
+ healingLogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
tracker = initHealingTracker(disk, mustGetUUID())
}
- logger.Event(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
+ healingLogEvent(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
buckets, _ := z.ListBuckets(ctx, BucketOptions{})
// Buckets data are dispersed in multiple pools/sets, make
@@ -452,7 +451,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return err
}
- logger.Event(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+ healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
if len(tracker.QueuedBuckets) > 0 {
return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
@@ -464,7 +463,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
}
if tracker.HealID == "" { // HealID was empty only before Feb 2023
- logger.LogIf(ctx, tracker.delete(ctx))
+ bugLogIf(ctx, tracker.delete(ctx))
return nil
}
@@ -482,7 +481,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
t, err := loadHealingTracker(ctx, disk)
if err != nil {
if !errors.Is(err, errFileNotFound) {
- logger.LogIf(ctx, err)
+ healingLogIf(ctx, err)
}
continue
}
@@ -517,7 +516,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
// Reformat disks immediately
_, err := z.HealFormat(context.Background(), false)
if err != nil && !errors.Is(err, errNoHealRequired) {
- logger.LogIf(ctx, err)
+ healingLogIf(ctx, err)
// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
continue

View File

@@ -33,7 +33,6 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
- "github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v2/workers"
@@ -156,7 +155,7 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
}
default:
// we should never come here, Validate should have caught this.
- logger.LogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
+ batchLogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
return false
}
@@ -433,7 +432,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
})
if err != nil {
stopFn(exp, err)
- logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
+ batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
} else {
stopFn(exp, err)
success = true
@@ -471,7 +470,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
for i, err := range errs {
if err != nil {
stopFn(toDelCopy[i], err)
- logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
+ batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
failed++
if attempts == retryAttempts { // all retry attempts failed, record failure
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
@@ -557,16 +556,16 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
select {
case <-saveTicker.C:
// persist in-memory state to disk after every 10secs.
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
+ batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
case <-ctx.Done():
// persist in-memory state immediately before exiting due to context cancellation.
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
+ batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
case <-saverQuitCh:
// persist in-memory state immediately to disk.
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
+ batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
}
}
@@ -670,7 +669,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
// Notify expire jobs final status to the configured endpoint
buf, _ := json.Marshal(ri)
if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil {
- logger.LogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
+ batchLogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
}
return nil

View File

@@ -48,7 +48,6 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
- "github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/policy"
@@ -206,7 +205,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
if aerr == nil {
return
}
- logger.LogIf(ctx,
+ batchLogIf(ctx,
fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
humanize.Ordinal(attempts), res.UploadID, tgtBucket, tgtObject, aerr))
attempts++
@@ -402,7 +401,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
} else {
if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) &&
!isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
- logger.LogIf(ctx, err)
+ batchLogIf(ctx, err)
}
continue
}
@@ -414,7 +413,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
} else {
if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) &&
!isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
- logger.LogIf(ctx, err)
+ batchLogIf(ctx, err)
}
continue
}
@@ -443,7 +442,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
return
}
stopFn(oi, err)
- logger.LogIf(ctx, err)
+ batchLogIf(ctx, err)
success = false
} else {
stopFn(oi, nil)
@@ -451,7 +450,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
+ batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
time.Sleep(wait)
@@ -466,10 +465,10 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
+ batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
if err := r.Notify(ctx, ri); err != nil {
- logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
+ batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
@@ -553,7 +552,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
VersionID: entry.VersionID,
})
if err != nil {
- logger.LogIf(ctx, err)
+ batchLogIf(ctx, err)
continue
}
@@ -572,7 +571,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
opts, err := batchReplicationOpts(ctx, "", gr.ObjInfo)
if err != nil {
- logger.LogIf(ctx, err)
+ batchLogIf(ctx, err)
continue
}
@@ -1072,7 +1071,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
BucketLookup: lookupStyle(r.Target.Path),
})
if err != nil {
- logger.LogIf(ctx, err)
+ batchLogIf(ctx, err)
return
}
@@ -1083,7 +1082,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
writeFn := func(batch []ObjectInfo) {
if len(batch) > 0 {
if err := r.writeAsArchive(ctx, api, cl, batch); err != nil {
- logger.LogIf(ctx, err)
+ batchLogIf(ctx, err)
for _, b := range batch {
slowCh <- b
}
@@ -1091,7 +1090,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
ri.trackCurrentBucketBatch(r.Source.Bucket, batch)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
+ batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
}
}
}
@@ -1179,7 +1178,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
return
}
stopFn(result, err)
- logger.LogIf(ctx, err)
+ batchLogIf(ctx, err)
success = false
} else {
stopFn(result, nil)
@@ -1187,7 +1186,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
ri.trackCurrentBucketObject(r.Source.Bucket, result, success) ri.trackCurrentBucketObject(r.Source.Bucket, result, success)
globalBatchJobsMetrics.save(job.ID, ri) globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs. // persist in-memory state to disk after every 10secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if wait := globalBatchConfig.ReplicationWait(); wait > 0 { if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
time.Sleep(wait) time.Sleep(wait)
@ -1202,10 +1201,10 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
globalBatchJobsMetrics.save(job.ID, ri) globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk. // persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
if err := r.Notify(ctx, ri); err != nil { if err := r.Notify(ctx, ri); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err)) batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
} }
cancel() cancel()
@ -1500,7 +1499,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
req := &BatchJobRequest{} req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, result.Name); err != nil { if err := req.load(ctx, objectAPI, result.Name); err != nil {
if !errors.Is(err, errNoSuchJob) { if !errors.Is(err, errNoSuchJob) {
logger.LogIf(ctx, err) batchLogIf(ctx, err)
} }
continue continue
} }
@ -1516,7 +1515,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
} }
} }
logger.LogIf(ctx, json.NewEncoder(w).Encode(&listResult)) batchLogIf(ctx, json.NewEncoder(w).Encode(&listResult))
} }
var errNoSuchJob = errors.New("no such job") var errNoSuchJob = errors.New("no such job")
@ -1539,7 +1538,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
req := &BatchJobRequest{} req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, jobID)); err != nil { if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, jobID)); err != nil {
if !errors.Is(err, errNoSuchJob) { if !errors.Is(err, errNoSuchJob) {
logger.LogIf(ctx, err) batchLogIf(ctx, err)
} }
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
@ -1548,7 +1547,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
buf, err := yaml.Marshal(req) buf, err := yaml.Marshal(req)
if err != nil { if err != nil {
logger.LogIf(ctx, err) batchLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
@ -1707,7 +1706,7 @@ func (j *BatchJobPool) resume() {
ctx, cancel := context.WithCancel(j.ctx) ctx, cancel := context.WithCancel(j.ctx)
defer cancel() defer cancel()
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil { if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil {
logger.LogIf(j.ctx, err) batchLogIf(j.ctx, err)
return return
} }
for result := range results { for result := range results {
@ -1717,7 +1716,7 @@ func (j *BatchJobPool) resume() {
} }
req := &BatchJobRequest{} req := &BatchJobRequest{}
if err := req.load(ctx, j.objLayer, result.Name); err != nil { if err := req.load(ctx, j.objLayer, result.Name); err != nil {
logger.LogIf(ctx, err) batchLogIf(ctx, err)
continue continue
} }
_, nodeIdx := parseRequestToken(req.ID) _, nodeIdx := parseRequestToken(req.ID)
@ -1726,7 +1725,7 @@ func (j *BatchJobPool) resume() {
continue continue
} }
if err := j.queueJob(req); err != nil { if err := j.queueJob(req); err != nil {
logger.LogIf(ctx, err) batchLogIf(ctx, err)
continue continue
} }
} }
@ -1750,7 +1749,7 @@ func (j *BatchJobPool) AddWorker() {
if job.Replicate.RemoteToLocal() { if job.Replicate.RemoteToLocal() {
if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil { if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) { if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err) batchLogIf(j.ctx, err)
j.canceler(job.ID, false) j.canceler(job.ID, false)
continue continue
} }
@ -1759,7 +1758,7 @@ func (j *BatchJobPool) AddWorker() {
} else { } else {
if err := job.Replicate.Start(job.ctx, j.objLayer, *job); err != nil { if err := job.Replicate.Start(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) { if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err) batchLogIf(j.ctx, err)
j.canceler(job.ID, false) j.canceler(job.ID, false)
continue continue
} }
@ -1769,14 +1768,14 @@ func (j *BatchJobPool) AddWorker() {
case job.KeyRotate != nil: case job.KeyRotate != nil:
if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil { if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) { if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err) batchLogIf(j.ctx, err)
continue continue
} }
} }
case job.Expire != nil: case job.Expire != nil:
if err := job.Expire.Start(job.ctx, j.objLayer, *job); err != nil { if err := job.Expire.Start(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) { if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err) batchLogIf(j.ctx, err)
continue continue
} }
} }


@ -33,7 +33,6 @@ import (
"github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env" "github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/workers" "github.com/minio/pkg/v2/workers"
) )
@ -383,7 +382,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
success := true success := true
if err := r.KeyRotate(ctx, api, result); err != nil { if err := r.KeyRotate(ctx, api, result); err != nil {
stopFn(result, err) stopFn(result, err)
logger.LogIf(ctx, err) batchLogIf(ctx, err)
success = false success = false
} else { } else {
stopFn(result, nil) stopFn(result, nil)
@ -392,7 +391,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
ri.RetryAttempts = attempts ri.RetryAttempts = attempts
globalBatchJobsMetrics.save(job.ID, ri) globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs. // persist in-memory state to disk after every 10secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if success { if success {
break break
} }
@ -412,10 +411,10 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
ri.Failed = ri.ObjectsFailed > 0 ri.Failed = ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri) globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk. // persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
if err := r.Notify(ctx, ri); err != nil { if err := r.Notify(ctx, ri); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err)) batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
} }
cancel() cancel()
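
The batch job hunks above all persist progress through ri.updateAfter, with a 10-second threshold inside the hot loop and 0 on completion or shutdown. That implementation is outside this excerpt; the standalone sketch below only illustrates the assumed throttling contract: a zero wait forces a save, a non-zero wait skips the disk write unless the last save is older than the wait.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// jobState is a stand-in for the batch job progress tracker in the diff.
type jobState struct {
	mu        sync.Mutex
	lastSaved time.Time
}

// updateAfter mirrors the assumed contract of ri.updateAfter: zero wait means
// persist now, otherwise persist only if the previous save is older than wait.
func (j *jobState) updateAfter(wait time.Duration, save func() error) error {
	j.mu.Lock()
	defer j.mu.Unlock()
	now := time.Now()
	if wait > 0 && now.Sub(j.lastSaved) < wait {
		return nil // too soon, skip the disk write
	}
	if err := save(); err != nil {
		return err
	}
	j.lastSaved = now
	return nil
}

func main() {
	js := &jobState{}
	save := func() error { fmt.Println("persisted"); return nil }

	_ = js.updateAfter(10*time.Second, save) // first call: persists
	_ = js.updateAfter(10*time.Second, save) // within 10s: skipped
	_ = js.updateAfter(0, save)              // zero wait: persists immediately
}
```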


@ -198,7 +198,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
if err != nil { if err != nil {
bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt)) bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt))
if !isNetworkError(err) { if !isNetworkError(err) {
logger.LogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String()) bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err)) incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err))
} else { } else {
offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, err)) offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, err))
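
The bootstrap check above logs through bootLogOnceIf with a deduplication key ("incorrect_"+clnt.String()), the same *LogOnceIf pattern used throughout the replication hunks further down. The real helpers live elsewhere; this standalone sketch only illustrates the once-per-key idea. It suppresses repeats for the process lifetime, whereas a real implementation may let suppressed errors resurface periodically.

```go
package main

import (
	"context"
	"errors"
	"log"
	"sync"
)

// onceLogger suppresses repeated errors that share a deduplication key,
// mirroring the assumed behaviour of the *LogOnceIf helpers in the diff.
type onceLogger struct {
	seen sync.Map // key -> struct{}
}

func (o *onceLogger) logOnceIf(_ context.Context, err error, key string) {
	if err == nil {
		return
	}
	if _, loaded := o.seen.LoadOrStore(key, struct{}{}); loaded {
		return // already reported under this key
	}
	log.Printf("%s: %v", key, err)
}

func main() {
	var o onceLogger
	err := errors.New("host1:9000 has incorrect configuration")
	for i := 0; i < 3; i++ {
		o.logOnceIf(context.Background(), err, "incorrect_host1:9000") // printed once
	}
}
```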


@ -114,7 +114,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
// We encode the xml bytes as base64 to ensure there are no encoding // We encode the xml bytes as base64 to ensure there are no encoding
// errors. // errors.
cfgStr := base64.StdEncoding.EncodeToString(configData) cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig, Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket, Bucket: bucket,
SSEConfig: &cfgStr, SSEConfig: &cfgStr,
@ -203,7 +203,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
} }
// Call site replication hook. // Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig, Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket, Bucket: bucket,
SSEConfig: nil, SSEConfig: nil,


@ -98,7 +98,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Get buckets in the DNS // Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List() dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) { if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
logger.LogIf(GlobalContext, err) dnsLogIf(GlobalContext, err)
return return
} }
@ -160,13 +160,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
ctx := GlobalContext ctx := GlobalContext
for _, err := range g.Wait() { for _, err := range g.Wait() {
if err != nil { if err != nil {
logger.LogIf(ctx, err) dnsLogIf(ctx, err)
return return
} }
} }
for _, bucket := range bucketsInConflict.ToSlice() { for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice())) dnsLogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
} }
var wg sync.WaitGroup var wg sync.WaitGroup
@ -187,7 +187,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// We go to here, so we know the bucket no longer exists, // We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server // but is registered in DNS to this server
if err := globalDNSConfig.Delete(bucket); err != nil { if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w", dnsLogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err)) bucket, err))
} }
}(bucket) }(bucket)
@ -790,7 +790,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// check if client is attempting to create more buckets, complain about it. // check if client is attempting to create more buckets, complain about it.
if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets { if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets)) internalLogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets), logger.WarningKind)
} }
opts := MakeBucketOptions{ opts := MakeBucketOptions{
@ -871,7 +871,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket) globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
// Call site replication hook // Call site replication hook
logger.LogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)) replLogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))
// Make sure to add Location information here only for bucket // Make sure to add Location information here only for bucket
w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket)) w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket))
@ -1693,7 +1693,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
if globalDNSConfig != nil { if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil { if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err)) dnsLogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
@ -1703,7 +1703,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
globalReplicationPool.deleteResyncMetadata(ctx, bucket) globalReplicationPool.deleteResyncMetadata(ctx, bucket)
// Call site replication hook. // Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete)) replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
// Write success response. // Write success response.
writeSuccessNoContent(w) writeSuccessNoContent(w)
@ -1776,7 +1776,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
// We encode the xml bytes as base64 to ensure there are no encoding // We encode the xml bytes as base64 to ensure there are no encoding
// errors. // errors.
cfgStr := base64.StdEncoding.EncodeToString(configData) cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeObjectLockConfig, Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket, Bucket: bucket,
ObjectLockConfig: &cfgStr, ObjectLockConfig: &cfgStr,
@ -1880,7 +1880,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
// We encode the xml bytes as base64 to ensure there are no encoding // We encode the xml bytes as base64 to ensure there are no encoding
// errors. // errors.
cfgStr := base64.StdEncoding.EncodeToString(configData) cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags, Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket, Bucket: bucket,
Tags: &cfgStr, Tags: &cfgStr,
@ -1956,7 +1956,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
return return
} }
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags, Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket, Bucket: bucket,
UpdatedAt: updatedAt, UpdatedAt: updatedAt,
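
In the bucket handler hunks above, the site-replication hooks are wrapped in replLogIf rather than being allowed to fail the request: the bucket change has already succeeded locally, so a hook error is logged and the handler still returns success. A minimal standalone sketch of that log-and-continue shape, with hypothetical stand-ins for the handler, the hook, and the logger:

```go
package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"net/http/httptest"
)

// replLogIfStub stands in for the replication-subsystem helper in the diff.
func replLogIfStub(_ context.Context, err error) {
	if err != nil {
		log.Printf("replication: %v", err)
	}
}

// bucketMetaHook is a hypothetical stand-in for the site-replication hook;
// it always fails here so the log-and-continue path is visible.
func bucketMetaHook(_ context.Context, _ string) error {
	return errors.New("peer site unreachable")
}

// putBucketTagging sketches the handler pattern from the diff: the local
// metadata update has already succeeded, so a failing hook is only logged
// and the client still receives a success response.
func putBucketTagging(w http.ResponseWriter, r *http.Request, bucket string) {
	ctx := r.Context()

	// ... local bucket metadata update happens here and has succeeded ...

	// Best effort: propagate the change to peer sites, never fail the request for it.
	replLogIfStub(ctx, bucketMetaHook(ctx, bucket))

	w.WriteHeader(http.StatusOK)
}

func main() {
	req := httptest.NewRequest(http.MethodPut, "/mybucket?tagging", nil)
	rec := httptest.NewRecorder()
	putBucketTagging(rec, req, "mybucket")
	log.Println("status:", rec.Code) // 200 even though the hook failed
}
```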


@ -336,7 +336,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
case newerNoncurrentTask: case newerNoncurrentTask:
deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event) deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event)
case jentry: case jentry:
logger.LogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName)) transitionLogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName))
case freeVersionTask: case freeVersionTask:
oi := v.ObjectInfo oi := v.ObjectInfo
traceFn := globalLifecycleSys.trace(oi) traceFn := globalLifecycleSys.trace(oi)
@ -355,7 +355,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
// Remove the remote object // Remove the remote object
err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier) err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier)
if ignoreNotFoundErr(err) != nil { if ignoreNotFoundErr(err) != nil {
logger.LogIf(es.ctx, err) transitionLogIf(es.ctx, err)
return return
} }
@ -368,10 +368,10 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
auditLogLifecycle(es.ctx, oi, ILMFreeVersionDelete, nil, traceFn) auditLogLifecycle(es.ctx, oi, ILMFreeVersionDelete, nil, traceFn)
} }
if ignoreNotFoundErr(err) != nil { if ignoreNotFoundErr(err) != nil {
logger.LogIf(es.ctx, err) transitionLogIf(es.ctx, err)
} }
default: default:
logger.LogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v)) bugLogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v))
} }
} }
} }
@ -486,7 +486,7 @@ func (t *transitionState) worker(objectAPI ObjectLayer) {
if err := transitionObject(t.ctx, objectAPI, task.objInfo, newLifecycleAuditEvent(task.src, task.event)); err != nil { if err := transitionObject(t.ctx, objectAPI, task.objInfo, newLifecycleAuditEvent(task.src, task.event)); err != nil {
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !xnet.IsNetworkOrHostDown(err, false) { if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !xnet.IsNetworkOrHostDown(err, false) {
if !strings.Contains(err.Error(), "use of closed network connection") { if !strings.Contains(err.Error(), "use of closed network connection") {
logger.LogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w", transitionLogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w",
task.event.StorageClass, task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err)) task.event.StorageClass, task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err))
} }
} }
@ -614,7 +614,7 @@ func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *Ob
// remote object // remote object
opts.SkipFreeVersion = true opts.SkipFreeVersion = true
} else { } else {
logger.LogIf(ctx, err) transitionLogIf(ctx, err)
} }
// Now, delete object from hot-tier namespace // Now, delete object from hot-tier namespace
@ -879,7 +879,7 @@ func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string
if vid != "" && vid != nullVersionID { if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid) _, err := uuid.Parse(vid)
if err != nil { if err != nil {
logger.LogIf(ctx, err) s3LogIf(ctx, err)
return opts, InvalidVersionID{ return opts, InvalidVersionID{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
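
The expiry worker above routes its default case to bugLogIf: every expected task type has an explicit case, so reaching default means a caller enqueued a type the worker does not know about, a code bug rather than an operational failure. A standalone sketch of the same type-switch shape, with a stub standing in for bugLogIf:

```go
package main

import (
	"context"
	"fmt"
	"log"
)

// bugLogIfStub stands in for the helper in the diff; the real one routes
// through the internal logger as a "this should never happen" report.
func bugLogIfStub(_ context.Context, err error) {
	if err != nil {
		log.Printf("BUG: %v", err)
	}
}

type expiryOp interface{}

type tierDeleteTask struct{ object string }
type freeVersionTask struct{ object string }

// worker handles every known task type explicitly; anything else indicates a
// producer bug and is reported via the bug path instead of being dropped silently.
func worker(ctx context.Context, input <-chan expiryOp) {
	for v := range input {
		switch v := v.(type) {
		case tierDeleteTask:
			log.Printf("deleting %s from remote tier", v.object)
		case freeVersionTask:
			log.Printf("releasing free version of %s", v.object)
		default:
			bugLogIfStub(ctx, fmt.Errorf("invalid work type - %v", v))
		}
	}
}

func main() {
	ch := make(chan expiryOp, 3)
	ch <- tierDeleteTask{object: "photos/a.jpg"}
	ch <- freeVersionTask{object: "photos/a.jpg"}
	ch <- "not a task" // triggers the bug path
	close(ch)
	worker(context.Background(), ch)
}
```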


@ -500,7 +500,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
errs := g.Wait() errs := g.Wait()
for _, err := range errs { for _, err := range errs {
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
} }
} }
@ -542,7 +542,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
case <-t.C: case <-t.C:
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{}) buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{})
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
break break
} }
@ -560,7 +560,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name) meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name)
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
wait() // wait to proceed to next entry. wait() // wait to proceed to next entry.
continue continue
} }


@ -145,7 +145,7 @@ func (b *BucketMetadata) SetCreatedAt(createdAt time.Time) {
// If an error is returned the returned metadata will be default initialized. // If an error is returned the returned metadata will be default initialized.
func readBucketMetadata(ctx context.Context, api ObjectLayer, name string) (BucketMetadata, error) { func readBucketMetadata(ctx context.Context, api ObjectLayer, name string) (BucketMetadata, error) {
if name == "" { if name == "" {
logger.LogIf(ctx, errors.New("bucket name cannot be empty")) internalLogIf(ctx, errors.New("bucket name cannot be empty"), logger.WarningKind)
return BucketMetadata{}, errInvalidArgument return BucketMetadata{}, errInvalidArgument
} }
b := newBucketMetadata(name) b := newBucketMetadata(name)
@ -400,7 +400,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
for legacyFile := range configs { for legacyFile := range configs {
configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile) configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)
if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) { if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
} }
} }


@ -66,7 +66,7 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke
if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) { if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) {
t, err := objectlock.UTCNowNTP() t, err := objectlock.UTCNowNTP()
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
return true return true
} }
if ret.RetainUntilDate.After(t) { if ret.RetainUntilDate.After(t) {
@ -114,7 +114,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
// duration of the retention period. // duration of the retention period.
t, err := objectlock.UTCNowNTP() t, err := objectlock.UTCNowNTP()
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{} return ObjectLocked{}
} }
@ -140,7 +140,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
if !byPassSet { if !byPassSet {
t, err := objectlock.UTCNowNTP() t, err := objectlock.UTCNowNTP()
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{} return ObjectLocked{}
} }
@ -170,7 +170,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec
t, err := objectlock.UTCNowNTP() t, err := objectlock.UTCNowNTP()
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID} return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
} }
@ -277,7 +277,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined) r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
t, err := objectlock.UTCNowNTP() t, err := objectlock.UTCNowNTP()
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
return mode, retainDate, legalHold, ErrObjectLocked return mode, retainDate, legalHold, ErrObjectLocked
} }
if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) { if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
@ -324,7 +324,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
t, err := objectlock.UTCNowNTP() t, err := objectlock.UTCNowNTP()
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err, logger.WarningKind)
return mode, retainDate, legalHold, ErrObjectLocked return mode, retainDate, legalHold, ErrObjectLocked
} }


@ -113,7 +113,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
} }
// Call site replication hook. // Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy, Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket, Bucket: bucket,
Policy: bucketPolicyBytes, Policy: bucketPolicyBytes,
@ -157,7 +157,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
} }
// Call site replication hook. // Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy, Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket, Bucket: bucket,
UpdatedAt: updatedAt, UpdatedAt: updatedAt,


@ -53,7 +53,7 @@ func (sys *PolicySys) IsAllowed(args policy.BucketPolicyArgs) bool {
// Log unhandled errors. // Log unhandled errors.
if _, ok := err.(BucketPolicyNotFound); !ok { if _, ok := err.(BucketPolicyNotFound); !ok {
logger.LogIf(GlobalContext, err) internalLogIf(GlobalContext, err, logger.WarningKind)
} }
// As policy is not available for given bucket name, returns IsOwner i.e. // As policy is not available for given bucket name, returns IsOwner i.e.


@ -64,9 +64,9 @@ func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, e
timedout := OperationTimedOut{} timedout := OperationTimedOut{}
if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) { if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) {
if len(dui.BucketsUsage) > 0 { if len(dui.BucketsUsage) > 0 {
logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket) internalLogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket)
} else { } else {
logger.LogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket) internalLogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket)
} }
} }
@ -87,7 +87,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
} }
if !quotaCfg.IsValid() { if !quotaCfg.IsValid() {
if quotaCfg.Type == "fifo" { if quotaCfg.Type == "fifo" {
logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects")) internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
return quotaCfg, fmt.Errorf("invalid quota type 'fifo'") return quotaCfg, fmt.Errorf("invalid quota type 'fifo'")
} }
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg) return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
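
Many of the rewritten call sites above and below append logger.WarningKind as a trailing argument (internalLogIf(ctx, err, logger.WarningKind)), classifying the entry's severity without changing the call shape. How the kinds are consumed is not shown in this excerpt; the standalone sketch below is only one way a variadic classifier could work, under the assumption that an unclassified call defaults to error severity.

```go
package main

import (
	"context"
	"errors"
	"log"
)

type logKind string

const (
	errorKind   logKind = "ERROR"
	warningKind logKind = "WARNING"
)

// internalLogIfStub mimics the assumed call shape of the subsystem helpers:
// the error comes first, optional trailing values may carry a severity kind,
// and the default severity is "ERROR" when no kind is supplied.
func internalLogIfStub(_ context.Context, err error, errKind ...interface{}) {
	if err == nil {
		return
	}
	kind := errorKind
	for _, k := range errKind {
		if lk, ok := k.(logKind); ok {
			kind = lk
		}
	}
	log.Printf("[%s] internal: %v", kind, err)
}

func main() {
	ctx := context.Background()
	internalLogIfStub(ctx, errors.New("bucket name cannot be empty"), warningKind) // [WARNING]
	internalLogIfStub(ctx, errors.New("unexpected metadata read failure"))         // [ERROR]
}
```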


@ -423,7 +423,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
rcfg, err := getReplicationConfig(ctx, bucket) rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil { if err != nil || rcfg == nil {
logger.LogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket) replLogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket)
sendEvent(eventArgs{ sendEvent(eventArgs{
BucketName: bucket, BucketName: bucket,
Object: ObjectInfo{ Object: ObjectInfo{
@ -440,7 +440,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
} }
dsc, err := parseReplicateDecision(ctx, bucket, dobj.ReplicationState.ReplicateDecisionStr) dsc, err := parseReplicateDecision(ctx, bucket, dobj.ReplicationState.ReplicateDecisionStr)
if err != nil { if err != nil {
logger.LogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s", replLogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s",
bucket, err, dobj.ReplicationState.ReplicateDecisionStr), dobj.ReplicationState.ReplicateDecisionStr) bucket, err, dobj.ReplicationState.ReplicateDecisionStr), dobj.ReplicationState.ReplicateDecisionStr)
sendEvent(eventArgs{ sendEvent(eventArgs{
BucketName: bucket, BucketName: bucket,
@ -494,7 +494,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtEntry.Arn) tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtEntry.Arn)
if tgtClnt == nil { if tgtClnt == nil {
// Skip stale targets if any and log them to be missing at least once. // Skip stale targets if any and log them to be missing at least once.
logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn) replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn)
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -606,7 +606,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
return return
} }
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN) replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
sendEvent(eventArgs{ sendEvent(eventArgs{
BucketName: dobj.Bucket, BucketName: dobj.Bucket,
Object: ObjectInfo{ Object: ObjectInfo{
@ -681,7 +681,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
} else { } else {
rinfo.VersionPurgeStatus = Failed rinfo.VersionPurgeStatus = Failed
} }
logger.LogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr)) replLogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr))
if rmErr != nil && minio.IsNetworkOrHostDown(rmErr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) { if rmErr != nil && minio.IsNetworkOrHostDown(rmErr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL()) globalBucketTargetSys.markOffline(tgt.EndpointURL())
} }
@ -994,7 +994,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
cfg, err := getReplicationConfig(ctx, bucket) cfg, err := getReplicationConfig(ctx, bucket)
if err != nil { if err != nil {
logger.LogOnceIf(ctx, err, "get-replication-config-"+bucket) replLogOnceIf(ctx, err, "get-replication-config-"+bucket)
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1033,7 +1033,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
for _, tgtArn := range tgtArns { for _, tgtArn := range tgtArns {
tgt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtArn) tgt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtArn)
if tgt == nil { if tgt == nil {
logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn) replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn)
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1155,7 +1155,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
} }
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN) replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN)
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1185,7 +1185,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
logger.LogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID) replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID)
} }
return return
} }
@ -1198,7 +1198,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
size, err := objInfo.GetActualSize() size, err := objInfo.GetActualSize()
if err != nil { if err != nil {
logger.LogIf(ctx, err) replLogIf(ctx, err)
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1210,7 +1210,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
} }
if tgt.Bucket == "" { if tgt.Bucket == "" {
logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1236,7 +1236,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo) putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err)) replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1271,14 +1271,14 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
r, objInfo, putOpts); rinfo.Err != nil { r, objInfo, putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL())) replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
} }
} }
} else { } else {
if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil { if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL())) replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
} }
} }
} }
@ -1313,7 +1313,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
} }
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN) replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN)
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1344,7 +1344,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]", UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName, Host: globalLocalNodeName,
}) })
logger.LogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err)) replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err))
} }
return return
} }
@ -1364,7 +1364,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
size, err := objInfo.GetActualSize() size, err := objInfo.GetActualSize()
if err != nil { if err != nil {
logger.LogIf(ctx, err) replLogIf(ctx, err)
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1381,7 +1381,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
} }
if tgt.Bucket == "" { if tgt.Bucket == "" {
logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1411,7 +1411,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
if rAction == replicateNone { if rAction == replicateNone {
if ri.OpType == replication.ExistingObjectReplicationType && if ri.OpType == replication.ExistingObjectReplicationType &&
objInfo.ModTime.Unix() > oi.LastModified.Unix() && objInfo.VersionID == nullVersionID { objInfo.ModTime.Unix() > oi.LastModified.Unix() && objInfo.VersionID == nullVersionID {
logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL())) replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL()))
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1451,7 +1451,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
rAction = replicateAll rAction = replicateAll
default: default:
rinfo.Err = cerr rinfo.Err = cerr
logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD", replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD",
bucket, object, objInfo.VersionID, tgt.EndpointURL(), cerr)) bucket, object, objInfo.VersionID, tgt.EndpointURL(), cerr))
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
@ -1501,13 +1501,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
} }
if _, rinfo.Err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); rinfo.Err != nil { if _, rinfo.Err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); rinfo.Err != nil {
rinfo.ReplicationStatus = replication.Failed rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) replLogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
} }
} else { } else {
var putOpts minio.PutObjectOptions var putOpts minio.PutObjectOptions
putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo) putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err)) replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked, EventName: event.ObjectReplicationNotTracked,
BucketName: bucket, BucketName: bucket,
@ -1541,7 +1541,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
r, objInfo, putOpts); rinfo.Err != nil { r, objInfo, putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
} else { } else {
rinfo.ReplicationStatus = replication.Completed rinfo.ReplicationStatus = replication.Completed
} }
@ -1550,7 +1550,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil { if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
} else { } else {
rinfo.ReplicationStatus = replication.Completed rinfo.ReplicationStatus = replication.Completed
} }
@ -1598,7 +1598,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
return return
} }
acancel() acancel()
logger.LogIf(actx, replLogIf(actx,
fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster", fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
humanize.Ordinal(attempts), uploadID, bucket, object, aerr)) humanize.Ordinal(attempts), uploadID, bucket, object, aerr))
attempts++ attempts++
@ -1866,7 +1866,7 @@ func (p *ReplicationPool) AddMRFWorker() {
globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
default: default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type") bugLogIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type")
} }
case <-p.mrfWorkerKillCh: case <-p.mrfWorkerKillCh:
return return
@ -1910,7 +1910,7 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT
atomic.AddInt32(opTracker, -1) atomic.AddInt32(opTracker, -1)
} }
default: default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
} }
} }
} }
@ -1949,7 +1949,7 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation
case DeletedObjectReplicationInfo: case DeletedObjectReplicationInfo:
replicateDelete(p.ctx, v, p.objLayer) replicateDelete(p.ctx, v, p.objLayer)
default: default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
} }
} }
} }
@ -2096,9 +2096,9 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
p.mu.RUnlock() p.mu.RUnlock()
switch prio { switch prio {
case "fast": case "fast":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic"), string(replicationSubsystem)) replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic"), string(replicationSubsystem), logger.WarningKind)
case "slow": case "slow":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem)) replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind)
default: default:
maxWorkers = min(maxWorkers, WorkerMaxLimit) maxWorkers = min(maxWorkers, WorkerMaxLimit)
if p.ActiveWorkers() < maxWorkers { if p.ActiveWorkers() < maxWorkers {
@ -2153,9 +2153,9 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf
p.mu.RUnlock() p.mu.RUnlock()
switch prio { switch prio {
case "fast": case "fast":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes"), string(replicationSubsystem)) replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes"), string(replicationSubsystem), logger.WarningKind)
case "slow": case "slow":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem)) replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind)
default: default:
maxWorkers = min(maxWorkers, WorkerMaxLimit) maxWorkers = min(maxWorkers, WorkerMaxLimit)
if p.ActiveWorkers() < maxWorkers { if p.ActiveWorkers() < maxWorkers {
@ -2288,7 +2288,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRa
if rs != nil { if rs != nil {
h, err := rs.ToHeader() h, err := rs.ToHeader()
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err)) replLogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err))
continue continue
} }
gopts.Set(xhttp.Range, h) gopts.Set(xhttp.Range, h)
@ -2656,7 +2656,7 @@ func (s *replicationResyncer) PersistToDisk(ctx context.Context, objectAPI Objec
} }
if updt { if updt {
if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil { if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil {
logger.LogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err)) replLogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err))
} else { } else {
lastResyncStatusSave[bucket] = brs.LastUpdate lastResyncStatusSave[bucket] = brs.LastUpdate
} }
@ -2744,12 +2744,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
objInfoCh := make(chan ObjectInfo) objInfoCh := make(chan ObjectInfo)
cfg, err := getReplicationConfig(ctx, opts.bucket) cfg, err := getReplicationConfig(ctx, opts.bucket)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err)) replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err))
return return
} }
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, opts.bucket) tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, opts.bucket)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err)) replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err))
return return
} }
rcfg := replicationConfig{ rcfg := replicationConfig{
@ -2762,12 +2762,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
TargetArn: opts.arn, TargetArn: opts.arn,
}) })
if len(tgtArns) != 1 { if len(tgtArns) != 1 {
logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn)) replLogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn))
return return
} }
tgt := globalBucketTargetSys.GetRemoteTargetClient(opts.bucket, opts.arn) tgt := globalBucketTargetSys.GetRemoteTargetClient(opts.bucket, opts.arn)
if tgt == nil { if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn)) replLogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn))
return return
} }
// mark resync status as resync started // mark resync status as resync started
@ -2778,7 +2778,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
// Walk through all object versions - Walk() is always in ascending order needed to ensure // Walk through all object versions - Walk() is always in ascending order needed to ensure
// delete marker replicated to target after object version is first created. // delete marker replicated to target after object version is first created.
if err := objectAPI.Walk(ctx, opts.bucket, "", objInfoCh, WalkOptions{}); err != nil { if err := objectAPI.Walk(ctx, opts.bucket, "", objInfoCh, WalkOptions{}); err != nil {
logger.LogIf(ctx, err) replLogIf(ctx, err)
return return
} }
@ -3053,7 +3053,7 @@ func (p *ReplicationPool) loadResync(ctx context.Context, buckets []BucketInfo,
meta, err := loadBucketResyncMetadata(ctx, bucket, objAPI) meta, err := loadBucketResyncMetadata(ctx, bucket, objAPI)
if err != nil { if err != nil {
if !errors.Is(err, errVolumeNotFound) { if !errors.Is(err, errVolumeNotFound) {
logger.LogIf(ctx, err) replLogIf(ctx, err)
} }
continue continue
} }
@ -3140,18 +3140,18 @@ func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationR
func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) { func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) {
cfg, err := getReplicationConfig(ctx, bucket) cfg, err := getReplicationConfig(ctx, bucket)
if err != nil { if err != nil {
logger.LogIf(ctx, err) replLogIf(ctx, err)
return nil, err return nil, err
} }
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket) tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil { if err != nil {
logger.LogIf(ctx, err) replLogIf(ctx, err)
return nil, err return nil, err
} }
objInfoCh := make(chan ObjectInfo, 10) objInfoCh := make(chan ObjectInfo, 10)
if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, WalkOptions{}); err != nil { if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, WalkOptions{}); err != nil {
logger.LogIf(ctx, err) replLogIf(ctx, err)
return nil, err return nil, err
} }
rcfg := replicationConfig{ rcfg := replicationConfig{
@ -3535,7 +3535,7 @@ func (p *ReplicationPool) processMRF() {
continue continue
} }
if err := p.queueMRFHeal(); err != nil && !osIsNotExist(err) { if err := p.queueMRFHeal(); err != nil && !osIsNotExist(err) {
logger.LogIf(p.ctx, err) replLogIf(p.ctx, err)
} }
pTimer.Reset(mrfQueueInterval) pTimer.Reset(mrfQueueInterval)
case <-p.ctx.Done(): case <-p.ctx.Done():
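The hunks above are all the same mechanical substitution: the generic logger.LogIf call is swapped for the replication-scoped replLogIf with the arguments left untouched. The helper definitions are not part of these hunks, so the snippet below is only a minimal sketch of what such a wrapper could look like, assuming it forwards to the logger.LogIf signature visible on the left-hand side of the hunks; the subsystem tag and any extra routing the real helper performs are assumptions.

package cmd

import (
    "context"

    "github.com/minio/minio/internal/logger"
)

// Hypothetical sketch, not the definition added by this commit: forward to
// the generic helper unchanged. A real subsystem wrapper would presumably
// also attach a "replication" tag so log targets can filter per subsystem.
func replLogIf(ctx context.Context, err error, errKind ...interface{}) {
    logger.LogIf(ctx, err, errKind...)
}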


@ -20,7 +20,6 @@ package cmd
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"net/url" "net/url"
"sync" "sync"
"time" "time"
@ -32,7 +31,6 @@ import (
"github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
) )
const ( const (
@ -131,7 +129,7 @@ func (sys *BucketTargetSys) initHC(ep *url.URL) {
func newHCClient() *madmin.AnonymousClient { func newHCClient() *madmin.AnonymousClient {
clnt, e := madmin.NewAnonymousClientNoEndpoint() clnt, e := madmin.NewAnonymousClientNoEndpoint()
if e != nil { if e != nil {
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to initialize health check client"), string(replicationSubsystem)) bugLogIf(GlobalContext, errors.New("Unable to initialize health check client"))
return nil return nil
} }
clnt.SetCustomTransport(globalRemoteTargetTransport) clnt.SetCustomTransport(globalRemoteTargetTransport)
@ -624,7 +622,7 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
for _, tgt := range cfg.Targets { for _, tgt := range cfg.Targets {
tgtClient, err := sys.getRemoteTargetClient(&tgt) tgtClient, err := sys.getRemoteTargetClient(&tgt)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) replLogIf(GlobalContext, err)
continue continue
} }
sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: tgtClient} sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: tgtClient}


@ -108,7 +108,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
// We encode the xml bytes as base64 to ensure there are no encoding // We encode the xml bytes as base64 to ensure there are no encoding
// errors. // errors.
cfgStr := base64.StdEncoding.EncodeToString(configData) cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeVersionConfig, Type: madmin.SRBucketMetaTypeVersionConfig,
Bucket: bucket, Bucket: bucket,
Versioning: &cfgStr, Versioning: &cfgStr,


@ -29,7 +29,6 @@ import (
"time" "time"
"github.com/minio/madmin-go/v3" "github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
) )
var callhomeLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second) var callhomeLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
@ -112,7 +111,7 @@ func performCallhome(ctx context.Context) {
deadline := 10 * time.Second // Default deadline is 10secs for callhome deadline := 10 * time.Second // Default deadline is 10secs for callhome
objectAPI := newObjectLayerFn() objectAPI := newObjectLayerFn()
if objectAPI == nil { if objectAPI == nil {
logger.LogIf(ctx, errors.New("Callhome: object layer not ready")) internalLogIf(ctx, errors.New("Callhome: object layer not ready"))
return return
} }
@ -145,7 +144,7 @@ func performCallhome(ctx context.Context) {
// Received all data. Send to SUBNET and return // Received all data. Send to SUBNET and return
err := sendHealthInfo(ctx, healthInfo) err := sendHealthInfo(ctx, healthInfo)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err)) internalLogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
} }
return return
} }
@ -180,12 +179,12 @@ func createHealthJSONGzip(ctx context.Context, healthInfo madmin.HealthInfo) []b
enc := json.NewEncoder(gzWriter) enc := json.NewEncoder(gzWriter)
if e := enc.Encode(header); e != nil { if e := enc.Encode(header); e != nil {
logger.LogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e)) internalLogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e))
return nil return nil
} }
if e := enc.Encode(healthInfo); e != nil { if e := enc.Encode(healthInfo); e != nil {
logger.LogIf(ctx, fmt.Errorf("Could not encode health info: %w", e)) internalLogIf(ctx, fmt.Errorf("Could not encode health info: %w", e))
return nil return nil
} }


@ -1044,7 +1044,7 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu
} }
if err = manager.AddCertificate(certFile, keyFile); err != nil { if err = manager.AddCertificate(certFile, keyFile); err != nil {
err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err) err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
logger.LogIf(GlobalContext, err, logger.ErrorKind) bootLogIf(GlobalContext, err, logger.ErrorKind)
} }
} }
secureConn = true secureConn = true


@ -479,7 +479,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook) dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
} }
if err == nil && dnsURL != "" { if err == nil && dnsURL != "" {
bootstrapTraceMsg("initialize remote bucket DNS store") bootstrapTraceMsg("initialize remote bucket DNS store")
@ -487,27 +487,27 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
dns.Authentication(dnsUser, dnsPass), dns.Authentication(dnsUser, dnsPass),
dns.RootCAs(globalRootCAs)) dns.RootCAs(globalRootCAs))
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
} }
} }
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs) etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
} }
if etcdCfg.Enabled { if etcdCfg.Enabled {
bootstrapTraceMsg("initialize etcd store") bootstrapTraceMsg("initialize etcd store")
globalEtcdClient, err = etcd.New(etcdCfg) globalEtcdClient, err = etcd.New(etcdCfg)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
} }
if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil { if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
if globalDNSConfig != nil { if globalDNSConfig != nil {
// if global DNS is already configured, indicate with a warning, in case // if global DNS is already configured, indicate with a warning, in case
// users are confused. // users are confused.
logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig)) configLogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig))
} else { } else {
globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config, globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
dns.DomainNames(globalDomainNames), dns.DomainNames(globalDomainNames),
@ -516,7 +516,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
dns.CoreDNSPath(etcdCfg.CoreDNSPath), dns.CoreDNSPath(etcdCfg.CoreDNSPath),
) )
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w", configLogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
globalDomainNames, err)) globalDomainNames, err))
} }
} }
@ -532,7 +532,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default]) globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err)) configLogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
} }
globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled
@ -545,19 +545,19 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
bootstrapTraceMsg("initialize the event notification targets") bootstrapTraceMsg("initialize the event notification targets")
globalNotifyTargetList, err = notify.FetchEnabledTargets(GlobalContext, s, transport) globalNotifyTargetList, err = notify.FetchEnabledTargets(GlobalContext, s, transport)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err)) configLogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
} }
bootstrapTraceMsg("initialize the lambda targets") bootstrapTraceMsg("initialize the lambda targets")
globalLambdaTargetList, err = lambda.FetchEnabledTargets(GlobalContext, s, transport) globalLambdaTargetList, err = lambda.FetchEnabledTargets(GlobalContext, s, transport)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err)) configLogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err))
} }
bootstrapTraceMsg("applying the dynamic configuration") bootstrapTraceMsg("applying the dynamic configuration")
// Apply dynamic config values // Apply dynamic config values
if err := applyDynamicConfig(ctx, objAPI, s); err != nil { if err := applyDynamicConfig(ctx, objAPI, s); err != nil {
logger.LogIf(ctx, err) configLogIf(ctx, err)
} }
} }
@ -571,7 +571,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
case config.APISubSys: case config.APISubSys:
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default]) apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err)) configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
} }
globalAPIConfig.init(apiConfig, setDriveCounts) globalAPIConfig.init(apiConfig, setDriveCounts)
@ -607,33 +607,33 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
scannerCycle.Store(scannerCfg.Cycle) scannerCycle.Store(scannerCfg.Cycle)
scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions) scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
scannerExcessFolders.Store(scannerCfg.ExcessFolders) scannerExcessFolders.Store(scannerCfg.ExcessFolders)
logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait)) configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
case config.LoggerWebhookSubSys: case config.LoggerWebhookSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.LoggerWebhookSubSys) loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.LoggerWebhookSubSys)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err))
} }
userAgent := getUserAgent(getMinioMode()) userAgent := getUserAgent(getMinioMode())
for n, l := range loggerCfg.HTTP { for n, l := range loggerCfg.HTTP {
if l.Enabled { if l.Enabled {
l.LogOnceIf = logger.LogOnceConsoleIf l.LogOnceIf = configLogOnceConsoleIf
l.UserAgent = userAgent l.UserAgent = userAgent
l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
} }
loggerCfg.HTTP[n] = l loggerCfg.HTTP[n] = l
} }
if errs := logger.UpdateHTTPWebhooks(ctx, loggerCfg.HTTP); len(errs) > 0 { if errs := logger.UpdateHTTPWebhooks(ctx, loggerCfg.HTTP); len(errs) > 0 {
logger.LogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs)) configLogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs))
} }
case config.AuditWebhookSubSys: case config.AuditWebhookSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditWebhookSubSys) loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditWebhookSubSys)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err))
} }
userAgent := getUserAgent(getMinioMode()) userAgent := getUserAgent(getMinioMode())
for n, l := range loggerCfg.AuditWebhook { for n, l := range loggerCfg.AuditWebhook {
if l.Enabled { if l.Enabled {
l.LogOnceIf = logger.LogOnceConsoleIf l.LogOnceIf = configLogOnceConsoleIf
l.UserAgent = userAgent l.UserAgent = userAgent
l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
} }
@ -641,30 +641,30 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
} }
if errs := logger.UpdateAuditWebhooks(ctx, loggerCfg.AuditWebhook); len(errs) > 0 { if errs := logger.UpdateAuditWebhooks(ctx, loggerCfg.AuditWebhook); len(errs) > 0 {
logger.LogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs)) configLogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs))
} }
case config.AuditKafkaSubSys: case config.AuditKafkaSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditKafkaSubSys) loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditKafkaSubSys)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err))
} }
for n, l := range loggerCfg.AuditKafka { for n, l := range loggerCfg.AuditKafka {
if l.Enabled { if l.Enabled {
if l.TLS.Enable { if l.TLS.Enable {
l.TLS.RootCAs = globalRootCAs l.TLS.RootCAs = globalRootCAs
} }
l.LogOnce = logger.LogOnceIf l.LogOnce = configLogOnceIf
loggerCfg.AuditKafka[n] = l loggerCfg.AuditKafka[n] = l
} }
} }
if errs := logger.UpdateAuditKafkaTargets(ctx, loggerCfg); len(errs) > 0 { if errs := logger.UpdateAuditKafkaTargets(ctx, loggerCfg); len(errs) > 0 {
logger.LogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs)) configLogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs))
} }
case config.StorageClassSubSys: case config.StorageClassSubSys:
for i, setDriveCount := range setDriveCounts { for i, setDriveCount := range setDriveCounts {
sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount) sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
break break
} }
// if we validated all setDriveCounts and it was successful // if we validated all setDriveCounts and it was successful
@ -676,7 +676,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
case config.SubnetSubSys: case config.SubnetSubSys:
subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalProxyTransport) subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalProxyTransport)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
} else { } else {
globalSubnetConfig.Update(subnetConfig, globalIsCICD) globalSubnetConfig.Update(subnetConfig, globalIsCICD)
globalSubnetConfig.ApplyEnv() // update environment settings for Console UI globalSubnetConfig.ApplyEnv() // update environment settings for Console UI
@ -684,7 +684,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
case config.CallhomeSubSys: case config.CallhomeSubSys:
callhomeCfg, err := callhome.LookupConfig(s[config.CallhomeSubSys][config.Default]) callhomeCfg, err := callhome.LookupConfig(s[config.CallhomeSubSys][config.Default])
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err))
} else { } else {
enable := callhomeCfg.Enable && !globalCallhomeConfig.Enabled() enable := callhomeCfg.Enable && !globalCallhomeConfig.Enabled()
globalCallhomeConfig.Update(callhomeCfg) globalCallhomeConfig.Update(callhomeCfg)
@ -694,17 +694,17 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
} }
case config.DriveSubSys: case config.DriveSubSys:
if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil { if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
} else { } else {
err := globalDriveConfig.Update(driveConfig) err := globalDriveConfig.Update(driveConfig)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err)) configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
} }
} }
case config.CacheSubSys: case config.CacheSubSys:
cacheCfg, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport) cacheCfg, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err))
} else { } else {
globalCacheConfig.Update(cacheCfg) globalCacheConfig.Update(cacheCfg)
} }
@ -749,7 +749,7 @@ func autoGenerateRootCredentials() {
if manager, ok := GlobalKMS.(kms.KeyManager); ok { if manager, ok := GlobalKMS.(kms.KeyManager); ok {
stat, err := GlobalKMS.Stat(GlobalContext) stat, err := GlobalKMS.Stat(GlobalContext)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err, "Unable to generate root credentials using KMS") kmsLogIf(GlobalContext, err, "Unable to generate root credentials using KMS")
return return
} }
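Besides direct call sites, the webhook and audit hunks above also swap the function values stored in the target configs (l.LogOnceIf and l.LogOnce) from logger.LogOnceConsoleIf and logger.LogOnceIf to config-scoped equivalents. A sketch of what the console once-variant might look like, assuming it keeps the same signature as the value it replaces; the actual configLogOnceConsoleIf may differ.

package cmd

import (
    "context"

    "github.com/minio/minio/internal/logger"
)

// Hypothetical sketch: same shape as the function value previously assigned
// to l.LogOnceIf, so it can be plugged into the webhook target config
// unchanged; a real implementation would presumably add a config-subsystem tag.
func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
    logger.LogOnceConsoleIf(ctx, err, id, errKind...)
}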


@ -41,7 +41,6 @@ import (
"github.com/minio/minio/internal/config/heal" "github.com/minio/minio/internal/config/heal"
"github.com/minio/minio/internal/event" "github.com/minio/minio/internal/event"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/console" "github.com/minio/pkg/v2/console"
uatomic "go.uber.org/atomic" uatomic "go.uber.org/atomic"
) )
@ -122,13 +121,13 @@ func readBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer) backgroundH
buf, err := readConfig(ctx, objAPI, backgroundHealInfoPath) buf, err := readConfig(ctx, objAPI, backgroundHealInfoPath)
if err != nil { if err != nil {
if !errors.Is(err, errConfigNotFound) { if !errors.Is(err, errConfigNotFound) {
logger.LogOnceIf(ctx, err, backgroundHealInfoPath) internalLogOnceIf(ctx, err, backgroundHealInfoPath)
} }
return backgroundHealInfo{} return backgroundHealInfo{}
} }
var info backgroundHealInfo var info backgroundHealInfo
if err = json.Unmarshal(buf, &info); err != nil { if err = json.Unmarshal(buf, &info); err != nil {
logger.LogOnceIf(ctx, err, backgroundHealInfoPath) bugLogIf(ctx, err, backgroundHealInfoPath)
} }
return info return info
} }
@ -140,13 +139,13 @@ func saveBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer, info backgr
b, err := json.Marshal(info) b, err := json.Marshal(info)
if err != nil { if err != nil {
logger.LogIf(ctx, err) bugLogIf(ctx, err)
return return
} }
// Get last healing information // Get last healing information
err = saveConfig(ctx, objAPI, backgroundHealInfoPath, b) err = saveConfig(ctx, objAPI, backgroundHealInfoPath, b)
if err != nil { if err != nil {
logger.LogIf(ctx, err) internalLogIf(ctx, err)
} }
} }
@ -167,7 +166,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
cycleInfo.next = binary.LittleEndian.Uint64(buf[:8]) cycleInfo.next = binary.LittleEndian.Uint64(buf[:8])
buf = buf[8:] buf = buf[8:]
_, err := cycleInfo.UnmarshalMsg(buf) _, err := cycleInfo.UnmarshalMsg(buf)
logger.LogIf(ctx, err) bugLogIf(ctx, err)
} }
scannerTimer := time.NewTimer(scannerCycle.Load()) scannerTimer := time.NewTimer(scannerCycle.Load())
@ -204,7 +203,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
results := make(chan DataUsageInfo, 1) results := make(chan DataUsageInfo, 1)
go storeDataUsageInBackend(ctx, objAPI, results) go storeDataUsageInBackend(ctx, objAPI, results)
err := objAPI.NSScanner(ctx, results, uint32(cycleInfo.current), scanMode) err := objAPI.NSScanner(ctx, results, uint32(cycleInfo.current), scanMode)
logger.LogOnceIf(ctx, err, "ns-scanner") scannerLogIf(ctx, err)
res := map[string]string{"cycle": strconv.FormatUint(cycleInfo.current, 10)} res := map[string]string{"cycle": strconv.FormatUint(cycleInfo.current, 10)}
if err != nil { if err != nil {
res["error"] = err.Error() res["error"] = err.Error()
@ -224,7 +223,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
binary.LittleEndian.PutUint64(tmp, cycleInfo.next) binary.LittleEndian.PutUint64(tmp, cycleInfo.next)
tmp, _ = cycleInfo.MarshalMsg(tmp) tmp, _ = cycleInfo.MarshalMsg(tmp)
err = saveConfig(ctx, objAPI, dataUsageBloomNamePath, tmp) err = saveConfig(ctx, objAPI, dataUsageBloomNamePath, tmp)
logger.LogOnceIf(ctx, err, dataUsageBloomNamePath) scannerLogIf(ctx, err, dataUsageBloomNamePath)
} }
} }
} }
@ -752,7 +751,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
versionID: "", versionID: "",
}, madmin.HealItemObject) }, madmin.HealItemObject)
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogOnceIf(ctx, err, entry.name) scannerLogIf(ctx, err)
} }
foundObjs = foundObjs || err == nil foundObjs = foundObjs || err == nil
return return
@ -769,7 +768,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
}, madmin.HealItemObject) }, madmin.HealItemObject)
stopFn(int(ver.Size)) stopFn(int(ver.Size))
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogOnceIf(ctx, err, fiv.Name) scannerLogIf(ctx, err, fiv.Name)
} }
if err == nil { if err == nil {
successVersions++ successVersions++
@ -945,7 +944,7 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi Object
func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (action lifecycle.Action, size int64) { func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (action lifecycle.Action, size int64) {
size, err := oi.GetActualSize() size, err := oi.GetActualSize()
if i.debug { if i.debug {
logger.LogIf(ctx, err) scannerLogIf(ctx, err)
} }
if i.lifeCycle == nil { if i.lifeCycle == nil {
return action, size return action, size
@ -1123,7 +1122,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling}) err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling})
done() done()
if err != nil { if err != nil {
logger.LogOnceIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath()) healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath())
} }
} }
} }
@ -1199,7 +1198,7 @@ func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer,
if isErrObjectNotFound(err) || isErrVersionNotFound(err) { if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return false return false
} }
logger.LogOnceIf(ctx, err, obj.Name) ilmLogIf(ctx, err)
return false return false
} }
// Notification already sent in *expireTransitionedObject*, just return 'true' here. // Notification already sent in *expireTransitionedObject*, just return 'true' here.
@ -1248,7 +1247,7 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
return false return false
} }
// Assume it is still there. // Assume it is still there.
logger.LogOnceIf(ctx, err, "non-transition-expiry") ilmLogOnceIf(ctx, err, "non-transition-expiry")
return false return false
} }
if dobj.Name == "" { if dobj.Name == "" {


@ -37,7 +37,6 @@ import (
"github.com/minio/madmin-go/v3" "github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle" "github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
"github.com/valyala/bytebufferpool" "github.com/valyala/bytebufferpool"
) )
@ -635,7 +634,7 @@ func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHas
d.Cache[hash.Key()] = e d.Cache[hash.Key()] = e
for ch := range e.Children { for ch := range e.Children {
if ch == hash.Key() { if ch == hash.Key() {
logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference")) scannerLogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
return return
} }
d.copyWithChildren(src, dataUsageHash(ch), &hash) d.copyWithChildren(src, dataUsageHash(ch), &hash)
@ -1041,7 +1040,7 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string)
} }
if retries == 5 { if retries == 5 {
logger.LogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache") scannerLogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache")
} }
return nil return nil
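Note that the once-variants keep the caller-supplied deduplication key when they switch helpers, e.g. "retry-loading-data-usage-cache" above. Below is a minimal sketch of such a scanner-scoped once-helper, assuming it simply forwards the id to the existing logger.LogOnceIf so repeated errors still collapse into a single entry; the real definition may differ.

package cmd

import (
    "context"

    "github.com/minio/minio/internal/logger"
)

// Hypothetical sketch: a scanner-scoped "log once" helper that preserves the
// per-call id used for deduplication. The real helper from this commit may
// also tag the subsystem before forwarding.
func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
    logger.LogOnceIf(ctx, err, id, errKind...)
}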


@ -25,7 +25,6 @@ import (
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/cachevalue" "github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/logger"
) )
const ( const (
@ -49,7 +48,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
json := jsoniter.ConfigCompatibleWithStandardLibrary json := jsoniter.ConfigCompatibleWithStandardLibrary
dataUsageJSON, err := json.Marshal(dataUsageInfo) dataUsageJSON, err := json.Marshal(dataUsageInfo)
if err != nil { if err != nil {
logger.LogIf(ctx, err) scannerLogIf(ctx, err)
continue continue
} }
if attempts > 10 { if attempts > 10 {
@ -57,7 +56,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
attempts = 1 attempts = 1
} }
if err = saveConfig(ctx, objAPI, dataUsageObjNamePath, dataUsageJSON); err != nil { if err = saveConfig(ctx, objAPI, dataUsageObjNamePath, dataUsageJSON); err != nil {
logger.LogOnceIf(ctx, err, dataUsageObjNamePath) scannerLogOnceIf(ctx, err, dataUsageObjNamePath)
} }
attempts++ attempts++
} }


@ -1089,7 +1089,7 @@ func (o *ObjectInfo) decryptPartsChecksums() {
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted { if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter()("object-checksum", data) decrypted, err := o.metadataDecrypter()("object-checksum", data)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) encLogIf(GlobalContext, err)
return return
} }
data = decrypted data = decrypted
@ -1151,7 +1151,7 @@ func (o *ObjectInfo) decryptChecksums(part int) map[string]string {
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted { if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter()("object-checksum", data) decrypted, err := o.metadataDecrypter()("object-checksum", data)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) encLogIf(GlobalContext, err)
return nil return nil
} }
data = decrypted data = decrypted


@ -514,7 +514,7 @@ func (l EndpointServerPools) hostsSorted() []*xnet.Host {
} }
host, err := xnet.ParseHost(hostStr) host, err := xnet.ParseHost(hostStr)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) internalLogIf(GlobalContext, err)
continue continue
} }
hosts[i] = host hosts[i] = host
@ -645,7 +645,7 @@ func (endpoints Endpoints) UpdateIsLocal() error {
)) ))
ctx := logger.SetReqInfo(GlobalContext, ctx := logger.SetReqInfo(GlobalContext,
reqInfo) reqInfo)
logger.LogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", bootLogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP",
endpoints[i].Hostname()), endpoints[i].Hostname(), logger.ErrorKind) endpoints[i].Hostname()), endpoints[i].Hostname(), logger.ErrorKind)
} }
@ -675,7 +675,7 @@ func (endpoints Endpoints) UpdateIsLocal() error {
)) ))
ctx := logger.SetReqInfo(GlobalContext, ctx := logger.SetReqInfo(GlobalContext,
reqInfo) reqInfo)
logger.LogOnceIf(ctx, err, endpoints[i].Hostname(), logger.ErrorKind) bootLogOnceIf(ctx, err, endpoints[i].Hostname(), logger.ErrorKind)
} }
} else { } else {
resolvedList[i] = true resolvedList[i] = true
@ -837,7 +837,7 @@ func (p PoolEndpointList) UpdateIsLocal() error {
)) ))
ctx := logger.SetReqInfo(GlobalContext, ctx := logger.SetReqInfo(GlobalContext,
reqInfo) reqInfo)
logger.LogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", bootLogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP",
endpoint.Hostname()), endpoint.Hostname(), logger.ErrorKind) endpoint.Hostname()), endpoint.Hostname(), logger.ErrorKind)
} }
continue continue
@ -866,7 +866,7 @@ func (p PoolEndpointList) UpdateIsLocal() error {
)) ))
ctx := logger.SetReqInfo(GlobalContext, ctx := logger.SetReqInfo(GlobalContext,
reqInfo) reqInfo)
logger.LogOnceIf(ctx, fmt.Errorf("Unable to resolve DNS for %s: %w", endpoint, err), endpoint.Hostname(), logger.ErrorKind) bootLogOnceIf(ctx, fmt.Errorf("Unable to resolve DNS for %s: %w", endpoint, err), endpoint.Hostname(), logger.ErrorKind)
} }
} else { } else {
resolvedList[endpoint] = true resolvedList[endpoint] = true


@ -25,7 +25,6 @@ import (
"sync" "sync"
"time" "time"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/sync/errgroup" "github.com/minio/pkg/v2/sync/errgroup"
) )
@ -163,7 +162,7 @@ func readMultipleFiles(ctx context.Context, disks []StorageAPI, req ReadMultiple
continue continue
} }
if !IsErr(err, ignoredErrs...) { if !IsErr(err, ignoredErrs...) {
logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)", storageLogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
disks[index], req.Bucket, req.Prefix, err), disks[index], req.Bucket, req.Prefix, err),
disks[index].String()) disks[index].String())
} }


@ -450,7 +450,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(availableDisks) { if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(availableDisks) {
err := fmt.Errorf("unexpected file distribution (%v) from available disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", err := fmt.Errorf("unexpected file distribution (%v) from available disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)",
latestMeta.Erasure.Distribution, availableDisks, bucket, object, versionID) latestMeta.Erasure.Distribution, availableDisks, bucket, object, versionID)
logger.LogOnceIf(ctx, err, "heal-object-available-disks") healingLogOnceIf(ctx, err, "heal-object-available-disks")
return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
bucket, object, versionID), err bucket, object, versionID), err
} }
@ -460,7 +460,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(outDatedDisks) { if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(outDatedDisks) {
err := fmt.Errorf("unexpected file distribution (%v) from outdated disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", err := fmt.Errorf("unexpected file distribution (%v) from outdated disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)",
latestMeta.Erasure.Distribution, outDatedDisks, bucket, object, versionID) latestMeta.Erasure.Distribution, outDatedDisks, bucket, object, versionID)
logger.LogOnceIf(ctx, err, "heal-object-outdated-disks") healingLogOnceIf(ctx, err, "heal-object-outdated-disks")
return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
bucket, object, versionID), err bucket, object, versionID), err
} }
@ -470,7 +470,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(partsMetadata) { if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(partsMetadata) {
err := fmt.Errorf("unexpected file distribution (%v) from metadata entries (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", err := fmt.Errorf("unexpected file distribution (%v) from metadata entries (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)",
latestMeta.Erasure.Distribution, len(partsMetadata), bucket, object, versionID) latestMeta.Erasure.Distribution, len(partsMetadata), bucket, object, versionID)
logger.LogOnceIf(ctx, err, "heal-object-metadata-entries") healingLogOnceIf(ctx, err, "heal-object-metadata-entries")
return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
bucket, object, versionID), err bucket, object, versionID), err
} }


@ -22,7 +22,6 @@ import (
"errors" "errors"
"hash/crc32" "hash/crc32"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/sync/errgroup" "github.com/minio/pkg/v2/sync/errgroup"
) )
@ -284,7 +283,7 @@ func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []Stora
// the corresponding error in errs slice is not nil // the corresponding error in errs slice is not nil
func evalDisks(disks []StorageAPI, errs []error) []StorageAPI { func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
if len(errs) != len(disks) { if len(errs) != len(disks) {
logger.LogIf(GlobalContext, errors.New("unexpected drives/errors slice length")) bugLogIf(GlobalContext, errors.New("unexpected drives/errors slice length"))
return nil return nil
} }
newDisks := make([]StorageAPI, len(disks)) newDisks := make([]StorageAPI, len(disks))


@ -30,7 +30,6 @@ import (
"github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash/sha256" "github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/sync/errgroup" "github.com/minio/pkg/v2/sync/errgroup"
) )
@ -268,7 +267,7 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn
// Continue towards the next part. // Continue towards the next part.
partOffset -= part.Size partOffset -= part.Size
} }
logger.LogIf(ctx, InvalidRange{}) internalLogIf(ctx, InvalidRange{})
// Offset beyond the size of the object return InvalidRange. // Offset beyond the size of the object return InvalidRange.
return 0, 0, InvalidRange{} return 0, 0, InvalidRange{}
} }


@ -590,7 +590,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
data := r.Reader data := r.Reader
// Validate input data size and it can never be less than zero. // Validate input data size and it can never be less than zero.
if data.Size() < -1 { if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) bugLogIf(ctx, errInvalidArgument, logger.ErrorKind)
return pi, toObjectErr(errInvalidArgument) return pi, toObjectErr(errInvalidArgument)
} }
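The size check above guards a condition the surrounding comment says should never occur, so its failure is now routed through bugLogIf instead of a plain LogIf. The definition is outside these hunks; below is a minimal sketch, assuming it only marks the error as a bug-class report before handing it to the generic helper.

package cmd

import (
    "context"
    "fmt"

    "github.com/minio/minio/internal/logger"
)

// Hypothetical sketch only: prefix the error so bug-class reports stand out,
// then forward with any trailing kind/id arguments untouched. The actual
// bugLogIf added by this commit may route or tag these differently.
func bugLogIf(ctx context.Context, err error, errKind ...interface{}) {
    if err != nil {
        err = fmt.Errorf("BUG: %w", err)
    }
    logger.LogIf(ctx, err, errKind...)
}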
@ -1026,7 +1026,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
if len(partInfoFiles) != len(parts) { if len(partInfoFiles) != len(parts) {
// Should only happen through internal error // Should only happen through internal error
err := fmt.Errorf("unexpected part result count: %d, want %d", len(partInfoFiles), len(parts)) err := fmt.Errorf("unexpected part result count: %d, want %d", len(partInfoFiles), len(parts))
logger.LogIf(ctx, err) bugLogIf(ctx, err)
return oi, toObjectErr(err, bucket, object) return oi, toObjectErr(err, bucket, object)
} }
@ -1096,7 +1096,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
_, err := pfi.UnmarshalMsg(part.Data) _, err := pfi.UnmarshalMsg(part.Data)
if err != nil { if err != nil {
// Maybe crash or similar. // Maybe crash or similar.
logger.LogIf(ctx, err) bugLogIf(ctx, err)
return oi, InvalidPart{ return oi, InvalidPart{
PartNumber: partID, PartNumber: partID,
} }
@ -1105,7 +1105,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
partI := pfi.Parts[0] partI := pfi.Parts[0]
partNumber := partI.Number partNumber := partI.Number
if partID != partNumber { if partID != partNumber {
logger.LogIf(ctx, fmt.Errorf("part.%d.meta has incorrect corresponding part number: expected %d, got %d", partID, partID, partI.Number)) internalLogIf(ctx, fmt.Errorf("part.%d.meta has incorrect corresponding part number: expected %d, got %d", partID, partID, partI.Number))
return oi, InvalidPart{ return oi, InvalidPart{
PartNumber: partID, PartNumber: partID,
} }


@ -928,7 +928,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
if !fi.Deleted && len(fi.Erasure.Distribution) != len(onlineDisks) { if !fi.Deleted && len(fi.Erasure.Distribution) != len(onlineDisks) {
err := fmt.Errorf("unexpected file distribution (%v) from online disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", err := fmt.Errorf("unexpected file distribution (%v) from online disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)",
fi.Erasure.Distribution, onlineDisks, bucket, object, opts.VersionID) fi.Erasure.Distribution, onlineDisks, bucket, object, opts.VersionID)
logger.LogOnceIf(ctx, err, "get-object-file-info-manually-modified") storageLogOnceIf(ctx, err, "get-object-file-info-manually-modified")
return fi, nil, nil, toObjectErr(err, bucket, object, opts.VersionID) return fi, nil, nil, toObjectErr(err, bucket, object, opts.VersionID)
} }
@ -1107,7 +1107,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
// Validate input data size and it can never be less than zero. // Validate input data size and it can never be less than zero.
if data.Size() < -1 { if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) bugLogIf(ctx, errInvalidArgument, logger.ErrorKind)
return ObjectInfo{}, toObjectErr(errInvalidArgument) return ObjectInfo{}, toObjectErr(errInvalidArgument)
} }
@ -1297,7 +1297,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
// Validate input data size and it can never be less than -1. // Validate input data size and it can never be less than -1.
if data.Size() < -1 { if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) bugLogIf(ctx, errInvalidArgument, logger.ErrorKind)
return ObjectInfo{}, toObjectErr(errInvalidArgument) return ObjectInfo{}, toObjectErr(errInvalidArgument)
} }
@ -1459,7 +1459,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
toEncode = ra toEncode = ra
defer ra.Close() defer ra.Close()
} }
logger.LogIf(ctx, err) bugLogIf(ctx, err)
} }
n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum) n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum)
closeBitrotWriters(writers) closeBitrotWriters(writers)
@ -2389,7 +2389,7 @@ func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, obje
}, ObjectOptions{ }, ObjectOptions{
VersionID: oi.VersionID, VersionID: oi.VersionID,
}); err != nil { }); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err)) storageLogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err))
return err return err
} }
return nil return nil


@ -467,7 +467,7 @@ func (p poolMeta) save(ctx context.Context, pools []*erasureSets) error {
for i, eset := range pools { for i, eset := range pools {
if err = saveConfig(ctx, eset, poolMetaName, buf); err != nil { if err = saveConfig(ctx, eset, poolMetaName, buf); err != nil {
if !errors.Is(err, context.Canceled) { if !errors.Is(err, context.Canceled) {
logger.LogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err)) storageLogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err))
} }
return err return err
} }
@ -542,11 +542,11 @@ func (z *erasureServerPools) Init(ctx context.Context) error {
return return
} }
if configRetriableErrors(err) { if configRetriableErrors(err) {
logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err)) decomLogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err))
time.Sleep(time.Second + time.Duration(r.Float64()*float64(5*time.Second))) time.Sleep(time.Second + time.Duration(r.Float64()*float64(5*time.Second)))
continue continue
} }
logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err)) decomLogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err))
return return
} }
} }
@ -741,7 +741,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
const envDecomWorkers = "_MINIO_DECOMMISSION_WORKERS" const envDecomWorkers = "_MINIO_DECOMMISSION_WORKERS"
workerSize, err := env.GetInt(envDecomWorkers, len(pool.sets)) workerSize, err := env.GetInt(envDecomWorkers, len(pool.sets))
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) decomLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets)))
workerSize = len(pool.sets) workerSize = len(pool.sets)
} }
@ -852,7 +852,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
} }
stopFn(err) stopFn(err)
if err != nil { if err != nil {
logger.LogIf(ctx, err) decomLogIf(ctx, err)
failure = true failure = true
} }
z.poolMetaMutex.Lock() z.poolMetaMutex.Lock()
@ -877,7 +877,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
}); err != nil { }); err != nil {
stopFn(err) stopFn(err)
failure = true failure = true
logger.LogIf(ctx, err) decomLogIf(ctx, err)
continue continue
} }
stopFn(nil) stopFn(nil)
@ -906,20 +906,20 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) { if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) {
ignore = true ignore = true
stopFn(err) stopFn(err)
logger.LogIf(ctx, err) decomLogIf(ctx, err)
break break
} }
} }
if err != nil { if err != nil {
failure = true failure = true
logger.LogIf(ctx, err) decomLogIf(ctx, err)
stopFn(err) stopFn(err)
continue continue
} }
if err = z.decommissionObject(ctx, bi.Name, gr); err != nil { if err = z.decommissionObject(ctx, bi.Name, gr); err != nil {
stopFn(err) stopFn(err)
failure = true failure = true
logger.LogIf(ctx, err) decomLogIf(ctx, err)
continue continue
} }
stopFn(nil) stopFn(nil)
@ -953,13 +953,13 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
stopFn(err) stopFn(err)
auditLogDecom(ctx, "DecomDeleteObject", bi.Name, entry.name, "", err) auditLogDecom(ctx, "DecomDeleteObject", bi.Name, entry.name, "", err)
if err != nil { if err != nil {
logger.LogIf(ctx, err) decomLogIf(ctx, err)
} }
} }
z.poolMetaMutex.Lock() z.poolMetaMutex.Lock()
z.poolMeta.TrackCurrentBucketObject(idx, bi.Name, entry.name) z.poolMeta.TrackCurrentBucketObject(idx, bi.Name, entry.name)
ok, err := z.poolMeta.updateAfter(ctx, idx, z.serverPools, 30*time.Second) ok, err := z.poolMeta.updateAfter(ctx, idx, z.serverPools, 30*time.Second)
logger.LogIf(ctx, err) decomLogIf(ctx, err)
if ok { if ok {
globalNotificationSys.ReloadPoolMeta(ctx) globalNotificationSys.ReloadPoolMeta(ctx)
} }
@ -987,7 +987,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
} }
setN := humanize.Ordinal(setIdx + 1) setN := humanize.Ordinal(setIdx + 1)
retryDur := time.Duration(rand.Float64() * float64(5*time.Second)) retryDur := time.Duration(rand.Float64() * float64(5*time.Second))
logger.LogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v, retrying in %v", setN, err, retryDur), "decom-listing-failed"+setN) decomLogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v, retrying in %v", setN, err, retryDur), "decom-listing-failed"+setN)
time.Sleep(retryDur) time.Sleep(retryDur)
} }
}(setIdx) }(setIdx)
@ -1055,7 +1055,7 @@ func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx i
z.poolMetaMutex.Lock() z.poolMetaMutex.Lock()
if z.poolMeta.BucketDone(idx, bucket) { if z.poolMeta.BucketDone(idx, bucket) {
// remove from pendingBuckets and persist. // remove from pendingBuckets and persist.
logger.LogIf(ctx, z.poolMeta.save(ctx, z.serverPools)) decomLogIf(ctx, z.poolMeta.save(ctx, z.serverPools))
} }
z.poolMetaMutex.Unlock() z.poolMetaMutex.Unlock()
continue continue
@ -1072,7 +1072,7 @@ func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx i
z.poolMetaMutex.Lock() z.poolMetaMutex.Lock()
if z.poolMeta.BucketDone(idx, bucket) { if z.poolMeta.BucketDone(idx, bucket) {
logger.LogIf(ctx, z.poolMeta.save(ctx, z.serverPools)) decomLogIf(ctx, z.poolMeta.save(ctx, z.serverPools))
} }
z.poolMetaMutex.Unlock() z.poolMetaMutex.Unlock()
} }
@ -1170,8 +1170,8 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in
dctx = logger.SetReqInfo(dctx, &logger.ReqInfo{}) dctx = logger.SetReqInfo(dctx, &logger.ReqInfo{})
if err := z.decommissionInBackground(dctx, idx); err != nil { if err := z.decommissionInBackground(dctx, idx); err != nil {
logger.LogIf(GlobalContext, err) decomLogIf(GlobalContext, err)
logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) decomLogIf(GlobalContext, z.DecommissionFailed(dctx, idx))
return return
} }
@ -1181,20 +1181,20 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in
z.poolMetaMutex.Unlock() z.poolMetaMutex.Unlock()
if !failed { if !failed {
logger.Event(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine) decomLogEvent(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine)
err := z.checkAfterDecom(dctx, idx) err := z.checkAfterDecom(dctx, idx)
if err != nil { if err != nil {
logger.LogIf(ctx, err) decomLogIf(ctx, err)
failed = true failed = true
} }
} }
if failed { if failed {
// Decommission failed indicate as such. // Decommission failed indicate as such.
logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) decomLogIf(GlobalContext, z.DecommissionFailed(dctx, idx))
} else { } else {
// Complete the decommission.. // Complete the decommission..
logger.LogIf(GlobalContext, z.CompleteDecommission(dctx, idx)) decomLogIf(GlobalContext, z.CompleteDecommission(dctx, idx))
} }
} }


@ -146,7 +146,7 @@ func (z *erasureServerPools) updateRebalanceStats(ctx context.Context) error {
lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName) lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
lkCtx, err := lock.GetLock(ctx, globalOperationTimeout) lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
return err return err
} }
defer lock.Unlock(lkCtx) defer lock.Unlock(lkCtx)
@ -423,7 +423,7 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg) stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg)
err := z.saveRebalanceStats(ctx, poolIdx, rebalSaveStats) err := z.saveRebalanceStats(ctx, poolIdx, rebalSaveStats)
stopFn(err) stopFn(err)
logger.LogIf(ctx, err) rebalanceLogIf(ctx, err)
timer.Reset(randSleepFor()) timer.Reset(randSleepFor())
if rebalDone { if rebalDone {
@ -432,7 +432,7 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
} }
}() }()
logger.Event(ctx, "Pool %d rebalancing is started", poolIdx+1) rebalanceLogEvent(ctx, "Pool %d rebalancing is started", poolIdx+1)
for { for {
select { select {
@ -451,14 +451,14 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
err = z.rebalanceBucket(ctx, bucket, poolIdx) err = z.rebalanceBucket(ctx, bucket, poolIdx)
if err != nil { if err != nil {
stopFn(err) stopFn(err)
logger.LogIf(ctx, err) rebalanceLogIf(ctx, err)
return return
} }
stopFn(nil) stopFn(nil)
z.bucketRebalanceDone(bucket, poolIdx) z.bucketRebalanceDone(bucket, poolIdx)
} }
logger.Event(ctx, "Pool %d rebalancing is done", poolIdx+1) rebalanceLogEvent(ctx, "Pool %d rebalancing is done", poolIdx+1)
return err return err
} }
@ -535,7 +535,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
const envRebalanceWorkers = "_MINIO_REBALANCE_WORKERS" const envRebalanceWorkers = "_MINIO_REBALANCE_WORKERS"
workerSize, err := env.GetInt(envRebalanceWorkers, len(pool.sets)) workerSize, err := env.GetInt(envRebalanceWorkers, len(pool.sets))
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) rebalanceLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets)))
workerSize = len(pool.sets) workerSize = len(pool.sets)
} }
@ -630,7 +630,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
}) })
var failure bool var failure bool
if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogIf(ctx, err) rebalanceLogIf(ctx, err)
failure = true failure = true
} }
@ -665,14 +665,14 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
} }
if err != nil { if err != nil {
failure = true failure = true
logger.LogIf(ctx, err) rebalanceLogIf(ctx, err)
stopFn(err) stopFn(err)
continue continue
} }
if err = z.rebalanceObject(ctx, bucket, gr); err != nil { if err = z.rebalanceObject(ctx, bucket, gr); err != nil {
failure = true failure = true
logger.LogIf(ctx, err) rebalanceLogIf(ctx, err)
stopFn(err) stopFn(err)
continue continue
} }
@ -706,7 +706,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
stopFn(err) stopFn(err)
auditLogRebalance(ctx, "Rebalance:DeleteObject", bucket, entry.name, "", err) auditLogRebalance(ctx, "Rebalance:DeleteObject", bucket, entry.name, "", err)
if err != nil { if err != nil {
logger.LogIf(ctx, err) rebalanceLogIf(ctx, err)
} }
} }
} }
@ -724,7 +724,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
return return
} }
setN := humanize.Ordinal(setIdx + 1) setN := humanize.Ordinal(setIdx + 1)
logger.LogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v", setN, err), "rebalance-listing-failed"+setN) rebalanceLogIf(ctx, fmt.Errorf("listing objects from %s set failed with %v", setN, err), "rebalance-listing-failed"+setN)
}(setIdx) }(setIdx)
} }
@ -743,7 +743,7 @@ func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int
lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName) lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
lkCtx, err := lock.GetLock(ctx, globalOperationTimeout) lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
return err return err
} }
defer lock.Unlock(lkCtx) defer lock.Unlock(lkCtx)


@ -207,7 +207,7 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ
logger.Fatal(err, "Unable to initialize backend") logger.Fatal(err, "Unable to initialize backend")
} }
retry := time.Duration(r.Float64() * float64(5*time.Second)) retry := time.Duration(r.Float64() * float64(5*time.Second))
logger.LogIf(ctx, fmt.Errorf("Unable to initialize backend: %w, retrying in %s", err, retry)) storageLogIf(ctx, fmt.Errorf("Unable to initialize backend: %w, retrying in %s", err, retry))
time.Sleep(retry) time.Sleep(retry)
attempt++ attempt++
continue continue
@ -376,7 +376,7 @@ func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, bucket, ob
} }
} }
// Should not happen, but print values just in case. // Should not happen, but print values just in case.
logger.LogIf(ctx, fmt.Errorf("reached end of serverPools (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) storageLogIf(ctx, fmt.Errorf("reached end of serverPools (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose))
return -1 return -1
} }
@ -610,7 +610,7 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
for _, err := range g.Wait() { for _, err := range g.Wait() {
if err != nil { if err != nil {
logger.LogIf(ctx, err) storageLogIf(ctx, err)
} }
// let's the rest shutdown // let's the rest shutdown
} }
@ -714,7 +714,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, updates chan<- DataU
// Start scanner. Blocks until done. // Start scanner. Blocks until done.
err := erObj.nsScanner(ctx, allBuckets, wantCycle, updates, healScanMode) err := erObj.nsScanner(ctx, allBuckets, wantCycle, updates, healScanMode)
if err != nil { if err != nil {
logger.LogIf(ctx, err) scannerLogIf(ctx, err)
mu.Lock() mu.Lock()
if firstErr == nil { if firstErr == nil {
firstErr = err firstErr = err
@ -1329,7 +1329,7 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre
merged, err := z.listPath(ctx, &opts) merged, err := z.listPath(ctx, &opts)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
if !isErrBucketNotFound(err) { if !isErrBucketNotFound(err) {
logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) storageLogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket)
} }
return loi, toObjectErr(err, bucket) return loi, toObjectErr(err, bucket)
} }
@ -1523,7 +1523,7 @@ func (z *erasureServerPools) listObjectsGeneric(ctx context.Context, bucket, pre
merged, err := z.listPath(ctx, &opts) merged, err := z.listPath(ctx, &opts)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
if !isErrBucketNotFound(err) { if !isErrBucketNotFound(err) {
logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) storageLogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket)
} }
return loi, toObjectErr(err, bucket) return loi, toObjectErr(err, bucket)
} }
@ -1945,7 +1945,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi
for _, pool := range z.serverPools { for _, pool := range z.serverPools {
result, err := pool.HealFormat(ctx, dryRun) result, err := pool.HealFormat(ctx, dryRun)
if err != nil && !errors.Is(err, errNoHealRequired) { if err != nil && !errors.Is(err, errNoHealRequired) {
logger.LogOnceIf(ctx, err, "erasure-heal-format") healingLogOnceIf(ctx, err, "erasure-heal-format")
continue continue
} }
// Count errNoHealRequired across all serverPools, // Count errNoHealRequired across all serverPools,
@ -2136,7 +2136,7 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
} }
if err := listPathRaw(ctx, lopts); err != nil { if err := listPathRaw(ctx, lopts); err != nil {
logger.LogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts)) storageLogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts))
cancel() cancel()
return return
} }
@ -2181,7 +2181,7 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str
if opts.Remove && !opts.DryRun { if opts.Remove && !opts.DryRun {
err := z.CheckAbandonedParts(ctx, bucket, entry.name, opts) err := z.CheckAbandonedParts(ctx, bucket, entry.name, opts)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", bucket, entry.name, err)) healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", bucket, entry.name, err))
} }
} }
for _, version := range fivs.Versions { for _, version := range fivs.Versions {
@ -2385,7 +2385,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
// Check if disks are healing on in-case of VMware vsphere deployments. // Check if disks are healing on in-case of VMware vsphere deployments.
if opts.Maintenance && opts.DeploymentType == vmware { if opts.Maintenance && opts.DeploymentType == vmware {
if drivesHealing > 0 { if drivesHealing > 0 {
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", drivesHealing)) healingLogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", drivesHealing))
} }
} }
@ -2445,15 +2445,15 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
healthy := erasureSetUpCount[poolIdx][setIdx].online >= poolWriteQuorums[poolIdx] healthy := erasureSetUpCount[poolIdx][setIdx].online >= poolWriteQuorums[poolIdx]
if !healthy { if !healthy {
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), storageLogIf(logger.SetReqInfo(ctx, reqInfo),
fmt.Errorf("Write quorum may be lost on pool: %d, set: %d, expected write quorum: %d", fmt.Errorf("Write quorum may be lost on pool: %d, set: %d, expected write quorum: %d",
poolIdx, setIdx, poolWriteQuorums[poolIdx])) poolIdx, setIdx, poolWriteQuorums[poolIdx]), logger.FatalKind)
} }
result.Healthy = result.Healthy && healthy result.Healthy = result.Healthy && healthy
healthyRead := erasureSetUpCount[poolIdx][setIdx].online >= poolReadQuorums[poolIdx] healthyRead := erasureSetUpCount[poolIdx][setIdx].online >= poolReadQuorums[poolIdx]
if !healthyRead { if !healthyRead {
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), storageLogIf(logger.SetReqInfo(ctx, reqInfo),
fmt.Errorf("Read quorum may be lost on pool: %d, set: %d, expected read quorum: %d", fmt.Errorf("Read quorum may be lost on pool: %d, set: %d, expected read quorum: %d",
poolIdx, setIdx, poolReadQuorums[poolIdx])) poolIdx, setIdx, poolReadQuorums[poolIdx]))
} }
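
The Health hunk above forwards a severity hint (logger.FatalKind) as a trailing argument to storageLogIf, and later IAM hunks do the same with logger.WarningKind; the subsystem wrappers simply pass such hints through their variadic errKind parameter. Below is a minimal, self-contained Go sketch of that pass-through. The names logIf, logKind, errorKind and fatalKind are stand-ins for illustration only, not MinIO's internal/logger types.

package main

import (
	"context"
	"errors"
	"log"
)

// logKind is a stand-in for the severity kinds defined by the real logger package.
type logKind string

const (
	errorKind logKind = "ERROR"
	fatalKind logKind = "FATAL"
)

// logIf approximates the shape of logger.LogIf: the trailing variadic slot
// may carry optional hints such as a severity kind.
func logIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
	if err == nil {
		return
	}
	kind := errorKind
	for _, k := range errKind {
		if v, ok := k.(logKind); ok {
			kind = v
		}
	}
	log.Printf("%s [%s] %v", kind, subsystem, err)
}

// storageLogIf mirrors the wrapper shape used above: the subsystem tag is
// fixed, everything else is forwarded unchanged.
func storageLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logIf(ctx, "storage", err, errKind...)
}

func main() {
	storageLogIf(context.Background(), errors.New("write quorum may be lost on pool 0, set 3"), fatalKind)
}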


@ -448,7 +448,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
diskID, err := disk.GetDiskID() diskID, err := disk.GetDiskID()
if err != nil { if err != nil {
if !errors.Is(err, errUnformattedDisk) { if !errors.Is(err, errUnformattedDisk) {
logger.LogIf(ctx, err) bootLogIf(ctx, err)
} }
return return
} }
@ -457,11 +457,11 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
} }
m, n, err := findDiskIndexByDiskID(format, diskID) m, n, err := findDiskIndexByDiskID(format, diskID)
if err != nil { if err != nil {
logger.LogIf(ctx, err) bootLogIf(ctx, err)
return return
} }
if m != i || n != j { if m != i || n != j {
logger.LogIf(ctx, fmt.Errorf("Detected unexpected drive ordering refusing to use the drive - poolID: %s, found drive mounted at (set=%s, drive=%s) expected mount at (set=%s, drive=%s): %s(%s)", humanize.Ordinal(poolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(i+1), humanize.Ordinal(j+1), disk, diskID)) bootLogIf(ctx, fmt.Errorf("Detected unexpected drive ordering refusing to use the drive - poolID: %s, found drive mounted at (set=%s, drive=%s) expected mount at (set=%s, drive=%s): %s(%s)", humanize.Ordinal(poolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(i+1), humanize.Ordinal(j+1), disk, diskID))
s.erasureDisks[i][j] = &unrecognizedDisk{storage: disk} s.erasureDisks[i][j] = &unrecognizedDisk{storage: disk}
return return
} }
@ -1083,7 +1083,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
if !reflect.DeepEqual(s.format, refFormat) { if !reflect.DeepEqual(s.format, refFormat) {
// Format is corrupted and unrecognized by the running instance. // Format is corrupted and unrecognized by the running instance.
logger.LogIf(ctx, fmt.Errorf("Unable to heal the newly replaced drives due to format.json inconsistencies, please engage MinIO support for further assistance: %w", healingLogIf(ctx, fmt.Errorf("Unable to heal the newly replaced drives due to format.json inconsistencies, please engage MinIO support for further assistance: %w",
errCorruptedFormat)) errCorruptedFormat))
return res, errCorruptedFormat return res, errCorruptedFormat
} }
@ -1112,7 +1112,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
continue continue
} }
if err := saveFormatErasure(storageDisks[index], format, formatOpID); err != nil { if err := saveFormatErasure(storageDisks[index], format, formatOpID); err != nil {
logger.LogIf(ctx, fmt.Errorf("Drive %s failed to write updated 'format.json': %v", storageDisks[index], err)) healingLogIf(ctx, fmt.Errorf("Drive %s failed to write updated 'format.json': %v", storageDisks[index], err))
storageDisks[index].Close() storageDisks[index].Close()
tmpNewFormats[index] = nil // this disk failed to write new format tmpNewFormats[index] = nil // this disk failed to write new format
} }
@ -1127,7 +1127,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
m, n, err := findDiskIndexByDiskID(refFormat, format.Erasure.This) m, n, err := findDiskIndexByDiskID(refFormat, format.Erasure.This)
if err != nil { if err != nil {
logger.LogIf(ctx, err) healingLogIf(ctx, err)
continue continue
} }


@ -31,7 +31,6 @@ import (
"github.com/minio/madmin-go/v3" "github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/dsync" "github.com/minio/minio/internal/dsync"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/sync/errgroup" "github.com/minio/pkg/v2/sync/errgroup"
) )
@ -384,7 +383,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
// Collect disks we can use. // Collect disks we can use.
disks, healing := er.getOnlineDisksWithHealing(false) disks, healing := er.getOnlineDisksWithHealing(false)
if len(disks) == 0 { if len(disks) == 0 {
logger.LogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle")) scannerLogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle"))
return nil return nil
} }
@ -449,7 +448,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
if cache.Info.LastUpdate.Equal(lastSave) { if cache.Info.LastUpdate.Equal(lastSave) {
continue continue
} }
logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update") scannerLogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update")
updates <- cache.clone() updates <- cache.clone()
lastSave = cache.Info.LastUpdate lastSave = cache.Info.LastUpdate
@ -458,7 +457,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
// Save final state... // Save final state...
cache.Info.NextCycle = wantCycle cache.Info.NextCycle = wantCycle
cache.Info.LastUpdate = time.Now() cache.Info.LastUpdate = time.Now()
logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed") scannerLogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed")
updates <- cache.clone() updates <- cache.clone()
return return
} }
@ -494,7 +493,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
// Load cache for bucket // Load cache for bucket
cacheName := pathJoin(bucket.Name, dataUsageCacheName) cacheName := pathJoin(bucket.Name, dataUsageCacheName)
cache := dataUsageCache{} cache := dataUsageCache{}
logger.LogIf(ctx, cache.load(ctx, er, cacheName)) scannerLogIf(ctx, cache.load(ctx, er, cacheName))
if cache.Info.Name == "" { if cache.Info.Name == "" {
cache.Info.Name = bucket.Name cache.Info.Name = bucket.Name
} }
@ -530,9 +529,9 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
cache, err = disk.NSScanner(ctx, cache, updates, healScanMode, nil) cache, err = disk.NSScanner(ctx, cache, updates, healScanMode, nil)
if err != nil { if err != nil {
if !cache.Info.LastUpdate.IsZero() && cache.Info.LastUpdate.After(before) { if !cache.Info.LastUpdate.IsZero() && cache.Info.LastUpdate.After(before) {
logger.LogIf(ctx, cache.save(ctx, er, cacheName)) scannerLogIf(ctx, cache.save(ctx, er, cacheName))
} else { } else {
logger.LogIf(ctx, err) scannerLogIf(ctx, err)
} }
// This ensures that we don't close // This ensures that we don't close
// bucketResults channel while the // bucketResults channel while the
@ -562,7 +561,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
} }
// Save cache // Save cache
logger.LogIf(ctx, cache.save(ctx, er, cacheName)) scannerLogIf(ctx, cache.save(ctx, er, cacheName))
} }
}(i) }(i)
} }


@ -22,7 +22,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/minio/minio/internal/logger"
etcd "go.etcd.io/etcd/client/v3" etcd "go.etcd.io/etcd/client/v3"
) )
@ -48,7 +47,7 @@ func saveKeyEtcdWithTTL(ctx context.Context, client *etcd.Client, key string, da
return etcdErrToErr(err, client.Endpoints()) return etcdErrToErr(err, client.Endpoints())
} }
_, err = client.Put(timeoutCtx, key, string(data), etcd.WithLease(lease.ID)) _, err = client.Put(timeoutCtx, key, string(data), etcd.WithLease(lease.ID))
logger.LogIf(ctx, err) etcdLogIf(ctx, err)
return etcdErrToErr(err, client.Endpoints()) return etcdErrToErr(err, client.Endpoints())
} }
@ -59,7 +58,7 @@ func saveKeyEtcd(ctx context.Context, client *etcd.Client, key string, data []by
return saveKeyEtcdWithTTL(ctx, client, key, data, opts[0].ttl) return saveKeyEtcdWithTTL(ctx, client, key, data, opts[0].ttl)
} }
_, err := client.Put(timeoutCtx, key, string(data)) _, err := client.Put(timeoutCtx, key, string(data))
logger.LogIf(ctx, err) etcdLogIf(ctx, err)
return etcdErrToErr(err, client.Endpoints()) return etcdErrToErr(err, client.Endpoints())
} }
@ -68,7 +67,7 @@ func deleteKeyEtcd(ctx context.Context, client *etcd.Client, key string) error {
defer cancel() defer cancel()
_, err := client.Delete(timeoutCtx, key) _, err := client.Delete(timeoutCtx, key)
logger.LogIf(ctx, err) etcdLogIf(ctx, err)
return etcdErrToErr(err, client.Endpoints()) return etcdErrToErr(err, client.Endpoints())
} }
@ -77,7 +76,7 @@ func readKeyEtcd(ctx context.Context, client *etcd.Client, key string) ([]byte,
defer cancel() defer cancel()
resp, err := client.Get(timeoutCtx, key) resp, err := client.Get(timeoutCtx, key)
if err != nil { if err != nil {
logger.LogOnceIf(ctx, err, "etcd-retrieve-keys") etcdLogOnceIf(ctx, err, "etcd-retrieve-keys")
return nil, etcdErrToErr(err, client.Endpoints()) return nil, etcdErrToErr(err, client.Endpoints())
} }
if resp.Count == 0 { if resp.Count == 0 {
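
The etcd helpers above keep the LogOnce variant together with a string id such as "etcd-retrieve-keys", so a failure that repeats on every call is reported once per id instead of flooding the log. Below is a hedged sketch of that deduplication idea; onceLogger and logOnceIf are illustrative names, and the real logger additionally expires entries and attaches request info.

package main

import (
	"context"
	"errors"
	"log"
	"sync"
)

// onceLogger sketches the LogOnceIf idea: identical errors under the same id
// are emitted only once.
type onceLogger struct {
	mu   sync.Mutex
	seen map[string]string // id -> last error text
}

func (o *onceLogger) logOnceIf(ctx context.Context, subsystem string, err error, id string) {
	if err == nil {
		return
	}
	o.mu.Lock()
	defer o.mu.Unlock()
	if o.seen == nil {
		o.seen = make(map[string]string)
	}
	if o.seen[id] == err.Error() {
		return // same error already reported under this id
	}
	o.seen[id] = err.Error()
	log.Printf("[%s] %v (id=%s)", subsystem, err, id)
}

func main() {
	var lg onceLogger
	err := errors.New("etcd endpoint unreachable")
	for i := 0; i < 3; i++ {
		lg.logOnceIf(context.Background(), "etcd", err, "etcd-retrieve-keys") // printed only once
	}
}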


@ -28,7 +28,6 @@ import (
"github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/event" "github.com/minio/minio/internal/event"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/pubsub" "github.com/minio/minio/internal/pubsub"
"github.com/minio/pkg/v2/policy" "github.com/minio/pkg/v2/policy"
) )
@ -83,7 +82,7 @@ func (evnot *EventNotifier) set(bucket BucketInfo, meta BucketMetadata) {
config.SetRegion(globalSite.Region) config.SetRegion(globalSite.Region)
if err := config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil { if err := config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil {
if _, ok := err.(*event.ErrARNNotFound); !ok { if _, ok := err.(*event.ErrARNNotFound); !ok {
logger.LogIf(GlobalContext, err) internalLogIf(GlobalContext, err)
} }
} }
evnot.AddRulesMap(bucket.Name, config.ToRulesMap()) evnot.AddRulesMap(bucket.Name, config.ToRulesMap())


@ -278,7 +278,7 @@ func formatErasureMigrateV2ToV3(data []byte, export, version string) ([]byte, er
tmpOld := pathJoin(export, minioMetaTmpDeletedBucket, mustGetUUID()) tmpOld := pathJoin(export, minioMetaTmpDeletedBucket, mustGetUUID())
if err := renameAll(pathJoin(export, minioMetaMultipartBucket), if err := renameAll(pathJoin(export, minioMetaMultipartBucket),
tmpOld, export); err != nil && err != errFileNotFound { tmpOld, export); err != nil && err != errFileNotFound {
logger.LogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate", bootLogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate",
pathJoin(export, minioMetaMultipartBucket), pathJoin(export, minioMetaMultipartBucket),
tmpOld, tmpOld,
osErrToFileErr(err))) osErrToFileErr(err)))
@ -570,7 +570,7 @@ func formatErasureFixLocalDeploymentID(endpoints Endpoints, storageDisks []Stora
format.ID = refFormat.ID format.ID = refFormat.ID
// Heal the drive if we fixed its deployment ID. // Heal the drive if we fixed its deployment ID.
if err := saveFormatErasure(storageDisks[index], format, mustGetUUID()); err != nil { if err := saveFormatErasure(storageDisks[index], format, mustGetUUID()); err != nil {
logger.LogIf(GlobalContext, err) bootLogIf(GlobalContext, err)
return fmt.Errorf("Unable to save format.json, %w", err) return fmt.Errorf("Unable to save format.json, %w", err)
} }
} }


@ -33,7 +33,6 @@ import (
"github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/auth"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
ftp "goftp.io/server/v2" ftp "goftp.io/server/v2"
) )
@ -323,7 +322,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error)
} }
// Call hook for site replication. // Call hook for site replication.
logger.LogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ replLogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc, Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{ STSCredential: &madmin.SRSTSCredential{
AccessKey: cred.AccessKey, AccessKey: cred.AccessKey,


@ -153,7 +153,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
_, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ScanMode: scanMode}) _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ScanMode: scanMode})
if err != nil { if err != nil {
// Log bucket healing error if any, we shall retry again. // Log bucket healing error if any, we shall retry again.
logger.LogIf(ctx, err) healingLogIf(ctx, err)
} }
} }
@ -177,7 +177,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
numHealers = uint64(v) numHealers = uint64(v)
} }
logger.Event(ctx, fmt.Sprintf("Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers)) healingLogEvent(ctx, fmt.Sprintf("Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers))
jt, _ := workers.New(int(numHealers)) jt, _ := workers.New(int(numHealers))
@ -204,7 +204,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
if _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ if _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{
ScanMode: scanMode, ScanMode: scanMode,
}); err != nil { }); err != nil {
logger.LogIf(ctx, err) healingLogIf(ctx, err)
continue continue
} }
@ -226,7 +226,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
if len(disks) == 0 { if len(disks) == 0 {
// No object healing necessary // No object healing necessary
tracker.bucketDone(bucket) tracker.bucketDone(bucket)
logger.LogIf(ctx, tracker.update(ctx)) healingLogIf(ctx, tracker.update(ctx))
continue continue
} }
@ -293,7 +293,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
if res.entryDone { if res.entryDone {
tracker.setObject(res.name) tracker.setObject(res.name)
if time.Since(tracker.getLastUpdate()) > time.Minute { if time.Since(tracker.getLastUpdate()) > time.Minute {
logger.LogIf(ctx, tracker.update(ctx)) healingLogIf(ctx, tracker.update(ctx))
} }
continue continue
} }
@ -306,7 +306,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
select { select {
case <-ctx.Done(): case <-ctx.Done():
if !contextCanceled(ctx) { if !contextCanceled(ctx) {
logger.LogIf(ctx, ctx.Err()) healingLogIf(ctx, ctx.Err())
} }
return false return false
case results <- result: case results <- result:
@ -360,7 +360,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
return return
} }
result = healEntryFailure(0) result = healEntryFailure(0)
logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err)) healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err))
} else { } else {
result = healEntrySuccess(0) result = healEntrySuccess(0)
} }
@ -399,9 +399,9 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
// If not deleted, assume they failed. // If not deleted, assume they failed.
result = healEntryFailure(uint64(version.Size)) result = healEntryFailure(uint64(version.Size))
if version.VersionID != "" { if version.VersionID != "" {
logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s-v(%s): %w", bucket, version.Name, version.VersionID, err)) healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s-v(%s): %w", bucket, version.Name, version.VersionID, err))
} else { } else {
logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err)) healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err))
} }
} else { } else {
result = healEntrySuccess(uint64(version.Size)) result = healEntrySuccess(uint64(version.Size))
@ -465,7 +465,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
// we let the caller retry this disk again for the // we let the caller retry this disk again for the
// buckets it failed to list. // buckets it failed to list.
retErr = err retErr = err
logger.LogIf(ctx, err) healingLogIf(ctx, err)
continue continue
} }
@ -475,7 +475,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
return ctx.Err() return ctx.Err()
default: default:
tracker.bucketDone(bucket) tracker.bucketDone(bucket)
logger.LogIf(ctx, tracker.update(ctx)) healingLogIf(ctx, tracker.update(ctx))
} }
} }


@ -51,7 +51,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError
locationConstraint := createBucketLocationConfiguration{} locationConstraint := createBucketLocationConfiguration{}
err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength) err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength)
if err != nil && r.ContentLength != 0 { if err != nil && r.ContentLength != 0 {
logger.LogOnceIf(GlobalContext, err, "location-constraint-xml-parsing") internalLogOnceIf(GlobalContext, err, "location-constraint-xml-parsing")
// Treat all other failures as XML parsing errors. // Treat all other failures as XML parsing errors.
return "", ErrMalformedXML return "", ErrMalformedXML
} // else for both err as nil or io.EOF } // else for both err as nil or io.EOF
@ -191,7 +191,7 @@ func extractMetadata(ctx context.Context, mimesHeader ...textproto.MIMEHeader) (
// extractMetadata extracts metadata from map values. // extractMetadata extracts metadata from map values.
func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[string]string) error { func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[string]string) error {
if v == nil { if v == nil {
logger.LogIf(ctx, errInvalidArgument) bugLogIf(ctx, errInvalidArgument)
return errInvalidArgument return errInvalidArgument
} }
@ -461,7 +461,7 @@ func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, e
ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) { ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
success = false success = false
if err != nil && !errors.Is(err, context.Canceled) { if err != nil && !errors.Is(err, context.Canceled) {
logger.LogIf(GlobalContext, err) replLogIf(GlobalContext, err)
} }
}, },
}) })


@ -30,7 +30,6 @@ import (
"github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/puzpuzpuz/xsync/v3" "github.com/puzpuzpuz/xsync/v3"
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
etcd "go.etcd.io/etcd/client/v3" etcd "go.etcd.io/etcd/client/v3"
@ -460,7 +459,7 @@ func (ies *IAMEtcdStore) watch(ctx context.Context, keyPath string) <-chan iamWa
goto outerLoop goto outerLoop
} }
if err := watchResp.Err(); err != nil { if err := watchResp.Err(); err != nil {
logger.LogIf(ctx, err) iamLogIf(ctx, err)
// log and retry. // log and retry.
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
// Upon an error on watch channel // Upon an error on watch channel


@ -34,7 +34,6 @@ import (
"github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/puzpuzpuz/xsync/v3" "github.com/puzpuzpuz/xsync/v3"
) )
@ -448,7 +447,7 @@ func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error {
iamListing, ok := iamOS.cachedIAMListing.Load().(map[string][]string) iamListing, ok := iamOS.cachedIAMListing.Load().(map[string][]string)
if !ok { if !ok {
// There has been no store yet. This should never happen! // There has been no store yet. This should never happen!
logger.LogIf(GlobalContext, errors.New("WARNING: no cached IAM listing found")) iamLogIf(GlobalContext, errors.New("WARNING: no cached IAM listing found"))
return nil return nil
} }
@ -461,7 +460,7 @@ func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error {
// loadUser() will delete expired user during the load. // loadUser() will delete expired user during the load.
err := iamOS.loadUser(ctx, userName, stsUser, stsAccountsFromStore) err := iamOS.loadUser(ctx, userName, stsUser, stsAccountsFromStore)
if err != nil && !errors.Is(err, errNoSuchUser) { if err != nil && !errors.Is(err, errNoSuchUser) {
logger.LogIf(GlobalContext, iamLogIf(GlobalContext,
fmt.Errorf("unable to load user during STS purge: %w (%s)", err, item)) fmt.Errorf("unable to load user during STS purge: %w (%s)", err, item))
} }
@ -472,7 +471,7 @@ func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error {
stsName := strings.TrimSuffix(item, ".json") stsName := strings.TrimSuffix(item, ".json")
err := iamOS.loadMappedPolicy(ctx, stsName, stsUser, false, stsAccPoliciesFromStore) err := iamOS.loadMappedPolicy(ctx, stsName, stsUser, false, stsAccPoliciesFromStore)
if err != nil && !errors.Is(err, errNoSuchPolicy) { if err != nil && !errors.Is(err, errNoSuchPolicy) {
logger.LogIf(GlobalContext, iamLogIf(GlobalContext,
fmt.Errorf("unable to load policies during STS purge: %w (%s)", err, item)) fmt.Errorf("unable to load policies during STS purge: %w (%s)", err, item))
} }


@ -33,7 +33,6 @@ import (
"github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config/identity/openid" "github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/minio/internal/jwt" "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy" "github.com/minio/pkg/v2/policy"
"github.com/puzpuzpuz/xsync/v3" "github.com/puzpuzpuz/xsync/v3"
) )
@ -1882,7 +1881,7 @@ func (store *IAMStoreSys) DeleteUsers(ctx context.Context, users []string) error
// we are only logging errors, not handling them. // we are only logging errors, not handling them.
err := store.deleteUserIdentity(ctx, user, userType) err := store.deleteUserIdentity(ctx, user, userType)
logger.LogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
delete(cache.iamUsersMap, user) delete(cache.iamUsersMap, user)
deleted = true deleted = true


@ -230,42 +230,42 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
openidConfig, err := openid.LookupConfig(s, openidConfig, err := openid.LookupConfig(s,
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region) NewHTTPTransport(), xhttp.DrainBody, globalSite.Region)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err)) iamLogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err), logger.WarningKind)
} }
// Initialize if LDAP is enabled // Initialize if LDAP is enabled
ldapConfig, err := xldap.Lookup(s, globalRootCAs) ldapConfig, err := xldap.Lookup(s, globalRootCAs)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err)) iamLogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err), logger.WarningKind)
} }
stsTLSConfig, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default]) stsTLSConfig, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default])
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err)) iamLogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err), logger.WarningKind)
} }
if stsTLSConfig.InsecureSkipVerify { if stsTLSConfig.InsecureSkipVerify {
logger.LogIf(ctx, fmt.Errorf("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify)) iamLogIf(ctx, fmt.Errorf("Enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify), logger.WarningKind)
} }
authNPluginCfg, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default], authNPluginCfg, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region) NewHTTPTransport(), xhttp.DrainBody, globalSite.Region)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err)) iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err), logger.WarningKind)
} }
setGlobalAuthNPlugin(idplugin.New(GlobalContext, authNPluginCfg)) setGlobalAuthNPlugin(idplugin.New(GlobalContext, authNPluginCfg))
authZPluginCfg, err := polplugin.LookupConfig(s, GetDefaultConnSettings(), xhttp.DrainBody) authZPluginCfg, err := polplugin.LookupConfig(s, GetDefaultConnSettings(), xhttp.DrainBody)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err)) iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err), logger.WarningKind)
} }
if authZPluginCfg.URL == nil { if authZPluginCfg.URL == nil {
opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default], opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewHTTPTransport(), xhttp.DrainBody) NewHTTPTransport(), xhttp.DrainBody)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err)) iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err))
} else { } else {
authZPluginCfg.URL = opaCfg.URL authZPluginCfg.URL = opaCfg.URL
authZPluginCfg.AuthToken = opaCfg.AuthToken authZPluginCfg.AuthToken = opaCfg.AuthToken
@ -301,7 +301,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
time.Sleep(time.Duration(r.Float64() * float64(time.Second))) time.Sleep(time.Duration(r.Float64() * float64(time.Second)))
continue continue
} }
logger.LogIf(ctx, fmt.Errorf("IAM sub-system is partially initialized, unable to write the IAM format: %w", err)) iamLogIf(ctx, fmt.Errorf("IAM sub-system is partially initialized, unable to write the IAM format: %w", err), logger.WarningKind)
return return
} }
@ -317,7 +317,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
continue continue
} }
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize IAM sub-system, some users may not be available: %w", err)) iamLogIf(ctx, fmt.Errorf("Unable to initialize IAM sub-system, some users may not be available: %w", err), logger.WarningKind)
} }
} }
break break
@ -355,7 +355,7 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
for event := range ch { for event := range ch {
if err := sys.loadWatchedEvent(ctx, event); err != nil { if err := sys.loadWatchedEvent(ctx, event); err != nil {
// we simply log errors // we simply log errors
logger.LogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err)) iamLogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err), logger.WarningKind)
} }
} }
}() }()
@ -388,7 +388,7 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
// Load all IAM items (except STS creds) periodically. // Load all IAM items (except STS creds) periodically.
refreshStart := time.Now() refreshStart := time.Now()
if err := sys.Load(ctx, false); err != nil { if err := sys.Load(ctx, false); err != nil {
logger.LogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err)) iamLogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err), logger.WarningKind)
} else { } else {
took := time.Since(refreshStart).Seconds() took := time.Since(refreshStart).Seconds()
if took > maxDurationSecondsForLog { if took > maxDurationSecondsForLog {
@ -400,7 +400,7 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
// Purge expired STS credentials. // Purge expired STS credentials.
purgeStart := time.Now() purgeStart := time.Now()
if err := sys.store.PurgeExpiredSTS(ctx); err != nil { if err := sys.store.PurgeExpiredSTS(ctx); err != nil {
logger.LogIf(ctx, fmt.Errorf("Failure in periodic STS purge for IAM (took %.2fs): %v", time.Since(purgeStart).Seconds(), err)) iamLogIf(ctx, fmt.Errorf("Failure in periodic STS purge for IAM (took %.2fs): %v", time.Since(purgeStart).Seconds(), err))
} else { } else {
took := time.Since(purgeStart).Seconds() took := time.Since(purgeStart).Seconds()
if took > maxDurationSecondsForLog { if took > maxDurationSecondsForLog {
@ -450,7 +450,7 @@ func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[a
errMsg := fmt.Errorf( errMsg := fmt.Errorf(
"The policies \"%s\" mapped to role ARN %s are not defined - this role may not work as expected.", "The policies \"%s\" mapped to role ARN %s are not defined - this role may not work as expected.",
unknownPoliciesSet.ToSlice(), arn.String()) unknownPoliciesSet.ToSlice(), arn.String())
logger.LogIf(ctx, errMsg) authZLogIf(ctx, errMsg, logger.WarningKind)
} }
} }
sys.rolesMap[arn] = rolePolicies sys.rolesMap[arn] = rolePolicies
@ -573,7 +573,7 @@ func (sys *IAMSys) DeletePolicy(ctx context.Context, policyName string, notifyPe
for _, nerr := range globalNotificationSys.DeletePolicy(policyName) { for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
@ -638,7 +638,7 @@ func (sys *IAMSys) SetPolicy(ctx context.Context, policyName string, p policy.Po
for _, nerr := range globalNotificationSys.LoadPolicy(policyName) { for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
@ -660,7 +660,7 @@ func (sys *IAMSys) DeleteUser(ctx context.Context, accessKey string, notifyPeers
for _, nerr := range globalNotificationSys.DeleteUser(accessKey) { for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
@ -686,7 +686,7 @@ func (sys *IAMSys) notifyForUser(ctx context.Context, accessKey string, isTemp b
for _, nerr := range globalNotificationSys.LoadUser(accessKey, isTemp) { for _, nerr := range globalNotificationSys.LoadUser(accessKey, isTemp) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
@ -931,7 +931,7 @@ func (sys *IAMSys) notifyForServiceAccount(ctx context.Context, accessKey string
for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) { for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
@ -1252,7 +1252,7 @@ func (sys *IAMSys) DeleteServiceAccount(ctx context.Context, accessKey string, n
for _, nerr := range globalNotificationSys.DeleteServiceAccount(accessKey) { for _, nerr := range globalNotificationSys.DeleteServiceAccount(accessKey) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
@ -1327,14 +1327,14 @@ func (sys *IAMSys) purgeExpiredCredentialsForExternalSSO(ctx context.Context) {
roleArns := puInfo.roleArns.ToSlice() roleArns := puInfo.roleArns.ToSlice()
var roleArn string var roleArn string
if len(roleArns) == 0 { if len(roleArns) == 0 {
logger.LogIf(GlobalContext, iamLogIf(GlobalContext,
fmt.Errorf("parentUser: %s had no roleArns mapped!", parentUser)) fmt.Errorf("parentUser: %s had no roleArns mapped!", parentUser))
continue continue
} }
roleArn = roleArns[0] roleArn = roleArns[0]
u, err := sys.OpenIDConfig.LookupUser(roleArn, puInfo.subClaimValue) u, err := sys.OpenIDConfig.LookupUser(roleArn, puInfo.subClaimValue)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
continue continue
} }
// If user is set to "disabled", we will remove them // If user is set to "disabled", we will remove them
@ -1364,7 +1364,7 @@ func (sys *IAMSys) purgeExpiredCredentialsForLDAP(ctx context.Context) {
expiredUsers, err := sys.LDAPConfig.GetNonEligibleUserDistNames(allDistNames) expiredUsers, err := sys.LDAPConfig.GetNonEligibleUserDistNames(allDistNames)
if err != nil { if err != nil {
// Log and return on error - perhaps it'll work the next time. // Log and return on error - perhaps it'll work the next time.
logger.LogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
return return
} }
@ -1445,7 +1445,7 @@ func (sys *IAMSys) updateGroupMembershipsForLDAP(ctx context.Context) {
updatedGroups, err := sys.LDAPConfig.LookupGroupMemberships(parentUsers, parentUserToLDAPUsernameMap) updatedGroups, err := sys.LDAPConfig.LookupGroupMemberships(parentUsers, parentUserToLDAPUsernameMap)
if err != nil { if err != nil {
// Log and return on error - perhaps it'll work the next time. // Log and return on error - perhaps it'll work the next time.
logger.LogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
return return
} }
@ -1469,7 +1469,7 @@ func (sys *IAMSys) updateGroupMembershipsForLDAP(ctx context.Context) {
cred.Groups = currGroups cred.Groups = currGroups
if err := sys.store.UpdateUserIdentity(ctx, cred); err != nil { if err := sys.store.UpdateUserIdentity(ctx, cred); err != nil {
// Log and continue error - perhaps it'll work the next time. // Log and continue error - perhaps it'll work the next time.
logger.LogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
} }
} }
} }
@ -1508,7 +1508,7 @@ func (sys *IAMSys) notifyForGroup(ctx context.Context, group string) {
for _, nerr := range globalNotificationSys.LoadGroup(group) { for _, nerr := range globalNotificationSys.LoadGroup(group) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
@ -1612,7 +1612,7 @@ func (sys *IAMSys) PolicyDBSet(ctx context.Context, name, policy string, userTyp
for _, nerr := range globalNotificationSys.LoadPolicyMapping(name, userType, isGroup) { for _, nerr := range globalNotificationSys.LoadPolicyMapping(name, userType, isGroup) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
@ -1680,12 +1680,12 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
for _, nerr := range globalNotificationSys.LoadPolicyMapping(userOrGroup, regUser, isGroup) { for _, nerr := range globalNotificationSys.LoadPolicyMapping(userOrGroup, regUser, isGroup) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping, Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{ PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: userOrGroup, UserOrGroup: userOrGroup,
@ -1714,7 +1714,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
if r.User != "" { if r.User != "" {
dn, err = sys.LDAPConfig.GetValidatedDNForUsername(r.User) dn, err = sys.LDAPConfig.GetValidatedDNForUsername(r.User)
if err != nil { if err != nil {
logger.LogIf(ctx, err) iamLogIf(ctx, err)
return return
} }
if dn == "" { if dn == "" {
@ -1731,7 +1731,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
if isAttach { if isAttach {
var foundGroupDN string var foundGroupDN string
if foundGroupDN, err = sys.LDAPConfig.GetValidatedGroupDN(r.Group); err != nil { if foundGroupDN, err = sys.LDAPConfig.GetValidatedGroupDN(r.Group); err != nil {
logger.LogIf(ctx, err) iamLogIf(ctx, err)
return return
} else if foundGroupDN == "" { } else if foundGroupDN == "" {
err = errNoSuchGroup err = errNoSuchGroup
@ -1758,12 +1758,12 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
for _, nerr := range globalNotificationSys.LoadPolicyMapping(dn, userType, isGroup) { for _, nerr := range globalNotificationSys.LoadPolicyMapping(dn, userType, isGroup) {
if nerr.Err != nil { if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err) iamLogIf(ctx, nerr.Err)
} }
} }
} }
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping, Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{ PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: dn, UserOrGroup: dn,
@ -1826,7 +1826,7 @@ func (sys *IAMSys) IsAllowedServiceAccount(args policy.Args, parentUser string)
case roleArn != "": case roleArn != "":
arn, err := arn.Parse(roleArn) arn, err := arn.Parse(roleArn)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) iamLogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err))
return false return false
} }
svcPolicies = newMappedPolicy(sys.rolesMap[arn]).toSlice() svcPolicies = newMappedPolicy(sys.rolesMap[arn]).toSlice()
@ -1835,7 +1835,7 @@ func (sys *IAMSys) IsAllowedServiceAccount(args policy.Args, parentUser string)
// Check policy for parent user of service account. // Check policy for parent user of service account.
svcPolicies, err = sys.PolicyDBGet(parentUser, args.Groups...) svcPolicies, err = sys.PolicyDBGet(parentUser, args.Groups...)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
return false return false
} }
@ -1910,7 +1910,7 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool {
// If a roleARN is present, the role policy is applied. // If a roleARN is present, the role policy is applied.
arn, err := arn.Parse(roleArn) arn, err := arn.Parse(roleArn)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) iamLogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err))
return false return false
} }
policies = newMappedPolicy(sys.rolesMap[arn]).toSlice() policies = newMappedPolicy(sys.rolesMap[arn]).toSlice()
@ -1920,7 +1920,7 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool {
var err error var err error
policies, err = sys.store.PolicyDBGet(parentUser, args.Groups...) policies, err = sys.store.PolicyDBGet(parentUser, args.Groups...)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("error fetching policies on %s: %v", parentUser, err)) iamLogIf(GlobalContext, fmt.Errorf("error fetching policies on %s: %v", parentUser, err))
return false return false
} }
@ -1955,11 +1955,11 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool {
_, err := sys.store.GetPolicy(pname) _, err := sys.store.GetPolicy(pname)
if errors.Is(err, errNoSuchPolicy) { if errors.Is(err, errNoSuchPolicy) {
// all policies presented in the claim should exist // all policies presented in the claim should exist
logger.LogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", pname, iamPolicyClaimNameOpenID())) iamLogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", pname, iamPolicyClaimNameOpenID()))
return false return false
} }
} }
logger.LogIf(GlobalContext, fmt.Errorf("all policies were unexpectedly present!")) iamLogIf(GlobalContext, fmt.Errorf("all policies were unexpectedly present!"))
return false return false
} }
@ -2001,7 +2001,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
subPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(spolicyStr))) subPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(spolicyStr)))
if err != nil { if err != nil {
// Log any error in input session policy config. // Log any error in input session policy config.
logger.LogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
return return
} }
@ -2062,7 +2062,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe
subPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(spolicyStr))) subPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(spolicyStr)))
if err != nil { if err != nil {
// Log any error in input session policy config. // Log any error in input session policy config.
logger.LogIf(GlobalContext, err) iamLogIf(GlobalContext, err)
return return
} }
@ -2100,7 +2100,7 @@ func (sys *IAMSys) IsAllowed(args policy.Args) bool {
if authz := newGlobalAuthZPluginFn(); authz != nil { if authz := newGlobalAuthZPluginFn(); authz != nil {
ok, err := authz.IsAllowed(args) ok, err := authz.IsAllowed(args)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) authZLogIf(GlobalContext, err)
} }
return ok return ok
} }


@ -62,7 +62,7 @@ func cachedAuthenticateNode(ttl time.Duration) func(accessKey, secretKey, audien
} }
cache, err := lru.NewARC(100) cache, err := lru.NewARC(100)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) bugLogIf(GlobalContext, err)
return authenticateNode return authenticateNode
} }
return func(accessKey, secretKey, audience string) (string, error) { return func(accessKey, secretKey, audience string) (string, error) {


@ -23,7 +23,6 @@ import (
"math/rand" "math/rand"
"time" "time"
"github.com/minio/minio/internal/logger"
"github.com/tidwall/gjson" "github.com/tidwall/gjson"
) )
@ -85,13 +84,13 @@ func performLicenseUpdate(ctx context.Context, objectAPI ObjectLayer) {
resp, err := globalSubnetConfig.Post(url, nil) resp, err := globalSubnetConfig.Post(url, nil)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("error from %s: %w", url, err)) subnetLogIf(ctx, fmt.Errorf("error from %s: %w", url, err))
return return
} }
r := gjson.Parse(resp).Get("license_v2") r := gjson.Parse(resp).Get("license_v2")
if r.Index == 0 { if r.Index == 0 {
logger.LogIf(ctx, fmt.Errorf("license not found in response from %s", url)) internalLogIf(ctx, fmt.Errorf("license not found in response from %s", url))
return return
} }
@ -104,13 +103,13 @@ func performLicenseUpdate(ctx context.Context, objectAPI ObjectLayer) {
kv := "subnet license=" + lic kv := "subnet license=" + lic
result, err := setConfigKV(ctx, objectAPI, []byte(kv)) result, err := setConfigKV(ctx, objectAPI, []byte(kv))
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("error setting subnet license config: %w", err)) internalLogIf(ctx, fmt.Errorf("error setting subnet license config: %w", err))
return return
} }
if result.Dynamic { if result.Dynamic {
if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, result.Cfg, result.SubSys); err != nil { if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, result.Cfg, result.SubSys); err != nil {
logger.LogIf(ctx, fmt.Errorf("error applying subnet dynamic config: %w", err)) subnetLogIf(ctx, fmt.Errorf("error applying subnet dynamic config: %w", err))
return return
} }
globalNotificationSys.SignalConfigReload(result.SubSys) globalNotificationSys.SignalConfigReload(result.SubSys)


@ -132,7 +132,7 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r
buf.Reset() buf.Reset()
tmpEvt.Records[0] = ev tmpEvt.Records[0] = ev
if err := enc.Encode(tmpEvt); err != nil { if err := enc.Encode(tmpEvt); err != nil {
logger.LogOnceIf(ctx, err, "event: Encode failed") bugLogIf(ctx, err, "event: Encode failed")
continue continue
} }
mergeCh <- append(grid.GetByteBuffer()[:0], buf.Bytes()...) mergeCh <- append(grid.GetByteBuffer()[:0], buf.Bytes()...)

cmd/logging.go (new file, 195 lines)

@ -0,0 +1,195 @@
package cmd
import (
"context"
"github.com/minio/minio/internal/logger"
)
func replLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "replication", err, errKind...)
}
func replLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
logger.LogOnceIf(ctx, "replication", err, id, errKind...)
}
func iamLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "iam", err, errKind...)
}
func rebalanceLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "rebalance", err, errKind...)
}
func rebalanceLogEvent(ctx context.Context, msg string, args ...interface{}) {
logger.Event(ctx, "rebalance", msg, args...)
}
func adminLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "admin", err, errKind...)
}
func authNLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "authN", err, errKind...)
}
func authZLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "authZ", err, errKind...)
}
func peersLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "peers", err, errKind...)
}
func peersLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogAlwaysIf(ctx, "peers", err, errKind...)
}
func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
logger.LogOnceIf(ctx, "peers", err, id, errKind...)
}
func bugLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "internal", err, errKind...)
}
func healingLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "healing", err, errKind...)
}
func healingLogEvent(ctx context.Context, msg string, args ...interface{}) {
logger.Event(ctx, "healing", msg, args...)
}
func healingLogOnceIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "healing", err, errKind...)
}
func batchLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "batch", err, errKind...)
}

func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "batch", err, id, errKind...)
}

func bootLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "bootstrap", err, errKind...)
}

func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "bootstrap", err, id, errKind...)
}

func dnsLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "dns", err, errKind...)
}

func internalLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "internal", err, errKind...)
}

func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "internal", err, id, errKind...)
}

func transitionLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "transition", err, errKind...)
}

func configLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "config", err, errKind...)
}

func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "config", err, id, errKind...)
}

func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceConsoleIf(ctx, "config", err, id, errKind...)
}

func scannerLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "scanner", err, errKind...)
}

func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "scanner", err, id, errKind...)
}

func ilmLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "ilm", err, errKind...)
}

func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "ilm", err, id, errKind...)
}

func encLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "encryption", err, errKind...)
}

func storageLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "storage", err, errKind...)
}

func storageLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogAlwaysIf(ctx, "storage", err, errKind...)
}

func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "storage", err, id, errKind...)
}

func decomLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "decom", err, errKind...)
}

func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "decom", err, id, errKind...)
}

func decomLogEvent(ctx context.Context, msg string, args ...interface{}) {
	logger.Event(ctx, "decom", msg, args...)
}

func etcdLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "etcd", err, errKind...)
}

func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "etcd", err, id, errKind...)
}

func subnetLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "subnet", err, errKind...)
}

func metricsLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "metrics", err, errKind...)
}

func s3LogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "s3", err, errKind...)
}

func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
	logger.LogOnceIf(ctx, "sftp", err, id, errKind...)
}

func shutdownLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "shutdown", err, errKind...)
}

func stsLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "sts", err, errKind...)
}

func tierLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "tier", err, errKind...)
}

func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "kms", err, errKind...)
}
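
Each helper above forwards to the logger package unchanged except for a fixed subsystem tag, and the Once variants additionally take a caller-chosen id used to deduplicate repeats of the same error. As a minimal illustrative sketch of how a call site uses them, assuming the same package and imports as the block above (exampleListCleanup and its id string are hypothetical, not part of this commit):

func exampleListCleanup(ctx context.Context, bucket string, err error) {
	if err == nil {
		return
	}
	// Dedupe on a stable key, mirroring call sites below such as
	// storageLogOnceIf(ctx, err, "GetMetacacheListing:"+o.Bucket); a plain
	// storageLogIf(ctx, err) would report every occurrence instead.
	storageLogOnceIf(ctx, err, "example-list-cleanup:"+bucket)
}

The remaining hunks below follow the same pattern: each call keeps its arguments and only the wrapper name changes to the owning subsystem.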


@ -221,7 +221,7 @@ func (b *bucketMetacache) deleteAll() {
ez, ok := objAPI.(deleteAllStorager) ez, ok := objAPI.(deleteAllStorager)
if !ok { if !ok {
logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'deleteAllStorager'")) bugLogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'deleteAllStorager'"))
return return
} }


@ -27,7 +27,6 @@ import (
"strings" "strings"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/console" "github.com/minio/pkg/v2/console"
) )
@ -377,7 +376,7 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa
xl, err := entry.xlmeta() xl, err := entry.xlmeta()
if err != nil { if err != nil {
if !errors.Is(err, errFileNotFound) { if !errors.Is(err, errFileNotFound) {
logger.LogIf(GlobalContext, err) internalLogIf(GlobalContext, err)
} }
continue continue
} }
@ -437,7 +436,7 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa
var err error var err error
selected.metadata, err = selected.cached.AppendTo(metaDataPoolGet()) selected.metadata, err = selected.cached.AppendTo(metaDataPoolGet())
if err != nil { if err != nil {
logger.LogIf(context.Background(), err) bugLogIf(context.Background(), err)
return nil, false return nil, false
} }
return selected, true return selected, true


@ -22,8 +22,6 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
"github.com/minio/minio/internal/logger"
) )
// markerTagVersion is the marker version. // markerTagVersion is the marker version.
@ -86,7 +84,7 @@ func (o listPathOptions) encodeMarker(marker string) string {
return fmt.Sprintf("%s[minio_cache:%s,return:]", marker, markerTagVersion) return fmt.Sprintf("%s[minio_cache:%s,return:]", marker, markerTagVersion)
} }
if strings.ContainsAny(o.ID, "[:,") { if strings.ContainsAny(o.ID, "[:,") {
logger.LogIf(context.Background(), fmt.Errorf("encodeMarker: uuid %s contained invalid characters", o.ID)) internalLogIf(context.Background(), fmt.Errorf("encodeMarker: uuid %s contained invalid characters", o.ID))
} }
return fmt.Sprintf("%s[minio_cache:%s,id:%s,p:%d,s:%d]", marker, markerTagVersion, o.ID, o.pool, o.set) return fmt.Sprintf("%s[minio_cache:%s,id:%s,p:%d,s:%d]", marker, markerTagVersion, o.ID, o.pool, o.set)
} }


@ -29,7 +29,6 @@ import (
"time" "time"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
) )
func renameAllBucketMetacache(epPath string) error { func renameAllBucketMetacache(epPath string) error {
@ -136,7 +135,7 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
} }
if !errors.Is(err, context.DeadlineExceeded) { if !errors.Is(err, context.DeadlineExceeded) {
// Report error once per bucket, but continue listing. // Report error once per bucket, but continue listing.
logger.LogOnceIf(ctx, err, "GetMetacacheListing:"+o.Bucket) storageLogOnceIf(ctx, err, "GetMetacacheListing:"+o.Bucket)
} }
o.Transient = true o.Transient = true
o.Create = false o.Create = false
@ -322,7 +321,7 @@ func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions,
allAtEOF = false allAtEOF = false
continue continue
} }
logger.LogIf(ctx, err) storageLogIf(ctx, err)
return err return err
} }
if allAtEOF { if allAtEOF {


@ -38,7 +38,6 @@ import (
"github.com/minio/minio/internal/color" "github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/hash"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/console" "github.com/minio/pkg/v2/console"
) )
@ -285,7 +284,7 @@ func (o *listPathOptions) findFirstPart(fi FileInfo) (int, error) {
} }
err := json.Unmarshal([]byte(v), &tmp) err := json.Unmarshal([]byte(v), &tmp)
if !ok { if !ok {
logger.LogIf(context.Background(), err) bugLogIf(context.Background(), err)
return -1, err return -1, err
} }
if tmp.First == "" && tmp.Last == "" && tmp.EOS { if tmp.First == "" && tmp.Last == "" && tmp.EOS {
@ -538,7 +537,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
} }
loadedPart = partN loadedPart = partN
bi, err := getMetacacheBlockInfo(fi, partN) bi, err := getMetacacheBlockInfo(fi, partN)
logger.LogIf(ctx, err) internalLogIf(ctx, err)
if err == nil { if err == nil {
if bi.pastPrefix(o.Prefix) { if bi.pastPrefix(o.Prefix) {
return entries, io.EOF return entries, io.EOF
@ -577,7 +576,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
time.Sleep(retryDelay250) time.Sleep(retryDelay250)
continue continue
default: default:
logger.LogIf(ctx, err) internalLogIf(ctx, err)
return entries, err return entries, err
} }
} }
@ -585,7 +584,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
// We finished at the end of the block. // We finished at the end of the block.
// And should not expect any more results. // And should not expect any more results.
bi, err := getMetacacheBlockInfo(fi, partN) bi, err := getMetacacheBlockInfo(fi, partN)
logger.LogIf(ctx, err) internalLogIf(ctx, err)
if err != nil || bi.EOS { if err != nil || bi.EOS {
// We are done and there are no more parts. // We are done and there are no more parts.
return entries, io.EOF return entries, io.EOF
@ -868,7 +867,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
} }
o.debugln(color.Green("saveMetaCacheStream:")+" saving block", b.n, "to", o.objectPath(b.n)) o.debugln(color.Green("saveMetaCacheStream:")+" saving block", b.n, "to", o.objectPath(b.n))
r, err := hash.NewReader(ctx, bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data))) r, err := hash.NewReader(ctx, bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)))
logger.LogIf(ctx, err) bugLogIf(ctx, err)
custom := b.headerKV() custom := b.headerKV()
_, err = er.putMetacacheObject(ctx, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{ _, err = er.putMetacacheObject(ctx, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{
UserDefined: custom, UserDefined: custom,
@ -902,7 +901,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
return err return err
case InsufficientReadQuorum: case InsufficientReadQuorum:
default: default:
logger.LogIf(ctx, err) internalLogIf(ctx, err)
} }
if retries >= maxTries { if retries >= maxTries {
return err return err


@ -28,7 +28,6 @@ import (
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/s2" "github.com/klauspost/compress/s2"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp" "github.com/tinylib/msgp/msgp"
"github.com/valyala/bytebufferpool" "github.com/valyala/bytebufferpool"
) )
@ -845,7 +844,7 @@ func (b metacacheBlock) headerKV() map[string]string {
json := jsoniter.ConfigCompatibleWithStandardLibrary json := jsoniter.ConfigCompatibleWithStandardLibrary
v, err := json.Marshal(b) v, err := json.Marshal(b)
if err != nil { if err != nil {
logger.LogIf(context.Background(), err) // Unlikely bugLogIf(context.Background(), err) // Unlikely
return nil return nil
} }
return map[string]string{fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, b.n): string(v)} return map[string]string{fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, b.n): string(v)}


@ -25,7 +25,6 @@ import (
"github.com/minio/minio/internal/grid" "github.com/minio/minio/internal/grid"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/valyala/bytebufferpool" "github.com/valyala/bytebufferpool"
) )
@ -171,7 +170,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
if err != nil { if err != nil {
// Folder could have gone away in-between // Folder could have gone away in-between
if err != errVolumeNotFound && err != errFileNotFound { if err != errVolumeNotFound && err != errFileNotFound {
logger.LogOnceIf(ctx, err, "metacache-walk-scan-dir") internalLogOnceIf(ctx, err, "metacache-walk-scan-dir")
} }
if opts.ReportNotFound && err == errFileNotFound && current == opts.BaseDir { if opts.ReportNotFound && err == errFileNotFound && current == opts.BaseDir {
err = errFileNotFound err = errFileNotFound
@ -239,7 +238,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// while being concurrently listed at the same time in // while being concurrently listed at the same time in
// such scenarios the 'xl.meta' might get truncated // such scenarios the 'xl.meta' might get truncated
if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) { if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) {
logger.LogOnceIf(ctx, err, "metacache-walk-read-metadata") internalLogOnceIf(ctx, err, "metacache-walk-read-metadata")
} }
continue continue
} }
@ -257,7 +256,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
diskHealthCheckOK(ctx, err) diskHealthCheckOK(ctx, err)
if err != nil { if err != nil {
if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) { if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) {
logger.LogIf(ctx, err) internalLogIf(ctx, err)
} }
continue continue
} }
@ -308,7 +307,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// Scan folder we found. Should be in correct sort order where we are. // Scan folder we found. Should be in correct sort order where we are.
err := scanDir(pop) err := scanDir(pop)
if err != nil && !IsErrIgnored(err, context.Canceled) { if err != nil && !IsErrIgnored(err, context.Canceled) {
logger.LogIf(ctx, err) internalLogIf(ctx, err)
} }
} }
dirStack = dirStack[:len(dirStack)-1] dirStack = dirStack[:len(dirStack)-1]
@ -379,7 +378,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
} }
if opts.Recursive { if opts.Recursive {
// Scan folder we found. Should be in correct sort order where we are. // Scan folder we found. Should be in correct sort order where we are.
logger.LogIf(ctx, scanDir(pop)) internalLogIf(ctx, scanDir(pop))
} }
dirStack = dirStack[:len(dirStack)-1] dirStack = dirStack[:len(dirStack)-1]
} }


@ -24,8 +24,6 @@ import (
"path" "path"
"strings" "strings"
"time" "time"
"github.com/minio/minio/internal/logger"
) )
type scanStatus uint8 type scanStatus uint8
@ -148,16 +146,17 @@ func (m *metacache) update(update metacache) {
// delete all cache data on disks. // delete all cache data on disks.
func (m *metacache) delete(ctx context.Context) { func (m *metacache) delete(ctx context.Context) {
if m.bucket == "" || m.id == "" { if m.bucket == "" || m.id == "" {
logger.LogIf(ctx, fmt.Errorf("metacache.delete: bucket (%s) or id (%s) empty", m.bucket, m.id)) bugLogIf(ctx, fmt.Errorf("metacache.delete: bucket (%s) or id (%s) empty", m.bucket, m.id))
return
} }
objAPI := newObjectLayerFn() objAPI := newObjectLayerFn()
if objAPI == nil { if objAPI == nil {
logger.LogIf(ctx, errors.New("metacache.delete: no object layer")) internalLogIf(ctx, errors.New("metacache.delete: no object layer"))
return return
} }
ez, ok := objAPI.(deleteAllStorager) ez, ok := objAPI.(deleteAllStorager)
if !ok { if !ok {
logger.LogIf(ctx, errors.New("metacache.delete: expected objAPI to be 'deleteAllStorager'")) bugLogIf(ctx, errors.New("metacache.delete: expected objAPI to be 'deleteAllStorager'"))
return return
} }
ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(m.bucket, m.id)) ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(m.bucket, m.id))


@ -1690,7 +1690,7 @@ func getMinioProcMetrics() *MetricsGroupV2 {
p, err := procfs.Self() p, err := procfs.Self()
if err != nil { if err != nil {
logger.LogOnceIf(ctx, err, string(nodeMetricNamespace)) internalLogOnceIf(ctx, err, string(nodeMetricNamespace))
return return
} }
@ -1846,7 +1846,7 @@ func getHistogramMetrics(hist *prometheus.HistogramVec, desc MetricDescription,
if err != nil { if err != nil {
// Log error and continue to receive other metric // Log error and continue to receive other metric
// values // values
logger.LogIf(GlobalContext, err) bugLogIf(GlobalContext, err)
continue continue
} }
@ -2476,7 +2476,7 @@ func getReplicationSiteMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
if globalSiteReplicationSys.isEnabled() { if globalSiteReplicationSys.isEnabled() {
m, err := globalSiteReplicationSys.getSiteMetrics(GlobalContext) m, err := globalSiteReplicationSys.getSiteMetrics(GlobalContext)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) metricsLogIf(GlobalContext, err)
return ml return ml
} }
ml = append(ml, MetricV2{ ml = append(ml, MetricV2{
@ -3126,7 +3126,7 @@ func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
metrics = make([]MetricV2, 0, 50) metrics = make([]MetricV2, 0, 50)
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil { if err != nil {
logger.LogIf(ctx, err) metricsLogIf(ctx, err)
return return
} }
@ -3229,7 +3229,7 @@ func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
metrics = make([]MetricV2, 0, 50) metrics = make([]MetricV2, 0, 50)
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil { if err != nil {
logger.LogIf(ctx, err) metricsLogIf(ctx, err)
return return
} }
@ -3463,7 +3463,7 @@ func getClusterTierMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
dui, err := loadDataUsageFromBackend(ctx, objLayer) dui, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil { if err != nil {
logger.LogIf(ctx, err) metricsLogIf(ctx, err)
return return
} }
// data usage has not captured any tier stats yet. // data usage has not captured any tier stats yet.
@ -4013,7 +4013,7 @@ func collectMetric(metric MetricV2, labels []string, values []string, metricName
if err != nil { if err != nil {
// Enable for debugging // Enable for debugging
if serverDebugLog { if serverDebugLog {
logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v+%v", err, values, metric.Histogram), metricName+"-metrics-histogram") bugLogIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v+%v", err, values, metric.Histogram))
} }
} else { } else {
out <- pmetric out <- pmetric
@ -4040,7 +4040,7 @@ func collectMetric(metric MetricV2, labels []string, values []string, metricName
if err != nil { if err != nil {
// Enable for debugging // Enable for debugging
if serverDebugLog { if serverDebugLog {
logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v", err, values), metricName+"-metrics") bugLogIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v", err, values))
} }
} else { } else {
out <- pmetric out <- pmetric
@ -4366,7 +4366,7 @@ func metricsNodeHandler() http.Handler {
enc := expfmt.NewEncoder(w, contentType) enc := expfmt.NewEncoder(w, contentType)
for _, mf := range mfs { for _, mf := range mfs {
if err := enc.Encode(mf); err != nil { if err := enc.Encode(mf); err != nil {
logger.LogIf(r.Context(), err) metricsLogIf(r.Context(), err)
return return
} }
} }


@ -20,8 +20,6 @@ package cmd
import ( import (
"context" "context"
"time" "time"
"github.com/minio/minio/internal/logger"
) )
const ( const (
@ -60,7 +58,7 @@ var (
func loadClusterUsageObjectMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { func loadClusterUsageObjectMetrics(ctx context.Context, m MetricValues, c *metricsCache) error {
dataUsageInfo, err := c.dataUsageInfo.Get() dataUsageInfo, err := c.dataUsageInfo.Get()
if err != nil { if err != nil {
logger.LogIf(ctx, err) metricsLogIf(ctx, err)
return nil return nil
} }
@ -144,7 +142,7 @@ var (
func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metricsCache, buckets []string) error { func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metricsCache, buckets []string) error {
dataUsageInfo, err := c.dataUsageInfo.Get() dataUsageInfo, err := c.dataUsageInfo.Get()
if err != nil { if err != nil {
logger.LogIf(ctx, err) metricsLogIf(ctx, err)
return nil return nil
} }
@ -164,7 +162,7 @@ func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metri
if err != nil { if err != nil {
// Log and continue if we are unable to retrieve metrics for this // Log and continue if we are unable to retrieve metrics for this
// bucket. // bucket.
logger.LogIf(ctx, err) metricsLogIf(ctx, err)
continue continue
} }


@ -24,7 +24,6 @@ import (
"slices" "slices"
"strings" "strings"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/mcontext" "github.com/minio/minio/internal/mcontext"
"github.com/minio/mux" "github.com/minio/mux"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@ -39,7 +38,7 @@ func (p promLogger) Println(v ...interface{}) {
s = append(s, fmt.Sprintf("%v", val)) s = append(s, fmt.Sprintf("%v", val))
} }
err := fmt.Errorf("metrics handler error: %v", strings.Join(s, " ")) err := fmt.Errorf("metrics handler error: %v", strings.Join(s, " "))
logger.LogIf(GlobalContext, err) metricsLogIf(GlobalContext, err)
} }
type metricsV3Server struct { type metricsV3Server struct {


@ -20,8 +20,6 @@ package cmd
import ( import (
"context" "context"
"strconv" "strconv"
"github.com/minio/minio/internal/logger"
) )
// label constants // label constants
@ -83,7 +81,7 @@ var (
func loadDriveMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { func loadDriveMetrics(ctx context.Context, m MetricValues, c *metricsCache) error {
driveMetrics, err := c.driveMetrics.Get() driveMetrics, err := c.driveMetrics.Get()
if err != nil { if err != nil {
logger.LogIf(ctx, err) metricsLogIf(ctx, err)
return nil return nil
} }


@ -126,7 +126,7 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
if i == g.retryCount-1 { if i == g.retryCount-1 {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", addr.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", addr.String())
ctx := logger.SetReqInfo(ctx, reqInfo) ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogOnceIf(ctx, err, addr.String()) peersLogOnceIf(ctx, err, addr.String())
} }
// Wait for a minimum of 100ms and dynamically increase this based on number of attempts. // Wait for a minimum of 100ms and dynamically increase this based on number of attempts.
if i < g.retryCount-1 { if i < g.retryCount-1 {
@ -312,7 +312,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
if err != nil { if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
ctx := logger.SetReqInfo(ctx, reqInfo) ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, err) peersLogIf(ctx, err)
continue continue
} }
@ -323,7 +323,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
if err != nil { if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
ctx := logger.SetReqInfo(ctx, reqInfo) ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, err) peersLogIf(ctx, err)
} }
} }
} }
@ -331,7 +331,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
// Local host // Local host
thisAddr, err := xnet.ParseHost(globalLocalNodeName) thisAddr, err := xnet.ParseHost(globalLocalNodeName)
if err != nil { if err != nil {
logger.LogIf(ctx, err) bugLogIf(ctx, err)
return profilingDataFound return profilingDataFound
} }
@ -339,7 +339,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
if err != nil { if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", thisAddr.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", thisAddr.String())
ctx := logger.SetReqInfo(ctx, reqInfo) ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, err) bugLogIf(ctx, err)
return profilingDataFound return profilingDataFound
} }
@ -348,10 +348,10 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
// Send profiling data to zip as file // Send profiling data to zip as file
for typ, data := range data { for typ, data := range data {
err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", thisAddr, typ), data, 0o600) err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", thisAddr, typ), data, 0o600)
logger.LogIf(ctx, err) internalLogIf(ctx, err)
} }
if b := getClusterMetaInfo(ctx); len(b) > 0 { if b := getClusterMetaInfo(ctx); len(b) > 0 {
logger.LogIf(ctx, embedFileInZip(zipWriter, "cluster.info", b, 0o600)) internalLogIf(ctx, embedFileInZip(zipWriter, "cluster.info", b, 0o600))
} }
return return
@ -480,7 +480,7 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress",
sys.peerClients[index].host.String()) sys.peerClients[index].host.String())
ctx := logger.SetReqInfo(ctx, reqInfo) ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String()) peersLogOnceIf(ctx, err, sys.peerClients[index].host.String())
} }
locksResp = append(locksResp, &PeerLocks{ locksResp = append(locksResp, &PeerLocks{
Addr: getHostName(r), Addr: getHostName(r),
@ -504,7 +504,7 @@ func (sys *NotificationSys) LoadBucketMetadata(ctx context.Context, bucketName s
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
} }
@ -534,7 +534,7 @@ func (sys *NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
} }
@ -561,7 +561,7 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
@ -603,7 +603,7 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
bucketStats = append(bucketStats, BucketStats{ bucketStats = append(bucketStats, BucketStats{
@ -636,7 +636,7 @@ func (sys *NotificationSys) GetClusterSiteMetrics(ctx context.Context) []SRMetri
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
siteStats = append(siteStats, globalReplicationStats.getSRMetricsForNode()) siteStats = append(siteStats, globalReplicationStats.getSRMetricsForNode())
@ -658,7 +658,7 @@ func (sys *NotificationSys) ReloadPoolMeta(ctx context.Context) {
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
} }
@ -679,13 +679,13 @@ func (sys *NotificationSys) StopRebalance(ctx context.Context) {
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
objAPI := newObjectLayerFn() objAPI := newObjectLayerFn()
if objAPI == nil { if objAPI == nil {
logger.LogIf(ctx, errServerNotInitialized) internalLogIf(ctx, errServerNotInitialized)
return return
} }
@ -711,7 +711,7 @@ func (sys *NotificationSys) LoadRebalanceMeta(ctx context.Context, startRebalanc
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
} }
@ -732,7 +732,7 @@ func (sys *NotificationSys) LoadTransitionTierConfig(ctx context.Context) {
for _, nErr := range ng.Wait() { for _, nErr := range ng.Wait() {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
if nErr.Err != nil { if nErr.Err != nil {
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String())
} }
} }
} }
@ -946,7 +946,7 @@ func (sys *NotificationSys) addNodeErr(nodeInfo madmin.NodeInfo, peerClient *pee
addr := peerClient.host.String() addr := peerClient.host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr) reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo) ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogOnceIf(ctx, err, "add-node-err-"+addr) peersLogOnceIf(ctx, err, "add-node-err-"+addr)
nodeInfo.SetAddr(addr) nodeInfo.SetAddr(addr)
nodeInfo.SetError(err.Error()) nodeInfo.SetError(err.Error())
} }
@ -1187,7 +1187,7 @@ func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ...
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress",
sys.peerClients[index].host.String()) sys.peerClients[index].host.String())
ctx := logger.SetReqInfo(ctx, reqInfo) ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String()) peersLogOnceIf(ctx, err, sys.peerClients[index].host.String())
} }
reports = append(reports, globalBucketMonitor.GetReport(bandwidth.SelectBuckets(buckets...))) reports = append(reports, globalBucketMonitor.GetReport(bandwidth.SelectBuckets(buckets...)))
consolidatedReport := bandwidth.BucketBandwidthReport{ consolidatedReport := bandwidth.BucketBandwidthReport{
@ -1222,9 +1222,9 @@ func (sys *NotificationSys) collectPeerMetrics(ctx context.Context, peerChannels
if sys.peerClients[index] != nil { if sys.peerClients[index] != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress",
sys.peerClients[index].host.String()) sys.peerClients[index].host.String())
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String())
} else { } else {
logger.LogOnceIf(ctx, err, "peer-offline") peersLogOnceIf(ctx, err, "peer-offline")
} }
continue continue
} }
@ -1460,7 +1460,7 @@ func (sys *NotificationSys) DriveSpeedTest(ctx context.Context, opts madmin.Driv
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", client.host.String()) reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", client.host.String())
ctx := logger.SetReqInfo(GlobalContext, reqInfo) ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogOnceIf(ctx, err, client.host.String()) peersLogOnceIf(ctx, err, client.host.String())
}(client) }(client)
} }
@ -1521,7 +1521,7 @@ func (sys *NotificationSys) GetLastDayTierStats(ctx context.Context) DailyAllTie
merged := globalTransitionState.getDailyAllTierStats() merged := globalTransitionState.getDailyAllTierStats()
for i, stat := range lastDayStats { for i, stat := range lastDayStats {
if errs[i] != nil { if errs[i] != nil {
logger.LogOnceIf(ctx, fmt.Errorf("failed to fetch last day tier stats: %w", errs[i]), sys.peerClients[i].host.String()) peersLogOnceIf(ctx, fmt.Errorf("failed to fetch last day tier stats: %w", errs[i]), sys.peerClients[i].host.String())
continue continue
} }
merged.merge(stat) merged.merge(stat)
@ -1556,9 +1556,9 @@ func (sys *NotificationSys) GetReplicationMRF(ctx context.Context, bucket, node
if sys.peerClients[index] != nil { if sys.peerClients[index] != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress",
sys.peerClients[index].host.String()) sys.peerClients[index].host.String())
logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String())
} else { } else {
logger.LogOnceIf(ctx, err, "peer-offline") peersLogOnceIf(ctx, err, "peer-offline")
} }
continue continue
} }


@ -27,7 +27,6 @@ import (
"github.com/minio/madmin-go/v3" "github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/logger"
) )
//go:generate msgp -file $GOFILE -io=false -tests=false -unexported=false //go:generate msgp -file $GOFILE -io=false -tests=false -unexported=false
@ -246,7 +245,7 @@ func (o *ObjectInfo) ArchiveInfo() []byte {
if v, ok := o.UserDefined[archiveTypeMetadataKey]; ok && v == archiveTypeEnc { if v, ok := o.UserDefined[archiveTypeMetadataKey]; ok && v == archiveTypeEnc {
decrypted, err := o.metadataDecrypter()(archiveTypeEnc, data) decrypted, err := o.metadataDecrypter()(archiveTypeEnc, data)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) encLogIf(GlobalContext, err)
return nil return nil
} }
data = decrypted data = decrypted

View File

@ -24,7 +24,6 @@ import (
"strings" "strings"
"github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio/internal/logger"
) )
// Checks on CopyObject arguments, bucket and object. // Checks on CopyObject arguments, bucket and object.
@ -71,10 +70,6 @@ func checkListObjsArgs(ctx context.Context, bucket, prefix, marker string) error
// Validates object prefix validity after bucket exists. // Validates object prefix validity after bucket exists.
if !IsValidObjectPrefix(prefix) { if !IsValidObjectPrefix(prefix) {
logger.LogIf(ctx, ObjectNameInvalid{
Bucket: bucket,
Object: prefix,
})
return ObjectNameInvalid{ return ObjectNameInvalid{
Bucket: bucket, Bucket: bucket,
Object: prefix, Object: prefix,
@ -90,10 +85,6 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo
} }
if uploadIDMarker != "" { if uploadIDMarker != "" {
if HasSuffix(keyMarker, SlashSeparator) { if HasSuffix(keyMarker, SlashSeparator) {
logger.LogIf(ctx, InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker,
})
return InvalidUploadIDKeyCombination{ return InvalidUploadIDKeyCombination{
UploadIDMarker: uploadIDMarker, UploadIDMarker: uploadIDMarker,
KeyMarker: keyMarker, KeyMarker: keyMarker,
@ -101,7 +92,6 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo
} }
_, err := base64.RawURLEncoding.DecodeString(uploadIDMarker) _, err := base64.RawURLEncoding.DecodeString(uploadIDMarker)
if err != nil { if err != nil {
logger.LogIf(ctx, err)
return MalformedUploadID{ return MalformedUploadID{
UploadID: uploadIDMarker, UploadID: uploadIDMarker,
} }


@ -498,7 +498,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
proxyGetErr := ErrorRespToObjectError(perr, bucket, object) proxyGetErr := ErrorRespToObjectError(perr, bucket, object)
if !isErrBucketNotFound(proxyGetErr) && !isErrObjectNotFound(proxyGetErr) && !isErrVersionNotFound(proxyGetErr) && if !isErrBucketNotFound(proxyGetErr) && !isErrObjectNotFound(proxyGetErr) && !isErrVersionNotFound(proxyGetErr) &&
!isErrPreconditionFailed(proxyGetErr) && !isErrInvalidRange(proxyGetErr) { !isErrPreconditionFailed(proxyGetErr) && !isErrInvalidRange(proxyGetErr) {
logger.LogIf(ctx, fmt.Errorf("Proxying request (replication) failed for %s/%s(%s) - %w", bucket, object, opts.VersionID, perr)) replLogIf(ctx, fmt.Errorf("Proxying request (replication) failed for %s/%s(%s) - %w", bucket, object, opts.VersionID, perr))
} }
} }
if reader != nil && proxy.Proxy && perr == nil { if reader != nil && proxy.Proxy && perr == nil {
@ -3788,7 +3788,7 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *
VersionID: objInfo.VersionID, VersionID: objInfo.VersionID,
} }
if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil { if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to restore transitioned bucket/object %s/%s: %w", bucket, object, err)) s3LogIf(ctx, fmt.Errorf("Unable to restore transitioned bucket/object %s/%s: %w", bucket, object, err))
return return
} }


@ -83,7 +83,7 @@ func newPeerRESTClient(peer *xnet.Host, gridHost string) *peerRESTClient {
// Lazy initialization of grid connection. // Lazy initialization of grid connection.
// When we create this peer client, the grid connection is likely not yet initialized. // When we create this peer client, the grid connection is likely not yet initialized.
if gridHost == "" { if gridHost == "" {
logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", peer.String()), peer.String()+":gridHost") bugLogIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", peer.String()), peer.String()+":gridHost")
return nil return nil
} }
gc := gridConn.Load() gc := gridConn.Load()
@ -96,7 +96,7 @@ func newPeerRESTClient(peer *xnet.Host, gridHost string) *peerRESTClient {
} }
gc = gm.Connection(gridHost) gc = gm.Connection(gridHost)
if gc == nil { if gc == nil {
logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost %q not found for peer %s", gridHost, peer.String()), peer.String()+":gridHost") bugLogIf(context.Background(), fmt.Errorf("gridHost %q not found for peer %s", gridHost, peer.String()), peer.String()+":gridHost")
return nil return nil
} }
gridConn.Store(gc) gridConn.Store(gc)
@ -500,7 +500,7 @@ func (client *peerRESTClient) doTrace(ctx context.Context, traceCh chan<- []byte
payload, err := json.Marshal(traceOpts) payload, err := json.Marshal(traceOpts)
if err != nil { if err != nil {
logger.LogIf(ctx, err) bugLogIf(ctx, err)
return return
} }
@ -628,7 +628,7 @@ func newPeerRestClients(endpoints EndpointServerPools) (remote, all []*peerRESTC
remote = append(remote, all[i]) remote = append(remote, all[i])
} }
if len(all) != len(remote)+1 { if len(all) != len(remote)+1 {
logger.LogIf(context.Background(), fmt.Errorf("WARNING: Expected number of all hosts (%v) to be remote +1 (%v)", len(all), len(remote))) peersLogIf(context.Background(), fmt.Errorf("Expected number of all hosts (%v) to be remote +1 (%v)", len(all), len(remote)), logger.WarningKind)
} }
return remote, all return remote, all
} }

View File

@ -349,7 +349,7 @@ func (s *peerRESTServer) DownloadProfilingDataHandler(w http.ResponseWriter, r *
s.writeErrorResponse(w, err) s.writeErrorResponse(w, err)
return return
} }
logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData)) peersLogIf(ctx, gob.NewEncoder(w).Encode(profileData))
} }
func (s *peerRESTServer) LocalStorageInfoHandler(mss *grid.MSS) (*grid.JSON[madmin.StorageInfo], *grid.RemoteErr) { func (s *peerRESTServer) LocalStorageInfoHandler(mss *grid.MSS) (*grid.JSON[madmin.StorageInfo], *grid.RemoteErr) {
@ -815,7 +815,7 @@ func (s *peerRESTServer) ListenHandler(ctx context.Context, v *grid.URLValues, o
buf.Reset() buf.Reset()
tmpEvt.Records[0] = ev tmpEvt.Records[0] = ev
if err := enc.Encode(tmpEvt); err != nil { if err := enc.Encode(tmpEvt); err != nil {
logger.LogOnceIf(ctx, err, "event: Encode failed") peersLogOnceIf(ctx, err, "event: Encode failed")
continue continue
} }
out <- grid.NewBytesWithCopyOf(buf.Bytes()) out <- grid.NewBytesWithCopyOf(buf.Bytes())
@ -866,7 +866,7 @@ func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np g
return np, grid.NewRemoteErr(errServerNotInitialized) return np, grid.NewRemoteErr(errServerNotInitialized)
} }
logger.LogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI)) peersLogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI))
return return
} }
@ -939,7 +939,7 @@ func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid
go func() { go func() {
err := globalTierConfigMgr.Reload(context.Background(), newObjectLayerFn()) err := globalTierConfigMgr.Reload(context.Background(), newObjectLayerFn())
if err != nil { if err != nil {
logger.LogIf(context.Background(), fmt.Errorf("Failed to reload remote tier config %s", err)) peersLogIf(context.Background(), fmt.Errorf("Failed to reload remote tier config %s", err))
} }
}() }()
@ -1090,7 +1090,7 @@ func (s *peerRESTServer) SpeedTestHandler(w http.ResponseWriter, r *http.Request
} }
done(nil) done(nil)
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
} }
// GetLastDayTierStatsHandler - returns per-tier stats in the last 24hrs for this server // GetLastDayTierStatsHandler - returns per-tier stats in the last 24hrs for this server
@ -1139,7 +1139,7 @@ func (s *peerRESTServer) DriveSpeedTestHandler(w http.ResponseWriter, r *http.Re
result := driveSpeedTest(r.Context(), opts) result := driveSpeedTest(r.Context(), opts)
done(nil) done(nil)
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
} }
// GetReplicationMRFHandler - returns replication MRF for bucket // GetReplicationMRFHandler - returns replication MRF for bucket
@ -1186,7 +1186,7 @@ func (s *peerRESTServer) DevNull(w http.ResponseWriter, r *http.Request) {
// If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec) // If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec)
// would mean the network is not stable. Logging here will help in debugging network issues. // would mean the network is not stable. Logging here will help in debugging network issues.
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) { if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
logger.LogIf(ctx, err) peersLogIf(ctx, err)
} }
} }
if err != nil { if err != nil {
@ -1208,7 +1208,7 @@ func (s *peerRESTServer) NetSpeedTestHandler(w http.ResponseWriter, r *http.Requ
duration = time.Second * 10 duration = time.Second * 10
} }
result := netperf(r.Context(), duration.Round(time.Second)) result := netperf(r.Context(), duration.Round(time.Second))
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
} }
func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {


@ -28,7 +28,6 @@ import (
"github.com/minio/madmin-go/v3" "github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/grid" "github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/sync/errgroup" "github.com/minio/pkg/v2/sync/errgroup"
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
) )
@ -511,7 +510,7 @@ func newPeerS3Client(node Node) peerS3Client {
// Lazy initialization of grid connection. // Lazy initialization of grid connection.
// When we create this peer client, the grid connection is likely not yet initialized. // When we create this peer client, the grid connection is likely not yet initialized.
if node.GridHost == "" { if node.GridHost == "" {
logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", node.Host), node.Host+":gridHost") bugLogIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", node.Host), node.Host+":gridHost")
return nil return nil
} }
gc := gridConn.Load() gc := gridConn.Load()
@ -524,7 +523,7 @@ func newPeerS3Client(node Node) peerS3Client {
} }
gc = gm.Connection(node.GridHost) gc = gm.Connection(node.GridHost)
if gc == nil { if gc == nil {
logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost %s not found for peer %s", node.GridHost, node.Host), node.Host+":gridHost") bugLogIf(context.Background(), fmt.Errorf("gridHost %s not found for peer %s", node.GridHost, node.Host), node.Host+":gridHost")
return nil return nil
} }
gridConn.Store(gc) gridConn.Store(gc)


@ -48,7 +48,7 @@ var printEndpointError = func() func(Endpoint, error, bool) {
printOnce[endpoint] = m printOnce[endpoint] = m
if once { if once {
m[err.Error()]++ m[err.Error()]++
logger.LogAlwaysIf(ctx, err) peersLogAlwaysIf(ctx, err)
return return
} }
} }
@ -60,7 +60,7 @@ var printEndpointError = func() func(Endpoint, error, bool) {
// once not set, check if same error occurred 3 times in // once not set, check if same error occurred 3 times in
// a row, then make sure we print it to call attention. // a row, then make sure we print it to call attention.
if m[err.Error()] > 2 { if m[err.Error()] > 2 {
logger.LogAlwaysIf(ctx, fmt.Errorf("Following error has been printed %d times.. %w", m[err.Error()], err)) peersLogAlwaysIf(ctx, fmt.Errorf("Following error has been printed %d times.. %w", m[err.Error()], err))
// Reduce the count to introduce further delay in printing // Reduce the count to introduce further delay in printing
// but let it again print after the 2th attempt // but let it again print after the 2th attempt
m[err.Error()]-- m[err.Error()]--
@ -86,14 +86,14 @@ func bgFormatErasureCleanupTmp(diskPath string) {
tmpOld := pathJoin(diskPath, minioMetaTmpBucket+"-old", tmpID) tmpOld := pathJoin(diskPath, minioMetaTmpBucket+"-old", tmpID)
if err := renameAll(pathJoin(diskPath, minioMetaTmpBucket), if err := renameAll(pathJoin(diskPath, minioMetaTmpBucket),
tmpOld, diskPath); err != nil && !errors.Is(err, errFileNotFound) { tmpOld, diskPath); err != nil && !errors.Is(err, errFileNotFound) {
logger.LogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate", storageLogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty, please investigate",
pathJoin(diskPath, minioMetaTmpBucket), pathJoin(diskPath, minioMetaTmpBucket),
tmpOld, tmpOld,
osErrToFileErr(err))) osErrToFileErr(err)))
} }
if err := mkdirAll(pathJoin(diskPath, minioMetaTmpDeletedBucket), 0o777, diskPath); err != nil { if err := mkdirAll(pathJoin(diskPath, minioMetaTmpDeletedBucket), 0o777, diskPath); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("unable to create (%s) %w, drive may be faulty please investigate", storageLogIf(GlobalContext, fmt.Errorf("unable to create (%s) %w, drive may be faulty, please investigate",
pathJoin(diskPath, minioMetaTmpBucket), pathJoin(diskPath, minioMetaTmpBucket),
err)) err))
} }
@ -240,7 +240,7 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpo
format, err = getFormatErasureInQuorum(formatConfigs) format, err = getFormatErasureInQuorum(formatConfigs)
if err != nil { if err != nil {
logger.LogIf(GlobalContext, err) internalLogIf(GlobalContext, err)
return nil, nil, err return nil, nil, err
} }
@ -250,7 +250,7 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpo
return nil, nil, errNotFirstDisk return nil, nil, errNotFirstDisk
} }
if err = formatErasureFixDeploymentID(endpoints, storageDisks, format, formatConfigs); err != nil { if err = formatErasureFixDeploymentID(endpoints, storageDisks, format, formatConfigs); err != nil {
logger.LogIf(GlobalContext, err) storageLogIf(GlobalContext, err)
return nil, nil, err return nil, nil, err
} }
} }
@ -258,7 +258,7 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpo
globalDeploymentIDPtr.Store(&format.ID) globalDeploymentIDPtr.Store(&format.ID)
if err = formatErasureFixLocalDeploymentID(endpoints, storageDisks, format); err != nil { if err = formatErasureFixLocalDeploymentID(endpoints, storageDisks, format); err != nil {
logger.LogIf(GlobalContext, err) storageLogIf(GlobalContext, err)
return nil, nil, err return nil, nil, err
} }


@ -367,7 +367,7 @@ func serverHandleCmdArgs(ctxt serverCtxt) {
RoundTripper: NewHTTPTransportWithTimeout(1 * time.Hour), RoundTripper: NewHTTPTransportWithTimeout(1 * time.Hour),
Logger: func(err error) { Logger: func(err error) {
if err != nil && !errors.Is(err, context.Canceled) { if err != nil && !errors.Is(err, context.Canceled) {
logger.LogIf(GlobalContext, err) replLogIf(GlobalContext, err)
} }
}, },
}) })
@ -577,7 +577,7 @@ func initConfigSubsystem(ctx context.Context, newObject ObjectLayer) error {
} }
// Any other config errors we simply print a message and proceed forward. // Any other config errors we simply print a message and proceed forward.
logger.LogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing: %w", err)) configLogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing: %w", err))
} }
return nil return nil
@ -777,7 +777,7 @@ func serverMain(ctx *cli.Context) {
httpServer.TCPOptions.Trace = bootstrapTraceMsg httpServer.TCPOptions.Trace = bootstrapTraceMsg
go func() { go func() {
serveFn, err := httpServer.Init(GlobalContext, func(listenAddr string, err error) { serveFn, err := httpServer.Init(GlobalContext, func(listenAddr string, err error) {
logger.LogIf(GlobalContext, fmt.Errorf("Unable to listen on `%s`: %v", listenAddr, err)) bootLogIf(GlobalContext, fmt.Errorf("Unable to listen on `%s`: %v", listenAddr, err))
}) })
if err != nil { if err != nil {
globalHTTPServerErrorCh <- err globalHTTPServerErrorCh <- err
@ -839,7 +839,7 @@ func serverMain(ctx *cli.Context) {
logger.FatalIf(err, "Server startup canceled upon user request") logger.FatalIf(err, "Server startup canceled upon user request")
} }
logger.LogIf(GlobalContext, err) bootLogIf(GlobalContext, err)
} }
if !globalServerCtxt.StrictS3Compat { if !globalServerCtxt.StrictS3Compat {
@ -935,14 +935,14 @@ func serverMain(ctx *cli.Context) {
// Initialize transition tier configuration manager // Initialize transition tier configuration manager
bootstrapTrace("globalTierConfigMgr.Init", func() { bootstrapTrace("globalTierConfigMgr.Init", func() {
if err := globalTierConfigMgr.Init(GlobalContext, newObject); err != nil { if err := globalTierConfigMgr.Init(GlobalContext, newObject); err != nil {
logger.LogIf(GlobalContext, err) bootLogIf(GlobalContext, err)
} }
}) })
}() }()
// Initialize bucket notification system. // Initialize bucket notification system.
bootstrapTrace("initBucketTargets", func() { bootstrapTrace("initBucketTargets", func() {
logger.LogIf(GlobalContext, globalEventNotifier.InitBucketTargets(GlobalContext, newObject)) bootLogIf(GlobalContext, globalEventNotifier.InitBucketTargets(GlobalContext, newObject))
}) })
var buckets []BucketInfo var buckets []BucketInfo
@ -956,7 +956,7 @@ func serverMain(ctx *cli.Context) {
time.Sleep(time.Duration(r.Float64() * float64(time.Second))) time.Sleep(time.Duration(r.Float64() * float64(time.Second)))
continue continue
} }
logger.LogIf(GlobalContext, fmt.Errorf("Unable to list buckets to initialize bucket metadata sub-system: %w", err)) bootLogIf(GlobalContext, fmt.Errorf("Unable to list buckets to initialize bucket metadata sub-system: %w", err))
} }
break break


@ -33,7 +33,6 @@ import (
"github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/auth"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/pkg/sftp" "github.com/pkg/sftp"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
) )
@ -136,7 +135,7 @@ func (f *sftpDriver) getMinIOClient() (*minio.Client, error) {
} }
// Call hook for site replication. // Call hook for site replication.
logger.LogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ replLogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc, Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{ STSCredential: &madmin.SRSTSCredential{
AccessKey: cred.AccessKey, AccessKey: cred.AccessKey,


@ -43,13 +43,13 @@ func (s *sftpLogger) Info(tag xsftp.LogType, msg string) {
func (s *sftpLogger) Error(tag xsftp.LogType, err error) { func (s *sftpLogger) Error(tag xsftp.LogType, err error) {
switch tag { switch tag {
case xsftp.AcceptNetworkError: case xsftp.AcceptNetworkError:
logger.LogOnceIf(context.Background(), err, "accept-limit-sftp") sftpLogOnceIf(context.Background(), err, "accept-limit-sftp")
case xsftp.AcceptChannelError: case xsftp.AcceptChannelError:
logger.LogOnceIf(context.Background(), err, "accept-channel-sftp") sftpLogOnceIf(context.Background(), err, "accept-channel-sftp")
case xsftp.SSHKeyExchangeError: case xsftp.SSHKeyExchangeError:
logger.LogOnceIf(context.Background(), err, "key-exchange-sftp") sftpLogOnceIf(context.Background(), err, "key-exchange-sftp")
default: default:
logger.LogOnceIf(context.Background(), err, "unknown-error-sftp") sftpLogOnceIf(context.Background(), err, "unknown-error-sftp")
} }
} }


@ -51,16 +51,16 @@ func handleSignals() {
if httpServer := newHTTPServerFn(); httpServer != nil { if httpServer := newHTTPServerFn(); httpServer != nil {
if err := httpServer.Shutdown(); err != nil && !errors.Is(err, http.ErrServerClosed) { if err := httpServer.Shutdown(); err != nil && !errors.Is(err, http.ErrServerClosed) {
logger.LogIf(context.Background(), err) shutdownLogIf(context.Background(), err)
} }
} }
if objAPI := newObjectLayerFn(); objAPI != nil { if objAPI := newObjectLayerFn(); objAPI != nil {
logger.LogIf(context.Background(), objAPI.Shutdown(context.Background())) shutdownLogIf(context.Background(), objAPI.Shutdown(context.Background()))
} }
if srv := newConsoleServerFn(); srv != nil { if srv := newConsoleServerFn(); srv != nil {
logger.LogIf(context.Background(), srv.Shutdown()) shutdownLogIf(context.Background(), srv.Shutdown())
} }
if globalEventNotifier != nil { if globalEventNotifier != nil {
@ -73,7 +73,7 @@ func handleSignals() {
for { for {
select { select {
case err := <-globalHTTPServerErrorCh: case err := <-globalHTTPServerErrorCh:
logger.LogIf(context.Background(), err) shutdownLogIf(context.Background(), err)
exit(stopProcess()) exit(stopProcess())
case osSignal := <-globalOSSignalCh: case osSignal := <-globalOSSignalCh:
logger.Info("Exiting on signal: %s", strings.ToUpper(osSignal.String())) logger.Info("Exiting on signal: %s", strings.ToUpper(osSignal.String()))
@ -89,7 +89,7 @@ func handleSignals() {
if rerr == nil { if rerr == nil {
daemon.SdNotify(false, daemon.SdNotifyReady) daemon.SdNotify(false, daemon.SdNotifyReady)
} }
logger.LogIf(context.Background(), rerr) shutdownLogIf(context.Background(), rerr)
exit(stop && rerr == nil) exit(stop && rerr == nil)
case serviceStop: case serviceStop:
logger.Info("Stopping on service signal") logger.Info("Stopping on service signal")


@ -238,7 +238,7 @@ func (c *SiteReplicationSys) Init(ctx context.Context, objAPI ObjectLayer) error
if err == nil { if err == nil {
break break
} }
logger.LogOnceIf(context.Background(), fmt.Errorf("unable to initialize site replication subsystem: (%w)", err), "site-relication-init") replLogOnceIf(context.Background(), fmt.Errorf("unable to initialize site replication subsystem: (%w)", err), "site-relication-init")
duration := time.Duration(r.Float64() * float64(time.Minute)) duration := time.Duration(r.Float64() * float64(time.Minute))
if duration < time.Second { if duration < time.Second {
@ -313,7 +313,7 @@ func (c *SiteReplicationSys) saveToDisk(ctx context.Context, state srState) erro
} }
for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) { for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) {
logger.LogIf(ctx, err) replLogIf(ctx, err)
} }
c.Lock() c.Lock()
@ -334,7 +334,7 @@ func (c *SiteReplicationSys) removeFromDisk(ctx context.Context) error {
} }
for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) { for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) {
logger.LogIf(ctx, err) replLogIf(ctx, err)
} }
c.Lock() c.Lock()
@ -1186,7 +1186,7 @@ func (c *SiteReplicationSys) PeerBucketDeleteHandler(ctx context.Context, bucket
if err != nil { if err != nil {
if globalDNSConfig != nil { if globalDNSConfig != nil {
if err2 := globalDNSConfig.Put(bucket); err2 != nil { if err2 := globalDNSConfig.Put(bucket); err2 != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2)) replLogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2))
} }
} }
return err return err
@ -4074,7 +4074,7 @@ func (c *SiteReplicationSys) EditPeerCluster(ctx context.Context, peer madmin.Pe
wg.Wait() wg.Wait()
for dID, err := range errs { for dID, err := range errs {
logger.LogOnceIf(ctx, fmt.Errorf("unable to update peer %s: %w", state.Peers[dID].Name, err), "site-relication-edit") replLogOnceIf(ctx, fmt.Errorf("unable to update peer %s: %w", state.Peers[dID].Name, err), "site-relication-edit")
} }
// we can now save the cluster replication configuration state. // we can now save the cluster replication configuration state.
@ -4141,21 +4141,21 @@ func (c *SiteReplicationSys) updateTargetEndpoints(ctx context.Context, prevInfo
} }
err := globalBucketTargetSys.SetTarget(ctx, bucket, &bucketTarget, true) err := globalBucketTargetSys.SetTarget(ctx, bucket, &bucketTarget, true)
if err != nil { if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peer.Name, "Bucket target creation error", err)) replLogIf(ctx, c.annotatePeerErr(peer.Name, "Bucket target creation error", err))
continue continue
} }
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket) targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil { if err != nil {
logger.LogIf(ctx, err) replLogIf(ctx, err)
continue continue
} }
tgtBytes, err := json.Marshal(&targets) tgtBytes, err := json.Marshal(&targets)
if err != nil { if err != nil {
logger.LogIf(ctx, err) bugLogIf(ctx, err)
continue continue
} }
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil { if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
logger.LogIf(ctx, err) replLogIf(ctx, err)
continue continue
} }
} }
@ -4390,7 +4390,7 @@ func (c *SiteReplicationSys) healILMExpiryConfig(ctx context.Context, objAPI Obj
return wrapSRErr(err) return wrapSRErr(err)
} }
if err = admClient.SRStateEdit(ctx, madmin.SRStateEditReq{Peers: latestPeers, UpdatedAt: lastUpdate}); err != nil { if err = admClient.SRStateEdit(ctx, madmin.SRStateEditReq{Peers: latestPeers, UpdatedAt: lastUpdate}); err != nil {
logger.LogIf(ctx, c.annotatePeerErr(ps.Name, siteReplicationEdit, replLogIf(ctx, c.annotatePeerErr(ps.Name, siteReplicationEdit,
fmt.Errorf("Unable to heal site replication state for peer %s from peer %s : %w", fmt.Errorf("Unable to heal site replication state for peer %s from peer %s : %w",
ps.Name, latestPeerName, err))) ps.Name, latestPeerName, err)))
} }
@ -4493,7 +4493,7 @@ func (c *SiteReplicationSys) healBucketILMExpiry(ctx context.Context, objAPI Obj
if dID == globalDeploymentID() { if dID == globalDeploymentID() {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, finalConfigData); err != nil { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, finalConfigData); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal bucket ILM expiry data from peer site %s : %w", latestPeerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal bucket ILM expiry data from peer site %s : %w", latestPeerName, err))
} }
continue continue
} }
@ -4509,7 +4509,7 @@ func (c *SiteReplicationSys) healBucketILMExpiry(ctx context.Context, objAPI Obj
ExpiryLCConfig: latestExpLCConfig, ExpiryLCConfig: latestExpLCConfig,
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}); err != nil { }); err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal bucket ILM expiry data for peer %s from peer %s : %w", fmt.Errorf("Unable to heal bucket ILM expiry data for peer %s from peer %s : %w",
peerName, latestPeerName, err))) peerName, latestPeerName, err)))
} }
@ -4566,7 +4566,7 @@ func (c *SiteReplicationSys) healTagMetadata(ctx context.Context, objAPI ObjectL
} }
if dID == globalDeploymentID() { if dID == globalDeploymentID() {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, latestTaggingConfigBytes); err != nil { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, latestTaggingConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal tagging metadata from peer site %s : %w", latestPeerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal tagging metadata from peer site %s : %w", latestPeerName, err))
} }
continue continue
} }
@ -4582,7 +4582,7 @@ func (c *SiteReplicationSys) healTagMetadata(ctx context.Context, objAPI ObjectL
Tags: latestTaggingConfig, Tags: latestTaggingConfig,
}) })
if err != nil { if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal tagging metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) fmt.Errorf("Unable to heal tagging metadata for peer %s from peer %s : %w", peerName, latestPeerName, err)))
} }
} }
@ -4630,7 +4630,7 @@ func (c *SiteReplicationSys) healBucketPolicies(ctx context.Context, objAPI Obje
} }
if dID == globalDeploymentID() { if dID == globalDeploymentID() {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, latestIAMPolicy); err != nil { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, latestIAMPolicy); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal bucket policy metadata from peer site %s : %w", latestPeerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal bucket policy metadata from peer site %s : %w", latestPeerName, err))
} }
continue continue
} }
@ -4646,7 +4646,7 @@ func (c *SiteReplicationSys) healBucketPolicies(ctx context.Context, objAPI Obje
Policy: latestIAMPolicy, Policy: latestIAMPolicy,
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}); err != nil { }); err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal bucket policy metadata for peer %s from peer %s : %w", fmt.Errorf("Unable to heal bucket policy metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err))) peerName, latestPeerName, err)))
} }
@ -4705,7 +4705,7 @@ func (c *SiteReplicationSys) healBucketQuotaConfig(ctx context.Context, objAPI O
} }
if dID == globalDeploymentID() { if dID == globalDeploymentID() {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, latestQuotaConfigBytes); err != nil { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, latestQuotaConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal quota metadata from peer site %s : %w", latestPeerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal quota metadata from peer site %s : %w", latestPeerName, err))
} }
continue continue
} }
@ -4722,7 +4722,7 @@ func (c *SiteReplicationSys) healBucketQuotaConfig(ctx context.Context, objAPI O
Quota: latestQuotaConfigBytes, Quota: latestQuotaConfigBytes,
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}); err != nil { }); err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal quota config metadata for peer %s from peer %s : %w", fmt.Errorf("Unable to heal quota config metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err))) peerName, latestPeerName, err)))
} }
@ -4780,7 +4780,7 @@ func (c *SiteReplicationSys) healVersioningMetadata(ctx context.Context, objAPI
} }
if dID == globalDeploymentID() { if dID == globalDeploymentID() {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, latestVersioningConfigBytes); err != nil { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, latestVersioningConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal versioning metadata from peer site %s : %w", latestPeerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal versioning metadata from peer site %s : %w", latestPeerName, err))
} }
continue continue
} }
@ -4797,7 +4797,7 @@ func (c *SiteReplicationSys) healVersioningMetadata(ctx context.Context, objAPI
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}) })
if err != nil { if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal versioning config metadata for peer %s from peer %s : %w", fmt.Errorf("Unable to heal versioning config metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err))) peerName, latestPeerName, err)))
} }
@ -4855,7 +4855,7 @@ func (c *SiteReplicationSys) healSSEMetadata(ctx context.Context, objAPI ObjectL
} }
if dID == globalDeploymentID() { if dID == globalDeploymentID() {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, latestSSEConfigBytes); err != nil { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, latestSSEConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal sse metadata from peer site %s : %w", latestPeerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal sse metadata from peer site %s : %w", latestPeerName, err))
} }
continue continue
} }
@ -4872,7 +4872,7 @@ func (c *SiteReplicationSys) healSSEMetadata(ctx context.Context, objAPI ObjectL
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}) })
if err != nil { if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal SSE config metadata for peer %s from peer %s : %w", fmt.Errorf("Unable to heal SSE config metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err))) peerName, latestPeerName, err)))
} }
@ -4930,7 +4930,7 @@ func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI
} }
if dID == globalDeploymentID() { if dID == globalDeploymentID() {
if _, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, latestObjLockConfigBytes); err != nil { if _, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, latestObjLockConfigBytes); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal objectlock config metadata from peer site %s : %w", latestPeerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal objectlock config metadata from peer site %s : %w", latestPeerName, err))
} }
continue continue
} }
@ -4947,7 +4947,7 @@ func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}) })
if err != nil { if err != nil {
logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata,
fmt.Errorf("Unable to heal object lock config metadata for peer %s from peer %s : %w", fmt.Errorf("Unable to heal object lock config metadata for peer %s from peer %s : %w",
peerName, latestPeerName, err))) peerName, latestPeerName, err)))
} }
@ -5184,7 +5184,7 @@ func (c *SiteReplicationSys) healBucketReplicationConfig(ctx context.Context, ob
} }
if replMismatch { if replMismatch {
logger.LogIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket))) replLogIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket)))
} }
return nil return nil
} }
@ -5277,7 +5277,7 @@ func (c *SiteReplicationSys) healPolicies(ctx context.Context, objAPI ObjectLaye
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}) })
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err))
} }
} }
return nil return nil
@ -5338,7 +5338,7 @@ func (c *SiteReplicationSys) healUserPolicies(ctx context.Context, objAPI Object
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}) })
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping for %s from peer site %s -> site %s : %w", user, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping for %s from peer site %s -> site %s : %w", user, latestPeerName, peerName, err))
} }
} }
return nil return nil
@ -5401,7 +5401,7 @@ func (c *SiteReplicationSys) healGroupPolicies(ctx context.Context, objAPI Objec
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}) })
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM group policy mapping for %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal IAM group policy mapping for %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err))
} }
} }
return nil return nil
@ -5462,13 +5462,13 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
if creds.IsServiceAccount() { if creds.IsServiceAccount() {
claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, creds.AccessKey) claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, creds.AccessKey)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
continue continue
} }
_, policy, err := globalIAMSys.GetServiceAccount(ctx, creds.AccessKey) _, policy, err := globalIAMSys.GetServiceAccount(ctx, creds.AccessKey)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
continue continue
} }
@ -5476,7 +5476,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
if policy != nil { if policy != nil {
policyJSON, err = json.Marshal(policy) policyJSON, err = json.Marshal(policy)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
continue continue
} }
} }
@ -5499,7 +5499,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
}, },
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}); err != nil { }); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
} }
continue continue
} }
@ -5512,7 +5512,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
// policy. The session token will contain info about policy to // policy. The session token will contain info about policy to
// be applied. // be applied.
if !errors.Is(err, errNoSuchUser) { if !errors.Is(err, errNoSuchUser) {
logger.LogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
continue continue
} }
} else { } else {
@ -5530,7 +5530,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
}, },
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}); err != nil { }); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
} }
continue continue
} }
@ -5546,7 +5546,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
}, },
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}); err != nil { }); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal user %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal user %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
} }
} }
return nil return nil
@ -5610,7 +5610,7 @@ func (c *SiteReplicationSys) healGroups(ctx context.Context, objAPI ObjectLayer,
}, },
UpdatedAt: lastUpdate, UpdatedAt: lastUpdate,
}); err != nil { }); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to heal group %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) replLogIf(ctx, fmt.Errorf("Unable to heal group %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err))
} }
} }
return nil return nil


@ -20,8 +20,6 @@ package cmd
import ( import (
"context" "context"
"errors" "errors"
"github.com/minio/minio/internal/logger"
) )
// errMaxVersionsExceeded return error beyond 10000 (default) versions per object // errMaxVersionsExceeded return error beyond 10000 (default) versions per object
@ -176,7 +174,7 @@ func osErrToFileErr(err error) error {
return errFaultyDisk return errFaultyDisk
} }
if isSysErrInvalidArg(err) { if isSysErrInvalidArg(err) {
logger.LogIf(context.Background(), err) storageLogIf(context.Background(), err)
// For some odd calls with O_DIRECT reads // For some odd calls with O_DIRECT reads
// filesystems can return EINVAL, handle // filesystems can return EINVAL, handle
// these as FileNotFound instead. // these as FileNotFound instead.


@ -38,7 +38,6 @@ import (
"github.com/minio/minio/internal/grid" "github.com/minio/minio/internal/grid"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/rest" "github.com/minio/minio/internal/rest"
xnet "github.com/minio/pkg/v2/net" xnet "github.com/minio/pkg/v2/net"
xbufio "github.com/philhofer/fwd" xbufio "github.com/philhofer/fwd"
@ -695,7 +694,7 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri
for _, version := range versions { for _, version := range versions {
version.EncodeMsg(encoder) version.EncodeMsg(encoder)
} }
logger.LogIf(ctx, encoder.Flush()) storageLogIf(ctx, encoder.Flush())
errs = make([]error, len(versions)) errs = make([]error, len(versions))


@ -414,7 +414,7 @@ func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Re
return return
} }
logger.LogIf(r.Context(), msgp.Encode(w, &fi)) storageLogIf(r.Context(), msgp.Encode(w, &fi))
} }
// WriteMetadataHandler rpc handler to write new updated metadata. // WriteMetadataHandler rpc handler to write new updated metadata.
@ -495,7 +495,7 @@ func (s *storageRESTServer) ReadXLHandler(w http.ResponseWriter, r *http.Request
return return
} }
logger.LogIf(r.Context(), msgp.Encode(w, &rf)) storageLogIf(r.Context(), msgp.Encode(w, &rf))
} }
// ReadXLHandlerWS - read xl.meta for an object at path. // ReadXLHandlerWS - read xl.meta for an object at path.
@ -597,7 +597,7 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http
if ok { if ok {
_, err = rf.ReadFrom(sr.Reader) _, err = rf.ReadFrom(sr.Reader)
if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
logger.LogIf(r.Context(), err) storageLogIf(r.Context(), err)
} }
if err == nil || !errors.Is(err, xhttp.ErrNotImplemented) { if err == nil || !errors.Is(err, xhttp.ErrNotImplemented) {
return return
@ -607,7 +607,7 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http
_, err = xioutil.Copy(w, rc) _, err = xioutil.Copy(w, rc)
if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
logger.LogIf(r.Context(), err) storageLogIf(r.Context(), err)
} }
} }
@ -1180,25 +1180,25 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) {
hint = fmt.Sprintf("Run the following command to add write permissions: `sudo chown -R %s. <path> && sudo chmod u+rxw <path>`", username) hint = fmt.Sprintf("Run the following command to add write permissions: `sudo chown -R %s. <path> && sudo chmod u+rxw <path>`", username)
} }
if !exit { if !exit {
logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is not writable %s, %s", endpoint, hint), "log-fatal-errs") storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is not writable %s, %s", endpoint, hint), "log-fatal-errs")
} else { } else {
logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend") logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend")
} }
case errors.Is(err, errFaultyDisk): case errors.Is(err, errFaultyDisk):
if !exit { if !exit {
logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is faulty at %s, please replace the drive - drive will be offline", endpoint), "log-fatal-errs") storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is faulty at %s, please replace the drive - drive will be offline", endpoint), "log-fatal-errs")
} else { } else {
logger.Fatal(err, "Unable to initialize backend") logger.Fatal(err, "Unable to initialize backend")
} }
case errors.Is(err, errDiskFull): case errors.Is(err, errDiskFull):
if !exit { if !exit {
logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is already full at %s, incoming I/O will fail - drive will be offline", endpoint), "log-fatal-errs") storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is already full at %s, incoming I/O will fail - drive will be offline", endpoint), "log-fatal-errs")
} else { } else {
logger.Fatal(err, "Unable to initialize backend") logger.Fatal(err, "Unable to initialize backend")
} }
default: default:
if !exit { if !exit {
logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive %s returned an unexpected error: %w, please investigate - drive will be offline", endpoint, err), "log-fatal-errs") storageLogOnceIf(GlobalContext, fmt.Errorf("Drive %s returned an unexpected error: %w, please investigate - drive will be offline", endpoint, err), "log-fatal-errs")
} else { } else {
logger.Fatal(err, "Unable to initialize backend") logger.Fatal(err, "Unable to initialize backend")
} }
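
The helpers ending in LogOnceIf take an extra identifier, such as the "log-fatal-errs" string above, so a drive that stays faulty does not repeat the same message on every check. The sketch below shows one way a log-once helper could be keyed on that identifier; it is an assumption for illustration, not the implementation MinIO ships, which may also take the error value and expiry into account.

package main

import (
	"context"
	"errors"
	"log"
	"sync"
)

// onceLogger drops repeated reports that share the same subsystem and id.
// Illustrative sketch only; the real LogOnceIf lives in MinIO's logger package.
type onceLogger struct {
	mu   sync.Mutex
	seen map[string]struct{}
}

func (o *onceLogger) logOnceIf(ctx context.Context, subsystem string, err error, id string) {
	if err == nil {
		return
	}
	o.mu.Lock()
	defer o.mu.Unlock()
	key := subsystem + "/" + id
	if _, dup := o.seen[key]; dup {
		return // already reported once, stay quiet
	}
	o.seen[key] = struct{}{}
	log.Printf("subsystem=%s id=%s err=%v", subsystem, id, err)
}

func main() {
	o := &onceLogger{seen: make(map[string]struct{})}
	errDiskFull := errors.New("drive is full")
	for i := 0; i < 3; i++ {
		// Only the first iteration produces output for this identifier.
		o.logOnceIf(context.Background(), "storage", errDiskFull, "log-fatal-errs")
	}
}

Under this sketch, the branches of logFatalErrs that all pass "log-fatal-errs" would collapse into a single log line per process; the shipped implementation may dedupe differently.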


@ -40,7 +40,7 @@ func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode S
} }
switch errCode { switch errCode {
case ErrSTSInternalError, ErrSTSUpstreamError: case ErrSTSInternalError, ErrSTSUpstreamError:
logger.LogIf(ctx, err, logger.ErrorKind) stsLogIf(ctx, err, logger.ErrorKind)
} }
encodedErrorResponse := encodeResponse(stsErrorResponse) encodedErrorResponse := encodeResponse(stsErrorResponse)
writeResponse(w, stsErr.HTTPStatusCode, encodedErrorResponse, mimeXML) writeResponse(w, stsErr.HTTPStatusCode, encodedErrorResponse, mimeXML)


@ -314,7 +314,7 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) {
// Call hook for site replication. // Call hook for site replication.
if cred.ParentUser != globalActiveCred.AccessKey { if cred.ParentUser != globalActiveCred.AccessKey {
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc, Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{ STSCredential: &madmin.SRSTSCredential{
AccessKey: cred.AccessKey, AccessKey: cred.AccessKey,
@ -547,7 +547,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ
} }
// Call hook for site replication. // Call hook for site replication.
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc, Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{ STSCredential: &madmin.SRSTSCredential{
AccessKey: cred.AccessKey, AccessKey: cred.AccessKey,
@ -728,7 +728,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r *
} }
// Call hook for site replication. // Call hook for site replication.
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc, Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{ STSCredential: &madmin.SRSTSCredential{
AccessKey: cred.AccessKey, AccessKey: cred.AccessKey,
@ -898,7 +898,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h
} }
// Call hook for site replication. // Call hook for site replication.
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc, Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{ STSCredential: &madmin.SRSTSCredential{
AccessKey: tmpCredentials.AccessKey, AccessKey: tmpCredentials.AccessKey,
@ -1028,7 +1028,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCustomToken(w http.ResponseWriter, r *h
} }
// Call hook for site replication. // Call hook for site replication.
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSTSAcc, Type: madmin.SRIAMItemSTSAcc,
STSCredential: &madmin.SRSTSCredential{ STSCredential: &madmin.SRSTSCredential{
AccessKey: tmpCredentials.AccessKey, AccessKey: tmpCredentials.AccessKey,


@ -34,7 +34,6 @@ import (
"github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -534,7 +533,7 @@ func (config *TierConfigMgr) refreshTierConfig(ctx context.Context, objAPI Objec
case <-t.C: case <-t.C:
err := config.Reload(ctx, objAPI) err := config.Reload(ctx, objAPI)
if err != nil { if err != nil {
logger.LogIf(ctx, err) tierLogIf(ctx, err)
} }
} }
t.Reset(tierCfgRefresh + randInterval()) t.Reset(tierCfgRefresh + randInterval())


@ -36,7 +36,6 @@ import (
"github.com/klauspost/compress/s2" "github.com/klauspost/compress/s2"
"github.com/klauspost/compress/zstd" "github.com/klauspost/compress/zstd"
gzip "github.com/klauspost/pgzip" gzip "github.com/klauspost/pgzip"
"github.com/minio/minio/internal/logger"
"github.com/pierrec/lz4" "github.com/pierrec/lz4"
) )
@ -249,7 +248,7 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in
}() }()
if err := putObject(&rc, fi, name); err != nil { if err := putObject(&rc, fi, name); err != nil {
if o.ignoreErrs { if o.ignoreErrs {
logger.LogIf(ctx, err) s3LogIf(ctx, err)
return return
} }
asyncErrMu.Lock() asyncErrMu.Lock()
@ -273,7 +272,7 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in
if err := putObject(&rc, header.FileInfo(), name); err != nil { if err := putObject(&rc, header.FileInfo(), name); err != nil {
rc.Close() rc.Close()
if o.ignoreErrs { if o.ignoreErrs {
logger.LogIf(ctx, err) s3LogIf(ctx, err)
continue continue
} }
return err return err


@ -142,7 +142,7 @@ func IsDocker() bool {
} }
// Log error, as we will not propagate it to caller // Log error, as we will not propagate it to caller
logger.LogIf(GlobalContext, err) internalLogIf(GlobalContext, err)
return err == nil return err == nil
} }
@ -172,7 +172,7 @@ func IsBOSH() bool {
} }
// Log error, as we will not propagate it to caller // Log error, as we will not propagate it to caller
logger.LogIf(GlobalContext, err) internalLogIf(GlobalContext, err)
return err == nil return err == nil
} }
@ -189,7 +189,7 @@ func getHelmVersion(helmInfoFilePath string) string {
if !osIsNotExist(err) { if !osIsNotExist(err) {
reqInfo := (&logger.ReqInfo{}).AppendTags("helmInfoFilePath", helmInfoFilePath) reqInfo := (&logger.ReqInfo{}).AppendTags("helmInfoFilePath", helmInfoFilePath)
ctx := logger.SetReqInfo(GlobalContext, reqInfo) ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err) internalLogIf(ctx, err)
} }
return "" return ""
} }


@ -624,7 +624,7 @@ func NewHTTPTransportWithClientCerts(clientCert, clientKey string) *http.Transpo
defer cancel() defer cancel()
transport, err := s.NewHTTPTransportWithClientCerts(ctx, clientCert, clientKey) transport, err := s.NewHTTPTransportWithClientCerts(ctx, clientCert, clientKey)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load client key and cert, please check your client certificate configuration: %w", err)) internalLogIf(ctx, fmt.Errorf("Unable to load client key and cert, please check your client certificate configuration: %w", err))
} }
return transport return transport
} }


@ -969,7 +969,7 @@ func (p *xlStorageDiskIDCheck) monitorDiskWritable(ctx context.Context) {
goOffline := func(err error, spent time.Duration) { goOffline := func(err error, spent time.Duration) {
if p.health.status.CompareAndSwap(diskHealthOK, diskHealthFaulty) { if p.health.status.CompareAndSwap(diskHealthOK, diskHealthFaulty) {
logger.LogAlwaysIf(ctx, fmt.Errorf("node(%s): taking drive %s offline: %v", globalLocalNodeName, p.storage.String(), err)) storageLogAlwaysIf(ctx, fmt.Errorf("node(%s): taking drive %s offline: %v", globalLocalNodeName, p.storage.String(), err))
p.health.waiting.Add(1) p.health.waiting.Add(1)
go p.monitorDiskStatus(spent, fn) go p.monitorDiskStatus(spent, fn)
} }

Some files were not shown because too many files have changed in this diff.