Mirror of https://github.com/minio/minio.git
Print certain log messages once per error (#17484)
Commit: 21fbe88e1f
Parent: 1f8b9b4bd5
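
The patch swaps logger.LogIf for logger.LogOnceIf at call sites that can fire repeatedly, passing a per-call-site key so a recurring error is reported once instead of flooding the log. Below is a minimal sketch of that once-per-key idea, assuming a plain mutex-guarded map; logOnceIf and onceSeen are illustrative names only, not MinIO's actual logger internals.

package main

import (
	"fmt"
	"sync"
)

// onceSeen tracks which keys have already produced a log line.
var (
	onceMu   sync.Mutex
	onceSeen = make(map[string]struct{})
)

// logOnceIf prints err the first time a given key is seen and suppresses repeats.
func logOnceIf(err error, key string) {
	if err == nil {
		return
	}
	onceMu.Lock()
	defer onceMu.Unlock()
	if _, done := onceSeen[key]; done {
		return // this key already logged once, drop the repeat
	}
	onceSeen[key] = struct{}{}
	fmt.Printf("ERROR [%s]: %v\n", key, err)
}

func main() {
	err := fmt.Errorf("remote target is offline for bucket:photos arn:example-arn")
	for i := 0; i < 3; i++ {
		// The key mixes a fixed call-site tag with a variable part (here a sample ARN),
		// mirroring how the patch builds its keys.
		logOnceIf(err, "replication-target-offline-delete"+"example-arn")
	}
	// Only one line is printed; the two repeats are dropped.
}

As the diff shows, the keys combine a fixed tag with variable parts such as the bucket, ARN, volume, or path (for example "replication-target-offline-delete"+tgt.ARN), so distinct targets still each get their own single message.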
@@ -165,9 +165,9 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
 		b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
 		if err != nil {
 			if !IsErr(err, ignoredErrs...) {
-				logger.LogIf(GlobalContext,
+				logger.LogOnceIf(GlobalContext,
 					fmt.Errorf("Reading erasure shards at (%s: %s/%s) returned '%w', will attempt to reconstruct if we have quorum",
-						b.disk, b.volume, b.filePath, err))
+						b.disk, b.volume, b.filePath, err), "bitrot-read-file-stream"+b.volume+b.filePath)
 			}
 		}
 	} else {
@@ -605,7 +605,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
 		return
 	}
 	if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
-		logger.LogIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN))
+		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete"+tgt.ARN)
 		sendEvent(eventArgs{
 			BucketName: dobj.Bucket,
 			Object: ObjectInfo{
@@ -970,7 +970,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 
 	cfg, err := getReplicationConfig(ctx, bucket)
 	if err != nil {
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "get-replication-config"+bucket)
 		sendEvent(eventArgs{
 			EventName: event.ObjectReplicationNotTracked,
 			BucketName: bucket,
@@ -1121,7 +1121,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 	}
 
 	if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
-		logger.LogIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN))
+		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN), "replication-target-offline-obj"+tgt.ARN)
 		sendEvent(eventArgs{
 			EventName: event.ObjectReplicationNotTracked,
 			BucketName: bucket,
@@ -1271,7 +1271,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 	}
 
 	if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
-		logger.LogIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN))
+		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN), "replication-target-offline-all"+tgt.ARN)
 		sendEvent(eventArgs{
 			EventName: event.ObjectReplicationNotTracked,
 			BucketName: bucket,
@@ -1194,7 +1194,7 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
 			return false
 		}
 		// Assume it is still there.
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "non-transition-expiry")
 		return false
 	}
@@ -317,7 +317,7 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea
 	}
 
 	if err = e.DecodeDataAndParityBlocks(ctx, bufs); err != nil {
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "erasure-heal-decode")
 		return err
 	}
@@ -328,7 +328,7 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea
 		}
 
 		if err = w.Write(ctx, bufs); err != nil {
-			logger.LogIf(ctx, err)
+			logger.LogOnceIf(ctx, err, "erasure-heal-write")
 			return err
 		}
 	}
@@ -567,7 +567,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 	if !latestMeta.XLV1 && !latestMeta.Deleted && !recreate && disksToHealCount > latestMeta.Erasure.ParityBlocks {
 		// When disk to heal count is greater than parity blocks we should simply error out.
 		err := fmt.Errorf("(%d > %d) more drives are expected to heal than parity, returned errors: %v (dataErrs %v) -> %s/%s(%s)", disksToHealCount, latestMeta.Erasure.ParityBlocks, errs, dataErrs, bucket, object, versionID)
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "heal-object-count-gt-parity")
 		return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
 			bucket, object, versionID), err
 	}
@@ -591,7 +591,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 	if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(availableDisks) {
 		err := fmt.Errorf("unexpected file distribution (%v) from available disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)",
 			latestMeta.Erasure.Distribution, availableDisks, bucket, object, versionID)
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "heal-object-available-disks")
 		return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
 			bucket, object, versionID), err
 	}
@@ -601,7 +601,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 	if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(outDatedDisks) {
 		err := fmt.Errorf("unexpected file distribution (%v) from outdated disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)",
 			latestMeta.Erasure.Distribution, outDatedDisks, bucket, object, versionID)
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "heal-object-outdated-disks")
 		return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
 			bucket, object, versionID), err
 	}
@@ -611,7 +611,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 	if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(partsMetadata) {
 		err := fmt.Errorf("unexpected file distribution (%v) from metadata entries (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)",
 			latestMeta.Erasure.Distribution, len(partsMetadata), bucket, object, versionID)
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "heal-object-metadata-entries")
 		return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
 			bucket, object, versionID), err
 	}
@@ -673,7 +673,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 	if !fi.Deleted && len(fi.Erasure.Distribution) != len(onlineDisks) {
 		err := fmt.Errorf("unexpected file distribution (%v) from online disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)",
 			fi.Erasure.Distribution, onlineDisks, bucket, object, opts.VersionID)
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "get-object-file-info-manually-modified")
 		return fi, nil, nil, toObjectErr(err, bucket, object, opts.VersionID)
 	}
@@ -1322,7 +1322,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		if errors.Is(err, errFileNotFound) {
 			return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object)
 		}
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "erasure-object-rename"+bucket+object)
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
@@ -950,7 +950,7 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
 	if !isMinioMetaBucketName(bucket) {
 		avail, err := hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()...), data.Size())
 		if err != nil {
-			logger.LogIf(ctx, err)
+			logger.LogOnceIf(ctx, err, "erasure-write-quorum")
 			return ObjectInfo{}, toObjectErr(errErasureWriteQuorum)
 		}
 		if !avail {
@@ -1342,7 +1342,7 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
 	merged, err := z.listPath(ctx, &opts)
 	if err != nil && err != io.EOF {
 		if !isErrBucketNotFound(err) {
-			logger.LogIf(ctx, err)
+			logger.LogOnceIf(ctx, err, "erasure-list-objects-path"+bucket)
 		}
 		return loi, err
 	}
@@ -1735,7 +1735,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi
 	for _, pool := range z.serverPools {
 		result, err := pool.HealFormat(ctx, dryRun)
 		if err != nil && !errors.Is(err, errNoHealRequired) {
-			logger.LogIf(ctx, err)
+			logger.LogOnceIf(ctx, err, "erasure-heal-format")
 			continue
 		}
 		// Count errNoHealRequired across all serverPools,
@@ -176,7 +176,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint) (disksInfo []madmin.
 		g.Go(func() error {
 			diskEndpoint := endpoints[index].String()
 			if disks[index] == OfflineDisk {
-				logger.LogIf(GlobalContext, fmt.Errorf("%s: %s", errDiskNotFound, endpoints[index]))
+				logger.LogOnceIf(GlobalContext, fmt.Errorf("%s: %s", errDiskNotFound, endpoints[index]), "get-disks-info-offline"+diskEndpoint)
 				disksInfo[index] = madmin.Disk{
 					State: diskErrToDriveState(errDiskNotFound),
 					Endpoint: diskEndpoint,
@@ -410,7 +410,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
 			if cache.Info.LastUpdate.Equal(lastSave) {
 				continue
 			}
-			logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName))
+			logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update")
 			updates <- cache.clone()
 			lastSave = cache.Info.LastUpdate
 		case v, ok := <-bucketResults:
@@ -418,7 +418,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
 				// Save final state...
 				cache.Info.NextCycle = wantCycle
 				cache.Info.LastUpdate = time.Now()
-				logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName))
+				logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed")
 				updates <- cache
 				return
 			}
@@ -77,7 +77,7 @@ func readKeyEtcd(ctx context.Context, client *etcd.Client, key string) ([]byte,
 	defer cancel()
 	resp, err := client.Get(timeoutCtx, key)
 	if err != nil {
-		logger.LogIf(ctx, err)
+		logger.LogOnceIf(ctx, err, "etcd-retrieve-keys")
 		return nil, etcdErrToErr(err, client.Endpoints())
 	}
 	if resp.Count == 0 {
@@ -48,7 +48,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError
 	locationConstraint := createBucketLocationConfiguration{}
 	err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength)
 	if err != nil && r.ContentLength != 0 {
-		logger.LogIf(GlobalContext, err)
+		logger.LogOnceIf(GlobalContext, err, "location-constraint-xml-parsing")
 		// Treat all other failures as XML parsing errors.
 		return "", ErrMalformedXML
 	} // else for both err as nil or io.EOF
@@ -149,7 +149,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
 	if err != nil {
 		// Folder could have gone away in-between
 		if err != errVolumeNotFound && err != errFileNotFound {
-			logger.LogIf(ctx, err)
+			logger.LogOnceIf(ctx, err, "metacache-walk-scan-dir")
 		}
 		if opts.ReportNotFound && err == errFileNotFound && current == opts.BaseDir {
 			return errFileNotFound
@@ -211,7 +211,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
 				// while being concurrently listed at the same time in
 				// such scenarios the 'xl.meta' might get truncated
 				if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) {
-					logger.LogIf(ctx, err)
+					logger.LogOnceIf(ctx, err, "metacache-walk-read-metadata")
 				}
 				continue
 			}
@@ -804,7 +804,7 @@ func (sys *NotificationSys) addNodeErr(nodeInfo madmin.NodeInfo, peerClient *pee
 	addr := peerClient.host.String()
 	reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
 	ctx := logger.SetReqInfo(GlobalContext, reqInfo)
-	logger.LogIf(ctx, err)
+	logger.LogOnceIf(ctx, err, "add-node-err"+addr)
 	nodeInfo.SetAddr(addr)
 	nodeInfo.SetError(err.Error())
 }
@@ -550,7 +550,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
 			return
 		}
 		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
-			logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err))
+			logger.LogOnceIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err), "get-object-handler-write")
 		}
 		return
 	}
@@ -561,7 +561,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
 			return
 		}
 		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
-			logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err))
+			logger.LogOnceIf(ctx, fmt.Errorf("Unable to write all the data to client: %w", err), "get-object-handler-close")
 		}
 		return
 	}
@@ -480,7 +480,7 @@ func (s *peerRESTServer) GetSysConfigHandler(w http.ResponseWriter, r *http.Requ
 
 	info := madmin.GetSysConfig(ctx, r.Host)
 
-	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
+	logger.LogOnceIf(ctx, gob.NewEncoder(w).Encode(info), "get-sys-config")
 }
 
 // GetSysServicesHandler - returns system services information.
@@ -2164,10 +2164,10 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
 	}
 	if err != nil && !IsErr(err, ignoredErrs...) && !contextCanceled(ctx) {
 		// Only log these errors if context is not yet canceled.
-		logger.LogIf(ctx, fmt.Errorf("srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v",
+		logger.LogOnceIf(ctx, fmt.Errorf("srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v",
 			srcVolume, srcPath,
 			dstVolume, dstPath,
-			err))
+			err), "xl-storage-rename-data"+srcVolume+dstVolume)
 	}
 	if err == nil && s.globalSync {
 		globalSync()