mirror of https://github.com/minio/minio.git
Fix style of logOnceIf calls w/unique identifiers (#17631)
This commit is contained in:
parent 82075e8e3a
commit f64d62b01d
@@ -167,7 +167,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
 			if !IsErr(err, ignoredErrs...) {
 				logger.LogOnceIf(GlobalContext,
 					fmt.Errorf("Reading erasure shards at (%s: %s/%s) returned '%w', will attempt to reconstruct if we have quorum",
-						b.disk, b.volume, b.filePath, err), "bitrot-read-file-stream"+b.volume+b.filePath)
+						b.disk, b.volume, b.filePath, err), "bitrot-read-file-stream-"+b.volume+"-"+b.filePath)
 			}
 		}
 	} else {
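Every hunk in this commit makes the same change: the dynamic parts of a LogOnceIf dedup ID are joined with "-" separators instead of being concatenated directly. A self-contained sketch (illustrative values only, not minio code) of the collision the unseparated form allows:

package main

import "fmt"

func main() {
	// Old style: plain concatenation. Distinct (volume, filePath) pairs
	// can yield the same dedup ID, so one call site's error silences the
	// other's log line.
	idOld := func(volume, filePath string) string {
		return "bitrot-read-file-stream" + volume + filePath
	}
	// New style: a "-" between each dynamic part keeps the IDs apart.
	idNew := func(volume, filePath string) string {
		return "bitrot-read-file-stream-" + volume + "-" + filePath
	}

	fmt.Println(idOld("data1", "0/xl.meta") == idOld("data", "10/xl.meta")) // true: collision
	fmt.Println(idNew("data1", "0/xl.meta") == idNew("data", "10/xl.meta")) // false: unique IDs
}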
@@ -605,7 +605,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
 		return
 	}
 	if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
-		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete"+tgt.ARN)
+		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
 		sendEvent(eventArgs{
 			BucketName: dobj.Bucket,
 			Object: ObjectInfo{
@@ -976,7 +976,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 
 	cfg, err := getReplicationConfig(ctx, bucket)
 	if err != nil {
-		logger.LogOnceIf(ctx, err, "get-replication-config"+bucket)
+		logger.LogOnceIf(ctx, err, "get-replication-config-"+bucket)
 		sendEvent(eventArgs{
 			EventName:  event.ObjectReplicationNotTracked,
 			BucketName: bucket,
@@ -1127,7 +1127,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 	}
 
 	if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
-		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN), "replication-target-offline-obj"+tgt.ARN)
+		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN), "replication-target-offline-obj-"+tgt.ARN)
 		sendEvent(eventArgs{
 			EventName:  event.ObjectReplicationNotTracked,
 			BucketName: bucket,
@@ -1277,7 +1277,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 	}
 
 	if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
-		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN), "replication-target-offline-all"+tgt.ARN)
+		logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN), "replication-target-offline-all-"+tgt.ARN)
 		sendEvent(eventArgs{
 			EventName:  event.ObjectReplicationNotTracked,
 			BucketName: bucket,
@@ -1335,7 +1335,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		if errors.Is(err, errFileNotFound) {
 			return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object)
 		}
-		logger.LogOnceIf(ctx, err, "erasure-object-rename"+bucket+object)
+		logger.LogOnceIf(ctx, err, "erasure-object-rename-"+bucket+"-"+object)
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
 
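Several call sites interpolate two dynamic parts (bucket and object above, volumes further below), and the fix inserts a separator between them as well as after the static prefix. A hypothetical helper, not part of this commit or of minio, that captures the resulting convention:

package main

import (
	"fmt"
	"strings"
)

// logOnceID joins a static slug with each dynamic part using "-",
// mirroring the ID shape the hunks above and below converge on.
func logOnceID(slug string, parts ...string) string {
	return strings.Join(append([]string{slug}, parts...), "-")
}

func main() {
	// Equivalent to "erasure-object-rename-" + bucket + "-" + object.
	fmt.Println(logOnceID("erasure-object-rename", "mybucket", "a/b/object"))
	// Output: erasure-object-rename-mybucket-a/b/object
}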
@@ -1343,7 +1343,7 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
 	merged, err := z.listPath(ctx, &opts)
 	if err != nil && err != io.EOF {
 		if !isErrBucketNotFound(err) {
-			logger.LogOnceIf(ctx, err, "erasure-list-objects-path"+bucket)
+			logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket)
 		}
 		return loi, err
 	}
@@ -176,7 +176,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint) (disksInfo []madmin.
 	g.Go(func() error {
 		diskEndpoint := endpoints[index].String()
 		if disks[index] == OfflineDisk {
-			logger.LogOnceIf(GlobalContext, fmt.Errorf("%s: %s", errDiskNotFound, endpoints[index]), "get-disks-info-offline"+diskEndpoint)
+			logger.LogOnceIf(GlobalContext, fmt.Errorf("%s: %s", errDiskNotFound, endpoints[index]), "get-disks-info-offline-"+diskEndpoint)
 			disksInfo[index] = madmin.Disk{
 				State:    diskErrToDriveState(errDiskNotFound),
 				Endpoint: diskEndpoint,
@@ -807,7 +807,7 @@ func (sys *NotificationSys) addNodeErr(nodeInfo madmin.NodeInfo, peerClient *pee
 	addr := peerClient.host.String()
 	reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
 	ctx := logger.SetReqInfo(GlobalContext, reqInfo)
-	logger.LogOnceIf(ctx, err, "add-node-err"+addr)
+	logger.LogOnceIf(ctx, err, "add-node-err-"+addr)
 	nodeInfo.SetAddr(addr)
 	nodeInfo.SetError(err.Error())
 }
@@ -2168,7 +2168,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
 		logger.LogOnceIf(ctx, fmt.Errorf("srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v",
 			srcVolume, srcPath,
 			dstVolume, dstPath,
-			err), "xl-storage-rename-data"+srcVolume+dstVolume)
+			err), "xl-storage-rename-data-"+srcVolume+"-"+dstVolume)
 	}
 	if err == nil && s.globalSync {
 		globalSync()
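The reason these IDs must be unique at all: logger.LogOnceIf deduplicates by the ID string, emitting a given error only once per ID. A minimal sketch of such a facility, assuming a simple seen-set; the real minio logger keeps richer state than this:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// onceLogger emits an error only the first time a dedup ID is seen.
type onceLogger struct {
	mu   sync.Mutex
	seen map[string]struct{}
}

func (l *onceLogger) LogOnceIf(err error, id string) {
	if err == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if _, ok := l.seen[id]; ok {
		return // this ID already logged; stay quiet
	}
	l.seen[id] = struct{}{}
	fmt.Printf("ERROR [%s]: %v\n", id, err)
}

func main() {
	l := &onceLogger{seen: make(map[string]struct{})}
	err := errors.New("remote target is offline")
	l.LogOnceIf(err, "replication-target-offline-delete-arn:aws:s3:::a") // printed
	l.LogOnceIf(err, "replication-target-offline-delete-arn:aws:s3:::a") // suppressed
	l.LogOnceIf(err, "replication-target-offline-delete-arn:aws:s3:::b") // printed: new ID
}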