logging: Add subsystem to log API (#19002)

Create new logging code paths for the different subsystems in the code. This will
make maintenance easier later.

Also introduce bugLogIf() for errors that should not happen in the first
place.
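
The new helpers are not defined in the hunks shown here. A minimal sketch of the pattern, assuming a hypothetical central logIf entry point and illustrative subsystem tags (the real signatures in the logger package may differ):

package logging

import (
	"context"
	"errors"
	"log"
)

// logIf is a stand-in for the central logging entry point; it tags every
// message with the subsystem that reported it and skips context cancellations.
func logIf(ctx context.Context, subsystem string, err error) {
	if err == nil || errors.Is(err, context.Canceled) {
		return
	}
	log.Printf("[%s] %v", subsystem, err)
}

// Thin per-subsystem helpers keep call sites short and make it possible to
// route, filter, or rate-limit logs per subsystem later.
func storageLogIf(ctx context.Context, err error) { logIf(ctx, "storage", err) }
func decomLogIf(ctx context.Context, err error)   { logIf(ctx, "decommission", err) }

// bugLogIf marks conditions that should never happen in the first place.
func bugLogIf(ctx context.Context, err error) { logIf(ctx, "internal: bug", err) }

Call sites then shrink from logger.LogIf(ctx, err) to, for example, decomLogIf(ctx, err), as the hunks below show.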
Anis Eleuch authored on 2024-04-04 13:04:40 +01:00, committed by GitHub
parent 2228eb61cb
commit 95bf4a57b6
123 changed files with 972 additions and 786 deletions

@@ -467,7 +467,7 @@ func (p poolMeta) save(ctx context.Context, pools []*erasureSets) error {
for i, eset := range pools {
if err = saveConfig(ctx, eset, poolMetaName, buf); err != nil {
if !errors.Is(err, context.Canceled) {
-logger.LogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err))
+storageLogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err))
}
return err
}
@@ -542,11 +542,11 @@ func (z *erasureServerPools) Init(ctx context.Context) error {
return
}
if configRetriableErrors(err) {
-logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err))
+decomLogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err))
time.Sleep(time.Second + time.Duration(r.Float64()*float64(5*time.Second)))
continue
}
-logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err))
+decomLogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err))
return
}
}
@@ -741,7 +741,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
const envDecomWorkers = "_MINIO_DECOMMISSION_WORKERS"
workerSize, err := env.GetInt(envDecomWorkers, len(pool.sets))
if err != nil {
-logger.LogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets)))
+decomLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets)))
workerSize = len(pool.sets)
}
@@ -852,7 +852,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
}
stopFn(err)
if err != nil {
-logger.LogIf(ctx, err)
+decomLogIf(ctx, err)
failure = true
}
z.poolMetaMutex.Lock()
@@ -877,7 +877,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
}); err != nil {
stopFn(err)
failure = true
-logger.LogIf(ctx, err)
+decomLogIf(ctx, err)
continue
}
stopFn(nil)
@@ -906,20 +906,20 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) {
ignore = true
stopFn(err)
-logger.LogIf(ctx, err)
+decomLogIf(ctx, err)
break
}
}
if err != nil {
failure = true
-logger.LogIf(ctx, err)
+decomLogIf(ctx, err)
stopFn(err)
continue
}
if err = z.decommissionObject(ctx, bi.Name, gr); err != nil {
stopFn(err)
failure = true
-logger.LogIf(ctx, err)
+decomLogIf(ctx, err)
continue
}
stopFn(nil)
@@ -953,13 +953,13 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
stopFn(err)
auditLogDecom(ctx, "DecomDeleteObject", bi.Name, entry.name, "", err)
if err != nil {
-logger.LogIf(ctx, err)
+decomLogIf(ctx, err)
}
}
z.poolMetaMutex.Lock()
z.poolMeta.TrackCurrentBucketObject(idx, bi.Name, entry.name)
ok, err := z.poolMeta.updateAfter(ctx, idx, z.serverPools, 30*time.Second)
-logger.LogIf(ctx, err)
+decomLogIf(ctx, err)
if ok {
globalNotificationSys.ReloadPoolMeta(ctx)
}
@@ -987,7 +987,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
}
setN := humanize.Ordinal(setIdx + 1)
retryDur := time.Duration(rand.Float64() * float64(5*time.Second))
-logger.LogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v, retrying in %v", setN, err, retryDur), "decom-listing-failed"+setN)
+decomLogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v, retrying in %v", setN, err, retryDur), "decom-listing-failed"+setN)
time.Sleep(retryDur)
}
}(setIdx)
@@ -1055,7 +1055,7 @@ func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx i
z.poolMetaMutex.Lock()
if z.poolMeta.BucketDone(idx, bucket) {
// remove from pendingBuckets and persist.
-logger.LogIf(ctx, z.poolMeta.save(ctx, z.serverPools))
+decomLogIf(ctx, z.poolMeta.save(ctx, z.serverPools))
}
z.poolMetaMutex.Unlock()
continue
@@ -1072,7 +1072,7 @@ func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx i
z.poolMetaMutex.Lock()
if z.poolMeta.BucketDone(idx, bucket) {
-logger.LogIf(ctx, z.poolMeta.save(ctx, z.serverPools))
+decomLogIf(ctx, z.poolMeta.save(ctx, z.serverPools))
}
z.poolMetaMutex.Unlock()
}
@@ -1170,8 +1170,8 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in
dctx = logger.SetReqInfo(dctx, &logger.ReqInfo{})
if err := z.decommissionInBackground(dctx, idx); err != nil {
-logger.LogIf(GlobalContext, err)
-logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx))
+decomLogIf(GlobalContext, err)
+decomLogIf(GlobalContext, z.DecommissionFailed(dctx, idx))
return
}
@@ -1181,20 +1181,20 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in
z.poolMetaMutex.Unlock()
if !failed {
-logger.Event(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine)
+decomLogEvent(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine)
err := z.checkAfterDecom(dctx, idx)
if err != nil {
-logger.LogIf(ctx, err)
+decomLogIf(ctx, err)
failed = true
}
}
if failed {
// Decommission failed indicate as such.
-logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx))
+decomLogIf(GlobalContext, z.DecommissionFailed(dctx, idx))
} else {
// Complete the decommission..
-logger.LogIf(GlobalContext, z.CompleteDecommission(dctx, idx))
+decomLogIf(GlobalContext, z.CompleteDecommission(dctx, idx))
}
}