log: Add logger.Event to send to console and other logger targets (#19060)

Add a new function logger.Event() that sends a log entry to the console
and to the configured http/kafka log webhooks. It is used for internal
events such as disk healing, rebalancing, and decommissioning.
Anis Eleuch
2024-02-16 00:13:30 +01:00
committed by GitHub
parent f9dbf41e27
commit 68dde2359f
12 changed files with 71 additions and 39 deletions
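
For context, the calls added in this diff all take the shape logger.Event(ctx, format, args...). Below is a minimal, self-contained sketch of what such an Event helper could look like; it is an illustration only, not MinIO's internal/logger implementation, and the Target interface, consoleTarget type, and targets slice are assumptions: format the message once, then fan it out best-effort to the console and to any configured http/kafka webhook targets.

// Sketch only: not the actual internal/logger implementation.
// Target, consoleTarget, and targets are assumed names for illustration.
package main

import (
	"context"
	"fmt"
)

// Target stands in for a configured log sink (console, http webhook, kafka, ...).
type Target interface {
	Send(ctx context.Context, entry string) error
}

// consoleTarget writes entries to standard output.
type consoleTarget struct{}

func (consoleTarget) Send(_ context.Context, entry string) error {
	_, err := fmt.Println(entry)
	return err
}

// targets would be populated from the server configuration: the console plus
// any http/kafka webhook targets.
var targets = []Target{consoleTarget{}}

// Event formats msg once and forwards the result to every registered target.
// Delivery is best-effort: emitting an event must not fail the caller.
func Event(ctx context.Context, msg string, args ...interface{}) {
	entry := fmt.Sprintf(msg, args...)
	for _, t := range targets {
		_ = t.Send(ctx, entry)
	}
}

func main() {
	Event(context.Background(), "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", "/mnt/disk1")
}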


@@ -713,7 +713,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
 	select {
 	case globalBackgroundHealRoutine.tasks <- task:
 		if serverDebugLog {
-			logger.Info("Task in the queue: %#v", task)
+			fmt.Printf("Task in the queue: %#v\n", task)
 		}
 	default:
 		// task queue is full, no more workers, we shall move on and heal later.
@@ -730,7 +730,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
 	select {
 	case globalBackgroundHealRoutine.tasks <- task:
 		if serverDebugLog {
-			logger.Info("Task in the queue: %#v", task)
+			fmt.Printf("Task in the queue: %#v\n", task)
 		}
 	case <-h.ctx.Done():
 		return nil


@@ -891,7 +891,7 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
 	}
 	// Similar check to http.checkWriteHeaderCode
 	if statusCode < 100 || statusCode > 999 {
-		logger.Error(fmt.Sprintf("invalid WriteHeader code %v", statusCode))
+		logger.LogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
 		statusCode = http.StatusInternalServerError
 	}
 	setCommonHeaders(w)
@@ -961,7 +961,7 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
 	// Similar check to http.checkWriteHeaderCode
 	if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 {
-		logger.Error(fmt.Sprintf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
+		logger.LogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
 		err.HTTPStatusCode = http.StatusInternalServerError
 	}


@@ -420,7 +420,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 		tracker = initHealingTracker(disk, mustGetUUID())
 	}
-	logger.Info(fmt.Sprintf("Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint))
+	logger.Event(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
 	buckets, _ := z.ListBuckets(ctx, BucketOptions{})
 	// Buckets data are dispersed in multiple pools/sets, make
@@ -440,10 +440,6 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 		return buckets[i].Created.After(buckets[j].Created)
 	})
-	if serverDebugLog {
-		logger.Info("Healing drive '%v' on %s pool, belonging to %s erasure set", disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1))
-	}
 	// Load bucket totals
 	cache := dataUsageCache{}
 	if err := cache.load(ctx, z.serverPools[poolIdx].sets[setIdx], dataUsageCacheName); err == nil {
@@ -464,9 +460,9 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 	}
 	if tracker.ItemsFailed > 0 {
-		logger.Info("Healing of drive '%s' failed (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
+		logger.Event(ctx, "Healing of drive '%s' failed (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
 	} else {
-		logger.Info("Healing of drive '%s' complete (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
+		logger.Event(ctx, "Healing of drive '%s' complete (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
 	}
 	if len(tracker.QueuedBuckets) > 0 {
@@ -475,7 +471,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 	if serverDebugLog {
 		tracker.printTo(os.Stdout)
-		logger.Info("\n")
+		fmt.Printf("\n")
 	}
 	if tracker.HealID == "" { // HealID was empty only before Feb 2023


@@ -1167,7 +1167,7 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in
 	z.poolMetaMutex.Unlock()
 	if !failed {
-		logger.Info("Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine)
+		logger.Event(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine)
 		err := z.checkAfterDecom(dctx, idx)
 		if err != nil {
 			logger.LogIf(ctx, err)


@@ -432,6 +432,8 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
 		}
 	}()
+	logger.Event(ctx, "Pool %d rebalancing is started", poolIdx+1)
 	for {
 		select {
 		case <-ctx.Done():
@@ -456,6 +458,8 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
 		z.bucketRebalanceDone(bucket, poolIdx)
 	}
+	logger.Event(ctx, "Pool %d rebalancing is done", poolIdx+1)
 	return err
 }


@@ -35,7 +35,7 @@ type minioLogger struct{}
 // Print implement Logger
 func (log *minioLogger) Print(sessionID string, message interface{}) {
 	if serverDebugLog {
-		logger.Info("%s %s", sessionID, message)
+		fmt.Printf("%s %s\n", sessionID, message)
 	}
 }
@@ -43,9 +43,9 @@ func (log *minioLogger) Print(sessionID string, message interface{}) {
 func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}) {
 	if serverDebugLog {
 		if sessionID != "" {
-			logger.Info("%s %s", sessionID, fmt.Sprintf(format, v...))
+			fmt.Printf("%s %s\n", sessionID, fmt.Sprintf(format, v...))
 		} else {
-			logger.Info(format, v...)
+			fmt.Printf(format+"\n", v...)
 		}
 	}
 }
@@ -54,9 +54,9 @@ func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}
 func (log *minioLogger) PrintCommand(sessionID string, command string, params string) {
 	if serverDebugLog {
 		if command == "PASS" {
-			logger.Info("%s > PASS ****", sessionID)
+			fmt.Printf("%s > PASS ****\n", sessionID)
 		} else {
-			logger.Info("%s > %s %s", sessionID, command, params)
+			fmt.Printf("%s > %s %s\n", sessionID, command, params)
 		}
 	}
 }
@@ -64,7 +64,7 @@ func (log *minioLogger) PrintCommand(sessionID string, command string, params st
 // PrintResponse implement Logger
 func (log *minioLogger) PrintResponse(sessionID string, code int, message string) {
 	if serverDebugLog {
-		logger.Info("%s < %d %s", sessionID, code, message)
+		fmt.Printf("%s < %d %s\n", sessionID, code, message)
 	}
 }


@@ -177,7 +177,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 		numHealers = uint64(v)
 	}
-	logger.Info(fmt.Sprintf("Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers))
+	logger.Event(ctx, fmt.Sprintf("Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers))
 	jt, _ := workers.New(int(numHealers))


@@ -478,7 +478,7 @@ func bootstrapTraceMsg(msg string) {
 	globalBootstrapTracer.Record(info)
 	if serverDebugLog {
-		logger.Info(fmt.Sprint(time.Now().Round(time.Millisecond).Format(time.RFC3339), " bootstrap: ", msg))
+		fmt.Println(time.Now().Round(time.Millisecond).Format(time.RFC3339), " bootstrap: ", msg)
 	}
 	noSubs := globalTrace.NumSubscribers(madmin.TraceBootstrap) == 0
@@ -491,7 +491,7 @@ func bootstrapTraceMsg(msg string) {
 func bootstrapTrace(msg string, worker func()) {
 	if serverDebugLog {
-		logger.Info(fmt.Sprint(time.Now().Round(time.Millisecond).Format(time.RFC3339), " bootstrap: ", msg))
+		fmt.Println(time.Now().Round(time.Millisecond).Format(time.RFC3339), " bootstrap: ", msg)
 	}
 	now := time.Now()
@@ -1031,8 +1031,8 @@ func serverMain(ctx *cli.Context) {
 	globalMinioClient.SetAppInfo("minio-perf-test", ReleaseTag)
 	if serverDebugLog {
-		logger.Info("== DEBUG Mode enabled ==")
-		logger.Info("Currently set environment settings:")
+		fmt.Println("== DEBUG Mode enabled ==")
+		fmt.Println("Currently set environment settings:")
 		ks := []string{
 			config.EnvAccessKey,
 			config.EnvSecretKey,
@@ -1044,9 +1044,9 @@ func serverMain(ctx *cli.Context) {
 			if slices.Contains(ks, strings.Split(v, "=")[0]) {
 				continue
 			}
-			logger.Info(v)
+			fmt.Println(v)
 		}
-		logger.Info("======")
+		fmt.Println("======")
 	}
 	daemon.SdNotify(false, daemon.SdNotifyReady)


@@ -912,7 +912,7 @@ func (p *xlStorageDiskIDCheck) monitorDiskStatus(spent time.Duration, fn string)
 	})
 	if err == nil {
-		logger.Info("node(%s): Read/Write/Delete successful, bringing drive %s online", globalLocalNodeName, p.storage.String())
+		logger.Event(context.Background(), "node(%s): Read/Write/Delete successful, bringing drive %s online", globalLocalNodeName, p.storage.String())
 		p.health.status.Store(diskHealthOK)
 		p.health.waiting.Add(-1)
 		return