results must be a single channel to avoid overwriting `healing.bin` (#19702)

Harshavardhana 2024-05-09 10:15:03 -07:00, committed by GitHub
parent f5e3eedf34
commit 3549e583a6
GPG Key ID: B5690EEEBB952194
7 changed files with 259 additions and 231 deletions
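
Before the per-file changes, here is a minimal, self-contained sketch (not MinIO code; all names are illustrative) of the pattern this commit moves to: one buffered results channel with one consumer goroutine owning every tracker update, so concurrent heal workers cannot race and overwrite the persisted tracker state (`healing.bin`). The channel is created once per heal run and closed only after all workers return, mirroring `jt.Wait()` followed by closing `results` in the diff below.

```go
package main

import (
	"fmt"
	"sync"
)

type healEntryResult struct {
	bytes     uint64
	entryDone bool
	name      string
}

type tracker struct {
	object string
	bytes  uint64
}

// persist stands in for tracker.update(ctx), which rewrites healing.bin.
func (t *tracker) persist() { fmt.Printf("persist: %q, %d bytes\n", t.object, t.bytes) }

func healSet(entries []string) {
	// Single channel for the whole heal run.
	results := make(chan healEntryResult, 1000)

	t := &tracker{}
	var consumerDone sync.WaitGroup
	consumerDone.Add(1)
	go func() {
		defer consumerDone.Done()
		// The only goroutine that touches the tracker.
		for res := range results {
			if res.entryDone {
				t.object = res.name
				t.persist()
				continue
			}
			t.bytes += res.bytes
		}
	}()

	var workers sync.WaitGroup
	for _, name := range entries {
		workers.Add(1)
		go func(name string) {
			defer workers.Done()
			results <- healEntryResult{bytes: 1}                 // healed one version
			results <- healEntryResult{entryDone: true, name: name} // entry finished
		}(name)
	}

	workers.Wait()  // like jt.Wait() in healErasureSet
	close(results)  // safe: every sender has returned
	consumerDone.Wait()
}

func main() {
	healSet([]string{"bucket/a", "bucket/b"})
}
```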

View File

@@ -121,10 +121,6 @@ func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets
 			// we ignore disk not found errors
 			return nil
 		}
-		if storageDisks[index].Healing() != nil {
-			// we ignore disks under healing
-			return nil
-		}
 		volsInfo, err := storageDisks[index].ListVols(ctx)
 		if err != nil {
 			return err
@@ -216,7 +212,7 @@ func (fi FileInfo) DataMov() bool {
 	return ok
 }
 
-func auditHealObject(ctx context.Context, bucket, object, versionID string, result madmin.HealResultItem, err error) {
+func (er *erasureObjects) auditHealObject(ctx context.Context, bucket, object, versionID string, result madmin.HealResultItem, err error) {
 	if len(logger.AuditTargets()) == 0 {
 		return
 	}
@@ -231,8 +227,14 @@ func auditHealObject(ctx context.Context, bucket, object, versionID string, resu
 		opts.Error = err.Error()
 	}
 
-	if result.After.Drives != nil {
-		opts.Tags = map[string]interface{}{"drives-result": result.After.Drives}
-	}
+	opts.Tags = map[string]interface{}{
+		"healResult": result,
+		"objectLocation": auditObjectOp{
+			Name:   decodeDirObject(object),
+			Pool:   er.poolIndex + 1,
+			Set:    er.setIndex + 1,
+			Drives: er.getEndpointStrings(),
+		},
+	}
 
 	auditLogInternal(ctx, opts)
@@ -247,7 +249,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 	storageEndpoints := er.getEndpoints()
 
 	defer func() {
-		auditHealObject(ctx, bucket, object, versionID, result, err)
+		er.auditHealObject(ctx, bucket, object, versionID, result, err)
 	}()
 
 	if globalTrace.NumSubscribers(madmin.TraceHealing) > 0 {
@@ -289,21 +291,18 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 
 	readQuorum, _, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount)
 	if err != nil {
-		m, err := er.deleteIfDangling(ctx, bucket, object, partsMetadata, errs, nil, ObjectOptions{
+		m, derr := er.deleteIfDangling(ctx, bucket, object, partsMetadata, errs, nil, ObjectOptions{
 			VersionID: versionID,
 		})
 		errs = make([]error, len(errs))
-		for i := range errs {
-			errs[i] = err
-		}
-		if err == nil {
-			// Dangling object successfully purged, size is '0'
-			m.Size = 0
-		}
-		// Generate file/version not found with default heal result
-		err = errFileNotFound
-		if versionID != "" {
-			err = errFileVersionNotFound
+		if derr == nil {
+			derr = errFileNotFound
+			if versionID != "" {
+				derr = errFileVersionNotFound
+			}
+			// We did find a new danging object
+			return er.defaultHealResult(m, storageDisks, storageEndpoints,
+				errs, bucket, object, versionID), derr
 		}
 		return er.defaultHealResult(m, storageDisks, storageEndpoints,
 			errs, bucket, object, versionID), err
@@ -360,11 +359,10 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 		switch {
 		case v != nil:
 			driveState = madmin.DriveStateOk
-		case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound:
+		case errors.Is(errs[i], errDiskNotFound), errors.Is(dataErrs[i], errDiskNotFound):
 			driveState = madmin.DriveStateOffline
-		case errs[i] == errFileNotFound, errs[i] == errFileVersionNotFound, errs[i] == errVolumeNotFound:
-			fallthrough
-		case dataErrs[i] == errFileNotFound, dataErrs[i] == errFileVersionNotFound, dataErrs[i] == errVolumeNotFound:
+		case IsErr(errs[i], errFileNotFound, errFileVersionNotFound, errVolumeNotFound),
+			IsErr(dataErrs[i], errFileNotFound, errFileVersionNotFound, errVolumeNotFound):
 			driveState = madmin.DriveStateMissing
 		default:
 			// all remaining cases imply corrupt data/metadata
@@ -417,18 +415,18 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 			VersionID: versionID,
 		})
 		errs = make([]error, len(errs))
-		for i := range errs {
-			errs[i] = err
-		}
 		if err == nil {
-			// Dangling object successfully purged, size is '0'
-			m.Size = 0
-		}
-		// Generate file/version not found with default heal result
-		err = errFileNotFound
-		if versionID != "" {
-			err = errFileVersionNotFound
+			err = errFileNotFound
+			if versionID != "" {
+				err = errFileVersionNotFound
+			}
+			// We did find a new danging object
+			return er.defaultHealResult(m, storageDisks, storageEndpoints,
+				errs, bucket, object, versionID), err
+		}
+		for i := range errs {
+			errs[i] = err
 		}
 		return er.defaultHealResult(m, storageDisks, storageEndpoints,
 			errs, bucket, object, versionID), err
 	}
@@ -641,6 +639,7 @@ func (er *erasureObjects) checkAbandonedParts(ctx context.Context, bucket string
 	if !opts.Remove || opts.DryRun {
 		return nil
 	}
+
 	if globalTrace.NumSubscribers(madmin.TraceHealing) > 0 {
 		startTime := time.Now()
 		defer func() {
@@ -983,12 +982,12 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid
 	// However this requires a bit of a rewrite, leave this up for
 	// future work.
 	if notFoundMetaErrs > 0 && notFoundMetaErrs > validMeta.Erasure.ParityBlocks {
-		// All xl.meta is beyond data blocks missing, this is dangling
+		// All xl.meta is beyond parity blocks missing, this is dangling
		return validMeta, true
 	}
 
 	if !validMeta.IsRemote() && notFoundPartsErrs > 0 && notFoundPartsErrs > validMeta.Erasure.ParityBlocks {
-		// All data-dir is beyond data blocks missing, this is dangling
+		// All data-dir is beyond parity blocks missing, this is dangling
 		return validMeta, true
 	}
@@ -1069,8 +1068,7 @@ func healTrace(funcName healingMetric, startTime time.Time, bucket, object strin
 	}
 	if err != nil {
 		tr.Error = err.Error()
-	} else {
-		tr.HealResult = result
 	}
+	tr.HealResult = result
 	globalTrace.Publish(tr)
 }

View File

@@ -1497,7 +1497,13 @@ func TestHealObjectErasure(t *testing.T) {
 	er.getDisks = func() []StorageAPI {
 		// Nil more than half the disks, to remove write quorum.
 		for i := 0; i <= len(erasureDisks)/2; i++ {
-			erasureDisks[i] = nil
+			err := erasureDisks[i].Delete(context.Background(), bucket, object, DeleteOptions{
+				Recursive: true,
+				Immediate: false,
+			})
+			if err != nil {
+				t.Fatalf("Failed to delete a file - %v", err)
+			}
 		}
 		return erasureDisks
 	}

View File

@@ -103,8 +103,12 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
 	if err != nil {
 		if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(srcBucket, minioMetaBucket) {
 			_, derr := er.deleteIfDangling(context.Background(), srcBucket, srcObject, metaArr, errs, nil, srcOpts)
-			if derr != nil {
-				err = derr
+			if derr == nil {
+				if srcOpts.VersionID != "" {
+					err = errFileVersionNotFound
+				} else {
+					err = errFileNotFound
+				}
 			}
 		}
 		return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
@@ -485,9 +489,12 @@ func joinErrs(errs []error) []string {
 }
 
 func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object string, metaArr []FileInfo, errs []error, dataErrs []error, opts ObjectOptions) (FileInfo, error) {
-	var err error
 	m, ok := isObjectDangling(metaArr, errs, dataErrs)
-	if ok {
+	if !ok {
+		// We only come here if we cannot figure out if the object
+		// can be deleted safely, in such a scenario return ReadQuorum error.
+		return FileInfo{}, errErasureReadQuorum
+	}
 	tags := make(map[string]interface{}, 4)
 	tags["set"] = er.setIndex
 	tags["pool"] = er.poolIndex
@@ -522,11 +529,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
 
 	defer auditDanglingObjectDeletion(ctx, bucket, object, m.VersionID, tags)
 
-	err = errFileNotFound
-	if opts.VersionID != "" {
-		err = errFileVersionNotFound
-	}
-
 	fi := FileInfo{
 		VersionID: m.VersionID,
 	}
@@ -562,8 +564,7 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
 		rmDisks[diskName] = errStr
 	}
 	tags["cleanupResult"] = rmDisks
-	}
-	return m, err
+	return m, nil
 }
 
 func fileInfoFromRaw(ri RawFileInfo, bucket, object string, readData, inclFreeVers, allParts bool) (FileInfo, error) {
@@ -925,8 +926,12 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 		// not we simply ignore it, since we can't tell for sure if its dangling object.
 		if totalResp == er.setDriveCount && shouldCheckForDangling(err, errs, bucket) {
 			_, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts)
-			if derr != nil {
-				err = derr
+			if derr == nil {
+				if opts.VersionID != "" {
+					err = errFileVersionNotFound
+				} else {
+					err = errFileNotFound
+				}
 			}
 		}
 		return fi, nil, nil, toObjectErr(err, bucket, object)
@@ -2141,8 +2146,12 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
 	if err != nil {
 		if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) {
 			_, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts)
-			if derr != nil {
-				err = derr
+			if derr == nil {
+				if opts.VersionID != "" {
+					err = errFileVersionNotFound
+				} else {
+					err = errFileNotFound
+				}
 			}
 		}
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
@@ -2214,8 +2223,12 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 	if err != nil {
 		if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) {
 			_, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts)
-			if derr != nil {
-				err = derr
+			if derr == nil {
+				if opts.VersionID != "" {
+					err = errFileVersionNotFound
+				} else {
+					err = errFileNotFound
+				}
 			}
 		}
 		return ObjectInfo{}, toObjectErr(err, bucket, object)

View File

@@ -557,7 +557,7 @@ type auditObjectOp struct {
 	Name string `json:"name"`
 	Pool int    `json:"poolId"`
 	Set  int    `json:"setId"`
-	Disks []string `json:"disks"`
+	Drives []string `json:"drives"`
 }
 
 // Add erasure set information to the current context
@@ -570,7 +570,7 @@ func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjec
 		Name: decodeDirObject(object),
 		Pool: set.poolIndex + 1,
 		Set:  set.setIndex + 1,
-		Disks: set.getEndpointStrings(),
+		Drives: set.getEndpointStrings(),
 	}
 
 	logger.GetReqInfo(ctx).AppendTags("objectLocation", op)

View File

@ -34,7 +34,6 @@ import (
"github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/color" "github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config/storageclass" "github.com/minio/minio/internal/config/storageclass"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/console" "github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v2/wildcard" "github.com/minio/pkg/v2/wildcard"
@@ -141,6 +140,14 @@ func getLocalBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.Bg
 	return status, true
 }
 
+type healEntryResult struct {
+	bytes     uint64
+	success   bool
+	skipped   bool
+	entryDone bool
+	name      string
+}
+
 // healErasureSet lists and heals all objects in a specific erasure set
 func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, tracker *healingTracker) error {
 	scanMode := madmin.HealNormalScan
@@ -187,22 +194,69 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 	jt, _ := workers.New(int(numHealers))
 
+	healEntryDone := func(name string) healEntryResult {
+		return healEntryResult{
+			entryDone: true,
+			name:      name,
+		}
+	}
+
+	healEntrySuccess := func(sz uint64) healEntryResult {
+		return healEntryResult{
+			bytes:   sz,
+			success: true,
+		}
+	}
+
+	healEntryFailure := func(sz uint64) healEntryResult {
+		return healEntryResult{
+			bytes: sz,
+		}
+	}
+
+	healEntrySkipped := func(sz uint64) healEntryResult {
+		return healEntryResult{
+			bytes:   sz,
+			skipped: true,
+		}
+	}
+
+	// Collect updates to tracker from concurrent healEntry calls
+	results := make(chan healEntryResult, 1000)
+	defer close(results)
+	go func() {
+		for res := range results {
+			if res.entryDone {
+				tracker.setObject(res.name)
+				if time.Since(tracker.getLastUpdate()) > time.Minute {
+					healingLogIf(ctx, tracker.update(ctx))
+				}
+				continue
+			}
+			tracker.updateProgress(res.success, res.skipped, res.bytes)
+		}
+	}()
+
 	var retErr error
+
 	// Heal all buckets with all objects
 	for _, bucket := range healBuckets {
 		if tracker.isHealed(bucket) {
 			continue
 		}
+
 		var forwardTo string
 		// If we resume to the same bucket, forward to last known item.
-		if b := tracker.getBucket(); b != "" {
-			if b == bucket {
-				forwardTo = tracker.getObject()
-			} else {
+		b := tracker.getBucket()
+		if b == bucket {
+			forwardTo = tracker.getObject()
+		}
+		if b != "" {
 			// Reset to where last bucket ended if resuming.
 			tracker.resume()
 		}
-		}
+
 		tracker.setObject("")
 		tracker.setBucket(bucket)
 		// Heal current bucket again in case if it is failed
@@ -280,37 +334,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 		fallbackDisks := disks[expectedDisks:]
 		disks = disks[:expectedDisks]
 
-		type healEntryResult struct {
-			bytes     uint64
-			success   bool
-			skipped   bool
-			entryDone bool
-			name      string
-		}
-		healEntryDone := func(name string) healEntryResult {
-			return healEntryResult{
-				entryDone: true,
-				name:      name,
-			}
-		}
-		healEntrySuccess := func(sz uint64) healEntryResult {
-			return healEntryResult{
-				bytes:   sz,
-				success: true,
-			}
-		}
-		healEntryFailure := func(sz uint64) healEntryResult {
-			return healEntryResult{
-				bytes: sz,
-			}
-		}
-		healEntrySkipped := func(sz uint64) healEntryResult {
-			return healEntryResult{
-				bytes:   sz,
-				skipped: true,
-			}
-		}
-
 		filterLifecycle := func(bucket, object string, fi FileInfo) bool {
 			if lc == nil {
 				return false
@@ -331,22 +354,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 			}
 		}
 
-		// Collect updates to tracker from concurrent healEntry calls
-		results := make(chan healEntryResult, 1000)
-		go func() {
-			for res := range results {
-				if res.entryDone {
-					tracker.setObject(res.name)
-					if time.Since(tracker.getLastUpdate()) > time.Minute {
-						healingLogIf(ctx, tracker.update(ctx))
-					}
-					continue
-				}
-				tracker.updateProgress(res.success, res.skipped, res.bytes)
-			}
-		}()
-
 		send := func(result healEntryResult) bool {
 			select {
 			case <-ctx.Done():
@@ -393,7 +400,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 			var result healEntryResult
 			fivs, err := entry.fileInfoVersions(bucket)
 			if err != nil {
-				_, err := er.HealObject(ctx, bucket, encodedEntryName, "",
+				res, err := er.HealObject(ctx, bucket, encodedEntryName, "",
 					madmin.HealOpts{
 						ScanMode: scanMode,
 						Remove:   healDeleteDangling,
@@ -407,7 +414,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 					result = healEntryFailure(0)
 					healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err))
 				} else {
-					result = healEntrySuccess(0)
+					result = healEntrySuccess(uint64(res.ObjectSize))
 				}
 
 				send(result)
@ -430,11 +437,12 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
continue continue
} }
if _, err := er.HealObject(ctx, bucket, encodedEntryName, res, err := er.HealObject(ctx, bucket, encodedEntryName,
version.VersionID, madmin.HealOpts{ version.VersionID, madmin.HealOpts{
ScanMode: scanMode, ScanMode: scanMode,
Remove: healDeleteDangling, Remove: healDeleteDangling,
}); err != nil { })
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) { if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
// queueing happens across namespace, ignore // queueing happens across namespace, ignore
// objects that are not found. // objects that are not found.
@@ -449,22 +457,20 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 						healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err))
 					}
 				} else {
-					result = healEntrySuccess(uint64(version.Size))
+					result = healEntrySuccess(uint64(res.ObjectSize))
 				}
 
 				if !send(result) {
 					return
 				}
 			}
+
 			// All versions resulted in 'ObjectNotFound/VersionNotFound'
 			if versionNotFound == len(fivs.Versions) {
 				return
 			}
-			select {
-			case <-ctx.Done():
-				return
-			case results <- healEntryDone(entry.name):
-			}
+
+			send(healEntryDone(entry.name))
 
 			// Wait and proceed if there are active requests
 			waitForLowHTTPReq()
@@ -502,7 +508,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 			finished: nil,
 		})
 		jt.Wait() // synchronize all the concurrent heal jobs
-		xioutil.SafeClose(results)
 		if err != nil {
 			// Set this such that when we return this function
 			// we let the caller retry this disk again for the

View File

@@ -417,6 +417,10 @@ func (s *xlStorage) Healing() *healingTracker {
 	if err != nil {
 		return nil
 	}
+	if len(b) == 0 {
+		// 'healing.bin' might be truncated
+		return nil
+	}
 	h := newHealingTracker()
 	_, err = h.UnmarshalMsg(b)
 	bugLogIf(GlobalContext, err)

View File

@@ -74,7 +74,7 @@ Setting this environment variable automatically enables audit logging to the HTT
 NOTE:
 
 - `timeToFirstByte` and `timeToResponse` will be expressed in Nanoseconds.
-- Additionally in the case of the erasure coded setup `tags.objectErasureMap` provides per object details about
+- Additionally in the case of the erasure coded setup `tags.objectLocation` provides per object details about
  - Pool number the object operation was performed on.
  - Set number the object operation was performed on.
  - The list of drives participating in this operation belong to the set.
@@ -82,8 +82,9 @@ NOTE:
 ```json
 {
   "version": "1",
-  "deploymentid": "51bcc7b9-a447-4251-a940-d9d0aab9af69",
-  "time": "2021-10-08T00:46:36.801714978Z",
+  "deploymentid": "90e81272-45d9-4fe8-9c45-c9a7322bf4b5",
+  "time": "2024-05-09T07:38:10.449688982Z",
+  "event": "",
   "trigger": "incoming",
   "api": {
     "name": "PutObject",
@@ -91,40 +92,41 @@ NOTE:
     "object": "hosts",
     "status": "OK",
     "statusCode": 200,
-    "rx": 380,
-    "tx": 476,
-    "timeToResponse": "257694819ns"
+    "rx": 401,
+    "tx": 0,
+    "timeToResponse": "13309747ns",
+    "timeToResponseInNS": "13309747"
   },
   "remotehost": "127.0.0.1",
-  "requestID": "16ABE7A785E7AC2C",
-  "userAgent": "MinIO (linux; amd64) minio-go/v7.0.15 mc/DEVELOPMENT.2021-10-06T23-39-34Z",
+  "requestID": "17CDC1F4D7E69123",
+  "userAgent": "MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z",
+  "requestPath": "/testbucket/hosts",
+  "requestHost": "localhost:9000",
   "requestHeader": {
-    "Authorization": "AWS4-HMAC-SHA256 Credential=minio/20211008/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=4c60a59e5eb3b0a68693c7fee9dbb5a8a509e0717668669194d37bf182fde031",
-    "Content-Length": "380",
+    "Accept-Encoding": "zstd,gzip",
+    "Authorization": "AWS4-HMAC-SHA256 Credential=minioadmin/20240509/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=d4d6862e6cc61011a61fa801da71048ece4f32a0562cad6bb88bdda50d7fcb95",
+    "Content-Length": "401",
     "Content-Type": "application/octet-stream",
-    "User-Agent": "MinIO (linux; amd64) minio-go/v7.0.15 mc/DEVELOPMENT.2021-10-06T23-39-34Z",
+    "User-Agent": "MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z",
     "X-Amz-Content-Sha256": "STREAMING-AWS4-HMAC-SHA256-PAYLOAD",
-    "X-Amz-Date": "20211008T004636Z",
-    "X-Amz-Decoded-Content-Length": "207",
-    "X-Amz-Server-Side-Encryption": "aws:kms"
+    "X-Amz-Date": "20240509T073810Z",
+    "X-Amz-Decoded-Content-Length": "228"
   },
   "responseHeader": {
     "Accept-Ranges": "bytes",
     "Content-Length": "0",
-    "ETag": "4939450d1beec11e10a91ee7700bb593",
+    "ETag": "9fe7a344ef4227d3e53751e9d88ce41e",
     "Server": "MinIO",
     "Strict-Transport-Security": "max-age=31536000; includeSubDomains",
     "Vary": "Origin,Accept-Encoding",
-    "X-Amz-Request-Id": "16ABE7A785E7AC2C",
-    "X-Amz-Server-Side-Encryption": "aws:kms",
-    "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": "arn:aws:kms:my-minio-key",
+    "X-Amz-Id-2": "dd9025bab4ad464b049177c95eb6ebf374d3b3fd1af9251148b658df7ac2e3e8",
+    "X-Amz-Request-Id": "17CDC1F4D7E69123",
     "X-Content-Type-Options": "nosniff",
-    "X-Xss-Protection": "1; mode=block",
-    "x-amz-version-id": "ac4639f6-c544-4f3f-af1e-b4c0736f67f9"
+    "X-Xss-Protection": "1; mode=block"
   },
   "tags": {
-    "objectErasureMap": {
-      "hosts": {
+    "objectLocation": {
+      "name": "hosts",
       "poolId": 1,
       "setId": 1,
       "drives": [
@@ -134,8 +136,8 @@ NOTE:
         "/mnt/data4"
       ]
     }
-    }
-  }
+  },
+  "accessKey": "minioadmin"
 }
 ```
@@ -176,7 +178,7 @@ On another terminal assuming you have `kafkacat` installed
 
 ```
 kafkacat -b localhost:29092 -t auditlog -C
-{"version":"1","deploymentid":"8a1d8091-b874-45df-b9ea-e044eede6ace","time":"2021-07-13T02:00:47.020547414Z","trigger":"incoming","api":{"name":"ListBuckets","status":"OK","statusCode":200,"timeToFirstByte":"261795ns","timeToResponse":"312490ns"},"remotehost":"127.0.0.1","requestID":"16913736591C237F","userAgent":"MinIO (linux; amd64) minio-go/v7.0.11 mc/DEVELOPMENT.2021-07-09T02-22-26Z","requestHeader":{"Authorization":"AWS4-HMAC-SHA256 Credential=minio/20210713/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=7fe65c5467e05ca21de64094688da43f96f34fec82e8955612827079f4600527","User-Agent":"MinIO (linux; amd64) minio-go/v7.0.11 mc/DEVELOPMENT.2021-07-09T02-22-26Z","X-Amz-Content-Sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","X-Amz-Date":"20210713T020047Z"},"responseHeader":{"Accept-Ranges":"bytes","Content-Length":"547","Content-Security-Policy":"block-all-mixed-content","Content-Type":"application/xml","Server":"MinIO","Vary":"Origin,Accept-Encoding","X-Amz-Request-Id":"16913736591C237F","X-Xss-Protection":"1; mode=block"}}
+{"version":"1","deploymentid":"90e81272-45d9-4fe8-9c45-c9a7322bf4b5","time":"2024-05-09T07:38:10.449688982Z","event":"","trigger":"incoming","api":{"name":"PutObject","bucket":"testbucket","object":"hosts","status":"OK","statusCode":200,"rx":401,"tx":0,"timeToResponse":"13309747ns","timeToResponseInNS":"13309747"},"remotehost":"127.0.0.1","requestID":"17CDC1F4D7E69123","userAgent":"MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z","requestPath":"/testbucket/hosts","requestHost":"localhost:9000","requestHeader":{"Accept-Encoding":"zstd,gzip","Authorization":"AWS4-HMAC-SHA256 Credential=minioadmin/20240509/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=d4d6862e6cc61011a61fa801da71048ece4f32a0562cad6bb88bdda50d7fcb95","Content-Length":"401","Content-Type":"application/octet-stream","User-Agent":"MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z","X-Amz-Content-Sha256":"STREAMING-AWS4-HMAC-SHA256-PAYLOAD","X-Amz-Date":"20240509T073810Z","X-Amz-Decoded-Content-Length":"228"},"responseHeader":{"Accept-Ranges":"bytes","Content-Length":"0","ETag":"9fe7a344ef4227d3e53751e9d88ce41e","Server":"MinIO","Strict-Transport-Security":"max-age=31536000; includeSubDomains","Vary":"Origin,Accept-Encoding","X-Amz-Id-2":"dd9025bab4ad464b049177c95eb6ebf374d3b3fd1af9251148b658df7ac2e3e8","X-Amz-Request-Id":"17CDC1F4D7E69123","X-Content-Type-Options":"nosniff","X-Xss-Protection":"1; mode=block"},"tags":{"objectLocation":{"name":"hosts","poolId":1,"setId":1,"drives":["/mnt/data1","/mnt/data2","/mnt/data3","/mnt/data4"]}},"accessKey":"minioadmin"}
 ```
 
 MinIO also honors environment variable for Kafka target Audit logging as shown below, this setting will override the endpoint settings in the MinIO server config.
@@ -215,7 +217,7 @@ Setting this environment variable automatically enables audit logging to the Kaf
 NOTE:
 
 - `timeToFirstByte` and `timeToResponse` will be expressed in Nanoseconds.
-- Additionally in the case of the erasure coded setup `tags.objectErasureMap` provides per object details about
+- Additionally in the case of the erasure coded setup `tags.objectLocation` provides per object details about
  - Pool number the object operation was performed on.
  - Set number the object operation was performed on.
  - The list of drives participating in this operation belong to the set.