remove all frivolous logs that may or may not be actionable (#18922)

for actionable inspections we have `mc support inspect`

we do not need double logging; healing will report relevant
errors, if any, such as lost quorum.
Harshavardhana 2024-01-30 18:11:45 -08:00 committed by GitHub
parent 057192913c
commit caac9d216e
9 changed files with 13 additions and 123 deletions
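
For context, here is a minimal sketch of the "log once" pattern these call sites used. This is illustrative only, not MinIO's actual internal/logger implementation: errors are deduplicated by key so a flapping drive does not flood the logs. The commit's argument is that even deduplicated, such logs duplicate what healing already reports.

// Illustrative sketch only; MinIO's internal/logger is more elaborate.
package main

import (
	"errors"
	"fmt"
	"sync"
)

var logged sync.Map // dedup keys that have already been logged once

// logOnceIf mimics the logger.LogOnceIf call sites removed below:
// it logs err at most once per key and silently drops repeats.
func logOnceIf(err error, key string) {
	if err == nil {
		return
	}
	if _, seen := logged.LoadOrStore(key, struct{}{}); seen {
		return // already reported for this key
	}
	fmt.Printf("ERROR %s: %v\n", key, err)
}

func main() {
	err := errors.New("drive offline")
	for range [3]struct{}{} {
		logOnceIf(err, "bitrot-read-file-stream-bucket-object")
	}
	// Prints a single ERROR line despite three failures; the commit removes
	// even this deduplicated reporting from the read path, since healing
	// surfaces quorum-level errors and `mc support inspect` covers the rest.
}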

View File

@@ -20,16 +20,12 @@ package cmd
 import (
 	"bytes"
 	"context"
-	"encoding/hex"
-	"fmt"
 	"hash"
 	"io"
-	"strings"
 	"sync"
 
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/ioutil"
-	"github.com/minio/minio/internal/logger"
 )
 
 // Calculates bitrot in chunks and writes the hash into the stream.
@@ -154,29 +150,12 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
 		// Can never happen unless there are programmer bugs
 		return 0, errUnexpected
 	}
-	ignoredErrs := []error{
-		errDiskNotFound,
-	}
-	if strings.HasPrefix(b.volume, minioMetaBucket) {
-		ignoredErrs = append(ignoredErrs,
-			errFileNotFound,
-			errVolumeNotFound,
-			errFileVersionNotFound,
-		)
-	}
 	if b.rc == nil {
 		// For the first ReadAt() call we need to open the stream for reading.
 		b.currOffset = offset
 		streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
 		if len(b.data) == 0 && b.tillOffset != streamOffset {
 			b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
-			if err != nil {
-				if !IsErr(err, ignoredErrs...) {
-					logger.LogOnceIf(GlobalContext,
-						fmt.Errorf("Reading erasure shards at (%s: %s/%s) returned '%w', will attempt to reconstruct if we have quorum",
-							b.disk, b.volume, b.filePath, err), "bitrot-read-file-stream-"+b.volume+"-"+b.filePath)
-				}
-			}
 		} else {
 			b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
 		}
@@ -198,10 +177,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
 		return 0, err
 	}
 	b.h.Write(buf)
 	if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
-		logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s - content hash does not match - expected %s, got %s",
-			b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
 		return 0, errFileCorrupt
 	}
 	b.currOffset += int64(len(buf))

View File

@@ -19,11 +19,8 @@ package cmd
 import (
 	"context"
-	"fmt"
 	"hash"
 	"io"
-
-	"github.com/minio/minio/internal/logger"
 )
 
 // Implementation to calculate bitrot for the whole file.
@@ -38,12 +35,10 @@ type wholeBitrotWriter struct {
 func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
 	err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
 	if err != nil {
-		logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
 		return 0, err
 	}
 	_, err = b.Hash.Write(p)
 	if err != nil {
-		logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
 		return 0, err
 	}
 	return len(p), nil
@@ -72,12 +67,10 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
 	if b.buf == nil {
 		b.buf = make([]byte, b.tillOffset-offset)
 		if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
-			logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
 			return 0, err
 		}
 	}
 	if len(b.buf) < len(buf) {
-		logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
 		return 0, errLessData
 	}
 	n = copy(buf, b.buf)

View File

@@ -80,11 +80,9 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error)
 	}
 	encoded, err := e.encoder().Split(data)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return nil, err
 	}
 	if err = e.encoder().Encode(encoded); err != nil {
-		logger.LogIf(ctx, err)
 		return nil, err
 	}
 	return encoded, nil
@@ -111,11 +109,7 @@ func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
 // DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
 // It returns an error if the decoding failed.
 func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
-	if err := e.encoder().Reconstruct(data); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-	return nil
+	return e.encoder().Reconstruct(data)
 }
 
 // ShardSize - returns actual shared size from erasure blockSize.

View File

@@ -26,7 +26,6 @@ import (
 	"sync/atomic"
 
 	xioutil "github.com/minio/minio/internal/ioutil"
-	"github.com/minio/minio/internal/logger"
 )
 
 // Reads in parallel from readers.
@@ -211,11 +210,9 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
 // A set of preferred drives can be supplied. In that case they will be used and the data reconstructed.
 func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.ReaderAt, offset, length, totalLength int64, prefer []bool) (written int64, derr error) {
 	if offset < 0 || length < 0 {
-		logger.LogIf(ctx, errInvalidArgument)
 		return -1, errInvalidArgument
 	}
 	if offset+length > totalLength {
-		logger.LogIf(ctx, errInvalidArgument)
 		return -1, errInvalidArgument
 	}
@@ -269,7 +266,6 @@ func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.Read
 		}
 
 		if err = e.DecodeDataBlocks(bufs); err != nil {
-			logger.LogIf(ctx, err)
 			return -1, err
 		}
@@ -282,7 +278,6 @@ func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.Read
 	}
 
 	if bytesWritten != length {
-		logger.LogIf(ctx, errLessData)
 		return bytesWritten, errLessData
 	}
@@ -321,7 +316,6 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea
 	}
 
 	if err = e.DecodeDataAndParityBlocks(ctx, bufs); err != nil {
-		logger.LogOnceIf(ctx, err, "erasure-heal-decode")
 		return err
 	}
@@ -332,7 +326,6 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea
 		}
 
 		if err = w.Write(ctx, bufs); err != nil {
-			logger.LogOnceIf(ctx, err, "erasure-heal-write")
 			return err
 		}
 	}

View File

@@ -22,9 +22,6 @@ import (
 	"fmt"
 	"io"
 	"sync"
-
-	"github.com/minio/minio/internal/hash"
-	"github.com/minio/minio/internal/logger"
 )
 
 // Writes in parallel to writers
@@ -92,29 +89,26 @@ func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []io.Writer
 				io.EOF,
 				io.ErrUnexpectedEOF,
 			}...) {
-				if !hash.IsChecksumMismatch(err) {
-					logger.LogIf(ctx, err)
-				}
 				return 0, err
 			}
 		}
 		eof := err == io.EOF || err == io.ErrUnexpectedEOF
 		if n == 0 && total != 0 {
 			// Reached EOF, nothing more to be done.
 			break
 		}
 		// We take care of the situation where if n == 0 and total == 0 by creating empty data and parity files.
 		blocks, err = e.EncodeData(ctx, buf[:n])
 		if err != nil {
-			logger.LogIf(ctx, err)
 			return 0, err
 		}
 		if err = writer.Write(ctx, blocks); err != nil {
-			logger.LogIf(ctx, err)
 			return 0, err
 		}
 		total += int64(n)
 		if eof {
 			break

View File

@@ -20,9 +20,7 @@ package cmd
 import (
 	"context"
 	"errors"
-	"fmt"
 	"hash/crc32"
-	"io"
 
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/v2/sync/errgroup"
@@ -148,26 +146,6 @@ func hashOrder(key string, cardinality int) []int {
 	return nums
 }
 
-var readFileInfoIgnoredErrs = append(objectOpIgnoredErrs,
-	errFileNotFound,
-	errVolumeNotFound,
-	errFileVersionNotFound,
-	io.ErrUnexpectedEOF, // some times we would read without locks, ignore these errors
-	io.EOF,              // some times we would read without locks, ignore these errors
-)
-
-func readFileInfo(ctx context.Context, disk StorageAPI, origbucket, bucket, object, versionID string, opts ReadOptions) (FileInfo, error) {
-	fi, err := disk.ReadVersion(ctx, origbucket, bucket, object, versionID, opts)
-	if err != nil && !IsErr(err, readFileInfoIgnoredErrs...) {
-		logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
-			disk.String(), bucket, object, err),
-			disk.String())
-	}
-	return fi, err
-}
-
 // Reads all `xl.meta` metadata as a FileInfo slice.
 // Returns error slice indicating the failed metadata reads.
 func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string, bucket, object, versionID string, readData, healing bool) ([]FileInfo, []error) {
@@ -186,7 +164,7 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string,
 			if disks[index] == nil {
 				return errDiskNotFound
 			}
-			metadataArray[index], err = readFileInfo(ctx, disks[index], origbucket, bucket, object, versionID, opts)
+			metadataArray[index], err = disks[index].ReadVersion(ctx, origbucket, bucket, object, versionID, opts)
 			return err
 		}, index)
 	}
@@ -330,15 +308,12 @@ var (
 // returns error if totalSize is -1, partSize is 0, partIndex is 0.
 func calculatePartSizeFromIdx(ctx context.Context, totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
 	if totalSize < -1 {
-		logger.LogIf(ctx, errInvalidArgument)
 		return 0, errInvalidArgument
 	}
 	if partSize == 0 {
-		logger.LogIf(ctx, errPartSizeZero)
 		return 0, errPartSizeZero
 	}
 	if partIndex < 1 {
-		logger.LogIf(ctx, errPartSizeIndex)
 		return 0, errPartSizeIndex
 	}
 	if totalSize == -1 {

View File

@@ -46,7 +46,6 @@ import (
 	"github.com/minio/pkg/v2/mimedb"
 	"github.com/minio/pkg/v2/sync/errgroup"
 	"github.com/minio/pkg/v2/wildcard"
-	"github.com/tinylib/msgp/msgp"
 )
 
 // list all errors which can be ignored in object operations.
@@ -546,29 +545,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
 	return m, err
 }
 
-var readRawFileInfoErrs = append(objectOpIgnoredErrs,
-	errFileNotFound,
-	errFileNameTooLong,
-	errVolumeNotFound,
-	errFileVersionNotFound,
-	io.ErrUnexpectedEOF, // some times we would read without locks, ignore these errors
-	io.EOF,              // some times we would read without locks, ignore these errors
-	msgp.ErrShortBytes,
-	context.DeadlineExceeded,
-	context.Canceled,
-)
-
-func readRawFileInfo(ctx context.Context, disk StorageAPI, bucket, object string, readData bool) (RawFileInfo, error) {
-	rf, err := disk.ReadXL(ctx, bucket, object, readData)
-	if err != nil && !IsErr(err, readRawFileInfoErrs...) {
-		logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
-			disk.String(), bucket, object, err),
-			disk.String())
-	}
-	return rf, err
-}
-
 func fileInfoFromRaw(ri RawFileInfo, bucket, object string, readData, inclFreeVers, allParts bool) (FileInfo, error) {
 	var xl xlMetaV2
 	if err := xl.LoadOrConvert(ri.Buf); err != nil {
@@ -611,7 +587,7 @@ func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object
 			if disks[index] == nil {
 				return errDiskNotFound
 			}
-			rf, err := readRawFileInfo(ctx, disks[index], bucket, object, readData)
+			rf, err := disks[index].ReadXL(ctx, bucket, object, readData)
 			if err != nil {
 				return err
 			}
@@ -791,10 +767,10 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 		if opts.VersionID != "" {
 			// Read a specific version ID
-			fi, err = readFileInfo(ctx, disk, "", bucket, object, opts.VersionID, ropts)
+			fi, err = disk.ReadVersion(ctx, "", bucket, object, opts.VersionID, ropts)
 		} else {
 			// Read the latest version
-			rfi, err = readRawFileInfo(ctx, disk, bucket, object, readData)
+			rfi, err = disk.ReadXL(ctx, bucket, object, readData)
 			if err == nil {
 				fi, err = fileInfoFromRaw(rfi, bucket, object, readData, opts.InclFreeVersions, true)
 			}
@@ -829,7 +805,11 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 	var missingBlocks int
 	for i := range errs {
-		if errors.Is(errs[i], errFileNotFound) {
+		if IsErr(errs[i],
+			errFileNotFound,
+			errFileVersionNotFound,
+			errFileCorrupt,
+		) {
 			missingBlocks++
 		}
 	}

View File

@@ -25,7 +25,6 @@ import (
 	"strings"
 
 	"github.com/klauspost/reedsolomon"
-	"github.com/minio/minio/internal/logger"
 )
 
 // getDataBlockLen - get length of data blocks from encoded blocks.
@@ -43,19 +42,16 @@ func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int {
 func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
 	// Offset and out size cannot be negative.
 	if offset < 0 || length < 0 {
-		logger.LogIf(ctx, errUnexpected)
 		return 0, errUnexpected
 	}
 
 	// Do we have enough blocks?
 	if len(enBlocks) < dataBlocks {
-		logger.LogIf(ctx, fmt.Errorf("diskBlocks(%d)/dataBlocks(%d) - %w", len(enBlocks), dataBlocks, reedsolomon.ErrTooFewShards))
 		return 0, reedsolomon.ErrTooFewShards
 	}
 
 	// Do we have enough data?
 	if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
-		logger.LogIf(ctx, fmt.Errorf("getDataBlockLen(enBlocks, dataBlocks)(%d)/length(%d) - %w", getDataBlockLen(enBlocks, dataBlocks), length, reedsolomon.ErrShortData))
 		return 0, reedsolomon.ErrShortData
 	}
@@ -85,11 +81,6 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
 		if write < int64(len(block)) {
 			n, err := dst.Write(block[:write])
 			if err != nil {
-				// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
-				// The reader pipe might be closed at ListObjects io.EOF ignore it.
-				if err != io.ErrClosedPipe && err != io.EOF {
-					logger.LogIf(ctx, err)
-				}
 				return 0, err
 			}
 			totalWritten += int64(n)
@@ -99,11 +90,6 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
 		// Copy the block.
 		n, err := dst.Write(block)
 		if err != nil {
-			// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
-			// The reader pipe might be closed at ListObjects io.EOF ignore it.
-			if err != io.ErrClosedPipe && err != io.EOF {
-				logger.LogIf(ctx, err)
-			}
 			return 0, err
 		}

View File

@@ -497,7 +497,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
 		cache.Info.SkipHealing = healing
 		cache.Info.NextCycle = wantCycle
 		if cache.Info.Name != bucket.Name {
-			logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))
 			cache.Info = dataUsageCacheInfo{
 				Name:       bucket.Name,
 				LastUpdate: time.Time{},