// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
	"sync"

	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
)

// Calculates bitrot in chunks and writes the hash into the stream.
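//
// The resulting stream interleaves checksums with data: for each shard of up
// to shardSize bytes, h.Size() bytes of checksum are written first, followed
// by the shard itself:
//
//	[hash_0][shard_0][hash_1][shard_1]...
//
// streamingBitrotReader.ReadAt below relies on this layout to locate and
// verify individual shards.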
type streamingBitrotWriter struct {
	iow          io.WriteCloser
	closeWithErr func(err error) error
	h            hash.Hash
	shardSize    int64
	canClose     *sync.WaitGroup
}

func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	b.h.Reset()
	b.h.Write(p)
	hashBytes := b.h.Sum(nil)
	_, err := b.iow.Write(hashBytes)
	if err != nil {
		b.closeWithErr(err)
		return 0, err
	}
	n, err := b.iow.Write(p)
	if err != nil {
		b.closeWithErr(err)
		return n, err
	}
	if n != len(p) {
		err = io.ErrShortWrite
		b.closeWithErr(err)
	}
	return n, err
}

func (b *streamingBitrotWriter) Close() error {
	err := b.iow.Close()
	// Wait for all data to be written before returning, else it causes race conditions.
	// The race condition exists because of the io.PipeWriter implementation, i.e. consider
	// the following sequence of operations:
	// 1) pipe.Write()
	// 2) pipe.Close()
	// Now pipe.Close() can return before the data is read on the other end of the pipe and written to the disk.
	// Hence an immediate Read() on the file can return incorrect data.
	if b.canClose != nil {
		b.canClose.Wait()
	}
	return err
}

// newStreamingBitrotWriterBuffer returns a streaming bitrot writer implementation.
// The output is written to the supplied writer w.
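//
// A minimal usage sketch (illustrative; assumes a caller elsewhere in this
// package and that HighwayHash256S is one of the BitrotAlgorithm values):
//
//	var buf bytes.Buffer
//	w := newStreamingBitrotWriterBuffer(&buf, HighwayHash256S, shardSize)
//	w.Write(shard) // buf now holds the shard checksum followed by the shard data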
func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer {
	return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error {
		// Similar to CloseWithError on pipes we always return nil.
		return nil
	}}
}

// Returns a streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
	r, w := io.Pipe()
	h := algo.New()

	bw := &streamingBitrotWriter{
		iow:          ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()),
		closeWithErr: w.CloseWithError,
		h:            h,
		shardSize:    shardSize,
		canClose:     &sync.WaitGroup{},
	}
	bw.canClose.Add(1)
	go func() {
		defer bw.canClose.Done()

		totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
		if length != -1 {
			bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
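			// Worked example (illustrative numbers only): length = 1 MiB with
			// shardSize = 128 KiB and a 32-byte checksum gives
			// ceilFrac(length, shardSize) = 8 shards, i.e. 8*32 = 256
			// checksum bytes stored on top of the 1 MiB of data.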
			totalFileSize = bitrotSumsTotalSize + length
		}
		r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
	}()
	return bw
}

// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
type streamingBitrotReader struct {
	disk       StorageAPI
	data       []byte
	rc         io.Reader
	volume     string
	filePath   string
	tillOffset int64
	currOffset int64
	h          hash.Hash
	shardSize  int64
	hashBytes  []byte
}

func (b *streamingBitrotReader) Close() error {
	if b.rc == nil {
		return nil
	}
	if closer, ok := b.rc.(io.Closer); ok {
		// drain the body for connection re-use at network layer.
		xhttp.DrainBody(struct {
			io.Reader
			io.Closer
		}{
			Reader: b.rc,
			Closer: closeWrapper(func() error { return nil }),
		})
		return closer.Close()
	}
	return nil
}

func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
	var err error
	if offset%b.shardSize != 0 {
		// Offset should always be aligned to b.shardSize
		// Can never happen unless there are programmer bugs
		return 0, errUnexpected
	}
	ignoredErrs := []error{
		errDiskNotFound,
	}
	if strings.HasPrefix(b.volume, minioMetaBucket) {
		ignoredErrs = append(ignoredErrs,
			errFileNotFound,
			errVolumeNotFound,
			errFileVersionNotFound,
		)
	}
	if b.rc == nil {
		// For the first ReadAt() call we need to open the stream for reading.
		b.currOffset = offset
		streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
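		// Each shard is preceded by a checksum (see the layout note on
		// streamingBitrotWriter), so an aligned data offset maps to
		// offset/b.shardSize preceding checksums of b.h.Size() bytes each,
		// plus the data bytes themselves.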
		if len(b.data) == 0 && b.tillOffset != streamOffset {
			b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
			if err != nil {
				if !IsErr(err, ignoredErrs...) {
					logger.LogOnceIf(GlobalContext,
						fmt.Errorf("Reading erasure shards at (%s: %s/%s) returned '%w', will attempt to reconstruct if we have quorum",
							b.disk, b.volume, b.filePath, err), "bitrot-read-file-stream-"+b.volume+"-"+b.filePath)
				}
			}
		} else {
			b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
		}
		if err != nil {
			return 0, err
		}
	}
	if offset != b.currOffset {
		// Can never happen unless there are programmer bugs
		return 0, errUnexpected
	}
	b.h.Reset()
	_, err = io.ReadFull(b.rc, b.hashBytes)
	if err != nil {
		return 0, err
	}
	_, err = io.ReadFull(b.rc, buf)
	if err != nil {
		return 0, err
	}
	b.h.Write(buf)

	if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
		logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s - content hash does not match - expected %s, got %s",
			b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
		return 0, errFileCorrupt
	}
	b.currOffset += int64(len(buf))
	return len(buf), nil
}

// Returns a streaming bitrot reader implementation.
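//
// A minimal read sketch (illustrative, assumed caller): open a reader over
// the whole shard file and read one shard at a time at aligned offsets:
//
//	br := newStreamingBitrotReader(disk, nil, volume, filePath, tillOffset, algo, shardSize)
//	defer br.Close()
//	buf := make([]byte, shardSize)
//	_, err := br.ReadAt(buf, 0) // the shard checksum is verified before returning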
func newStreamingBitrotReader(disk StorageAPI, data []byte, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
	h := algo.New()
	return &streamingBitrotReader{
		disk:       disk,
		data:       data,
		volume:     volume,
		filePath:   filePath,
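		// tillOffset is the data length; the stream additionally stores one
		// h.Size() checksum per shard, hence the adjusted stream size below.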
		tillOffset: ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
		h:          h,
		shardSize:  shardSize,
		hashBytes:  make([]byte, h.Size()),
	}
}