diff --git a/cmd/bitrot.go b/cmd/bitrot.go
index 948ef06fc..607c62043 100644
--- a/cmd/bitrot.go
+++ b/cmd/bitrot.go
@@ -164,7 +164,6 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
 
     h := algo.New()
     hashBuf := make([]byte, h.Size())
-    buf := make([]byte, shardSize)
     left := wantSize
 
     // Calculate the size of the bitrot file and compare
@@ -173,6 +172,9 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
         return errFileCorrupt
     }
 
+    bufp := xlPoolSmall.Get().(*[]byte)
+    defer xlPoolSmall.Put(bufp)
+
     for left > 0 {
         // Read expected hash...
         h.Reset()
@@ -186,13 +188,15 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
         if left < shardSize {
             shardSize = left
         }
-        read, err := io.CopyBuffer(h, io.LimitReader(r, shardSize), buf)
+
+        read, err := io.CopyBuffer(h, io.LimitReader(r, shardSize), *bufp)
         if err != nil {
             // Read's failed for object with right size, at different offsets.
-            return err
+            return errFileCorrupt
         }
+
         left -= read
-        if !bytes.Equal(h.Sum(nil), hashBuf) {
+        if !bytes.Equal(h.Sum(nil), hashBuf[:n]) {
             return errFileCorrupt
         }
     }
diff --git a/cmd/erasure-healing-common.go b/cmd/erasure-healing-common.go
index 9a38eff9c..90ed3d187 100644
--- a/cmd/erasure-healing-common.go
+++ b/cmd/erasure-healing-common.go
@@ -261,7 +261,7 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
         // Always check data, if we got it.
         if (len(meta.Data) > 0 || meta.Size == 0) && len(meta.Parts) > 0 {
             checksumInfo := meta.Erasure.GetChecksumInfo(meta.Parts[0].Number)
-            dataErrs[i] = bitrotVerify(bytes.NewBuffer(meta.Data),
+            dataErrs[i] = bitrotVerify(bytes.NewReader(meta.Data),
                 int64(len(meta.Data)),
                 meta.Erasure.ShardFileSize(meta.Size),
                 checksumInfo.Algorithm,
diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go
index 4d024a8e0..533e794b3 100644
--- a/cmd/xl-storage.go
+++ b/cmd/xl-storage.go
@@ -98,6 +98,27 @@ func isValidVolname(volname string) bool {
     return true
 }
 
+var (
+    xlPoolReallyLarge = sync.Pool{
+        New: func() interface{} {
+            b := disk.AlignedBlock(blockSizeReallyLarge)
+            return &b
+        },
+    }
+    xlPoolLarge = sync.Pool{
+        New: func() interface{} {
+            b := disk.AlignedBlock(blockSizeLarge)
+            return &b
+        },
+    }
+    xlPoolSmall = sync.Pool{
+        New: func() interface{} {
+            b := disk.AlignedBlock(blockSizeSmall)
+            return &b
+        },
+    }
+)
+
 // xlStorage - implements StorageAPI interface.
 type xlStorage struct {
     diskPath string
@@ -105,10 +126,6 @@ type xlStorage struct {
 
     globalSync bool
 
-    poolReallyLarge sync.Pool
-    poolLarge       sync.Pool
-    poolSmall       sync.Pool
-
     rootDisk bool
 
     diskID string
@@ -254,26 +271,8 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
     }
 
     p := &xlStorage{
-        diskPath: path,
-        endpoint: ep,
-        poolReallyLarge: sync.Pool{
-            New: func() interface{} {
-                b := disk.AlignedBlock(blockSizeReallyLarge)
-                return &b
-            },
-        },
-        poolLarge: sync.Pool{
-            New: func() interface{} {
-                b := disk.AlignedBlock(blockSizeLarge)
-                return &b
-            },
-        },
-        poolSmall: sync.Pool{
-            New: func() interface{} {
-                b := disk.AlignedBlock(blockSizeSmall)
-                return &b
-            },
-        },
+        diskPath:   path,
+        endpoint:   ep,
         globalSync: env.Get(config.EnvFSOSync, config.EnableOff) == config.EnableOn,
         ctx:        GlobalContext,
         rootDisk:   rootDisk,
@@ -1323,9 +1322,9 @@ func (o *odirectReader) Read(buf []byte) (n int, err error) {
     }
     if o.buf == nil {
         if o.smallFile {
-            o.bufp = o.s.poolSmall.Get().(*[]byte)
+            o.bufp = xlPoolSmall.Get().(*[]byte)
         } else {
-            o.bufp = o.s.poolLarge.Get().(*[]byte)
+            o.bufp = xlPoolLarge.Get().(*[]byte)
         }
     }
     if o.freshRead {
@@ -1367,9 +1366,9 @@ func (o *odirectReader) Read(buf []byte) (n int, err error) {
 // Close - Release the buffer and close the file.
 func (o *odirectReader) Close() error {
     if o.smallFile {
-        o.s.poolSmall.Put(o.bufp)
+        xlPoolSmall.Put(o.bufp)
     } else {
-        o.s.poolLarge.Put(o.bufp)
+        xlPoolLarge.Put(o.bufp)
     }
     return o.f.Close()
 }
@@ -1554,11 +1553,11 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
     var bufp *[]byte
     if fileSize > 0 && fileSize >= reallyLargeFileThreshold {
         // use a larger 4MiB buffer for really large streams.
-        bufp = s.poolReallyLarge.Get().(*[]byte)
-        defer s.poolReallyLarge.Put(bufp)
+        bufp = xlPoolReallyLarge.Get().(*[]byte)
+        defer xlPoolReallyLarge.Put(bufp)
     } else {
-        bufp = s.poolLarge.Get().(*[]byte)
-        defer s.poolLarge.Put(bufp)
+        bufp = xlPoolLarge.Get().(*[]byte)
+        defer xlPoolLarge.Put(bufp)
     }
 
     written, err := xioutil.CopyAligned(w, r, *bufp, fileSize)
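
Note: hoisting the pools to package scope lets every xlStorage instance (one per drive) and bitrotVerify share the same buffers instead of each drive holding its own set, and it removes the per-call `make([]byte, shardSize)` from bitrotVerify. A minimal sketch of that Get / defer Put pattern follows; `poolSmall`, `copyWithPooledBuffer`, and the constant value chosen here are illustrative stand-ins only, and the real pools in cmd/xl-storage.go hand out O_DIRECT-aligned blocks via disk.AlignedBlock rather than plain make.

package main

import (
    "bytes"
    "fmt"
    "io"
    "sync"
)

// Stand-in for the blockSizeSmall constant in cmd/xl-storage.go; the value
// here is arbitrary and only used for this demo.
const blockSizeSmall = 32 * 1024

// Package-level pool shared by all callers, mirroring the shape of
// xlPoolSmall in the diff. Storing *[]byte (not []byte) avoids allocating a
// fresh slice header each time a buffer is boxed into interface{} on Put.
var poolSmall = sync.Pool{
    New: func() interface{} {
        b := make([]byte, blockSizeSmall)
        return &b
    },
}

// copyWithPooledBuffer borrows a buffer for the duration of the copy and
// returns it on exit, the same Get / defer Put shape bitrotVerify now uses
// around io.CopyBuffer instead of allocating a shard-sized buffer per call.
func copyWithPooledBuffer(dst io.Writer, src io.Reader) (int64, error) {
    bufp := poolSmall.Get().(*[]byte)
    defer poolSmall.Put(bufp)
    return io.CopyBuffer(dst, src, *bufp)
}

func main() {
    var sink bytes.Buffer
    n, err := copyWithPooledBuffer(&sink, bytes.NewReader(make([]byte, 100000)))
    fmt.Println(n, err) // 100000 <nil>
}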