/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cmd

import (
	"io"

	"github.com/minio/minio/pkg/errors"
)

// ReadFile reads as much data as requested from the file under the given volume and path and writes the data to the provided writer.
// The algorithm and the keys/checksums are used to verify the integrity of the given file. ReadFile will read data from the given offset
// up to the given length. If parts of the file are corrupted ReadFile tries to reconstruct the data.
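//
// A hypothetical call might look like the sketch below; storage, savedChecksums,
// algorithm and blocksize are placeholders for illustration only, not identifiers
// defined in this file:
//
//	var buf bytes.Buffer
//	file, err := storage.ReadFile(&buf, "bucket", "object/part.1", 0, 1024, 4096, savedChecksums, algorithm, blocksize)
//	if err != nil {
//		// read quorum was lost or an argument was invalid
//	}
//	_ = file.Size // number of bytes written to buf (here at most 1024)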
func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset, length int64, totalLength int64, checksums [][]byte, algorithm BitrotAlgorithm, blocksize int64) (f ErasureFileInfo, err error) {
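	// Reject impossible ranges and unknown bitrot algorithms before touching any disk.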
	if offset < 0 || length < 0 {
		return f, errors.Trace(errUnexpected)
	}
	if offset+length > totalLength {
		return f, errors.Trace(errUnexpected)
	}
	if !algorithm.Available() {
		return f, errors.Trace(errBitrotHashAlgoInvalid)
	}

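	// Set up a bitrot verifier for every online disk and one buffered error channel per disk for the concurrent reads below.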
	f.Checksums = make([][]byte, len(s.disks))
	verifiers := make([]*BitrotVerifier, len(s.disks))
	for i, disk := range s.disks {
		if disk == OfflineDisk {
			continue
		}
		verifiers[i] = NewBitrotVerifier(algorithm, checksums[i])
	}
	errChans := make([]chan error, len(s.disks))
	for i := range errChans {
		errChans[i] = make(chan error, 1)
	}
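	// Translate the byte range into erasure-block coordinates: lastBlock is the index of the
	// trailing partial block (if any), startOffset is the offset within the first block at
	// which reading starts, and chunksize is the per-disk shard size of a full block.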
	lastBlock := totalLength / blocksize
	startOffset := offset % blocksize
	chunksize := getChunkSize(blocksize, s.dataBlocks)

	blocks := make([][]byte, len(s.disks))
	for i := range blocks {
		blocks[i] = make([]byte, chunksize)
	}
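	// Read one erasure-coded block per iteration until the requested length has been written.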
	for off := offset / blocksize; length > 0; off++ {
		blockOffset := off * chunksize
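
		// The trailing block may be shorter than blocksize; once we reach it, shrink
		// blocksize, chunksize and the shard buffers to the remainder.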
		if currentBlock := (offset + f.Size) / blocksize; currentBlock == lastBlock {
			blocksize = totalLength % blocksize
			chunksize = getChunkSize(blocksize, s.dataBlocks)
			for i := range blocks {
				blocks[i] = blocks[i][:chunksize]
			}
		}
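		// Read the shards of this block from the disks, reconstructing from parity if needed.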
		err = s.readConcurrent(volume, path, blockOffset, blocks, verifiers, errChans)
		if err != nil {
			return f, errors.Trace(errXLReadQuorum)
		}
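
		// Write only the portion of this block that lies inside the requested range;
		// startOffset is non-zero only for the very first block.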
		writeLength := blocksize - startOffset
		if length < writeLength {
			writeLength = length
		}
		n, err := writeDataBlocks(writer, blocks, s.dataBlocks, startOffset, writeLength)
		if err != nil {
			return f, err
		}
		startOffset = 0
		f.Size += n
		length -= n
	}
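
	// Report back the algorithm used and the checksum each verifier computed over the data it read.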
	f.Algorithm = algorithm
	for i, disk := range s.disks {
		if disk == OfflineDisk {
			continue
		}
		f.Checksums[i] = verifiers[i].Sum(nil)
	}
	return f, nil
}
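
// erasureCountMissingBlocks returns the number of shards among the first limit blocks that have been marked missing (zero length).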
func erasureCountMissingBlocks(blocks [][]byte, limit int) int {
	missing := 0
	for i := range blocks[:limit] {
		if len(blocks[i]) == 0 {
			missing++
		}
	}
	return missing
}

// readConcurrent reads all requested data concurrently from the disks into blocks. It returns an error if
// too many disks failed while reading.
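//
// A hypothetical worked example, assuming 6 data and 2 parity shards: if one of the
// 6 data reads fails, missingDataBlocks is 1 and requiredReads becomes 7, so parity
// shards are read as well and the missing data shard is reconstructed afterwards by
// ErasureDecodeDataBlocks; if 3 data reads fail, requiredReads (9) exceeds the 8
// shards that exist and errXLReadQuorum is returned.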
func (s *ErasureStorage) readConcurrent(volume, path string, offset int64, blocks [][]byte, verifiers []*BitrotVerifier, errChans []chan error) (err error) {
	errs := make([]error, len(s.disks))
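
	// First, attempt to read only the data shards.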
	erasureReadBlocksConcurrent(s.disks[:s.dataBlocks], volume, path, offset, blocks[:s.dataBlocks], verifiers[:s.dataBlocks], errs[:s.dataBlocks], errChans[:s.dataBlocks])
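	// If any data shard could not be read, read additional parity shards so the missing data can be reconstructed.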
	missingDataBlocks := erasureCountMissingBlocks(blocks, s.dataBlocks)
	mustReconstruct := missingDataBlocks > 0
	if mustReconstruct {
		requiredReads := s.dataBlocks + missingDataBlocks
		if requiredReads > s.dataBlocks+s.parityBlocks {
			return errXLReadQuorum
		}
		erasureReadBlocksConcurrent(s.disks[s.dataBlocks:requiredReads], volume, path, offset, blocks[s.dataBlocks:requiredReads], verifiers[s.dataBlocks:requiredReads], errs[s.dataBlocks:requiredReads], errChans[s.dataBlocks:requiredReads])
		if erasureCountMissingBlocks(blocks, requiredReads) > 0 {
			erasureReadBlocksConcurrent(s.disks[requiredReads:], volume, path, offset, blocks[requiredReads:], verifiers[requiredReads:], errs[requiredReads:], errChans[requiredReads:])
		}
	}
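	// Give up unless at least dataBlocks shards were read without error; then reconstruct any missing data shards.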
	if err = reduceReadQuorumErrs(errs, []error{}, s.dataBlocks); err != nil {
		return err
	}
	if mustReconstruct {
		if err = s.ErasureDecodeDataBlocks(blocks); err != nil {
			return err
		}
	}
	return nil
}

// erasureReadBlocksConcurrent reads from each disk into the corresponding block in parallel.
// Therefore disks, blocks, verifiers, errors and errChans must all have the same length.
func erasureReadBlocksConcurrent(disks []StorageAPI, volume, path string, offset int64, blocks [][]byte, verifiers []*BitrotVerifier, errors []error, errChans []chan error) {
	for i := range errChans {
		go erasureReadFromFile(disks[i], volume, path, offset, blocks[i], verifiers[i], errChans[i])
	}
	for i := range errChans {
		errors[i] = <-errChans[i] // blocks until goroutine 'i' is done - no data race
		if errors[i] != nil {
			disks[i] = OfflineDisk
			blocks[i] = blocks[i][:0] // mark shard as missing
		}
	}
}

// erasureReadFromFile reads data from the given disk into the buffer; it is run as one goroutine per disk.
// It sends the returned error through the error channel.
func erasureReadFromFile(disk StorageAPI, volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier, errChan chan<- error) {
	if disk == OfflineDisk {
		errChan <- errors.Trace(errDiskNotFound)
		return
	}
	_, err := disk.ReadFile(volume, path, offset, buffer, verifier)
	errChan <- err
}