/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"fmt"
	"hash"
	"strings"

	"github.com/minio/minio/cmd/logger"
)

// HealFile tries to reconstruct an erasure-coded file spread over all
// available disks. HealFile will read the valid parts of the file,
// reconstruct the missing data and write the reconstructed parts back
// to `staleDisks` at the destination `dstVol/dstPath/`. Parts are
// verified against the given BitrotAlgorithm and checksums.
//
// `staleDisks` is a slice of disks where each non-nil entry has stale
// or no data, and so will be healed.
//
// It is required that `s.disks` have a (read-quorum) majority of
// disks with valid data for healing to work.
//
// In addition, `staleDisks` and `s.disks` must have the same ordering
// of disks w.r.t. erasure coding of the object.
//
// Errors when writing to `staleDisks` are not propagated as long as
// writes succeed for at least one disk. This allows partial healing
// despite stale disks being faulty.
//
// It returns bitrot checksums for the non-nil staleDisks on which
// healing succeeded.
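//
// A hypothetical call (names are illustrative, not taken from an
// actual call site) might look like:
//
//	info, err := s.HealFile(ctx, staleDisks, srcVolume, srcPath,
//		blockSize, dstVolume, dstPath, partSize, alg, checksums)
//	// on success, info.Checksums[i] is set for every healed disk i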
func (s ErasureStorage) HealFile(ctx context.Context, staleDisks []StorageAPI, volume, path string, blocksize int64,
	dstVol, dstPath string, size int64, alg BitrotAlgorithm, checksums [][]byte) (
	f ErasureFileInfo, err error) {

	if !alg.Available() {
		logger.LogIf(ctx, errBitrotHashAlgoInvalid)
		return f, errBitrotHashAlgoInvalid
	}

	// Initialization
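	// Hashers will compute the new bitrot checksum for each disk
	// being healed, while verifiers check data read from the disks
	// that already hold valid data.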
	f.Checksums = make([][]byte, len(s.disks))
	hashers := make([]hash.Hash, len(s.disks))
	verifiers := make([]*BitrotVerifier, len(s.disks))
	for i, disk := range s.disks {
		switch {
		case staleDisks[i] != nil:
			hashers[i] = alg.New()
		case disk == nil:
			// disregard unavailable disk
			continue
		default:
			verifiers[i] = NewBitrotVerifier(alg, checksums[i])
		}
	}
	writeErrors := make([]error, len(s.disks))

	// Read part file data on each disk
	chunksize := ceilFrac(blocksize, int64(s.dataBlocks))
	numBlocks := ceilFrac(size, blocksize)

	readLen := chunksize * (numBlocks - 1)

	lastChunkSize := chunksize
	hasSmallerLastBlock := size%blocksize != 0
	if hasSmallerLastBlock {
		lastBlockLen := size % blocksize
		lastChunkSize = ceilFrac(lastBlockLen, int64(s.dataBlocks))
	}
	readLen += lastChunkSize
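	// For example (illustrative numbers only): with blocksize=1MiB,
	// size=2.5MiB and 4 data blocks, chunksize=256KiB, numBlocks=3
	// and lastChunkSize=128KiB, so each disk contributes
	// readLen = 2*256KiB + 128KiB.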
	var buffers [][]byte
	buffers, _, err = s.readConcurrent(ctx, volume, path, 0, readLen, verifiers)
	if err != nil {
		return f, err
	}

	// Scan the part files on disk, reconstruct the data block by
	// block and write it out to the stale disks.
	blocks := make([][]byte, len(s.disks))

	if numBlocks > 1 {
		// Allocate once for all the equal length blocks. The
		// last block may have a different length - allocation
		// for this happens inside the for loop below.
		for i := range blocks {
			if len(buffers[i]) == 0 {
				blocks[i] = make([]byte, chunksize)
			}
		}
	}

	var buffOffset int64
	for blockNumber := int64(0); blockNumber < numBlocks; blockNumber++ {
		if blockNumber == numBlocks-1 && lastChunkSize != chunksize {
			for i := range blocks {
				if len(buffers[i]) == 0 {
					blocks[i] = make([]byte, lastChunkSize)
				}
			}
		}
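
		// A zero-length slice marks a shard as missing, so the
		// decoder below reconstructs it; keeping the slice
		// (rather than setting it to nil) lets the capacity
		// allocated above be reused.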
		for i := range blocks {
			if len(buffers[i]) == 0 {
				blocks[i] = blocks[i][0:0]
			}
		}

		csize := chunksize
		if blockNumber == numBlocks-1 {
			csize = lastChunkSize
		}
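		// Shards that were read successfully are windowed out of
		// the read buffers at the current offset; slicing here
		// copies no data.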
		for i := range blocks {
			if len(buffers[i]) != 0 {
				blocks[i] = buffers[i][buffOffset : buffOffset+csize]
			}
		}
		buffOffset += csize

		if err = s.ErasureDecodeDataAndParityBlocks(ctx, blocks); err != nil {
			return f, err
		}

		// write computed shards as chunks on file in each
		// stale disk
		writeSucceeded := false
		for i, disk := range staleDisks {
			// skip nil disk or disk that had error on
			// previous write
			if disk == nil || writeErrors[i] != nil {
				continue
			}
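
			// Append this chunk to the part file on the
			// stale disk and feed the same bytes to the
			// bitrot hasher, so the checksum covers exactly
			// what was written.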
			writeErrors[i] = disk.AppendFile(dstVol, dstPath, blocks[i])
			if writeErrors[i] == nil {
				hashers[i].Write(blocks[i])
				writeSucceeded = true
			}
		}

		// If all disks had write errors, we quit.
		if !writeSucceeded {
			// build error from all write errors
			err := joinWriteErrors(writeErrors)
			logger.LogIf(ctx, err)
			return f, err
		}
	}

	// copy computed file hashes into output variable
	f.Size = size
	f.Algorithm = alg
	for i, disk := range staleDisks {
		if disk == nil || writeErrors[i] != nil {
			continue
		}
		f.Checksums[i] = hashers[i].Sum(nil)
	}
	return f, nil
}
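
// joinWriteErrors collects the non-nil per-disk write errors into a
// single error, naming each failed disk by its 1-based position.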
func joinWriteErrors(errs []error) error {
	msgs := []string{}
	for i, err := range errs {
		if err == nil {
			continue
		}
		msgs = append(msgs, fmt.Sprintf("disk %d: %v", i+1, err))
	}
	return fmt.Errorf("all stale disks had write errors during healing: %s",
		strings.Join(msgs, ", "))
}