mirror of https://github.com/minio/minio.git
add healing for invalid shards by skipping the blocks (#13978)
Built on top of #13945; now we can simply skip the invalid shards, and the process is automated.
This commit is contained in:
parent 9ad6012782
commit 7e3a7d7044
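The gist of the change: healing derives a reference modification time that a quorum of the per-disk metadata entries agrees on (via pickValidDiskTimeWithQuorum, which the hunks below show as pre-existing context), and any shard whose mtime falls outside an acceptable delta of that reference is zeroed out and reconstructed instead of trusted. A minimal, self-contained sketch of the quorum-pick idea, with illustrative names rather than MinIO's actual code:

package main

import (
	"fmt"
	"time"
)

// pickQuorumDiskTime returns the most common mtime among the entries if it
// occurs at least quorum times, else the zero time (no consensus).
func pickQuorumDiskTime(mtimes []time.Time, quorum int) time.Time {
	counts := make(map[time.Time]int)
	for _, t := range mtimes {
		counts[t]++
	}
	var best time.Time
	bestN := 0
	for t, n := range counts {
		if n > bestN {
			best, bestN = t, n
		}
	}
	if bestN < quorum {
		return time.Time{}
	}
	return best
}

func main() {
	now := time.Unix(1640995200, 0).UTC()
	mtimes := []time.Time{now, now, now, now.Add(5 * time.Second)}
	fmt.Println(pickQuorumDiskTime(mtimes, 3)) // 2022-01-01 00:00:00 +0000 UTC
}

The real function in the diff operates on []FileInfo entries and uses latestMeta.Erasure.DataBlocks as the quorum, as the first Go hunk below shows.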
@@ -25,7 +25,7 @@ node_modules/
 mc.*
 s3-check-md5*
 xl-meta*
-healing-bin*
+healing-*
 inspect*
 200M*
 hash-set
@@ -80,8 +80,6 @@ function start_minio_16drive() {
     ## - 1st data shard.
     rm -rf "${WORK_DIR}/xl3/healing-shard-bucket/unaligned"
     sleep 10
-    ## Heal the shard
-    "${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
 
     go build ./docs/debugging/s3-check-md5/
     if ! ./s3-check-md5 \
@@ -120,7 +118,27 @@ function start_minio_16drive() {
         echo "server1 log:"
         cat "${WORK_DIR}/server1.log"
         echo "FAILED"
-        mkdir inspects
+        mkdir -p inspects
+        (cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-shard-bucket/unaligned/**)
+
+        "${WORK_DIR}/mc" mb play/inspects
+        "${WORK_DIR}/mc" mirror inspects play/inspects
+
+        purge "$WORK_DIR"
+        exit 1
+    fi
+
+    "${WORK_DIR}/mc" admin heal --quiet --recursive minio/healing-shard-bucket
+
+    if ! ./s3-check-md5 \
+        -debug \
+        -access-key minio \
+        -secret-key minio123 \
+        -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
+        echo "server1 log:"
+        cat "${WORK_DIR}/server1.log"
+        echo "FAILED"
+        mkdir -p inspects
         (cd inspects; "${WORK_DIR}/mc" admin inspect minio/healing-shard-bucket/unaligned/**)
 
         "${WORK_DIR}/mc" mb play/inspects
@@ -225,10 +225,11 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []err
 // - slice of errors about the state of data files on disk - can have
 //   a not-found error or a hash-mismatch error.
 func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo,
-    errs []error, latestMeta FileInfo,
-    bucket, object string, scanMode madmin.HealScanMode) ([]StorageAPI, []error) {
+    errs []error, latestMeta FileInfo, bucket, object string,
+    scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time) {
     var diskMTime time.Time
+    var shardFix bool
     if !latestMeta.DataShardFixed() {
         diskMTime = pickValidDiskTimeWithQuorum(partsMetadata,
             latestMeta.Erasure.DataBlocks)
     }
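Note the new guard: the quorum disk mtime is only computed when the object has not already been shard-fixed. The diff does not show DataShardFixed itself; a plausible self-contained sketch, assuming the marker is the reserved metadata key that a later hunk in this commit writes (key name and types are illustrative):

package main

import "fmt"

// FileInfo stands in for MinIO's metadata struct; only the one field used
// here is modeled.
type FileInfo struct {
	Metadata map[string]string
}

// dataShardFixKey mirrors the reservedMetadataPrefixLowerDataShardFix
// constant written by a later hunk; the literal value is an assumption.
const dataShardFixKey = "x-minio-internal-data-shard-fix"

// dataShardFixed reports whether this object version was already repaired
// by the shard-skipping heal, so the mtime check is not applied twice.
func (fi FileInfo) dataShardFixed() bool {
	return fi.Metadata[dataShardFixKey] == "true"
}

func main() {
	fi := FileInfo{Metadata: map[string]string{dataShardFixKey: "true"}}
	fmt.Println(fi.dataShardFixed()) // true
}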
@@ -283,6 +284,8 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
 
         if erasureDistributionReliable {
             if !meta.IsValid() {
+                partsMetadata[i] = FileInfo{}
+                dataErrs[i] = errFileCorrupt
                 continue
             }
 
@@ -305,6 +308,23 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
             }
         }
 
+        if !diskMTime.Equal(timeSentinel) && !diskMTime.IsZero() {
+            if !partsMetadata[i].AcceptableDelta(diskMTime, shardDiskTimeDelta) {
+                // Not within the acceptable delta, skip.
+                // If the disk mtime mismatches, it is considered outdated.
+                // https://github.com/minio/minio/pull/13803
+                //
+                // This check is only active if we could find a maximally
+                // occurring disk mtime that is roughly the same across
+                // the quorum, allowing us to skip those shards which we
+                // think might be wrong.
+                shardFix = true
+                partsMetadata[i] = FileInfo{}
+                dataErrs[i] = errFileCorrupt
+                continue
+            }
+        }
+
         // Always check data, if we got it.
         if (len(meta.Data) > 0 || meta.Size == 0) && len(meta.Parts) > 0 {
             checksumInfo := meta.Erasure.GetChecksumInfo(meta.Parts[0].Number)
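AcceptableDelta is likewise not shown in this diff; judging from the call site, it reports whether the entry's mtime lies within shardDiskTimeDelta of the quorum disk mtime, in either direction. A sketch under that assumption:

package main

import (
	"fmt"
	"time"
)

// acceptableDelta reports whether t is within delta of ref in either
// direction; the heal treats a shard's mtime as trustworthy only then.
func acceptableDelta(t, ref time.Time, delta time.Duration) bool {
	d := t.Sub(ref)
	if d < 0 {
		d = -d
	}
	return d <= delta
}

func main() {
	ref := time.Unix(1640995200, 0).UTC()
	fmt.Println(acceptableDelta(ref.Add(500*time.Millisecond), ref, time.Second)) // true
	fmt.Println(acceptableDelta(ref.Add(2*time.Second), ref, time.Second))        // false
}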
@@ -338,14 +358,6 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
             }
         }
 
-        if !diskMTime.Equal(timeSentinel) && !diskMTime.IsZero() {
-            if !partsMetadata[i].AcceptableDelta(diskMTime, shardDiskTimeDelta) {
-                // Not within the acceptable delta, skip.
-                partsMetadata[i] = FileInfo{}
-                continue
-            }
-        }
-
         if dataErrs[i] == nil {
             // All parts verified, mark it as all data available.
             availableDisks[i] = onlineDisk
@@ -355,5 +367,10 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
         }
     }
 
-    return availableDisks, dataErrs
+    if shardFix {
+        // Only when shard is fixed return an appropriate disk mtime value.
+        return availableDisks, dataErrs, diskMTime
+    } // else return timeSentinel for disk mtime
+
+    return availableDisks, dataErrs, timeSentinel
 }
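The function now returns a third value: the quorum disk mtime when a shard was actually skipped and queued for fixing, or a sentinel timestamp meaning "no information". A compact illustration of this sentinel-return pattern (the sentinel value mirrors MinIO's timeSentinel convention but is an assumption here):

package main

import (
	"fmt"
	"time"
)

// timeSentinel mirrors MinIO's convention of a fixed "unset" timestamp;
// the exact value is assumed for illustration.
var timeSentinel = time.Unix(0, 0).UTC()

// diskMTimeResult returns a meaningful disk mtime only when a shard was
// actually fixed; otherwise callers receive the sentinel and know to
// skip the follow-up metadata update.
func diskMTimeResult(shardFix bool, diskMTime time.Time) time.Time {
	if shardFix {
		return diskMTime
	}
	return timeSentinel
}

func main() {
	fmt.Println(diskMTimeResult(false, time.Now()).Equal(timeSentinel)) // true
}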
@@ -251,7 +251,8 @@ func TestListOnlineDisks(t *testing.T) {
             t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
                 test.expectedTime, modTime)
         }
-        availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, fi, bucket, object, madmin.HealDeepScan)
+        availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata,
+            test.errs, fi, bucket, object, madmin.HealDeepScan)
         test.errs = newErrs
 
         if test._tamperBackend != noTamper {
@@ -433,7 +434,8 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
                 test.expectedTime, modTime)
         }
 
-        availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, fi, bucket, object, madmin.HealDeepScan)
+        availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata,
+            test.errs, fi, bucket, object, madmin.HealDeepScan)
         test.errs = newErrs
 
         if test._tamperBackend != noTamper {
@@ -494,7 +496,8 @@ func TestDisksWithAllParts(t *testing.T) {
 
     erasureDisks, _ = listOnlineDisks(erasureDisks, partsMetadata, errs)
 
-    filteredDisks, errs := disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, fi, bucket, object, madmin.HealDeepScan)
+    filteredDisks, errs, _ := disksWithAllParts(ctx, erasureDisks, partsMetadata,
+        errs, fi, bucket, object, madmin.HealDeepScan)
 
     if len(filteredDisks) != len(erasureDisks) {
         t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
@@ -515,7 +518,8 @@ func TestDisksWithAllParts(t *testing.T) {
     partsMetadata[0].ModTime = partsMetadata[0].ModTime.Add(-1 * time.Hour)
 
     errs = make([]error, len(erasureDisks))
-    filteredDisks, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, fi, bucket, object, madmin.HealDeepScan)
+    filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
+        errs, fi, bucket, object, madmin.HealDeepScan)
 
     if len(filteredDisks) != len(erasureDisks) {
         t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
@@ -535,7 +539,8 @@ func TestDisksWithAllParts(t *testing.T) {
     partsMetadata[1].DataDir = "foo-random"
 
     errs = make([]error, len(erasureDisks))
-    filteredDisks, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, fi, bucket, object, madmin.HealDeepScan)
+    filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
+        errs, fi, bucket, object, madmin.HealDeepScan)
 
     if len(filteredDisks) != len(erasureDisks) {
         t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
@@ -571,7 +576,8 @@ func TestDisksWithAllParts(t *testing.T) {
     }
 
     errs = make([]error, len(erasureDisks))
-    filteredDisks, errs = disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, fi, bucket, object, madmin.HealDeepScan)
+    filteredDisks, errs, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
+        errs, fi, bucket, object, madmin.HealDeepScan)
 
     if len(filteredDisks) != len(erasureDisks) {
         t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
@@ -331,7 +331,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
     // used here for reconstruction. This is done to ensure that
     // we do not skip drives that have inconsistent metadata to be
     // skipped from purging when they are stale.
-    availableDisks, dataErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata,
+    availableDisks, dataErrs, diskMTime := disksWithAllParts(ctx, onlineDisks, partsMetadata,
         errs, latestMeta, bucket, object, scanMode)
 
     // Loop to find number of disks with valid data, per-drive
@@ -581,6 +581,20 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
         }
     }
 
+    if !diskMTime.Equal(timeSentinel) && !diskMTime.IsZero() {
+        // Update metadata to indicate the special fix.
+        _, err = er.PutObjectMetadata(ctx, bucket, object, ObjectOptions{
+            NoLock: true,
+            UserDefined: map[string]string{
+                reservedMetadataPrefixLowerDataShardFix: "true",
+                // Another reserved metadata key captures the original
+                // disk mtime for this version of the object, to be used
+                // possibly in the future to heal other versions.
+                ReservedMetadataPrefixLower + "disk-mtime": diskMTime.String(),
+            },
+        })
+    }
+
     // Set the size of the object in the heal result
     result.ObjectSize = latestMeta.Size
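The marker write above stores diskMTime.String() under a reserved key so that future heals of other versions could reuse it. A hedged sketch of reading such a value back; the layout string matches Go's time.Time.String() output for wall-clock times, and the key name simply mirrors the diff (both are assumptions, not MinIO API):

package main

import (
	"fmt"
	"time"
)

const diskMTimeKey = "x-minio-internal-disk-mtime"

// storedDiskMTime parses a disk mtime previously stored via
// time.Time.String(), returning false if absent or unparsable.
func storedDiskMTime(meta map[string]string) (time.Time, bool) {
	s, ok := meta[diskMTimeKey]
	if !ok {
		return time.Time{}, false
	}
	// time.Time.String() uses this reference layout.
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		return time.Time{}, false
	}
	return t, true
}

func main() {
	now := time.Unix(1640995200, 0).UTC() // fixed wall-clock time
	meta := map[string]string{diskMTimeKey: now.String()}
	t, ok := storedDiskMTime(meta)
	fmt.Println(ok, t.Equal(now)) // true true
}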
@@ -125,10 +125,6 @@ func main() {
             log.Println("Objects encrypted with SSE-C do not have md5sum as ETag:", object.Key)
             continue
         }
-        if _, ok := object.UserMetadata["X-Amz-Server-Side-Encryption-Customer-Algorithm"]; ok {
-            log.Println("Objects encrypted with SSE-C do not have md5sum as ETag:", object.Key)
-            continue
-        }
         if v, ok := object.UserMetadata["X-Amz-Server-Side-Encryption"]; ok && v == "aws:kms" {
             log.Println("Objects encrypted with SSE-KMS do not have md5sum as ETag:", object.Key)
             continue
go.mod (6 lines changed)
@@ -49,10 +49,10 @@ require (
     github.com/minio/csvparser v1.0.0
     github.com/minio/highwayhash v1.0.2
     github.com/minio/kes v0.14.0
-    github.com/minio/madmin-go v1.1.20
-    github.com/minio/minio-go/v7 v7.0.17
+    github.com/minio/madmin-go v1.1.21
+    github.com/minio/minio-go/v7 v7.0.19
     github.com/minio/parquet-go v1.1.0
-    github.com/minio/pkg v1.1.9
+    github.com/minio/pkg v1.1.10
     github.com/minio/selfupdate v0.4.0
     github.com/minio/sha256-simd v1.0.0
     github.com/minio/simdjson-go v0.2.1
go.sum (12 lines changed)
@@ -1088,8 +1088,9 @@ github.com/minio/kes v0.14.0/go.mod h1:OUensXz2BpgMfiogslKxv7Anyx/wj+6bFC6qA7BQc
 github.com/minio/madmin-go v1.0.12/go.mod h1:BK+z4XRx7Y1v8SFWXsuLNqQqnq5BO/axJ8IDJfgyvfs=
 github.com/minio/madmin-go v1.1.15/go.mod h1:Iu0OnrMWNBYx1lqJTW+BFjBMx0Hi0wjw8VmqhiOs2Jo=
 github.com/minio/madmin-go v1.1.17/go.mod h1:Iu0OnrMWNBYx1lqJTW+BFjBMx0Hi0wjw8VmqhiOs2Jo=
-github.com/minio/madmin-go v1.1.20 h1:jig4gJi0CD+FYz+Cnd+TNo0oqhNaZcLmfUqNl5b46Eo=
-github.com/minio/madmin-go v1.1.20/go.mod h1:Iu0OnrMWNBYx1lqJTW+BFjBMx0Hi0wjw8VmqhiOs2Jo=
+github.com/minio/madmin-go v1.1.19/go.mod h1:Iu0OnrMWNBYx1lqJTW+BFjBMx0Hi0wjw8VmqhiOs2Jo=
+github.com/minio/madmin-go v1.1.21 h1:RzWjnFP/UzMf3BTCfL38z6hoi7TWT+kqW917nKOmh5o=
+github.com/minio/madmin-go v1.1.21/go.mod h1:vIDiJEjYOG27M/CgZPmxBdgdn3Yz5SIwtXtMzGAsqsA=
 github.com/minio/mc v0.0.0-20211207230606-23a05f5a17f2 h1:xocb1RGyrDJ8PxkNn0NSbaBlfdU6J/Ag9QK62pb7nR8=
 github.com/minio/mc v0.0.0-20211207230606-23a05f5a17f2/go.mod h1:siI9jWTzj1KsNXgz6NOL/S7OTaAUM0OMi+zEkF08gnA=
 github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
@@ -1098,8 +1099,10 @@ github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEp
 github.com/minio/minio-go/v7 v7.0.10/go.mod h1:td4gW1ldOsj1PbSNS+WYK43j+P1XVhX/8W8awaYlBFo=
 github.com/minio/minio-go/v7 v7.0.11-0.20210302210017-6ae69c73ce78/go.mod h1:mTh2uJuAbEqdhMVl6CMIIZLUeiMiWtJR4JB8/5g2skw=
 github.com/minio/minio-go/v7 v7.0.16-0.20211108161804-a7a36ee131df/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
-github.com/minio/minio-go/v7 v7.0.17 h1:5SiS3pqiQDbNhmXMxtqn2HzAInbN5cbHT7ip9F0F07E=
-github.com/minio/minio-go/v7 v7.0.17/go.mod h1:SyQ1IFeJuaa+eV5yEDxW7hYE1s5VVq5sgImDe27R+zg=
+github.com/minio/minio-go/v7 v7.0.18/go.mod h1:SyQ1IFeJuaa+eV5yEDxW7hYE1s5VVq5sgImDe27R+zg=
+github.com/minio/minio-go/v7 v7.0.19 h1:7igdH+/zj3DO3VDr3RBUXfbCnkauKWk/tIw3IA9P1GE=
+github.com/minio/minio-go/v7 v7.0.19/go.mod h1:SyQ1IFeJuaa+eV5yEDxW7hYE1s5VVq5sgImDe27R+zg=
 github.com/minio/operator v0.0.0-20211011212245-31460bbbc4b7 h1:dkfuMNslMjGoJ4ArAMSoQhidYNdm3SgzLBP+f96O3/E=
 github.com/minio/operator v0.0.0-20211011212245-31460bbbc4b7/go.mod h1:lDpuz8nwsfhKlfiBaA3Z8AW019fWEAjO2gltfLbdorE=
 github.com/minio/operator/logsearchapi v0.0.0-20211011212245-31460bbbc4b7 h1:vFtQqCt67ETp0JAkOKRWTKkgwFv14Vc1jJSxmQ8wJE0=
@@ -1110,8 +1113,9 @@ github.com/minio/pkg v1.0.3/go.mod h1:obU54TZ9QlMv0TRaDgQ/JTzf11ZSXxnSfLrm4tMtBP
 github.com/minio/pkg v1.0.4/go.mod h1:obU54TZ9QlMv0TRaDgQ/JTzf11ZSXxnSfLrm4tMtBP8=
 github.com/minio/pkg v1.0.11/go.mod h1:32x/3OmGB0EOi1N+3ggnp+B5VFkSBBB9svPMVfpnf14=
 github.com/minio/pkg v1.1.3/go.mod h1:32x/3OmGB0EOi1N+3ggnp+B5VFkSBBB9svPMVfpnf14=
-github.com/minio/pkg v1.1.9 h1:NJrcrQyFCSgyF+u6v7FbPXjjNV0oSnBuBevhsTKmA2U=
-github.com/minio/pkg v1.1.9/go.mod h1:32x/3OmGB0EOi1N+3ggnp+B5VFkSBBB9svPMVfpnf14=
+github.com/minio/pkg v1.1.10 h1:EZvPb8XsTQaafg7EfZVWu/CkNRAf38dtuWsfrOmDqG8=
+github.com/minio/pkg v1.1.10/go.mod h1:3I8LLp1/HDhNKl35I8ve0mhzp7+bvVTrJmLqkdkOHME=
 github.com/minio/selfupdate v0.3.1/go.mod h1:b8ThJzzH7u2MkF6PcIra7KaXO9Khf6alWPvMSyTDCFM=
 github.com/minio/selfupdate v0.4.0 h1:A7t07pN4Ch1tBTIRStW0KhUVyykz+2muCqFsITQeEW8=
 github.com/minio/selfupdate v0.4.0/go.mod h1:mcDkzMgq8PRcpCRJo/NlPY7U45O5dfYl2Y0Rg7IustY=