Mirror of https://github.com/minio/minio.git, synced 2024-12-23 21:55:53 -05:00
heal: Remove transitioned objects' parts from outdated disks (#13018)
Bonus: check equality for replication and other metadata
This commit is contained in:
parent 901d1314af
commit db35bcf2ce
@@ -24,7 +24,6 @@ import (
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/minio/madmin-go"
	"github.com/minio/minio/internal/logger"
@@ -204,7 +203,7 @@ func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets

// Only heal on disks where we are sure that healing is needed. We can expand
// this list as and when we figure out more errors can be added to this list safely.
-func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime time.Time) bool {
+func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, latestMeta FileInfo) bool {
	switch {
	case errors.Is(erErr, errFileNotFound) || errors.Is(erErr, errFileVersionNotFound):
		return true
@@ -222,7 +221,16 @@ func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime t
			return true
		}
	}
-	if !quorumModTime.Equal(meta.ModTime) {
+	if !latestMeta.MetadataEquals(meta) {
+		return true
+	}
+	if !latestMeta.TransitionInfoEquals(meta) {
		return true
	}
+	if !latestMeta.ReplicationInfoEquals(meta) {
+		return true
+	}
+	if !latestMeta.ModTime.Equal(meta.ModTime) {
+		return true
+	}
	if meta.XLV1 {
@@ -295,6 +303,13 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
	availableDisks, dataErrs := disksWithAllParts(ctx, storageDisks, partsMetadata,
		errs, bucket, object, scanMode)

+	// Latest FileInfo for reference. If a valid metadata is not
+	// present, it is as good as object not found.
+	latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, result.DataBlocks)
+	if err != nil {
+		return result, toObjectErr(err, bucket, object, versionID)
+	}
+
	// Loop to find number of disks with valid data, per-drive
	// data state and a list of outdated disks on which data needs
	// to be healed.
@@ -325,7 +340,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
			driveState = madmin.DriveStateCorrupt
		}

-		if shouldHealObjectOnDisk(errs[i], dataErrs[i], partsMetadata[i], modTime) {
+		if shouldHealObjectOnDisk(errs[i], dataErrs[i], partsMetadata[i], latestMeta) {
			outDatedDisks[i] = storageDisks[i]
			disksToHealCount++
			result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
@@ -379,20 +394,12 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
		return result, nil
	}

-	// Latest FileInfo for reference. If a valid metadata is not
-	// present, it is as good as object not found.
-	latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, result.DataBlocks)
-	if err != nil {
-		return result, toObjectErr(err, bucket, object, versionID)
-	}
-
	cleanFileInfo := func(fi FileInfo) FileInfo {
		// Returns a copy of the 'fi' with checksums and parts nil'ed.
		nfi := fi
-		nfi.Erasure.Index = 0
-		nfi.Erasure.Checksums = nil
-		if fi.IsRemote() {
-			nfi.Parts = nil
+		if !fi.IsRemote() {
+			nfi.Erasure.Index = 0
+			nfi.Erasure.Checksums = nil
		}
		return nfi
	}
@@ -425,16 +432,16 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
		inlineBuffers = make([]*bytes.Buffer, len(outDatedDisks))
	}

-	// Reorder so that we have data disks first and parity disks next.
-	latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
-	outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
-	partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
-	copyPartsMetadata = shufflePartsMetadata(copyPartsMetadata, latestMeta.Erasure.Distribution)
-
	if !latestMeta.Deleted && !latestMeta.IsRemote() {
		result.DataBlocks = latestMeta.Erasure.DataBlocks
		result.ParityBlocks = latestMeta.Erasure.ParityBlocks

+		// Reorder so that we have data disks first and parity disks next.
+		latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
+		outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
+		partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
+		copyPartsMetadata = shufflePartsMetadata(copyPartsMetadata, latestMeta.Erasure.Distribution)
+
		// Heal each part. erasureHealFile() will write the healed
		// part to .minio/tmp/uuid/ which needs to be renamed later to
		// the final location.
@@ -531,22 +538,21 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
		if disk == OfflineDisk {
			continue
		}

		// record the index of the updated disks
		partsMetadata[i].Erasure.Index = i + 1

-		// dataDir should be empty when
-		// - transitionStatus is complete and not in restored state
-		if partsMetadata[i].IsRemote() {
-			partsMetadata[i].DataDir = ""
-		}
-
		// Attempt a rename now from healed data to final location.
		if err = disk.RenameData(ctx, minioMetaTmpBucket, tmpID, partsMetadata[i], bucket, object); err != nil {
			logger.LogIf(ctx, err)
			return result, toObjectErr(err, bucket, object)
		}
-
+		// Remove any remaining parts from outdated disks from before transition.
+		if partsMetadata[i].IsRemote() {
+			rmDataDir := partsMetadata[i].DataDir
+			disk.DeleteVol(ctx, pathJoin(bucket, encodeDirObject(object), rmDataDir), true)
+		}
+
		for i, v := range result.Before.Drives {
			if v.Endpoint == disk.String() {
				result.After.Drives[i].State = madmin.DriveStateOk
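The block added above is the change the commit title describes: after the healed metadata is renamed into place, a transitioned (remote) object's leftover local data directory is removed from the outdated disk. A rough standalone sketch of that cleanup idea, using plain filesystem calls instead of MinIO's StorageAPI (the helper name and paths here are illustrative only):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// removeStaleDataDir sketches the cleanup step: a transitioned object keeps
// only its metadata locally, so any data directory left over from before the
// transition can be removed once the healed metadata is in place.
func removeStaleDataDir(diskRoot, bucket, object, dataDir string) error {
	if dataDir == "" {
		return nil // nothing was ever written locally for this version
	}
	target := filepath.Join(diskRoot, bucket, object, dataDir)
	fmt.Println("removing leftover parts at", target)
	return os.RemoveAll(target)
}

func main() {
	// Illustrative values only; a real heal derives these from FileInfo.
	if err := removeStaleDataDir("/tmp/disk1", "mybucket", "photos/cat.png",
		"9dd7d884-121a-41e9-9a4e-cd6a2a40d6a8"); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}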
@@ -207,6 +207,43 @@ func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
	return objInfo
}

+// TransitionInfoEquals returns true if transition related information are equal, false otherwise.
+func (fi FileInfo) TransitionInfoEquals(ofi FileInfo) bool {
+	switch {
+	case fi.TransitionStatus != ofi.TransitionStatus,
+		fi.TransitionTier != ofi.TransitionTier,
+		fi.TransitionedObjName != ofi.TransitionedObjName,
+		fi.TransitionVersionID != ofi.TransitionVersionID:
+		return false
+	}
+	return true
+}
+
+// MetadataEquals returns true if FileInfos Metadata maps are equal, false otherwise.
+func (fi FileInfo) MetadataEquals(ofi FileInfo) bool {
+	if len(fi.Metadata) != len(ofi.Metadata) {
+		return false
+	}
+	for k, v := range fi.Metadata {
+		if ov, ok := ofi.Metadata[k]; !ok || ov != v {
+			return false
+		}
+	}
+	return true
+}
+
+// ReplicationInfoEquals returns true if server-side replication related fields are equal, false otherwise.
+func (fi FileInfo) ReplicationInfoEquals(ofi FileInfo) bool {
+	switch {
+	case fi.MarkDeleted != ofi.MarkDeleted,
+		fi.DeleteMarkerReplicationStatus != ofi.DeleteMarkerReplicationStatus,
+		fi.VersionPurgeStatus != ofi.VersionPurgeStatus,
+		fi.Metadata[xhttp.AmzBucketReplicationStatus] != ofi.Metadata[xhttp.AmzBucketReplicationStatus]:
+		return false
+	}
+	return true
+}
+
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
	for i, part := range parts {
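These three helpers are what the new shouldHealObjectOnDisk consults. As a small standalone illustration of the replication clause only, the check boils down to comparing the replication-status header carried in each copy's metadata map (the header key below is written out literally as an assumed stand-in for the xhttp.AmzBucketReplicationStatus constant):

package main

import "fmt"

// replicationStatusEquals sketches the spirit of ReplicationInfoEquals'
// last clause: two copies disagree if their replication status headers differ.
func replicationStatusEquals(a, b map[string]string) bool {
	const key = "X-Amz-Bucket-Replication-Status" // assumed stand-in for xhttp.AmzBucketReplicationStatus
	return a[key] == b[key]
}

func main() {
	onDisk := map[string]string{"X-Amz-Bucket-Replication-Status": "PENDING"}
	latest := map[string]string{"X-Amz-Bucket-Replication-Status": "COMPLETED"}
	// A disk still carrying PENDING while quorum says COMPLETED is outdated.
	fmt.Println(replicationStatusEquals(onDisk, latest)) // false
}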
@@ -215,3 +215,62 @@ func TestFindFileInfoInQuorum(t *testing.T) {
		})
	}
}
+
+func TestTransitionInfoEquals(t *testing.T) {
+	inputs := []struct {
+		tier            string
+		remoteObjName   string
+		remoteVersionID string
+		status          string
+	}{
+		{
+			tier:            "S3TIER-1",
+			remoteObjName:   mustGetUUID(),
+			remoteVersionID: mustGetUUID(),
+			status:          "complete",
+		},
+		{
+			tier:            "S3TIER-2",
+			remoteObjName:   mustGetUUID(),
+			remoteVersionID: mustGetUUID(),
+			status:          "complete",
+		},
+	}
+
+	var i uint
+	for i = 0; i < 8; i++ {
+		fi := FileInfo{
+			TransitionTier:      inputs[0].tier,
+			TransitionedObjName: inputs[0].remoteObjName,
+			TransitionVersionID: inputs[0].remoteVersionID,
+			TransitionStatus:    inputs[0].status,
+		}
+		ofi := fi
+		if i&(1<<0) != 0 {
+			ofi.TransitionTier = inputs[1].tier
+		}
+		if i&(1<<1) != 0 {
+			ofi.TransitionedObjName = inputs[1].remoteObjName
+		}
+		if i&(1<<2) != 0 {
+			ofi.TransitionVersionID = inputs[1].remoteVersionID
+		}
+		actual := fi.TransitionInfoEquals(ofi)
+		if i == 0 && !actual {
+			t.Fatalf("Test %d: Expected FileInfo's transition info to be equal: fi %v ofi %v", i, fi, ofi)
+		}
+		if i != 0 && actual {
+			t.Fatalf("Test %d: Expected FileInfo's transition info to be inequal: fi %v ofi %v", i, fi, ofi)
+		}
+	}
+	fi := FileInfo{
+		TransitionTier:      inputs[0].tier,
+		TransitionedObjName: inputs[0].remoteObjName,
+		TransitionVersionID: inputs[0].remoteVersionID,
+		TransitionStatus:    inputs[0].status,
+	}
+	ofi := FileInfo{}
+	if fi.TransitionInfoEquals(ofi) {
+		t.Fatalf("Expected to be inequal: fi %v ofi %v", fi, ofi)
+	}
+}
@@ -1930,7 +1930,10 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f

	var srcDataPath string
	var dstDataPath string
-	dataDir := retainSlash(fi.DataDir)
+	var dataDir string
+	if !fi.IsRemote() {
+		dataDir = retainSlash(fi.DataDir)
+	}
	if dataDir != "" {
		srcDataPath = retainSlash(pathJoin(srcVolumeDir, srcPath, dataDir))
		// make sure to always use path.Join here, do not use pathJoin as
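The xl-storage change above complements the healing change: for a remote (transitioned) object, RenameData no longer derives a local data path, so only the metadata moves and the stale parts are left to the explicit cleanup shown earlier. A simplified standalone sketch of that guard (the fileInfo type and isRemote method below are stand-ins, not MinIO's real definitions):

package main

import "fmt"

// fileInfo is a simplified stand-in carrying only what this sketch needs.
type fileInfo struct {
	DataDir          string
	TransitionStatus string
}

// isRemote approximates the idea behind FileInfo.IsRemote(): the object's
// parts live on a remote tier once the transition has completed.
func (fi fileInfo) isRemote() bool { return fi.TransitionStatus == "complete" }

// dataDirToRename returns the data directory that should follow the metadata
// during a rename, or "" when the parts are not stored locally.
func dataDirToRename(fi fileInfo) string {
	if fi.isRemote() {
		return "" // skip the data rename for transitioned objects
	}
	return fi.DataDir
}

func main() {
	local := fileInfo{DataDir: "3f6f4b2a-0000-4000-8000-000000000000"}
	remote := fileInfo{DataDir: "3f6f4b2a-0000-4000-8000-000000000000", TransitionStatus: "complete"}
	fmt.Printf("local: %q, remote: %q\n", dataDirToRename(local), dataDirToRename(remote))
}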