Mirror of https://github.com/minio/minio.git (synced 2025-04-19 18:17:30 -04:00)
fix: add more protection distribution to match EcIndex (#10772)

Allows for stricter validation in picking up the right set of disks for reconstruction.
This commit is contained in:
parent: 858e2a43df
commit: 5b30bbda92
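The substance of the change: each disk's metadata records an Erasure.Index, and reconstruction now trusts that index only when it agrees with the object's expected distribution; too many mismatches trigger a fallback to the plain distribution order. Below is a minimal, self-contained Go sketch of that validation idea; the names (orderByIndex, items, idx) are illustrative, not MinIO's. The committed helper, shuffleDisksAndPartsMetadataByIndex in the erasure-metadata hunk further down, applies the same rule to disks and parts metadata in lockstep.

package main

import "fmt"

// orderByIndex places item i at position idx[i]-1, but only when idx[i]
// matches the expected distribution; it reports how many entries disagreed.
func orderByIndex(items []string, idx, distribution []int) ([]string, int) {
	ordered := make([]string, len(items))
	inconsistent := 0
	for i, item := range items {
		if idx[i] != distribution[i] {
			inconsistent++ // stale or misplaced metadata
			continue
		}
		ordered[idx[i]-1] = item
	}
	return ordered, inconsistent
}

func main() {
	items := []string{"d1", "d2", "d3", "d4"}
	distribution := []int{2, 1, 4, 3}

	// Healthy case: on-disk indexes agree with the distribution.
	ordered, bad := orderByIndex(items, []int{2, 1, 4, 3}, distribution)
	fmt.Println(ordered, bad) // [d2 d1 d4 d3] 0

	// One stale entry: it is skipped and counted, mirroring the
	// inconsistent counter in the committed helper.
	ordered, bad = orderByIndex(items, []int{2, 3, 4, 3}, distribution)
	fmt.Println(ordered, bad) // [ d1 d4 d3] 1
}
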
@@ -19,7 +19,6 @@ package cmd
 import (
 	"bytes"
 	"context"
-	"errors"
 	"fmt"
 	"time"
 	"unicode/utf8"

@@ -194,7 +193,7 @@ func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client)
 			// Config is already encrypted with right keys
 			continue
 		}
-		return errors.New("config data not in plain-text form or encrypted")
+		return fmt.Errorf("Decrypting config failed %w, possibly credentials are incorrect", err)
 	}

 	cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)

@@ -274,7 +273,7 @@ func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Crede
 			// Config is already encrypted with right keys
 			continue
 		}
-		return errors.New("config data not in plain-text form or encrypted")
+		return fmt.Errorf("Decrypting config failed %w, possibly credentials are incorrect", err)
 	}

 	cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)

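The two migration hunks above also swap a static errors.New message for fmt.Errorf with the %w verb, so the underlying decryption error stays inspectable while the operator gets a hint about bad credentials. A small sketch of that wrapping behavior; errCredentials and decryptConfig are hypothetical stand-ins for the real madmin decryption failure:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins: only the wrapping pattern matters here.
var errCredentials = errors.New("credentials are incorrect")

func decryptConfig() error {
	return errCredentials
}

func main() {
	if err := decryptConfig(); err != nil {
		wrapped := fmt.Errorf("Decrypting config failed %w, possibly credentials are incorrect", err)
		fmt.Println(wrapped)
		// Unlike errors.New, the %w wrap keeps the cause reachable.
		fmt.Println(errors.Is(wrapped, errCredentials)) // true
	}
}
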
@@ -153,28 +153,54 @@ func readVersionFromDisks(ctx context.Context, disks []StorageAPI, bucket, objec
 	return metadataArray, g.Wait()
 }

 // Return disks ordered by the meta.Erasure.Index information.
-func shuffleDisksByIndex(disks []StorageAPI, metaArr []FileInfo) (shuffledDisks []StorageAPI) {
+func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo, distribution []int) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
 	shuffledDisks = make([]StorageAPI, len(disks))
+	shuffledPartsMetadata = make([]FileInfo, len(disks))
+	var inconsistent int
 	for i, meta := range metaArr {
 		if disks[i] == nil {
+			// Assuming offline drives as inconsistent,
+			// to be safe and fallback to original
+			// distribution order.
+			inconsistent++
 			continue
 		}
+		// check if erasure distribution order matches the index
+		// position if this is not correct we discard the disk
+		// and move to collect others
+		if distribution[i] != meta.Erasure.Index {
+			inconsistent++ // keep track of inconsistent entries
+			continue
+		}
 		shuffledDisks[meta.Erasure.Index-1] = disks[i]
+		shuffledPartsMetadata[meta.Erasure.Index-1] = metaArr[i]
 	}
-	return shuffledDisks
-}
-
-// Return FileInfo slice ordered by the meta.Erasure.Index information.
-func shufflePartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo) []FileInfo {
-	newMetaArr := make([]FileInfo, len(disks))
-	for i, meta := range metaArr {
-		if disks[i] == nil {
-			continue
-		}
-		newMetaArr[meta.Erasure.Index-1] = metaArr[i]
-	}
-	return newMetaArr
+
+	// Inconsistent meta info is with in the limit of
+	// expected quorum, proceed with EcIndex based
+	// disk order.
+	if inconsistent < len(disks)/2 {
+		return shuffledDisks, shuffledPartsMetadata
+	}
+
+	// fall back to original distribution based order.
+	return shuffleDisksAndPartsMetadata(disks, metaArr, distribution)
+}
+
+// Return shuffled partsMetadata depending on distribution.
+func shuffleDisksAndPartsMetadata(disks []StorageAPI, partsMetadata []FileInfo, distribution []int) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
+	if distribution == nil {
+		return disks, partsMetadata
+	}
+	shuffledDisks = make([]StorageAPI, len(disks))
+	shuffledPartsMetadata = make([]FileInfo, len(partsMetadata))
+	// Shuffle slice xl metadata for expected distribution.
+	for index := range partsMetadata {
+		blockIndex := distribution[index]
+		shuffledPartsMetadata[blockIndex-1] = partsMetadata[index]
+		shuffledDisks[blockIndex-1] = disks[index]
+	}
+	return shuffledDisks, shuffledPartsMetadata
 }

 // Return shuffled partsMetadata depending on distribution.

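The pivotal line in the new helper is the inconsistent < len(disks)/2 guard: offline disks and index mismatches both count as inconsistent, so for a 16-disk stripe up to 7 bad entries still produce the EcIndex-based order, while 8 or more silently fall back to shuffleDisksAndPartsMetadata's distribution order. A tiny runnable sketch of that threshold:

package main

import "fmt"

// useEcIndexOrder mirrors the commit's inconsistent < len(disks)/2 rule.
func useEcIndexOrder(inconsistent, disks int) bool {
	return inconsistent < disks/2
}

func main() {
	for _, bad := range []int{0, 7, 8, 16} {
		fmt.Printf("disks=16 inconsistent=%d -> EcIndex order: %v\n", bad, useEcIndexOrder(bad, 16))
	}
}
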
@@ -214,13 +214,15 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn

 func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) {
 	metaHashes := make([]string, len(metaArr))
+	h := sha256.New()
 	for i, meta := range metaArr {
 		if meta.IsValid() && meta.ModTime.Equal(modTime) {
-			h := sha256.New()
 			for _, part := range meta.Parts {
 				h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
 			}
+			h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
 			metaHashes[i] = hex.EncodeToString(h.Sum(nil))
+			h.Reset()
 		}
 	}

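findFileInfoInQuorum now feeds meta.Erasure.Distribution into each metadata hash (and hoists a single sha256 instance out of the loop, calling Reset between entries), so two copies that agree on parts but disagree on layout no longer land in the same quorum bucket. A reduced sketch of the hashing, with plain int slices standing in for the FileInfo fields:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// metaHash reduces the commit's hashing to plain slices: part numbers
// first, then the erasure distribution, the same two inputs the new code
// feeds into sha256 before comparing quorum buckets.
func metaHash(parts, distribution []int) string {
	h := sha256.New()
	for _, p := range parts {
		h.Write([]byte(fmt.Sprintf("part.%d", p)))
	}
	h.Write([]byte(fmt.Sprintf("%v", distribution)))
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	a := metaHash([]int{1}, []int{1, 2, 3, 4})
	b := metaHash([]int{1}, []int{2, 1, 4, 3}) // same parts, different layout
	fmt.Println(a == b)                        // false: distribution now participates
}
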
@@ -20,6 +20,7 @@ import (
 	"context"
 	"strconv"
 	"testing"
+	"time"

 	humanize "github.com/dustin/go-humanize"
 )

@@ -154,3 +155,48 @@ func TestObjectToPartOffset(t *testing.T) {
 		}
 	}
 }
+
+func TestFindFileInfoInQuorum(t *testing.T) {
+	getNFInfo := func(n int, quorum int, t int64) []FileInfo {
+		fi := newFileInfo("test", 8, 8)
+		fi.AddObjectPart(1, "etag", 100, 100)
+		fi.ModTime = time.Unix(t, 0)
+		fis := make([]FileInfo, n)
+		for i := range fis {
+			fis[i] = fi
+			fis[i].Erasure.Index = i + 1
+			quorum--
+			if quorum == 0 {
+				break
+			}
+		}
+		return fis
+	}
+
+	tests := []struct {
+		fis         []FileInfo
+		modTime     time.Time
+		expectedErr error
+	}{
+		{
+			fis:         getNFInfo(16, 16, 1603863445),
+			modTime:     time.Unix(1603863445, 0),
+			expectedErr: nil,
+		},
+		{
+			fis:         getNFInfo(16, 7, 1603863445),
+			modTime:     time.Unix(1603863445, 0),
+			expectedErr: errErasureReadQuorum,
+		},
+	}
+
+	for _, test := range tests {
+		test := test
+		t.Run("", func(t *testing.T) {
+			_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, 8)
+			if err != test.expectedErr {
+				t.Errorf("Expected %s, got %s", test.expectedErr, err)
+			}
+		})
+	}
+}

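The second test case is the interesting one: getNFInfo(16, 7, …) populates only the first 7 of 16 FileInfo entries before breaking out of the loop, so at a read quorum of 8 no hash bucket can reach 8 matching copies and errErasureReadQuorum is the expected result. A minimal illustration of that counting argument:

package main

import "fmt"

// meetsQuorum reports whether any hash bucket reaches the quorum count,
// the same counting findFileInfoInQuorum performs over metaHashes.
func meetsQuorum(counts map[string]int, quorum int) bool {
	for _, c := range counts {
		if c >= quorum {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(meetsQuorum(map[string]int{"h": 16}, 8)) // true: full agreement
	fmt.Println(meetsQuorum(map[string]int{"h": 7}, 8))  // false -> errErasureReadQuorum
}
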
@@ -302,6 +302,8 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 		partsMetadata[i] = fi
 	}

+	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(onlineDisks, partsMetadata, fi.Erasure.Distribution)
+
 	var err error
 	// Write updated `xl.meta` to all disks.
 	onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum)

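Note the split: newMultipartUpload (and putObject further down) are write paths where no trustworthy on-disk EcIndex exists yet, so they use the distribution-based shuffleDisksAndPartsMetadata, while read and complete paths use the validating ByIndex variant. A sketch of the distribution-based ordering, with strings standing in for disks:

package main

import "fmt"

// shuffleByDistribution places items[i] at position distribution[i]-1,
// the same move shuffleDisksAndPartsMetadata applies to disks and
// partsMetadata in lockstep so slot i's metadata matches slot i's disk.
func shuffleByDistribution(items []string, distribution []int) []string {
	shuffled := make([]string, len(items))
	for i := range items {
		shuffled[distribution[i]-1] = items[i]
	}
	return shuffled
}

func main() {
	fmt.Println(shuffleByDistribution([]string{"a", "b", "c", "d"}, []int{2, 1, 4, 3}))
	// Output: [b a d c]
}
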
@@ -743,10 +745,8 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 	}

 	// Order online disks in accordance with distribution order.
-	onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)
-
 	// Order parts metadata in accordance with distribution order.
-	partsMetadata = shufflePartsMetadata(partsMetadata, fi.Erasure.Distribution)
+	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi.Erasure.Distribution)

 	// Save current erasure metadata for validation.
 	var currentFI = fi

@@ -46,6 +46,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
 	if !srcInfo.metadataOnly {
 		return oi, NotImplemented{}
 	}
+
 	defer ObjectPathUpdated(path.Join(dstBucket, dstObject))
 	lk := er.NewNSLock(ctx, dstBucket, dstObject)
 	if err := lk.GetLock(globalOperationTimeout); err != nil {

@@ -72,6 +73,8 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
 		return oi, toObjectErr(err, srcBucket, srcObject)
 	}

+	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)
+
 	if fi.Deleted {
 		if srcOpts.VersionID == "" {
 			return oi, toObjectErr(errFileNotFound, srcBucket, srcObject)

@@ -215,12 +218,10 @@ func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, s
 }

 func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
-	tmpDisks := onlineDisks
-
-	// Reorder online disks based on erasure distribution order.
-	onlineDisks = shuffleDisksByIndex(tmpDisks, metaArr)
-
+	// Reorder online disks based on erasure distribution order.
 	// Reorder parts metadata based on erasure distribution order.
-	metaArr = shufflePartsMetadataByIndex(tmpDisks, metaArr)
+	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)

 	// For negative length read everything.
 	if length < 0 {

@@ -375,7 +376,6 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 	if err != nil {
 		return fi, nil, nil, err
 	}
-
 	return fi, metaArr, onlineDisks, nil
 }

@@ -584,7 +584,8 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	}

 	// Order disks according to erasure distribution
-	onlineDisks := shuffleDisks(storageDisks, fi.Erasure.Distribution)
+	var onlineDisks []StorageAPI
+	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi.Erasure.Distribution)

 	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
 	if err != nil {

@@ -973,7 +974,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 	}

 	// List all online disks.
-	_, modTime := listOnlineDisks(disks, metaArr, errs)
+	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

 	// Pick latest valid metadata.
 	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)

@@ -981,6 +982,8 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 		return toObjectErr(err, bucket, object)
 	}

+	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)
+
 	if fi.Deleted {
 		if opts.VersionID == "" {
 			return toObjectErr(errFileNotFound, bucket, object)

@@ -1006,12 +1009,12 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 	tempObj := mustGetUUID()

 	// Write unique `xl.meta` for each disk.
-	if disks, err = writeUniqueFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
+	if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
 		return toObjectErr(err, bucket, object)
 	}

 	// Atomically rename metadata from tmp location to destination for each disk.
-	if _, err = renameFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil {
+	if _, err = renameFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil {
 		return toObjectErr(err, bucket, object)
 	}

@@ -799,6 +799,7 @@ func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstB

 	// Check if this request is only metadata update.
 	if cpSrcDstSame && srcInfo.metadataOnly {
+
 		// Version ID is set for the destination and source == destination version ID.
 		// perform an in-place update.
 		if dstOpts.VersionID != "" && srcOpts.VersionID == dstOpts.VersionID {

@@ -2179,7 +2179,8 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		apiRouter.ServeHTTP(rec, req)
 		// Assert the response code with the expected status.
 		if rec.Code != testCase.expectedRespStatus {
-			t.Fatalf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
+			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
+			continue
 		}
 		if rec.Code == http.StatusOK {
 			var cpObjResp CopyObjectResponse

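The testAPICopyObjectHandler tweak swaps t.Fatalf for t.Errorf plus continue, so one failing table case no longer aborts the rest of the table. The idiom in a self-contained form:

package example

import "testing"

// TestTableDriven shows the Errorf+continue idiom: every failing case is
// reported, and later cases still run; Fatalf would stop at the first.
func TestTableDriven(t *testing.T) {
	cases := []struct{ got, want int }{{1, 1}, {2, 3}, {4, 4}}
	for i, c := range cases {
		if c.got != c.want {
			t.Errorf("case %d: got %d, want %d", i+1, c.got, c.want)
			continue // keep exercising the remaining cases
		}
	}
}
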
@@ -194,6 +194,14 @@ func newAllSubsystems() {
 }

 func initServer(ctx context.Context, newObject ObjectLayer) error {
+	// Once the config is fully loaded, initialize the new object layer.
+	globalObjLayerMutex.Lock()
+	globalObjectAPI = newObject
+	globalObjLayerMutex.Unlock()
+
+	// Initialize IAM store
+	globalIAMSys.InitStore(newObject)
+
 	// Create cancel context to control 'newRetryTimer' go routine.
 	retryCtx, cancel := context.WithCancel(ctx)

@@ -330,14 +338,6 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
 		logger.LogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing %w", err))
 	}

-	// Once the config is fully loaded, initialize the new object layer.
-	globalObjLayerMutex.Lock()
-	globalObjectAPI = newObject
-	globalObjLayerMutex.Unlock()
-
-	// Initialize IAM store
-	globalIAMSys.InitStore(newObject)
-
 	// Populate existing buckets to the etcd backend
 	if globalDNSConfig != nil {
 		// Background this operation.

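The final two hunks move the object-layer publication and IAM store setup out of initAllSubsystems and to the top of initServer, so globalObjectAPI is already set before the retry loop that initializes the remaining subsystems begins. A minimal sketch of the mutex-guarded publication; the types and globals here are simplified stand-ins for the real ones:

package main

import (
	"fmt"
	"sync"
)

// Simplified stand-ins for the real globals; the point is that the
// assignment is published under a mutex before anything else runs.
type ObjectLayer interface{}

var (
	globalObjLayerMutex sync.Mutex
	globalObjectAPI     ObjectLayer
)

func initServer(newObject ObjectLayer) {
	globalObjLayerMutex.Lock()
	globalObjectAPI = newObject
	globalObjLayerMutex.Unlock()
	// ...the retry loop for the remaining subsystems would start here...
}

func main() {
	initServer(struct{}{})
	fmt.Println("object layer published:", globalObjectAPI != nil)
}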