Mirror of https://github.com/minio/minio.git (synced 2025-04-28 21:58:01 -04:00)

Commit: ef54200db7
Parent: 7875efbf61

offline drives more than 50% of total drives return error (#17252)
@@ -39,6 +39,7 @@ import (
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/mimedb"
 	"github.com/minio/pkg/sync/errgroup"
+	uatomic "go.uber.org/atomic"
 )
 
 func (er erasureObjects) getUploadIDDir(bucket, object, uploadID string) string {
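The only import change is `uatomic "go.uber.org/atomic"`, which wraps integers in types whose methods are safe to call from many goroutines at once; the hunks below use it so the parallel `DiskInfo` probes can bump shared counters without a mutex. A minimal standalone sketch of the three methods the patch relies on (`Store`, `Inc`, `Load`):

package main

import (
	"fmt"

	uatomic "go.uber.org/atomic"
)

func main() {
	n := uatomic.NewInt64(0) // a boxed int64 with atomic methods
	n.Store(4)               // seed it, like atomicParityDrives.Store below
	n.Inc()                  // safe to call concurrently from many goroutines
	fmt.Println(n.Load())    // prints 5
}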
@@ -371,29 +372,61 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 		userDefined["etag"] = opts.PreserveETag
 	}
 	onlineDisks := er.getDisks()
+
+	// Get parity and data drive count based on storage class metadata
 	parityDrives := globalStorageClass.GetParityForSC(userDefined[xhttp.AmzStorageClass])
 	if parityDrives < 0 {
 		parityDrives = er.defaultParityCount
 	}
 
 	// If we have offline disks upgrade the number of erasure codes for this object.
 	parityOrig := parityDrives
+
+	atomicParityDrives := uatomic.NewInt64(0)
+	atomicOfflineDrives := uatomic.NewInt64(0)
+
+	// Start with current parityDrives
+	atomicParityDrives.Store(int64(parityDrives))
+
+	var wg sync.WaitGroup
 	for _, disk := range onlineDisks {
-		if parityDrives >= len(onlineDisks)/2 {
-			parityDrives = len(onlineDisks) / 2
-			break
-		}
 		if disk == nil {
-			parityDrives++
+			atomicParityDrives.Inc()
+			atomicOfflineDrives.Inc()
 			continue
 		}
-		di, err := disk.DiskInfo(ctx)
-		if err != nil || di.ID == "" {
-			parityDrives++
+		if !disk.IsOnline() {
+			atomicParityDrives.Inc()
+			atomicOfflineDrives.Inc()
+			continue
 		}
+		wg.Add(1)
+		go func(disk StorageAPI) {
+			defer wg.Done()
+			di, err := disk.DiskInfo(ctx)
+			if err != nil || di.ID == "" {
+				atomicOfflineDrives.Inc()
+				atomicParityDrives.Inc()
+			}
+		}(disk)
+	}
+	wg.Wait()
+
+	if int(atomicOfflineDrives.Load()) > len(onlineDisks)/2 {
+		// if offline drives are more than 50% of the drives
+		// we have no quorum, we shouldn't proceed just
+		// fail at that point.
+		return nil, toObjectErr(errErasureWriteQuorum, bucket, object)
+	}
+
+	parityDrives = int(atomicParityDrives.Load())
+	if parityDrives >= len(onlineDisks)/2 {
+		parityDrives = len(onlineDisks) / 2
 	}
 	if parityOrig != parityDrives {
 		userDefined[minIOErasureUpgraded] = strconv.Itoa(parityOrig) + "->" + strconv.Itoa(parityDrives)
 	}
+
 	dataDrives := len(onlineDisks) - parityDrives
+
 	// we now know the number of blocks this object needs for data and parity.
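Taken together, this hunk replaces the old sequential scan (which also capped parity inline via `break`) with a fan-out: nil and `!IsOnline()` drives are counted on the spot, a goroutine per remaining drive calls `DiskInfo`, and the upload is rejected with `errErasureWriteQuorum` once more than half the drives turn out to be offline. A compilable sketch of that pattern under assumed names (`diskProber`, `Probe`, and `countOffline` are hypothetical stand-ins for MinIO's `StorageAPI` and `disk.DiskInfo(ctx)`):

package main

import (
	"fmt"
	"sync"

	uatomic "go.uber.org/atomic"
)

// diskProber is a hypothetical stand-in for MinIO's StorageAPI;
// Probe plays the role of disk.DiskInfo(ctx).
type diskProber interface {
	IsOnline() bool
	Probe() error
}

// countOffline mirrors the pattern in the hunk: cheap nil/IsOnline
// checks stay inline, the slow probes fan out to goroutines, and an
// atomic counter collects results race-free.
func countOffline(disks []diskProber) int {
	offline := uatomic.NewInt64(0)
	var wg sync.WaitGroup
	for _, d := range disks {
		if d == nil || !d.IsOnline() {
			offline.Inc()
			continue
		}
		wg.Add(1)
		go func(d diskProber) {
			defer wg.Done()
			if err := d.Probe(); err != nil {
				offline.Inc()
			}
		}(d)
	}
	wg.Wait() // counters are final only after every probe returns
	return int(offline.Load())
}

type fakeDisk struct{ online bool }

func (f fakeDisk) IsOnline() bool { return f.online }
func (f fakeDisk) Probe() error   { return nil }

func main() {
	disks := []diskProber{fakeDisk{true}, fakeDisk{false}, nil, fakeDisk{true}}
	offline := countOffline(disks)
	fmt.Println(offline, offline > len(disks)/2) // 2 false: quorum still holds
}

The design point is that only the slow network probe is parallelized; the cheap checks stay on the calling goroutine, and `wg.Wait()` guarantees the counters are settled before the quorum decision is made.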
@@ -1001,6 +1001,8 @@ func healObjectVersionsDisparity(bucket string, entry metaCacheEntry) error {
 func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	auditObjectErasureSet(ctx, object, &er)
 
+	data := r.Reader
+
 	if opts.CheckPrecondFn != nil {
 		obj, err := er.getObjectInfo(ctx, bucket, object, opts)
 		if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
@@ -1011,7 +1013,11 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		}
 	}
 
-	data := r.Reader
+	// Validate input data size and it can never be less than -1.
+	if data.Size() < -1 {
+		logger.LogIf(ctx, errInvalidArgument, logger.Application)
+		return ObjectInfo{}, toObjectErr(errInvalidArgument)
+	}
 
 	userDefined := cloneMSS(opts.UserDefined)
 
@@ -1029,6 +1035,8 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	parityOrig := parityDrives
 
 	atomicParityDrives := uatomic.NewInt64(0)
+	atomicOfflineDrives := uatomic.NewInt64(0)
+
 	// Start with current parityDrives
 	atomicParityDrives.Store(int64(parityDrives))
 
@@ -1036,10 +1044,12 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	for _, disk := range storageDisks {
 		if disk == nil {
 			atomicParityDrives.Inc()
+			atomicOfflineDrives.Inc()
 			continue
 		}
 		if !disk.IsOnline() {
 			atomicParityDrives.Inc()
+			atomicOfflineDrives.Inc()
 			continue
 		}
 		wg.Add(1)
@@ -1047,12 +1057,20 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 			defer wg.Done()
 			di, err := disk.DiskInfo(ctx)
 			if err != nil || di.ID == "" {
+				atomicOfflineDrives.Inc()
 				atomicParityDrives.Inc()
 			}
 		}(disk)
 	}
 	wg.Wait()
 
+	if int(atomicOfflineDrives.Load()) >= (len(storageDisks)+1)/2 {
+		// if offline drives are more than 50% of the drives
+		// we have no quorum, we shouldn't proceed just
+		// fail at that point.
+		return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object)
+	}
+
 	parityDrives = int(atomicParityDrives.Load())
 	if parityDrives >= len(storageDisks)/2 {
 		parityDrives = len(storageDisks) / 2
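Note that the two functions draw the 50% line slightly differently: `newMultipartUpload` fails when offline drives exceed `len(onlineDisks)/2`, while `putObject` fails when they reach `(len(storageDisks)+1)/2`. With integer division the cut-offs agree for odd drive counts but differ by one drive for even counts, as this small check shows:

package main

import "fmt"

func main() {
	for _, n := range []int{15, 16} {
		// newMultipartUpload: fail when offline >  n/2
		// putObject:          fail when offline >= (n+1)/2
		fmt.Printf("n=%2d: '>' trips at %d offline, '>=' trips at %d\n",
			n, n/2+1, (n+1)/2)
	}
	// n=15: both forms trip at 8 offline drives.
	// n=16: '>' trips at 9, '>=' already at 8.
}

So on a 16-drive set, `putObject` already refuses at exactly half the drives offline, while `newMultipartUpload` requires a strict majority to be offline before failing.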
@@ -1070,12 +1088,6 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		writeQuorum++
 	}
 
-	// Validate input data size and it can never be less than zero.
-	if data.Size() < -1 {
-		logger.LogIf(ctx, errInvalidArgument, logger.Application)
-		return ObjectInfo{}, toObjectErr(errInvalidArgument)
-	}
-
 	// Initialize parts metadata
 	partsMetadata := make([]FileInfo, len(storageDisks))
 
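This deletion completes the move started in the two earlier `putObject` hunks: `data := r.Reader` and the size validation now run at the top of the function, before any drive probing or parity math, instead of after the write quorum is computed. A tiny sketch of the hoisted check (the helper name `validateSize` is hypothetical; a size of -1 conventionally marks an upload of unknown length, so only values below -1 are rejected):

package main

import (
	"errors"
	"fmt"
)

// validateSize is a hypothetical distillation of the hoisted check:
// a size of -1 means "length unknown" (streaming upload), so only
// values below -1 are invalid. MinIO returns errInvalidArgument here.
func validateSize(size int64) error {
	if size < -1 {
		return errors.New("invalid argument")
	}
	return nil
}

func main() {
	fmt.Println(validateSize(-1)) // <nil>: unknown length is accepted
	fmt.Println(validateSize(-2)) // invalid argument
}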