fix: re-use er.getDisks() properly in certain calls (#11043)

Harshavardhana 2020-12-07 10:04:07 -08:00 committed by GitHub
parent 8d036ed6d8
commit ce93b2681b
4 changed files with 16 additions and 18 deletions


@@ -382,8 +382,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 		return pi, toObjectErr(err, bucket, object, uploadID)
 	}
 
+	storageDisks := er.getDisks()
+
 	// Read metadata associated with the object from all disks.
-	partsMetadata, errs = readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket,
+	partsMetadata, errs = readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket,
 		uploadIDPath, "")
 
 	// get Quorum for this object
@@ -398,7 +400,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 	}
 
 	// List all online disks.
-	onlineDisks, modTime := listOnlineDisks(er.getDisks(), partsMetadata, errs)
+	onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
 
 	// Pick one from the first valid metadata.
 	fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
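Both hunks apply the same pattern: take one snapshot of the disk slice at the top of the call and pass that same slice to every helper, instead of invoking er.getDisks() repeatedly. Below is a minimal, self-contained sketch of the pattern; the types and the readAll/listOnline helpers are hypothetical stand-ins, not MinIO's real StorageAPI, readAllFileInfo, or listOnlineDisks.

package main

import "fmt"

// disk is a hypothetical stand-in for MinIO's StorageAPI.
type disk struct{ name string }

// objects mirrors the shape of erasureObjects: disks are reached
// through a getDisks closure rather than stored on the struct.
type objects struct {
    getDisks func() []disk
}

// readAll and listOnline stand in for readAllFileInfo and
// listOnlineDisks; both receive the same snapshot, so they observe
// an identical disk ordering.
func readAll(disks []disk) int    { return len(disks) }
func listOnline(disks []disk) int { return len(disks) }

func main() {
    er := objects{getDisks: func() []disk {
        return []disk{{"d1"}, {"d2"}, {"d3"}, {"d4"}}
    }}

    // Snapshot once and reuse it, mirroring the storageDisks local
    // introduced by this commit.
    storageDisks := er.getDisks()
    fmt.Println(readAll(storageDisks), listOnline(storageDisks))
}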


@@ -608,7 +608,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	}
 
 	// Initialize parts metadata
-	partsMetadata := make([]FileInfo, len(er.getDisks()))
+	partsMetadata := make([]FileInfo, len(storageDisks))
 
 	fi := newFileInfo(object, dataDrives, parityDrives)
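partsMetadata is sized from the same storageDisks snapshot so that index i in the metadata slice always refers to storageDisks[i]. A hedged sketch of that index alignment, using simplified placeholder types rather than MinIO's FileInfo and StorageAPI:

package main

import "fmt"

// driveRef and metaInfo are simplified placeholders for StorageAPI and FileInfo.
type driveRef struct{ id int }
type metaInfo struct{ driveID int }

// buildPartsMetadata allocates one metadata slot per disk in the
// snapshot, so partsMetadata[i] describes the object as laid out
// on storageDisks[i].
func buildPartsMetadata(storageDisks []driveRef) []metaInfo {
    partsMetadata := make([]metaInfo, len(storageDisks))
    for i, d := range storageDisks {
        partsMetadata[i] = metaInfo{driveID: d.id}
    }
    return partsMetadata
}

func main() {
    storageDisks := []driveRef{{0}, {1}, {2}, {3}}
    fmt.Println(len(buildPartsMetadata(storageDisks))) // 4, one slot per drive
}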
@@ -767,19 +767,18 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object
 // all the disks in parallel, including `xl.meta` associated with the
 // object.
 func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int) error {
-	var disks []StorageAPI
 	var err error
 	defer ObjectPathUpdated(pathJoin(bucket, object))
 
 	tmpObj := mustGetUUID()
+	disks := er.getDisks()
 	if bucket == minioMetaTmpBucket {
 		tmpObj = object
-		disks = er.getDisks()
 	} else {
 		// Rename the current object while requiring write quorum, but also consider
 		// that a non found object in a given disk as a success since it already
 		// confirms that the object doesn't have a part in that disk (already removed)
-		disks, err = rename(ctx, er.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
+		disks, err = rename(ctx, disks, bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
 			[]error{errFileNotFound})
 		if err != nil {
 			return toObjectErr(err, bucket, object)
@@ -787,7 +786,6 @@ func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string
 		}
 	}
 
 	g := errgroup.WithNErrs(len(disks))
-
 	for index := range disks {
 		index := index
 		g.Go(func() error {
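The loop closing this hunk fans out one goroutine per disk and records an error per index (MinIO's internal errgroup.WithNErrs, visible above). A standard-library sketch of that per-index fan-out; deleteOnDisk is a hypothetical helper standing in for the real per-disk delete:

package main

import (
    "errors"
    "fmt"
    "sync"
)

// deleteOnDisk is a hypothetical stand-in for the per-disk delete
// performed inside g.Go in the real code.
func deleteOnDisk(disk string) error {
    if disk == "bad" {
        return errors.New("disk offline")
    }
    return nil
}

func main() {
    disks := []string{"d1", "bad", "d3"}

    // One error slot per disk, mirroring errgroup.WithNErrs(len(disks)).
    errs := make([]error, len(disks))
    var wg sync.WaitGroup
    for index := range disks {
        index := index // capture the loop variable, as the diff does
        wg.Add(1)
        go func() {
            defer wg.Done()
            errs[index] = deleteOnDisk(disks[index])
        }()
    }
    wg.Wait()

    fmt.Println(errs) // per-disk results, to be reduced against the write quorum
}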


@@ -388,12 +388,13 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
 
 		// Initialize erasure objects for a given set.
 		s.sets[i] = &erasureObjects{
-			getDisks:     s.GetDisks(i),
-			getLockers:   s.GetLockers(i),
-			getEndpoints: s.GetEndpoints(i),
-			nsMutex:      mutex,
-			bp:           bp,
-			mrfOpCh:      make(chan partialOperation, 10000),
+			setDriveCount: setDriveCount,
+			getDisks:      s.GetDisks(i),
+			getLockers:    s.GetLockers(i),
+			getEndpoints:  s.GetEndpoints(i),
+			nsMutex:       mutex,
+			bp:            bp,
+			mrfOpCh:       make(chan partialOperation, 10000),
 		}
 
 		go s.sets[i].cleanupStaleUploads(ctx,
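The set constructor now records setDriveCount on each erasureObjects at build time instead of leaving it to be derived later from the live disk slice. A minimal sketch of that construction pattern, assuming simplified stand-in types (storageAPI, erasureObjs, newSets) rather than the real MinIO ones:

package main

import "fmt"

// storageAPI and erasureObjs are simplified stand-ins for StorageAPI
// and erasureObjects.
type storageAPI interface{}

type erasureObjs struct {
    setDriveCount int
    getDisks      func() []storageAPI
}

// newSets mirrors the relevant part of newErasureSets: the drive
// count is fixed once per set at construction time.
func newSets(setCount, setDriveCount int, disksFor func(i int) func() []storageAPI) []*erasureObjs {
    sets := make([]*erasureObjs, setCount)
    for i := range sets {
        sets[i] = &erasureObjs{
            setDriveCount: setDriveCount,
            getDisks:      disksFor(i),
        }
    }
    return sets
}

func main() {
    disksFor := func(i int) func() []storageAPI {
        return func() []storageAPI { return make([]storageAPI, 4) }
    }
    sets := newSets(2, 4, disksFor)
    fmt.Println(sets[0].setDriveCount, len(sets[1].getDisks())) // 4 4
}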


@@ -48,6 +48,8 @@ type partialOperation struct {
 type erasureObjects struct {
 	GatewayUnsupported
 
+	setDriveCount int
+
 	// getDisks returns list of storageAPIs.
 	getDisks func() []StorageAPI
 
@@ -72,11 +74,6 @@ func (er erasureObjects) NewNSLock(bucket string, objects ...string) RWLocker {
 	return er.nsMutex.NewNSLock(er.getLockers, bucket, objects...)
 }
 
-// SetDriveCount returns the current drives per set.
-func (er erasureObjects) SetDriveCount() int {
-	return len(er.getDisks())
-}
-
 // Shutdown function for object storage interface.
 func (er erasureObjects) Shutdown(ctx context.Context) error {
 	// Add any object layer shutdown activities here.
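The removed SetDriveCount helper re-derived a constant by calling er.getDisks(), which in the sets implementation is a closure over the set's live disk slice; storing setDriveCount as a field records the configured count once at construction. The diff does not show how call sites obtain the count afterwards, so the sketch below simply assumes direct field access and uses hypothetical stand-in types:

package main

import "fmt"

type storageAPI interface{}

// erasureObjs is a simplified stand-in for erasureObjects after this
// commit: the drive count is a plain field, not derived on demand.
type erasureObjs struct {
    setDriveCount int
    getDisks      func() []storageAPI
}

func main() {
    er := erasureObjs{
        setDriveCount: 4,
        getDisks: func() []storageAPI {
            // Imagine this closure doing real work (locking, copying the
            // live disk slice) on every call; reading the field avoids it.
            return make([]storageAPI, 4)
        },
    }

    // Before: count := len(er.getDisks())  // invokes the closure each time
    // After:  count := er.setDriveCount    // reads a value fixed at construction
    fmt.Println(er.setDriveCount, len(er.getDisks()))
}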