Mirror of https://github.com/minio/minio.git
Remove erasureSets and erasureObjects from ObjectLayer (#10442)
@@ -488,11 +488,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context, local bool) (StorageInfo,
 	return storageInfo, errs
 }
 
-func (s *erasureSets) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
-	// Use the zone-level implementation instead.
-	return NotImplemented{API: "CrawlAndGetDataUsage"}
-}
-
 // Shutdown shutsdown all erasure coded sets in parallel
 // returns error upon first error.
 func (s *erasureSets) Shutdown(ctx context.Context) error {
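The removed CrawlAndGetDataUsage stub shows the guard pattern this change relies on: set-level methods that should only be reached through the zone-level ObjectLayer return a typed NotImplemented error instead of doing any work. A minimal sketch of that pattern, using hypothetical definitions rather than MinIO's actual types:

```go
package main

import "fmt"

// NotImplemented mirrors the shape used in the diff: the optional API name
// identifies the call site that should not have been reached.
type NotImplemented struct {
	API string
}

func (e NotImplemented) Error() string {
	if e.API != "" {
		return e.API + " is not implemented"
	}
	return "not implemented"
}

// erasureSetsSketch stands in for the set-level layer.
type erasureSetsSketch struct{}

// CrawlAndGetDataUsage must be served by the zone-level implementation,
// so the set-level method only returns the guard error.
func (s *erasureSetsSketch) CrawlAndGetDataUsage() error {
	return NotImplemented{API: "CrawlAndGetDataUsage"}
}

func main() {
	var s erasureSetsSketch
	fmt.Println(s.CrawlAndGetDataUsage()) // CrawlAndGetDataUsage is not implemented
}
```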
@@ -510,7 +505,14 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
 			return err
 		}
 	}
-
+	select {
+	case _, ok := <-s.disksConnectEvent:
+		if ok {
+			close(s.disksConnectEvent)
+		}
+	default:
+		close(s.disksConnectEvent)
+	}
 	return nil
 }
 
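The select added to Shutdown performs a non-blocking receive on disksConnectEvent before closing it: if the receive reports ok == false the channel is already closed, and closing it again would panic. A self-contained sketch of that close sequence, with illustrative names and the assumption that a single goroutine drives shutdown:

```go
package main

import "fmt"

// closeOnce closes ch unless a receive shows it is already closed.
func closeOnce(ch chan struct{}) {
	select {
	case _, ok := <-ch:
		if ok {
			// Drained a pending value from an open channel; closing is safe.
			close(ch)
		}
		// ok == false means the channel was already closed; do nothing.
	default:
		// The receive would block: the channel is open and empty, close it now.
		close(ch)
	}
}

func main() {
	ch := make(chan struct{}, 1)
	ch <- struct{}{}
	closeOnce(ch)
	closeOnce(ch) // no-op: the receive reports ok == false
	fmt.Println("shutdown complete")
}
```

The sketch does not make close safe against concurrent callers; it only guards the sequential shutdown path shown in the hunk.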
@@ -589,11 +591,6 @@ func (s *erasureSets) GetBucketInfo(ctx context.Context, bucket string) (bucketI
 	return s.getHashedSet("").GetBucketInfo(ctx, bucket)
 }
 
-// ListObjectsV2 lists all objects in bucket filtered by prefix
-func (s *erasureSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
-	return result, NotImplemented{}
-}
-
 // IsNotificationSupported returns whether bucket notification is applicable for this layer.
 func (s *erasureSets) IsNotificationSupported() bool {
 	return s.getHashedSet("").IsNotificationSupported()
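Both surviving delegations route through getHashedSet(""), so bucket-scoped calls such as GetBucketInfo and IsNotificationSupported always land on the same, deterministically chosen set. The sketch below illustrates that kind of key-to-set routing; the CRC32 hash and the type names are assumptions for the example, not MinIO's actual set-selection code:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

type objectSet struct{ id int }

type setsSketch struct {
	sets []*objectSet
}

// getHashedSet picks the set a key belongs to; the empty key is valid and
// always maps to the same set, which is what bucket-level calls rely on.
func (s *setsSketch) getHashedSet(key string) *objectSet {
	idx := crc32.ChecksumIEEE([]byte(key)) % uint32(len(s.sets))
	return s.sets[idx]
}

func main() {
	s := &setsSketch{sets: []*objectSet{{id: 0}, {id: 1}, {id: 2}, {id: 3}}}
	fmt.Println("bucket-level calls go to set", s.getHashedSet("").id)
	fmt.Println("object calls go to set", s.getHashedSet("photos/cat.png").id)
}
```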
@@ -1038,22 +1035,6 @@ func (s *erasureSets) startMergeWalksN(ctx context.Context, bucket, prefix, mark
 	return entryChs
 }
 
-// ListObjectVersions - implements listing of objects across disks, each disk is indepenently
-// walked and merged at this layer. Resulting value through the merge process sends
-// the data in lexically sorted order.
-func (s *erasureSets) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, err error) {
-	// Shouldn't be called directly, caller Zones already has an implementation
-	return loi, NotImplemented{}
-}
-
-// ListObjects - implements listing of objects across disks, each disk is indepenently
-// walked and merged at this layer. Resulting value through the merge process sends
-// the data in lexically sorted order.
-func (s *erasureSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
-	// Shouldn't be called directly, caller Zones already has an implementation
-	return loi, NotImplemented{}
-}
-
 func (s *erasureSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
 	// In list multipart uploads we are going to treat input prefix as the object,
 	// this means that we are not supporting directory navigation.
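The removed doc comments describe the listing model that now lives at the zone layer: each disk is walked independently in sorted order, and the per-disk streams are merged so entries are emitted in lexically sorted order. A simplified stand-in for that merge step (not startMergeWalksN itself; duplicate entries across disks are kept rather than deduplicated):

```go
package main

import "fmt"

// mergeSorted merges already-sorted channels into one lexically sorted slice.
func mergeSorted(chs []chan string) []string {
	heads := make([]string, len(chs))
	ok := make([]bool, len(chs))
	for i, ch := range chs {
		heads[i], ok[i] = <-ch // prime one head entry per disk
	}
	var out []string
	for {
		best := -1
		for i := range chs {
			if ok[i] && (best == -1 || heads[i] < heads[best]) {
				best = i
			}
		}
		if best == -1 {
			return out // all channels drained
		}
		out = append(out, heads[best])
		heads[best], ok[best] = <-chs[best] // refill from the disk we consumed
	}
}

func main() {
	disks := [][]string{
		{"a.txt", "m.txt"},
		{"b.txt", "z.txt"},
		{"a.txt", "c.txt"},
	}
	chs := make([]chan string, len(disks))
	for i, entries := range disks {
		chs[i] = make(chan string, len(entries))
		for _, e := range entries {
			chs[i] <- e
		}
		close(chs[i])
	}
	fmt.Println(mergeSorted(chs)) // [a.txt a.txt b.txt c.txt m.txt z.txt]
}
```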
@@ -1621,18 +1602,6 @@ func (s *erasureSets) GetObjectTags(ctx context.Context, bucket, object string,
 	return s.getHashedSet(object).GetObjectTags(ctx, bucket, object, opts)
 }
 
-// GetMetrics - no op
-func (s *erasureSets) GetMetrics(ctx context.Context) (*Metrics, error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return &Metrics{}, NotImplemented{}
-}
-
-// Health shouldn't be called directly - will panic
-func (s *erasureSets) Health(ctx context.Context, _ HealthOptions) HealthResult {
-	logger.CriticalIf(ctx, NotImplemented{})
-	return HealthResult{}
-}
-
 // maintainMRFList gathers the list of successful partial uploads
 // from all underlying er.sets and puts them in a global map which
 // should not have more than 10000 entries.
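The maintainMRFList comment that remains describes folding partial-upload records from all underlying sets into one global map capped at 10000 entries. A hedged sketch of that bounded bookkeeping, with illustrative types and field names:

```go
package main

import "fmt"

const maxMRFEntries = 10000

// partialOperation is an illustrative record of a partially completed upload.
type partialOperation struct {
	bucket string
	object string
}

// maintainMRFList drains partial-upload events into one map, keeping at most
// maxMRFEntries of them keyed by bucket/object.
func maintainMRFList(events <-chan partialOperation) map[string]partialOperation {
	mrf := make(map[string]partialOperation)
	for op := range events {
		if len(mrf) >= maxMRFEntries {
			continue // drop new entries once the cap is reached
		}
		mrf[op.bucket+"/"+op.object] = op
	}
	return mrf
}

func main() {
	events := make(chan partialOperation, 2)
	events <- partialOperation{bucket: "photos", object: "cat.png"}
	events <- partialOperation{bucket: "docs", object: "report.pdf"}
	close(events)
	fmt.Println(len(maintainMRFList(events)), "partial uploads tracked")
}
```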