Mirror of https://github.com/minio/minio.git
avoid a crash in crawler when lifecycle is not initialized (#11170)
Bonus: for static buffers, use bytes.NewReader instead of bytes.NewBuffer, for a more reader-friendly implementation.
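A minimal standalone sketch of the bytes.NewReader vs. bytes.NewBuffer distinction the bonus change refers to (not the MinIO call sites themselves): a *bytes.Reader is a read-only, re-seekable view of the slice, while a *bytes.Buffer is a drainable read-write buffer.

package main

import (
	"bytes"
	"io"
	"os"
)

func main() {
	data := []byte("static payload")

	// bytes.NewBuffer wraps the slice in a read-write buffer; reading drains
	// it, and the consumed bytes cannot be re-read without rebuilding it.
	buf := bytes.NewBuffer(data)
	io.Copy(os.Stdout, buf)

	// bytes.NewReader is a read-only view that also implements io.Seeker and
	// io.ReaderAt, so the same static bytes can be rewound and read again.
	r := bytes.NewReader(data)
	io.Copy(io.Discard, r)
	r.Seek(0, io.SeekStart)
	io.Copy(os.Stdout, r)
}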
@@ -245,8 +245,8 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 		return nil
 	}
 
-	// Collect disks we can use.
-	disks := er.getOnlineDisks()
+	// Collect disks we can use, sorted by least inode usage.
+	disks := er.getOnlineDisksSortedByUsedInodes()
 	if len(disks) == 0 {
 		logger.Info(color.Green("data-crawl:") + " all disks are offline or being healed, skipping crawl")
 		return nil
@@ -312,7 +312,6 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 		defer saverWg.Done()
 		var lastSave time.Time
 
-	saveLoop:
 		for {
 			select {
 			case <-ctx.Done():
@@ -327,17 +326,17 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 				lastSave = cache.Info.LastUpdate
 			case v, ok := <-bucketResults:
 				if !ok {
-					break saveLoop
+					// Save final state...
+					cache.Info.NextCycle++
+					cache.Info.LastUpdate = time.Now()
+					logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName))
+					updates <- cache
+					return
 				}
 				cache.replace(v.Name, v.Parent, v.Entry)
 				cache.Info.LastUpdate = time.Now()
 			}
 		}
-		// Save final state...
-		cache.Info.NextCycle++
-		cache.Info.LastUpdate = time.Now()
-		logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName))
-		updates <- cache
 	}()
 
 	// Start one crawler per disk
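For context, a self-contained sketch of the control-flow change in the last hunk (the names result and drainAndSave, and the printed messages, are illustrative, not MinIO's code): the final save is performed inside the case that observes the results channel closing, and the goroutine returns directly, which removes the need for the saveLoop label and the trailing save block.

package main

import (
	"context"
	"fmt"
	"time"
)

type result struct{ name string }

// drainAndSave mirrors the shape of the patched save goroutine: it consumes
// results until the channel closes or the context is cancelled, and performs
// the final save inside the closed-channel case before returning, so no
// labeled loop and no code after the loop are needed.
func drainAndSave(ctx context.Context, results <-chan result) {
	lastUpdate := time.Now()
	for {
		select {
		case <-ctx.Done():
			return
		case v, ok := <-results:
			if !ok {
				// Channel closed: save final state and exit directly.
				fmt.Println("final save, last update:", lastUpdate)
				return
			}
			fmt.Println("merged result:", v.name)
			lastUpdate = time.Now()
		}
	}
}

func main() {
	ch := make(chan result, 2)
	ch <- result{name: "bucket-a"}
	ch <- result{name: "bucket-b"}
	close(ch)
	drainAndSave(context.Background(), ch)
}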