Disable crawler in FS/NAS gateway mode (#9695)

No one really uses FS for large-scale accounting
usage, nor do we crawl in NAS gateway mode. It is
worthwhile to simply disable this feature since it
is not useful for anyone.
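
In practice the change reduces to stubbing out the FS crawler entry
point so it immediately reports the operation as unsupported. A
minimal, self-contained sketch of that pattern (the receiver type and
the simplified signature are illustrative stand-ins, not MinIO's actual
`FSObjects` code; only the `NotImplemented` error name mirrors the diff
below):

```go
package main

import (
	"context"
	"fmt"
)

// NotImplemented mimics the typed error the object layer returns for
// unsupported operations; the name mirrors the diff below, the rest of
// this file is a stand-alone illustration.
type NotImplemented struct{}

func (NotImplemented) Error() string { return "operation not implemented" }

// fsObjects stands in for the FS backend.
type fsObjects struct{}

// CrawlAndGetDataUsage is stubbed out: instead of walking the namespace
// to build usage statistics, it reports the feature as unsupported.
func (fs *fsObjects) CrawlAndGetDataUsage(ctx context.Context) error {
	return NotImplemented{}
}

func main() {
	fs := &fsObjects{}
	if err := fs.CrawlAndGetDataUsage(context.Background()); err != nil {
		fmt.Println(err) // operation not implemented
	}
}
```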

Bonus: disable bucket quota ops as well in FS
and gateway mode.
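
The quota side follows the same idea: quota-related admin calls are
rejected up front when the server is not running the erasure backend.
A rough sketch of such a guard, using a plain net/http wrapper with
made-up mode flags and an illustrative admin path (the real MinIO
handlers and globals differ):

```go
package main

import (
	"log"
	"net/http"
)

// Hypothetical mode flags standing in for the server's own FS/gateway detection.
var (
	isGateway = false
	isFSMode  = true
)

// withQuotaSupport rejects quota-related admin calls outright when the
// server runs in FS or gateway mode, mirroring the intent of this commit.
func withQuotaSupport(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if isGateway || isFSMode {
			http.Error(w, "bucket quota is not supported in FS/gateway mode", http.StatusNotImplemented)
			return
		}
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/admin/set-bucket-quota", withQuotaSupport(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("quota updated\n"))
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```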
Author: Harshavardhana
Date:   2020-05-25 00:17:52 -07:00
Committed by: GitHub
Parent: 301de169e9
Commit: eba423bb9d
10 changed files with 29 additions and 77 deletions


@@ -19,7 +19,6 @@ package cmd
 import (
 	"bytes"
 	"context"
-	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -31,7 +30,6 @@ import (
"strings"
"sync"
"sync/atomic"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio-go/v6/pkg/s3utils"
@@ -39,7 +37,6 @@ import (
"github.com/minio/minio/cmd/config"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/mimedb"
@@ -229,61 +226,9 @@ func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) StorageInfo {
 	return storageInfo
 }
 
-func (fs *FSObjects) waitForLowActiveIO() {
-	for atomic.LoadInt64(&fs.activeIOCount) >= fs.maxActiveIOCount {
-		time.Sleep(lowActiveIOWaitTick)
-	}
-}
-
 // CrawlAndGetDataUsage returns data usage stats of the current FS deployment
 func (fs *FSObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
-	// Load bucket totals
-	var oldCache dataUsageCache
-	err := oldCache.load(ctx, fs, dataUsageCacheName)
-	if err != nil {
-		return err
-	}
-	if oldCache.Info.Name == "" {
-		oldCache.Info.Name = dataUsageRoot
-	}
-
-	buckets, err := fs.ListBuckets(ctx)
-	if err != nil {
-		return err
-	}
-
-	oldCache.Info.BloomFilter = nil
-	if bf != nil {
-		oldCache.Info.BloomFilter = bf.bytes()
-	}
-
-	if false && intDataUpdateTracker.debug {
-		b, _ := json.MarshalIndent(bf, "", " ")
-		logger.Info("Bloom filter: %v", string(b))
-	}
-
-	cache, err := updateUsage(ctx, fs.fsPath, oldCache, fs.waitForLowActiveIO, func(item Item) (int64, error) {
-		// Get file size, symlinks which cannot be
-		// followed are automatically filtered by fastwalk.
-		fi, err := os.Stat(item.Path)
-		if err != nil {
-			return 0, errSkipFile
-		}
-		return fi.Size(), nil
-	})
-	cache.Info.BloomFilter = nil
-
-	// Even if there was an error, the new cache may have better info.
-	if cache.Info.LastUpdate.After(oldCache.Info.LastUpdate) {
-		if intDataUpdateTracker.debug {
-			logger.Info(color.Green("CrawlAndGetDataUsage:")+" Saving cache with %d entries", len(cache.Cache))
-		}
-		logger.LogIf(ctx, cache.save(ctx, fs, dataUsageCacheName))
-		updates <- cache.dui(dataUsageRoot, buckets)
-	} else {
-		if intDataUpdateTracker.debug {
-			logger.Info(color.Green("CrawlAndGetDataUsage:")+" Cache not updated, %d entries", len(cache.Cache))
-		}
-	}
-
-	return err
+	return NotImplemented{}
 }
 
 /// Bucket operations