Disable crawler in FS/NAS gateway mode (#9695)

No one really uses FS for large-scale accounting
usage, nor do we crawl in NAS gateway mode. It is
worthwhile to simply disable this feature as it's
not useful for anyone.

Bonus: disable bucket quota ops as well in FS
and gateway mode.
Harshavardhana 2020-05-25 00:17:52 -07:00 committed by GitHub
parent 301de169e9
commit eba423bb9d
10 changed files with 29 additions and 77 deletions
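
The recurring pattern in the diff below is a two-level gate: the crawler, and the bucket quota admin routes that depend on its output, are only wired up when the deployment is erasure coded, and even then only if the usage-crawl environment toggle has not been switched off. The standalone Go sketch below illustrates that gate; the variable and constant names mirror the ones in the diff, but the env key's string value and the default handling are simplified assumptions, not MinIO's exact configuration code.

```go
package main

import (
	"fmt"
	"os"
)

// Toy stand-ins for the globals and env key used in the diff below.
// The names mirror the real ones, but the key's string value and the
// default handling are assumptions made for this illustration.
var (
	globalIsXL     = false // single-node erasure coded deployment
	globalIsDistXL = false // distributed erasure coded deployment
)

const envDataUsageCrawlConf = "MINIO_DISK_USAGE_CRAWL_ENABLE" // assumed key name

// crawlerEnabled reproduces the two-level gate: the deployment must be
// erasure coded, and the crawl toggle must not be switched off. The
// real code uses env.Get(envDataUsageCrawlConf, config.EnableOn), i.e.
// the crawler defaults to "on" when the variable is unset.
func crawlerEnabled() bool {
	if !globalIsXL && !globalIsDistXL {
		return false // FS and gateway modes never crawl
	}
	return os.Getenv(envDataUsageCrawlConf) != "off"
}

func main() {
	fmt.Println("crawler enabled:", crawlerEnabled())
}
```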

View File

@ -636,6 +636,7 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
// log the error, continue with the accounting response
logger.LogIf(ctx, err)
}

View File

@ -74,7 +74,7 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter, true, true, false)
registerAdminRouter(adminRouter, true, true)
return &adminXLTestBed{
xlDirs: xlDirs,

View File

@ -20,6 +20,8 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
@ -35,7 +37,7 @@ const (
type adminAPIHandlers struct{}
// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps, enableBucketQuotaOps bool) {
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
adminAPI := adminAPIHandlers{}
// Admin router
@ -170,13 +172,15 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps, enab
}
// Quota operations
if enableConfigOps && enableBucketQuotaOps {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
if globalIsXL || globalIsDistXL {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
}
// -- Top APIs --

View File

@ -55,8 +55,12 @@ const (
// initDataUsageStats will start the crawler unless disabled.
func initDataUsageStats(ctx context.Context, objAPI ObjectLayer) {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
go runDataUsageInfo(ctx, objAPI)
// data usage stats are only available in erasure
// coded mode
if globalIsXL || globalIsDistXL {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
go runDataUsageInfo(ctx, objAPI)
}
}
}

View File

@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
@ -31,7 +30,6 @@ import (
"strings"
"sync"
"sync/atomic"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio-go/v6/pkg/s3utils"
@ -39,7 +37,6 @@ import (
"github.com/minio/minio/cmd/config"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/mimedb"
@ -229,61 +226,9 @@ func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) StorageInfo {
return storageInfo
}
func (fs *FSObjects) waitForLowActiveIO() {
for atomic.LoadInt64(&fs.activeIOCount) >= fs.maxActiveIOCount {
time.Sleep(lowActiveIOWaitTick)
}
}
// CrawlAndGetDataUsage returns data usage stats of the current FS deployment
func (fs *FSObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
// Load bucket totals
var oldCache dataUsageCache
err := oldCache.load(ctx, fs, dataUsageCacheName)
if err != nil {
return err
}
if oldCache.Info.Name == "" {
oldCache.Info.Name = dataUsageRoot
}
buckets, err := fs.ListBuckets(ctx)
if err != nil {
return err
}
oldCache.Info.BloomFilter = nil
if bf != nil {
oldCache.Info.BloomFilter = bf.bytes()
}
if false && intDataUpdateTracker.debug {
b, _ := json.MarshalIndent(bf, "", " ")
logger.Info("Bloom filter: %v", string(b))
}
cache, err := updateUsage(ctx, fs.fsPath, oldCache, fs.waitForLowActiveIO, func(item Item) (int64, error) {
// Get file size, symlinks which cannot be
// followed are automatically filtered by fastwalk.
fi, err := os.Stat(item.Path)
if err != nil {
return 0, errSkipFile
}
return fi.Size(), nil
})
cache.Info.BloomFilter = nil
// Even if there was an error, the new cache may have better info.
if cache.Info.LastUpdate.After(oldCache.Info.LastUpdate) {
if intDataUpdateTracker.debug {
logger.Info(color.Green("CrawlAndGetDataUsage:")+" Saving cache with %d entries", len(cache.Cache))
}
logger.LogIf(ctx, cache.save(ctx, fs, dataUsageCacheName))
updates <- cache.dui(dataUsageRoot, buckets)
} else {
if intDataUpdateTracker.debug {
logger.Info(color.Green("CrawlAndGetDataUsage:")+" Cache not updated, %d entries", len(cache.Cache))
}
}
return err
return NotImplemented{}
}
/// Bucket operations
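
With this change, FS mode reports data-usage crawling as unsupported instead of scanning the filesystem. A caller of CrawlAndGetDataUsage can therefore treat a NotImplemented error as "nothing to crawl". The snippet below is an illustrative, self-contained sketch of that handling; the NotImplemented type is redefined locally only to keep the sketch runnable and is not the cmd package's actual declaration.

```go
package main

import (
	"errors"
	"fmt"
)

// NotImplemented stands in for the sentinel error the FS backend now
// returns from CrawlAndGetDataUsage; redefined here only to keep this
// sketch self-contained.
type NotImplemented struct{}

func (NotImplemented) Error() string { return "operation not implemented" }

// crawlIfSupported shows one way a caller could react to the new FS
// behaviour: a NotImplemented error means "nothing to crawl", anything
// else is a genuine failure. Illustrative only, not MinIO's code path.
func crawlIfSupported(crawl func() error) error {
	err := crawl()
	if err == nil {
		return nil
	}
	var ni NotImplemented
	if errors.As(err, &ni) {
		return nil // FS / gateway backend: skip crawling silently
	}
	return err
}

func main() {
	// FS mode: CrawlAndGetDataUsage now just returns NotImplemented{}.
	err := crawlIfSupported(func() error { return NotImplemented{} })
	fmt.Println("crawl error:", err) // prints: crawl error: <nil>
}
```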

View File

@ -180,11 +180,12 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
}
enableIAMOps := globalEtcdClient != nil
enableBucketQuotaOps := env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn
// Enable IAM admin APIs if etcd is enabled, if not just enable basic
// operations such as profiling, server info etc.
registerAdminRouter(router, enableConfigOps, enableIAMOps, enableBucketQuotaOps)
//
// quota ops are disabled in gateway mode.
registerAdminRouter(router, enableConfigOps, enableIAMOps)
// Add healthcheck router
registerHealthCheckRouter(router)

View File

@ -81,7 +81,7 @@ var globalHandlers = []HandlerFunc{
}
// configureServer handler returns final handler for the http server.
func configureServerHandler(endpointZones EndpointZones, enableBucketQuotaOps bool) (http.Handler, error) {
func configureServerHandler(endpointZones EndpointZones) (http.Handler, error) {
// Initialize router. `SkipClean(true)` stops gorilla/mux from
// normalizing URL path minio/minio#3256
router := mux.NewRouter().SkipClean(true).UseEncodedPath()
@ -95,7 +95,7 @@ func configureServerHandler(endpointZones EndpointZones, enableBucketQuotaOps bo
registerSTSRouter(router)
// Add Admin router, all APIs are enabled in server mode.
registerAdminRouter(router, true, true, enableBucketQuotaOps)
registerAdminRouter(router, true, true)
// Add healthcheck router
registerHealthCheckRouter(router)

View File

@ -24,7 +24,6 @@ import (
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"strings"
@ -441,9 +440,7 @@ func serverMain(ctx *cli.Context) {
}
// Configure server.
var handler http.Handler
enableBucketQuotaOps := env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn
handler, err = configureServerHandler(globalEndpoints, enableBucketQuotaOps)
handler, err := configureServerHandler(globalEndpoints)
if err != nil {
logger.Fatal(config.ErrUnexpectedError(err), "Unable to configure one of server's RPC services")
}

View File

@ -311,7 +311,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
testServer.AccessKey = credentials.AccessKey
testServer.SecretKey = credentials.SecretKey
httpHandler, err := configureServerHandler(testServer.Disks, false)
httpHandler, err := configureServerHandler(testServer.Disks)
if err != nil {
t.Fatalf("Failed to configure one of the RPC services <ERROR> %s", err)
}

View File

@ -257,7 +257,7 @@ This behavior is consistent across all keys, each key self documents itself with
## Environment only settings (not in config)
#### Usage crawler
Data usage crawler is enabled by default, following ENVs allow for more staggered delay in terms of usage calculation.
Data usage crawler is enabled by default on erasure coded and distributed erasure coded deployments.
The crawler adapts to the system speed and completely pauses when the system is under load. It is possible to adjust the speed of the crawler and thereby the latency of updates being reflected. The delays between each operation of the crawl can be adjusted by the `MINIO_DISK_USAGE_CRAWL_DELAY` environment variable. By default the value is `10`. This means the crawler will sleep *10x* the time each operation takes.
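
The delay semantics described above, sleeping a configured multiple of each operation's duration, can be pictured with a small standalone Go sketch. It reads `MINIO_DISK_USAGE_CRAWL_DELAY` as the documentation describes, but the loop itself is a simplification for illustration, not MinIO's actual crawler implementation.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// throttledCrawl sketches the documented MINIO_DISK_USAGE_CRAWL_DELAY
// behaviour: after each crawl operation the crawler sleeps the delay
// multiple of the time the operation took (default 10x).
func throttledCrawl(items []string, op func(string)) {
	delay := 10 // documented default multiplier
	if v, err := strconv.Atoi(os.Getenv("MINIO_DISK_USAGE_CRAWL_DELAY")); err == nil && v > 0 {
		delay = v
	}
	for _, item := range items {
		start := time.Now()
		op(item)
		// Yield for delay x the operation's duration before moving on.
		time.Sleep(time.Duration(delay) * time.Since(start))
	}
}

func main() {
	throttledCrawl([]string{"bucket/a.txt", "bucket/b.txt"}, func(name string) {
		fmt.Println("crawled", name)
	})
}
```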