Mirror of https://github.com/minio/minio.git (synced 2024-12-24 22:25:54 -05:00)
Disable crawler in FS/NAS gateway mode (#9695)
No one really uses FS for large-scale accounting usage, nor do we crawl in NAS gateway mode. It is worthwhile to simply disable this feature since it is not useful for anyone. Bonus: disable bucket quota ops as well in FS and gateway mode.
This commit is contained in:
parent
301de169e9
commit
eba423bb9d
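In effect, the data usage crawler and the bucket-quota admin APIs are now gated on the deployment mode: they are wired up only in erasure coded (XL) or distributed erasure coded mode, and only when the crawl has not been switched off via the environment. The sketch below is a minimal, self-contained illustration of that decision assembled from the hunks that follow; the variable and env-key names marked as stand-ins are not MinIO's real identifiers (the real ones in the diff are globalIsXL, globalIsDistXL, envDataUsageCrawlConf and config.EnableOn).

package main

import (
	"fmt"
	"os"
)

// Stand-ins for MinIO's package globals globalIsXL / globalIsDistXL.
var (
	isErasure     = false // erasure coded (XL) deployment
	isDistErasure = false // distributed erasure coded deployment
)

// crawlEnabled mirrors the gating added by this commit: FS and NAS gateway
// deployments never crawl; erasure coded deployments crawl unless the crawl
// is disabled through the environment. "EXAMPLE_DISK_USAGE_CRAWL" is a
// stand-in for the real envDataUsageCrawlConf key.
func crawlEnabled() bool {
	if !isErasure && !isDistErasure {
		return false // FS or gateway mode: crawler disabled
	}
	v := os.Getenv("EXAMPLE_DISK_USAGE_CRAWL")
	return v == "" || v == "on" // unset defaults to enabled ("on")
}

func main() {
	fmt.Println("data usage crawler enabled:", crawlEnabled())
}

The bucket quota admin endpoints are registered behind the same check, so get-bucket-quota and set-bucket-quota simply do not exist in FS and gateway mode. The full diff follows.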
@@ -636,6 +636,7 @@ func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http
     // Load the latest calculated data usage
     dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
     if err != nil {
+        // log the error, continue with the accounting response
         logger.LogIf(ctx, err)
     }
@@ -74,7 +74,7 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {

     // Setup admin mgmt REST API handlers.
     adminRouter := mux.NewRouter()
-    registerAdminRouter(adminRouter, true, true, false)
+    registerAdminRouter(adminRouter, true, true)

     return &adminXLTestBed{
         xlDirs: xlDirs,
@@ -20,6 +20,8 @@ import (
     "net/http"

     "github.com/gorilla/mux"
+    "github.com/minio/minio/cmd/config"
+    "github.com/minio/minio/pkg/env"
     "github.com/minio/minio/pkg/madmin"
 )

@@ -35,7 +37,7 @@ const (
 type adminAPIHandlers struct{}

 // registerAdminRouter - Add handler functions for each service REST API routes.
-func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps, enableBucketQuotaOps bool) {
+func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {

     adminAPI := adminAPIHandlers{}
     // Admin router
@@ -170,7 +172,8 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps, enab
         }

         // Quota operations
-        if enableConfigOps && enableBucketQuotaOps {
+        if globalIsXL || globalIsDistXL {
+            if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
             // GetBucketQuotaConfig
             adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
                 httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
@@ -178,6 +181,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps, enab
             adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
                 httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
         }
+        }

         // -- Top APIs --
         // Top locks
@@ -55,10 +55,14 @@ const (

 // initDataUsageStats will start the crawler unless disabled.
 func initDataUsageStats(ctx context.Context, objAPI ObjectLayer) {
+    // data usage stats are only available erasure
+    // coded mode
+    if globalIsXL || globalIsDistXL {
     if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
         go runDataUsageInfo(ctx, objAPI)
     }
 }
+}

 func runDataUsageInfo(ctx context.Context, objAPI ObjectLayer) {
     // Load current bloom cycle
cmd/fs-v1.go (57 changes)
@@ -19,7 +19,6 @@ package cmd
 import (
     "bytes"
     "context"
-    "encoding/json"
     "fmt"
     "io"
     "io/ioutil"
@@ -31,7 +30,6 @@ import (
     "strings"
     "sync"
     "sync/atomic"
-    "time"

     jsoniter "github.com/json-iterator/go"
     "github.com/minio/minio-go/v6/pkg/s3utils"
@@ -39,7 +37,6 @@ import (
     "github.com/minio/minio/cmd/config"
     xhttp "github.com/minio/minio/cmd/http"
     "github.com/minio/minio/cmd/logger"
-    "github.com/minio/minio/pkg/color"
     "github.com/minio/minio/pkg/lock"
     "github.com/minio/minio/pkg/madmin"
     "github.com/minio/minio/pkg/mimedb"
@@ -229,61 +226,9 @@ func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) StorageInfo {
     return storageInfo
 }

-func (fs *FSObjects) waitForLowActiveIO() {
-    for atomic.LoadInt64(&fs.activeIOCount) >= fs.maxActiveIOCount {
-        time.Sleep(lowActiveIOWaitTick)
-    }
-}
-
 // CrawlAndGetDataUsage returns data usage stats of the current FS deployment
 func (fs *FSObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
-    // Load bucket totals
-    var oldCache dataUsageCache
-    err := oldCache.load(ctx, fs, dataUsageCacheName)
-    if err != nil {
-        return err
-    }
-    if oldCache.Info.Name == "" {
-        oldCache.Info.Name = dataUsageRoot
-    }
-    buckets, err := fs.ListBuckets(ctx)
-    if err != nil {
-        return err
-    }
-    oldCache.Info.BloomFilter = nil
-    if bf != nil {
-        oldCache.Info.BloomFilter = bf.bytes()
-    }
-
-    if false && intDataUpdateTracker.debug {
-        b, _ := json.MarshalIndent(bf, "", " ")
-        logger.Info("Bloom filter: %v", string(b))
-    }
-    cache, err := updateUsage(ctx, fs.fsPath, oldCache, fs.waitForLowActiveIO, func(item Item) (int64, error) {
-        // Get file size, symlinks which cannot be
-        // followed are automatically filtered by fastwalk.
-        fi, err := os.Stat(item.Path)
-        if err != nil {
-            return 0, errSkipFile
-        }
-        return fi.Size(), nil
-    })
-    cache.Info.BloomFilter = nil
-
-    // Even if there was an error, the new cache may have better info.
-    if cache.Info.LastUpdate.After(oldCache.Info.LastUpdate) {
-        if intDataUpdateTracker.debug {
-            logger.Info(color.Green("CrawlAndGetDataUsage:")+" Saving cache with %d entries", len(cache.Cache))
-        }
-        logger.LogIf(ctx, cache.save(ctx, fs, dataUsageCacheName))
-        updates <- cache.dui(dataUsageRoot, buckets)
-    } else {
-        if intDataUpdateTracker.debug {
-            logger.Info(color.Green("CrawlAndGetDataUsage:")+" Cache not updated, %d entries", len(cache.Cache))
-        }
-    }
-
-    return err
+    return NotImplemented{}
 }

 /// Bucket operations
@@ -180,11 +180,12 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
     }

     enableIAMOps := globalEtcdClient != nil
-    enableBucketQuotaOps := env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn

     // Enable IAM admin APIs if etcd is enabled, if not just enable basic
     // operations such as profiling, server info etc.
-    registerAdminRouter(router, enableConfigOps, enableIAMOps, enableBucketQuotaOps)
+    //
+    // quota opts are disabled in gateway mode.
+    registerAdminRouter(router, enableConfigOps, enableIAMOps)

     // Add healthcheck router
     registerHealthCheckRouter(router)
@@ -81,7 +81,7 @@ var globalHandlers = []HandlerFunc{
 }

 // configureServer handler returns final handler for the http server.
-func configureServerHandler(endpointZones EndpointZones, enableBucketQuotaOps bool) (http.Handler, error) {
+func configureServerHandler(endpointZones EndpointZones) (http.Handler, error) {
     // Initialize router. `SkipClean(true)` stops gorilla/mux from
     // normalizing URL path minio/minio#3256
     router := mux.NewRouter().SkipClean(true).UseEncodedPath()
@@ -95,7 +95,7 @@ func configureServerHandler(endpointZones EndpointZones, enableBucketQuotaOps bo
     registerSTSRouter(router)

     // Add Admin router, all APIs are enabled in server mode.
-    registerAdminRouter(router, true, true, enableBucketQuotaOps)
+    registerAdminRouter(router, true, true)

     // Add healthcheck router
     registerHealthCheckRouter(router)
@@ -24,7 +24,6 @@ import (
     "io"
     "log"
     "net"
-    "net/http"
     "os"
     "os/signal"
     "strings"
@@ -441,9 +440,7 @@ func serverMain(ctx *cli.Context) {
     }

     // Configure server.
-    var handler http.Handler
-    enableBucketQuotaOps := env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn
-    handler, err = configureServerHandler(globalEndpoints, enableBucketQuotaOps)
+    handler, err := configureServerHandler(globalEndpoints)
     if err != nil {
         logger.Fatal(config.ErrUnexpectedError(err), "Unable to configure one of server's RPC services")
     }
@@ -311,7 +311,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
     testServer.AccessKey = credentials.AccessKey
     testServer.SecretKey = credentials.SecretKey

-    httpHandler, err := configureServerHandler(testServer.Disks, false)
+    httpHandler, err := configureServerHandler(testServer.Disks)
     if err != nil {
         t.Fatalf("Failed to configure one of the RPC services <ERROR> %s", err)
     }
@@ -257,7 +257,7 @@ This behavior is consistent across all keys, each key self documents itself with
 ## Environment only settings (not in config)

 #### Usage crawler
-Data usage crawler is enabled by default, following ENVs allow for more staggered delay in terms of usage calculation.
+Data usage crawler is enabled by default on erasure coded and distributed erasure coded deployments.

 The crawler adapts to the system speed and completely pauses when the system is under load. It is possible to adjust the speed of the crawler and thereby the latency of updates being reflected. The delays between each operation of the crawl can be adjusted by the `MINIO_DISK_USAGE_CRAWL_DELAY` environment variable. By default the value is `10`. This means the crawler will sleep *10x* the time each operation takes.

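For intuition on the delay setting documented above, here is a small self-contained sketch of what "sleep 10x the time each operation takes" means in practice. It is not MinIO's crawler implementation; readDelayMultiplier and doCrawlStep are hypothetical names used only for illustration, and only the MINIO_DISK_USAGE_CRAWL_DELAY key and its default of 10 come from the documentation above.

package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// readDelayMultiplier reads MINIO_DISK_USAGE_CRAWL_DELAY and falls back to
// the documented default of 10. Parsing details here are illustrative only.
func readDelayMultiplier() int64 {
	if v := os.Getenv("MINIO_DISK_USAGE_CRAWL_DELAY"); v != "" {
		if n, err := strconv.ParseInt(v, 10, 64); err == nil && n > 0 {
			return n
		}
	}
	return 10
}

// doCrawlStep stands in for one unit of crawl work (e.g. scanning a directory).
func doCrawlStep() {
	time.Sleep(5 * time.Millisecond)
}

func main() {
	delay := readDelayMultiplier()
	for i := 0; i < 3; i++ {
		start := time.Now()
		doCrawlStep()
		took := time.Since(start)
		// Throttle: sleep "delay" times as long as the operation itself took.
		fmt.Printf("step %d took %v, sleeping %v\n", i, took, time.Duration(delay)*took)
		time.Sleep(time.Duration(delay) * took)
	}
}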