fix: Avoid double usage calculation on every restart (#8856)
On every restart of the server, usage was recalculated from scratch, which is wasteful; instead, wait a sufficient amount of time before starting the crawling routine. This PR also avoids many duplicate string allocations, optimizes the use of string builders, and skips crawling through symbolic links. Fixes #8844
This commit is contained in: parent e2b3c083aa, commit f14f60a487
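The heart of the change is the timeToCrawl scheduling visible in the data-usage hunks below: rather than kicking off a full crawl on every boot, the server derives a wait from the persisted LastUpdate of the previous crawl. A minimal standalone sketch of that idea, simplified to use the wall-clock delta (crawlInterval and timeToNextCrawl are illustrative names here, not MinIO's API):

package main

import (
	"fmt"
	"time"
)

// Stand-in for the commit's dataUsageCrawlInterval (12 hours).
const crawlInterval = 12 * time.Hour

// timeToNextCrawl mirrors the commit's timeToCrawl logic: crawl almost
// immediately if there has never been a crawl or the last one is stale,
// otherwise sleep out the remainder of the interval.
func timeToNextCrawl(lastUpdate time.Time) time.Duration {
	if lastUpdate.IsZero() {
		return time.Second // never crawled, start right away
	}
	if elapsed := time.Since(lastUpdate); elapsed < crawlInterval {
		return crawlInterval - elapsed // a recent crawl exists, wait out the rest
	}
	return time.Second // waited long enough, crawl now
}

func main() {
	fmt.Println(timeToNextCrawl(time.Time{}))                   // 1s, first ever crawl
	fmt.Println(timeToNextCrawl(time.Now().Add(-13*time.Hour))) // 1s, last crawl is stale
	fmt.Println(timeToNextCrawl(time.Now().Add(-2*time.Hour)))  // ~10h, crawled recently
}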
@@ -302,13 +302,13 @@ func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Re
 	dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
 	if err != nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
 
 	dataUsageInfoJSON, err := json.Marshal(dataUsageInfo)
 	if err != nil {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
 
@@ -75,7 +75,7 @@ func (h *healRoutine) run() {
 
 			var res madmin.HealResultItem
 			var err error
-			bucket, object := urlPath2BucketObjectName(task.path)
+			bucket, object := path2BucketObject(task.path)
 			switch {
 			case bucket == "" && object == "":
 				res, err = bgHealDiskFormat(ctx, task.opts)
@@ -20,6 +20,8 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"os"
+	"path/filepath"
 	"time"
 
 	jsoniter "github.com/json-iterator/go"
@@ -48,40 +50,36 @@ func runDataUsageInfoUpdateRoutine() {
 		break
 	}
 
-	ctx := context.Background()
-
-	switch v := objAPI.(type) {
-	case *xlZones:
-		runDataUsageInfoForXLZones(ctx, v, GlobalServiceDoneCh)
-	case *FSObjects:
-		runDataUsageInfoForFS(ctx, v, GlobalServiceDoneCh)
-	default:
-		return
-	}
+	runDataUsageInfo(context.Background(), objAPI, GlobalServiceDoneCh)
 }
 
-func runDataUsageInfoForFS(ctx context.Context, fsObj *FSObjects, endCh <-chan struct{}) {
-	t := time.NewTicker(dataUsageCrawlInterval)
-	defer t.Stop()
-	for {
-		// Get data usage info of the FS Object
-		usageInfo := fsObj.crawlAndGetDataUsageInfo(ctx, endCh)
-		// Save the data usage in the disk
-		err := storeDataUsageInBackend(ctx, fsObj, usageInfo)
-		if err != nil {
-			logger.LogIf(ctx, err)
-		}
-		select {
-		case <-endCh:
-			return
-		// Wait until the next crawl interval
-		case <-t.C:
-		}
-	}
+// timeToNextCrawl returns the duration until next crawl should occur
+// this is validated by verifying the LastUpdate time.
+func timeToCrawl(ctx context.Context, objAPI ObjectLayer) time.Duration {
+	dataUsageInfo, err := loadDataUsageFromBackend(ctx, objAPI)
+	if err != nil {
+		// Upon an error wait for like 10
+		// seconds to start the crawler.
+		return 10 * time.Second
+	}
+	// File indeed doesn't exist when LastUpdate is zero
+	// so we have never crawled, start crawl right away.
+	if dataUsageInfo.LastUpdate.IsZero() {
+		return 1 * time.Second
+	}
+	waitDuration := dataUsageInfo.LastUpdate.Sub(UTCNow())
+	if waitDuration > dataUsageCrawlInterval {
+		// Waited long enough start crawl in a 1 second
+		return 1 * time.Second
+	}
+	// No crawling needed, ask the routine to wait until
+	// the daily interval 12hrs - delta between last update
+	// with current time.
+	return dataUsageCrawlInterval - waitDuration
 }
 
-func runDataUsageInfoForXLZones(ctx context.Context, z *xlZones, endCh <-chan struct{}) {
-	locker := z.NewNSLock(ctx, minioMetaBucket, "leader-data-usage-info")
+func runDataUsageInfo(ctx context.Context, objAPI ObjectLayer, endCh <-chan struct{}) {
+	locker := objAPI.NewNSLock(ctx, minioMetaBucket, "leader-data-usage-info")
 	for {
 		err := locker.GetLock(newDynamicTimeout(time.Millisecond, time.Millisecond))
 		if err != nil {
@@ -93,19 +91,17 @@ func runDataUsageInfoForXLZones(ctx context.Context, z *xlZones, endCh <-chan st
 		break
 	}
 
-	t := time.NewTicker(dataUsageCrawlInterval)
-	defer t.Stop()
 	for {
-		usageInfo := z.crawlAndGetDataUsage(ctx, endCh)
-		err := storeDataUsageInBackend(ctx, z, usageInfo)
-		if err != nil {
-			logger.LogIf(ctx, err)
-		}
+		wait := timeToCrawl(ctx, objAPI)
 		select {
 		case <-endCh:
 			locker.Unlock()
 			return
-		case <-t.C:
+		case <-time.NewTimer(wait).C:
+			// Crawl only when no previous crawl has occurred,
+			// or its been too long since last crawl.
+			err := storeDataUsageInBackend(ctx, objAPI, objAPI.CrawlAndGetDataUsage(ctx, endCh))
+			logger.LogIf(ctx, err)
 		}
 	}
 }
@@ -131,7 +127,10 @@ func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsag
 
 	err := objAPI.GetObject(ctx, minioMetaBackgroundOpsBucket, dataUsageObjName, 0, -1, &dataUsageInfoJSON, "", ObjectOptions{})
 	if err != nil {
-		return DataUsageInfo{}, nil
+		if isErrObjectNotFound(err) {
+			return DataUsageInfo{}, nil
+		}
+		return DataUsageInfo{}, toObjectErr(err, minioMetaBackgroundOpsBucket, dataUsageObjName)
 	}
 
 	var dataUsageInfo DataUsageInfo
@@ -143,3 +142,85 @@ func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsag
 
 	return dataUsageInfo, nil
 }
+
+// Item represents each file while walking.
+type Item struct {
+	Path string
+	Typ  os.FileMode
+}
+
+type getSizeFn func(item Item) (int64, error)
+type activeIOFn func() error
+
+func updateUsage(basePath string, endCh <-chan struct{}, waitForLowActiveIO activeIOFn, getSize getSizeFn) DataUsageInfo {
+	var dataUsageInfo = DataUsageInfo{
+		BucketsSizes:          make(map[string]uint64),
+		ObjectsSizesHistogram: make(map[string]uint64),
+	}
+
+	itemCh := make(chan Item)
+	skipCh := make(chan error)
+	defer close(skipCh)
+
+	go func() {
+		defer close(itemCh)
+		fastWalk(basePath, func(path string, typ os.FileMode) error {
+			if err := waitForLowActiveIO(); err != nil {
+				return filepath.SkipDir
+			}
+
+			select {
+			case <-endCh:
+				return filepath.SkipDir
+			case itemCh <- Item{path, typ}:
+			}
+			return <-skipCh
+		})
+	}()
+
+	for {
+		select {
+		case <-endCh:
+			return dataUsageInfo
+		case item, ok := <-itemCh:
+			if !ok {
+				return dataUsageInfo
+			}
+
+			bucket, entry := path2BucketObjectWithBasePath(basePath, item.Path)
+			if bucket == "" {
+				skipCh <- nil
+				continue
+			}
+
+			if isReservedOrInvalidBucket(bucket, false) {
+				skipCh <- filepath.SkipDir
+				continue
+			}
+
+			if entry == "" && item.Typ&os.ModeDir != 0 {
+				dataUsageInfo.BucketsCount++
+				dataUsageInfo.BucketsSizes[bucket] = 0
+				skipCh <- nil
+				continue
+			}
+
+			if item.Typ&os.ModeDir != 0 {
+				skipCh <- nil
+				continue
+			}
+
+			size, err := getSize(item)
+			if err != nil {
+				skipCh <- errSkipFile
+				continue
+			}
+
+			dataUsageInfo.ObjectsCount++
+			dataUsageInfo.ObjectsTotalSize += uint64(size)
+			dataUsageInfo.BucketsSizes[bucket] += uint64(size)
+			dataUsageInfo.ObjectsSizesHistogram[objSizeToHistoInterval(uint64(size))]++
+			skipCh <- nil
+		}
+	}
+}
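The updateUsage function added above decouples walking from accounting with two unbuffered channels: the fastWalk goroutine sends each Item over itemCh and then blocks on skipCh for the consumer's verdict, so the single consumer goroutine owns the maps and needs no mutex (this is what lets the commit drop the old dataUsageInfoMu locks). A stripped-down sketch of that handshake, with plain strings instead of Item and errSkip standing in for errSkipFile:

package main

import (
	"errors"
	"fmt"
)

var errSkip = errors.New("skip this entry")

func main() {
	itemCh := make(chan string)
	skipCh := make(chan error)
	defer close(skipCh)

	// Producer: hands each entry to the consumer, then waits for a verdict
	// before walking further, like the fastWalk callback above.
	go func() {
		defer close(itemCh)
		for _, p := range []string{"bucket/a", ".minio.sys/b", "bucket/c"} {
			itemCh <- p
			if err := <-skipCh; err != nil {
				fmt.Println("walker skipping:", p)
			}
		}
	}()

	// Consumer: the only goroutine that touches the aggregated state.
	counted := 0
	for item := range itemCh {
		if item == ".minio.sys/b" {
			skipCh <- errSkip // tell the walker to skip reserved entries
			continue
		}
		counted++
		skipCh <- nil
	}
	fmt.Println("counted entries:", counted)
}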
@@ -17,14 +17,7 @@ import (
 	"sync"
 )
 
-// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
-// symlink named in the call may be traversed.
-var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
-
-// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the
-// callback should not be called for any other files in the current directory.
-// Child directories will still be traversed.
-var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
+var errSkipFile = errors.New("fastwalk: skip this file")
 
 // Walk is a faster implementation of filepath.Walk.
 //
@@ -161,25 +154,32 @@ func (w *walker) enqueue(it walkItem) {
 	}
 }
 
+var stringsBuilderPool = sync.Pool{
+	New: func() interface{} {
+		return &strings.Builder{}
+	},
+}
+
 func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
-	joined := dirName + string(os.PathSeparator) + baseName
+	builder := stringsBuilderPool.Get().(*strings.Builder)
+	defer func() {
+		builder.Reset()
+		stringsBuilderPool.Put(builder)
+	}()
+
+	builder.WriteString(dirName)
+	if !strings.HasSuffix(dirName, SlashSeparator) {
+		builder.WriteString(SlashSeparator)
+	}
+	builder.WriteString(baseName)
 	if typ == os.ModeDir {
-		w.enqueue(walkItem{dir: joined})
+		w.enqueue(walkItem{dir: builder.String()})
 		return nil
 	}
 
-	err := w.fn(joined, typ)
-	if typ == os.ModeSymlink {
-		if err == ErrTraverseLink {
-			// Set callbackDone so we don't call it twice for both the
-			// symlink-as-symlink and the symlink-as-directory later:
-			w.enqueue(walkItem{dir: joined, callbackDone: true})
-			return nil
-		}
-		if err == filepath.SkipDir {
-			// Permit SkipDir on symlinks too.
-			return nil
-		}
+	err := w.fn(builder.String(), typ)
+	if err == filepath.SkipDir || err == errSkipFile {
+		return nil
 	}
 	return err
 }
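The onDirEnt hunk above removes the per-entry string concatenation (dirName + separator + baseName allocates a fresh string for every directory entry) in favor of a pooled strings.Builder. A rough standalone illustration of the same pattern (joinPath and builderPool are illustrative names, not MinIO's):

package main

import (
	"fmt"
	"strings"
	"sync"
)

// A pool of builders so concurrent walkers reuse buffers instead of
// allocating a fresh one per directory entry.
var builderPool = sync.Pool{
	New: func() interface{} { return &strings.Builder{} },
}

func joinPath(dir, name string) string {
	b := builderPool.Get().(*strings.Builder)
	defer func() {
		// Reset drops the builder's reference to its buffer; the string
		// returned below still points at that memory, which is never
		// written again, so this reuse is safe.
		b.Reset()
		builderPool.Put(b)
	}()

	b.WriteString(dir)
	if !strings.HasSuffix(dir, "/") {
		b.WriteString("/")
	}
	b.WriteString(name)
	return b.String()
}

func main() {
	fmt.Println(joinPath("data/bucket", "object.txt")) // data/bucket/object.txt
}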
@@ -189,22 +189,13 @@ func readDirFn(dirName string, fn func(dirName, entName string, typ os.FileMode)
 	if err != nil {
 		return err
 	}
-	skipFiles := false
 	for _, fi := range fis {
 		var mode os.FileMode
 		if strings.HasSuffix(fi, SlashSeparator) {
 			mode |= os.ModeDir
 		}
 
-		if mode == 0 && skipFiles {
-			continue
-		}
-
-		if err := fn(dirName, fi, mode); err != nil {
-			if err == ErrSkipFiles {
-				skipFiles = true
-				continue
-			}
+		if err = fn(dirName, fi, mode); err != nil {
 			return err
 		}
 	}
@@ -214,7 +205,7 @@ func readDirFn(dirName string, fn func(dirName, entName string, typ os.FileMode)
 func (w *walker) walk(root string, runUserCallback bool) error {
 	if runUserCallback {
 		err := w.fn(root, os.ModeDir)
-		if err == filepath.SkipDir {
+		if err == filepath.SkipDir || err == errSkipFile {
 			return nil
 		}
 		if err != nil {
cmd/fs-v1.go (94 lines changed)
@@ -27,7 +27,6 @@ import (
 	"os"
 	"os/user"
 	"path"
-	"path/filepath"
 	"sort"
 	"strings"
 	"sync"
@@ -111,8 +110,7 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
 		return err
 	}
 
-	metaStatsPath := pathJoin(fsPath, minioMetaBackgroundOpsBucket, fsUUID)
-	if err := os.MkdirAll(metaStatsPath, 0777); err != nil {
+	if err := os.MkdirAll(pathJoin(fsPath, minioMetaBackgroundOpsBucket), 0777); err != nil {
 		return err
 	}
 
@@ -229,90 +227,30 @@ func (fs *FSObjects) StorageInfo(ctx context.Context) StorageInfo {
 }
 
 func (fs *FSObjects) waitForLowActiveIO() error {
-	t := time.NewTicker(lowActiveIOWaitTick)
-	defer t.Stop()
-	for {
-		if atomic.LoadInt64(&fs.activeIOCount) >= fs.maxActiveIOCount {
-			select {
-			case <-GlobalServiceDoneCh:
-				return errors.New("forced exit")
-			case <-t.C:
-				continue
-			}
+	for atomic.LoadInt64(&fs.activeIOCount) >= fs.maxActiveIOCount {
+		select {
+		case <-GlobalServiceDoneCh:
+			return errors.New("forced exit")
+		case <-time.NewTimer(lowActiveIOWaitTick).C:
+			continue
 		}
-		break
 	}
 	return nil
 }
 
-// crawlAndGetDataUsageInfo returns data usage stats of the current FS deployment
-func (fs *FSObjects) crawlAndGetDataUsageInfo(ctx context.Context, endCh <-chan struct{}) DataUsageInfo {
-	var dataUsageInfoMu sync.Mutex
-	var dataUsageInfo = DataUsageInfo{
-		BucketsSizes:          make(map[string]uint64),
-		ObjectsSizesHistogram: make(map[string]uint64),
-	}
-
-	walkFn := func(origPath string, typ os.FileMode) error {
-
-		select {
-		case <-GlobalServiceDoneCh:
-			return filepath.SkipDir
-		default:
-		}
-
-		if err := fs.waitForLowActiveIO(); err != nil {
-			return filepath.SkipDir
-		}
-
-		path := strings.TrimPrefix(origPath, fs.fsPath)
-		path = strings.TrimPrefix(path, SlashSeparator)
-
-		splits := splitN(path, SlashSeparator, 2)
-		bucket := splits[0]
-		prefix := splits[1]
-
-		if bucket == "" {
-			return nil
-		}
-
-		if isReservedOrInvalidBucket(bucket, false) {
-			return filepath.SkipDir
-		}
-
-		if prefix == "" {
-			dataUsageInfoMu.Lock()
-			dataUsageInfo.BucketsCount++
-			dataUsageInfo.BucketsSizes[bucket] = 0
-			dataUsageInfoMu.Unlock()
-			return nil
-		}
-
-		if typ&os.ModeDir != 0 {
-			return nil
-		}
-
-		// Get file size
-		fi, err := os.Stat(origPath)
-		if err != nil {
-			return nil
-		}
-		size := fi.Size()
-
-		dataUsageInfoMu.Lock()
-		dataUsageInfo.ObjectsCount++
-		dataUsageInfo.ObjectsTotalSize += uint64(size)
-		dataUsageInfo.BucketsSizes[bucket] += uint64(size)
-		dataUsageInfo.ObjectsSizesHistogram[objSizeToHistoInterval(uint64(size))]++
-		dataUsageInfoMu.Unlock()
-
-		return nil
-	}
-
-	fastWalk(fs.fsPath, walkFn)
+// CrawlAndGetDataUsage returns data usage stats of the current FS deployment
+func (fs *FSObjects) CrawlAndGetDataUsage(ctx context.Context, endCh <-chan struct{}) DataUsageInfo {
+	dataUsageInfo := updateUsage(fs.fsPath, endCh, fs.waitForLowActiveIO, func(item Item) (int64, error) {
+		// Get file size, symlinks which cannot be
+		// followed are automatically filtered by fastwalk.
+		fi, err := os.Stat(item.Path)
+		if err != nil {
+			return 0, errSkipFile
+		}
+		return fi.Size(), nil
+	})
 
 	dataUsageInfo.LastUpdate = UTCNow()
 	atomic.StoreUint64(&fs.totalUsed, dataUsageInfo.ObjectsTotalSize)
@@ -46,6 +46,12 @@ func NewGatewayLayerWithLocker(gwLayer ObjectLayer) ObjectLayer {
 // GatewayUnsupported list of unsupported call stubs for gateway.
 type GatewayUnsupported struct{}
 
+// CrawlAndGetDataUsage - crawl is not implemented for gateway
+func (a GatewayUnsupported) CrawlAndGetDataUsage(ctx context.Context, endCh <-chan struct{}) DataUsageInfo {
+	logger.CriticalIf(ctx, errors.New("not implemented"))
+	return DataUsageInfo{}
+}
+
 // NewNSLock is a dummy stub for gateway.
 func (a GatewayUnsupported) NewNSLock(ctx context.Context, bucket string, object string) RWLocker {
 	logger.CriticalIf(ctx, errors.New("not implemented"))
@@ -631,11 +631,11 @@ func (f bucketForwardingHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 		switch r.Method {
 		case http.MethodPut:
 			if getRequestAuthType(r) == authTypeJWT {
-				bucket, _ = urlPath2BucketObjectName(strings.TrimPrefix(r.URL.Path, minioReservedBucketPath+"/upload"))
+				bucket, _ = path2BucketObjectWithBasePath(minioReservedBucketPath+"/upload", r.URL.Path)
 			}
 		case http.MethodGet:
 			if t := r.URL.Query().Get("token"); t != "" {
-				bucket, _ = urlPath2BucketObjectName(strings.TrimPrefix(r.URL.Path, minioReservedBucketPath+"/download"))
+				bucket, _ = path2BucketObjectWithBasePath(minioReservedBucketPath+"/download", r.URL.Path)
 			} else if getRequestAuthType(r) != authTypeJWT && !strings.HasPrefix(r.URL.Path, minioReservedBucketPath) {
 				bucket, _ = request2BucketObjectName(r)
 			}
@@ -687,7 +687,7 @@ func (f bucketForwardingHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 	// requests have target bucket and object in URI and source details are in
 	// header fields
 	if r.Method == http.MethodPut && r.Header.Get(xhttp.AmzCopySource) != "" {
-		bucket, object = urlPath2BucketObjectName(r.Header.Get(xhttp.AmzCopySource))
+		bucket, object = path2BucketObject(r.Header.Get(xhttp.AmzCopySource))
 		if bucket == "" || object == "" {
 			f.handler.ServeHTTP(w, r)
 			return
@@ -98,23 +98,6 @@ func isDirectiveReplace(value string) bool {
 	return value == replaceDirective
 }
 
-// Splits an incoming path into bucket and object components.
-func path2BucketAndObject(path string) (bucket, object string) {
-	// Skip the first element if it is '/', split the rest.
-	path = strings.TrimPrefix(path, SlashSeparator)
-	pathComponents := strings.SplitN(path, SlashSeparator, 2)
-
-	// Save the bucket and object extracted from path.
-	switch len(pathComponents) {
-	case 1:
-		bucket = pathComponents[0]
-	case 2:
-		bucket = pathComponents[0]
-		object = pathComponents[1]
-	}
-	return bucket, object
-}
-
 // userMetadataKeyPrefixes contains the prefixes of used-defined metadata keys.
 // All values stored with a key starting with one of the following prefixes
 // must be extracted from the header.
@@ -57,6 +57,7 @@ type ObjectLayer interface {
 
 	// Storage operations.
 	Shutdown(context.Context) error
+	CrawlAndGetDataUsage(context.Context, <-chan struct{}) DataUsageInfo
 	StorageInfo(context.Context) StorageInfo
 
 	// Bucket operations.
@@ -743,7 +743,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		}
 	}
 
-	srcBucket, srcObject := path2BucketAndObject(cpSrcPath)
+	srcBucket, srcObject := path2BucketObject(cpSrcPath)
 	// If source object is empty or bucket is empty, reply back invalid copy source.
 	if srcObject == "" || srcBucket == "" {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
@@ -1578,7 +1578,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		}
 	}
 
-	srcBucket, srcObject := path2BucketAndObject(cpSrcPath)
+	srcBucket, srcObject := path2BucketObject(cpSrcPath)
 	// If source object is empty or bucket is empty, reply back invalid copy source.
 	if srcObject == "" || srcBucket == "" {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL, guessIsBrowserReq(r))
@@ -58,6 +58,11 @@ func isSysErrTooLong(err error) bool {
 	return errors.Is(err, syscall.ENAMETOOLONG)
 }
 
+// Check if the given error corresponds to the ELOOP (too many symlinks).
+func isSysErrTooManySymlinks(err error) bool {
+	return errors.Is(err, syscall.ELOOP)
+}
+
 // Check if the given error corresponds to ENOTEMPTY for unix
 // and ERROR_DIR_NOT_EMPTY for windows (directory not empty).
 func isSysErrNotEmpty(err error) bool {
@@ -122,8 +122,11 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
 		if typ == unexpectedFileMode || typ&os.ModeSymlink == os.ModeSymlink {
 			fi, err := os.Stat(pathJoin(dirPath, name))
 			if err != nil {
-				// It got deleted in the meantime.
-				if os.IsNotExist(err) {
+				// It got deleted in the meantime, not found
+				// or returns too many symlinks ignore this
+				// file/directory.
+				if os.IsNotExist(err) || isSysErrPathNotFound(err) ||
+					isSysErrTooManySymlinks(err) {
 					continue
 				}
 				return nil, err
cmd/posix.go (90 lines changed)
@@ -334,91 +334,39 @@ func isQuitting(endCh chan struct{}) bool {
 }
 
 func (s *posix) waitForLowActiveIO() error {
-	t := time.NewTicker(lowActiveIOWaitTick)
-	defer t.Stop()
-	for {
-		if atomic.LoadInt32(&s.activeIOCount) >= s.maxActiveIOCount {
-			select {
-			case <-GlobalServiceDoneCh:
-				return errors.New("forced exit")
-			case <-t.C:
-				continue
-			}
+	for atomic.LoadInt32(&s.activeIOCount) >= s.maxActiveIOCount {
+		select {
+		case <-GlobalServiceDoneCh:
+			return errors.New("forced exit")
+		case <-time.NewTimer(lowActiveIOWaitTick).C:
+			continue
 		}
-		break
 	}
 	return nil
 }
 
 func (s *posix) CrawlAndGetDataUsage(endCh <-chan struct{}) (DataUsageInfo, error) {
-	var dataUsageInfoMu sync.Mutex
-	var dataUsageInfo = DataUsageInfo{
-		BucketsSizes:          make(map[string]uint64),
-		ObjectsSizesHistogram: make(map[string]uint64),
-	}
-
-	walkFn := func(origPath string, typ os.FileMode) error {
-
-		select {
-		case <-GlobalServiceDoneCh:
-			return filepath.SkipDir
-		default:
-		}
-
-		if err := s.waitForLowActiveIO(); err != nil {
-			return filepath.SkipDir
-		}
-
-		path := strings.TrimPrefix(origPath, s.diskPath)
-		path = strings.TrimPrefix(path, SlashSeparator)
-
-		splits := splitN(path, SlashSeparator, 2)
-
-		bucket := splits[0]
-		prefix := splits[1]
-
-		if bucket == "" {
-			return nil
-		}
-
-		if isReservedOrInvalidBucket(bucket, false) {
-			return nil
-		}
-
-		if prefix == "" {
-			dataUsageInfoMu.Lock()
-			dataUsageInfo.BucketsCount++
-			dataUsageInfo.BucketsSizes[bucket] = 0
-			dataUsageInfoMu.Unlock()
-			return nil
-		}
-
-		if strings.HasSuffix(prefix, SlashSeparator+xlMetaJSONFile) {
-			xlMetaBuf, err := ioutil.ReadFile(origPath)
-			if err != nil {
-				return nil
-			}
-			meta, err := xlMetaV1UnmarshalJSON(context.Background(), xlMetaBuf)
-			if err != nil {
-				return nil
-			}
-
-			dataUsageInfoMu.Lock()
-			dataUsageInfo.ObjectsCount++
-			dataUsageInfo.ObjectsTotalSize += uint64(meta.Stat.Size)
-			dataUsageInfo.BucketsSizes[bucket] += uint64(meta.Stat.Size)
-			dataUsageInfo.ObjectsSizesHistogram[objSizeToHistoInterval(uint64(meta.Stat.Size))]++
-			dataUsageInfoMu.Unlock()
-		}
-
-		return nil
-	}
-
-	fastWalk(s.diskPath, walkFn)
+	dataUsageInfo := updateUsage(s.diskPath, endCh, s.waitForLowActiveIO, func(item Item) (int64, error) {
+		// Look for `xl.json' at the leaf.
+		if !strings.HasSuffix(item.Path, SlashSeparator+xlMetaJSONFile) {
+			// if no xl.json found, skip the file.
+			return 0, errSkipFile
+		}
+
+		xlMetaBuf, err := ioutil.ReadFile(item.Path)
+		if err != nil {
+			return 0, errSkipFile
+		}
+
+		meta, err := xlMetaV1UnmarshalJSON(context.Background(), xlMetaBuf)
+		if err != nil {
+			return 0, errSkipFile
+		}
+
+		return meta.Stat.Size, nil
+	})
 
 	dataUsageInfo.LastUpdate = UTCNow()
 	atomic.StoreUint64(&s.totalUsed, dataUsageInfo.ObjectsTotalSize)
 	return dataUsageInfo, nil
 }
cmd/utils.go (42 lines changed)
@@ -67,28 +67,24 @@ func request2BucketObjectName(r *http.Request) (bucketName, objectName string) {
 	if err != nil {
 		logger.CriticalIf(context.Background(), err)
 	}
-	return urlPath2BucketObjectName(path)
+
+	return path2BucketObject(path)
 }
 
-// Convert url path into bucket and object name.
-func urlPath2BucketObjectName(path string) (bucketName, objectName string) {
-	if path == "" || path == SlashSeparator {
-		return "", ""
-	}
-
-	// Trim any preceding slash separator.
-	urlPath := strings.TrimPrefix(path, SlashSeparator)
-
-	// Split urlpath using slash separator into a given number of
-	// expected tokens.
-	tokens := strings.SplitN(urlPath, SlashSeparator, 2)
-	bucketName = tokens[0]
-	if len(tokens) == 2 {
-		objectName = tokens[1]
-	}
-
-	// Success.
-	return bucketName, objectName
+// path2BucketObjectWithBasePath returns bucket and prefix, if any,
+// of a 'path'. basePath is trimmed from the front of the 'path'.
+func path2BucketObjectWithBasePath(basePath, path string) (bucket, prefix string) {
+	path = strings.TrimPrefix(path, basePath)
+	path = strings.TrimPrefix(path, SlashSeparator)
+	m := strings.Index(path, SlashSeparator)
+	if m < 0 {
+		return path, ""
+	}
+	return path[:m], path[m+len(SlashSeparator):]
+}
+
+func path2BucketObject(s string) (bucket, prefix string) {
+	return path2BucketObjectWithBasePath("", s)
 }
 
 // URI scheme constants.
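For reference, the behavior of the new helper on a few inputs, consistent with the test table further down in this diff (shown as a tiny standalone program with a local copy of the function; slashSeparator stands in for MinIO's SlashSeparator constant):

package main

import (
	"fmt"
	"strings"
)

const slashSeparator = "/"

// Local copy of the helper introduced above: trim basePath, trim one
// leading slash, then split on the first remaining separator.
func path2BucketObjectWithBasePath(basePath, path string) (bucket, prefix string) {
	path = strings.TrimPrefix(path, basePath)
	path = strings.TrimPrefix(path, slashSeparator)
	m := strings.Index(path, slashSeparator)
	if m < 0 {
		return path, ""
	}
	return path[:m], path[m+len(slashSeparator):]
}

func main() {
	fmt.Println(path2BucketObjectWithBasePath("", "/bucket/object/1/")) // bucket object/1/
	fmt.Println(path2BucketObjectWithBasePath("", "/bucket"))           // bucket
	fmt.Println(path2BucketObjectWithBasePath("/upload", "/upload/b/o")) // b o
}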
@@ -553,16 +549,6 @@ func getMinioMode() string {
 	return mode
 }
 
-func splitN(str, delim string, num int) []string {
-	stdSplit := strings.SplitN(str, delim, num)
-	retSplit := make([]string, num)
-	for i := 0; i < len(stdSplit); i++ {
-		retSplit[i] = stdSplit[i]
-	}
-
-	return retSplit
-}
-
 func iamPolicyClaimName() string {
 	return globalOpenIDConfig.ClaimPrefix + globalOpenIDConfig.ClaimName
 }
@@ -107,85 +107,74 @@ func TestMaxPartID(t *testing.T) {
 	}
 }
 
-// Tests extracting bucket and objectname from various types of URL paths.
-func TestURL2BucketObjectName(t *testing.T) {
+// Tests extracting bucket and objectname from various types of paths.
+func TestPath2BucketObjectName(t *testing.T) {
 	testCases := []struct {
-		u              *url.URL
+		path           string
 		bucket, object string
 	}{
 		// Test case 1 normal case.
 		{
-			u: &url.URL{
-				Path: "/bucket/object",
-			},
+			path:   "/bucket/object",
 			bucket: "bucket",
 			object: "object",
 		},
 		// Test case 2 where url only has separator.
 		{
-			u: &url.URL{
-				Path: SlashSeparator,
-			},
+			path:   SlashSeparator,
 			bucket: "",
 			object: "",
 		},
 		// Test case 3 only bucket is present.
 		{
-			u: &url.URL{
-				Path: "/bucket",
-			},
+			path:   "/bucket",
 			bucket: "bucket",
 			object: "",
 		},
 		// Test case 4 many separators and object is a directory.
 		{
-			u: &url.URL{
-				Path: "/bucket/object/1/",
-			},
+			path:   "/bucket/object/1/",
 			bucket: "bucket",
 			object: "object/1/",
 		},
 		// Test case 5 object has many trailing separators.
 		{
-			u: &url.URL{
-				Path: "/bucket/object/1///",
-			},
+			path:   "/bucket/object/1///",
 			bucket: "bucket",
 			object: "object/1///",
 		},
		// Test case 6 object has only trailing separators.
 		{
-			u: &url.URL{
-				Path: "/bucket/object///////",
-			},
+			path:   "/bucket/object///////",
 			bucket: "bucket",
 			object: "object///////",
 		},
 		// Test case 7 object has preceding separators.
 		{
-			u: &url.URL{
-				Path: "/bucket////object////",
-			},
+			path:   "/bucket////object////",
 			bucket: "bucket",
 			object: "///object////",
 		},
-		// Test case 9 url path is empty.
+		// Test case 8 url path is empty.
 		{
-			u:      &url.URL{},
+			path:   "",
 			bucket: "",
 			object: "",
 		},
 	}
 
 	// Validate all test cases.
-	for i, testCase := range testCases {
-		bucketName, objectName := urlPath2BucketObjectName(testCase.u.Path)
-		if bucketName != testCase.bucket {
-			t.Errorf("Test %d: failed expected bucket name \"%s\", got \"%s\"", i+1, testCase.bucket, bucketName)
-		}
-		if objectName != testCase.object {
-			t.Errorf("Test %d: failed expected bucket name \"%s\", got \"%s\"", i+1, testCase.object, objectName)
-		}
+	for _, testCase := range testCases {
+		testCase := testCase
+		t.Run("", func(t *testing.T) {
+			bucketName, objectName := path2BucketObject(testCase.path)
+			if bucketName != testCase.bucket {
+				t.Errorf("failed expected bucket name \"%s\", got \"%s\"", testCase.bucket, bucketName)
+			}
+			if objectName != testCase.object {
+				t.Errorf("failed expected bucket name \"%s\", got \"%s\"", testCase.object, objectName)
			}
		})
 	}
 }
@@ -1723,7 +1723,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB
 
 	reply.UIVersion = browser.UIVersion
 	for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, args.BucketName, "") {
-		bucketName, objectPrefix := urlPath2BucketObjectName(prefix)
+		bucketName, objectPrefix := path2BucketObject(prefix)
 		objectPrefix = strings.TrimSuffix(objectPrefix, "*")
 		reply.Policies = append(reply.Policies, BucketAccessPolicy{
 			Bucket: bucketName,
@@ -435,6 +435,10 @@ func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo {
 	return storageInfo
 }
 
+func (s *xlSets) CrawlAndGetDataUsage(ctx context.Context, endCh <-chan struct{}) DataUsageInfo {
+	return DataUsageInfo{}
+}
+
 // Shutdown shutsdown all erasure coded sets in parallel
 // returns error upon first error.
 func (s *xlSets) Shutdown(ctx context.Context) error {
@@ -192,15 +192,14 @@ func (xl xlObjects) StorageInfo(ctx context.Context) StorageInfo {
 	return getStorageInfo(xl.getDisks())
 }
 
-// GetMetrics - no op
+// GetMetrics - is not implemented and shouldn't be called.
 func (xl xlObjects) GetMetrics(ctx context.Context) (*Metrics, error) {
 	logger.LogIf(ctx, NotImplemented{})
 	return &Metrics{}, NotImplemented{}
 }
 
-// crawlAndGetDataUsage picks three random disks to crawl and get data usage
-func (xl xlObjects) crawlAndGetDataUsage(ctx context.Context, endCh <-chan struct{}) DataUsageInfo {
-
+// CrawlAndGetDataUsage picks three random disks to crawl and get data usage
+func (xl xlObjects) CrawlAndGetDataUsage(ctx context.Context, endCh <-chan struct{}) DataUsageInfo {
 	var randomDisks []StorageAPI
 	for _, d := range xl.getLoadBalancedDisks() {
 		if d == nil || !d.IsOnline() {
@@ -212,7 +212,7 @@ func (z *xlZones) StorageInfo(ctx context.Context) StorageInfo {
 	return storageInfo
 }
 
-func (z *xlZones) crawlAndGetDataUsage(ctx context.Context, endCh <-chan struct{}) DataUsageInfo {
+func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, endCh <-chan struct{}) DataUsageInfo {
 	var aggDataUsageInfo = struct {
 		sync.Mutex
 		DataUsageInfo
@@ -227,7 +227,7 @@ func (z *xlZones) crawlAndGetDataUsage(ctx context.Context, endCh <-chan struct{
 		wg.Add(1)
 		go func(xl *xlObjects) {
 			defer wg.Done()
-			info := xl.crawlAndGetDataUsage(ctx, endCh)
+			info := xl.CrawlAndGetDataUsage(ctx, endCh)
 
 			aggDataUsageInfo.Lock()
 			aggDataUsageInfo.ObjectsCount += info.ObjectsCount