/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
|
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"encoding/json"
|
2020-01-21 17:07:49 -05:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2019-12-12 09:02:37 -05:00
|
|
|
"time"
|
|
|
|
|
2020-01-08 06:31:43 -05:00
|
|
|
jsoniter "github.com/json-iterator/go"
|
2019-12-12 09:02:37 -05:00
|
|
|
"github.com/minio/minio/cmd/logger"
|
|
|
|
"github.com/minio/minio/pkg/hash"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// dataUsageObjName is the object name under which the serialized
	// DataUsageInfo is stored (see storeDataUsageInBackend).
	dataUsageObjName = "data-usage"

	// dataUsageCrawlInterval is how often the namespace crawl should
	// run; timeToCrawl schedules the next run relative to this.
	dataUsageCrawlInterval = 12 * time.Hour
)
|
|
|
|
|
|
|
|
// initDataUsageStats launches the background goroutine that waits for
// the object layer to come online and then runs the periodic data
// usage crawler for the lifetime of the server.
func initDataUsageStats() {
	go runDataUsageInfoUpdateRoutine()
}
|
|
|
|
|
|
|
|
func runDataUsageInfoUpdateRoutine() {
|
|
|
|
// Wait until the object layer is ready
|
|
|
|
var objAPI ObjectLayer
|
|
|
|
for {
|
|
|
|
objAPI = newObjectLayerWithoutSafeModeFn()
|
|
|
|
if objAPI == nil {
|
|
|
|
time.Sleep(time.Second)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
2020-01-21 17:07:49 -05:00
|
|
|
runDataUsageInfo(context.Background(), objAPI, GlobalServiceDoneCh)
|
2019-12-12 09:02:37 -05:00
|
|
|
}
|
|
|
|
|
2020-01-21 17:07:49 -05:00
|
|
|
// timeToNextCrawl returns the duration until next crawl should occur
|
|
|
|
// this is validated by verifying the LastUpdate time.
|
|
|
|
func timeToCrawl(ctx context.Context, objAPI ObjectLayer) time.Duration {
|
|
|
|
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objAPI)
|
|
|
|
if err != nil {
|
|
|
|
// Upon an error wait for like 10
|
|
|
|
// seconds to start the crawler.
|
|
|
|
return 10 * time.Second
|
|
|
|
}
|
|
|
|
// File indeed doesn't exist when LastUpdate is zero
|
|
|
|
// so we have never crawled, start crawl right away.
|
|
|
|
if dataUsageInfo.LastUpdate.IsZero() {
|
|
|
|
return 1 * time.Second
|
2019-12-12 09:02:37 -05:00
|
|
|
}
|
2020-01-21 17:07:49 -05:00
|
|
|
waitDuration := dataUsageInfo.LastUpdate.Sub(UTCNow())
|
|
|
|
if waitDuration > dataUsageCrawlInterval {
|
|
|
|
// Waited long enough start crawl in a 1 second
|
|
|
|
return 1 * time.Second
|
|
|
|
}
|
|
|
|
// No crawling needed, ask the routine to wait until
|
|
|
|
// the daily interval 12hrs - delta between last update
|
|
|
|
// with current time.
|
|
|
|
return dataUsageCrawlInterval - waitDuration
|
2019-12-12 09:02:37 -05:00
|
|
|
}
|
|
|
|
|
2020-01-21 17:07:49 -05:00
|
|
|
// runDataUsageInfo elects this node as the cluster-wide data usage
// calculator by acquiring a namespace lock, then loops forever:
// sleeping until the next scheduled crawl, crawling, and persisting
// the result — until endCh signals shutdown.
func runDataUsageInfo(ctx context.Context, objAPI ObjectLayer, endCh <-chan struct{}) {
	// Cluster-wide leader lock: only one node runs the crawler.
	locker := objAPI.NewNSLock(ctx, minioMetaBucket, "leader-data-usage-info")
	for {
		// Very short lock timeout: if another node holds the lock,
		// fail fast here and retry after a long sleep.
		err := locker.GetLock(newDynamicTimeout(time.Millisecond, time.Millisecond))
		if err != nil {
			time.Sleep(5 * time.Minute)
			continue
		}
		// Break without unlocking, this node will acquire
		// data usage calculator role for its lifetime.
		break
	}

	for {
		// Duration until the next crawl, derived from the stored
		// LastUpdate timestamp.
		wait := timeToCrawl(ctx, objAPI)
		select {
		case <-endCh:
			// Shutdown: release the leader lock before exiting.
			locker.Unlock()
			return
		case <-time.NewTimer(wait).C:
			// Crawl only when no previous crawl has occurred,
			// or its been too long since last crawl.
			err := storeDataUsageInBackend(ctx, objAPI, objAPI.CrawlAndGetDataUsage(ctx, endCh))
			logger.LogIf(ctx, err)
		}
	}
}
|
|
|
|
|
|
|
|
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dataUsageInfo DataUsageInfo) error {
|
|
|
|
dataUsageJSON, err := json.Marshal(dataUsageInfo)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
size := int64(len(dataUsageJSON))
|
|
|
|
r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size, false)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = objAPI.PutObject(ctx, minioMetaBackgroundOpsBucket, dataUsageObjName, NewPutObjReader(r, nil, nil), ObjectOptions{})
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
|
|
|
|
var dataUsageInfoJSON bytes.Buffer
|
|
|
|
|
|
|
|
err := objAPI.GetObject(ctx, minioMetaBackgroundOpsBucket, dataUsageObjName, 0, -1, &dataUsageInfoJSON, "", ObjectOptions{})
|
|
|
|
if err != nil {
|
2020-01-21 17:07:49 -05:00
|
|
|
if isErrObjectNotFound(err) {
|
|
|
|
return DataUsageInfo{}, nil
|
|
|
|
}
|
|
|
|
return DataUsageInfo{}, toObjectErr(err, minioMetaBackgroundOpsBucket, dataUsageObjName)
|
2019-12-12 09:02:37 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
var dataUsageInfo DataUsageInfo
|
2020-01-08 06:31:43 -05:00
|
|
|
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
2019-12-12 09:02:37 -05:00
|
|
|
err = json.Unmarshal(dataUsageInfoJSON.Bytes(), &dataUsageInfo)
|
|
|
|
if err != nil {
|
|
|
|
return DataUsageInfo{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return dataUsageInfo, nil
|
|
|
|
}
|
2020-01-21 17:07:49 -05:00
|
|
|
|
|
|
|
// Item represents each file while walking.
type Item struct {
	// Path is the full on-disk path of the walked entry.
	Path string
	// Typ is the entry's file mode; updateUsage uses it to tell
	// directories (os.ModeDir set) apart from regular files.
	Typ os.FileMode
}

// getSizeFn reports the size in bytes to account for an item; a
// non-nil error makes updateUsage skip the file.
type getSizeFn func(item Item) (int64, error)

// activeIOFn is called before each walked entry; a non-nil error makes
// the walker skip the current directory. Presumably it throttles the
// crawl while foreground I/O is high — confirm against the callers.
type activeIOFn func() error
|
|
|
|
|
|
|
|
// updateUsage walks the namespace under basePath and accumulates a
// DataUsageInfo: bucket count, per-bucket sizes, object count/total
// size and a size histogram.
//
// Protocol: a walker goroutine sends each entry over itemCh, then
// blocks on skipCh waiting for this function's verdict (nil = keep
// walking, filepath.SkipDir / errSkipFile = prune). The two channels
// form a strict lock-step handshake — every itemCh receive below MUST
// be answered with exactly one skipCh send, or the walker deadlocks.
// Closing skipCh on return (receive from a closed channel yields nil)
// lets a walker blocked on the handshake proceed and then observe
// endCh on its next iteration.
func updateUsage(basePath string, endCh <-chan struct{}, waitForLowActiveIO activeIOFn, getSize getSizeFn) DataUsageInfo {
	var dataUsageInfo = DataUsageInfo{
		BucketsSizes:          make(map[string]uint64),
		ObjectsSizesHistogram: make(map[string]uint64),
	}

	itemCh := make(chan Item)
	skipCh := make(chan error)
	defer close(skipCh)

	go func() {
		// Closing itemCh tells the consumer loop the walk finished.
		defer close(itemCh)
		fastWalk(basePath, func(path string, typ os.FileMode) error {
			// Throttle: a non-nil error aborts this subtree.
			if err := waitForLowActiveIO(); err != nil {
				return filepath.SkipDir
			}

			select {
			case <-endCh:
				// Shutdown requested: prune instead of descending.
				return filepath.SkipDir
			case itemCh <- Item{path, typ}:
			}
			// Wait for the consumer's verdict on this entry.
			return <-skipCh
		})
	}()

	for {
		select {
		case <-endCh:
			// Shutdown: return whatever has been accumulated so far.
			return dataUsageInfo
		case item, ok := <-itemCh:
			if !ok {
				// Walk complete.
				return dataUsageInfo
			}

			bucket, entry := path2BucketObjectWithBasePath(basePath, item.Path)
			if bucket == "" {
				// Entry above bucket level — nothing to account.
				skipCh <- nil
				continue
			}

			if isReservedOrInvalidBucket(bucket, false) {
				// Reserved/invalid bucket: prune the whole subtree.
				skipCh <- filepath.SkipDir
				continue
			}

			if entry == "" && item.Typ&os.ModeDir != 0 {
				// The bucket directory itself: count it and seed its
				// size so the bucket appears even when empty.
				dataUsageInfo.BucketsCount++
				dataUsageInfo.BucketsSizes[bucket] = 0
				skipCh <- nil
				continue
			}

			if item.Typ&os.ModeDir != 0 {
				// Intermediate directory — descend without accounting.
				skipCh <- nil
				continue
			}

			size, err := getSize(item)
			if err != nil {
				// Unsized file: tell the walker to skip just this file.
				skipCh <- errSkipFile
				continue
			}

			// Regular object: fold its size into all aggregates.
			dataUsageInfo.ObjectsCount++
			dataUsageInfo.ObjectsTotalSize += uint64(size)
			dataUsageInfo.BucketsSizes[bucket] += uint64(size)
			dataUsageInfo.ObjectsSizesHistogram[objSizeToHistoInterval(uint64(size))]++
			skipCh <- nil
		}
	}
}
|