2016-05-20 23:48:47 -04:00
|
|
|
/*
|
2019-04-09 14:39:42 -04:00
|
|
|
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
|
2016-05-20 23:48:47 -04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-05-20 23:48:47 -04:00
|
|
|
|
|
|
|
import (
|
2018-03-14 15:01:47 -04:00
|
|
|
"context"
|
2020-04-27 13:06:21 -04:00
|
|
|
"errors"
|
2020-03-18 19:19:29 -04:00
|
|
|
"fmt"
|
2016-05-26 17:13:10 -04:00
|
|
|
"sort"
|
2019-12-12 09:02:37 -05:00
|
|
|
"sync"
|
2020-03-18 19:19:29 -04:00
|
|
|
"time"
|
2016-05-26 17:13:10 -04:00
|
|
|
|
2018-04-05 18:04:40 -04:00
|
|
|
"github.com/minio/minio/cmd/logger"
|
2018-02-15 20:45:57 -05:00
|
|
|
"github.com/minio/minio/pkg/bpool"
|
2020-04-27 13:06:21 -04:00
|
|
|
"github.com/minio/minio/pkg/color"
|
2019-11-13 15:17:45 -05:00
|
|
|
"github.com/minio/minio/pkg/dsync"
|
2019-10-23 00:01:14 -04:00
|
|
|
"github.com/minio/minio/pkg/madmin"
|
2019-10-14 12:44:51 -04:00
|
|
|
"github.com/minio/minio/pkg/sync/errgroup"
|
2016-05-20 23:48:47 -04:00
|
|
|
)
|
|
|
|
|
2016-05-30 19:51:59 -04:00
|
|
|
// XL constants.
const (
	// XL metadata file carries per object metadata.
	// xlMetaJSONFile is the literal file name "xl.json".
	xlMetaJSONFile = "xl.json"
)
|
|
|
|
|
2018-08-24 02:35:37 -04:00
|
|
|
// OfflineDisk represents an unavailable disk.
// Being an interface-typed var, its zero value compares equal to nil.
var OfflineDisk StorageAPI // zero value is nil
|
|
|
|
|
2020-01-15 21:30:32 -05:00
|
|
|
// partialUpload is a successful upload of an object
// but not written in all disks (having quorum)
type partialUpload struct {
	// bucket holding the partially written object.
	bucket string
	// object key within the bucket.
	object string
	// failedSet is the index of the set that did not receive the full
	// write — presumably used by a heal/retry consumer; confirm against
	// the reader of xlObjects.mrfUploadCh.
	failedSet int
}
|
|
|
|
|
2016-05-30 19:51:59 -04:00
|
|
|
// xlObjects - Implements XL object layer.
type xlObjects struct {
	// GatewayUnsupported supplies default (unsupported) implementations
	// for gateway-only API methods.
	GatewayUnsupported

	// getDisks returns list of storageAPIs.
	getDisks func() []StorageAPI

	// getLockers returns list of remote and local lockers.
	getLockers func() []dsync.NetLocker

	// Locker mutex map.
	nsMutex *nsLockMap

	// Byte pools used for temporary i/o buffers.
	bp *bpool.BytePoolCap

	// TODO: ListObjects pool management, should be removed in future.
	listPool *TreeWalkPool

	// mrfUploadCh carries uploads that succeeded with quorum but missed
	// some disks — presumably drained by a background heal worker;
	// confirm against the channel's consumer.
	mrfUploadCh chan partialUpload
}
|
|
|
|
|
2019-11-13 15:17:45 -05:00
|
|
|
// NewNSLock - initialize a new namespace RWLocker instance.
|
2020-02-21 00:59:57 -05:00
|
|
|
func (xl xlObjects) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
|
|
|
|
return xl.nsMutex.NewNSLock(ctx, xl.getLockers, bucket, objects...)
|
2019-11-13 15:17:45 -05:00
|
|
|
}
|
|
|
|
|
2016-08-15 02:55:48 -04:00
|
|
|
// Shutdown function for object storage interface.
// Always returns nil; per-disk close errors are not surfaced here.
func (xl xlObjects) Shutdown(ctx context.Context) error {
	// Add any object layer shutdown activities here.
	closeStorageDisks(xl.getDisks())
	return nil
}
|
|
|
|
|
2016-05-26 17:13:10 -04:00
|
|
|
// byDiskTotal is a collection satisfying sort.Interface.
|
2018-05-23 06:11:29 -04:00
|
|
|
type byDiskTotal []DiskInfo
|
2016-05-26 17:13:10 -04:00
|
|
|
|
|
|
|
func (d byDiskTotal) Len() int { return len(d) }
|
|
|
|
func (d byDiskTotal) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
|
|
|
|
func (d byDiskTotal) Less(i, j int) bool {
|
|
|
|
return d[i].Total < d[j].Total
|
|
|
|
}
|
|
|
|
|
2016-10-05 15:48:07 -04:00
|
|
|
// getDisksInfo - fetch disks info across all other storage API.
|
2020-05-28 16:03:04 -04:00
|
|
|
func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, errs []error, onlineDisks, offlineDisks madmin.BackendDisks) {
|
2018-05-23 06:11:29 -04:00
|
|
|
disksInfo = make([]DiskInfo, len(disks))
|
2020-05-28 16:03:04 -04:00
|
|
|
errs = make([]error, len(disks))
|
2019-10-14 12:44:51 -04:00
|
|
|
|
|
|
|
g := errgroup.WithNErrs(len(disks))
|
|
|
|
for index := range disks {
|
|
|
|
index := index
|
|
|
|
g.Go(func() error {
|
|
|
|
if disks[index] == nil {
|
|
|
|
// Storage disk is empty, perhaps ignored disk or not available.
|
|
|
|
return errDiskNotFound
|
|
|
|
}
|
|
|
|
info, err := disks[index].DiskInfo()
|
2019-08-22 23:02:40 -04:00
|
|
|
if err != nil {
|
2020-05-28 16:03:04 -04:00
|
|
|
if !IsErr(err, baseErrs...) {
|
|
|
|
reqInfo := (&logger.ReqInfo{}).AppendTags("disk", disks[index].String())
|
|
|
|
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
|
|
|
|
logger.LogIf(ctx, err)
|
2019-08-22 23:02:40 -04:00
|
|
|
}
|
2020-05-28 16:03:04 -04:00
|
|
|
return err
|
2016-10-05 15:48:07 -04:00
|
|
|
}
|
2019-10-14 12:44:51 -04:00
|
|
|
disksInfo[index] = info
|
|
|
|
return nil
|
|
|
|
}, index)
|
2016-05-26 17:13:10 -04:00
|
|
|
}
|
2019-08-23 20:03:15 -04:00
|
|
|
|
2019-10-23 00:01:14 -04:00
|
|
|
onlineDisks = make(madmin.BackendDisks)
|
|
|
|
offlineDisks = make(madmin.BackendDisks)
|
2020-02-19 22:51:33 -05:00
|
|
|
|
2020-05-28 16:03:04 -04:00
|
|
|
errs = g.Wait()
|
2019-10-23 00:01:14 -04:00
|
|
|
// Wait for the routines.
|
2020-05-28 16:03:04 -04:00
|
|
|
for i, diskInfoErr := range errs {
|
2020-05-19 17:27:20 -04:00
|
|
|
if disks[i] == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
peerAddr := disks[i].Hostname()
|
2019-10-23 00:01:14 -04:00
|
|
|
if _, ok := offlineDisks[peerAddr]; !ok {
|
|
|
|
offlineDisks[peerAddr] = 0
|
|
|
|
}
|
|
|
|
if _, ok := onlineDisks[peerAddr]; !ok {
|
|
|
|
onlineDisks[peerAddr] = 0
|
|
|
|
}
|
2020-03-04 19:18:32 -05:00
|
|
|
if disks[i] == nil || diskInfoErr != nil {
|
2019-10-23 00:01:14 -04:00
|
|
|
offlineDisks[peerAddr]++
|
2019-10-31 12:13:54 -04:00
|
|
|
continue
|
2019-10-23 00:01:14 -04:00
|
|
|
}
|
|
|
|
onlineDisks[peerAddr]++
|
2019-08-23 20:03:15 -04:00
|
|
|
}
|
|
|
|
|
2020-05-19 17:27:20 -04:00
|
|
|
// Iterate over the passed endpoints arguments and check
|
|
|
|
// if there are still disks missing from the offline/online lists
|
|
|
|
// and update them accordingly.
|
|
|
|
missingOfflineDisks := make(map[string]int)
|
|
|
|
for _, zone := range globalEndpoints {
|
|
|
|
for _, endpoint := range zone.Endpoints {
|
|
|
|
if _, ok := offlineDisks[endpoint.Host]; !ok {
|
|
|
|
missingOfflineDisks[endpoint.Host]++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for missingDisk, n := range missingOfflineDisks {
|
|
|
|
onlineDisks[missingDisk] = 0
|
|
|
|
offlineDisks[missingDisk] = n
|
|
|
|
}
|
|
|
|
|
2016-10-05 15:48:07 -04:00
|
|
|
// Success.
|
2020-05-28 16:03:04 -04:00
|
|
|
return disksInfo, errs, onlineDisks, offlineDisks
|
2016-10-05 15:48:07 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Get an aggregated storage info across all disks.
|
2020-05-28 16:03:04 -04:00
|
|
|
func getStorageInfo(disks []StorageAPI) (StorageInfo, []error) {
|
|
|
|
disksInfo, errs, onlineDisks, offlineDisks := getDisksInfo(disks)
|
2016-10-17 17:31:33 -04:00
|
|
|
|
|
|
|
// Sort so that the first element is the smallest.
|
2020-01-10 05:35:06 -05:00
|
|
|
sort.Sort(byDiskTotal(disksInfo))
|
2016-10-17 17:31:33 -04:00
|
|
|
|
2019-04-05 00:21:50 -04:00
|
|
|
// Combine all disks to get total usage
|
2020-01-10 05:35:06 -05:00
|
|
|
usedList := make([]uint64, len(disksInfo))
|
|
|
|
totalList := make([]uint64, len(disksInfo))
|
|
|
|
availableList := make([]uint64, len(disksInfo))
|
|
|
|
mountPaths := make([]string, len(disksInfo))
|
2019-10-23 00:01:14 -04:00
|
|
|
|
2020-01-10 05:35:06 -05:00
|
|
|
for i, di := range disksInfo {
|
2019-10-23 00:01:14 -04:00
|
|
|
usedList[i] = di.Used
|
|
|
|
totalList[i] = di.Total
|
|
|
|
availableList[i] = di.Free
|
2020-02-19 22:51:33 -05:00
|
|
|
mountPaths[i] = di.MountPath
|
2018-05-23 06:11:29 -04:00
|
|
|
}
|
|
|
|
|
2019-02-13 07:59:36 -05:00
|
|
|
storageInfo := StorageInfo{
|
2019-10-23 00:01:14 -04:00
|
|
|
Used: usedList,
|
|
|
|
Total: totalList,
|
|
|
|
Available: availableList,
|
|
|
|
MountPaths: mountPaths,
|
2019-02-13 07:59:36 -05:00
|
|
|
}
|
2019-04-05 00:21:50 -04:00
|
|
|
|
2018-08-24 02:35:37 -04:00
|
|
|
storageInfo.Backend.Type = BackendErasure
|
2016-10-05 15:48:07 -04:00
|
|
|
storageInfo.Backend.OnlineDisks = onlineDisks
|
|
|
|
storageInfo.Backend.OfflineDisks = offlineDisks
|
2017-12-22 06:28:13 -05:00
|
|
|
|
2020-05-28 16:03:04 -04:00
|
|
|
return storageInfo, errs
|
2016-10-05 15:48:07 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// StorageInfo - returns underlying storage statistics.
|
2020-05-28 16:03:04 -04:00
|
|
|
func (xl xlObjects) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) {
|
2020-04-25 22:48:07 -04:00
|
|
|
|
|
|
|
disks := xl.getDisks()
|
|
|
|
if local {
|
|
|
|
var localDisks []StorageAPI
|
2020-05-19 17:27:20 -04:00
|
|
|
for _, disk := range disks {
|
2020-04-25 22:48:07 -04:00
|
|
|
if disk != nil {
|
2020-05-19 17:27:20 -04:00
|
|
|
if disk.IsLocal() {
|
2020-04-25 22:48:07 -04:00
|
|
|
// Append this local disk since local flag is true
|
|
|
|
localDisks = append(localDisks, disk)
|
|
|
|
}
|
2020-02-19 22:51:33 -05:00
|
|
|
}
|
|
|
|
}
|
2020-04-25 22:48:07 -04:00
|
|
|
disks = localDisks
|
2020-02-19 22:51:33 -05:00
|
|
|
}
|
2020-05-19 17:27:20 -04:00
|
|
|
return getStorageInfo(disks)
|
2016-05-26 17:13:10 -04:00
|
|
|
}
|
2019-12-06 02:16:06 -05:00
|
|
|
|
2020-01-21 17:07:49 -05:00
|
|
|
// GetMetrics - is not implemented and shouldn't be called.
|
2019-12-06 02:16:06 -05:00
|
|
|
func (xl xlObjects) GetMetrics(ctx context.Context) (*Metrics, error) {
|
|
|
|
logger.LogIf(ctx, NotImplemented{})
|
|
|
|
return &Metrics{}, NotImplemented{}
|
|
|
|
}
|
2019-12-12 09:02:37 -05:00
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed.
// Updates are sent on a regular basis and the caller *must* consume them.
//
// NOTE(review): at this layer the method unconditionally returns an error;
// crawling appears to be driven from the zones setup instead — see the
// comment in the body.
func (xl xlObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
	// This should only be called from runDataUsageInfo and this setup should not happen (zones).
	return errors.New("xlObjects CrawlAndGetDataUsage not implemented")
}
|
|
|
|
|
|
|
|
// CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed.
|
|
|
|
// Updates are sent on a regular basis and the caller *must* consume them.
|
2020-04-27 13:06:21 -04:00
|
|
|
func (xl xlObjects) crawlAndGetDataUsage(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error {
|
2020-03-18 19:19:29 -04:00
|
|
|
var disks []StorageAPI
|
|
|
|
|
2019-12-12 09:02:37 -05:00
|
|
|
for _, d := range xl.getLoadBalancedDisks() {
|
|
|
|
if d == nil || !d.IsOnline() {
|
|
|
|
continue
|
|
|
|
}
|
2020-03-18 19:19:29 -04:00
|
|
|
disks = append(disks, d)
|
|
|
|
}
|
|
|
|
if len(disks) == 0 || len(buckets) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load bucket totals
|
|
|
|
oldCache := dataUsageCache{}
|
|
|
|
err := oldCache.load(ctx, xl, dataUsageCacheName)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2019-12-12 09:02:37 -05:00
|
|
|
}
|
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// New cache..
|
|
|
|
cache := dataUsageCache{
|
|
|
|
Info: dataUsageCacheInfo{
|
|
|
|
Name: dataUsageRoot,
|
|
|
|
NextCycle: oldCache.Info.NextCycle,
|
|
|
|
},
|
|
|
|
Cache: make(map[dataUsageHash]dataUsageEntry, len(oldCache.Cache)),
|
|
|
|
}
|
2019-12-12 09:02:37 -05:00
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// Put all buckets into channel.
|
|
|
|
bucketCh := make(chan BucketInfo, len(buckets))
|
|
|
|
// Add new buckets first
|
|
|
|
for _, b := range buckets {
|
|
|
|
if oldCache.find(b.Name) == nil {
|
|
|
|
bucketCh <- b
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Add existing buckets.
|
|
|
|
for _, b := range buckets {
|
|
|
|
e := oldCache.find(b.Name)
|
|
|
|
if e != nil {
|
2020-04-27 13:06:21 -04:00
|
|
|
if bf == nil || bf.containsDir(b.Name) {
|
|
|
|
bucketCh <- b
|
|
|
|
cache.replace(b.Name, dataUsageRoot, *e)
|
|
|
|
} else {
|
|
|
|
if intDataUpdateTracker.debug {
|
|
|
|
logger.Info(color.Green("crawlAndGetDataUsage:")+" Skipping bucket %v, not updated", b.Name)
|
|
|
|
}
|
|
|
|
}
|
2020-03-18 19:19:29 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
close(bucketCh)
|
|
|
|
bucketResults := make(chan dataUsageEntryInfo, len(disks))
|
|
|
|
|
|
|
|
// Start async collector/saver.
|
|
|
|
// This goroutine owns the cache.
|
|
|
|
var saverWg sync.WaitGroup
|
|
|
|
saverWg.Add(1)
|
|
|
|
go func() {
|
|
|
|
const updateTime = 30 * time.Second
|
|
|
|
t := time.NewTicker(updateTime)
|
|
|
|
defer t.Stop()
|
|
|
|
defer saverWg.Done()
|
|
|
|
var lastSave time.Time
|
|
|
|
|
|
|
|
saveLoop:
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
// Return without saving.
|
|
|
|
return
|
|
|
|
case <-t.C:
|
|
|
|
if cache.Info.LastUpdate.Equal(lastSave) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
logger.LogIf(ctx, cache.save(ctx, xl, dataUsageCacheName))
|
|
|
|
updates <- cache.clone()
|
|
|
|
lastSave = cache.Info.LastUpdate
|
|
|
|
case v, ok := <-bucketResults:
|
|
|
|
if !ok {
|
|
|
|
break saveLoop
|
|
|
|
}
|
|
|
|
cache.replace(v.Name, v.Parent, v.Entry)
|
|
|
|
cache.Info.LastUpdate = time.Now()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Save final state...
|
|
|
|
cache.Info.NextCycle++
|
|
|
|
cache.Info.LastUpdate = time.Now()
|
|
|
|
logger.LogIf(ctx, cache.save(ctx, xl, dataUsageCacheName))
|
2020-04-27 13:06:21 -04:00
|
|
|
if intDataUpdateTracker.debug {
|
|
|
|
logger.Info(color.Green("crawlAndGetDataUsage:")+" Cache saved, Next Cycle: %d", cache.Info.NextCycle)
|
|
|
|
}
|
2020-03-18 19:19:29 -04:00
|
|
|
updates <- cache
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Start one crawler per disk
|
2019-12-12 09:02:37 -05:00
|
|
|
var wg sync.WaitGroup
|
2020-03-18 19:19:29 -04:00
|
|
|
wg.Add(len(disks))
|
|
|
|
for i := range disks {
|
|
|
|
go func(i int) {
|
2019-12-12 09:02:37 -05:00
|
|
|
defer wg.Done()
|
2020-03-18 19:19:29 -04:00
|
|
|
disk := disks[i]
|
|
|
|
|
|
|
|
for bucket := range bucketCh {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
2020-03-19 12:47:47 -04:00
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
// Load cache for bucket
|
2020-03-19 12:47:47 -04:00
|
|
|
cacheName := pathJoin(bucket.Name, dataUsageCacheName)
|
2020-03-18 19:19:29 -04:00
|
|
|
cache := dataUsageCache{}
|
|
|
|
logger.LogIf(ctx, cache.load(ctx, xl, cacheName))
|
|
|
|
if cache.Info.Name == "" {
|
|
|
|
cache.Info.Name = bucket.Name
|
|
|
|
}
|
|
|
|
if cache.Info.Name != bucket.Name {
|
|
|
|
logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))
|
|
|
|
cache.Info = dataUsageCacheInfo{
|
|
|
|
Name: bucket.Name,
|
|
|
|
LastUpdate: time.Time{},
|
|
|
|
NextCycle: 0,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Calc usage
|
|
|
|
before := cache.Info.LastUpdate
|
2020-04-27 13:06:21 -04:00
|
|
|
if bf != nil {
|
|
|
|
cache.Info.BloomFilter = bf.bytes()
|
|
|
|
}
|
2020-03-18 19:19:29 -04:00
|
|
|
cache, err = disk.CrawlAndGetDataUsage(ctx, cache)
|
2020-04-27 13:06:21 -04:00
|
|
|
cache.Info.BloomFilter = nil
|
2020-03-18 19:19:29 -04:00
|
|
|
if err != nil {
|
|
|
|
logger.LogIf(ctx, err)
|
|
|
|
if cache.Info.LastUpdate.After(before) {
|
|
|
|
logger.LogIf(ctx, cache.save(ctx, xl, cacheName))
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
var root dataUsageEntry
|
|
|
|
if r := cache.root(); r != nil {
|
|
|
|
root = cache.flatten(*r)
|
|
|
|
}
|
|
|
|
bucketResults <- dataUsageEntryInfo{
|
|
|
|
Name: cache.Info.Name,
|
|
|
|
Parent: dataUsageRoot,
|
|
|
|
Entry: root,
|
|
|
|
}
|
|
|
|
// Save cache
|
|
|
|
logger.LogIf(ctx, cache.save(ctx, xl, cacheName))
|
2019-12-12 09:02:37 -05:00
|
|
|
}
|
2020-03-18 19:19:29 -04:00
|
|
|
}(i)
|
2019-12-12 09:02:37 -05:00
|
|
|
}
|
|
|
|
wg.Wait()
|
2020-03-18 19:19:29 -04:00
|
|
|
close(bucketResults)
|
|
|
|
saverWg.Wait()
|
2019-12-12 09:02:37 -05:00
|
|
|
|
2020-03-18 19:19:29 -04:00
|
|
|
return nil
|
2019-12-12 09:02:37 -05:00
|
|
|
}
|
2019-12-28 11:54:43 -05:00
|
|
|
|
|
|
|
// IsReady - No Op.
func (xl xlObjects) IsReady(ctx context.Context) bool {
	// Not expected to be invoked at this layer.
	// NOTE(review): logger.CriticalIf may abort the process on a non-nil
	// error — confirm; if so, the return below is never reached.
	logger.CriticalIf(ctx, NotImplemented{})
	return true
}
|